* [dpdk-dev] [RFC] net/i40e: refactor of hash flow
@ 2020-10-23 6:56 Zhang,Alvin
2020-10-23 8:42 ` Wang, ShougangX
From: Zhang,Alvin @ 2020-10-23 6:56 UTC
To: dev; +Cc: Alvin Zhang
From: Alvin Zhang <alvinx.zhang@intel.com>
1. Delete the original hash flow code.
2. Add two tables: one matching flow pattern plus RSS type to PCTYPE, the other mapping RSS type to input set.
3. Parse RSS pattern and RSS type to get PCTYPE.
4. Parse RSS action to get queues, RSS function and hash field.
5. Create and destroy RSS filters.
6. Create new files for hash flows.
7. Update doc.
Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
---
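[Note for reviewers, not part of the commit message or the patch itself]
For context, a minimal sketch of the kind of rte_flow RSS rule the new
parser consumes, mirroring the i40e.rst example updated below (spread
traffic over queues 0-3 without selecting hash types). It uses only the
public rte_flow API; "port_id" is a placeholder, and whether a given
pattern/types/queues combination is accepted is ultimately decided by the
parser added in i40e_hash.c.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: create an RSS rule that distributes ingress packets
 * across queues 0-3 on an already-configured i40e port.
 */
static struct rte_flow *
example_rss_queues_flow(uint16_t port_id)
{
	static uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.types = 0,	/* no hash types: queue configuration only */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}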
doc/guides/nics/i40e.rst | 4 +-
drivers/net/i40e/i40e_ethdev.c | 840 ++++++-------------------
drivers/net/i40e/i40e_ethdev.h | 43 +-
drivers/net/i40e/i40e_flow.c | 617 +------------------
drivers/net/i40e/i40e_hash.c | 1315 ++++++++++++++++++++++++++++++++++++++++
drivers/net/i40e/i40e_hash.h | 34 ++
drivers/net/i40e/meson.build | 1 +
7 files changed, 1587 insertions(+), 1267 deletions(-)
create mode 100644 drivers/net/i40e/i40e_hash.c
create mode 100644 drivers/net/i40e/i40e_hash.h
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index a0b81e6..077b3dc 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -585,9 +585,9 @@ Generic flow API
- ``RSS Flow``
RSS Flow supports to set hash input set, hash function, enable hash
- and configure queue region.
+ and configure queues.
For example:
- Configure queue region as queue 0, 1, 2, 3.
+ Configure queues as queue 0, 1, 2, 3.
.. code-block:: console
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4778aaf..21903be 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -39,6 +39,7 @@
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
+#include "i40e_hash.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
@@ -399,7 +400,6 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
-static int i40e_pf_config_rss(struct i40e_pf *pf);
static const char *const valid_keys[] = {
ETH_I40E_FLOATING_VEB_ARG,
@@ -1767,10 +1767,6 @@ static inline void i40e_config_automask(struct i40e_pf *pf)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
- /* initialize RSS configuration from rte_flow */
- memset(&pf->rss_info, 0,
- sizeof(struct i40e_rte_flow_rss_conf));
-
/* reset all stats of the device, including pf and main vsi */
i40e_dev_stats_reset(dev);
@@ -4542,7 +4538,6 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
{
struct i40e_pf *pf;
struct i40e_hw *hw;
- int ret;
if (!vsi || !lut)
return -EINVAL;
@@ -4551,12 +4546,16 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
hw = I40E_VSI_TO_HW(vsi);
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
- vsi->type != I40E_VSI_SRIOV,
- lut, lut_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
- return ret;
+ enum i40e_status_code status;
+
+ status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
+ vsi->type != I40E_VSI_SRIOV,
+ lut, lut_size);
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to update RSS lookup table, error status: %d",
+ status);
+ return -EIO;
}
} else {
uint32_t *lut_dw = (uint32_t *)lut;
@@ -7689,7 +7688,7 @@ struct i40e_vsi *
}
/* Disable RSS */
-static void
+void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
@@ -7707,7 +7706,6 @@ struct i40e_vsi *
uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
I40E_VFQF_HKEY_MAX_INDEX :
I40E_PFQF_HKEY_MAX_INDEX;
- int ret = 0;
if (!key || key_len == 0) {
PMD_DRV_LOG(DEBUG, "No key to be configured");
@@ -7720,11 +7718,16 @@ struct i40e_vsi *
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
struct i40e_aqc_get_set_rss_key_data *key_dw =
- (struct i40e_aqc_get_set_rss_key_data *)key;
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+ enum i40e_status_code status =
+ i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
- ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
- if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure RSS key via AQ, error status: %d",
+ status);
+ return -EIO;
+ }
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
@@ -7744,7 +7747,7 @@ struct i40e_vsi *
I40E_WRITE_FLUSH(hw);
}
- return ret;
+ return 0;
}
static int
@@ -9037,7 +9040,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
}
/* Calculate the maximum number of contiguous PF queues that are configured */
-static int
+int
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
{
struct rte_eth_dev_data *data = pf->dev_data;
@@ -9056,19 +9059,72 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
return num;
}
-/* Configure RSS */
-static int
-i40e_pf_config_rss(struct i40e_pf *pf)
+/* Reset the global configuration of hash function and input sets */
+static void
+i40e_pf_global_rss_reset(struct i40e_pf *pf)
{
- enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_rss_conf rss_conf;
- uint32_t i, lut = 0;
- uint16_t j, num;
+ uint32_t reg, reg_val;
+ int i;
- /*
- * If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
+ /* Reset global RSS function sets */
+ reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
+ reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
+ }
+
+ for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
+ uint64_t inset;
+ int j, pctype;
+
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
+ else
+ pctype = i;
+
+ /* Reset pctype insets */
+ inset = i40e_get_default_input_set(i);
+ if (inset) {
+ pf->hash_input_set[pctype] = inset;
+ inset = i40e_translate_input_set_reg(hw->mac.type,
+ inset);
+
+ reg = I40E_GLQF_HASH_INSET(0, pctype);
+ i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
+ reg = I40E_GLQF_HASH_INSET(1, pctype);
+ i40e_check_write_global_reg(hw, reg,
+ (uint32_t)(inset >> 32));
+
+ /* Clear unused mask registers of the pctype */
+ for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
+ reg = I40E_GLQF_HASH_MSK(j, pctype);
+ i40e_check_write_global_reg(hw, reg, 0);
+ }
+ }
+
+ /* Reset pctype symmetric sets */
+ reg = I40E_GLQF_HSYM(pctype);
+ reg_val = i40e_read_rx_ctl(hw, reg);
+ if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
+ reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ i40e_write_global_rx_ctl(hw, reg, reg_val);
+ }
+ }
+ I40E_WRITE_FLUSH(hw);
+}
+
+int
+i40e_pf_reset_rss_reta(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->adapter->hw;
+ uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint32_t i;
+ int num;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are
+ * configured. It's necessary to calculate the actual PF
+ * queues that are configured.
*/
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
@@ -9076,48 +9132,89 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
num = pf->dev_data->nb_rx_queues;
num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
- PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
+ if (num <= 0)
+ return 0;
- if (num == 0) {
- PMD_INIT_LOG(ERR,
- "No PF queues are configured to enable RSS for port %u",
- pf->dev_data->port_id);
- return -ENOTSUP;
- }
+ for (i = 0; i < hw->func_caps.rss_table_size; i++)
+ lut[i] = (uint8_t)(i % (uint32_t)num);
- if (pf->adapter->rss_reta_updated == 0) {
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (j & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
- rte_bswap32(lut));
- }
- }
+ return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
+}
- rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
- if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
- !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
- i40e_pf_disable_rss(pf);
- return 0;
- }
- if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
+int
+i40e_pf_reset_rss_key(struct i40e_pf *pf)
+{
+ const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ uint8_t *rss_key;
+
+ /* Reset key */
+ rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ if (!rss_key ||
+ pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
static uint32_t rss_key_default[] = {0x6b793944,
0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
- rss_conf.rss_key = (uint8_t *)rss_key_default;
- rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
+ rss_key = (uint8_t *)rss_key_default;
+ }
+
+ return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
+}
+
+static int
+i40e_pf_rss_reset(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ int ret;
+
+ pf->hash_filter_enable = 0;
+ i40e_pf_disable_rss(pf);
+ i40e_set_symmetric_hash_enable_per_port(hw, 0);
+
+ if (!pf->support_multi_driver)
+ i40e_pf_global_rss_reset(pf);
+
+ /* Reset RETA table */
+ if (pf->adapter->rss_reta_updated == 0) {
+ ret = i40e_pf_reset_rss_reta(pf);
+ if (ret)
+ return ret;
+ }
+
+ return i40e_pf_reset_rss_key(pf);
+}
+
+/* Configure RSS */
+int
+i40e_pf_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw;
+ enum rte_eth_rx_mq_mode mq_mode;
+ uint64_t rss_hf, hena;
+ int ret;
+
+ ret = i40e_pf_rss_reset(pf);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
+ return ret;
}
- return i40e_hw_rss_hash_set(pf, &rss_conf);
+ rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+ if (!(rss_hf & pf->adapter->flow_types_mask) ||
+ !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+ return 0;
+
+ hw = I40E_PF_TO_HW(pf);
+ hena = i40e_config_hena(pf->adapter, rss_hf);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
}
static int
@@ -9283,24 +9380,20 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
}
/* Set the symmetric hash enable configurations per port */
-static void
+void
i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
{
uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
if (enable > 0) {
- if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO,
- "Symmetric hash has already been enabled");
+ if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
return;
- }
+
reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
} else {
- if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO,
- "Symmetric hash has already been disabled");
+ if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
return;
- }
+
reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
}
i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
@@ -10151,9 +10244,8 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
{
struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
enum i40e_filter_pctype pctype;
- uint64_t input_set, inset_reg = 0;
- uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
- int ret, i, num;
+ uint64_t input_set;
+ int ret;
if (!conf) {
PMD_DRV_LOG(ERR, "Invalid pointer");
@@ -10165,31 +10257,42 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
return -EINVAL;
}
- if (pf->support_multi_driver) {
- PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
- return -ENOTSUP;
- }
-
pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
- if (hw->mac.type == I40E_MAC_X722) {
- /* get translated pctype value in fd pctype register */
- pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
- I40E_GLQF_FD_PCTYPES((int)pctype));
- }
-
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to parse input set");
- return -EINVAL;
+ if (ret)
+ return ret;
+
+ return i40e_set_hash_inset(hw, input_set, pctype,
+ (conf->op == RTE_ETH_INPUT_SET_ADD) ?
+ true : false);
+}
+
+int
+i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
+ uint32_t pctype, bool add)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ uint64_t inset_reg = 0;
+ int num, i;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR,
+ "Modify input set is not permitted when multi-driver enabled.");
+ return -EPERM;
}
- if (conf->op == RTE_ETH_INPUT_SET_ADD) {
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
+
+ if (add) {
/* get inset value in register */
inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
inset_reg <<= I40E_32_BIT_WIDTH;
@@ -12623,25 +12726,13 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
}
}
-/* Restore RSS filter */
-static inline void
-i40e_rss_filter_restore(struct i40e_pf *pf)
-{
- struct i40e_rss_conf_list *list = &pf->rss_config_list;
- struct i40e_rss_filter *filter;
-
- TAILQ_FOREACH(filter, list, next) {
- i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
- }
-}
-
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
- i40e_rss_filter_restore(pf);
+ (void)i40e_hash_filter_restore(pf);
}
bool
@@ -13219,551 +13310,6 @@ struct i40e_customized_pctype*
return ret;
}
-int
-i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
- const struct rte_flow_action_rss *in)
-{
- if (in->key_len > RTE_DIM(out->key) ||
- in->queue_num > RTE_DIM(out->queue))
- return -EINVAL;
- if (!in->key && in->key_len)
- return -EINVAL;
- out->conf = (struct rte_flow_action_rss){
- .func = in->func,
- .level = in->level,
- .types = in->types,
- .key_len = in->key_len,
- .queue_num = in->queue_num,
- .queue = memcpy(out->queue, in->queue,
- sizeof(*in->queue) * in->queue_num),
- };
- if (in->key)
- out->conf.key = memcpy(out->key, in->key, in->key_len);
- return 0;
-}
-
-/* Write HENA register to enable hash */
-static int
-i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
- uint64_t hena;
- int ret;
-
- ret = i40e_set_rss_key(pf->main_vsi, key,
- rss_conf->conf.key_len);
- if (ret)
- return ret;
-
- hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
- I40E_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/* Configure hash input set */
-static int
-i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_input_set_conf conf;
- uint64_t mask0;
- int ret = 0;
- uint32_t j;
- int i;
- static const struct {
- uint64_t type;
- enum rte_eth_input_set_field field;
- } inset_match_table[] = {
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
-
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
-
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
-
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- };
-
- mask0 = types & pf->adapter->flow_types_mask;
- conf.op = RTE_ETH_INPUT_SET_SELECT;
- conf.inset_size = 0;
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
- if (mask0 & (1ULL << i)) {
- conf.flow_type = i;
- break;
- }
- }
-
- for (j = 0; j < RTE_DIM(inset_match_table); j++) {
- if ((types & inset_match_table[j].type) ==
- inset_match_table[j].type) {
- if (inset_match_table[j].field ==
- RTE_ETH_INPUT_SET_UNKNOWN)
- return -EINVAL;
-
- conf.field[conf.inset_size] =
- inset_match_table[j].field;
- conf.inset_size++;
- }
- }
-
- if (conf.inset_size) {
- ret = i40e_hash_filter_inset_select(hw, &conf);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-/* Look up the conflicted rule then mark it as invalid */
-static void
-i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_rss_filter *rss_item;
- uint64_t rss_inset;
-
- /* Clear input set bits before comparing the pctype */
- rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-
- /* Look up the conflicted rule then mark it as invalid */
- TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
- if (!rss_item->rss_filter_info.valid)
- continue;
-
- if (conf->conf.queue_num &&
- rss_item->rss_filter_info.conf.queue_num)
- rss_item->rss_filter_info.valid = false;
-
- if (conf->conf.types &&
- (rss_item->rss_filter_info.conf.types &
- rss_inset) ==
- (conf->conf.types & rss_inset))
- rss_item->rss_filter_info.valid = false;
-
- if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
- rss_item->rss_filter_info.conf.func ==
- RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
- rss_item->rss_filter_info.valid = false;
- }
-}
-
-/* Configure RSS hash function */
-static int
-i40e_rss_config_hash_function(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t reg, i;
- uint64_t mask0;
- uint16_t j;
-
- if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
- if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
- I40E_WRITE_FLUSH(hw);
- i40e_rss_mark_invalid_rule(pf, conf);
-
- return 0;
- }
- reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
-
- i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- I40E_WRITE_FLUSH(hw);
- i40e_rss_mark_invalid_rule(pf, conf);
- } else if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- mask0 = conf->conf.types & pf->adapter->flow_types_mask;
-
- i40e_set_symmetric_hash_enable_per_port(hw, 1);
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
- if (mask0 & (1UL << i))
- break;
- }
-
- if (i == UINT64_BIT)
- return -EINVAL;
-
- for (j = I40E_FILTER_PCTYPE_INVALID + 1;
- j < I40E_FILTER_PCTYPE_MAX; j++) {
- if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_global_rx_ctl(hw,
- I40E_GLQF_HSYM(j),
- I40E_GLQF_HSYM_SYMH_ENA_MASK);
- }
- }
-
- return 0;
-}
-
-/* Enable RSS according to the configuration */
-static int
-i40e_rss_enable_hash(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- struct i40e_rte_flow_rss_conf rss_conf;
-
- if (!(conf->conf.types & pf->adapter->flow_types_mask))
- return -ENOTSUP;
-
- memset(&rss_conf, 0, sizeof(rss_conf));
- rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
-
- /* Configure hash input set */
- if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
- return -EINVAL;
-
- if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
- static uint32_t rss_key_default[] = {0x6b793944,
- 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
- 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
- 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
-
- rss_conf.conf.key = (uint8_t *)rss_key_default;
- rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
- PMD_DRV_LOG(INFO,
- "No valid RSS key config for i40e, using default\n");
- }
-
- rss_conf.conf.types |= rss_info->conf.types;
- i40e_rss_hash_set(pf, &rss_conf);
-
- if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
- i40e_rss_config_hash_function(pf, conf);
-
- i40e_rss_mark_invalid_rule(pf, conf);
-
- return 0;
-}
-
-/* Configure RSS queue region */
-static int
-i40e_rss_config_queue_region(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t lut = 0;
- uint16_t j, num;
- uint32_t i;
-
- /* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
- */
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
- num = i40e_pf_calc_configured_queues_num(pf);
- else
- num = pf->dev_data->nb_rx_queues;
-
- num = RTE_MIN(num, conf->conf.queue_num);
- PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
-
- if (num == 0) {
- PMD_DRV_LOG(ERR,
- "No PF queues are configured to enable RSS for port %u",
- pf->dev_data->port_id);
- return -ENOTSUP;
- }
-
- /* Fill in redirection table */
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
-
- i40e_rss_mark_invalid_rule(pf, conf);
-
- return 0;
-}
-
-/* Configure RSS hash function to default */
-static int
-i40e_rss_clear_hash_function(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t i, reg;
- uint64_t mask0;
- uint16_t j;
-
- if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
- if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG,
- "Hash function already set to Toeplitz");
- I40E_WRITE_FLUSH(hw);
-
- return 0;
- }
- reg |= I40E_GLQF_CTL_HTOEP_MASK;
-
- i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- I40E_WRITE_FLUSH(hw);
- } else if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- mask0 = conf->conf.types & pf->adapter->flow_types_mask;
-
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
- if (mask0 & (1UL << i))
- break;
- }
-
- if (i == UINT64_BIT)
- return -EINVAL;
-
- for (j = I40E_FILTER_PCTYPE_INVALID + 1;
- j < I40E_FILTER_PCTYPE_MAX; j++) {
- if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_global_rx_ctl(hw,
- I40E_GLQF_HSYM(j),
- 0);
- }
- }
-
- return 0;
-}
-
-/* Disable RSS hash and configure default input set */
-static int
-i40e_rss_disable_hash(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct i40e_rte_flow_rss_conf rss_conf;
- uint32_t i;
-
- memset(&rss_conf, 0, sizeof(rss_conf));
- rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
-
- /* Disable RSS hash */
- rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
- i40e_rss_hash_set(pf, &rss_conf);
-
- for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
- if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
- !(conf->conf.types & (1ULL << i)))
- continue;
-
- /* Configure default input set */
- struct rte_eth_input_set_conf input_conf = {
- .op = RTE_ETH_INPUT_SET_SELECT,
- .flow_type = i,
- .inset_size = 1,
- };
- input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
- i40e_hash_filter_inset_select(hw, &input_conf);
- }
-
- rss_info->conf.types = rss_conf.conf.types;
-
- i40e_rss_clear_hash_function(pf, conf);
-
- return 0;
-}
-
-/* Configure RSS queue region to default */
-static int
-i40e_rss_clear_queue_region(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- uint16_t queue[I40E_MAX_Q_PER_TC];
- uint32_t num_rxq, i;
- uint32_t lut = 0;
- uint16_t j, num;
-
- num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
-
- for (j = 0; j < num_rxq; j++)
- queue[j] = j;
-
- /* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
- */
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
- num = i40e_pf_calc_configured_queues_num(pf);
- else
- num = pf->dev_data->nb_rx_queues;
-
- num = RTE_MIN(num, num_rxq);
- PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
-
- if (num == 0) {
- PMD_DRV_LOG(ERR,
- "No PF queues are configured to enable RSS for port %u",
- pf->dev_data->port_id);
- return -ENOTSUP;
- }
-
- /* Fill in redirection table */
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (queue[j] & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
-
- rss_info->conf.queue_num = 0;
- memset(&rss_info->conf.queue, 0, sizeof(uint16_t));
-
- return 0;
-}
-
-int
-i40e_config_rss_filter(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf, bool add)
-{
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- struct rte_flow_action_rss update_conf = rss_info->conf;
- int ret = 0;
-
- if (add) {
- if (conf->conf.queue_num) {
- /* Configure RSS queue region */
- ret = i40e_rss_config_queue_region(pf, conf);
- if (ret)
- return ret;
-
- update_conf.queue_num = conf->conf.queue_num;
- update_conf.queue = conf->conf.queue;
- } else if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- /* Configure hash function */
- ret = i40e_rss_config_hash_function(pf, conf);
- if (ret)
- return ret;
-
- update_conf.func = conf->conf.func;
- } else {
- /* Configure hash enable and input set */
- ret = i40e_rss_enable_hash(pf, conf);
- if (ret)
- return ret;
-
- update_conf.types |= conf->conf.types;
- update_conf.key = conf->conf.key;
- update_conf.key_len = conf->conf.key_len;
- }
-
- /* Update RSS info in pf */
- if (i40e_rss_conf_init(rss_info, &update_conf))
- return -EINVAL;
- } else {
- if (!conf->valid)
- return 0;
-
- if (conf->conf.queue_num)
- i40e_rss_clear_queue_region(pf);
- else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
- i40e_rss_clear_hash_function(pf, conf);
- else
- i40e_rss_disable_hash(pf, conf);
- }
-
- return 0;
-}
-
RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
#ifdef RTE_LIBRTE_I40E_DEBUG_RX
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1466998..4c84965 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -16,6 +16,8 @@
#include "rte_pmd_i40e.h"
#include "base/i40e_register.h"
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
#define I40E_VLAN_TAG_SIZE 4
@@ -1047,17 +1049,24 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
+#define I40E_MAX_PCTYPE_PER_RSS 8
+
struct i40e_rte_flow_rss_conf {
- struct rte_flow_action_rss conf; /**< RSS parameters. */
- uint16_t queue_region_conf; /**< Queue region config flag */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+
uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t)]; /* Hash key. */
- uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
- bool valid; /* Check if it's valid */
-};
+ sizeof(uint32_t)]; /**< Hash key. */
+ uint16_t queue[ETH_RSS_RETA_SIZE_512]; /**< Queues indices to use. */
-TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter);
+ uint8_t pctypes[I40E_MAX_PCTYPE_PER_RSS];
+ uint8_t pctype_count;
+
+ uint8_t region_flags; /**< configure a queue region, yes(1)/no(0) */
+ uint8_t region_priority;/**< queue region priority */
+ uint8_t region_queue_num;
+ uint16_t region_queue_start;
+};
/* RSS filter list structure */
struct i40e_rss_filter {
@@ -1065,6 +1074,8 @@ struct i40e_rss_filter {
struct i40e_rte_flow_rss_conf rss_filter_info;
};
+TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter);
+
struct i40e_vf_msg_cfg {
/* maximal VF message during a statistic period */
uint32_t max_msg;
@@ -1119,6 +1130,7 @@ struct i40e_pf {
uint16_t fdir_qp_offset;
uint16_t hash_lut_size; /* The size of hash lookup table */
+ uint64_t hash_enabled_queues;
/* input set bits for each pctype */
uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX];
/* store VXLAN UDP ports */
@@ -1133,7 +1145,6 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
- struct i40e_rte_flow_rss_conf rss_info; /* RSS info */
struct i40e_rss_conf_list rss_config_list; /* RSS rule list */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
@@ -1151,7 +1162,7 @@ struct i40e_pf {
bool dport_replace_flag; /* Destination port replace is done */
struct i40e_tm_conf tm_conf;
bool support_multi_driver; /* 1 - support multiple driver */
-
+ bool hash_filter_enable;
/* Dynamic Device Personalization */
bool gtp_support; /* 1 - support GTP-C and GTP-U */
bool esp_support; /* 1 - support ESP SPI */
@@ -1364,6 +1375,8 @@ int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
void i40e_fdir_filter_restore(struct i40e_pf *pf);
+int i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
+ uint32_t pctype, bool add);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
@@ -1425,7 +1438,8 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv);
bool is_i40e_supported(struct rte_eth_dev *dev);
bool is_i40evf_supported(struct rte_eth_dev *dev);
-
+void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw,
+ uint8_t enable);
int i40e_validate_input_set(enum i40e_filter_pctype pctype,
enum rte_filter_type filter, uint64_t inset);
int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
@@ -1448,12 +1462,13 @@ int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
+void i40e_pf_disable_rss(struct i40e_pf *pf);
+int i40e_pf_calc_configured_queues_num(struct i40e_pf *pf);
+int i40e_pf_reset_rss_reta(struct i40e_pf *pf);
+int i40e_pf_reset_rss_key(struct i40e_pf *pf);
+int i40e_pf_config_rss(struct i40e_pf *pf);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
-int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
- const struct rte_flow_action_rss *in);
-int i40e_config_rss_filter(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf, bool add);
int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index adc5da1..e5fe912 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -23,6 +23,7 @@
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
+#include "i40e_hash.h"
#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER 44
@@ -115,7 +116,6 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
-static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -4699,566 +4699,6 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
return ret;
}
-/**
- * This function is used to do configuration i40e existing RSS with rte_flow.
- * It also enable queue region configuration using flow API for i40e.
- * pattern can be used indicate what parameters will be include in flow,
- * like user_priority or flowtype for queue region or HASH function for RSS.
- * Action is used to transmit parameter like queue index and HASH
- * function for RSS, or flowtype for queue region configuration.
- * For example:
- * pattern:
- * Case 1: try to transform patterns to pctype. valid pctype will be
- * used in parse action.
- * Case 2: only ETH, indicate flowtype for queue region will be parsed.
- * Case 3: only VLAN, indicate user_priority for queue region will be parsed.
- * So, pattern choice is depened on the purpose of configuration of
- * that flow.
- * action:
- * action RSS will be used to transmit valid parameter with
- * struct rte_flow_action_rss for all the 3 case.
- */
-static int
-i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
- const struct rte_flow_item *pattern,
- struct rte_flow_error *error,
- struct i40e_rss_pattern_info *p_info,
- struct i40e_queue_regions *info)
-{
- const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
- const struct rte_flow_item *item = pattern;
- enum rte_flow_item_type item_type;
- struct rte_flow_item *items;
- uint32_t item_num = 0; /* non-void item number of pattern*/
- uint32_t i = 0;
- static const struct {
- enum rte_flow_item_type *item_array;
- uint64_t type;
- } i40e_rss_pctype_patterns[] = {
- { pattern_fdir_ipv4,
- ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
- { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
- { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
- { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
- { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
- { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
- { pattern_fdir_ipv6,
- ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
- { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
- { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
- { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
- { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
- { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
- { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
- };
-
- p_info->types = I40E_RSS_TYPE_INVALID;
-
- if (item->type == RTE_FLOW_ITEM_TYPE_END) {
- p_info->types = I40E_RSS_TYPE_NONE;
- return 0;
- }
-
- /* Convert pattern to RSS offload types */
- while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
- if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
- item_num++;
- i++;
- }
- item_num++;
-
- items = rte_zmalloc("i40e_pattern",
- item_num * sizeof(struct rte_flow_item), 0);
- if (!items) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "No memory for PMD internal items.");
- return -ENOMEM;
- }
-
- i40e_pattern_skip_void_item(items, pattern);
-
- for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
- if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
- items)) {
- p_info->types = i40e_rss_pctype_patterns[i].type;
- break;
- }
- }
-
- rte_free(items);
-
- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Not support range");
- return -rte_errno;
- }
- item_type = item->type;
- switch (item_type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- p_info->action_flag = 1;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec = item->spec;
- vlan_mask = item->mask;
- if (vlan_spec && vlan_mask) {
- if (vlan_mask->tci ==
- rte_cpu_to_be_16(I40E_TCI_MASK)) {
- info->region[0].user_priority[0] =
- (rte_be_to_cpu_16(
- vlan_spec->tci) >> 13) & 0x7;
- info->region[0].user_priority_num = 1;
- info->queue_region_number = 1;
- p_info->action_flag = 0;
- }
- }
- break;
- default:
- p_info->action_flag = 0;
- memset(info, 0, sizeof(struct i40e_queue_regions));
- return 0;
- }
- }
-
- return 0;
-}
-
-/**
- * This function is used to parse RSS queue index, total queue number and
- * hash functions, If the purpose of this configuration is for queue region
- * configuration, it will set queue_region_conf flag to TRUE, else to FALSE.
- * In queue region configuration, it also need to parse hardware flowtype
- * and user_priority from configuration, it will also cheeck the validity
- * of these parameters. For example, The queue region sizes should
- * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
- * hw_flowtype or PCTYPE max index should be 63, the user priority
- * max index should be 7, and so on. And also, queue index should be
- * continuous sequence and queue region index should be part of RSS
- * queue index for this port.
- * For hash params, the pctype in action and pattern must be same.
- * Set queue index must be with non-types.
- */
-static int
-i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *actions,
- struct rte_flow_error *error,
- struct i40e_rss_pattern_info p_info,
- struct i40e_queue_regions *conf_info,
- union i40e_filter_t *filter)
-{
- const struct rte_flow_action *act;
- const struct rte_flow_action_rss *rss;
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_queue_regions *info = &pf->queue_region;
- struct i40e_rte_flow_rss_conf *rss_config =
- &filter->rss_conf;
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- uint16_t i, j, n, m, tmp, nb_types;
- uint32_t index = 0;
- uint64_t hf_bit = 1;
-
- static const struct {
- uint64_t rss_type;
- enum i40e_filter_pctype pctype;
- } pctype_match_table[] = {
- {ETH_RSS_FRAG_IPV4,
- I40E_FILTER_PCTYPE_FRAG_IPV4},
- {ETH_RSS_NONFRAG_IPV4_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
- {ETH_RSS_NONFRAG_IPV4_UDP,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
- {ETH_RSS_NONFRAG_IPV4_SCTP,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
- {ETH_RSS_NONFRAG_IPV4_OTHER,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
- {ETH_RSS_FRAG_IPV6,
- I40E_FILTER_PCTYPE_FRAG_IPV6},
- {ETH_RSS_NONFRAG_IPV6_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
- {ETH_RSS_NONFRAG_IPV6_UDP,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
- {ETH_RSS_NONFRAG_IPV6_SCTP,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
- {ETH_RSS_NONFRAG_IPV6_OTHER,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
- {ETH_RSS_L2_PAYLOAD,
- I40E_FILTER_PCTYPE_L2_PAYLOAD},
- };
-
- static const struct {
- uint64_t rss_type;
- enum i40e_filter_pctype pctype;
- } pctype_match_table_x722[] = {
- {ETH_RSS_NONFRAG_IPV4_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK},
- {ETH_RSS_NONFRAG_IPV4_UDP,
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP},
- {ETH_RSS_NONFRAG_IPV4_UDP,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP},
- {ETH_RSS_NONFRAG_IPV6_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK},
- {ETH_RSS_NONFRAG_IPV6_UDP,
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP},
- {ETH_RSS_NONFRAG_IPV6_UDP,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP},
- };
-
- NEXT_ITEM_OF_ACTION(act, actions, index);
- rss = act->conf;
-
- /**
- * RSS only supports forwarding,
- * check if the first not void action is RSS.
- */
- if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
- memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Not supported action.");
- return -rte_errno;
- }
-
- if (p_info.action_flag && rss->queue_num) {
- for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
- if (rss->types & pctype_match_table[j].rss_type) {
- conf_info->region[0].hw_flowtype[0] =
- (uint8_t)pctype_match_table[j].pctype;
- conf_info->region[0].flowtype_num = 1;
- conf_info->queue_region_number = 1;
- break;
- }
- }
-
- if (hw->mac.type == I40E_MAC_X722)
- for (j = 0; j < RTE_DIM(pctype_match_table_x722); j++) {
- if (rss->types &
- pctype_match_table_x722[j].rss_type) {
- m = conf_info->region[0].flowtype_num;
- conf_info->region[0].hw_flowtype[m] =
- pctype_match_table_x722[j].pctype;
- conf_info->region[0].flowtype_num++;
- conf_info->queue_region_number = 1;
- }
- }
- }
-
- /**
- * Do some queue region related parameters check
- * in order to keep queue index for queue region to be
- * continuous sequence and also to be part of RSS
- * queue index for this port.
- */
- if (conf_info->queue_region_number) {
- for (i = 0; i < rss->queue_num; i++) {
- for (j = 0; j < rss_info->conf.queue_num; j++) {
- if (rss->queue[i] == rss_info->conf.queue[j])
- break;
- }
- if (j == rss_info->conf.queue_num) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "no valid queues");
- return -rte_errno;
- }
- }
-
- for (i = 0; i < rss->queue_num - 1; i++) {
- if (rss->queue[i + 1] != rss->queue[i] + 1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "no valid queues");
- return -rte_errno;
- }
- }
- }
-
- /* Parse queue region related parameters from configuration */
- for (n = 0; n < conf_info->queue_region_number; n++) {
- if (conf_info->region[n].user_priority_num ||
- conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->queue_num)) &&
- rss->queue_num <= 64)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
- "total number of queues do not exceed the VSI allocation");
- return -rte_errno;
- }
-
- if (conf_info->region[n].user_priority[n] >=
- I40E_MAX_USER_PRIORITY) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "the user priority max index is 7");
- return -rte_errno;
- }
-
- if (conf_info->region[n].hw_flowtype[n] >=
- I40E_FILTER_PCTYPE_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "the hw_flowtype or PCTYPE max index is 63");
- return -rte_errno;
- }
-
- for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num ==
- rss->queue_num &&
- info->region[i].queue_start_index ==
- rss->queue[0])
- break;
- }
-
- if (i == info->queue_region_number) {
- if (i > I40E_REGION_MAX_INDEX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "the queue region max index is 7");
- return -rte_errno;
- }
-
- info->region[i].queue_num =
- rss->queue_num;
- info->region[i].queue_start_index =
- rss->queue[0];
- info->region[i].region_id =
- info->queue_region_number;
-
- j = info->region[i].user_priority_num;
- tmp = conf_info->region[n].user_priority[0];
- if (conf_info->region[n].user_priority_num) {
- info->region[i].user_priority[j] = tmp;
- info->region[i].user_priority_num++;
- }
-
- for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
- j = info->region[i].flowtype_num;
- tmp = conf_info->region[n].hw_flowtype[m];
- info->region[i].hw_flowtype[j] = tmp;
- info->region[i].flowtype_num++;
- }
- info->queue_region_number++;
- } else {
- j = info->region[i].user_priority_num;
- tmp = conf_info->region[n].user_priority[0];
- if (conf_info->region[n].user_priority_num) {
- info->region[i].user_priority[j] = tmp;
- info->region[i].user_priority_num++;
- }
-
- for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
- j = info->region[i].flowtype_num;
- tmp = conf_info->region[n].hw_flowtype[m];
- info->region[i].hw_flowtype[j] = tmp;
- info->region[i].flowtype_num++;
- }
- }
- }
-
- rss_config->queue_region_conf = TRUE;
- }
-
- /**
- * Return function if this flow is used for queue region configuration
- */
- if (rss_config->queue_region_conf)
- return 0;
-
- if (!rss) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "invalid rule");
- return -rte_errno;
- }
-
- for (n = 0; n < rss->queue_num; n++) {
- if (rss->queue[n] >= dev->data->nb_rx_queues) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "queue id > max number of queues");
- return -rte_errno;
- }
- }
-
- if (rss->queue_num && (p_info.types || rss->types))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS types must be empty while configuring queue region");
-
- /* validate pattern and pctype */
- if (!(rss->types & p_info.types) &&
- (rss->types || p_info.types) && !rss->queue_num)
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "invalid pctype");
-
- nb_types = 0;
- for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
- if (rss->types & (hf_bit << n))
- nb_types++;
- if (nb_types > 1)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "multi pctype is not supported");
- }
-
- if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
- (p_info.types || rss->types || rss->queue_num))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "pattern, type and queues must be empty while"
- " setting hash function as simple_xor");
-
- if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
- !(p_info.types && rss->types))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "pctype and queues can not be empty while"
- " setting hash function as symmetric toeplitz");
-
- /* Parse RSS related parameters from configuration */
- if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
- rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS hash functions are not supported");
- if (rss->level)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "a nonzero RSS encapsulation level is not supported");
- if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS hash key too large");
- if (rss->queue_num > RTE_DIM(rss_config->queue))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "too many queues for RSS context");
- if (i40e_rss_conf_init(rss_config, rss))
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS context initialization failure");
-
- index++;
-
- /* check if the next not void action is END */
- NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Not supported action.");
- return -rte_errno;
- }
- rss_config->queue_region_conf = FALSE;
-
- return 0;
-}
-
-static int
-i40e_parse_rss_filter(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- union i40e_filter_t *filter,
- struct rte_flow_error *error)
-{
- struct i40e_rss_pattern_info p_info;
- struct i40e_queue_regions info;
- int ret;
-
- memset(&info, 0, sizeof(struct i40e_queue_regions));
- memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
-
- ret = i40e_flow_parse_rss_pattern(dev, pattern,
- error, &p_info, &info);
- if (ret)
- return ret;
-
- ret = i40e_flow_parse_rss_action(dev, actions, error,
- p_info, &info, filter);
- if (ret)
- return ret;
-
- ret = i40e_flow_parse_attr(attr, error);
- if (ret)
- return ret;
-
- cons_filter_type = RTE_ETH_FILTER_HASH;
-
- return 0;
-}
-
-static int
-i40e_config_rss_filter_set(struct rte_eth_dev *dev,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_rss_filter *rss_filter;
- int ret;
-
- if (conf->queue_region_conf) {
- ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
- } else {
- ret = i40e_config_rss_filter(pf, conf, 1);
- }
-
- if (ret)
- return ret;
-
- rss_filter = rte_zmalloc("i40e_rss_filter",
- sizeof(*rss_filter), 0);
- if (rss_filter == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -ENOMEM;
- }
- rss_filter->rss_filter_info = *conf;
- /* the rule new created is always valid
- * the existing rule covered by new rule will be set invalid
- */
- rss_filter->rss_filter_info.valid = true;
-
- TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
-
- return 0;
-}
-
-static int
-i40e_config_rss_filter_del(struct rte_eth_dev *dev,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_rss_filter *rss_filter;
- void *temp;
-
- if (conf->queue_region_conf)
- i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- else
- i40e_config_rss_filter(pf, conf, 0);
-
- TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
- if (!memcmp(&rss_filter->rss_filter_info, conf,
- sizeof(struct rte_flow_action_rss))) {
- TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
- rte_free(rss_filter);
- }
- }
- return 0;
-}
-
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -5299,9 +4739,13 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
i++;
if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
- ret = i40e_parse_rss_filter(dev, attr, pattern,
- actions, &cons_filter, error);
- return ret;
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+ return i40e_hash_parse(dev, pattern, actions + i,
+ &cons_filter.rss_conf, error);
}
i = 0;
@@ -5416,12 +4860,11 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
i40e_tunnel_filter_list);
break;
case RTE_ETH_FILTER_HASH:
- ret = i40e_config_rss_filter_set(dev,
- &cons_filter.rss_conf);
+ ret = i40e_hash_filter_create(pf, &cons_filter.rss_conf);
if (ret)
goto free_flow;
flow->rule = TAILQ_LAST(&pf->rss_config_list,
- i40e_rss_conf_list);
+ i40e_rss_conf_list);
break;
default:
goto free_flow;
@@ -5474,8 +4917,7 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
}
break;
case RTE_ETH_FILTER_HASH:
- ret = i40e_config_rss_filter_del(dev,
- &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
+ ret = i40e_hash_filter_destroy(pf, flow->rule);
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
@@ -5623,14 +5065,11 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- ret = i40e_flow_flush_rss_filter(dev);
- if (ret) {
+ ret = i40e_hash_filter_flush(pf);
+ if (ret)
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush RSS flows.");
- return -rte_errno;
- }
-
return ret;
}
@@ -5747,36 +5186,6 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
return ret;
}
-/* remove the RSS filter */
-static int
-i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_flow *flow;
- void *temp;
- int32_t ret = -EINVAL;
-
- ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
-
- /* Delete RSS flows in flow list. */
- TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
- if (flow->filter_type != RTE_ETH_FILTER_HASH)
- continue;
-
- if (flow->rule) {
- ret = i40e_config_rss_filter_del(dev,
- &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
- if (ret)
- return ret;
- }
- TAILQ_REMOVE(&pf->flow_list, flow, node);
- rte_free(flow);
- }
-
- return ret;
-}
-
static int
i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
struct rte_flow *flow,
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
new file mode 100644
index 0000000..4e23984
--- /dev/null
+++ b/drivers/net/i40e/i40e_hash.c
@@ -0,0 +1,1315 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include "base/i40e_prototype.h"
+#include "i40e_logs.h"
+#include "i40e_ethdev.h"
+#include "i40e_hash.h"
+
+#ifndef BIT
+#define BIT(n) (1UL << (n))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+#endif
+
+#define I40E_TCI_MASK 0xFFFF
+
+/* Pattern item headers */
+#define I40E_HASH_HDR_ETH 0x01ULL
+#define I40E_HASH_HDR_IPV4 0x10ULL
+#define I40E_HASH_HDR_IPV6 0x20ULL
+#define I40E_HASH_HDR_TCP 0x100ULL
+#define I40E_HASH_HDR_UDP 0x200ULL
+#define I40E_HASH_HDR_SCTP 0x400ULL
+#define I40E_HASH_HDR_ESP 0x10000ULL
+#define I40E_HASH_HDR_L2TPV3 0x20000ULL
+#define I40E_HASH_HDR_AH 0x40000ULL
+#define I40E_HASH_HDR_GTPC 0x100000ULL
+#define I40E_HASH_HDR_GTPU 0x200000ULL
+
+#define I40E_HASH_HDR_INNER_SHIFT 32
+#define I40E_HASH_HDR_IPV4_INNER (I40E_HASH_HDR_IPV4 << \
+ I40E_HASH_HDR_INNER_SHIFT)
+#define I40E_HASH_HDR_IPV6_INNER (I40E_HASH_HDR_IPV6 << \
+ I40E_HASH_HDR_INNER_SHIFT)
+
+/* ETH */
+#define I40E_PHINT_ETH I40E_HASH_HDR_ETH
+
+/* IPv4 */
+#define I40E_PHINT_IPV4 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV4)
+#define I40E_PHINT_IPV4_TCP (I40E_PHINT_IPV4 | I40E_HASH_HDR_TCP)
+#define I40E_PHINT_IPV4_UDP (I40E_PHINT_IPV4 | I40E_HASH_HDR_UDP)
+#define I40E_PHINT_IPV4_SCTP (I40E_PHINT_IPV4 | I40E_HASH_HDR_SCTP)
+
+/* IPv6 */
+#define I40E_PHINT_IPV6 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV6)
+#define I40E_PHINT_IPV6_TCP (I40E_PHINT_IPV6 | I40E_HASH_HDR_TCP)
+#define I40E_PHINT_IPV6_UDP (I40E_PHINT_IPV6 | I40E_HASH_HDR_UDP)
+#define I40E_PHINT_IPV6_SCTP (I40E_PHINT_IPV6 | I40E_HASH_HDR_SCTP)
+
+/* ESP */
+#define I40E_PHINT_IPV4_ESP (I40E_PHINT_IPV4 | I40E_HASH_HDR_ESP)
+#define I40E_PHINT_IPV6_ESP (I40E_PHINT_IPV6 | I40E_HASH_HDR_ESP)
+#define I40E_PHINT_IPV4_UDP_ESP (I40E_PHINT_IPV4_UDP | \
+ I40E_HASH_HDR_ESP)
+#define I40E_PHINT_IPV6_UDP_ESP (I40E_PHINT_IPV6_UDP | \
+ I40E_HASH_HDR_ESP)
+
+/* GTPC */
+#define I40E_PHINT_IPV4_GTPC (I40E_PHINT_IPV4_UDP | \
+ I40E_HASH_HDR_GTPC)
+#define I40E_PHINT_IPV6_GTPC (I40E_PHINT_IPV6_UDP | \
+ I40E_HASH_HDR_GTPC)
+
+/* GTPU */
+#define I40E_PHINT_IPV4_GTPU (I40E_PHINT_IPV4_UDP | \
+ I40E_HASH_HDR_GTPU)
+#define I40E_PHINT_IPV4_GTPU_IPV4 (I40E_PHINT_IPV4_GTPU | \
+ I40E_HASH_HDR_IPV4_INNER)
+#define I40E_PHINT_IPV4_GTPU_IPV6 (I40E_PHINT_IPV4_GTPU | \
+ I40E_HASH_HDR_IPV6_INNER)
+#define I40E_PHINT_IPV6_GTPU (I40E_PHINT_IPV6_UDP | \
+ I40E_HASH_HDR_GTPU)
+#define I40E_PHINT_IPV6_GTPU_IPV4 (I40E_PHINT_IPV6_GTPU | \
+ I40E_HASH_HDR_IPV4_INNER)
+#define I40E_PHINT_IPV6_GTPU_IPV6 (I40E_PHINT_IPV6_GTPU | \
+ I40E_HASH_HDR_IPV6_INNER)
+
+/* L2TPV3 */
+#define I40E_PHINT_IPV4_L2TPV3 (I40E_PHINT_IPV4 | I40E_HASH_HDR_L2TPV3)
+#define I40E_PHINT_IPV6_L2TPV3 (I40E_PHINT_IPV6 | I40E_HASH_HDR_L2TPV3)
+
+/* AH */
+#define I40E_PHINT_IPV4_AH (I40E_PHINT_IPV4 | I40E_HASH_HDR_AH)
+#define I40E_PHINT_IPV6_AH (I40E_PHINT_IPV6 | I40E_HASH_HDR_AH)
+
+/* Structure of mapping RSS type to input set */
+struct i40e_hash_map_rss_inset {
+ uint64_t rss_type;
+ uint64_t inset;
+};
+
+const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
+ /* IPv4 */
+ { ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+ { ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+
+ { ETH_RSS_NONFRAG_IPV4_OTHER,
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+
+ { ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
+
+ /* IPv6 */
+ { ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+ { ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+
+ { ETH_RSS_NONFRAG_IPV6_OTHER,
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+
+ { ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
+
+ /* Ether */
+ { ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+ { ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+
+ /* VLAN */
+ { ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+ { ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+};
+
+#define I40E_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
+
+#define I40E_HASH_ETH_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_VLAN))
+
+#define I40E_HASH_IP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_ESP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_L2TPV3OIP) |\
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_AH))
+
+#define I40E_HASH_UDP_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPU)
+
+#define I40E_HASH_GTPU_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
+
+static const uint64_t pattern_next_allow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_VOID] = I40E_HASH_VOID_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_ETH_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_IP_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_IP_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_UDP_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_GTPU_NEXT_ALLOW,
+};
+
+static const uint64_t pattern_item_header[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_HDR_ETH,
+ [RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_HDR_IPV4,
+ [RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_HDR_IPV6,
+ [RTE_FLOW_ITEM_TYPE_TCP] = I40E_HASH_HDR_TCP,
+ [RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_HDR_UDP,
+ [RTE_FLOW_ITEM_TYPE_SCTP] = I40E_HASH_HDR_SCTP,
+ [RTE_FLOW_ITEM_TYPE_ESP] = I40E_HASH_HDR_ESP,
+ [RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_HDR_GTPU,
+ [RTE_FLOW_ITEM_TYPE_L2TPV3OIP] = I40E_HASH_HDR_L2TPV3,
+ [RTE_FLOW_ITEM_TYPE_AH] = I40E_HASH_HDR_AH,
+};
+
+/* Structure of matched pattern */
+struct i40e_hash_match_pattern {
+ uint64_t pattern_type;
+ uint64_t rss_mask; /* Supported RSS type for this pattern */
+ bool custom_pctype_flag;/* true for custom packet type */
+ uint8_t pctype;
+};
+
+#define I40E_HASH_MAP_PATTERN(pattern, rss_mask, pctype) { \
+ pattern, rss_mask, false, pctype }
+
+#define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
+ pattern, rss_mask, true, cus_pctype }
+
+#define I40E_HASH_VLAN_RSS_MASK (ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+#define I40E_HASH_L2_RSS_MASK (ETH_RSS_ETH | ETH_RSS_L2_SRC_ONLY | \
+ ETH_RSS_L2_SRC_ONLY)
+
+#define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK | \
+ I40E_HASH_VLAN_RSS_MASK | \
+ ETH_RSS_L3_SRC_ONLY | \
+ ETH_RSS_L3_SRC_ONLY)
+
+#define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK | \
+ ETH_RSS_PORT | ETH_RSS_L3_SRC_ONLY | \
+ ETH_RSS_L3_SRC_ONLY)
+
+#define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+
+/* Currently supported patterns and RSS types.
+ * All entries with the same pattern type are grouped together.
+ */
+static const struct i40e_hash_match_pattern match_patterns[] = {
+ /* Ether */
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
+ ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+ I40E_FILTER_PCTYPE_L2_PAYLOAD),
+
+ /* IPv4 */
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
+ ETH_RSS_FRAG_IPV4 | I40E_HASH_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_FRAG_IPV4),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
+ ETH_RSS_NONFRAG_IPV4_OTHER |
+ I40E_HASH_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
+ ETH_RSS_NONFRAG_IPV4_SCTP |
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
+
+ /* IPv6 */
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
+ ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_FRAG_IPV6),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
+ ETH_RSS_NONFRAG_IPV6_OTHER |
+ I40E_HASH_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
+ ETH_RSS_NONFRAG_IPV6_UDP |
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
+ ETH_RSS_NONFRAG_IPV6_SCTP |
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
+
+ /* ESP */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+
+ /* GTPC */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPC),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPC,
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPC),
+
+ /* GTPU */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU,
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPU),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPU),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+
+ /* L2TPV3 */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
+ ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
+ ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+
+ /* AH */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+ I40E_CUSTOMIZED_AH_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+ I40E_CUSTOMIZED_AH_IPV6),
+};
+
+static int
+i40e_hash_get_pattern_type(const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ uint64_t *pattern_types)
+{
+ const char *message = "Pattern not supported";
+ enum rte_flow_item_type prev_item_type = RTE_FLOW_ITEM_TYPE_VOID;
+ enum rte_flow_item_type last_item_type = prev_item_type;
+ uint64_t item_hdr, pattern_hdrs = 0;
+ bool inner_flag = false;
+ int vlan_count = 0;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ if (pattern->mask || pattern->spec || pattern->last) {
+ message = "Header info should not be specified";
+ goto not_sup;
+ }
+
+ /* Check the previous item allows this sub-item. */
+ if (prev_item_type >= RTE_DIM(pattern_next_allow_items) ||
+ !(pattern_next_allow_items[prev_item_type] &
+ BIT_ULL(pattern->type)))
+ goto not_sup;
+
+ /* The VLAN item does not affect pattern type recognition, so just
+ * count the number of VLAN items and do not change the value of
+ * prev_item_type.
+ */
+ last_item_type = pattern->type;
+ if (last_item_type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (vlan_count >= 2)
+ goto not_sup;
+ vlan_count++;
+ continue;
+ }
+
+ prev_item_type = last_item_type;
+ assert(last_item_type < RTE_DIM(pattern_item_header));
+ item_hdr = pattern_item_header[last_item_type];
+ assert(item_hdr);
+
+ if (inner_flag) {
+ item_hdr <<= I40E_HASH_HDR_INNER_SHIFT;
+
+ /* Inner layer should not have GTPU item */
+ if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+ goto not_sup;
+
+ } else if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU) {
+ inner_flag = true;
+ vlan_count = 0;
+ }
+
+ if (item_hdr & pattern_hdrs)
+ goto not_sup;
+
+ pattern_hdrs |= item_hdr;
+ }
+
+ if (pattern_hdrs && last_item_type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ *pattern_types = pattern_hdrs;
+ return 0;
+ }
+
+not_sup:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, message);
+}
+
+static int
+i40e_hash_get_x722_ext_pctypes(const struct i40e_hash_match_pattern *match,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ uint8_t *pctypes = rss_conf->pctypes;
+ uint8_t count = rss_conf->pctype_count;
+
+ switch (match->pctype) {
+ case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+ if (count >= RTE_DIM(rss_conf->pctypes))
+ goto out_of_range;
+
+ pctypes[count++] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK;
+ break;
+
+ case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+ if (count + 2 > (int)RTE_DIM(rss_conf->pctypes))
+ goto out_of_range;
+
+ pctypes[count++] = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+ pctypes[count++] = I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP;
+ break;
+
+ case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+ if (count >= RTE_DIM(rss_conf->pctypes))
+ goto out_of_range;
+
+ pctypes[count++] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK;
+ break;
+
+ case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+ if (count + 2 > (int)RTE_DIM(rss_conf->pctypes))
+ goto out_of_range;
+
+ pctypes[count++] = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP;
+ pctypes[count++] = I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP;
+ break;
+ }
+
+ rss_conf->pctype_count = count;
+ return 0;
+
+out_of_range:
+ return rte_flow_error_set(error, ERANGE,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "RSS PCTYPE number out of range");
+}
+
+static int
+i40e_hash_get_pctypes(const struct rte_eth_dev *dev,
+ const struct i40e_hash_match_pattern *match,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ if (rss_conf->pctype_count >= RTE_DIM(rss_conf->pctypes))
+ return rte_flow_error_set(error, ERANGE,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "RSS PCTYPE number out of range");
+
+ if (match->custom_pctype_flag) {
+ struct i40e_pf *pf;
+ struct i40e_customized_pctype *custom_type;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ custom_type = i40e_find_customized_pctype(pf, match->pctype);
+ if (!custom_type || !custom_type->valid)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "PCTYPE not supported");
+
+ rss_conf->pctypes[rss_conf->pctype_count] = custom_type->pctype;
+ rss_conf->pctype_count++;
+ } else {
+ struct i40e_hw *hw;
+
+ rss_conf->pctypes[rss_conf->pctype_count] = match->pctype;
+ rss_conf->pctype_count++;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_hash_get_x722_ext_pctypes(match,
+ rss_conf, error);
+ }
+
+ return 0;
+}
+
+static int
+i40e_hash_get_pattern_pctypes(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ uint64_t pattern_types = 0;
+ bool match_flag = false;
+ int i, ret;
+
+ ret = i40e_hash_get_pattern_type(pattern, error, &pattern_types);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < (int)RTE_DIM(match_patterns); i++) {
+ const struct i40e_hash_match_pattern *match =
+ &match_patterns[i];
+
+ /* Check whether the pattern types match. All entries with the
+ * same pattern type are grouped together, so once the pattern
+ * type has matched and then stops matching, none of the
+ * remaining entries can match either.
+ */
+ if (pattern_types != match->pattern_type) {
+ if (match_flag)
+ break;
+ continue;
+ }
+ match_flag = true;
+
+ /* Check RSS types match */
+ if ((rss_act->types & ~match->rss_mask) == 0) {
+ ret = i40e_hash_get_pctypes(dev, match,
+ rss_conf, error);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (rss_conf->pctype_count)
+ return 0;
+
+ if (match_flag)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "RSS types not supported");
+
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Pattern not supported");
+}
+
+static int
+i40e_hash_config_pctype_inset(struct i40e_hw *hw, uint64_t rss_types,
+ uint64_t inset, uint32_t pctype)
+{
+ uint64_t mask;
+
+ /* If SRC_ONLY and DST_ONLY of the same layer are both requested,
+ * the result is the same as if neither had been requested.
+ */
+ mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
+ if (mask == ETH_RSS_L2_SRC_ONLY)
+ inset &= ~I40E_INSET_DMAC;
+ else if (mask == ETH_RSS_L2_DST_ONLY)
+ inset &= ~I40E_INSET_SMAC;
+
+ mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+ if (mask == ETH_RSS_L4_SRC_ONLY)
+ inset &= ~I40E_INSET_DST_PORT;
+ else if (mask == ETH_RSS_L4_DST_ONLY)
+ inset &= ~I40E_INSET_SRC_PORT;
+
+ mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+ if (mask == ETH_RSS_L3_SRC_ONLY)
+ inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
+ else if (mask == ETH_RSS_L3_DST_ONLY)
+ inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
+
+ return i40e_set_hash_inset(hw, inset, pctype, false);
+}
+
+static int
+i40e_hash_config_func(struct i40e_hw *hw, enum rte_eth_hash_function func)
+{
+ struct i40e_pf *pf;
+ uint32_t reg;
+ uint8_t symmetric = 0;
+
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+
+ if (func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK))
+ goto set_symmetric;
+
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+ } else {
+ if (func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ symmetric = 1;
+
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK)
+ goto set_symmetric;
+
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+ }
+
+ pf = &((struct i40e_adapter *)hw->back)->pf;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR,
+ "Modify hash function is not permitted when multi-driver enabled");
+ return -EPERM;
+ }
+
+ PMD_DRV_LOG(INFO, "NIC hash function is setting to %d", func);
+ i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ I40E_WRITE_FLUSH(hw);
+
+set_symmetric:
+ i40e_set_symmetric_hash_enable_per_port(hw, symmetric);
+ return 0;
+}
+
+static int
+i40e_hash_config_pctype_symmetric(struct i40e_hw *hw, uint32_t pctype,
+ enum rte_eth_hash_function func)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ uint32_t reg;
+
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
+
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
+ if (func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+ if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
+ return 0;
+ reg |= I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ } else {
+ if (!(reg & I40E_GLQF_HSYM_SYMH_ENA_MASK))
+ return 0;
+ reg &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ }
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR,
+ "Enable/Disable symmetric hash is not permitted when multi-driver enabled");
+ return -EPERM;
+ }
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ I40E_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static void
+i40e_hash_enable_pctype(struct i40e_hw *hw,
+ uint32_t pctype, bool enable)
+{
+ uint32_t reg, reg_val, mask;
+
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
+
+ if (pctype < 32) {
+ mask = 1U << pctype;
+ reg = I40E_PFQF_HENA(0);
+ } else {
+ mask = 1U << (pctype - 32);
+ reg = I40E_PFQF_HENA(1);
+ }
+
+ reg_val = i40e_read_rx_ctl(hw, reg);
+
+ if (enable) {
+ if (reg_val & mask)
+ return;
+
+ reg_val |= mask;
+ } else {
+ if (!(reg_val & mask))
+ return;
+
+ reg_val &= ~mask;
+ }
+
+ i40e_write_rx_ctl(hw, reg, reg_val);
+ I40E_WRITE_FLUSH(hw);
+}
+
+static int
+i40e_hash_config_pctype(struct i40e_hw *hw,
+ const struct rte_flow_action_rss *rss_act,
+ uint32_t pctype)
+{
+ uint64_t inset = 0;
+ int i, ret;
+
+ if (rss_act->types == 0) {
+ i40e_hash_enable_pctype(hw, pctype, false);
+ return 0;
+ }
+
+ /* Get input sets */
+ for (i = 0; i < (int)RTE_DIM(i40e_hash_rss_inset); i++) {
+ if (rss_act->types & i40e_hash_rss_inset[i].rss_type)
+ inset |= i40e_hash_rss_inset[i].inset;
+ }
+
+ if (inset) {
+ ret = i40e_hash_config_pctype_inset(hw, rss_act->types,
+ inset, pctype);
+ if (ret)
+ return ret;
+ }
+
+ if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+ ret = i40e_hash_config_pctype_symmetric(hw, pctype,
+ rss_act->func);
+ if (ret)
+ return ret;
+ }
+
+ i40e_hash_enable_pctype(hw, pctype, true);
+ return 0;
+}
+
+static int
+i40e_hash_reset_pctype(struct i40e_hw *hw,
+ const struct rte_flow_action_rss *rss_act,
+ uint32_t pctype)
+{
+ const enum rte_eth_hash_function def_func =
+ RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ uint64_t inset = 0;
+ int ret;
+
+ i40e_hash_enable_pctype(hw, pctype, false);
+
+ inset = i40e_get_default_input_set(pctype);
+ if (inset) {
+ ret = i40e_set_hash_inset(hw, inset, pctype, false);
+ if (ret)
+ return ret;
+ }
+
+ if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss_act->func != def_func) {
+ ret = i40e_hash_config_pctype_symmetric(hw, pctype, def_func);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i40e_hash_config_region(struct i40e_pf *pf,
+ const struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = &pf->adapter->hw;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_queue_region_info *regions = pf->queue_region.region;
+ uint32_t num = pf->queue_region.queue_region_number;
+ uint32_t i, region_id_mask = 0;
+
+ if (!rss_conf->region_queue_num)
+ return 0;
+
+ RTE_BUILD_BUG_ON(I40E_REGION_MAX_INDEX > 31);
+
+ /* Re-configure the region if it already exists */
+ for (i = 0; i < num; i++) {
+ if (rss_conf->region_queue_start ==
+ regions[i].queue_start_index &&
+ rss_conf->region_queue_num == regions[i].queue_num) {
+ regions[i].user_priority[0] = rss_conf->region_priority;
+
+ return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ }
+
+ region_id_mask |= BIT(regions[i].region_id);
+ }
+
+ if (num > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "Queue region resource used up");
+ return -ERANGE;
+ }
+
+ /* Add a new region */
+
+ pf->queue_region.queue_region_number++;
+ memset(&regions[num], 0, sizeof(regions[0]));
+
+ for (i = 0; i <= I40E_REGION_MAX_INDEX; i++)
+ if (!(BIT(i) & region_id_mask))
+ break;
+
+ regions[num].region_id = i;
+ regions[num].queue_num = rss_conf->region_queue_num;
+ regions[num].queue_start_index = rss_conf->region_queue_start;
+ regions[num].user_priority[0] = rss_conf->region_priority;
+ regions[num].user_priority_num = 1;
+
+ return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+}
+
+static int
+i40e_hash_config(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct rte_flow_action_rss *rss_info = &rss_conf->conf;
+ struct i40e_hw *hw = &pf->adapter->hw;
+ uint32_t i;
+ int ret;
+
+ if (rss_conf->region_flags)
+ return i40e_hash_config_region(pf, rss_conf);
+
+ if (rss_info->key_len > 0) {
+ ret = i40e_set_rss_key(pf->main_vsi, rss_conf->key,
+ rss_info->key_len);
+ if (ret)
+ return ret;
+ }
+
+ if (rss_info->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+ ret = i40e_hash_config_func(hw, rss_conf->conf.func);
+ if (ret)
+ return ret;
+ }
+
+ /* Update lookup table */
+ if (rss_info->queue_num > 0) {
+ uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint32_t j = 0;
+
+ for (i = 0; i < hw->func_caps.rss_table_size; i++) {
+ lut[i] = (uint8_t)rss_info->queue[j];
+ j = (j == rss_info->queue_num - 1) ? 0 : (j + 1);
+ }
+
+ ret = i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
+ if (ret)
+ return ret;
+
+ pf->hash_enabled_queues = 0;
+ for (i = 0; i < rss_info->queue_num; i++)
+ pf->hash_enabled_queues |= BIT_ULL(lut[i]);
+
+ pf->adapter->rss_reta_updated = 0;
+ }
+
+ if (rss_conf->pctype_count <= 0)
+ return 0;
+
+ if (!pf->hash_filter_enable) {
+ i40e_pf_disable_rss(pf);
+ pf->hash_filter_enable = true;
+ }
+
+ for (i = 0; i < rss_conf->pctype_count; i++) {
+ ret = i40e_hash_config_pctype(hw, &rss_conf->conf,
+ rss_conf->pctypes[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+i40e_hash_parse_key(const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ const uint8_t *key = rss_act->key;
+
+ if (rss_act->key_len != sizeof(rss_conf->key)) {
+ const uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ PMD_DRV_LOG(WARNING, "RSS key invalid, set to default");
+ key = (const uint8_t *)rss_key_default;
+ }
+
+ memcpy(rss_conf->key, key, sizeof(rss_conf->key));
+ rss_conf->conf.key = rss_conf->key;
+ rss_conf->conf.key_len = sizeof(rss_conf->key);
+}
+
+static int
+i40e_hash_parse_queues(const struct rte_eth_dev *dev,
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const char *message;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ uint16_t i;
+ int max_queue;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!rss_act->queue_num ||
+ rss_act->queue_num > hw->func_caps.rss_table_size) {
+ message = "Invalid RSS queue number";
+ goto invalid;
+ }
+
+ if (rss_act->key_len)
+ PMD_DRV_LOG(WARNING,
+ "RSS key is ignored when queues specified");
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ max_queue = i40e_pf_calc_configured_queues_num(pf);
+ else
+ max_queue = pf->dev_data->nb_rx_queues;
+
+ max_queue = RTE_MIN(max_queue, I40E_MAX_Q_PER_TC);
+
+ for (i = 0; i < rss_act->queue_num; i++) {
+ if ((int)rss_act->queue[i] >= max_queue) {
+ message = "Invalid RSS queues";
+ goto invalid;
+ }
+ }
+
+ memcpy(rss_conf->queue, rss_act->queue,
+ rss_act->queue_num * sizeof(rss_conf->queue[0]));
+ rss_conf->conf.queue = rss_conf->queue;
+ rss_conf->conf.queue_num = rss_act->queue_num;
+
+ return 0;
+
+invalid:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, message);
+}
+
+static int
+i40e_hash_parse_queue_region(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ uint64_t hash_queues;
+ uint32_t i;
+
+ if (pattern[1].type != RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ &pattern[1],
+ "Pattern not supported.");
+
+ vlan_spec = pattern->spec;
+ vlan_mask = pattern->mask;
+ if (!vlan_spec || !vlan_mask ||
+ vlan_mask->tci != rte_cpu_to_be_16(I40E_TCI_MASK))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Pattern error.");
+
+ if (!rss_act->queue)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Queues not specified");
+
+ if (rss_act->types)
+ PMD_DRV_LOG(WARNING,
+ "RSS type is ignored when configure queue region");
+
+ if (rss_act->key_len)
+ PMD_DRV_LOG(WARNING,
+ "RSS key is ignored when configure queue region");
+
+ RTE_BUILD_BUG_ON(I40E_MAX_Q_PER_TC > 64);
+
+ if (!rss_act->queue_num ||
+ rss_act->queue_num > I40E_MAX_Q_PER_TC ||
+ !rte_is_power_of_2(rss_act->queue_num))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Queue number error");
+
+ hash_queues = BIT_ULL(rss_act->queue[0]);
+
+ for (i = 1; i < rss_act->queue_num; i++) {
+ if (rss_act->queue[i - 1] + 1 != rss_act->queue[i]) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "Queues must be incremented continuously");
+ return -EINVAL;
+ }
+
+ hash_queues |= BIT_ULL(rss_act->queue[i]);
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ if (hash_queues & ~pf->hash_enabled_queues)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Invalid queues");
+
+ rss_conf->region_queue_num = (uint8_t)rss_act->queue_num;
+ rss_conf->region_queue_start = rss_act->queue[0];
+ rss_conf->region_priority = rte_be_to_cpu_16(vlan_spec->tci) >> 13;
+ rss_conf->region_flags = 1;
+ return 0;
+}
+
+static bool
+i40e_hash_validate_rss_types(uint64_t rss_types)
+{
+ uint64_t type, mask;
+
+ /* Validate L2 */
+ type = ETH_RSS_ETH & rss_types;
+ mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+ if (!type && mask)
+ return false;
+
+ /* Validate L4 */
+ type = (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_NONFRAG_IPV6_SCTP) &
+ rss_types;
+ mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+ if (!type && mask)
+ return false;
+
+ /* Validate L3 */
+ type = type | ((ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types);
+ mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+ if (!type && mask)
+ return false;
+
+ return true;
+}
+
+static int
+i40e_hash_parse_pattern_act(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ if (rss_act->queue) {
+ const char *message =
+ "RSS Queues are not supported when pattern specified";
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, message);
+ }
+
+ if (!i40e_hash_validate_rss_types(rss_act->types))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "RSS types are invalid");
+
+ rss_conf->conf.types = rss_act->types;
+ return i40e_hash_get_pattern_pctypes(dev, pattern, rss_act,
+ rss_conf, error);
+}
+
+int
+i40e_hash_parse(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_rss *rss_act;
+
+ if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[1],
+ "Only support one action for RSS.");
+
+ rss_act = (const struct rte_flow_action_rss *)actions[0].conf;
+ if (rss_act->level)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "RSS level is not supported");
+
+ while (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
+ pattern++;
+
+ if (pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
+ return i40e_hash_parse_queue_region(dev, pattern, rss_act,
+ rss_conf, error);
+
+ if (pattern[0].type == RTE_FLOW_ITEM_TYPE_END) {
+ if (rss_act->types)
+ PMD_DRV_LOG(INFO,
+ "RSS types are not supported when no pattern specified");
+
+ if (rss_act->queue)
+ return i40e_hash_parse_queues(dev, rss_act,
+ rss_conf, error);
+
+ if (rss_act->key) {
+ i40e_hash_parse_key(rss_act, rss_conf);
+ return 0;
+ }
+
+ PMD_DRV_LOG(WARNING, "Nothing change");
+ return 0;
+ }
+
+ if (rss_act->key)
+ i40e_hash_parse_key(rss_act, rss_conf);
+
+ rss_conf->conf.func = rss_act->func;
+ return i40e_hash_parse_pattern_act(dev, pattern, rss_act,
+ rss_conf, error);
+}
+
+int
+i40e_hash_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rss_filter *filter;
+ int ret;
+
+ TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
+ ret = i40e_hash_config(pf, &filter->rss_filter_info);
+ if (ret) {
+ pf->hash_filter_enable = 0;
+ i40e_pf_disable_rss(pf);
+ PMD_DRV_LOG(ERR,
+ "Re-configure RSS failed, RSS has been disabled");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+i40e_hash_invalidate_conf(const struct i40e_rte_flow_rss_conf *ref_conf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ int i, j;
+
+ if (ref_conf->region_flags) {
+ if (conf->region_flags &&
+ ref_conf->region_queue_start == conf->region_queue_start &&
+ ref_conf->region_queue_num == conf->region_queue_num)
+ conf->region_queue_num = 0;
+
+ return;
+ }
+
+ if (conf->region_flags)
+ return;
+
+ /* Remove the PCTYPEs that are also present in the reference configuration */
+ for (i = 0; i < ref_conf->pctype_count; i++) {
+ for (j = 0; j < conf->pctype_count; j++) {
+ if (ref_conf->pctypes[i] == conf->pctypes[j]) {
+ conf->pctype_count--;
+ if (conf->pctype_count - j > 0)
+ memcpy(&conf->pctypes[j],
+ &conf->pctypes[j + 1],
+ (conf->pctype_count - j) *
+ sizeof(conf->pctypes[0]));
+ break;
+ }
+ }
+ }
+
+ if (ref_conf->conf.key_len && conf->conf.key_len) {
+ conf->conf.key_len = 0;
+ conf->conf.key = NULL;
+ }
+
+ if (ref_conf->conf.queue_num && conf->conf.queue_num) {
+ conf->conf.queue_num = 0;
+ conf->conf.queue = NULL;
+ }
+
+ if (ref_conf->conf.func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ conf->conf.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+}
+
+int
+i40e_hash_filter_create(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct i40e_rss_filter *filter, *old;
+ struct i40e_rte_flow_rss_conf *new_conf;
+ int ret;
+
+ filter = rte_zmalloc("i40e_rss_filter", sizeof(*filter), 0);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return -ENOMEM;
+ }
+
+ new_conf = &filter->rss_filter_info;
+
+ memcpy(new_conf, rss_conf, sizeof(*new_conf));
+ if (rss_conf->conf.queue_num)
+ new_conf->conf.queue = new_conf->queue;
+ if (rss_conf->conf.key_len)
+ new_conf->conf.key = new_conf->key;
+
+ ret = i40e_hash_config(pf, rss_conf);
+ if (ret) {
+ rte_free(filter);
+ if (i40e_pf_config_rss(pf))
+ return ret;
+
+ (void)i40e_hash_filter_restore(pf);
+ return ret;
+ }
+
+ TAILQ_FOREACH(old, &pf->rss_config_list, next)
+ i40e_hash_invalidate_conf(new_conf, &old->rss_filter_info);
+
+ TAILQ_INSERT_TAIL(&pf->rss_config_list, filter, next);
+ return 0;
+}
+
+static int
+i40e_hash_reset_conf(struct i40e_pf *pf,
+ const struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ const struct rte_flow_action_rss *rss_act = &rss_conf->conf;
+ struct i40e_hw *hw = &pf->adapter->hw;
+ const enum rte_eth_hash_function def_func =
+ RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ uint32_t i;
+ int ret;
+
+ if (rss_conf->region_queue_num)
+ return i40e_flush_queue_region_all_conf(pf->adapter->eth_dev,
+ hw, pf, 0);
+
+ for (i = 0; i < rss_conf->pctype_count; i++) {
+ ret = i40e_hash_reset_pctype(hw, rss_act, rss_conf->pctypes[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss_act->func != def_func) {
+ ret = i40e_hash_config_func(hw, def_func);
+ if (ret)
+ return ret;
+ }
+
+ if (rss_act->key) {
+ ret = i40e_pf_reset_rss_key(pf);
+ if (ret)
+ return ret;
+ }
+
+ if (rss_act->queue && !pf->adapter->rss_reta_updated) {
+ pf->hash_enabled_queues = 0;
+ return i40e_pf_reset_rss_reta(pf);
+ }
+
+ return 0;
+}
+
+int
+i40e_hash_filter_destroy(struct i40e_pf *pf,
+ const struct i40e_rss_filter *rss_filter)
+{
+ struct i40e_rss_filter *filter;
+ int ret;
+
+ TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
+ if (rss_filter == filter) {
+ ret = i40e_hash_reset_conf(pf,
+ &filter->rss_filter_info);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&pf->rss_config_list, filter, next);
+ rte_free(filter);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+i40e_hash_filter_flush(struct i40e_pf *pf)
+{
+ struct rte_flow *flow, *next;
+
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, next) {
+ if (flow->filter_type != RTE_ETH_FILTER_HASH)
+ continue;
+
+ if (flow->rule) {
+ struct i40e_rss_filter *filter = flow->rule;
+ int ret;
+
+ ret = i40e_hash_reset_conf(pf,
+ &filter->rss_filter_info);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&pf->rss_config_list, filter, next);
+ rte_free(filter);
+ }
+
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+
+ assert(!pf->rss_config_list.tqh_first);
+ return 0;
+}
diff --git a/drivers/net/i40e/i40e_hash.h b/drivers/net/i40e/i40e_hash.h
new file mode 100644
index 0000000..ff8c91c
--- /dev/null
+++ b/drivers/net/i40e/i40e_hash.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _I40E_HASH_H_
+#define _I40E_HASH_H_
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include "i40e_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int i40e_hash_parse(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error);
+
+int i40e_hash_filter_create(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf);
+
+int i40e_hash_filter_restore(struct i40e_pf *pf);
+int i40e_hash_filter_destroy(struct i40e_pf *pf,
+ const struct i40e_rss_filter *rss_filter);
+int i40e_hash_filter_flush(struct i40e_pf *pf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _I40E_HASH_H_ */
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
index 68f9895..4dcb3a3 100644
--- a/drivers/net/i40e/meson.build
+++ b/drivers/net/i40e/meson.build
@@ -17,6 +17,7 @@ sources = files(
'i40e_fdir.c',
'i40e_flow.c',
'i40e_tm.c',
+ 'i40e_hash.c',
'i40e_vf_representor.c',
'rte_pmd_i40e.c'
)
--
1.8.3.1
* Re: [dpdk-dev] [RFC] net/i40e: refactor of hash flow
2020-10-23 6:56 [dpdk-dev] [RFC] net/i40e: refactor of hash flow Zhang,Alvin
@ 2020-10-23 8:42 ` Wang, ShougangX
2020-10-26 2:05 ` Zhang, AlvinX
0 siblings, 1 reply; 4+ messages in thread
From: Wang, ShougangX @ 2020-10-23 8:42 UTC (permalink / raw)
To: Zhang, AlvinX, dev
Hi, Alvin
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Zhang,Alvin
> Sent: Friday, October 23, 2020 2:56 PM
> To: dev@dpdk.org
> Cc: Zhang, AlvinX <alvinx.zhang@intel.com>
> Subject: [dpdk-dev] [RFC] net/i40e: refactor of hash flow
>
> From: Alvin Zhang <alvinx.zhang@intel.com>
>
> 1. Delete original code.
> 2. Add 2 tables(pattern RSS type matched PCTYPE, RSS type to input set).
> 3. Parse RSS pattern and RSS type to get PCTYPE.
> 4. Parse RSS action to get queues, RSS function and hash field.
> 5. Create and destroy RSS filters.
> 6. Create new files for hash flows.
> 7. Update doc.
>
> Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> ---
> doc/guides/nics/i40e.rst | 4 +-
> drivers/net/i40e/i40e_ethdev.c | 840 ++++++-------------------
> drivers/net/i40e/i40e_ethdev.h | 43 +-
> drivers/net/i40e/i40e_flow.c | 617 +------------------
> drivers/net/i40e/i40e_hash.c | 1315
> ++++++++++++++++++++++++++++++++++++++++
> drivers/net/i40e/i40e_hash.h | 34 ++
> drivers/net/i40e/meson.build | 1 +
> 7 files changed, 1587 insertions(+), 1267 deletions(-)
> create mode 100644 drivers/net/i40e/i40e_hash.c
> create mode 100644 drivers/net/i40e/i40e_hash.h
>
<snip>
> diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
<snip>
> +#define I40E_HASH_VLAN_RSS_MASK (ETH_RSS_S_VLAN |
> ETH_RSS_C_VLAN)
> +#define I40E_HASH_L2_RSS_MASK (ETH_RSS_ETH |
> ETH_RSS_L2_SRC_ONLY | \
> + ETH_RSS_L2_SRC_ONLY)
This should be ETH_RSS_L2_DST_ONLY, right?
> +
> +#define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK |
> \
> + I40E_HASH_VLAN_RSS_MASK | \
> + ETH_RSS_L3_SRC_ONLY | \
> + ETH_RSS_L3_SRC_ONLY)
ETH_RSS_L3_DST_ONLY ?
> +
> +#define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK
> | \
> + ETH_RSS_PORT |
> ETH_RSS_L3_SRC_ONLY | \
> + ETH_RSS_L3_SRC_ONLY)
ETH_RSS_L3_DST_ONLY ?
Thanks
Shougang
* Re: [dpdk-dev] [RFC] net/i40e: refactor of hash flow
2020-10-23 8:42 ` Wang, ShougangX
@ 2020-10-26 2:05 ` Zhang, AlvinX
0 siblings, 0 replies; 4+ messages in thread
From: Zhang, AlvinX @ 2020-10-26 2:05 UTC (permalink / raw)
To: Wang, ShougangX, dev
Thanks Shougang,
Yes, I will update them.
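To spell out the intended fix (a sketch only, the exact definitions will
land in the next version): the duplicated *_SRC_ONLY bits become the
corresponding *_DST_ONLY bits, and since ETH_RSS_PORT is already part of
the L234 mask, the extra bits there are presumably the L4 SRC/DST ones:

#define I40E_HASH_VLAN_RSS_MASK	(ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
#define I40E_HASH_L2_RSS_MASK	(ETH_RSS_ETH | ETH_RSS_L2_SRC_ONLY | \
				ETH_RSS_L2_DST_ONLY)
#define I40E_HASH_L23_RSS_MASK	(I40E_HASH_L2_RSS_MASK | \
				I40E_HASH_VLAN_RSS_MASK | \
				ETH_RSS_L3_SRC_ONLY | \
				ETH_RSS_L3_DST_ONLY)
#define I40E_HASH_L234_RSS_MASK	(I40E_HASH_L23_RSS_MASK | \
				ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
				ETH_RSS_L4_DST_ONLY)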
BR,
Alvin
> -----Original Message-----
> From: Wang, ShougangX <shougangx.wang@intel.com>
> Sent: Friday, October 23, 2020 4:43 PM
> To: Zhang, AlvinX <alvinx.zhang@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [RFC] net/i40e: refactor of hash flow
>
> Hi, Alvin
>
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Zhang,Alvin
> > Sent: Friday, October 23, 2020 2:56 PM
> > To: dev@dpdk.org
> > Cc: Zhang, AlvinX <alvinx.zhang@intel.com>
> > Subject: [dpdk-dev] [RFC] net/i40e: refactor of hash flow
> >
> > From: Alvin Zhang <alvinx.zhang@intel.com>
> >
> > 1. Delete original code.
> > 2. Add 2 tables(pattern RSS type matched PCTYPE, RSS type to input set).
> > 3. Parse RSS pattern and RSS type to get PCTYPE.
> > 4. Parse RSS action to get queues, RSS function and hash field.
> > 5. Create and destroy RSS filters.
> > 6. Create new files for hash flows.
> > 7. Update doc.
> >
> > Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> > ---
> > doc/guides/nics/i40e.rst | 4 +-
> > drivers/net/i40e/i40e_ethdev.c | 840 ++++++-------------------
> > drivers/net/i40e/i40e_ethdev.h | 43 +-
> > drivers/net/i40e/i40e_flow.c | 617 +------------------
> > drivers/net/i40e/i40e_hash.c | 1315
> > ++++++++++++++++++++++++++++++++++++++++
> > drivers/net/i40e/i40e_hash.h | 34 ++
> > drivers/net/i40e/meson.build | 1 +
> > 7 files changed, 1587 insertions(+), 1267 deletions(-) create mode
> > 100644 drivers/net/i40e/i40e_hash.c create mode 100644
> > drivers/net/i40e/i40e_hash.h
> >
> <snip>
> > diff --git a/drivers/net/i40e/i40e_hash.c
> > b/drivers/net/i40e/i40e_hash.c
> <snip>
> > +#define I40E_HASH_VLAN_RSS_MASK (ETH_RSS_S_VLAN |
> > ETH_RSS_C_VLAN)
> > +#define I40E_HASH_L2_RSS_MASK (ETH_RSS_ETH |
> > ETH_RSS_L2_SRC_ONLY | \
> > + ETH_RSS_L2_SRC_ONLY)
>
> This should be ETH_RSS_L2_DST_ONLY, right?
> > +
> > +#define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK |
> > \
> > + I40E_HASH_VLAN_RSS_MASK | \
> > + ETH_RSS_L3_SRC_ONLY | \
> > + ETH_RSS_L3_SRC_ONLY)
>
> ETH_RSS_L3_DST_ONLY ?
>
> > +
> > +#define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK
> > | \
> > + ETH_RSS_PORT |
> > ETH_RSS_L3_SRC_ONLY | \
> > + ETH_RSS_L3_SRC_ONLY)
>
> ETH_RSS_L3_DST_ONLY ?
>
> Thanks
> Shougang
* [dpdk-dev] [RFC] net/i40e: refactor of hash flow
@ 2020-11-12 2:59 Zhang,Alvin
0 siblings, 0 replies; 4+ messages in thread
From: Zhang,Alvin @ 2020-11-12 2:59 UTC (permalink / raw)
To: dev; +Cc: Alvin Zhang
From: Alvin Zhang <alvinx.zhang@intel.com>
1. Delete original code.
2. Add 2 tables(pattern RSS type matched PCTYPE, RSS type to input set).
3. Parse RSS pattern and RSS type to get PCTYPE.
4. Parse RSS action to get queues, RSS function and hash field.
5. Create and destroy RSS filters.
6. Create new files for hash flows.
7. Update doc.
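For example, with the refactored parser an RSS hash rule and a queue
configuration rule can be created with testpmd commands of the following
shape (illustrative only, not part of this patch):
  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp l3-src-only end queues end / end
  testpmd> flow create 0 ingress pattern end actions rss queues 0 1 2 3 end / end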
Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
---
doc/guides/nics/i40e.rst | 4 +-
drivers/net/i40e/i40e_ethdev.c | 840 ++++++------------------
drivers/net/i40e/i40e_ethdev.h | 53 +-
drivers/net/i40e/i40e_flow.c | 617 +-----------------
drivers/net/i40e/i40e_hash.c | 1385 ++++++++++++++++++++++++++++++++++++++++
drivers/net/i40e/i40e_hash.h | 34 +
drivers/net/i40e/meson.build | 1 +
7 files changed, 1667 insertions(+), 1267 deletions(-)
create mode 100644 drivers/net/i40e/i40e_hash.c
create mode 100644 drivers/net/i40e/i40e_hash.h
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 828a259..dfc9e15 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -558,9 +558,9 @@ Generic flow API
- ``RSS Flow``
RSS Flow supports to set hash input set, hash function, enable hash
- and configure queue region.
+ and configure queues.
For example:
- Configure queue region as queue 0, 1, 2, 3.
+ Configure queues as queue 0, 1, 2, 3.
.. code-block:: console
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index f54769c..b06ce08 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -39,6 +39,7 @@
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
+#include "i40e_hash.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
@@ -396,7 +397,6 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
-static int i40e_pf_config_rss(struct i40e_pf *pf);
static const char *const valid_keys[] = {
ETH_I40E_FLOATING_VEB_ARG,
@@ -1764,10 +1764,6 @@ static inline void i40e_config_automask(struct i40e_pf *pf)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
- /* initialize RSS configuration from rte_flow */
- memset(&pf->rss_info, 0,
- sizeof(struct i40e_rte_flow_rss_conf));
-
/* reset all stats of the device, including pf and main vsi */
i40e_dev_stats_reset(dev);
@@ -4426,7 +4422,6 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
{
struct i40e_pf *pf;
struct i40e_hw *hw;
- int ret;
if (!vsi || !lut)
return -EINVAL;
@@ -4435,12 +4430,16 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
hw = I40E_VSI_TO_HW(vsi);
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
- vsi->type != I40E_VSI_SRIOV,
- lut, lut_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
- return ret;
+ enum i40e_status_code status;
+
+ status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
+ vsi->type != I40E_VSI_SRIOV,
+ lut, lut_size);
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to update RSS lookup table, error status: %d",
+ status);
+ return -EIO;
}
} else {
uint32_t *lut_dw = (uint32_t *)lut;
@@ -7573,7 +7572,7 @@ struct i40e_vsi *
}
/* Disable RSS */
-static void
+void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
@@ -7591,7 +7590,6 @@ struct i40e_vsi *
uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
I40E_VFQF_HKEY_MAX_INDEX :
I40E_PFQF_HKEY_MAX_INDEX;
- int ret = 0;
if (!key || key_len == 0) {
PMD_DRV_LOG(DEBUG, "No key to be configured");
@@ -7604,11 +7602,16 @@ struct i40e_vsi *
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
struct i40e_aqc_get_set_rss_key_data *key_dw =
- (struct i40e_aqc_get_set_rss_key_data *)key;
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+ enum i40e_status_code status =
+ i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
- ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
- if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure RSS key via AQ, error status: %d",
+ status);
+ return -EIO;
+ }
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
@@ -7628,7 +7631,7 @@ struct i40e_vsi *
I40E_WRITE_FLUSH(hw);
}
- return ret;
+ return 0;
}
static int
@@ -8782,7 +8785,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
}
/* Calculate the maximum number of contiguous PF queues that are configured */
-static int
+int
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
{
struct rte_eth_dev_data *data = pf->dev_data;
@@ -8801,19 +8804,72 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
return num;
}
-/* Configure RSS */
-static int
-i40e_pf_config_rss(struct i40e_pf *pf)
+/* Reset the global configuration of hash function and input sets */
+static void
+i40e_pf_global_rss_reset(struct i40e_pf *pf)
{
- enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_rss_conf rss_conf;
- uint32_t i, lut = 0;
- uint16_t j, num;
+ uint32_t reg, reg_val;
+ int i;
- /*
- * If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
+ /* Reset global RSS function sets */
+ reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
+ reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
+ }
+
+ for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
+ uint64_t inset;
+ int j, pctype;
+
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
+ else
+ pctype = i;
+
+ /* Reset pctype insets */
+ inset = i40e_get_default_input_set(i);
+ if (inset) {
+ pf->hash_input_set[pctype] = inset;
+ inset = i40e_translate_input_set_reg(hw->mac.type,
+ inset);
+
+ reg = I40E_GLQF_HASH_INSET(0, pctype);
+ i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
+ reg = I40E_GLQF_HASH_INSET(1, pctype);
+ i40e_check_write_global_reg(hw, reg,
+ (uint32_t)(inset >> 32));
+
+ /* Clear unused mask registers of the pctype */
+ for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
+ reg = I40E_GLQF_HASH_MSK(j, pctype);
+ i40e_check_write_global_reg(hw, reg, 0);
+ }
+ }
+
+ /* Reset pctype symmetric sets */
+ reg = I40E_GLQF_HSYM(pctype);
+ reg_val = i40e_read_rx_ctl(hw, reg);
+ if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
+ reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ i40e_write_global_rx_ctl(hw, reg, reg_val);
+ }
+ }
+ I40E_WRITE_FLUSH(hw);
+}
+
+int
+i40e_pf_reset_rss_reta(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->adapter->hw;
+ uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint32_t i;
+ int num;
+
+ /* If both VMDQ and RSS are enabled, not all of the PF queues are
+ * configured. It's necessary to calculate the actual number of PF
+ * queues that are configured.
+ */
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
@@ -8821,48 +8877,89 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
num = pf->dev_data->nb_rx_queues;
num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
- PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
+ if (num <= 0)
+ return 0;
- if (num == 0) {
- PMD_INIT_LOG(ERR,
- "No PF queues are configured to enable RSS for port %u",
- pf->dev_data->port_id);
- return -ENOTSUP;
- }
+ for (i = 0; i < hw->func_caps.rss_table_size; i++)
+ lut[i] = (uint8_t)(i % (uint32_t)num);
- if (pf->adapter->rss_reta_updated == 0) {
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (j & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
- rte_bswap32(lut));
- }
- }
+ return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
+}
- rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
- if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
- !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
- i40e_pf_disable_rss(pf);
- return 0;
- }
- if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
+int
+i40e_pf_reset_rss_key(struct i40e_pf *pf)
+{
+ const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ uint8_t *rss_key;
+
+ /* Reset key */
+ rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ if (!rss_key ||
+ pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
static uint32_t rss_key_default[] = {0x6b793944,
0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
- rss_conf.rss_key = (uint8_t *)rss_key_default;
- rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
+ rss_key = (uint8_t *)rss_key_default;
+ }
+
+ return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
+}
+
+static int
+i40e_pf_rss_reset(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ int ret;
+
+ pf->hash_filter_enable = 0;
+ i40e_pf_disable_rss(pf);
+ i40e_set_symmetric_hash_enable_per_port(hw, 0);
+
+ if (!pf->support_multi_driver)
+ i40e_pf_global_rss_reset(pf);
+
+ /* Reset RETA table */
+ if (pf->adapter->rss_reta_updated == 0) {
+ ret = i40e_pf_reset_rss_reta(pf);
+ if (ret)
+ return ret;
+ }
+
+ return i40e_pf_reset_rss_key(pf);
+}
+
+/* Configure RSS */
+int
+i40e_pf_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw;
+ enum rte_eth_rx_mq_mode mq_mode;
+ uint64_t rss_hf, hena;
+ int ret;
+
+ ret = i40e_pf_rss_reset(pf);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
+ return ret;
}
- return i40e_hw_rss_hash_set(pf, &rss_conf);
+ rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+ if (!(rss_hf & pf->adapter->flow_types_mask) ||
+ !(mq_mode & ETH_MQ_RX_RSS_FLAG))
+ return 0;
+
+ hw = I40E_PF_TO_HW(pf);
+ hena = i40e_config_hena(pf->adapter, rss_hf);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
}
#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
@@ -8910,24 +9007,20 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
}
/* Set the symmetric hash enable configurations per port */
-static void
+void
i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
{
uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
if (enable > 0) {
- if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO,
- "Symmetric hash has already been enabled");
+ if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
return;
- }
+
reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
} else {
- if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO,
- "Symmetric hash has already been disabled");
+ if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
return;
- }
+
reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
}
i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
@@ -9614,9 +9707,8 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
{
struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
enum i40e_filter_pctype pctype;
- uint64_t input_set, inset_reg = 0;
- uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
- int ret, i, num;
+ uint64_t input_set;
+ int ret;
if (!conf) {
PMD_DRV_LOG(ERR, "Invalid pointer");
@@ -9628,31 +9720,42 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
return -EINVAL;
}
- if (pf->support_multi_driver) {
- PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
- return -ENOTSUP;
- }
-
pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
- if (hw->mac.type == I40E_MAC_X722) {
- /* get translated pctype value in fd pctype register */
- pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
- I40E_GLQF_FD_PCTYPES((int)pctype));
- }
-
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to parse input set");
- return -EINVAL;
+ if (ret)
+ return ret;
+
+ return i40e_set_hash_inset(hw, input_set, pctype,
+ (conf->op == RTE_ETH_INPUT_SET_ADD) ?
+ true : false);
+}
+
+int
+i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
+ uint32_t pctype, bool add)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ uint64_t inset_reg = 0;
+ int num, i;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR,
+ "Modify input set is not permitted when multi-driver enabled.");
+ return -EPERM;
}
- if (conf->op == RTE_ETH_INPUT_SET_ADD) {
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
+
+ if (add) {
/* get inset value in register */
inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
inset_reg <<= I40E_32_BIT_WIDTH;
@@ -11853,25 +11956,13 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
}
}
-/* Restore RSS filter */
-static inline void
-i40e_rss_filter_restore(struct i40e_pf *pf)
-{
- struct i40e_rss_conf_list *list = &pf->rss_config_list;
- struct i40e_rss_filter *filter;
-
- TAILQ_FOREACH(filter, list, next) {
- i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
- }
-}
-
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
- i40e_rss_filter_restore(pf);
+ (void)i40e_hash_filter_restore(pf);
}
bool
@@ -12449,551 +12540,6 @@ struct i40e_customized_pctype*
return ret;
}
-int
-i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
- const struct rte_flow_action_rss *in)
-{
- if (in->key_len > RTE_DIM(out->key) ||
- in->queue_num > RTE_DIM(out->queue))
- return -EINVAL;
- if (!in->key && in->key_len)
- return -EINVAL;
- out->conf = (struct rte_flow_action_rss){
- .func = in->func,
- .level = in->level,
- .types = in->types,
- .key_len = in->key_len,
- .queue_num = in->queue_num,
- .queue = memcpy(out->queue, in->queue,
- sizeof(*in->queue) * in->queue_num),
- };
- if (in->key)
- out->conf.key = memcpy(out->key, in->key, in->key_len);
- return 0;
-}
-
-/* Write HENA register to enable hash */
-static int
-i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
- uint64_t hena;
- int ret;
-
- ret = i40e_set_rss_key(pf->main_vsi, key,
- rss_conf->conf.key_len);
- if (ret)
- return ret;
-
- hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
- I40E_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-/* Configure hash input set */
-static int
-i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_input_set_conf conf;
- uint64_t mask0;
- int ret = 0;
- uint32_t j;
- int i;
- static const struct {
- uint64_t type;
- enum rte_eth_input_set_field field;
- } inset_match_table[] = {
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
-
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP4},
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP4},
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
-
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
-
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
- {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
-
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
- RTE_ETH_INPUT_SET_L3_SRC_IP6},
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
- RTE_ETH_INPUT_SET_L3_DST_IP6},
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
- RTE_ETH_INPUT_SET_UNKNOWN},
- };
-
- mask0 = types & pf->adapter->flow_types_mask;
- conf.op = RTE_ETH_INPUT_SET_SELECT;
- conf.inset_size = 0;
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
- if (mask0 & (1ULL << i)) {
- conf.flow_type = i;
- break;
- }
- }
-
- for (j = 0; j < RTE_DIM(inset_match_table); j++) {
- if ((types & inset_match_table[j].type) ==
- inset_match_table[j].type) {
- if (inset_match_table[j].field ==
- RTE_ETH_INPUT_SET_UNKNOWN)
- return -EINVAL;
-
- conf.field[conf.inset_size] =
- inset_match_table[j].field;
- conf.inset_size++;
- }
- }
-
- if (conf.inset_size) {
- ret = i40e_hash_filter_inset_select(hw, &conf);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-/* Look up the conflicted rule then mark it as invalid */
-static void
-i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_rss_filter *rss_item;
- uint64_t rss_inset;
-
- /* Clear input set bits before comparing the pctype */
- rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
- ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
-
- /* Look up the conflicted rule then mark it as invalid */
- TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
- if (!rss_item->rss_filter_info.valid)
- continue;
-
- if (conf->conf.queue_num &&
- rss_item->rss_filter_info.conf.queue_num)
- rss_item->rss_filter_info.valid = false;
-
- if (conf->conf.types &&
- (rss_item->rss_filter_info.conf.types &
- rss_inset) ==
- (conf->conf.types & rss_inset))
- rss_item->rss_filter_info.valid = false;
-
- if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
- rss_item->rss_filter_info.conf.func ==
- RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
- rss_item->rss_filter_info.valid = false;
- }
-}
-
-/* Configure RSS hash function */
-static int
-i40e_rss_config_hash_function(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t reg, i;
- uint64_t mask0;
- uint16_t j;
-
- if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
- if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
- I40E_WRITE_FLUSH(hw);
- i40e_rss_mark_invalid_rule(pf, conf);
-
- return 0;
- }
- reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
-
- i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- I40E_WRITE_FLUSH(hw);
- i40e_rss_mark_invalid_rule(pf, conf);
- } else if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- mask0 = conf->conf.types & pf->adapter->flow_types_mask;
-
- i40e_set_symmetric_hash_enable_per_port(hw, 1);
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
- if (mask0 & (1UL << i))
- break;
- }
-
- if (i == UINT64_BIT)
- return -EINVAL;
-
- for (j = I40E_FILTER_PCTYPE_INVALID + 1;
- j < I40E_FILTER_PCTYPE_MAX; j++) {
- if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_global_rx_ctl(hw,
- I40E_GLQF_HSYM(j),
- I40E_GLQF_HSYM_SYMH_ENA_MASK);
- }
- }
-
- return 0;
-}
-
-/* Enable RSS according to the configuration */
-static int
-i40e_rss_enable_hash(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- struct i40e_rte_flow_rss_conf rss_conf;
-
- if (!(conf->conf.types & pf->adapter->flow_types_mask))
- return -ENOTSUP;
-
- memset(&rss_conf, 0, sizeof(rss_conf));
- rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
-
- /* Configure hash input set */
- if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
- return -EINVAL;
-
- if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
- static uint32_t rss_key_default[] = {0x6b793944,
- 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
- 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
- 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
-
- rss_conf.conf.key = (uint8_t *)rss_key_default;
- rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
- PMD_DRV_LOG(INFO,
- "No valid RSS key config for i40e, using default\n");
- }
-
- rss_conf.conf.types |= rss_info->conf.types;
- i40e_rss_hash_set(pf, &rss_conf);
-
- if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
- i40e_rss_config_hash_function(pf, conf);
-
- i40e_rss_mark_invalid_rule(pf, conf);
-
- return 0;
-}
-
-/* Configure RSS queue region */
-static int
-i40e_rss_config_queue_region(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t lut = 0;
- uint16_t j, num;
- uint32_t i;
-
- /* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
- */
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
- num = i40e_pf_calc_configured_queues_num(pf);
- else
- num = pf->dev_data->nb_rx_queues;
-
- num = RTE_MIN(num, conf->conf.queue_num);
- PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
-
- if (num == 0) {
- PMD_DRV_LOG(ERR,
- "No PF queues are configured to enable RSS for port %u",
- pf->dev_data->port_id);
- return -ENOTSUP;
- }
-
- /* Fill in redirection table */
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
-
- i40e_rss_mark_invalid_rule(pf, conf);
-
- return 0;
-}
-
-/* Configure RSS hash function to default */
-static int
-i40e_rss_clear_hash_function(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t i, reg;
- uint64_t mask0;
- uint16_t j;
-
- if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
- if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG,
- "Hash function already set to Toeplitz");
- I40E_WRITE_FLUSH(hw);
-
- return 0;
- }
- reg |= I40E_GLQF_CTL_HTOEP_MASK;
-
- i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- I40E_WRITE_FLUSH(hw);
- } else if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
- mask0 = conf->conf.types & pf->adapter->flow_types_mask;
-
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
- if (mask0 & (1UL << i))
- break;
- }
-
- if (i == UINT64_BIT)
- return -EINVAL;
-
- for (j = I40E_FILTER_PCTYPE_INVALID + 1;
- j < I40E_FILTER_PCTYPE_MAX; j++) {
- if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_global_rx_ctl(hw,
- I40E_GLQF_HSYM(j),
- 0);
- }
- }
-
- return 0;
-}
-
-/* Disable RSS hash and configure default input set */
-static int
-i40e_rss_disable_hash(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct i40e_rte_flow_rss_conf rss_conf;
- uint32_t i;
-
- memset(&rss_conf, 0, sizeof(rss_conf));
- rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
-
- /* Disable RSS hash */
- rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
- i40e_rss_hash_set(pf, &rss_conf);
-
- for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
- if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
- !(conf->conf.types & (1ULL << i)))
- continue;
-
- /* Configure default input set */
- struct rte_eth_input_set_conf input_conf = {
- .op = RTE_ETH_INPUT_SET_SELECT,
- .flow_type = i,
- .inset_size = 1,
- };
- input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
- i40e_hash_filter_inset_select(hw, &input_conf);
- }
-
- rss_info->conf.types = rss_conf.conf.types;
-
- i40e_rss_clear_hash_function(pf, conf);
-
- return 0;
-}
-
-/* Configure RSS queue region to default */
-static int
-i40e_rss_clear_queue_region(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- uint16_t queue[I40E_MAX_Q_PER_TC];
- uint32_t num_rxq, i;
- uint32_t lut = 0;
- uint16_t j, num;
-
- num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
-
- for (j = 0; j < num_rxq; j++)
- queue[j] = j;
-
- /* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
- */
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
- num = i40e_pf_calc_configured_queues_num(pf);
- else
- num = pf->dev_data->nb_rx_queues;
-
- num = RTE_MIN(num, num_rxq);
- PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
-
- if (num == 0) {
- PMD_DRV_LOG(ERR,
- "No PF queues are configured to enable RSS for port %u",
- pf->dev_data->port_id);
- return -ENOTSUP;
- }
-
- /* Fill in redirection table */
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (queue[j] & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
-
- rss_info->conf.queue_num = 0;
- memset(&rss_info->conf.queue, 0, sizeof(uint16_t));
-
- return 0;
-}
-
-int
-i40e_config_rss_filter(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf, bool add)
-{
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- struct rte_flow_action_rss update_conf = rss_info->conf;
- int ret = 0;
-
- if (add) {
- if (conf->conf.queue_num) {
- /* Configure RSS queue region */
- ret = i40e_rss_config_queue_region(pf, conf);
- if (ret)
- return ret;
-
- update_conf.queue_num = conf->conf.queue_num;
- update_conf.queue = conf->conf.queue;
- } else if (conf->conf.func ==
- RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- /* Configure hash function */
- ret = i40e_rss_config_hash_function(pf, conf);
- if (ret)
- return ret;
-
- update_conf.func = conf->conf.func;
- } else {
- /* Configure hash enable and input set */
- ret = i40e_rss_enable_hash(pf, conf);
- if (ret)
- return ret;
-
- update_conf.types |= conf->conf.types;
- update_conf.key = conf->conf.key;
- update_conf.key_len = conf->conf.key_len;
- }
-
- /* Update RSS info in pf */
- if (i40e_rss_conf_init(rss_info, &update_conf))
- return -EINVAL;
- } else {
- if (!conf->valid)
- return 0;
-
- if (conf->conf.queue_num)
- i40e_rss_clear_queue_region(pf);
- else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
- i40e_rss_clear_hash_function(pf, conf);
- else
- i40e_rss_disable_hash(pf, conf);
- }
-
- return 0;
-}
-
RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
#ifdef RTE_LIBRTE_I40E_DEBUG_RX
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 696c5aa..f13436b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -16,6 +16,8 @@
#include "rte_pmd_i40e.h"
#include "base/i40e_register.h"
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
#define I40E_VLAN_TAG_SIZE 4
@@ -265,6 +267,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_DEFAULT_DCB_APP_PRIO 3
#define I40E_FDIR_PRG_PKT_CNT 128
+#define I40E_PCTYPE_MAX 64
/*
* Struct to store flow created.
@@ -1062,16 +1065,32 @@ struct i40e_customized_pctype {
};
struct i40e_rte_flow_rss_conf {
- struct rte_flow_action_rss conf; /**< RSS parameters. */
- uint16_t queue_region_conf; /**< Queue region config flag */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+
uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t)]; /* Hash key. */
- uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
- bool valid; /* Check if it's valid */
-};
+ sizeof(uint32_t)]; /**< Hash key. */
+ uint16_t queue[ETH_RSS_RETA_SIZE_512]; /**< Queue indices to use. */
-TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter);
+ bool symmetric_enable; /**< true if symmetric hashing is enabled */
+ uint64_t config_pctypes; /**< All PCTYPEs configured by the flow */
+ uint64_t inset; /**< input sets */
+
+ uint8_t region_priority; /**< queue region priority */
+ uint8_t region_queue_num; /**< region queue number */
+ uint16_t region_queue_start; /**< region queue start */
+
+ uint32_t misc_reset_flags;
+#define I40E_HASH_FLOW_RESET_FLAG_FUNC 0x01UL
+#define I40E_HASH_FLOW_RESET_FLAG_KEY 0x02UL
+#define I40E_HASH_FLOW_RESET_FLAG_QUEUE 0x04UL
+#define I40E_HASH_FLOW_RESET_FLAG_REGION 0x08UL
+
+ /**< All PCTYPES that reset with the flow */
+ uint64_t reset_config_pctypes;
+ /**< Symmetric function should reset on PCTYPES */
+ uint64_t reset_symmetric_pctypes;
+};
/* RSS filter list structure */
struct i40e_rss_filter {
@@ -1079,6 +1098,8 @@ struct i40e_rss_filter {
struct i40e_rte_flow_rss_conf rss_filter_info;
};
+TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter);
+
struct i40e_vf_msg_cfg {
/* maximal VF message during a statistic period */
uint32_t max_msg;
@@ -1133,6 +1154,7 @@ struct i40e_pf {
uint16_t fdir_qp_offset;
uint16_t hash_lut_size; /* The size of hash lookup table */
+ uint64_t hash_enabled_queues;
/* input set bits for each pctype */
uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX];
/* store VXLAN UDP ports */
@@ -1147,7 +1169,6 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
- struct i40e_rte_flow_rss_conf rss_info; /* RSS info */
struct i40e_rss_conf_list rss_config_list; /* RSS rule list */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
@@ -1165,7 +1186,7 @@ struct i40e_pf {
bool dport_replace_flag; /* Destination port replace is done */
struct i40e_tm_conf tm_conf;
bool support_multi_driver; /* 1 - support multiple driver */
-
+ bool hash_filter_enable;
/* Dynamic Device Personalization */
bool gtp_support; /* 1 - support GTP-C and GTP-U */
bool esp_support; /* 1 - support ESP SPI */
@@ -1376,6 +1397,8 @@ int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
void i40e_fdir_filter_restore(struct i40e_pf *pf);
+int i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
+ uint32_t pctype, bool add);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode,
@@ -1432,7 +1455,8 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv);
bool is_i40e_supported(struct rte_eth_dev *dev);
bool is_i40evf_supported(struct rte_eth_dev *dev);
-
+void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw,
+ uint8_t enable);
int i40e_validate_input_set(enum i40e_filter_pctype pctype,
enum rte_filter_type filter, uint64_t inset);
int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
@@ -1455,12 +1479,13 @@ int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
+void i40e_pf_disable_rss(struct i40e_pf *pf);
+int i40e_pf_calc_configured_queues_num(struct i40e_pf *pf);
+int i40e_pf_reset_rss_reta(struct i40e_pf *pf);
+int i40e_pf_reset_rss_key(struct i40e_pf *pf);
+int i40e_pf_config_rss(struct i40e_pf *pf);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
-int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
- const struct rte_flow_action_rss *in);
-int i40e_config_rss_filter(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf, bool add);
int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b09ff65..d69a794 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -23,6 +23,7 @@
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
+#include "i40e_hash.h"
#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER 44
@@ -118,7 +119,6 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
-static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -4530,566 +4530,6 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
return ret;
}
-/**
- * This function is used to do configuration i40e existing RSS with rte_flow.
- * It also enable queue region configuration using flow API for i40e.
- * pattern can be used indicate what parameters will be include in flow,
- * like user_priority or flowtype for queue region or HASH function for RSS.
- * Action is used to transmit parameter like queue index and HASH
- * function for RSS, or flowtype for queue region configuration.
- * For example:
- * pattern:
- * Case 1: try to transform patterns to pctype. valid pctype will be
- * used in parse action.
- * Case 2: only ETH, indicate flowtype for queue region will be parsed.
- * Case 3: only VLAN, indicate user_priority for queue region will be parsed.
- * So, pattern choice is depened on the purpose of configuration of
- * that flow.
- * action:
- * action RSS will be used to transmit valid parameter with
- * struct rte_flow_action_rss for all the 3 case.
- */
-static int
-i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
- const struct rte_flow_item *pattern,
- struct rte_flow_error *error,
- struct i40e_rss_pattern_info *p_info,
- struct i40e_queue_regions *info)
-{
- const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
- const struct rte_flow_item *item = pattern;
- enum rte_flow_item_type item_type;
- struct rte_flow_item *items;
- uint32_t item_num = 0; /* non-void item number of pattern*/
- uint32_t i = 0;
- static const struct {
- enum rte_flow_item_type *item_array;
- uint64_t type;
- } i40e_rss_pctype_patterns[] = {
- { pattern_fdir_ipv4,
- ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
- { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
- { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
- { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
- { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
- { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
- { pattern_fdir_ipv6,
- ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
- { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
- { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
- { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
- { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
- { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
- { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
- };
-
- p_info->types = I40E_RSS_TYPE_INVALID;
-
- if (item->type == RTE_FLOW_ITEM_TYPE_END) {
- p_info->types = I40E_RSS_TYPE_NONE;
- return 0;
- }
-
- /* Convert pattern to RSS offload types */
- while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
- if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
- item_num++;
- i++;
- }
- item_num++;
-
- items = rte_zmalloc("i40e_pattern",
- item_num * sizeof(struct rte_flow_item), 0);
- if (!items) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "No memory for PMD internal items.");
- return -ENOMEM;
- }
-
- i40e_pattern_skip_void_item(items, pattern);
-
- for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
- if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
- items)) {
- p_info->types = i40e_rss_pctype_patterns[i].type;
- break;
- }
- }
-
- rte_free(items);
-
- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Not support range");
- return -rte_errno;
- }
- item_type = item->type;
- switch (item_type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- p_info->action_flag = 1;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec = item->spec;
- vlan_mask = item->mask;
- if (vlan_spec && vlan_mask) {
- if (vlan_mask->tci ==
- rte_cpu_to_be_16(I40E_VLAN_TCI_MASK)) {
- info->region[0].user_priority[0] =
- (rte_be_to_cpu_16(
- vlan_spec->tci) >> 13) & 0x7;
- info->region[0].user_priority_num = 1;
- info->queue_region_number = 1;
- p_info->action_flag = 0;
- }
- }
- break;
- default:
- p_info->action_flag = 0;
- memset(info, 0, sizeof(struct i40e_queue_regions));
- return 0;
- }
- }
-
- return 0;
-}
-
-/**
- * This function is used to parse RSS queue index, total queue number and
- * hash functions, If the purpose of this configuration is for queue region
- * configuration, it will set queue_region_conf flag to TRUE, else to FALSE.
- * In queue region configuration, it also need to parse hardware flowtype
- * and user_priority from configuration, it will also cheeck the validity
- * of these parameters. For example, The queue region sizes should
- * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
- * hw_flowtype or PCTYPE max index should be 63, the user priority
- * max index should be 7, and so on. And also, queue index should be
- * continuous sequence and queue region index should be part of RSS
- * queue index for this port.
- * For hash params, the pctype in action and pattern must be same.
- * Set queue index must be with non-types.
- */
-static int
-i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *actions,
- struct rte_flow_error *error,
- struct i40e_rss_pattern_info p_info,
- struct i40e_queue_regions *conf_info,
- union i40e_filter_t *filter)
-{
- const struct rte_flow_action *act;
- const struct rte_flow_action_rss *rss;
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_queue_regions *info = &pf->queue_region;
- struct i40e_rte_flow_rss_conf *rss_config =
- &filter->rss_conf;
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
- uint16_t i, j, n, m, tmp, nb_types;
- uint32_t index = 0;
- uint64_t hf_bit = 1;
-
- static const struct {
- uint64_t rss_type;
- enum i40e_filter_pctype pctype;
- } pctype_match_table[] = {
- {ETH_RSS_FRAG_IPV4,
- I40E_FILTER_PCTYPE_FRAG_IPV4},
- {ETH_RSS_NONFRAG_IPV4_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
- {ETH_RSS_NONFRAG_IPV4_UDP,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
- {ETH_RSS_NONFRAG_IPV4_SCTP,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
- {ETH_RSS_NONFRAG_IPV4_OTHER,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
- {ETH_RSS_FRAG_IPV6,
- I40E_FILTER_PCTYPE_FRAG_IPV6},
- {ETH_RSS_NONFRAG_IPV6_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
- {ETH_RSS_NONFRAG_IPV6_UDP,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
- {ETH_RSS_NONFRAG_IPV6_SCTP,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
- {ETH_RSS_NONFRAG_IPV6_OTHER,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
- {ETH_RSS_L2_PAYLOAD,
- I40E_FILTER_PCTYPE_L2_PAYLOAD},
- };
-
- static const struct {
- uint64_t rss_type;
- enum i40e_filter_pctype pctype;
- } pctype_match_table_x722[] = {
- {ETH_RSS_NONFRAG_IPV4_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK},
- {ETH_RSS_NONFRAG_IPV4_UDP,
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP},
- {ETH_RSS_NONFRAG_IPV4_UDP,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP},
- {ETH_RSS_NONFRAG_IPV6_TCP,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK},
- {ETH_RSS_NONFRAG_IPV6_UDP,
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP},
- {ETH_RSS_NONFRAG_IPV6_UDP,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP},
- };
-
- NEXT_ITEM_OF_ACTION(act, actions, index);
- rss = act->conf;
-
- /**
- * RSS only supports forwarding,
- * check if the first not void action is RSS.
- */
- if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
- memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Not supported action.");
- return -rte_errno;
- }
-
- if (p_info.action_flag && rss->queue_num) {
- for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
- if (rss->types & pctype_match_table[j].rss_type) {
- conf_info->region[0].hw_flowtype[0] =
- (uint8_t)pctype_match_table[j].pctype;
- conf_info->region[0].flowtype_num = 1;
- conf_info->queue_region_number = 1;
- break;
- }
- }
-
- if (hw->mac.type == I40E_MAC_X722)
- for (j = 0; j < RTE_DIM(pctype_match_table_x722); j++) {
- if (rss->types &
- pctype_match_table_x722[j].rss_type) {
- m = conf_info->region[0].flowtype_num;
- conf_info->region[0].hw_flowtype[m] =
- pctype_match_table_x722[j].pctype;
- conf_info->region[0].flowtype_num++;
- conf_info->queue_region_number = 1;
- }
- }
- }
-
- /**
- * Do some queue region related parameters check
- * in order to keep queue index for queue region to be
- * continuous sequence and also to be part of RSS
- * queue index for this port.
- */
- if (conf_info->queue_region_number) {
- for (i = 0; i < rss->queue_num; i++) {
- for (j = 0; j < rss_info->conf.queue_num; j++) {
- if (rss->queue[i] == rss_info->conf.queue[j])
- break;
- }
- if (j == rss_info->conf.queue_num) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "no valid queues");
- return -rte_errno;
- }
- }
-
- for (i = 0; i < rss->queue_num - 1; i++) {
- if (rss->queue[i + 1] != rss->queue[i] + 1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "no valid queues");
- return -rte_errno;
- }
- }
- }
-
- /* Parse queue region related parameters from configuration */
- for (n = 0; n < conf_info->queue_region_number; n++) {
- if (conf_info->region[n].user_priority_num ||
- conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->queue_num)) &&
- rss->queue_num <= 64)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
- "total number of queues do not exceed the VSI allocation");
- return -rte_errno;
- }
-
- if (conf_info->region[n].user_priority[n] >=
- I40E_MAX_USER_PRIORITY) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "the user priority max index is 7");
- return -rte_errno;
- }
-
- if (conf_info->region[n].hw_flowtype[n] >=
- I40E_FILTER_PCTYPE_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "the hw_flowtype or PCTYPE max index is 63");
- return -rte_errno;
- }
-
- for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num ==
- rss->queue_num &&
- info->region[i].queue_start_index ==
- rss->queue[0])
- break;
- }
-
- if (i == info->queue_region_number) {
- if (i > I40E_REGION_MAX_INDEX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "the queue region max index is 7");
- return -rte_errno;
- }
-
- info->region[i].queue_num =
- rss->queue_num;
- info->region[i].queue_start_index =
- rss->queue[0];
- info->region[i].region_id =
- info->queue_region_number;
-
- j = info->region[i].user_priority_num;
- tmp = conf_info->region[n].user_priority[0];
- if (conf_info->region[n].user_priority_num) {
- info->region[i].user_priority[j] = tmp;
- info->region[i].user_priority_num++;
- }
-
- for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
- j = info->region[i].flowtype_num;
- tmp = conf_info->region[n].hw_flowtype[m];
- info->region[i].hw_flowtype[j] = tmp;
- info->region[i].flowtype_num++;
- }
- info->queue_region_number++;
- } else {
- j = info->region[i].user_priority_num;
- tmp = conf_info->region[n].user_priority[0];
- if (conf_info->region[n].user_priority_num) {
- info->region[i].user_priority[j] = tmp;
- info->region[i].user_priority_num++;
- }
-
- for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
- j = info->region[i].flowtype_num;
- tmp = conf_info->region[n].hw_flowtype[m];
- info->region[i].hw_flowtype[j] = tmp;
- info->region[i].flowtype_num++;
- }
- }
- }
-
- rss_config->queue_region_conf = TRUE;
- }
-
- /**
- * Return function if this flow is used for queue region configuration
- */
- if (rss_config->queue_region_conf)
- return 0;
-
- if (!rss) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "invalid rule");
- return -rte_errno;
- }
-
- for (n = 0; n < rss->queue_num; n++) {
- if (rss->queue[n] >= dev->data->nb_rx_queues) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "queue id > max number of queues");
- return -rte_errno;
- }
- }
-
- if (rss->queue_num && (p_info.types || rss->types))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS types must be empty while configuring queue region");
-
- /* validate pattern and pctype */
- if (!(rss->types & p_info.types) &&
- (rss->types || p_info.types) && !rss->queue_num)
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "invalid pctype");
-
- nb_types = 0;
- for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
- if (rss->types & (hf_bit << n))
- nb_types++;
- if (nb_types > 1)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "multi pctype is not supported");
- }
-
- if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
- (p_info.types || rss->types || rss->queue_num))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "pattern, type and queues must be empty while"
- " setting hash function as simple_xor");
-
- if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
- !(p_info.types && rss->types))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "pctype and queues can not be empty while"
- " setting hash function as symmetric toeplitz");
-
- /* Parse RSS related parameters from configuration */
- if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
- rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS hash functions are not supported");
- if (rss->level)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "a nonzero RSS encapsulation level is not supported");
- if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS hash key too large");
- if (rss->queue_num > RTE_DIM(rss_config->queue))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "too many queues for RSS context");
- if (i40e_rss_conf_init(rss_config, rss))
- return rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
- "RSS context initialization failure");
-
- index++;
-
- /* check if the next not void action is END */
- NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Not supported action.");
- return -rte_errno;
- }
- rss_config->queue_region_conf = FALSE;
-
- return 0;
-}
-
-static int
-i40e_parse_rss_filter(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- union i40e_filter_t *filter,
- struct rte_flow_error *error)
-{
- struct i40e_rss_pattern_info p_info;
- struct i40e_queue_regions info;
- int ret;
-
- memset(&info, 0, sizeof(struct i40e_queue_regions));
- memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
-
- ret = i40e_flow_parse_rss_pattern(dev, pattern,
- error, &p_info, &info);
- if (ret)
- return ret;
-
- ret = i40e_flow_parse_rss_action(dev, actions, error,
- p_info, &info, filter);
- if (ret)
- return ret;
-
- ret = i40e_flow_parse_attr(attr, error);
- if (ret)
- return ret;
-
- cons_filter_type = RTE_ETH_FILTER_HASH;
-
- return 0;
-}
-
-static int
-i40e_config_rss_filter_set(struct rte_eth_dev *dev,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_rss_filter *rss_filter;
- int ret;
-
- if (conf->queue_region_conf) {
- ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
- } else {
- ret = i40e_config_rss_filter(pf, conf, 1);
- }
-
- if (ret)
- return ret;
-
- rss_filter = rte_zmalloc("i40e_rss_filter",
- sizeof(*rss_filter), 0);
- if (rss_filter == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -ENOMEM;
- }
- rss_filter->rss_filter_info = *conf;
- /* the rule new created is always valid
- * the existing rule covered by new rule will be set invalid
- */
- rss_filter->rss_filter_info.valid = true;
-
- TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
-
- return 0;
-}
-
-static int
-i40e_config_rss_filter_del(struct rte_eth_dev *dev,
- struct i40e_rte_flow_rss_conf *conf)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_rss_filter *rss_filter;
- void *temp;
-
- if (conf->queue_region_conf)
- i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- else
- i40e_config_rss_filter(pf, conf, 0);
-
- TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
- if (!memcmp(&rss_filter->rss_filter_info, conf,
- sizeof(struct rte_flow_action_rss))) {
- TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
- rte_free(rss_filter);
- }
- }
- return 0;
-}
-
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -5130,9 +4570,13 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
i++;
if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
- ret = i40e_parse_rss_filter(dev, attr, pattern,
- actions, &cons_filter, error);
- return ret;
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+ return i40e_hash_parse(dev, pattern, actions + i,
+ &cons_filter.rss_conf, error);
}
i = 0;
@@ -5247,12 +4691,11 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
i40e_tunnel_filter_list);
break;
case RTE_ETH_FILTER_HASH:
- ret = i40e_config_rss_filter_set(dev,
- &cons_filter.rss_conf);
+ ret = i40e_hash_filter_create(pf, &cons_filter.rss_conf);
if (ret)
goto free_flow;
flow->rule = TAILQ_LAST(&pf->rss_config_list,
- i40e_rss_conf_list);
+ i40e_rss_conf_list);
break;
default:
goto free_flow;
@@ -5305,8 +4748,7 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
}
break;
case RTE_ETH_FILTER_HASH:
- ret = i40e_config_rss_filter_del(dev,
- &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
+ ret = i40e_hash_filter_destroy(pf, flow->rule);
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
@@ -5454,14 +4896,11 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- ret = i40e_flow_flush_rss_filter(dev);
- if (ret) {
+ ret = i40e_hash_filter_flush(pf);
+ if (ret)
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush RSS flows.");
- return -rte_errno;
- }
-
return ret;
}
@@ -5578,36 +5017,6 @@ static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
return ret;
}
-/* remove the RSS filter */
-static int
-i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_flow *flow;
- void *temp;
- int32_t ret = -EINVAL;
-
- ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
-
- /* Delete RSS flows in flow list. */
- TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
- if (flow->filter_type != RTE_ETH_FILTER_HASH)
- continue;
-
- if (flow->rule) {
- ret = i40e_config_rss_filter_del(dev,
- &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
- if (ret)
- return ret;
- }
- TAILQ_REMOVE(&pf->flow_list, flow, node);
- rte_free(flow);
- }
-
- return ret;
-}
-
static int
i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
struct rte_flow *flow,
diff --git a/drivers/net/i40e/i40e_hash.c b/drivers/net/i40e/i40e_hash.c
new file mode 100644
index 0000000..7457c9a
--- /dev/null
+++ b/drivers/net/i40e/i40e_hash.c
@@ -0,0 +1,1385 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include "base/i40e_prototype.h"
+#include "i40e_logs.h"
+#include "i40e_ethdev.h"
+#include "i40e_hash.h"
+
+#ifndef BIT
+#define BIT(n) (1UL << (n))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+#endif
+
+/* Pattern item headers */
+#define I40E_HASH_HDR_ETH 0x01ULL
+#define I40E_HASH_HDR_IPV4 0x10ULL
+#define I40E_HASH_HDR_IPV6 0x20ULL
+#define I40E_HASH_HDR_TCP 0x100ULL
+#define I40E_HASH_HDR_UDP 0x200ULL
+#define I40E_HASH_HDR_SCTP 0x400ULL
+#define I40E_HASH_HDR_ESP 0x10000ULL
+#define I40E_HASH_HDR_L2TPV3 0x20000ULL
+#define I40E_HASH_HDR_AH 0x40000ULL
+#define I40E_HASH_HDR_GTPC 0x100000ULL
+#define I40E_HASH_HDR_GTPU 0x200000ULL
+
+#define I40E_HASH_HDR_INNER_SHIFT 32
+#define I40E_HASH_HDR_IPV4_INNER (I40E_HASH_HDR_IPV4 << \
+ I40E_HASH_HDR_INNER_SHIFT)
+#define I40E_HASH_HDR_IPV6_INNER (I40E_HASH_HDR_IPV6 << \
+ I40E_HASH_HDR_INNER_SHIFT)
+
+/* ETH */
+#define I40E_PHINT_ETH I40E_HASH_HDR_ETH
+
+/* IPv4 */
+#define I40E_PHINT_IPV4 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV4)
+#define I40E_PHINT_IPV4_TCP (I40E_PHINT_IPV4 | I40E_HASH_HDR_TCP)
+#define I40E_PHINT_IPV4_UDP (I40E_PHINT_IPV4 | I40E_HASH_HDR_UDP)
+#define I40E_PHINT_IPV4_SCTP (I40E_PHINT_IPV4 | I40E_HASH_HDR_SCTP)
+
+/* IPv6 */
+#define I40E_PHINT_IPV6 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV6)
+#define I40E_PHINT_IPV6_TCP (I40E_PHINT_IPV6 | I40E_HASH_HDR_TCP)
+#define I40E_PHINT_IPV6_UDP (I40E_PHINT_IPV6 | I40E_HASH_HDR_UDP)
+#define I40E_PHINT_IPV6_SCTP (I40E_PHINT_IPV6 | I40E_HASH_HDR_SCTP)
+
+/* ESP */
+#define I40E_PHINT_IPV4_ESP (I40E_PHINT_IPV4 | I40E_HASH_HDR_ESP)
+#define I40E_PHINT_IPV6_ESP (I40E_PHINT_IPV6 | I40E_HASH_HDR_ESP)
+#define I40E_PHINT_IPV4_UDP_ESP (I40E_PHINT_IPV4_UDP | \
+ I40E_HASH_HDR_ESP)
+#define I40E_PHINT_IPV6_UDP_ESP (I40E_PHINT_IPV6_UDP | \
+ I40E_HASH_HDR_ESP)
+
+/* GTPC */
+#define I40E_PHINT_IPV4_GTPC (I40E_PHINT_IPV4_UDP | \
+ I40E_HASH_HDR_GTPC)
+#define I40E_PHINT_IPV6_GTPC (I40E_PHINT_IPV6_UDP | \
+ I40E_HASH_HDR_GTPC)
+
+/* GTPU */
+#define I40E_PHINT_IPV4_GTPU (I40E_PHINT_IPV4_UDP | \
+ I40E_HASH_HDR_GTPU)
+#define I40E_PHINT_IPV4_GTPU_IPV4 (I40E_PHINT_IPV4_GTPU | \
+ I40E_HASH_HDR_IPV4_INNER)
+#define I40E_PHINT_IPV4_GTPU_IPV6 (I40E_PHINT_IPV4_GTPU | \
+ I40E_HASH_HDR_IPV6_INNER)
+#define I40E_PHINT_IPV6_GTPU (I40E_PHINT_IPV6_UDP | \
+ I40E_HASH_HDR_GTPU)
+#define I40E_PHINT_IPV6_GTPU_IPV4 (I40E_PHINT_IPV6_GTPU | \
+ I40E_HASH_HDR_IPV4_INNER)
+#define I40E_PHINT_IPV6_GTPU_IPV6 (I40E_PHINT_IPV6_GTPU | \
+ I40E_HASH_HDR_IPV6_INNER)
+
+/* L2TPV3 */
+#define I40E_PHINT_IPV4_L2TPV3 (I40E_PHINT_IPV4 | I40E_HASH_HDR_L2TPV3)
+#define I40E_PHINT_IPV6_L2TPV3 (I40E_PHINT_IPV6 | I40E_HASH_HDR_L2TPV3)
+
+/* AH */
+#define I40E_PHINT_IPV4_AH (I40E_PHINT_IPV4 | I40E_HASH_HDR_AH)
+#define I40E_PHINT_IPV6_AH (I40E_PHINT_IPV6 | I40E_HASH_HDR_AH)
+
+/* Structure of mapping RSS type to input set */
+struct i40e_hash_map_rss_inset {
+ uint64_t rss_type;
+ uint64_t inset;
+};
+
+const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
+ /* IPv4 */
+ { ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+ { ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+
+ { ETH_RSS_NONFRAG_IPV4_OTHER,
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
+
+ { ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
+
+ /* IPv6 */
+ { ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+ { ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+
+ { ETH_RSS_NONFRAG_IPV6_OTHER,
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
+
+ { ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ { ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
+
+ /* Port */
+ { ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
+
+ /* Ether */
+ { ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
+ { ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
+
+ /* VLAN */
+ { ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
+ { ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
+};
+
+#define I40E_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
+
+#define I40E_HASH_ETH_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_VLAN))
+
+#define I40E_HASH_IP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_ESP) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_L2TPV3OIP) |\
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_AH))
+
+#define I40E_HASH_UDP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPU) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPC))
+
+#define I40E_HASH_GTPU_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
+ BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
+
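+/* For each item type, the bitmap of item types that are allowed to
+ * follow it in a pattern.
+ */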
+static const uint64_t pattern_next_allow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_VOID] = I40E_HASH_VOID_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_ETH_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_IP_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_IP_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_UDP_NEXT_ALLOW,
+ [RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_GTPU_NEXT_ALLOW,
+};
+
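+/* Map each supported item type to its I40E_HASH_HDR_* header flag */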
+static const uint64_t pattern_item_header[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_HDR_ETH,
+ [RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_HDR_IPV4,
+ [RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_HDR_IPV6,
+ [RTE_FLOW_ITEM_TYPE_TCP] = I40E_HASH_HDR_TCP,
+ [RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_HDR_UDP,
+ [RTE_FLOW_ITEM_TYPE_SCTP] = I40E_HASH_HDR_SCTP,
+ [RTE_FLOW_ITEM_TYPE_ESP] = I40E_HASH_HDR_ESP,
+ [RTE_FLOW_ITEM_TYPE_GTPC] = I40E_HASH_HDR_GTPC,
+ [RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_HDR_GTPU,
+ [RTE_FLOW_ITEM_TYPE_L2TPV3OIP] = I40E_HASH_HDR_L2TPV3,
+ [RTE_FLOW_ITEM_TYPE_AH] = I40E_HASH_HDR_AH,
+};
+
+/* Structure of matched pattern */
+struct i40e_hash_match_pattern {
+ uint64_t pattern_type;
+ uint64_t rss_mask; /* Supported RSS type for this pattern */
+ bool custom_pctype_flag; /* true for custom packet type */
+ uint8_t pctype;
+};
+
+#define I40E_HASH_MAP_PATTERN(pattern, rss_mask, pctype) { \
+ pattern, rss_mask, false, pctype }
+
+#define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
+ pattern, rss_mask, true, cus_pctype }
+
+#define I40E_HASH_L2_RSS_MASK (ETH_RSS_ETH | ETH_RSS_L2_SRC_ONLY | \
+ ETH_RSS_L2_DST_ONLY)
+
+#define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK | \
+ ETH_RSS_VLAN | \
+ ETH_RSS_L3_SRC_ONLY | \
+ ETH_RSS_L3_DST_ONLY)
+
+#define I40E_HASH_IPV4_L23_RSS_MASK (ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
+#define I40E_HASH_IPV6_L23_RSS_MASK (ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
+
+#define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK | \
+ ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
+ ETH_RSS_L4_DST_ONLY)
+
+#define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
+#define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
+
+#define I40E_HASH_L4_TYPES (ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+
+/* Currently supported patterns and RSS types.
+ * Entries with the same pattern type are grouped together.
+ */
+static const struct i40e_hash_match_pattern match_patterns[] = {
+ /* Ether */
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
+ ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
+ I40E_FILTER_PCTYPE_L2_PAYLOAD),
+
+ /* IPv4 */
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
+ ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_FRAG_IPV4),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
+ ETH_RSS_NONFRAG_IPV4_OTHER |
+ I40E_HASH_IPV4_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
+ ETH_RSS_NONFRAG_IPV4_SCTP |
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
+
+ /* IPv6 */
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
+ ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_FRAG_IPV6),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
+ ETH_RSS_NONFRAG_IPV6_OTHER |
+ I40E_HASH_IPV6_L23_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
+ ETH_RSS_NONFRAG_IPV6_UDP |
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
+
+ I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
+ ETH_RSS_NONFRAG_IPV6_SCTP |
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
+
+ /* ESP */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
+ ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
+
+ /* GTPC */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPC),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPC,
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPC),
+
+ /* GTPU */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU,
+ I40E_HASH_IPV4_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPU),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
+ I40E_HASH_IPV6_L234_RSS_MASK,
+ I40E_CUSTOMIZED_GTPU),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
+ ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
+
+ /* L2TPV3 */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
+ ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
+ ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
+
+ /* AH */
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
+ I40E_CUSTOMIZED_AH_IPV4),
+ I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
+ I40E_CUSTOMIZED_AH_IPV6),
+};
+
+static int
+i40e_hash_get_pattern_type(const struct rte_flow_item pattern[],
+ uint64_t *pattern_types,
+ struct rte_flow_error *error)
+{
+ const char *message = "Pattern not supported";
+ enum rte_flow_item_type prev_item_type = RTE_FLOW_ITEM_TYPE_VOID;
+ enum rte_flow_item_type last_item_type = prev_item_type;
+ uint64_t item_hdr, pattern_hdrs = 0;
+ bool inner_flag = false;
+ int vlan_count = 0;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ if (pattern->mask || pattern->spec || pattern->last) {
+ message = "Header info should not be specified";
+ goto not_sup;
+ }
+
+ /* Check the previous item allows this sub-item. */
+ if (prev_item_type >= RTE_DIM(pattern_next_allow_items) ||
+ !(pattern_next_allow_items[prev_item_type] &
+ BIT_ULL(pattern->type)))
+ goto not_sup;
+
+ /* A VLAN item does not affect pattern type recognition,
+ * so just count the VLAN items and do not change the
+ * value of `prev_item_type`.
+ */
+ last_item_type = pattern->type;
+ if (last_item_type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (vlan_count >= 2)
+ goto not_sup;
+ vlan_count++;
+ continue;
+ }
+
+ prev_item_type = last_item_type;
+ assert(last_item_type < RTE_DIM(pattern_item_header));
+ item_hdr = pattern_item_header[last_item_type];
+ assert(item_hdr);
+
+ if (inner_flag) {
+ item_hdr <<= I40E_HASH_HDR_INNER_SHIFT;
+
+ /* Inner layer should not have GTPU item */
+ if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+ goto not_sup;
+ } else {
+ if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU) {
+ inner_flag = true;
+ vlan_count = 0;
+ }
+ }
+
+ if (item_hdr & pattern_hdrs)
+ goto not_sup;
+
+ pattern_hdrs |= item_hdr;
+ }
+
+ if (pattern_hdrs && last_item_type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ *pattern_types = pattern_hdrs;
+ return 0;
+ }
+
+not_sup:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, message);
+}
+
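+/* Return the additional X722-specific PCTYPEs implied by the matched
+ * base PCTYPE.
+ */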
+static uint64_t
+i40e_hash_get_x722_ext_pctypes(uint8_t match_pctype)
+{
+ uint64_t pctypes = 0;
+
+ switch (match_pctype) {
+ case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
+ pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ break;
+
+ case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
+ pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ break;
+
+ case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
+ pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ break;
+
+ case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
+ pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ break;
+ }
+
+ return pctypes;
+}
+
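+/* GTPC/GTPU flows only support the external destination IP as input set;
+ * translate it to the tunnel destination IP input-set bits.
+ */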
+static int
+i40e_hash_translate_gtp_inset(struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ if (rss_conf->inset &
+ (I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC |
+ I40E_INSET_DST_PORT | I40E_INSET_SRC_PORT))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "Only support external destination IP");
+
+ if (rss_conf->inset & I40E_INSET_IPV4_DST)
+ rss_conf->inset = (rss_conf->inset & ~I40E_INSET_IPV4_DST) |
+ I40E_INSET_TUNNEL_IPV4_DST;
+
+ if (rss_conf->inset & I40E_INSET_IPV6_DST)
+ rss_conf->inset = (rss_conf->inset & ~I40E_INSET_IPV6_DST) |
+ I40E_INSET_TUNNEL_IPV6_DST;
+
+ return 0;
+}
+
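+/* Collect the PCTYPE bit(s) of one matched pattern entry into the RSS
+ * configuration.
+ */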
+static int
+i40e_hash_get_pctypes(const struct rte_eth_dev *dev,
+ const struct i40e_hash_match_pattern *match,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ if (match->custom_pctype_flag) {
+ struct i40e_pf *pf;
+ struct i40e_customized_pctype *custom_type;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ custom_type = i40e_find_customized_pctype(pf, match->pctype);
+ if (!custom_type || !custom_type->valid)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "PCTYPE not supported");
+
+ rss_conf->config_pctypes |= BIT_ULL(custom_type->pctype);
+
+ if (match->pctype == I40E_CUSTOMIZED_GTPU ||
+ match->pctype == I40E_CUSTOMIZED_GTPC)
+ return i40e_hash_translate_gtp_inset(rss_conf, error);
+ } else {
+ struct i40e_hw *hw =
+ I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t types;
+
+ rss_conf->config_pctypes |= BIT_ULL(match->pctype);
+ if (hw->mac.type == I40E_MAC_X722) {
+ types = i40e_hash_get_x722_ext_pctypes(match->pctype);
+ rss_conf->config_pctypes |= types;
+ }
+ }
+
+ return 0;
+}
+
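+/* Find all table entries that match the pattern and requested RSS types,
+ * and collect their PCTYPEs.
+ */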
+static int
+i40e_hash_get_pattern_pctypes(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ uint64_t pattern_types = 0;
+ bool match_flag = false;
+ int i, ret;
+
+ ret = i40e_hash_get_pattern_type(pattern, &pattern_types, error);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < (int)RTE_DIM(match_patterns); i++) {
+ const struct i40e_hash_match_pattern *match =
+ &match_patterns[i];
+
+ /* Check whether the pattern types match. Entries with the
+ * same pattern type are grouped together, so if the pattern
+ * matched the previous entry but does not match the current
+ * one, it cannot match any of the remaining entries either.
+ */
+ if (pattern_types != match->pattern_type) {
+ if (match_flag)
+ break;
+ continue;
+ }
+ match_flag = true;
+
+ /* Check RSS types match */
+ if (!(rss_act->types & ~match->rss_mask)) {
+ ret = i40e_hash_get_pctypes(dev, match,
+ rss_conf, error);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (rss_conf->config_pctypes)
+ return 0;
+
+ if (match_flag)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "RSS types not supported");
+
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Pattern not supported");
+}
+
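+/* Translate RSS offload types into hardware input-set bits, honouring the
+ * SRC_ONLY/DST_ONLY selectors.
+ */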
+static uint64_t
+i40e_hash_get_inset(uint64_t rss_types)
+{
+ uint64_t mask, inset = 0;
+ int i;
+
+ for (i = 0; i < (int)RTE_DIM(i40e_hash_rss_inset); i++) {
+ if (rss_types & i40e_hash_rss_inset[i].rss_type)
+ inset |= i40e_hash_rss_inset[i].inset;
+ }
+
+ if (!inset)
+ return 0;
+
+ /* If SRC_ONLY and DST_ONLY of the same layer are both requested,
+ * it is treated the same as if neither of them were set.
+ */
+ mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
+ if (mask == ETH_RSS_L2_SRC_ONLY)
+ inset &= ~I40E_INSET_DMAC;
+ else if (mask == ETH_RSS_L2_DST_ONLY)
+ inset &= ~I40E_INSET_SMAC;
+
+ mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
+ if (mask == ETH_RSS_L3_SRC_ONLY)
+ inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
+ else if (mask == ETH_RSS_L3_DST_ONLY)
+ inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
+
+ mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+ if (mask == ETH_RSS_L4_SRC_ONLY)
+ inset &= ~I40E_INSET_DST_PORT;
+ else if (mask == ETH_RSS_L4_DST_ONLY)
+ inset &= ~I40E_INSET_SRC_PORT;
+
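+	/* For L4-capable flow types, a lone L3_*_ONLY modifier drops the port
+	 * fields from the input set, while a lone L4_*_ONLY modifier drops the
+	 * IP address fields.
+	 */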
+ if (rss_types & I40E_HASH_L4_TYPES) {
+ mask = ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
+ ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY;
+
+ switch (mask & rss_types) {
+ case ETH_RSS_L3_SRC_ONLY:
+ case ETH_RSS_L3_DST_ONLY:
+ inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
+ break;
+ case ETH_RSS_L4_SRC_ONLY:
+ case ETH_RSS_L4_DST_ONLY:
+ inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
+ break;
+ }
+ }
+
+ return inset;
+}
+
+static int
+i40e_hash_config_func(struct i40e_hw *hw, enum rte_eth_hash_function func)
+{
+ struct i40e_pf *pf;
+ uint32_t reg;
+ uint8_t symmetric = 0;
+
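+	/* GLQF_CTL.HTOEP selects Toeplitz when set and simple XOR when clear.
+	 * Only rewrite the register when the requested function differs from
+	 * the current one.
+	 */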
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+
+ if (func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK))
+ goto set_symmetric;
+
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+ } else {
+ if (func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ symmetric = 1;
+
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK)
+ goto set_symmetric;
+
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+ }
+
+ pf = &((struct i40e_adapter *)hw->back)->pf;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR,
+ "Modify hash function is not permitted when multi-driver enabled");
+ return -EPERM;
+ }
+
+ PMD_DRV_LOG(INFO, "NIC hash function is setting to %d", func);
+ i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ I40E_WRITE_FLUSH(hw);
+
+set_symmetric:
+ i40e_set_symmetric_hash_enable_per_port(hw, symmetric);
+ return 0;
+}
+
+static int
+i40e_hash_config_pctype_symmetric(struct i40e_hw *hw,
+ uint32_t pctype,
+ bool symmetric)
+{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
+ uint32_t reg;
+
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
+
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
+ if (symmetric) {
+ if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
+ return 0;
+ reg |= I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ } else {
+ if (!(reg & I40E_GLQF_HSYM_SYMH_ENA_MASK))
+ return 0;
+ reg &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ }
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR,
+ "Enable/Disable symmetric hash is not permitted when multi-driver enabled");
+ return -EPERM;
+ }
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ I40E_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static void
+i40e_hash_enable_pctype(struct i40e_hw *hw,
+ uint32_t pctype, bool enable)
+{
+ uint32_t reg, reg_val, mask;
+
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
+
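+	/* The hash-enable flags form a 64-bit bitmap spread over the two
+	 * 32-bit PFQF_HENA registers; select the register and bit that
+	 * correspond to this PCTYPE.
+	 */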
+ if (pctype < 32) {
+ mask = BIT(pctype);
+ reg = I40E_PFQF_HENA(0);
+ } else {
+ mask = BIT(pctype - 32);
+ reg = I40E_PFQF_HENA(1);
+ }
+
+ reg_val = i40e_read_rx_ctl(hw, reg);
+
+ if (enable) {
+ if (reg_val & mask)
+ return;
+
+ reg_val |= mask;
+ } else {
+ if (!(reg_val & mask))
+ return;
+
+ reg_val &= ~mask;
+ }
+
+ i40e_write_rx_ctl(hw, reg, reg_val);
+ I40E_WRITE_FLUSH(hw);
+}
+
+static int
+i40e_hash_config_pctype(struct i40e_hw *hw,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ uint32_t pctype)
+{
+ uint64_t rss_types = rss_conf->conf.types;
+ int ret;
+
+ if (rss_types == 0) {
+ i40e_hash_enable_pctype(hw, pctype, false);
+ return 0;
+ }
+
+ if (rss_conf->inset) {
+ ret = i40e_set_hash_inset(hw, rss_conf->inset, pctype, false);
+ if (ret)
+ return ret;
+ }
+
+ i40e_hash_enable_pctype(hw, pctype, true);
+ return 0;
+}
+
+static int
+i40e_hash_config_region(struct i40e_pf *pf,
+ const struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = &pf->adapter->hw;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_queue_region_info *regions = pf->queue_region.region;
+ uint32_t num = pf->queue_region.queue_region_number;
+ uint32_t i, region_id_mask = 0;
+
+ /* Use a 32 bit variable to represent all regions */
+ RTE_BUILD_BUG_ON(I40E_REGION_MAX_INDEX > 31);
+
+	/* Reconfigure the region if it already exists */
+ for (i = 0; i < num; i++) {
+ if (rss_conf->region_queue_start ==
+ regions[i].queue_start_index &&
+ rss_conf->region_queue_num == regions[i].queue_num) {
+ uint32_t j;
+
+ for (j = 0; j < regions[i].user_priority_num; j++) {
+ if (regions[i].user_priority[j] ==
+ rss_conf->region_priority)
+ return 0;
+ }
+
+ if (j >= I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR,
+ "Priority number exceed the maximum %d",
+ I40E_MAX_USER_PRIORITY);
+ return -ENOSPC;
+ }
+
+ regions[i].user_priority[j] = rss_conf->region_priority;
+ regions[i].user_priority_num++;
+ return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ }
+
+ region_id_mask |= BIT(regions[i].region_id);
+ }
+
+ if (num > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "Queue region resource used up");
+ return -ENOSPC;
+ }
+
+ /* Add a new region */
+
+ pf->queue_region.queue_region_number++;
+	memset(&regions[num], 0, sizeof(regions[0]));
+
+ regions[num].region_id = rte_bsf32(~region_id_mask);
+ regions[num].queue_num = rss_conf->region_queue_num;
+ regions[num].queue_start_index = rss_conf->region_queue_start;
+ regions[num].user_priority[0] = rss_conf->region_priority;
+ regions[num].user_priority_num = 1;
+
+ return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+}
+
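+/* Apply the RSS configuration to the hardware. Each global setting that gets
+ * modified (hash function, queue region, key, lookup table) and each PCTYPE
+ * that gets configured is recorded in the filter's reset flags, so that
+ * destroying the filter later restores only what it changed.
+ */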
+static int
+i40e_hash_config(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct rte_flow_action_rss *rss_info = &rss_conf->conf;
+ struct i40e_hw *hw = &pf->adapter->hw;
+ uint64_t pctypes;
+ int ret;
+
+ if (rss_info->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+ ret = i40e_hash_config_func(hw, rss_info->func);
+ if (ret)
+ return ret;
+
+ if (rss_info->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ rss_conf->misc_reset_flags |=
+ I40E_HASH_FLOW_RESET_FLAG_FUNC;
+ }
+
+ if (rss_conf->region_queue_num > 0) {
+ ret = i40e_hash_config_region(pf, rss_conf);
+ if (ret)
+ return ret;
+
+ rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_REGION;
+ }
+
+ if (rss_info->key_len > 0) {
+ ret = i40e_set_rss_key(pf->main_vsi, rss_conf->key,
+ rss_info->key_len);
+ if (ret)
+ return ret;
+
+ rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_KEY;
+ }
+
+ /* Update lookup table */
+ if (rss_info->queue_num > 0) {
+ uint8_t lut[ETH_RSS_RETA_SIZE_512];
+ uint32_t i, j = 0;
+
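+		/* Fill the whole lookup table by cycling through the given
+		 * queues in a round-robin fashion.
+		 */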
+ for (i = 0; i < hw->func_caps.rss_table_size; i++) {
+ lut[i] = (uint8_t)rss_info->queue[j];
+ j = (j == rss_info->queue_num - 1) ? 0 : (j + 1);
+ }
+
+ ret = i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
+ if (ret)
+ return ret;
+
+ pf->hash_enabled_queues = 0;
+ for (i = 0; i < rss_info->queue_num; i++)
+ pf->hash_enabled_queues |= BIT_ULL(lut[i]);
+
+ pf->adapter->rss_reta_updated = 0;
+ rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_QUEUE;
+ }
+
+	/* The code below configures the input set and, when requested, the
+	 * symmetric hash for every selected packet type, and then enables
+	 * hashing on it.
+	 */
+ pctypes = rss_conf->config_pctypes;
+ if (!pctypes)
+ return 0;
+
+	/* For the first flow that enables hashing on any packet type, clear
+	 * the RSS configuration that was set up through the legacy
+	 * configuration commands and device parameters.
+	 */
+ if (!pf->hash_filter_enable) {
+ i40e_pf_disable_rss(pf);
+ pf->hash_filter_enable = true;
+ }
+
+ do {
+ uint32_t idx = rte_bsf64(pctypes);
+ uint64_t bit = BIT_ULL(idx);
+
+ if (rss_conf->symmetric_enable) {
+ ret = i40e_hash_config_pctype_symmetric(hw, idx, true);
+ if (ret)
+ return ret;
+
+ rss_conf->reset_symmetric_pctypes |= bit;
+ }
+
+ ret = i40e_hash_config_pctype(hw, rss_conf, idx);
+ if (ret)
+ return ret;
+
+ rss_conf->reset_config_pctypes |= bit;
+ pctypes &= ~bit;
+ } while (pctypes);
+
+ return 0;
+}
+
+static void
+i40e_hash_parse_key(const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ const uint8_t *key = rss_act->key;
+
+ if (rss_act->key_len != sizeof(rss_conf->key)) {
+ const uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ PMD_DRV_LOG(WARNING, "RSS key invalid, set to default");
+ key = (const uint8_t *)rss_key_default;
+ }
+
+ memcpy(rss_conf->key, key, sizeof(rss_conf->key));
+ rss_conf->conf.key = rss_conf->key;
+ rss_conf->conf.key_len = sizeof(rss_conf->key);
+}
+
+static int
+i40e_hash_parse_queues(const struct rte_eth_dev *dev,
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ uint16_t i;
+ int max_queue;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!rss_act->queue_num ||
+ rss_act->queue_num > hw->func_caps.rss_table_size)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "Invalid RSS queue number");
+
+ if (rss_act->key_len)
+ PMD_DRV_LOG(WARNING,
+ "RSS key is ignored when queues specified");
+
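+	/* The valid queue range is the queue count computed for the PF when
+	 * VMDq is enabled, otherwise the number of configured Rx queues, and
+	 * in both cases no more than the per-TC maximum.
+	 */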
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ max_queue = i40e_pf_calc_configured_queues_num(pf);
+ else
+ max_queue = pf->dev_data->nb_rx_queues;
+
+ max_queue = RTE_MIN(max_queue, I40E_MAX_Q_PER_TC);
+
+ for (i = 0; i < rss_act->queue_num; i++) {
+ if ((int)rss_act->queue[i] >= max_queue) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Invalid RSS queues");
+ return -EINVAL;
+ }
+ }
+
+ memcpy(rss_conf->queue, rss_act->queue,
+ rss_act->queue_num * sizeof(rss_conf->queue[0]));
+ rss_conf->conf.queue = rss_conf->queue;
+ rss_conf->conf.queue_num = rss_act->queue_num;
+ return 0;
+}
+
+static int
+i40e_hash_parse_queue_region(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ uint64_t hash_queues;
+ uint32_t i;
+
+ if (pattern[1].type != RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ &pattern[1],
+ "Pattern not supported.");
+
+ vlan_spec = pattern->spec;
+ vlan_mask = pattern->mask;
+ if (!vlan_spec || !vlan_mask ||
+ (rte_be_to_cpu_16(vlan_mask->tci) >> 13) != 7)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Pattern error.");
+
+ if (!rss_act->queue)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Queues not specified");
+
+ if (rss_act->key_len)
+ PMD_DRV_LOG(WARNING,
+ "RSS key is ignored when configure queue region");
+
+ /* Use a 64 bit variable to represent all queues in a region. */
+ RTE_BUILD_BUG_ON(I40E_MAX_Q_PER_TC > 64);
+
+ if (!rss_act->queue_num ||
+ !rte_is_power_of_2(rss_act->queue_num) ||
+ rss_act->queue_num + rss_act->queue[0] > I40E_MAX_Q_PER_TC)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Queue number error");
+
+ for (i = 1; i < rss_act->queue_num; i++) {
+ if (rss_act->queue[i - 1] + 1 != rss_act->queue[i]) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "Queues must be incremented continuously");
+ return -EINVAL;
+ }
+ }
+
+ /* Map all queues to bits of uint64_t */
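+	/* Illustrative example: queue[0] = 4 and queue_num = 4 set bits 4..7
+	 * in hash_queues.
+	 */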
+ hash_queues = (BIT_ULL(rss_act->queue[0] + rss_act->queue_num) - 1) &
+ ~(BIT_ULL(rss_act->queue[0]) - 1);
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ if (hash_queues & ~pf->hash_enabled_queues)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "Some queues are not in LUT");
+
+ rss_conf->region_queue_num = (uint8_t)rss_act->queue_num;
+ rss_conf->region_queue_start = rss_act->queue[0];
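+	/* The region priority is taken from the PCP field (the top 3 bits) of
+	 * the matched VLAN TCI.
+	 */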
+ rss_conf->region_priority = rte_be_to_cpu_16(vlan_spec->tci) >> 13;
+ return 0;
+}
+
+static int
+i40e_hash_parse_global_conf(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ if (rss_act->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "Symmetric function should be set with pattern types");
+
+ rss_conf->conf.func = rss_act->func;
+
+ if (rss_act->types)
+ PMD_DRV_LOG(WARNING,
+ "RSS types are ignored when no pattern specified");
+
+ if (pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
+ return i40e_hash_parse_queue_region(dev, pattern,
+ rss_act, rss_conf,
+ error);
+
+ if (rss_act->queue)
+ return i40e_hash_parse_queues(dev, rss_act,
+ rss_conf, error);
+
+ if (rss_act->key && rss_act->key_len) {
+ i40e_hash_parse_key(rss_act, rss_conf);
+ return 0;
+ }
+
+ PMD_DRV_LOG(WARNING, "Nothing change");
+ return 0;
+}
+
+static bool
+i40e_hash_validate_rss_types(uint64_t rss_types)
+{
+ uint64_t type, mask;
+
+ /* Validate L2 */
+ type = ETH_RSS_ETH & rss_types;
+ mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;
+ if (!type && mask)
+ return false;
+
+ /* Validate L3 */
+ type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
+ mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;
+ if (!type && mask)
+ return false;
+
+ /* Validate L4 */
+ type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
+ mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
+ if (!type && mask)
+ return false;
+
+ return true;
+}
+
+static int
+i40e_hash_parse_pattern_act(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action_rss *rss_act,
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ if (rss_act->queue)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "RSS Queues not supported when pattern specified");
+
+ if (rss_act->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ rss_conf->symmetric_enable = true;
+ else if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "Only symmetric TOEPLITZ supported when pattern specified");
+
+ if (!i40e_hash_validate_rss_types(rss_act->types))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "RSS types are invalid");
+
+ if (rss_act->key && rss_act->key_len)
+ i40e_hash_parse_key(rss_act, rss_conf);
+
+ rss_conf->conf.func = rss_act->func;
+ rss_conf->conf.types = rss_act->types;
+ rss_conf->inset = i40e_hash_get_inset(rss_act->types);
+
+ return i40e_hash_get_pattern_pctypes(dev, pattern, rss_act,
+ rss_conf, error);
+}
+
+int
+i40e_hash_parse(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_rss *rss_act;
+
+ if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[1],
+ "Only support one action for RSS.");
+
+ rss_act = (const struct rte_flow_action_rss *)actions[0].conf;
+ if (rss_act->level)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "RSS level is not supported");
+
+ while (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
+ pattern++;
+
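+	/* An empty pattern or a single VLAN item selects the global RSS
+	 * configuration path (hash function, key, queues or queue region);
+	 * any other pattern selects per-pattern PCTYPE hashing.
+	 */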
+ if (pattern[0].type == RTE_FLOW_ITEM_TYPE_END ||
+ pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
+ return i40e_hash_parse_global_conf(dev, pattern, rss_act,
+ rss_conf, error);
+
+ return i40e_hash_parse_pattern_act(dev, pattern, rss_act,
+ rss_conf, error);
+}
+
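+/* A newer filter (ref_conf) takes over the reset work it shares with an older
+ * filter: clear from the older filter any reset flags and PCTYPE reset bits
+ * that the newer filter also owns, so destroying the older filter will not
+ * undo state the newer one still relies on. The queue-region reset flag is
+ * kept when the two filters configure different regions.
+ */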
+static void
+i40e_invalid_rss_filter(const struct i40e_rte_flow_rss_conf *ref_conf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ uint32_t reset_flags = conf->misc_reset_flags;
+
+ conf->misc_reset_flags &= ~ref_conf->misc_reset_flags;
+
+ if ((reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) &&
+ (ref_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) &&
+ (conf->region_queue_start != ref_conf->region_queue_start ||
+ conf->region_queue_num != ref_conf->region_queue_num))
+ conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_REGION;
+
+ conf->reset_config_pctypes &= ~ref_conf->reset_config_pctypes;
+ conf->reset_symmetric_pctypes &= ~ref_conf->reset_symmetric_pctypes;
+}
+
+int
+i40e_hash_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rss_filter *filter;
+ int ret;
+
+ TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
+ struct i40e_rte_flow_rss_conf *rss_conf =
+ &filter->rss_filter_info;
+ struct i40e_rss_filter *prev;
+
+ rss_conf->misc_reset_flags = 0;
+ rss_conf->reset_config_pctypes = 0;
+ rss_conf->reset_symmetric_pctypes = 0;
+
+ ret = i40e_hash_config(pf, rss_conf);
+ if (ret) {
+ pf->hash_filter_enable = 0;
+ i40e_pf_disable_rss(pf);
+ PMD_DRV_LOG(ERR,
+ "Re-configure RSS failed, RSS has been disabled");
+ return ret;
+ }
+
+		/* Invalidate previous RSS filters */
+ TAILQ_FOREACH(prev, &pf->rss_config_list, next) {
+ if (prev == filter)
+ break;
+ i40e_invalid_rss_filter(rss_conf,
+ &prev->rss_filter_info);
+ }
+ }
+
+ return 0;
+}
+
+int
+i40e_hash_filter_create(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct i40e_rss_filter *filter, *prev;
+ struct i40e_rte_flow_rss_conf *new_conf;
+ int ret;
+
+ filter = rte_zmalloc("i40e_rss_filter", sizeof(*filter), 0);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return -ENOMEM;
+ }
+
+ new_conf = &filter->rss_filter_info;
+
+ memcpy(new_conf, rss_conf, sizeof(*new_conf));
+ if (new_conf->conf.queue_num)
+ new_conf->conf.queue = new_conf->queue;
+ if (new_conf->conf.key_len)
+ new_conf->conf.key = new_conf->key;
+
+ ret = i40e_hash_config(pf, new_conf);
+ if (ret) {
+ rte_free(filter);
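+		/* Roll back: restore the port-level RSS configuration and then
+		 * re-apply the filters that were already created.
+		 */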
+ if (i40e_pf_config_rss(pf))
+ return ret;
+
+ (void)i40e_hash_filter_restore(pf);
+ return ret;
+ }
+
+	/* Invalidate previous RSS filters */
+ TAILQ_FOREACH(prev, &pf->rss_config_list, next)
+ i40e_invalid_rss_filter(new_conf, &prev->rss_filter_info);
+
+ TAILQ_INSERT_TAIL(&pf->rss_config_list, filter, next);
+ return 0;
+}
+
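+/* Undo exactly the global settings and per-PCTYPE settings that this filter
+ * recorded in its reset flags when it was applied.
+ */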
+static int
+i40e_hash_reset_conf(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = &pf->adapter->hw;
+ uint64_t inset;
+ uint32_t idx;
+ int ret;
+
+ if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_FUNC) {
+ ret = i40e_hash_config_func(hw, RTE_ETH_HASH_FUNCTION_TOEPLITZ);
+ if (ret)
+ return ret;
+
+ rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_FUNC;
+ }
+
+ if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) {
+ ret = i40e_flush_queue_region_all_conf(pf->adapter->eth_dev,
+ hw, pf, 0);
+ if (ret)
+ return ret;
+
+ rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_REGION;
+ }
+
+ if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_KEY) {
+ ret = i40e_pf_reset_rss_key(pf);
+ if (ret)
+ return ret;
+
+ rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_KEY;
+ }
+
+ if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_QUEUE) {
+ if (!pf->adapter->rss_reta_updated) {
+ ret = i40e_pf_reset_rss_reta(pf);
+ if (ret)
+ return ret;
+ }
+
+ pf->hash_enabled_queues = 0;
+ rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_QUEUE;
+ }
+
+ while (rss_conf->reset_config_pctypes) {
+ idx = rte_bsf64(rss_conf->reset_config_pctypes);
+
+ i40e_hash_enable_pctype(hw, idx, false);
+ inset = i40e_get_default_input_set(idx);
+ if (inset) {
+ ret = i40e_set_hash_inset(hw, inset, idx, false);
+ if (ret)
+ return ret;
+ }
+
+ rss_conf->reset_config_pctypes &= ~BIT_ULL(idx);
+ }
+
+ while (rss_conf->reset_symmetric_pctypes) {
+ idx = rte_bsf64(rss_conf->reset_symmetric_pctypes);
+
+ ret = i40e_hash_config_pctype_symmetric(hw, idx, false);
+ if (ret)
+ return ret;
+
+ rss_conf->reset_symmetric_pctypes &= ~BIT_ULL(idx);
+ }
+
+ return 0;
+}
+
+int
+i40e_hash_filter_destroy(struct i40e_pf *pf,
+ const struct i40e_rss_filter *rss_filter)
+{
+ struct i40e_rss_filter *filter;
+ int ret;
+
+ TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
+ if (rss_filter == filter) {
+ ret = i40e_hash_reset_conf(pf,
+ &filter->rss_filter_info);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&pf->rss_config_list, filter, next);
+ rte_free(filter);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+i40e_hash_filter_flush(struct i40e_pf *pf)
+{
+ struct rte_flow *flow, *next;
+
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, next) {
+ if (flow->filter_type != RTE_ETH_FILTER_HASH)
+ continue;
+
+ if (flow->rule) {
+ struct i40e_rss_filter *filter = flow->rule;
+ int ret;
+
+ ret = i40e_hash_reset_conf(pf,
+ &filter->rss_filter_info);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&pf->rss_config_list, filter, next);
+ rte_free(filter);
+ }
+
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+
+ assert(!pf->rss_config_list.tqh_first);
+ return 0;
+}
diff --git a/drivers/net/i40e/i40e_hash.h b/drivers/net/i40e/i40e_hash.h
new file mode 100644
index 0000000..ff8c91c
--- /dev/null
+++ b/drivers/net/i40e/i40e_hash.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _I40E_HASH_H_
+#define _I40E_HASH_H_
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include "i40e_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int i40e_hash_parse(const struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct i40e_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error);
+
+int i40e_hash_filter_create(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *rss_conf);
+
+int i40e_hash_filter_restore(struct i40e_pf *pf);
+int i40e_hash_filter_destroy(struct i40e_pf *pf,
+ const struct i40e_rss_filter *rss_filter);
+int i40e_hash_filter_flush(struct i40e_pf *pf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _I40E_HASH_H_ */
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
index bb0c542..882168c 100644
--- a/drivers/net/i40e/meson.build
+++ b/drivers/net/i40e/meson.build
@@ -17,6 +17,7 @@ sources = files(
'i40e_fdir.c',
'i40e_flow.c',
'i40e_tm.c',
+ 'i40e_hash.c',
'i40e_vf_representor.c',
'rte_pmd_i40e.c'
)
--
1.8.3.1