From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v4 6/7] ixgbe: implementation for fdir new modes' config
Date: Fri, 23 Oct 2015 10:18:09 +0800 [thread overview]
Message-ID: <1445566690-15767-7-git-send-email-wenzhuo.lu@intel.com> (raw)
In-Reply-To: <1445566690-15767-1-git-send-email-wenzhuo.lu@intel.com>
Implement the new CLIs for fdir mac vlan and tunnel modes, including
flow_director_filter and flow_director_mask. Set the mask of fdir.
Add, delete, or update the filter entries.
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.h | 3 +
drivers/net/ixgbe/ixgbe_fdir.c | 261 ++++++++++++++++++++++++++++++++++-----
2 files changed, 234 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..1e971b9 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -133,6 +133,9 @@ struct ixgbe_hw_fdir_mask {
uint16_t src_port_mask;
uint16_t dst_port_mask;
uint16_t flex_bytes_mask;
+ uint8_t mac_addr_byte_mask;
+ uint32_t tunnel_id_mask;
+ uint8_t tunnel_type_mask;
};
struct ixgbe_hw_fdir_info {
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 5c8b833..c8352f4 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -105,15 +105,23 @@
rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
+#define DEFAULT_VXLAN_PORT 4789
+#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
+
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
+static int fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static int ixgbe_fdir_filter_to_atr_input(
const struct rte_eth_fdir_filter *fdir_filter,
- union ixgbe_atr_input *input);
+ union ixgbe_atr_input *input,
+ enum rte_fdir_mode mode);
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
@@ -122,7 +130,8 @@ static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
enum rte_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, uint8_t queue,
- uint32_t fdircmd, uint32_t fdirhash);
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode);
static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
uint32_t fdirhash);
@@ -243,9 +252,16 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
IXGBE_FDIRCTRL_FLEX_SHIFT;
- if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+ if (conf->mode >= RTE_FDIR_MODE_PERFECT
+ && conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+ if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
+ << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+ else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
+ << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
}
return 0;
@@ -274,7 +290,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
}
/*
- * This is based on ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
@@ -342,7 +358,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
/*
- * IPv6 mask is only meaningful in signature mode
* Store source and destination IPv6 masks (bit reversed)
*/
IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
@@ -358,6 +373,122 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
}
/*
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ /*
+ * mask VM pool and DIPv6 since they are currently not supported
+ * mask FLEX byte, it will be set in flex_conf
+ */
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+ uint32_t fdiripv6m;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ uint16_t mac_mask;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set the default UDP port for VxLAN */
+ if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
+
+ /* some bits must be set for mac vlan or tunnel mode */
+ fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
+
+ if (input_mask->vlan_tci_mask == 0x0FFF)
+ /* mask VLAN Priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ else if (input_mask->vlan_tci_mask == 0xE000)
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ else if (input_mask->vlan_tci_mask == 0)
+ /* mask VLAN ID and Priority */
+ fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+ else if (input_mask->vlan_tci_mask != 0xEFFF) {
+ PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+ return -EINVAL;
+ }
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+ fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
+ IXGBE_FDIRIP6M_TNI_VNI;
+
+ mac_mask = input_mask->mac_addr_byte_mask;
+ fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
+ & IXGBE_FDIRIP6M_INNER_MAC;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+
+ if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ switch (input_mask->tunnel_type_mask) {
+ case 0:
+ /* Mask tunnel type */
+ fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ break;
+ case 1:
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
+ return -EINVAL;
+ }
+ info->mask.tunnel_type_mask =
+ input_mask->tunnel_type_mask;
+
+ switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
+ case 0x0:
+ /* Mask vxlan id */
+ fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
+ break;
+ case 0x00FFFFFF:
+ fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+ break;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
+ return -EINVAL;
+ }
+ info->mask.tunnel_id_mask =
+ input_mask->tunnel_id_mask;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE
+ && mode <= RTE_FDIR_MODE_PERFECT)
+ return fdir_set_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN
+ && mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return fdir_set_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+/*
* ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
* arguments are valid
*/
@@ -431,6 +562,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
int err;
uint32_t fdirctrl, pbsize;
int i;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
PMD_INIT_FUNC_TRACE();
@@ -440,6 +572,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
hw->mac.type != ixgbe_mac_X550EM_x)
return -ENOSYS;
+ /* x550 supports mac-vlan and tunnel modes but other NICs do not */
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT)
+ return -ENOSYS;
+
err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
if (err)
return err;
@@ -462,7 +601,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
for (i = 1; i < 8; i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- err = fdir_set_input_mask_82599(dev, &dev->data->dev_conf.fdir_conf.mask);
+ err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
if (err < 0) {
PMD_INIT_LOG(ERR, " Error on setting FD mask");
return err;
@@ -488,7 +627,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
*/
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
- union ixgbe_atr_input *input)
+ union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
input->formatted.flex_bytes = (uint16_t)(
@@ -521,8 +660,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
break;
default:
- PMD_DRV_LOG(ERR, " Error on flow_type input");
- return -EINVAL;
+ break;
}
switch (fdir_filter->input.flow_type) {
@@ -558,8 +696,23 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
sizeof(input->formatted.dst_ip));
break;
default:
- PMD_DRV_LOG(ERR, " Error on flow_type input");
- return -EINVAL;
+ break;
+ }
+
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ rte_memcpy(
+ input->formatted.inner_mac,
+ fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
+ sizeof(input->formatted.inner_mac));
+ } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ rte_memcpy(
+ input->formatted.inner_mac,
+ fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
+ sizeof(input->formatted.inner_mac));
+ input->formatted.tunnel_type =
+ fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ input->formatted.tni_vni =
+ fdir_filter->input.flow.tunnel_flow.tunnel_id;
}
return 0;
@@ -743,20 +896,52 @@ atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, uint8_t queue,
- uint32_t fdircmd, uint32_t fdirhash)
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode)
{
uint32_t fdirport, fdirvlan;
+ u32 addr_low, addr_high;
+ u32 tunnel_type = 0;
int err = 0;
- /* record the IPv4 address (big-endian) */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
- /* record source and destination port (little-endian)*/
- fdirport = IXGBE_NTOHS(input->formatted.dst_port);
- fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= IXGBE_NTOHS(input->formatted.src_port);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ if (mode == RTE_FDIR_MODE_PERFECT) {
+ /* record the IPv4 address (big-endian) */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
+ input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ } else if(mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN
+ && mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ /* for mac vlan and tunnel modes */
+ addr_low = ((u32)input->formatted.inner_mac[0] |
+ ((u32)input->formatted.inner_mac[1] << 8) |
+ ((u32)input->formatted.inner_mac[2] << 16) |
+ ((u32)input->formatted.inner_mac[3] << 24));
+ addr_high = ((u32)input->formatted.inner_mac[4] |
+ ((u32)input->formatted.inner_mac[5] << 8));
+
+ if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
+ } else {
+ /* tunnel mode */
+ if (input->formatted.tunnel_type !=
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ tunnel_type = 0x80000000;
+ tunnel_type |= addr_high;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.tni_vni);
+ }
+ }
/* record vlan (little-endian) and flex_bytes(big-endian) */
fdirvlan = input->formatted.flex_bytes;
@@ -894,8 +1079,9 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
- if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
+ if (fdir_mode == RTE_FDIR_MODE_NONE)
return -ENOTSUP;
/*
@@ -917,12 +1103,14 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
return -ENOTSUP;
}
- if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT
+ && fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
memset(&input, 0, sizeof(input));
- err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
+ fdir_mode);
if (err)
return err;
@@ -966,7 +1154,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
if (is_perfect) {
err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ fdircmd_flags, fdirhash,
+ fdir_mode);
} else {
err = fdir_add_signature_filter_82599(hw, &input, queue,
fdircmd_flags, fdirhash);
@@ -1018,7 +1207,8 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
- if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
+ if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT
+ && fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
fdir_info->guarant_spc = max_num;
else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
fdir_info->guarant_spc = max_num * 4;
@@ -1032,11 +1222,20 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
fdir_info->mask.ipv6_mask.dst_ip);
fdir_info->mask.src_port_mask = info->mask.src_port_mask;
fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+ fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
+ fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
+ fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
- fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
+ if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
+ || fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->flow_types_mask[0] = 0;
+ else
+ fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
fdir_info->flex_payload_unit = sizeof(uint16_t);
fdir_info->max_flex_payload_segment_num = 1;
- fdir_info->flex_payload_limit = 62;
+ fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
fdir_info->flex_conf.nb_payloads = 1;
fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
@@ -1056,6 +1255,7 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint32_t reg, max_num;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* Get the information from registers */
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
@@ -1095,9 +1295,10 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
(reg & FDIRCTRL_PBALLOC_MASK)));
- if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT
+ && fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
fdir_stats->guarant_cnt = max_num - fdir_stats->free;
- else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
+ else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
}
--
1.9.3
next prev parent reply other threads:[~2015-10-23 2:18 UTC|newest]
Thread overview: 58+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-09-25 6:05 [dpdk-dev] [PATCH 0/6] Support new flow director modes on Intel x550 NIC Wenzhuo Lu
2015-09-25 6:05 ` [dpdk-dev] [PATCH 1/6] lib/librte_ether: modify the structures for fdir new modes Wenzhuo Lu
2015-09-25 7:00 ` Thomas Monjalon
2015-09-25 8:14 ` Lu, Wenzhuo
2015-09-25 8:29 ` Thomas Monjalon
2015-09-28 1:00 ` Lu, Wenzhuo
2015-09-25 6:05 ` [dpdk-dev] [PATCH 2/6] app/testpmd: initialize the new fields for fdir mask Wenzhuo Lu
2015-09-25 6:05 ` [dpdk-dev] [PATCH 3/6] app/testpmd: new fdir modes for testpmd parameter Wenzhuo Lu
2015-09-25 6:05 ` [dpdk-dev] [PATCH 4/6] app/testpmd: modify the output of CLI, show port fdir Wenzhuo Lu
2015-09-25 6:05 ` [dpdk-dev] [PATCH 5/6] app/testpmd: modify and add fdir filter and mask CLIs for new modes Wenzhuo Lu
2015-09-25 6:05 ` [dpdk-dev] [PATCH 6/6] ixgbe: implementation for fdir new modes' config Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 0/6] Support new flow director modes on Intel x550 NIC Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 1/6] lib/librte_ether: modify the structures for fdir new modes Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 2/6] app/testpmd: initialize the new fields for fdir mask Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 3/6] app/testpmd: new fdir modes for testpmd parameter Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 4/6] app/testpmd: modify the output of the CLI show port fdir Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 5/6] app/testpmd: modify and add fdir filter and mask CLIs for new modes Wenzhuo Lu
2015-09-29 5:31 ` [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new modes' config Wenzhuo Lu
2015-10-20 13:55 ` Ananyev, Konstantin
2015-10-21 1:48 ` Lu, Wenzhuo
2015-10-21 10:19 ` Ananyev, Konstantin
2015-10-22 1:23 ` Lu, Wenzhuo
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 0/7] Support new flow director modes on Intel x550 NIC Wenzhuo Lu
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 1/7] lib/librte_ether: modify the structures for fdir new modes Wenzhuo Lu
2015-10-22 12:57 ` Bruce Richardson
2015-10-23 1:22 ` Lu, Wenzhuo
2015-10-23 7:29 ` Thomas Monjalon
2015-10-23 8:08 ` Lu, Wenzhuo
2015-10-23 9:58 ` Bruce Richardson
2015-10-23 13:06 ` Lu, Wenzhuo
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 2/7] app/testpmd: initialize the new fields for fdir mask Wenzhuo Lu
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 3/7] app/testpmd: new fdir modes for testpmd parameter Wenzhuo Lu
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 4/7] app/testpmd: modify the output of the CLI show port fdir Wenzhuo Lu
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 5/7] app/testpmd: modify and add fdir filter and mask CLIs for new modes Wenzhuo Lu
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 6/7] ixgbe: implementation for fdir new modes' config Wenzhuo Lu
2015-10-22 7:11 ` [dpdk-dev] [PATCH v3 7/7] doc: release notes update for flow director enhancement Wenzhuo Lu
2015-10-22 8:36 ` [dpdk-dev] [PATCH v3 0/7] Support new flow director modes on Intel x550 NIC Ananyev, Konstantin
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 " Wenzhuo Lu
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 1/7] lib/librte_ether: modify the structures for fdir new modes Wenzhuo Lu
2015-10-23 10:39 ` Chilikin, Andrey
2015-10-23 10:53 ` Ananyev, Konstantin
2015-10-23 12:55 ` Lu, Wenzhuo
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 2/7] app/testpmd: initialize the new fields for fdir mask Wenzhuo Lu
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 3/7] app/testpmd: new fdir modes for testpmd parameter Wenzhuo Lu
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 4/7] app/testpmd: modify the output of the CLI show port fdir Wenzhuo Lu
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 5/7] app/testpmd: modify and add fdir filter and mask CLIs for new modes Wenzhuo Lu
2015-10-23 2:18 ` Wenzhuo Lu [this message]
2015-10-23 2:18 ` [dpdk-dev] [PATCH v4 7/7] doc: release notes update for flow director enhancement Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 0/7] Support new flow director modes on Intel x550 NIC Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 1/7] lib/librte_ether: modify the structures for fdir new modes Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 2/7] app/testpmd: initialize the new fields for fdir mask Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 3/7] app/testpmd: new fdir modes for testpmd parameter Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 4/7] app/testpmd: modify the output of the CLI show port fdir Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 5/7] app/testpmd: modify and add fdir filter and mask CLIs for new modes Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 6/7] ixgbe: implementation for fdir new modes' config Wenzhuo Lu
2015-10-26 5:27 ` [dpdk-dev] [PATCH v5 7/7] doc: release notes update for flow director enhancement Wenzhuo Lu
2015-10-27 11:24 ` [dpdk-dev] [PATCH v5 0/7] Support new flow director modes on Intel x550 NIC Ananyev, Konstantin
2015-10-28 23:08 ` Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1445566690-15767-7-git-send-email-wenzhuo.lu@intel.com \
--to=wenzhuo.lu@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).