From: "Ming, LiX" <lix.ming@intel.com>
To: "Wu, Jingjing" <jingjing.wu@intel.com>, "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v2 2/4]e1000: add igb NIC filters of generic filter feature
Date: Tue, 27 May 2014 07:59:30 +0000 [thread overview]
Message-ID: <0976FC66838DDE4585886952506C9436FBA62F@SHSMSX104.ccr.corp.intel.com> (raw)
In-Reply-To: <1400895442-32433-3-git-send-email-jingjing.wu@intel.com>
This patch adds the following igb NIC filter implementations:
SYN filter, ethertype filter, 2-tuple filter and flex filter for the Intel 82580 and i350 NICs;
SYN filter, ethertype filter and 5-tuple filter for the Intel 82576 NIC.
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
lib/librte_pmd_e1000/e1000_ethdev.h | 53 +++
lib/librte_pmd_e1000/igb_ethdev.c | 745 ++++++++++++++++++++++++++++++++++++
2 files changed, 798 insertions(+)
Tested-by: Ming, LiX <lix.ming@intel.com>
Compilation passed
>>Compile OS: FC20 x86_64
>>Kernel version: 3.11.10-301
>>GCC version: 4.8.2
>>Server: Crownpass
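
A minimal usage sketch of the new SYN and ethertype filter ops, for reference
while reviewing. It calls the igb dev_ops directly only to show the semantics;
an application would normally use the ethdev wrappers added in patch 1/4. The
rte_syn_filter and rte_ethertype_filter field names are inferred from how this
patch reads them (their definitions live in patch 1/4); the ethertype value,
filter index and queue ids below are placeholders.

#include <rte_ethdev.h>

/* hypothetical helper, not part of this patch */
static int
setup_igb_filters(struct rte_eth_dev *dev)
{
        /* steer all TCP SYN packets to RX queue 1 */
        struct rte_syn_filter syn = {
                .enable  = 1,   /* 1 = enable the SYN filter */
                .hig_pri = 1,   /* sets RFCTL.SYNQFP: SYN filter gets high priority */
        };
        /* steer one ethertype (0x88F7 as a placeholder) to RX queue 2 */
        struct rte_ethertype_filter etype = {
                .ethertype   = 0x88F7,
                .priority_en = 0,       /* priority match is rejected by this patch on igb */
        };
        int ret;

        ret = (*dev->dev_ops->set_syn_filter)(dev, &syn, 1);
        if (ret != 0)
                return ret;

        /* ethertype filter index must be below E1000_MAX_ETQF_FILTERS (8) */
        return (*dev->dev_ops->add_ethertype_filter)(dev, 0, &etype, 2);
}
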
diff --git a/lib/librte_pmd_e1000/e1000_ethdev.h b/lib/librte_pmd_e1000/e1000_ethdev.h
index d09064e..239e849 100644
--- a/lib/librte_pmd_e1000/e1000_ethdev.h
+++ b/lib/librte_pmd_e1000/e1000_ethdev.h
@@ -52,6 +52,59 @@
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128
+#define IGB_MAX_RX_QUEUE_NUM 8
+#define IGB_MAX_RX_QUEUE_NUM_82576 16
+
+#define E1000_SYN_FILTER_ENABLE 0x00000001 /** syn filter enable field*/
+#define E1000_SYN_FILTER_QUEUE 0x0000000E /** syn filter queue field*/
+#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /** syn filter queue shift*/
+#define E1000_RFCTL_SYNQFP 0x00080000 /** SYNQFP in RFCTL register*/
+
+#define E1000_ETQF_ETHERTYPE 0x0000FFFF
+#define E1000_ETQF_QUEUE 0x00070000
+#define E1000_ETQF_QUEUE_SHIFT 16
+#define E1000_MAX_ETQF_FILTERS 8
+
+#define E1000_IMIR_DSTPORT 0x0000FFFF
+#define E1000_IMIR_PRIORITY 0xE0000000
+#define E1000_IMIR_EXT_SIZE_BP 0x00001000
+#define E1000_IMIR_EXT_CTRL_UGR 0x00002000
+#define E1000_IMIR_EXT_CTRL_ACK 0x00004000
+#define E1000_IMIR_EXT_CTRL_PSH 0x00008000
+#define E1000_IMIR_EXT_CTRL_RST 0x00010000
+#define E1000_IMIR_EXT_CTRL_SYN 0x00020000
+#define E1000_IMIR_EXT_CTRL_FIN 0x00040000
+#define E1000_IMIR_EXT_CTRL_BP 0x00080000
+#define E1000_MAX_TTQF_FILTERS 8
+#define E1000_2TUPLE_MAX_PRI 7
+
+#define E1000_MAX_FLEXIBLE_FILTERS 8
+#define E1000_MAX_FHFT 4
+#define E1000_MAX_FHFT_EXT 4
+#define E1000_MAX_FLEX_FILTER_PRI 7
+#define E1000_MAX_FLEX_FILTER_LEN 128
+#define E1000_FHFT_QUEUEING_LEN 0x0000007F
+#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
+#define E1000_FHFT_QUEUEING_PRIO 0x00070000
+#define E1000_FHFT_QUEUEING_OFFSET 0xFC
+#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8
+#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16
+#define E1000_WUFC_FLEX_HQ 0x00004000
+
+#define E1000_SPQF_SRCPORT 0x0000FFFF /** src port field */
+
+#define E1000_MAX_FTQF_FILTERS 8
+#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
+#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
+#define E1000_FTQF_PROTOCOL_COMP_MASK 0x10000000
+#define E1000_FTQF_SOURCE_ADDR_MASK 0x20000000
+#define E1000_FTQF_DEST_ADDR_MASK 0x40000000
+#define E1000_FTQF_SOURCE_PORT_MASK 0x80000000
+#define E1000_FTQF_VF_MASK_EN 0x00008000
+#define E1000_FTQF_QUEUE_MASK 0x03ff0000
+#define E1000_FTQF_QUEUE_SHIFT 16
+#define E1000_FTQF_QUEUE_ENABLE 0x00000100
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index 044eac3..1e63a8f 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -124,6 +124,43 @@ static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta *reta_conf);
+static int eth_igb_set_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint8_t rx_queue);
+static int eth_igb_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint8_t *rx_queue);
+static int eth_igb_add_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_ethertype_filter *filter, uint8_t rx_queue);
+static int eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_ethertype_filter *filter, uint8_t *rx_queue);
+static int eth_igb_add_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_2tuple_filter *filter, uint8_t rx_queue);
+static int eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_2tuple_filter *filter, uint8_t *rx_queue);
+static int eth_igb_add_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_flex_filter *filter, uint8_t rx_queue);
+static int eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_flex_filter *filter, uint8_t *rx_queue);
+static int eth_igb_add_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_5tuple_filter *filter, uint8_t rx_queue);
+static int eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_5tuple_filter *filter, uint8_t *rx_queue);
+
/*
* Define VF Stats MACRO for Non "cleared on read" register
*/
@@ -194,6 +231,20 @@ static struct eth_dev_ops eth_igb_ops = {
.mac_addr_remove = eth_igb_rar_clear,
.reta_update = eth_igb_rss_reta_update,
.reta_query = eth_igb_rss_reta_query,
+ .set_syn_filter = eth_igb_set_syn_filter,
+ .get_syn_filter = eth_igb_get_syn_filter,
+ .add_ethertype_filter = eth_igb_add_ethertype_filter,
+ .remove_ethertype_filter = eth_igb_remove_ethertype_filter,
+ .get_ethertype_filter = eth_igb_get_ethertype_filter,
+ .add_2tuple_filter = eth_igb_add_2tuple_filter,
+ .remove_2tuple_filter = eth_igb_remove_2tuple_filter,
+ .get_2tuple_filter = eth_igb_get_2tuple_filter,
+ .add_flex_filter = eth_igb_add_flex_filter,
+ .remove_flex_filter = eth_igb_remove_flex_filter,
+ .get_flex_filter = eth_igb_get_flex_filter,
+ .add_5tuple_filter = eth_igb_add_5tuple_filter,
+ .remove_5tuple_filter = eth_igb_remove_5tuple_filter,
+ .get_5tuple_filter = eth_igb_get_5tuple_filter,
};
/*
@@ -2184,6 +2235,700 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576)\
+ return -ENOSYS;\
+} while (0)
+
+/*
+ * set the SYN filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the SYN filter (enable: 1 means enable, 0 means disable).
+ * rx_queue: the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_set_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint8_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (rx_queue >= IGB_MAX_RX_QUEUE_NUM)
+ return (-EINVAL);
+
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+
+ if (filter->enable) {
+ if (synqf & E1000_SYN_FILTER_ENABLE)
+ return (-EINVAL);
+ synqf = (uint32_t)(((rx_queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
+ E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
+ } else
+ synqf = 0;
+
+ if (filter->hig_pri)
+ rfctl |= E1000_RFCTL_SYNQFP;
+ else
+ rfctl &= ~E1000_RFCTL_SYNQFP;
+
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+ return 0;
+}
+
+/*
+ * get the SYN filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the SYN filter structure to be filled (enable: 1 means enabled, 0 means disabled).
+ * *rx_queue: pointer to the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint8_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ filter->enable = (synqf & E1000_SYN_FILTER_ENABLE) ? 1 : 0;
+ filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
+ *rx_queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
+ E1000_SYN_FILTER_QUEUE_SHIFT);
+ return 0;
+}
+
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter to be added
+ * rx_queue: the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_ethertype_filter *filter, uint8_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (index >= E1000_MAX_ETQF_FILTERS || rx_queue >= IGB_MAX_RX_QUEUE_NUM)
+ return (-EINVAL);
+
+ etqf = E1000_READ_REG(hw, E1000_ETQF(index));
+ if (etqf & E1000_ETQF_FILTER_ENABLE)
+ return (-EINVAL); /**filter index is in use*/
+ else
+ etqf = 0;
+
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ethertype & E1000_ETQF_ETHERTYPE);
+ etqf |= rx_queue << E1000_ETQF_QUEUE_SHIFT;
+
+ if (filter->priority_en) {
+ PMD_INIT_LOG(ERR, "vlan and priority (%d) are not supported"
+ " in E1000.\n", filter->priority);
+ return (-EINVAL);
+ }
+
+ E1000_WRITE_REG(hw, E1000_ETQF(index), etqf);
+ return 0;
+}
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev, uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (index >= E1000_MAX_ETQF_FILTERS)
+ return (-EINVAL);
+
+ E1000_WRITE_REG(hw, E1000_ETQF(index), 0);
+ return 0;
+}
+
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter structure to be filled
+ * *rx_queue: pointer to the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_ethertype_filter *filter, uint8_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (index >= E1000_MAX_ETQF_FILTERS)
+ return (-EINVAL);
+
+ etqf = E1000_READ_REG(hw, E1000_ETQF(index));
+ if (etqf & E1000_ETQF_FILTER_ENABLE) {
+ filter->ethertype = etqf & E1000_ETQF_ETHERTYPE;
+ filter->priority_en = 0;
+ *rx_queue = (etqf & E1000_ETQF_QUEUE) >> E1000_ETQF_QUEUE_SHIFT;
+ return 0;
+ }
+ return (-ENOENT);
+}
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350)\
+ return -ENOSYS; \
+} while (0)
+
+/*
+ * add a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter to be added
+ * rx_queue: the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_2tuple_filter *filter, uint8_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ttqf, imir = 0;
+ uint32_t imir_ext = 0;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_TTQF_FILTERS ||
+ rx_queue >= IGB_MAX_RX_QUEUE_NUM ||
+ filter->priority > E1000_2TUPLE_MAX_PRI)
+ return (-EINVAL); /**filter index is out of range*/
+ if (filter->tcp_flags > TCP_FLAG_ALL)
+ return (-EINVAL); /**flags is invalid*/
+
+ ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
+ if (ttqf & E1000_TTQF_QUEUE_ENABLE)
+ return (-EINVAL); /**filter index is in use*/
+
+ imir = (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
+ if (filter->dst_port_mask == 1) /**1b means not compare*/
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf = 0;
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(rx_queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->protocol & E1000_TTQF_PROTOCOL_MASK);
+ if (filter->protocol_mask == 1)
+ ttqf |= E1000_TTQF_MASK_ENABLE;
+ else
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ imir_ext |= E1000_IMIR_EXT_SIZE_BP;
+ /*tcp flags bits setting*/
+ if (filter->tcp_flags & TCP_FLAG_ALL) {
+ if (filter->tcp_flags & TCP_UGR_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
+ if (filter->tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
+ if (filter->tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
+ if (filter->tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_RST;
+ if (filter->tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
+ if (filter->tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
+ imir_ext &= ~E1000_IMIR_EXT_CTRL_BP;
+ } else
+ imir_ext |= E1000_IMIR_EXT_CTRL_BP;
+ E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(index), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
+ return 0;
+}
+
+/*
+ * remove a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_TTQF_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ E1000_WRITE_REG(hw, E1000_TTQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
+ return 0;
+}
+
+/*
+ * get a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter structure to be filled
+ * *rx_queue: pointer to the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_2tuple_filter *filter, uint8_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t imir, ttqf, imir_ext;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_TTQF_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
+ if (ttqf & E1000_TTQF_QUEUE_ENABLE) {
+ imir = E1000_READ_REG(hw, E1000_IMIR(index));
+ filter->protocol = ttqf & E1000_TTQF_PROTOCOL_MASK;
+ filter->protocol_mask = (ttqf & E1000_TTQF_MASK_ENABLE) ? 1 : 0;
+ *rx_queue = (ttqf & E1000_TTQF_RX_QUEUE_MASK) >>
+ E1000_TTQF_QUEUE_SHIFT;
+ filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
+ filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
+ filter->priority = (imir & E1000_IMIR_PRIORITY) >>
+ E1000_IMIR_PRIORITY_SHIFT;
+
+ imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
+ if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
+ if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
+ filter->tcp_flags |= TCP_UGR_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
+ filter->tcp_flags |= TCP_ACK_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
+ filter->tcp_flags |= TCP_PSH_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
+ filter->tcp_flags |= TCP_RST_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
+ filter->tcp_flags |= TCP_SYN_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
+ filter->tcp_flags |= TCP_FIN_FLAG;
+ } else
+ filter->tcp_flags = 0;
+ return 0;
+ }
+ return (-ENOENT);
+}
+
+/*
+ * add a flex filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter to be added
+ * rx_queue: the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_flex_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_flex_filter *filter, uint8_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, en_bits = 0;
+ uint32_t queueing = 0;
+ uint32_t reg_off = 0;
+ uint8_t i, j = 0;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_FLEXIBLE_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+ filter->len % 8 != 0 ||
+ filter->priority > E1000_MAX_FLEX_FILTER_PRI)
+ return (-EINVAL);
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ en_bits = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
+ if ((wufc & en_bits) == en_bits)
+ return (-EINVAL); /**the filter is enabled*/
+
+ E1000_WRITE_REG(hw, E1000_WUFC,
+ wufc | E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index));
+
+ j = 0;
+ if (index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(index);
+ else
+ reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
+
+ for (i = 0; i < 16; i++) {
+ E1000_WRITE_REG(hw, reg_off + i*4*4, filter->dwords[j]);
+ E1000_WRITE_REG(hw, reg_off + (i*4+1)*4, filter->dwords[++j]);
+ E1000_WRITE_REG(hw, reg_off + (i*4+2)*4,
+ (uint32_t)filter->mask[i]);
+ ++j;
+ }
+ queueing |= filter->len |
+ (rx_queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, queueing);
+ return 0;
+}
+
+/*
+ * remove a flex filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, reg_off = 0;
+ uint8_t i;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_FLEXIBLE_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc & (~(E1000_WUFC_FLX0 << index)));
+
+ if (index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(index);
+ else
+ reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
+
+ for (i = 0; i < 64; i++)
+ E1000_WRITE_REG(hw, reg_off + i*4, 0);
+ return 0;
+}
+
+/*
+ * get a flex filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter structure to be filled
+ * *rx_queue: pointer to the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_flex_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_flex_filter *filter, uint8_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, queueing, wufc_en = 0;
+ uint8_t i, j;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_FLEXIBLE_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
+
+ if ((wufc & wufc_en) == wufc_en) {
+ uint32_t reg_off = 0;
+ j = 0;
+ if (index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(index);
+ else
+ reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
+
+ for (i = 0; i < 16; i++, j = i * 2) {
+ filter->dwords[j] =
+ E1000_READ_REG(hw, reg_off + i*4*4);
+ filter->dwords[j+1] =
+ E1000_READ_REG(hw, reg_off + (i*4+1)*4);
+ filter->mask[i] =
+ E1000_READ_REG(hw, reg_off + (i*4+2)*4);
+ }
+ queueing = E1000_READ_REG(hw,
+ reg_off + E1000_FHFT_QUEUEING_OFFSET);
+ filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
+ filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
+ E1000_FHFT_QUEUEING_PRIO_SHIFT;
+ *rx_queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
+ E1000_FHFT_QUEUEING_QUEUE_SHIFT;
+ return 0;
+ }
+ return (-ENOENT);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter to be added
+ * rx_queue: the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint8_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf, spqf = 0;
+ uint32_t imir = 0;
+ uint32_t imir_ext = 0;
+
+ if (hw->mac.type != e1000_82576)
+ return (-ENOSYS);
+
+ if (index >= E1000_MAX_FTQF_FILTERS ||
+ rx_queue >= IGB_MAX_RX_QUEUE_NUM_82576)
+ return (-EINVAL); /**filter index is out of range*/
+
+ ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
+ if (ftqf & E1000_FTQF_QUEUE_ENABLE)
+ return (-EINVAL); /**filter index is in use*/
+
+ ftqf = 0;
+ ftqf |= filter->protocol & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->src_ip_mask == 1) /** 1b means not compare*/
+ ftqf |= E1000_FTQF_SOURCE_ADDR_MASK;
+ if (filter->dst_ip_mask == 1)
+ ftqf |= E1000_FTQF_DEST_ADDR_MASK;
+ if (filter->src_port_mask == 1)
+ ftqf |= E1000_FTQF_SOURCE_PORT_MASK;
+ if (filter->protocol_mask == 1)
+ ftqf |= E1000_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= (rx_queue << E1000_FTQF_QUEUE_SHIFT) & E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_VF_MASK_EN;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(index), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(index), filter->dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(index), filter->src_ip);
+
+ spqf |= filter->src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(index), spqf);
+
+ imir |= (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
+ if (filter->dst_port_mask == 1) /**1b means not compare*/
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ imir_ext |= E1000_IMIR_EXT_SIZE_BP;
+ /*tcp flags bits setting*/
+ if (filter->tcp_flags & TCP_FLAG_ALL) {
+ if (filter->tcp_flags & TCP_UGR_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
+ if (filter->tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
+ if (filter->tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
+ if (filter->tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_RST;
+ if (filter->tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
+ if (filter->tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
+ } else
+ imir_ext |= E1000_IMIR_EXT_CTRL_BP;
+ E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != e1000_82576)
+ return (-ENOSYS);
+
+ if (index >= E1000_MAX_FTQF_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ E1000_WRITE_REG(hw, E1000_FTQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_DAQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
+ return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter
+ * filter: pointer to the filter structure to be filled
+ * *rx_queue: pointer to the queue id the filter is assigned to
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint8_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t spqf, ftqf, imir, imir_ext;
+
+ if (hw->mac.type != e1000_82576)
+ return (-ENOSYS);
+
+ if (index >= E1000_MAX_FTQF_FILTERS)
+ return (-EINVAL); /**filter index is out of range*/
+
+ ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
+ if (ftqf & E1000_FTQF_QUEUE_ENABLE) {
+ filter->src_ip_mask =
+ (ftqf & E1000_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+ filter->dst_ip_mask =
+ (ftqf & E1000_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+ filter->src_port_mask =
+ (ftqf & E1000_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+ filter->protocol_mask =
+ (ftqf & E1000_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+ filter->protocol =
+ (uint8_t)ftqf & E1000_FTQF_PROTOCOL_MASK;
+ *rx_queue = (uint8_t)((ftqf & E1000_FTQF_QUEUE_MASK) >>
+ E1000_FTQF_QUEUE_SHIFT);
+
+ spqf = E1000_READ_REG(hw, E1000_SPQF(index));
+ filter->src_port = spqf & E1000_SPQF_SRCPORT;
+
+ filter->dst_ip = E1000_READ_REG(hw, E1000_DAQF(index));
+ filter->src_ip = E1000_READ_REG(hw, E1000_SAQF(index));
+
+ imir = E1000_READ_REG(hw, E1000_IMIR(index));
+ filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
+ filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
+ filter->priority = (imir & E1000_IMIR_PRIORITY) >>
+ E1000_IMIR_PRIORITY_SHIFT;
+
+ imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
+ if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
+ if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
+ filter->tcp_flags |= TCP_UGR_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
+ filter->tcp_flags |= TCP_ACK_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
+ filter->tcp_flags |= TCP_PSH_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
+ filter->tcp_flags |= TCP_RST_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
+ filter->tcp_flags |= TCP_SYN_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
+ filter->tcp_flags |= TCP_FIN_FLAG;
+ } else
+ filter->tcp_flags = 0;
+ return 0;
+ }
+ return (-ENOENT);
+}
+
static struct rte_driver pmd_igb_drv = {
.type = PMD_PDEV,
.init = rte_igb_pmd_init,
--
1.8.1.4
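
A corresponding sketch for the 5tuple filter path, which this patch enables on
82576 only (RX queues 0-15 there). The rte_5tuple_filter field names are again
taken from how this patch reads them; the actual structure and the expected
byte order of the address/port fields are defined in patch 1/4, so the values
below are placeholders.

#include <rte_ethdev.h>

/*
 * hypothetical helper, not part of this patch: match TCP packets to
 * destination port 22, ignore addresses and source port, and steer the
 * matches to RX queue 3 using filter index 0.
 */
static int
add_tcp_port_5tuple_filter(struct rte_eth_dev *dev)
{
        struct rte_5tuple_filter f = {
                .protocol      = 6,     /* IPPROTO_TCP */
                .protocol_mask = 0,     /* 0 = compare the protocol field */
                .src_ip_mask   = 1,     /* 1 = do not compare the source address */
                .dst_ip_mask   = 1,     /* 1 = do not compare the destination address */
                .src_port_mask = 1,     /* 1 = do not compare the source port */
                .dst_port      = 22,    /* byte order as defined by the patch 1/4 API */
                .dst_port_mask = 0,     /* 0 = compare the destination port */
                .priority      = 1,
                .tcp_flags     = 0,     /* no TCP flag match (IMIREXT bypass) */
        };

        return (*dev->dev_ops->add_5tuple_filter)(dev, 0, &f, 3);
}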