* [dpdk-dev] [PATCH 1/6]i40e:vxlan packet identification
2014-08-12 3:12 [dpdk-dev] [PATCH 0/6]Support VxLAN on fortville Jijiang Liu
@ 2014-08-12 3:12 ` Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 2/6]app/test-pmd:test vxlan " Jijiang Liu
` (4 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Jijiang Liu @ 2014-08-12 3:12 UTC (permalink / raw)
To: dev
VxLAN UDP port configuration on i40e, it includes
- VxLAN UDP port initialization
- Add VxLAN UDP port API
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
lib/librte_ether/rte_ethdev.c | 63 ++++++++++++
lib/librte_ether/rte_ethdev.h | 76 ++++++++++++++
lib/librte_ether/rte_ether.h | 10 ++
lib/librte_pmd_i40e/i40e_ethdev.c | 199 ++++++++++++++++++++++++++++++++++++-
lib/librte_pmd_i40e/i40e_ethdev.h | 5 +
5 files changed, 352 insertions(+), 1 deletions(-)
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index fd1010a..325edb1 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1892,6 +1892,69 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
}
int
+rte_eth_dev_udp_tunnel_add(uint8_t port_id,
+ struct rte_eth_udp_tunnel *udp_tunnel,
+ uint8_t count)
+{
+ uint8_t i;
+ struct rte_eth_dev *dev;
+ struct rte_eth_udp_tunnel *tunnel;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ if (udp_tunnel == NULL) {
+ PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ return -EINVAL;
+ }
+ tunnel = udp_tunnel;
+
+ for (i = 0; i < count; i++, tunnel++) {
+ if (tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ return -EINVAL;
+ }
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
+ return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel, count);
+}
+
+int
+rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
+ struct rte_eth_udp_tunnel *udp_tunnel,
+ uint8_t count)
+{
+ uint8_t i;
+ struct rte_eth_dev *dev;
+ struct rte_eth_udp_tunnel *tunnel;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ if (udp_tunnel == NULL) {
+ PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ return -EINVAL;
+ }
+ tunnel = udp_tunnel;
+ for (i = 0; i < count; i++, tunnel++) {
+ if (tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ return -EINVAL;
+ }
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
+ return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel, count);
+}
+
+int
rte_eth_led_on(uint8_t port_id)
{
struct rte_eth_dev *dev;
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 50df654..d24907f 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -708,6 +708,26 @@ struct rte_fdir_conf {
};
/**
+ * Tunneled type.
+ */
+enum rte_eth_tunnel_type {
+ RTE_TUNNEL_TYPE_NONE = 0,
+ RTE_TUNNEL_TYPE_VXLAN,
+ RTE_TUNNEL_TYPE_GENEVE,
+ RTE_TUNNEL_TYPE_TEREDO,
+ RTE_TUNNEL_TYPE_NVGRE,
+ RTE_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * UDP tunneling configuration.
+ */
+struct rte_eth_udp_tunnel {
+ uint16_t udp_port;
+ uint8_t prot_type;
+};
+
+/**
* Possible l4type of FDIR filters.
*/
enum rte_l4type {
@@ -829,6 +849,7 @@ struct rte_intr_conf {
* configuration settings may be needed.
*/
struct rte_eth_conf {
+ enum rte_eth_tunnel_type tunnel_type;
uint16_t link_speed;
/**< ETH_LINK_SPEED_10[0|00|000], or 0 for autonegotation */
uint16_t link_duplex;
@@ -1240,6 +1261,17 @@ typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
uint8_t rule_id);
/**< @internal Remove a traffic mirroring rule on an Ethernet device */
+typedef int (*eth_udp_tunnel_add_t)(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ uint8_t count);
+/**< @internal Add tunneling UDP info */
+
+typedef int (*eth_udp_tunnel_del_t)(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ uint8_t count);
+/**< @internal Delete tunneling UDP info */
+
+
#ifdef RTE_NIC_BYPASS
enum {
@@ -1412,6 +1444,8 @@ struct eth_dev_ops {
eth_set_vf_rx_t set_vf_rx; /**< enable/disable a VF receive */
eth_set_vf_tx_t set_vf_tx; /**< enable/disable a VF transmit */
eth_set_vf_vlan_filter_t set_vf_vlan_filter; /**< Set VF VLAN filter */
+ eth_udp_tunnel_add_t udp_tunnel_add;
+ eth_udp_tunnel_del_t udp_tunnel_del;
eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit */
eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit */
@@ -3268,6 +3302,48 @@ int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
struct rte_eth_rss_conf *rss_conf);
+ /**
+ * Add tunneling UDP port configuration of Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tunnel_udp
+ * Where to store the current Tunneling UDP configuration
+ * of the Ethernet device.
+ * @param count
+ * How many configurations are going to be added.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support tunnel type.
+ */
+int
+rte_eth_dev_udp_tunnel_add(uint8_t port_id,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ uint8_t count);
+
+ /**
+ * Delete tunneling UDP port configuration of Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tunnel_udp
+ * Where to store the current Tunneling UDP configuration
+ * of the Ethernet device.
+ * @param count
+ * How many configurations are going to be deleted.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support tunnel type.
+ */
+int
+rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ uint8_t count);
+
/**
* add syn filter
*
diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
index 2e08f23..604712a 100644
--- a/lib/librte_ether/rte_ether.h
+++ b/lib/librte_ether/rte_ether.h
@@ -69,6 +69,8 @@ extern "C" {
#define ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
+#define ETHER_VXLAN_UDP_PORT 4789 /**< default UDP port assigned to VxLAN */
+
/**
* Ethernet address:
* A universally administered address is uniquely assigned to a device by its
@@ -286,6 +288,12 @@ struct vlan_hdr {
uint16_t eth_proto;/**< Ethernet type of encapsulated frame. */
} __attribute__((__packed__));
+/* VXLAN protocol header */
+struct vxlan_hdr {
+ uint32_t vx_flags; /**< VxLAN flag. */
+ uint32_t vx_vni; /**< VxLAN ID. */
+} __attribute__((__packed__));
+
/* Ethernet frame types */
#define ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */
#define ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */
@@ -294,6 +302,8 @@ struct vlan_hdr {
#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
+#define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 9ed31b5..bb3d39a 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -187,7 +187,7 @@ static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
- struct i40e_vsi *vsi);
+ struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
@@ -203,6 +203,13 @@ static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel,
+ uint8_t count);
+static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel,
+ uint8_t count);
+static int i40e_pf_config_vxlan(struct i40e_pf *pf);
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
@@ -248,6 +255,8 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.reta_query = i40e_dev_rss_reta_query,
.rss_hash_update = i40e_dev_rss_hash_update,
.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
+ .udp_tunnel_add = i40e_dev_udp_tunnel_add,
+ .udp_tunnel_del = i40e_dev_udp_tunnel_del,
};
static struct eth_driver rte_i40e_pmd = {
@@ -2365,6 +2374,34 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
return 0;
}
+static int
+i40e_vxlan_filters_init(struct i40e_pf *pf)
+{
+ uint8_t filter_index;
+ int ret = 0;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ return 0;
+
+ /* Init first entry in tunneling UDP table */
+ ret = i40e_aq_add_udp_tunnel(hw, ETHER_VXLAN_UDP_PORT,
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add UDP tunnel port %d "
+ "with index=%d\n", ETHER_VXLAN_UDP_PORT,
+ filter_index);
+ } else {
+ pf->vxlan_bitmap |= 1;
+ pf->vxlan_ports[0] = ETHER_VXLAN_UDP_PORT;
+ PMD_DRV_LOG(INFO, "Added UDP tunnel port %d with "
+ "index=%d\n", ETHER_VXLAN_UDP_PORT, filter_index);
+ }
+
+ return ret;
+}
+
/* Setup a VSI */
struct i40e_vsi *
i40e_vsi_setup(struct i40e_pf *pf,
@@ -2988,6 +3025,12 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
uint16_t i;
i40e_pf_config_mq_rx(pf);
+
+ if (data->dev_conf.tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+ pf->flags |= I40E_FLAG_VXLAN;
+ i40e_pf_config_vxlan(pf);
+ }
+
for (i = 0; i < data->nb_rx_queues; i++) {
ret = i40e_rx_queue_init(data->rx_queues[i]);
if (ret != I40E_SUCCESS) {
@@ -3904,6 +3947,150 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+static int
+i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
+{
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return -1;
+}
+
+static int
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx, ret;
+ uint8_t filter_idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN)) {
+ PMD_DRV_LOG(ERR, "VxLAN tunneling mode is not configured\n");
+ return -EINVAL;
+ }
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx >= 0) {
+ PMD_DRV_LOG(ERR, "Port %d already offloaded\n", port);
+ return -1;
+ }
+
+ /* Now check if there is space to add the new port */
+ idx = i40e_get_vxlan_port_idx(pf, 0);
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
+ "not adding port %d\n", port);
+ return -ENOSPC;
+ }
+
+ ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_idx, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add VxLAN UDP port %d\n", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d\n",
+ port, filter_idx);
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[idx] = port;
+ pf->vxlan_bitmap |= (1 << idx);
+
+ return 0;
+}
+
+static int
+i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Port %d doesn't exist\n", port);
+ return -1;
+ }
+
+ if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete VxLAN UDP port %d\n", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d\n",
+ port, idx);
+
+ pf->vxlan_ports[idx] = 0;
+ pf->vxlan_bitmap &= ~(1 << idx);
+
+ return 0;
+}
+
+/* configure port of UDP tunneling */
+static int
+i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel, uint8_t count)
+{
+ uint16_t i;
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ for (i = 0; i < count; i++, udp_tunnel++) {
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.\n");
+ ret = -1;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type\n");
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel, uint8_t count)
+{
+ uint16_t i;
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ for (i = 0; i < count; i++, udp_tunnel++) {
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.\n");
+ ret = -1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type\n");
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
/* Configure RSS */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
@@ -3940,6 +4127,16 @@ i40e_pf_config_rss(struct i40e_pf *pf)
return i40e_hw_rss_hash_set(hw, &rss_conf);
}
+/* Configure VxLAN */
+static int
+i40e_pf_config_vxlan(struct i40e_pf *pf)
+{
+ if (pf->flags & I40E_FLAG_VXLAN)
+ i40e_vxlan_filters_init(pf);
+
+ return 0;
+}
+
static int
i40e_pf_config_mq_rx(struct i40e_pf *pf)
{
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h
index 64deef2..22d0628 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.h
+++ b/lib/librte_pmd_i40e/i40e_ethdev.h
@@ -60,6 +60,7 @@
#define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4)
#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5)
#define I40E_FLAG_FDIR (1ULL << 6)
+#define I40E_FLAG_VXLAN (1ULL << 7)
#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
I40E_FLAG_DCB | \
I40E_FLAG_VMDQ | \
@@ -216,6 +217,10 @@ struct i40e_pf {
uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
uint16_t vf_nb_qps; /* The number of queue pairs of VF */
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+
+ /* store VxLAN UDP ports */
+ uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ uint16_t vxlan_bitmap; /* Vxlan bit mask */
};
enum pending_msg {
--
1.7.7.6
^ permalink raw reply [flat|nested] 12+ messages in thread
* [dpdk-dev] [PATCH 2/6]app/test-pmd:test vxlan packet identification
2014-08-12 3:12 [dpdk-dev] [PATCH 0/6]Support VxLAN on fortville Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 1/6]i40e:vxlan packet identification Jijiang Liu
@ 2014-08-12 3:12 ` Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API Jijiang Liu
` (3 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Jijiang Liu @ 2014-08-12 3:12 UTC (permalink / raw)
To: dev
Add commands to test receive vxlan packet identification, which include
- use command to add/delete VxLAN UDP port.
- use rxonly mode to receive VxLAN packet.
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
app/test-pmd/cmdline.c | 78 ++++++++++++++++++++++++++++++++++++++++++--
app/test-pmd/parameters.c | 13 +++++++
app/test-pmd/rxonly.c | 49 ++++++++++++++++++++++++++++
app/test-pmd/testpmd.c | 8 +++++
app/test-pmd/testpmd.h | 9 +++++
5 files changed, 153 insertions(+), 4 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 345be11..67cf63e 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -285,6 +285,12 @@ static void cmd_help_long_parsed(void *parsed_result,
" Set the outer VLAN TPID for Packet Filtering on"
" a port\n\n"
+ "rx_vxlan_port add (udp_port) (port_id)\n"
+ " Add an UDP port for VxLAN packet filter on a port\n\n"
+
+ "rx_vxlan_port rm (udp_port) (port_id)\n"
+ " Remove an UDP port for VxLAN packet filter on a port\n\n"
+
"tx_vlan set vlan_id (port_id)\n"
" Set hardware insertion of VLAN ID in packets sent"
" on a port.\n\n"
@@ -296,13 +302,17 @@ static void cmd_help_long_parsed(void *parsed_result,
" Disable hardware insertion of a VLAN header in"
" packets sent on a port.\n\n"
- "tx_checksum set mask (port_id)\n"
+ "tx_checksum set (mask) (port_id)\n"
" Enable hardware insertion of checksum offload with"
- " the 4-bit mask, 0~0xf, in packets sent on a port.\n"
+ " the 8-bit mask, 0~0xff, in packets sent on a port.\n"
" bit 0 - insert ip checksum offload if set\n"
" bit 1 - insert udp checksum offload if set\n"
" bit 2 - insert tcp checksum offload if set\n"
" bit 3 - insert sctp checksum offload if set\n"
+ " bit 4 - insert inner ip checksum offload if set\n"
+ " bit 5 - insert inner udp checksum offload if set\n"
+ " bit 6 - insert inner tcp checksum offload if set\n"
+ " bit 7 - insert inner sctp checksum offload if set\n"
" Please check the NIC datasheet for HW limits.\n\n"
"set fwd (%s)\n"
@@ -2646,8 +2656,9 @@ cmdline_parse_inst_t cmd_tx_cksum_set = {
.f = cmd_tx_cksum_set_parsed,
.data = NULL,
.help_str = "enable hardware insertion of L3/L4checksum with a given "
- "mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip"
- "Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP",
+ "mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip "
+ "Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP, Bit 4 for inner ip "
+ "Bit 5 for inner UDP, Bit 6 for inner TCP, Bit 7 for inner SCTP",
.tokens = {
(void *)&cmd_tx_cksum_set_tx_cksum,
(void *)&cmd_tx_cksum_set_set,
@@ -6112,6 +6123,64 @@ cmdline_parse_inst_t cmd_vf_rate_limit = {
},
};
+/* *** CONFIGURE TUNNEL UDP PORT *** */
+struct cmd_tunnel_udp_config {
+ cmdline_fixed_string_t cmd;
+ cmdline_fixed_string_t what;
+ uint16_t udp_port;
+ uint8_t port_id;
+};
+
+static void
+cmd_tunnel_udp_config_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tunnel_udp_config *res = parsed_result;
+ struct rte_eth_udp_tunnel tunnel_udp;
+ int ret;
+
+ tunnel_udp.udp_port = res->udp_port;
+
+ if (!strcmp(res->cmd, "rx_vxlan_port"))
+ tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ if (!strcmp(res->what, "add"))
+ ret = rte_eth_dev_udp_tunnel_add(res->port_id, &tunnel_udp, 1);
+ else
+ ret = rte_eth_dev_udp_tunnel_delete(res->port_id, &tunnel_udp, 1);
+
+ if (ret < 0)
+ printf("udp tunneling port config error: (%s)\n", strerror(-ret));
+}
+
+cmdline_parse_token_string_t cmd_tunnel_udp_config_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
+ cmd, "rx_vxlan_port");
+cmdline_parse_token_string_t cmd_tunnel_udp_config_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
+ what, "add#rm");
+cmdline_parse_token_num_t cmd_tunnel_udp_config_udp_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config,
+ udp_port, UINT16);
+cmdline_parse_token_num_t cmd_tunnel_udp_config_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_tunnel_udp_config = {
+ .f = cmd_tunnel_udp_config_parsed,
+ .data = (void *)0,
+ .help_str = "add/rm an UDP tunneling port filter: "
+ "rx_vxlan_port add udp_port port_id",
+ .tokens = {
+ (void *)&cmd_tunnel_udp_config_cmd,
+ (void *)&cmd_tunnel_udp_config_what,
+ (void *)&cmd_tunnel_udp_config_udp_port,
+ (void *)&cmd_tunnel_udp_config_port_id,
+ NULL,
+ },
+};
+
/* *** CONFIGURE VM MIRROR VLAN/POOL RULE *** */
struct cmd_set_mirror_mask_result {
cmdline_fixed_string_t set;
@@ -7406,6 +7475,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
(cmdline_parse_inst_t *)&cmd_queue_rate_limit,
(cmdline_parse_inst_t *)&cmd_vf_rate_limit,
+ (cmdline_parse_inst_t *)&cmd_tunnel_udp_config,
(cmdline_parse_inst_t *)&cmd_set_mirror_mask,
(cmdline_parse_inst_t *)&cmd_set_mirror_link,
(cmdline_parse_inst_t *)&cmd_reset_mirror_rule,
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 9573a43..fda8c1d 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -202,6 +202,10 @@ usage(char* progname)
printf(" --txpkts=X[,Y]*: set TX segment sizes.\n");
printf(" --disable-link-check: disable check on link status when "
"starting/stopping ports.\n");
+ printf(" --tunnel-type=N: set tunneling packet type "
+ "(0 <= N <= 4).(0:non-tunneling packet;1:VxLAN; "
+ "2:GENEVE;3: TEREDO;4: NVGRE)\n");
+
}
#ifdef RTE_LIBRTE_CMDLINE
@@ -600,6 +604,7 @@ launch_args_parse(int argc, char** argv)
{ "no-flush-rx", 0, 0, 0 },
{ "txpkts", 1, 0, 0 },
{ "disable-link-check", 0, 0, 0 },
+ { "tunnel-type", 1, 0, 0 },
{ 0, 0, 0, 0 },
};
@@ -1032,6 +1037,14 @@ launch_args_parse(int argc, char** argv)
else
rte_exit(EXIT_FAILURE, "rxfreet must be >= 0\n");
}
+ if (!strcmp(lgopts[opt_idx].name, "tunnel-type")) {
+ n = atoi(optarg);
+ if ((n >= 0) && (n < RTE_TUNNEL_TYPE_MAX))
+ rx_tunnel_type = (uint16_t)n;
+ else
rte_exit(EXIT_FAILURE, "tunnel-type must be 0-%d\n",
RTE_TUNNEL_TYPE_MAX - 1);
+ }
if (!strcmp(lgopts[opt_idx].name, "tx-queue-stats-mapping")) {
if (parse_queue_stats_mapping_config(optarg, TX)) {
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index 5f21a3e..4f529f6 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -66,6 +66,8 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
#include "testpmd.h"
@@ -112,6 +114,9 @@ pkt_burst_receive(struct fwd_stream *fs)
uint16_t ol_flags;
uint16_t nb_rx;
uint16_t i;
+ uint8_t ptype;
+ uint8_t is_encapsulation;
+
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -152,6 +157,11 @@ pkt_burst_receive(struct fwd_stream *fs)
eth_hdr = (struct ether_hdr *) mb->pkt.data;
eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
ol_flags = mb->ol_flags;
+ ptype = mb->reserved;
+
+ is_encapsulation = IS_ETH_IPV4_TUNNEL(ptype) |
+ IS_ETH_IPV6_TUNNEL(ptype);
+
print_ether_addr(" src=", ð_hdr->s_addr);
print_ether_addr(" - dst=", ð_hdr->d_addr);
printf(" - type=0x%04x - length=%u - nb_segs=%d",
@@ -167,6 +177,45 @@ pkt_burst_receive(struct fwd_stream *fs)
if (ol_flags & PKT_RX_VLAN_PKT)
printf(" - VLAN tci=0x%x",
mb->pkt.vlan_macip.f.vlan_tci);
+ if (is_encapsulation) {
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct udp_hdr *udp_hdr;
+ uint8_t l2_len;
+ uint8_t l3_len;
+ uint8_t l4_len;
+ uint8_t l4_proto;
+ struct vxlan_hdr *vxlan_hdr;
+
+ l2_len = sizeof(struct ether_hdr);
+
+ /* Do not support ipv4 option field */
+ if (IS_ETH_IPV4_TUNNEL(ptype)) {
+ l3_len = sizeof(struct ipv4_hdr);
+ ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len);
+ l4_proto = ipv4_hdr->next_proto_id;
+ } else {
+ l3_len = sizeof(struct ipv6_hdr);
+ ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len);
+ l4_proto = ipv6_hdr->proto;
+ }
+ if (l4_proto == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len + l3_len);
+ l4_len = sizeof(struct udp_hdr);
+ vxlan_hdr = (struct vxlan_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len + l3_len
+ + l4_len);
+
+ printf(" - VxLAN packet: packet type =%d, "
+ "Destination UDP port =%d, VNI = %d",
+ ptype, RTE_BE_TO_CPU_16(udp_hdr->dst_port),
+ rte_be_to_cpu_32(vxlan_hdr->vx_vni) >> 8);
+ }
+ }
+ printf(" - Receive queue=0x%x", (unsigned) fs->rx_queue);
printf("\n");
if (ol_flags != 0) {
int rxf;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index e8a4b45..daa8f41 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -243,6 +243,12 @@ uint16_t tx_free_thresh = 0; /* Use default values. */
uint16_t tx_rs_thresh = 0; /* Use default values. */
/*
+ * Configurable value of tunnel type.
+ */
+
+uint8_t rx_tunnel_type = 0; /* Use default values. */
+
+/*
* Configurable value of TX queue flags.
*/
uint32_t txq_flags = 0; /* No flags set. */
@@ -1676,6 +1682,8 @@ init_port_config(void)
port = &ports[pid];
port->dev_conf.rxmode = rx_mode;
port->dev_conf.fdir_conf = fdir_conf;
+ if (rx_tunnel_type == 1)
+ port->dev_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
if (nb_rxq > 1) {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index ac86bfe..a304c47 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -80,6 +80,10 @@ typedef uint16_t streamid_t;
#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
+/* For FVL NIC */
+#define IS_ETH_IPV4_TUNNEL(ptype) ((ptype > 58) && (ptype < 87))
+#define IS_ETH_IPV6_TUNNEL(ptype) ((ptype > 124) && (ptype < 153))
+
enum {
PORT_TOPOLOGY_PAIRED,
PORT_TOPOLOGY_CHAINED,
@@ -130,6 +134,10 @@ struct fwd_stream {
* Bit 1: Insert UDP checksum
* Bit 2: Insert TCP checksum
* Bit 3: Insert SCTP checksum
+ * Bit 4: Insert inner IP checksum
+ * Bit 5: Insert inner UDP checksum
+ * Bit 6: Insert inner TCP checksum
+ * Bit 7: Insert inner SCTP checksum
* Bit 11: Insert VLAN Label
*/
struct rte_port {
@@ -342,6 +350,7 @@ extern uint8_t rx_drop_en;
extern uint16_t tx_free_thresh;
extern uint16_t tx_rs_thresh;
extern uint32_t txq_flags;
+extern uint8_t rx_tunnel_type;
extern uint8_t dcb_config;
extern uint8_t dcb_test;
--
1.7.7.6
^ permalink raw reply [flat|nested] 12+ messages in thread
* [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
2014-08-12 3:12 [dpdk-dev] [PATCH 0/6]Support VxLAN on fortville Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 1/6]i40e:vxlan packet identification Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 2/6]app/test-pmd:test vxlan " Jijiang Liu
@ 2014-08-12 3:12 ` Jijiang Liu
2014-08-12 10:40 ` Thomas Monjalon
2014-08-12 3:12 ` [dpdk-dev] [PATCH 4/6]app/testpmd:test VxLAN cloud " Jijiang Liu
` (2 subsequent siblings)
5 siblings, 1 reply; 12+ messages in thread
From: Jijiang Liu @ 2014-08-12 3:12 UTC (permalink / raw)
To: dev
Support VxLAN cloud filters, which are used to direct packets
to a queue based on MAC address and VLAN ID. The filter types supported include below:
1. Inner MAC and Inner VLAN ID
2. Inner MAC address and inner VLAN ID, tenant ID.
3. Inner MAC and tenant ID
4. Inner MAC address
5. Outer MAC address, tenant ID and inner MAC
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
lib/librte_ether/rte_ethdev.c | 50 ++++++++++++++++
lib/librte_ether/rte_ethdev.h | 72 ++++++++++++++++++++++++
lib/librte_pmd_i40e/i40e_ethdev.c | 112 +++++++++++++++++++++++++++++++++++++
3 files changed, 234 insertions(+), 0 deletions(-)
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 325edb1..0e5b16d 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1955,6 +1955,56 @@ rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
}
int
+rte_eth_dev_cloud_filter_set(uint8_t port_id,
+ struct rte_eth_cloud_filter_conf *cloud_filter,
+ uint8_t filter_count, uint8_t add)
+{
+ uint8_t i;
+ struct rte_eth_dev *dev;
+ struct rte_eth_cloud_filter_conf *pfilter;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ if (cloud_filter == NULL) {
+ PMD_DEBUG_TRACE("Invalid cloud_filter parameter\n");
+ return -EINVAL;
+ }
+ pfilter = cloud_filter;
+
+ dev = &rte_eth_devices[port_id];
+ for (i = 0; i < filter_count; i++, pfilter++) {
+ if (pfilter->queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid queue number\n");
+ return -EINVAL;
+ }
+
+ if (pfilter->inner_vlan > ETHER_MAX_VLAN_ID) {
+ PMD_DEBUG_TRACE("Invalid inner VLAN ID\n");
+ return -EINVAL;
+ }
+
+ if (is_zero_ether_addr(pfilter->outer_mac)) {
+ PMD_DEBUG_TRACE("port %d: Cannot add NULL outer MAC address\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (is_zero_ether_addr(pfilter->inner_mac)) {
+ PMD_DEBUG_TRACE("port %d: Cannot add NULL inner MAC address\n",
+ port_id);
+ return -EINVAL;
+ }
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->cloud_filter_set, -ENOTSUP);
+ return (*dev->dev_ops->cloud_filter_set)(dev, cloud_filter,
+ filter_count, add);
+}
+
+int
rte_eth_led_on(uint8_t port_id)
{
struct rte_eth_dev *dev;
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index d24907f..c95bab5 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -707,6 +707,28 @@ struct rte_fdir_conf {
uint8_t drop_queue;
};
+enum rte_cloud_filter_type {
+ RTE_CLOUD_FILTER_TYPE_NONE = 0,
+ RTE_CLOUD_FILTER_IMAC_IVLAN, /**< Filter by inner MAC and VLAN ID. */
+ RTE_CLOUD_FILTER_IMAC_IVLAN_TENID,
+ /**< Filter by inner MAC address and VLAN ID, tenant ID. */
+ RTE_CLOUD_FILTER_IMAC_TENID, /**< Filter by inner MAC and tenant ID. */
+ RTE_CLOUD_FILTER_IMAC, /**< Filter by inner MAC address */
+ RTE_CLOUD_FILTER_OMAC_TENID_IMAC,
+ /**< Filter by outer MAC address, tenant ID and Inner MAC */
+ RTE_CLOUD_FILTER_TYPE_MAX,
+};
+
+#define RTE_CLOUD_FLAGS_TO_QUEUE 1
+
+/**
+ * Select IPv4 or IPv6 Cloud filters.
+ */
+enum rte_cloud_iptype {
+ RTE_CLOUD_IPTYPE_IPV4 = 0, /**< IPv4. */
+ RTE_CLOUD_IPTYPE_IPV6, /**< IPv6. */
+};
+
/**
* Tunneled type.
*/
@@ -720,6 +742,26 @@ enum rte_eth_tunnel_type {
};
/**
+ * Cloud filter configuration.
+ */
+struct rte_eth_cloud_filter_conf {
+ struct ether_addr *outer_mac; /**< Outer MAC address filter. */
+ struct ether_addr *inner_mac; /**< Inner MAC address filter. */
+ uint16_t inner_vlan; /**< Inner VLAN filter. */
+ enum rte_cloud_iptype ip_type; /**< IP address type. */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 source address to match. */
+ uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
+ } ip_addr; /**< IPv4/IPv6 source address to match (union of above). */
+
+ uint8_t filter_type; /**< Filter type. */
+ uint8_t to_queue; /**< Use MAC and VLAN to point to a queue. */
+ enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant ID. */
+ uint16_t queue_id; /**< Queue number. */
+};
+
+/**
* UDP tunneling configuration.
*/
struct rte_eth_udp_tunnel {
@@ -1251,6 +1293,11 @@ typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
uint64_t q_msk);
/**< @internal Set VF TX rate */
+typedef int (*eth_cloud_filter_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_cloud_filter_conf *cloud_filter,
+ uint8_t filter_count, uint8_t add);
+/**< @internal Set cloud filter */
+
typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
struct rte_eth_vmdq_mirror_conf *mirror_conf,
uint8_t rule_id,
@@ -1446,6 +1493,7 @@ struct eth_dev_ops {
eth_set_vf_vlan_filter_t set_vf_vlan_filter; /**< Set VF VLAN filter */
eth_udp_tunnel_add_t udp_tunnel_add;
eth_udp_tunnel_del_t udp_tunnel_del;
+ eth_cloud_filter_set_t cloud_filter_set;
eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit */
eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit */
@@ -3344,6 +3392,30 @@ rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp,
uint8_t count);
+ /**
+ * Add Cloud filter configuration of Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param cloud_filter
+ * Where to store the current Tunneling UDP configuration
+ * of the Ethernet device.
+ * @param filter_count
+ * How many filters are going to be added.
+ * @param add
+ * 0: remove cloud filter
+ * 1: add cloud filter
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if hardware doesn't support tunnel type.
+ */
+int
+rte_eth_dev_cloud_filter_set(uint8_t port_id,
+ struct rte_eth_cloud_filter_conf *cloud_filter,
+ uint8_t filter_count, uint8_t add);
/**
* add syn filter
*
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index bb3d39a..29bb931 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -209,6 +209,9 @@ static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel,
uint8_t count);
+static int i40e_dev_cloud_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_cloud_filter_conf *cloud_filter,
+ uint8_t filter_count, uint8_t add);
static int i40e_pf_config_vxlan(struct i40e_pf *pf);
/* Default hash key buffer for RSS */
@@ -257,6 +260,7 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
.udp_tunnel_add = i40e_dev_udp_tunnel_add,
.udp_tunnel_del = i40e_dev_udp_tunnel_del,
+ .cloud_filter_set = i40e_dev_cloud_filter_set,
};
static struct eth_driver rte_i40e_pmd = {
@@ -3948,6 +3952,114 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
}
static int
+i40e_dev_get_filter_type(enum rte_cloud_filter_type filter_type, uint16_t *flag)
+{
+ switch (filter_type) {
+ case RTE_CLOUD_FILTER_IMAC_IVLAN:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ break;
+ case RTE_CLOUD_FILTER_IMAC_IVLAN_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ break;
+ case RTE_CLOUD_FILTER_IMAC_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
+ break;
+ case RTE_CLOUD_FILTER_OMAC_TENID_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
+ break;
+ case RTE_CLOUD_FILTER_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid cloud filter type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_cloud_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_cloud_filter_conf *cloud_filter,
+ uint8_t filter_count, uint8_t add)
+{
+ uint16_t ip_type;
+ uint8_t i, tun_type = 0;
+ enum i40e_status_code ret;
+ int val;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
+ struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+
+ cld_filter = rte_zmalloc("cloud_filter", filter_count *
+ sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
+ 0);
+
+ if (NULL == cld_filter) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.\n");
+ return -EINVAL;
+ }
+ pfilter = cld_filter;
+
+ for (i = 0; i < filter_count; i++, cloud_filter++, pfilter++) {
+
+ (void)rte_memcpy(&pfilter->outer_mac, cloud_filter->outer_mac,
+ sizeof(struct ether_addr));
+ (void)rte_memcpy(&pfilter->inner_mac, cloud_filter->inner_mac,
+ sizeof(struct ether_addr));
+
+ pfilter->inner_vlan = cloud_filter->inner_vlan;
+ if (cloud_filter->ip_type == RTE_CLOUD_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ (void)rte_memcpy(&pfilter->ipaddr.v4.data,
+ &cloud_filter->ip_addr,
+ sizeof(pfilter->ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ (void)rte_memcpy(&pfilter->ipaddr.v6.data,
+ &cloud_filter->ip_addr,
+ sizeof(pfilter->ipaddr.v6.data));
+ }
+
+ /* check tunnel type */
+ switch (cloud_filter->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
+ break;
+ default:
+ /* Other tunnel types is not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.\n");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ val = i40e_dev_get_filter_type(cloud_filter->filter_type,
+ &pfilter->flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
+ (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+ pfilter->tenant_id = cloud_filter->tenant_id;
+ pfilter->queue_number = cloud_filter->queue_id;
+ }
+
+ if (add)
+ ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter,
+ filter_count);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, cld_filter,
+ filter_count);
+ rte_free(cld_filter);
+
+ return ret;
+}
+
+static int
i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
{
uint8_t i;
--
1.7.7.6
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
2014-08-12 3:12 ` [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API Jijiang Liu
@ 2014-08-12 10:40 ` Thomas Monjalon
2014-08-13 8:23 ` Liu, Jijiang
0 siblings, 1 reply; 12+ messages in thread
From: Thomas Monjalon @ 2014-08-12 10:40 UTC (permalink / raw)
To: Jijiang Liu; +Cc: dev
Hi Jijiang,
2014-08-12 11:12, Jijiang Liu:
> Support VxLAN cloud filters,which is used to use MAC, VLAN to point
> to a queue. The filter types supported include below:
> 1. Inner MAC and Inner VLAN ID
> 2. Inner MAC address and inner VLAN ID, tenned ID.
> 3. Inner MAC and tenant ID
> 4. Inner MAC address
> 5. Outer MAC address, tenant ID and inner MAC
>
> Signed-off-by: jijiangl <jijiang.liu@intel.com>
> Acked-by: Helin Zhang <helin.zhang@intel.com>
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>
> Acked-by: Jing Chen <jing.d.chen@intel.com>
> ---
> lib/librte_ether/rte_ethdev.c | 50 ++++++++++++++++
> lib/librte_ether/rte_ethdev.h | 72 ++++++++++++++++++++++++
> lib/librte_pmd_i40e/i40e_ethdev.c | 112 +++++++++++++++++++++++++++++++++++++
> 3 files changed, 234 insertions(+), 0 deletions(-)
I prefer to have a separated commit for API (ethdev) and another one for
implementation (i40e).
About API, why name it cloud filter instead of VxLAN?
Thanks
--
Thomas
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
2014-08-12 10:40 ` Thomas Monjalon
@ 2014-08-13 8:23 ` Liu, Jijiang
2014-08-13 13:50 ` Thomas Monjalon
0 siblings, 1 reply; 12+ messages in thread
From: Liu, Jijiang @ 2014-08-13 8:23 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Tuesday, August 12, 2014 6:40 PM
> To: Liu, Jijiang
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
>
> Hi Jijiang,
>
> 2014-08-12 11:12, Jijiang Liu:
> > Support VxLAN cloud filters,which is used to use MAC, VLAN to point
> > to a queue. The filter types supported include below:
> > 1. Inner MAC and Inner VLAN ID
> > 2. Inner MAC address and inner VLAN ID, tenned ID.
> > 3. Inner MAC and tenant ID
> > 4. Inner MAC address
> > 5. Outer MAC address, tenant ID and inner MAC
> >
> > Signed-off-by: jijiangl <jijiang.liu@intel.com>
> > Acked-by: Helin Zhang <helin.zhang@intel.com>
> > Acked-by: Jingjing Wu <jingjing.wu@intel.com>
> > Acked-by: Jing Chen <jing.d.chen@intel.com>
> > ---
> > lib/librte_ether/rte_ethdev.c | 50 ++++++++++++++++
> > lib/librte_ether/rte_ethdev.h | 72 ++++++++++++++++++++++++
> > lib/librte_pmd_i40e/i40e_ethdev.c | 112
> > +++++++++++++++++++++++++++++++++++++
> > 3 files changed, 234 insertions(+), 0 deletions(-)
>
> I prefer to have a separated commit for API (ethdev) and another one for
> implementation (i40e).
>
> About API, why name it cloud filter instead of VxLAN?
>
Ok, I can separate the commit into two patches.
VxLAN is just one kind of tunnel type; there are other tunnel types, based on protocol type, which are listed below.
Tunnel Type:
* 0x0: VXLAN
* 0x1: NVGRE or other MAC in GRE
* 0x2: Geneve
0x3: IP in GRE
Currently, I have just implemented the VxLAN tunnel type, and we will support other tunnel types in the cloud filter API later.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
2014-08-13 8:23 ` Liu, Jijiang
@ 2014-08-13 13:50 ` Thomas Monjalon
2014-08-13 14:17 ` Alex Markuze
0 siblings, 1 reply; 12+ messages in thread
From: Thomas Monjalon @ 2014-08-13 13:50 UTC (permalink / raw)
To: Liu, Jijiang; +Cc: dev
2014-08-13 08:23, Liu, Jijiang:
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> > About API, why name it cloud filter instead of VxLAN?
>
> VxLAN is just a kind tunnel type, there are another tunnel types based
> on protocol type, they are below.
> Tunnel Type:
> * 0x0: VXLAN
> * 0x1: NVGRE or other MAC in GRE
> * 0x2: Geneve
> 0x3: IP in GRE
> Currently, I just implemented VxLAN tunnel type, and we will support
> another tunnel types in cloud filter API later.
OK, I understand. But cloud filter is just a marketing name.
Please let's stick to technical and precise names.
It seems these tunnels are L2 over IP, right?
--
Thomas
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
2014-08-13 13:50 ` Thomas Monjalon
@ 2014-08-13 14:17 ` Alex Markuze
2014-08-18 1:44 ` Liu, Jijiang
0 siblings, 1 reply; 12+ messages in thread
From: Alex Markuze @ 2014-08-13 14:17 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
All are L2 over L3(UDP) - General name - Network Overlay.
On Wed, Aug 13, 2014 at 4:50 PM, Thomas Monjalon
<thomas.monjalon@6wind.com> wrote:
> 2014-08-13 08:23, Liu, Jijiang:
>> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
>> > About API, why name it cloud filter instead of VxLAN?
>>
>> VxLAN is just a kind tunnel type, there are another tunnel types based
>> on protocol type, they are below.
>> Tunnel Type:
>> * 0x0: VXLAN
>> * 0x1: NVGRE or other MAC in GRE
>> * 0x2: Geneve
>> 0x3: IP in GRE
>> Currently, I just implemented VxLAN tunnel type, and we will support
>> another tunnel types in cloud filter API later.
>
> OK, I understand. But cloud filter is just a marketing name.
> Please let's stick to technical and precise names.
> It seems these tunnels are L2 over IP, right?
>
> --
> Thomas
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
2014-08-13 14:17 ` Alex Markuze
@ 2014-08-18 1:44 ` Liu, Jijiang
0 siblings, 0 replies; 12+ messages in thread
From: Liu, Jijiang @ 2014-08-18 1:44 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev
Hi Thomas,
> -----Original Message-----
> From: Alex Markuze [mailto:alex@weka.io]
> Sent: Wednesday, August 13, 2014 10:18 PM
> To: Thomas Monjalon
> Cc: Liu, Jijiang; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API
>
> All are L2 over L3(UDP) - General name - Network Overlay.
>
>
> On Wed, Aug 13, 2014 at 4:50 PM, Thomas Monjalon
> <thomas.monjalon@6wind.com> wrote:
> > 2014-08-13 08:23, Liu, Jijiang:
> >> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> >> > About API, why name it cloud filter instead of VxLAN?
> >>
> >> VxLAN is just a kind tunnel type, there are another tunnel types
> >> based on protocol type, they are below.
> >> Tunnel Type:
> >> * 0x0: VXLAN
> >> * 0x1: NVGRE or other MAC in GRE
> >> * 0x2: Geneve
> >> 0x3: IP in GRE
> >> Currently, I just implemented VxLAN tunnel type, and we will support
> >> another tunnel types in cloud filter API later.
> >
> > OK, I understand. But cloud filter is just a marketing name.
> > Please let's stick to technical and precise names.
> > It seems these tunnels are L2 over IP, right?
> >
> > --
> > Thomas
I agree. I will change the API name to rte_eth_dev_tunnel_filter_set(), which means the API supports tunneled packet filtering.
Any comments?
Thanks
Jijiang Liu
^ permalink raw reply [flat|nested] 12+ messages in thread
* [dpdk-dev] [PATCH 4/6]app/testpmd:test VxLAN cloud filter API
2014-08-12 3:12 [dpdk-dev] [PATCH 0/6]Support VxLAN on fortville Jijiang Liu
` (2 preceding siblings ...)
2014-08-12 3:12 ` [dpdk-dev] [PATCH 3/6]i40e:Add VxLAN Cloud filter API Jijiang Liu
@ 2014-08-12 3:12 ` Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 5/6]i40e:VxLAN Tx checksum offload Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 6/6]app/testpmd:test VxLAN " Jijiang Liu
5 siblings, 0 replies; 12+ messages in thread
From: Jijiang Liu @ 2014-08-12 3:12 UTC (permalink / raw)
To: dev
Add commands to test VxLAN cloud filter API.
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
app/test-pmd/cmdline.c | 142 ++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 142 insertions(+), 0 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 67cf63e..a5adac0 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -285,6 +285,14 @@ static void cmd_help_long_parsed(void *parsed_result,
" Set the outer VLAN TPID for Packet Filtering on"
" a port\n\n"
+ "cloud_filter add (port_id) (outer_mac) (inner_mac) (ip_addr) "
+ "(inner_vlan) (tunnel_type) (filter_type) (tenant_id) (queue_id)\n"
+ " add a cloud fiter of a port.\n\n"
+
+ "cloud_filter rm (port_id) (outer_mac) (inner_mac) (ip_addr) "
+ "(inner_vlan) (tunnel_type) (filter_type) (tenant_id) (queue_id)\n"
+ " remove a cloud fiter of a port.\n\n"
+
"rx_vxlan_port add (udp_port) (port_id)\n"
" Add an UDP port for VxLAN packet filter on a port\n\n"
@@ -6123,6 +6131,139 @@ cmdline_parse_inst_t cmd_vf_rate_limit = {
},
};
+/* *** ADD CLOUD FILTER OF A PORT *** */
+struct cmd_cloud_filter_result {
+ cmdline_fixed_string_t cmd;
+ cmdline_fixed_string_t what;
+ uint8_t port_id;
+ struct ether_addr outer_mac;
+ struct ether_addr inner_mac;
+ cmdline_ipaddr_t ip_value;
+ uint16_t inner_vlan;
+ cmdline_fixed_string_t tunnel_type;
+ cmdline_fixed_string_t filter_type;
+ uint32_t tenant_id;
+ uint16_t queue_num;
+};
+
+static void
+cmd_cloud_filter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_cloud_filter_result *res = parsed_result;
+ struct rte_eth_cloud_filter_conf cloud_filter_conf;
+
+ cloud_filter_conf.outer_mac = &res->outer_mac;
+ cloud_filter_conf.inner_mac = &res->inner_mac;
+ cloud_filter_conf.inner_vlan = res->inner_vlan;
+
+ if (res->ip_value.family == AF_INET) {
+ cloud_filter_conf.ip_addr.ipv4_addr =
+ res->ip_value.addr.ipv4.s_addr;
+ cloud_filter_conf.ip_type = RTE_CLOUD_IPTYPE_IPV4;
+ } else {
+ memcpy(&(cloud_filter_conf.ip_addr.ipv6_addr),
+ &(res->ip_value.addr.ipv6),
+ sizeof(struct in6_addr));
+ cloud_filter_conf.ip_type = RTE_CLOUD_IPTYPE_IPV6;
+ }
+
+ if (!strcmp(res->filter_type, "imac-ivlan"))
+ cloud_filter_conf.filter_type = RTE_CLOUD_FILTER_IMAC_IVLAN;
+ else if (!strcmp(res->filter_type, "imac-ivlan-tenid"))
+ cloud_filter_conf.filter_type =
+ RTE_CLOUD_FILTER_IMAC_IVLAN_TENID;
+ else if (!strcmp(res->filter_type, "imac-tenid"))
+ cloud_filter_conf.filter_type = RTE_CLOUD_FILTER_IMAC_TENID;
+ else if (!strcmp(res->filter_type, "imac"))
+ cloud_filter_conf.filter_type = RTE_CLOUD_FILTER_IMAC;
+ else if (!strcmp(res->filter_type, "omac-imac-tenid"))
+ cloud_filter_conf.filter_type =
+ RTE_CLOUD_FILTER_OMAC_TENID_IMAC;
+ else {
+ printf("The filter type is not supported");
+ return;
+ }
+
+ cloud_filter_conf.to_queue = RTE_CLOUD_FLAGS_TO_QUEUE;
+
+ if (!strcmp(res->tunnel_type, "vxlan"))
+ cloud_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+ else {
+ printf("Only VxLAN is supported now.\n");
+ return;
+ }
+
+ cloud_filter_conf.tenant_id = res->tenant_id;
+ cloud_filter_conf.queue_id = res->queue_num;
+ if (!strcmp(res->what, "add"))
+ rte_eth_dev_cloud_filter_set(res->port_id,
+ &cloud_filter_conf, 1, 1);
+ else
+ rte_eth_dev_cloud_filter_set(res->port_id,
+ &cloud_filter_conf, 1, 0);
+}
+cmdline_parse_token_string_t cmd_cloud_filter_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_cloud_filter_result,
+ cmd, "cloud_filter");
+cmdline_parse_token_string_t cmd_cloud_filter_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_cloud_filter_result,
+ what, "add#rm");
+cmdline_parse_token_num_t cmd_cloud_filter_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_cloud_filter_result,
+ port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_cloud_filter_outer_mac =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_cloud_filter_result,
+ outer_mac);
+cmdline_parse_token_etheraddr_t cmd_cloud_filter_inner_mac =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_cloud_filter_result,
+ inner_mac);
+cmdline_parse_token_num_t cmd_cloud_filter_innner_vlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_cloud_filter_result,
+ inner_vlan, UINT16);
+cmdline_parse_token_ipaddr_t cmd_cloud_filter_ip_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_cloud_filter_result,
+ ip_value);
+cmdline_parse_token_string_t cmd_cloud_filter_tunnel_type =
+ TOKEN_STRING_INITIALIZER(struct cmd_cloud_filter_result,
+ tunnel_type, "vxlan");
+
+cmdline_parse_token_string_t cmd_cloud_filter_filter_type =
+ TOKEN_STRING_INITIALIZER(struct cmd_cloud_filter_result,
+ filter_type, "imac-ivlan#imac-ivlan-tenid#imac-tenid#"
+ "imac#omac-imac-tenid");
+cmdline_parse_token_num_t cmd_cloud_filter_tenant_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_cloud_filter_result,
+ tenant_id, UINT32);
+cmdline_parse_token_num_t cmd_cloud_filter_queue_num =
+ TOKEN_NUM_INITIALIZER(struct cmd_cloud_filter_result,
+ queue_num, UINT16);
+
+cmdline_parse_inst_t cmd_cloud_filter = {
+ .f = cmd_cloud_filter_parsed,
+ .data = (void *)0,
+ .help_str = "add/rm cloud filter of a port: "
+ "cloud_filter add port_id outer_mac inner_mac ip "
+ "inner_vlan tunnel_type(vxlan) filter_type "
+ "(imac-ivlan|imac-ivlan-tenid|imac-tenid|imac|omac-imac-tenid) "
+ "tenant_id queue_num",
+ .tokens = {
+ (void *)&cmd_cloud_filter_cmd,
+ (void *)&cmd_cloud_filter_what,
+ (void *)&cmd_cloud_filter_port_id,
+ (void *)&cmd_cloud_filter_outer_mac,
+ (void *)&cmd_cloud_filter_inner_mac,
+ (void *)&cmd_cloud_filter_ip_value,
+ (void *)&cmd_cloud_filter_innner_vlan,
+ (void *)&cmd_cloud_filter_tunnel_type,
+ (void *)&cmd_cloud_filter_filter_type,
+ (void *)&cmd_cloud_filter_tenant_id,
+ (void *)&cmd_cloud_filter_queue_num,
+ NULL,
+ },
+};
+
/* *** CONFIGURE TUNNEL UDP PORT *** */
struct cmd_tunnel_udp_config {
cmdline_fixed_string_t cmd;
@@ -7475,6 +7616,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
(cmdline_parse_inst_t *)&cmd_queue_rate_limit,
(cmdline_parse_inst_t *)&cmd_vf_rate_limit,
+ (cmdline_parse_inst_t *)&cmd_cloud_filter,
(cmdline_parse_inst_t *)&cmd_tunnel_udp_config,
(cmdline_parse_inst_t *)&cmd_set_mirror_mask,
(cmdline_parse_inst_t *)&cmd_set_mirror_link,
--
1.7.7.6
^ permalink raw reply [flat|nested] 12+ messages in thread
* [dpdk-dev] [PATCH 5/6]i40e:VxLAN Tx checksum offload
2014-08-12 3:12 [dpdk-dev] [PATCH 0/6]Support VxLAN on fortville Jijiang Liu
` (3 preceding siblings ...)
2014-08-12 3:12 ` [dpdk-dev] [PATCH 4/6]app/testpmd:test VxLAN cloud " Jijiang Liu
@ 2014-08-12 3:12 ` Jijiang Liu
2014-08-12 3:12 ` [dpdk-dev] [PATCH 6/6]app/testpmd:test VxLAN " Jijiang Liu
5 siblings, 0 replies; 12+ messages in thread
From: Jijiang Liu @ 2014-08-12 3:12 UTC (permalink / raw)
To: dev
Support VxLAN TX checksum offload, which includes outer and inner L3 (IP)
and inner L4 (UDP, TCP and SCTP).
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
lib/librte_mbuf/rte_mbuf.h | 4 +++
lib/librte_pmd_i40e/i40e_rxtx.c | 58 +++++++++++++++++++++++++++++++++++++--
2 files changed, 59 insertions(+), 3 deletions(-)
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 2735f37..212ac3a 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -97,6 +97,8 @@ struct rte_ctrlmbuf {
#define PKT_RX_IEEE1588_PTP 0x0200 /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST 0x0400 /**< RX IEEE1588 L2/L4 timestamped packet.*/
+#define PKT_TX_VXLAN_CKSUM 0x0001 /**< Checksum of TX VxLAN pkt. computed by NIC.. */
+#define PKT_TX_IVLAN_PKT 0x0002 /**< TX packet is VxLAN packet with an inner VLAN. */
#define PKT_TX_VLAN_PKT 0x0800 /**< TX packet is a 802.1q VLAN packet. */
#define PKT_TX_IP_CKSUM 0x1000 /**< IP cksum of TX pkt. computed by NIC. */
#define PKT_TX_IPV4_CSUM 0x1000 /**< Alias of PKT_TX_IP_CKSUM. */
@@ -594,6 +596,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
m->pkt.in_port = 0xff;
m->ol_flags = 0;
+ m->reserved = 0;
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
m->pkt.data = (char*) m->buf_addr + buf_ofs;
@@ -658,6 +661,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
mi->pkt.pkt_len = mi->pkt.data_len;
mi->pkt.nb_segs = 1;
mi->ol_flags = md->ol_flags;
+ mi->reserved = md->reserved;
__rte_mbuf_sanity_check(mi, RTE_MBUF_PKT, 1);
__rte_mbuf_sanity_check(md, RTE_MBUF_PKT, 0);
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 83b9462..17633e9 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -415,12 +415,16 @@ i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
return ip_ptype_map[ptype];
}
+#define L4TUN_LEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)\
+ + sizeof(struct ether_hdr))
static inline void
i40e_txd_enable_checksum(uint32_t ol_flags,
uint32_t *td_cmd,
uint32_t *td_offset,
uint8_t l2_len,
- uint8_t l3_len)
+ uint8_t l3_len,
+ uint8_t inner_l3_len,
+ uint32_t *cd_tunneling)
{
if (!l2_len) {
PMD_DRV_LOG(DEBUG, "L2 length set to 0\n");
@@ -433,6 +437,31 @@ i40e_txd_enable_checksum(uint32_t ol_flags,
return;
}
+ /* VxLAN packet TX checksum offload */
+ if (unlikely(ol_flags & PKT_TX_VXLAN_CKSUM)) {
+ uint8_t l4tun_len;
+
+ /* packet with inner VLAN */
+ if (ol_flags & PKT_TX_IVLAN_PKT)
+ l4tun_len = L4TUN_LEN + sizeof(struct vlan_hdr);
+ else
+ l4tun_len = L4TUN_LEN;
+
+ if (ol_flags & PKT_TX_IPV4_CSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ (l4tun_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ l3_len = inner_l3_len;
+ }
+
/* Enable L3 checksum offloads */
if (ol_flags & PKT_TX_IPV4_CSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
@@ -614,6 +643,12 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_STATUS_SHIFT;
pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ /* reserved is used to store packet type for RX side */
+ mb->reserved = (uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
+
mb->pkt.data_len = pkt_len;
mb->pkt.pkt_len = pkt_len;
mb->pkt.vlan_macip.f.vlan_tci = rx_status &
@@ -860,6 +895,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+ rxm->reserved = (uint8_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
rxm->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->pkt.hash.rss =
@@ -1013,6 +1050,9 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+ first_seg->reserved = (uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
first_seg->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->pkt.hash.rss =
@@ -1055,6 +1095,9 @@ i40e_calc_context_desc(uint16_t flags)
{
uint16_t mask = 0;
+ if (flags | PKT_TX_VXLAN_CKSUM)
+ mask |= PKT_TX_VXLAN_CKSUM;
+
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
#endif
@@ -1074,6 +1117,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
volatile struct i40e_tx_desc *txr;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
+ uint32_t cd_tunneling_params;
uint16_t tx_id;
uint16_t nb_tx;
uint32_t td_cmd;
@@ -1083,6 +1127,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t ol_flags;
uint8_t l2_len;
uint8_t l3_len;
+ uint8_t inner_l3_len;
uint16_t nb_used;
uint16_t nb_ctx;
uint16_t tx_last;
@@ -1112,6 +1157,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
l2_len = tx_pkt->pkt.vlan_macip.f.l2_len;
l3_len = tx_pkt->pkt.vlan_macip.f.l3_len;
+ /**
+ * the reserved in mbuf is used to store innel L3
+ * header length.
+ */
+ inner_l3_len = tx_pkt->reserved;
+
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1158,15 +1209,16 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
td_cmd |= I40E_TX_DESC_CMD_ICRC;
/* Enable checksum offloading */
+ cd_tunneling_params = 0;
i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- l2_len, l3_len);
+ l2_len, l3_len, inner_l3_len,
+ &cd_tunneling_params);
if (unlikely(nb_ctx)) {
/* Setup TX context descriptor if required */
volatile struct i40e_tx_context_desc *ctx_txd =
(volatile struct i40e_tx_context_desc *)\
&txr[tx_id];
- uint32_t cd_tunneling_params = 0;
uint16_t cd_l2tag2 = 0;
uint64_t cd_type_cmd_tso_mss =
I40E_TX_DESC_DTYPE_CONTEXT;
--
1.7.7.6
^ permalink raw reply [flat|nested] 12+ messages in thread
* [dpdk-dev] [PATCH 6/6]app/testpmd:test VxLAN Tx checksum offload
2014-08-12 3:12 [dpdk-dev] [PATCH 0/6]Support VxLAN on fortville Jijiang Liu
` (4 preceding siblings ...)
2014-08-12 3:12 ` [dpdk-dev] [PATCH 5/6]i40e:VxLAN Tx checksum offload Jijiang Liu
@ 2014-08-12 3:12 ` Jijiang Liu
5 siblings, 0 replies; 12+ messages in thread
From: Jijiang Liu @ 2014-08-12 3:12 UTC (permalink / raw)
To: dev
Add test cases in testpmd to test VxLAN Tx checksum offload, which include IPv4 and IPv6 cases,
and also include inner L3 and L4 test cases.
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
app/test-pmd/config.c | 6 +-
app/test-pmd/csumonly.c | 194 ++++++++++++++++++++++++++++++++++++--
lib/librte_pmd_i40e/i40e_rxtx.c | 4 +-
3 files changed, 188 insertions(+), 16 deletions(-)
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index c72f6ee..c397640 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1719,9 +1719,9 @@ tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
uint16_t tx_ol_flags;
if (port_id_is_invalid(port_id))
return;
- /* Clear last 4 bits and then set L3/4 checksum mask again */
- tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
- ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
+ /* Clear last 8 bits and then set L3/4 checksum mask again */
+ tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFF00);
+ ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xff) | tx_ol_flags);
}
void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index e5a1f52..401f6dc 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -196,7 +196,6 @@ get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, uint16_t *l4_hdr)
return (uint16_t)cksum;
}
-
/*
* Forwarding of packets. Change the checksum field with HW or SW methods
* The HW/SW method selection depends on the ol_flags on every packet
@@ -209,10 +208,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
struct rte_mbuf *mb;
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *inner_eth_hdr;
+ struct ipv4_hdr *inner_ipv4_hdr = NULL;
struct ipv6_hdr *ipv6_hdr;
+ struct ipv6_hdr *inner_ipv6_hdr = NULL;
struct udp_hdr *udp_hdr;
+ struct udp_hdr *inner_udp_hdr;
struct tcp_hdr *tcp_hdr;
+ struct tcp_hdr *inner_tcp_hdr;
struct sctp_hdr *sctp_hdr;
+ struct sctp_hdr *inner_sctp_hdr;
uint16_t nb_rx;
uint16_t nb_tx;
@@ -221,12 +226,18 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint16_t pkt_ol_flags;
uint16_t tx_ol_flags;
uint16_t l4_proto;
+ uint16_t inner_l4_proto = 0;
uint16_t eth_type;
uint8_t l2_len;
uint8_t l3_len;
+ uint8_t inner_l2_len;
+ uint8_t inner_l3_len = 0;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
+ uint8_t ipv4_tunnel;
+ uint8_t ipv6_tunnel;
+ uint16_t ptype;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -261,8 +272,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
mb = pkts_burst[i];
l2_len = sizeof(struct ether_hdr);
pkt_ol_flags = mb->ol_flags;
+ ptype = mb->reserved;
ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
+ ipv4_tunnel = IS_ETH_IPV4_TUNNEL(ptype);
+ ipv6_tunnel = IS_ETH_IPV6_TUNNEL(ptype);
+
eth_hdr = (struct ether_hdr *) mb->pkt.data;
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
@@ -295,7 +310,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
* + ipv4 or ipv6
* + udp or tcp or sctp or others
*/
- if (pkt_ol_flags & PKT_RX_IPV4_HDR) {
+ if (pkt_ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) {
/* Do not support ipv4 option field */
l3_len = sizeof(struct ipv4_hdr) ;
@@ -325,15 +340,96 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (tx_ol_flags & 0x2) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
- /* Pseudo header sum need be set properly */
- udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr);
+ if (ipv4_tunnel)
+ udp_hdr->dgram_cksum = 0;
+ else
+ /* Pseudo header sum need be set properly */
+ udp_hdr->dgram_cksum =
+ get_ipv4_psd_sum(ipv4_hdr);
}
else {
/* SW Implementation, clear checksum field first */
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
- (uint16_t*)udp_hdr);
+ (uint16_t *)udp_hdr);
}
+
+ if (ipv4_tunnel) {
+
+ uint16_t len;
+
+ /* Check if inner L3/L4 checkum flag is set */
+ if (tx_ol_flags & 0xF0)
+ ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+ inner_l2_len = sizeof(struct ether_hdr);
+ inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len + l3_len
+ + ETHER_VXLAN_HLEN);
+
+ eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+ if (eth_type == ETHER_TYPE_VLAN) {
+ ol_flags |= PKT_TX_IVLAN_PKT;
+ inner_l2_len += sizeof(struct vlan_hdr);
+ eth_type = rte_be_to_cpu_16(*(uint16_t *)
+ ((uintptr_t)ð_hdr->ether_type +
+ sizeof(struct vlan_hdr)));
+ }
+
+ len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
+ if (eth_type == ETHER_TYPE_IPv4) {
+ inner_l3_len = sizeof(struct ipv4_hdr);
+ inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv4_hdr->next_proto_id;
+
+ if (tx_ol_flags & 0x10)
+ /* Do not delete, this is required by HW*/
+ inner_ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_IPV4_CSUM;
+
+ } else if (eth_type == ETHER_TYPE_IPv6) {
+ inner_l3_len = sizeof(struct ipv6_hdr);
+ inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv6_hdr->proto;
+ }
+ if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+
+ /* HW Offload */
+ ol_flags |= PKT_TX_UDP_CKSUM;
+ inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+ } else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+ /* HW Offload */
+ ol_flags |= PKT_TX_TCP_CKSUM;
+ inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+ } else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+ /* HW Offload */
+ ol_flags |= PKT_TX_SCTP_CKSUM;
+ inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ inner_sctp_hdr->cksum = 0;
+
+ /* Sanity check, only number of 4 bytes supported */
+ if ((rte_be_to_cpu_16(inner_ipv4_hdr->total_length) % 4) != 0)
+ printf("sctp payload must be a multiple "
+ "of 4 bytes for checksum offload");
+ }
+
+ mb->reserved = inner_l3_len;
+ }
+
}
else if (l4_proto == IPPROTO_TCP){
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -367,14 +463,11 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
/* End of L4 Handling*/
- }
- else if (pkt_ol_flags & PKT_RX_IPV6_HDR) {
-
+ } else if (pkt_ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len);
l3_len = sizeof(struct ipv6_hdr) ;
l4_proto = ipv6_hdr->proto;
- ol_flags |= PKT_TX_IPV6;
if (l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -382,15 +475,94 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (tx_ol_flags & 0x2) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
- udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
+ if (ipv6_tunnel)
+ udp_hdr->dgram_cksum = 0;
+ else
+ udp_hdr->dgram_cksum =
+ get_ipv6_psd_sum(ipv6_hdr);
}
else {
/* SW Implementation */
/* checksum field need be clear first */
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
- (uint16_t*)udp_hdr);
+ (uint16_t *)udp_hdr);
}
+
+ if (ipv6_tunnel) {
+
+ uint16_t len;
+
+ /* Check if inner L3/L4 checksum flag is set */
+ if (tx_ol_flags & 0xF0)
+ ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+ inner_l2_len = sizeof(struct ether_hdr);
+ inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len + l3_len + ETHER_VXLAN_HLEN);
+ eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+
+ if (eth_type == ETHER_TYPE_VLAN) {
+ ol_flags |= PKT_TX_IVLAN_PKT;
+ inner_l2_len += sizeof(struct vlan_hdr);
+ eth_type = rte_be_to_cpu_16(*(uint16_t *)
+ ((uintptr_t)&eth_hdr->ether_type +
+ sizeof(struct vlan_hdr)));
+ }
+
+ len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
+
+ if (eth_type == ETHER_TYPE_IPv4) {
+ inner_l3_len = sizeof(struct ipv4_hdr);
+ inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv4_hdr->next_proto_id;
+
+ /* HW offload */
+ if (tx_ol_flags & 0x10)
+ /* Do not delete, this is required by HW*/
+ inner_ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_IPV4_CSUM;
+ } else if (eth_type == ETHER_TYPE_IPv6) {
+ inner_l3_len = sizeof(struct ipv6_hdr);
+ inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv6_hdr->proto;
+ }
+
+ if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+ inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ /* HW offload */
+ ol_flags |= PKT_TX_UDP_CKSUM;
+ inner_udp_hdr->dgram_cksum = 0;
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+ } else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+ /* HW offload */
+ ol_flags |= PKT_TX_TCP_CKSUM;
+ inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+ } else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+ /* HW offload */
+ ol_flags |= PKT_TX_SCTP_CKSUM;
+ inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ inner_sctp_hdr->cksum = 0;
+ }
+
+ /* pass the inner l3 length to driver */
+ mb->reserved = inner_l3_len;
+ }
+
}
else if (l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 17633e9..4cb69dc 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -645,7 +645,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
/* reserved is used to store packet type for RX side */
- mb->reserved = (uint8_t)((qword1 &
+ mb->reserved = (uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
@@ -1050,7 +1050,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
- first_seg->reserved = (uint8_t)((qword1 &
+ first_seg->reserved = (uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
first_seg->ol_flags = pkt_flags;
--
1.7.7.6
^ permalink raw reply [flat|nested] 12+ messages in thread