From: Jijiang Liu <jijiang.liu@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 04/10] examples/tep_termination: implement VXLAN packet processing
Date: Fri, 15 May 2015 14:08:55 +0800 [thread overview]
Message-ID: <1431670141-7835-5-git-send-email-jijiang.liu@intel.com> (raw)
In-Reply-To: <1431670141-7835-1-git-send-email-jijiang.liu@intel.com>
Implement the following functions:
1> VXLAN port configuration
2> VXLAN tunnel setup
3> VXLAN tunnel teardown
4> VXLAN packet processing for Rx side
5> VXLAN packet processing for Tx side
Signed-off-by: Jijiang Liu <jijiang.liu@intel.com>
Signed-off-by: Thomas Long <thomas.long at intel.com>
---
examples/tep_termination/Makefile | 2 +-
examples/tep_termination/main.c | 39 +++--
examples/tep_termination/vxlan.c | 174 +++++++++++++++
examples/tep_termination/vxlan.h | 17 ++-
examples/tep_termination/vxlan_setup.c | 367 ++++++++++++++++++++++++++++++++
5 files changed, 584 insertions(+), 15 deletions(-)
create mode 100644 examples/tep_termination/vxlan.c
create mode 100644 examples/tep_termination/vxlan_setup.c
diff --git a/examples/tep_termination/Makefile b/examples/tep_termination/Makefile
index 42a380b..0a0cf4b 100644
--- a/examples/tep_termination/Makefile
+++ b/examples/tep_termination/Makefile
@@ -47,7 +47,7 @@ endif
APP = tep_termination
# all source are stored in SRCS-y
-SRCS-y := main.c
+SRCS-y := main.c vxlan_setup.c vxlan.c
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index f50715c..7fd9ae5 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -133,6 +133,16 @@ struct vpool {
uint32_t buf_size;
} vpool_array[MAX_QUEUES+MAX_QUEUES];
+/* overlay packet operation */
+struct ol_switch_ops overlay_options = {
+ .port_configure = vxlan_port_init,
+ .tunnel_setup = vxlan_link,
+ .tunnel_destroy = vxlan_unlink,
+ .tx_handle = vxlan_tx_pkts,
+ .rx_handle = vxlan_rx_pkts,
+ .param_handle = NULL,
+};
+
/* Enable stats. */
uint32_t enable_stats = 0;
/* Enable retries on RX. */
@@ -312,9 +322,8 @@ tep_termination_parse_args(int argc, char **argv)
"Invalid argument for rx-retry [0|1]\n");
tep_termination_usage(prgname);
return -1;
- } else {
+ } else
enable_retry = ret;
- }
}
/* Specify the retries delay time (in useconds) on RX. */
@@ -326,9 +335,8 @@ tep_termination_parse_args(int argc, char **argv)
"Invalid argument for rx-retry-delay [0-N]\n");
tep_termination_usage(prgname);
return -1;
- } else {
+ } else
burst_rx_delay_time = ret;
- }
}
/* Specify the retries number on RX. */
@@ -340,9 +348,8 @@ tep_termination_parse_args(int argc, char **argv)
"Invalid argument for rx-retry-num [0-N]\n");
tep_termination_usage(prgname);
return -1;
- } else {
+ } else
burst_rx_retry_num = ret;
- }
}
/* Enable/disable stats. */
@@ -354,9 +361,8 @@ tep_termination_parse_args(int argc, char **argv)
"Invalid argument for stats [0..N]\n");
tep_termination_usage(prgname);
return -1;
- } else {
+ } else
enable_stats = ret;
- }
}
/* Set character device basename. */
@@ -448,6 +454,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
if (unlikely(len == MAX_PKT_BURST)) {
m_table = (struct rte_mbuf **)tx_q->m_table;
+ ret = overlay_options.tx_handle(ports[0], (uint16_t)tx_q->txq_id,
+ m_table, (uint16_t)tx_q->len);
/* Free any buffers not handled by TX and update the port stats. */
if (unlikely(ret < len)) {
do {
@@ -508,6 +516,10 @@ switch_worker(__rte_unused void *arg)
if (tx_q->len) {
LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ ret = overlay_options.tx_handle(ports[0],
+ (uint16_t)tx_q->txq_id,
+ (struct rte_mbuf **)tx_q->m_table,
+ (uint16_t)tx_q->len);
if (unlikely(ret < tx_q->len)) {
do {
rte_pktmbuf_free(tx_q->m_table[ret]);
@@ -542,6 +554,7 @@ switch_worker(__rte_unused void *arg)
if (unlikely(vdev->remove)) {
dev_ll = dev_ll->next;
+ overlay_options.tunnel_destroy(vdev);
vdev->ready = DEVICE_SAFE_REMOVE;
continue;
}
@@ -563,6 +576,7 @@ switch_worker(__rte_unused void *arg)
}
}
+ ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
if (enable_stats) {
rte_atomic64_add(
&dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
@@ -583,7 +597,7 @@ switch_worker(__rte_unused void *arg)
tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
/* If this is the first received packet we need to learn the MAC */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
- if (vdev->remove) {
+ if (vdev->remove || (overlay_options.tunnel_setup(vdev, pkts_burst[0]) == -1)) {
while (tx_count)
rte_pktmbuf_free(pkts_burst[--tx_count]);
}
@@ -992,14 +1006,12 @@ main(int argc, char *argv[])
ret = tep_termination_parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid argument\n");
-
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
if (rte_lcore_is_enabled(lcore_id))
lcore_ids[core_id++] = lcore_id;
/*set the number of swithcing cores available*/
num_switching_cores = rte_lcore_count()-1;
-
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
if (nb_ports > RTE_MAX_ETHPORTS)
@@ -1015,7 +1027,6 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Current enabled port number is %u,"
"but only %u port can be enabled\n", nb_ports, MAX_SUP_PORTS);
}
-
/* Create the mbuf pool. */
mbuf_pool = rte_mempool_create(
"MBUF_POOL",
@@ -1043,6 +1054,8 @@ main(int argc, char *argv[])
"Skipping disabled port %d\n", portid);
continue;
}
+ if (overlay_options.port_configure(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
}
/* Initialise all linked lists. */
@@ -1073,6 +1086,6 @@ main(int argc, char *argv[])
/* Start CUSE session. */
rte_vhost_driver_session_start();
- return 0;
+ return 0;
}
diff --git a/examples/tep_termination/vxlan.c b/examples/tep_termination/vxlan.c
new file mode 100644
index 0000000..59b5e2a
--- /dev/null
+++ b/examples/tep_termination/vxlan.c
@@ -0,0 +1,174 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_hash_crc.h>
+#include <rte_byteorder.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_ip.h>
+
+#include "main.h"
+#include "vxlan.h"
+
+extern struct vxlan_conf vxdev;
+extern struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
+extern struct ether_hdr app_l2_hdr[VXLAN_N_PORTS];
+
+/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
+#else
+#define _htons(x) (x)
+#endif
+
+/* *
+ * Parse an ethernet header to fill the ethertype, outer_l2_len, outer_l3_len and
+ * ipproto. This function is able to recognize IPv4/IPv6 with one optional vlan
+ * header.
+ */
+static void
+parse_ethernet(struct ether_hdr *eth_hdr, union tunnel_offload_info *info,
+ uint8_t *l4_proto)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ uint16_t ethertype;
+
+ info->outer_l2_len = sizeof(struct ether_hdr);
+ ethertype = eth_hdr->ether_type;
+
+ if (ethertype == _htons(ETHER_TYPE_VLAN)) {
+ struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ info->outer_l2_len += sizeof(struct vlan_hdr);
+ ethertype = vlan_hdr->eth_proto;
+ }
+
+ switch (ethertype) {
+ case _htons(ETHER_TYPE_IPv4):
+ ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->outer_l2_len);
+ info->outer_l3_len = sizeof(struct ipv4_hdr);
+ *l4_proto = ipv4_hdr->next_proto_id;
+ break;
+ case _htons(ETHER_TYPE_IPv6):
+ ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->outer_l2_len);
+ info->outer_l3_len = sizeof(struct ipv6_hdr);
+ *l4_proto = ipv6_hdr->proto;
+ break;
+ default:
+ info->outer_l3_len = 0;
+ *l4_proto = 0;
+ break;
+ }
+
+}
+
+int
+decapsulation(struct rte_mbuf *pkt)
+{
+ uint8_t l4_proto = 0;
+ uint16_t outer_header_len;
+ struct udp_hdr *udp_hdr;
+ union tunnel_offload_info info = { .data = 0 };
+ struct ether_hdr *phdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ parse_ethernet(phdr, &info, &l4_proto);
+ if (l4_proto != IPPROTO_UDP)
+ return -1;
+
+ udp_hdr = (struct udp_hdr *)((char *)phdr +
+ info.outer_l2_len + info.outer_l3_len);
+
+ /** check udp destination port, 4789 is the default vxlan port
+ * (rfc7348) or that the rx offload flag is set (i40e only
+ * currently)*/
+ if (udp_hdr->dst_port != rte_cpu_to_be_16(DEFAULT_VXLAN_PORT) &&
+ (pkt->ol_flags & (PKT_RX_TUNNEL_IPV4_HDR |
+ PKT_RX_TUNNEL_IPV6_HDR)) == 0)
+ return -1;
+ outer_header_len = info.outer_l2_len + info.outer_l3_len
+ + sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr);
+ rte_pktmbuf_adj(pkt, outer_header_len);
+ return 0;
+}
+
+int
+encapsulation(struct rte_mbuf *m, uint8_t vport_id)
+{
+ uint64_t ol_flags = 0;
+ uint32_t old_len = m->pkt_len, hash;
+ struct ether_hdr *phdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ /*Allocate space for new ethernet, IPv4, UDP and VXLAN headers*/
+ struct ether_hdr *pneth = (struct ether_hdr *) rte_pktmbuf_prepend(m,
+ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr)
+ + sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr));
+
+ struct ipv4_hdr *ip = (struct ipv4_hdr *) &pneth[1];
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ struct vxlan_hdr *vxlan = (struct vxlan_hdr *) &udp[1];
+
+ /* replace original Ethernet header with ours */
+ pneth = rte_memcpy(pneth, &app_l2_hdr[vport_id], sizeof(struct ether_hdr));
+
+ /* copy in IP header */
+ ip = rte_memcpy(ip, &app_ip_hdr[vport_id], sizeof(struct ipv4_hdr));
+ ip->total_length = rte_cpu_to_be_16(m->data_len - sizeof(struct ether_hdr));
+
+ /* outer IP checksum */
+ ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+ ip->hdr_checksum = 0;
+
+ m->outer_l2_len = sizeof(struct ether_hdr);
+ m->outer_l3_len = sizeof(struct ipv4_hdr);
+
+ m->ol_flags |= ol_flags;
+
+ /*VXLAN HEADER*/
+ vxlan->vx_flags = rte_cpu_to_be_32(VXLAN_HF_VNI);
+ vxlan->vx_vni = rte_cpu_to_be_32(vxdev.out_key << 8);
+
+ /*UDP HEADER*/
+ udp->dgram_cksum = 0;
+ udp->dgram_len = rte_cpu_to_be_16(old_len
+ + sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr));
+
+ udp->dst_port = rte_cpu_to_be_16(vxdev.dst_port);
+ hash = rte_hash_crc(phdr, 2 * ETHER_ADDR_LEN, phdr->ether_type);
+ udp->src_port = rte_cpu_to_be_16((((uint64_t) hash * PORT_RANGE) >> 32)
+ + PORT_MIN);
+
+ return 0;
+}
diff --git a/examples/tep_termination/vxlan.h b/examples/tep_termination/vxlan.h
index 8595eed..f000b93 100644
--- a/examples/tep_termination/vxlan.h
+++ b/examples/tep_termination/vxlan.h
@@ -1,4 +1,4 @@
-/*-
+/*
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
@@ -57,4 +57,19 @@ struct vxlan_conf {
struct vxlan_port port[VXLAN_N_PORTS]; /**< VXLAN configuration */
} __rte_cache_aligned;
+/* structure that caches offload info for the current packet */
+union tunnel_offload_info {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 Header Length. */
+ uint64_t outer_l2_len:7; /**< outer L2 Header Length */
+ uint64_t outer_l3_len:16; /**< outer L3 Header Length */
+ };
+} __rte_cache_aligned;
+
+int decapsulation(struct rte_mbuf *pkt);
+int encapsulation(struct rte_mbuf *m, uint8_t portid);
+
#endif /* _MAIN_H_ */
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
new file mode 100644
index 0000000..71414eb
--- /dev/null
+++ b/examples/tep_termination/vxlan_setup.c
@@ -0,0 +1,367 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_ring.h>
+#include <sys/param.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+
+#include "main.h"
+#include "rte_virtio_net.h"
+#include "vxlan.h"
+#include "vxlan_setup.h"
+
+#define VXLAN_ENCAP_HDR_SIZE (sizeof(struct ether_hdr) + IPV4_HEADER_LEN + \
+ UDP_HEADER_LEN + VXLAN_HEADER_LEN)
+
+#define IPV4_HEADER_LEN 20
+#define UDP_HEADER_LEN 8
+#define VXLAN_HEADER_LEN 8
+
+#define IP_VERSION 0x40
+#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
+#define IP_DEFTTL 64 /* from RFC 1340. */
+#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
+
+#define IP_DN_FRAGMENT_FLAG 0x0040
+
+/* Used to compare MAC addresses. */
+#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
+
+/* Configurable number of RX/TX ring descriptors */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 512
+
+extern uint16_t num_devices;
+extern uint8_t ports[RTE_MAX_ETHPORTS];
+
+/* ethernet addresses of ports */
+extern struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* heads for the main used and free linked lists for the data path. */
+static struct virtio_net_data_ll *ll_root_used;
+
+/* VXLAN device */
+struct vxlan_conf vxdev;
+
+struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
+struct ether_hdr app_l2_hdr[VXLAN_N_PORTS];
+
+/* local VTEP IP address */
+uint8_t vxlan_multicast_ips[2][4] = { {239, 1, 1, 1 }, {239, 1, 2, 1 } };
+
+/* remote VTEP IP address */
+uint8_t vxlan_overlay_ips[2][4] = { {192, 168, 10, 1}, {192, 168, 30, 1} };
+
+/* remote VTEP MAC address */
+uint8_t peer_mac[6] = {0x00, 0x11, 0x01, 0x00, 0x00, 0x01};
+
+/* Options for configuring ethernet port */
+static const struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+ .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
+ .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+/**
+ * The one or two device(s) that belongs to the same tenant ID can
+ * be assigned in a VM.
+ */
+const uint16_t tenant_id_conf[] = {
+ 1000, 1000, 1001, 1001, 1002, 1002, 1003, 1003,
+ 1004, 1004, 1005, 1005, 1006, 1006, 1007, 1007,
+ 1008, 1008, 1009, 1009, 1010, 1010, 1011, 1011,
+ 1012, 1012, 1013, 1013, 1014, 1014, 1015, 1015,
+ 1016, 1016, 1017, 1017, 1018, 1018, 1019, 1019,
+ 1020, 1020, 1021, 1021, 1022, 1022, 1023, 1023,
+ 1024, 1024, 1025, 1025, 1026, 1026, 1027, 1027,
+ 1028, 1028, 1029, 1029, 1030, 1030, 1031, 1031,
+};
+
+/**
+ * Initialises a given port using global settings and with the rx buffers
+ * coming from the mbuf_pool passed as parameter
+ */
+int
+vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+{
+ int retval;
+ uint16_t num_queues, q;
+ struct rte_eth_dev_info dev_info;
+ uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
+ const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ struct rte_eth_rxconf *rxconf;
+ struct rte_eth_txconf *txconf;
+
+ rte_eth_dev_info_get (port, &dev_info);
+
+ dev_info.max_rx_queues = num_devices;
+
+ if (dev_info.max_rx_queues > MAX_QUEUES) {
+ rte_exit(EXIT_FAILURE,
+ "please define MAX_QUEUES no less than %u in %s\n",
+ dev_info.max_rx_queues, __FILE__);
+ }
+
+ rxconf = &dev_info.default_rxconf;
+ txconf = &dev_info.default_txconf;
+ txconf->txq_flags = 0;
+ num_queues = num_devices;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ rx_rings = (uint16_t)num_queues;
+
+ /* Configure ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+ /* Setup the queues. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
+ rte_eth_dev_socket_id(port), rxconf,
+ mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
+ rte_eth_dev_socket_id(port), txconf);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the device. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ rte_eth_macaddr_get(port, &ports_eth_addr[port]);
+ RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
+ " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
+ (unsigned)port,
+ ports_eth_addr[port].addr_bytes[0],
+ ports_eth_addr[port].addr_bytes[1],
+ ports_eth_addr[port].addr_bytes[2],
+ ports_eth_addr[port].addr_bytes[3],
+ ports_eth_addr[port].addr_bytes[4],
+ ports_eth_addr[port].addr_bytes[5]);
+ return 0;
+}
+
+static int
+vxlan_rx_process(struct rte_mbuf *pkt)
+{
+ return decapsulation(pkt);
+}
+
+static int
+vxlan_tx_process(uint8_t vport_id, struct rte_mbuf *pkt)
+{
+ int ret = 0;
+
+ if ((pkt->pkt_len + VXLAN_ENCAP_HDR_SIZE) > ETHER_MAX_LEN) {
+ rte_pktmbuf_free(pkt);
+ return -1;
+ }
+
+ ret = encapsulation(pkt, vport_id);
+ return ret;
+}
+
+/*
+ * This function learns the MAC address of the device
+ */
+int
+vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
+{
+ int i;
+ struct ether_hdr *pkt_hdr;
+ struct virtio_net_data_ll *dev_ll;
+ struct virtio_net *dev = vdev->dev;
+ uint64_t portid = dev->device_fh;
+
+ dev_ll = ll_root_used;
+
+ if (unlikely(portid > VXLAN_N_PORTS)) {
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: Not configuring device,"
+ "as already have %d ports for VXLAN.", dev->device_fh, VXLAN_N_PORTS);
+ return -1;
+ }
+
+ /* Learn MAC address of guest device from packet */
+ pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ while (dev_ll != NULL) {
+ if (is_same_ether_addr(&(pkt_hdr->s_addr), &dev_ll->vdev->mac_address)) {
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing"
+ " MAC address and has not been registered.\n", dev->device_fh);
+ return -1;
+ }
+ dev_ll = dev_ll->next;
+ }
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ vdev->mac_address.addr_bytes[i] =
+ vxdev.port[portid].vport_mac.addr_bytes[i] =
+ pkt_hdr->s_addr.addr_bytes[i];
+ vxdev.port[portid].peer_mac.addr_bytes[i] = peer_mac[i];
+ }
+
+ /* Print out inner MAC and VNI info. */
+ RTE_LOG(INFO, VHOST_DATA, "(%d) MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VNI %d registered\n",
+ vdev->rx_q,
+ vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
+ vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
+ vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
+ tenant_id_conf[vdev->rx_q]);
+
+ vxdev.port[portid].vport_id = portid;
+
+ for (i = 0; i < 4; i++) {
+ /*Local VTEP IP */
+ vxdev.port_ip |= vxlan_multicast_ips[portid][i] << (8 * i);
+ /* remote VTEP IP */
+ vxdev.port[portid].peer_ip |= vxlan_overlay_ips[portid][i] << (8 * i);
+ }
+
+ vxdev.out_key = tenant_id_conf[vdev->rx_q];
+ ether_addr_copy(&vxdev.port[portid].peer_mac, &app_l2_hdr[portid].d_addr);
+ ether_addr_copy(&ports_eth_addr[0], &app_l2_hdr[portid].s_addr);
+ app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+
+ struct ipv4_hdr *ip;
+ ip = &app_ip_hdr[portid];
+ ip->version_ihl = IP_VHL_DEF;
+ ip->type_of_service = 0;
+ ip->total_length = 0;
+ ip->packet_id = 0;
+ ip->fragment_offset = IP_DN_FRAGMENT_FLAG;
+ ip->time_to_live = IP_DEFTTL;
+ ip->next_proto_id = IPPROTO_UDP;
+ ip->hdr_checksum = 0;
+ ip->src_addr = vxdev.port_ip;
+ ip->dst_addr = vxdev.port[portid].peer_ip;
+
+ /* Set device as ready for RX. */
+ vdev->ready = DEVICE_RX;
+
+ return 0;
+}
+
+/**
+ * Removes cloud filter. Ensures that nothing is adding buffers to the RX
+ * queue before disabling RX on the device.
+ */
+void
+vxlan_unlink(struct vhost_dev *vdev)
+{
+ unsigned i = 0, rx_count;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+
+ if (vdev->ready == DEVICE_RX) {
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ vdev->mac_address.addr_bytes[i] = 0;
+
+ /*Clear out the receive buffers*/
+ rx_count = rte_eth_rx_burst(ports[0],
+ (uint16_t)vdev->rx_q, pkts_burst, MAX_PKT_BURST);
+
+ while (rx_count) {
+ for (i = 0; i < rx_count; i++)
+ rte_pktmbuf_free(pkts_burst[i]);
+
+ rx_count = rte_eth_rx_burst(ports[0],
+ (uint16_t)vdev->rx_q, pkts_burst, MAX_PKT_BURST);
+ }
+ vdev->ready = DEVICE_MAC_LEARNING;
+ }
+}
+
+/* Transmit packets after encapsulating */
+int
+vxlan_tx_pkts (uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts) {
+ int ret = 0;
+ uint16_t count = nb_pkts, i;
+ uint16_t vport_id = queue_id - 1;
+
+ for (i = 0; i < count; i++)
+ ret = vxlan_tx_process(vport_id, tx_pkts[i]);
+
+ ret = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
+ return ret;
+
+}
+
+/* Check for decapsulation and pass packets directly to VIRTIO device */
+int
+vxlan_rx_pkts (struct virtio_net *dev, struct rte_mbuf **pkts_burst, uint32_t rx_count)
+{
+ uint32_t i = 0, count = 0;
+ int ret;
+ struct rte_mbuf *pkts_valid[rx_count];
+
+ for (i = 0; i < rx_count; i++) {
+ ret = vxlan_rx_process(pkts_burst[i]);
+ if (unlikely(ret < 0))
+ continue;
+
+ pkts_valid[count] = pkts_burst[i];
+ count++;
+ }
+
+ ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, rx_count);
+ return ret;
+}
--
1.7.7.6
next prev parent reply other threads:[~2015-05-15 6:09 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-05-15 6:08 [dpdk-dev] [PATCH 00/10] Add a VXLAN sample Jijiang Liu
2015-05-15 6:08 ` [dpdk-dev] [PATCH 01/10] examples/tep_termination:initialize the " Jijiang Liu
2015-05-15 23:53 ` Stephen Hemminger
2015-05-18 1:37 ` Liu, Jijiang
2015-05-15 23:55 ` Stephen Hemminger
2015-05-18 2:42 ` Liu, Jijiang
2015-05-15 6:08 ` [dpdk-dev] [PATCH 02/10] examples/tep_termination:define the basic VXLAN port information Jijiang Liu
2015-05-15 6:08 ` [dpdk-dev] [PATCH 03/10] examples/tep_termination:add the pluggable structures for VXLAN packet processing Jijiang Liu
2015-05-15 6:08 ` Jijiang Liu [this message]
2015-05-15 6:08 ` [dpdk-dev] [PATCH 05/10] examples/tep_termination:add UDP port configuration for UDP tunneling packet Jijiang Liu
2015-05-15 6:08 ` [dpdk-dev] [PATCH 06/10] examples/tep_termination:add tunnel filter type configuration Jijiang Liu
2015-05-15 6:08 ` [dpdk-dev] [PATCH 07/10] examples/tep_termination:add Tx checksum offload configuration for inner header Jijiang Liu
2015-05-15 6:08 ` [dpdk-dev] [PATCH 08/10] examples/tep_termination:add TSO offload configuration Jijiang Liu
2015-05-15 6:09 ` [dpdk-dev] [PATCH 09/10] examples/tep_termination:add bad Rx checksum statistics of inner IP and L4 Jijiang Liu
2015-05-15 6:09 ` [dpdk-dev] [PATCH 10/10] examples/tep_termination:add the configuration for encapsulation and the decapsulation Jijiang Liu
2015-05-15 8:18 ` [dpdk-dev] [PATCH 00/10] Add a VXLAN sample Mcnamara, John
2015-05-18 2:52 ` Liu, Jijiang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1431670141-7835-5-git-send-email-jijiang.liu@intel.com \
--to=jijiang.liu@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).