From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from mga03.intel.com (mga03.intel.com [134.134.136.65])
 by dpdk.org (Postfix) with ESMTP id 4B1E77DEB
 for ; Fri, 26 Sep 2014 03:56:05 +0200 (CEST)
Received: from orsmga001.jf.intel.com ([10.7.209.18])
 by orsmga103.jf.intel.com with ESMTP; 25 Sep 2014 19:00:22 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.04,601,1406617200"; d="scan'208";a="579237670"
Received: from shvmail01.sh.intel.com ([10.239.29.42])
 by orsmga001.jf.intel.com with ESMTP; 25 Sep 2014 19:02:21 -0700
Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com
 [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id s8Q22Ijk029557;
 Fri, 26 Sep 2014 10:02:18 +0800
Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1])
 by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id
 s8Q22Gul014153; Fri, 26 Sep 2014 10:02:18 +0800
Received: (from jijiangl@localhost)
 by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id s8Q22FOc014149;
 Fri, 26 Sep 2014 10:02:15 +0800
From: Jijiang Liu
To: dev@dpdk.org
Date: Fri, 26 Sep 2014 10:02:03 +0800
Message-Id: <1411696929-13856-3-git-send-email-jijiang.liu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1411696929-13856-1-git-send-email-jijiang.liu@intel.com>
References: <1411696929-13856-1-git-send-email-jijiang.liu@intel.com>
Subject: [dpdk-dev] [PATCH v4 2/8] i40e: support VxLAN packet identification
 in librte_pmd_i40e
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
X-List-Received-Date: Fri, 26 Sep 2014 01:56:06 -0000

Support tunneling UDP port configuration on i40e in librte_pmd_i40e.

Currently, only VxLAN is implemented, which includes:
 - VxLAN UDP port initialization
 - the APIs to configure a VxLAN UDP port in librte_pmd_i40e

In addition, the packet type reported by the RX descriptor is stored in
mbuf->reserved on the RX path, so that VxLAN packets can be identified
later.
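
A minimal usage sketch follows (illustrative only, not part of the patch).
It assumes the generic ethdev wrapper introduced earlier in this series is
named rte_eth_dev_udp_tunnel_add() and takes (port_id, tunnel, count); the
ethdev port id and helper name below are only examples:

	#include <rte_ethdev.h>

	/* Register the default VxLAN UDP port (4789) on the given ethdev port. */
	static int
	app_add_vxlan_port(uint8_t port_id)
	{
		struct rte_eth_udp_tunnel tunnel = {
			.udp_port = 4789,
			.prot_type = RTE_TUNNEL_TYPE_VXLAN,
		};

		/* A single tunnel entry is passed, hence count = 1. */
		return rte_eth_dev_udp_tunnel_add(port_id, &tunnel, 1);
	}

Note that i40e_add_vxlan_port() returns -EINVAL unless the device was
configured with dev_conf.tunnel_type set to RTE_TUNNEL_TYPE_VXLAN, since
that is what sets I40E_FLAG_VXLAN in i40e_vsi_rx_init().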

Signed-off-by: Jijiang Liu
Acked-by: Helin Zhang
Acked-by: Jingjing Wu
Acked-by: Jing Chen
---
 config/common_linuxapp            |    5 +
 lib/librte_mbuf/rte_mbuf.h        |    2 +
 lib/librte_pmd_i40e/i40e_ethdev.c |  200 ++++++++++++++++++++++++++++++++++++-
 lib/librte_pmd_i40e/i40e_ethdev.h |    5 +
 lib/librte_pmd_i40e/i40e_rxtx.c   |   10 ++
 5 files changed, 221 insertions(+), 1 deletions(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 5bee910..75a4cd7 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -212,6 +212,11 @@ CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1
 
 #
+# Compile tunneling UDP port support
+#
+CONFIG_RTE_LIBRTE_TUNNEL_UDP_PORT=4789
+
+#
 # Compile burst-oriented VIRTIO PMD driver
 #
 CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 1c6e115..4955684 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -538,6 +538,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 	m->port = 0xff;
 
 	m->ol_flags = 0;
+	m->reserved = 0;
 	m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
 			RTE_PKTMBUF_HEADROOM : m->buf_len;
 
@@ -607,6 +608,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
 	mi->pkt_len = mi->data_len;
 	mi->nb_segs = 1;
 	mi->ol_flags = md->ol_flags;
+	mi->reserved = md->reserved;
 
 	__rte_mbuf_sanity_check(mi, 1);
 	__rte_mbuf_sanity_check(md, 0);
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index a00d6ca..ddc7ea0 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -189,7 +189,7 @@ static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
 static int i40e_veb_release(struct i40e_veb *veb);
 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
-						struct i40e_vsi *vsi);
+					struct i40e_vsi *vsi);
 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
@@ -205,6 +205,14 @@ static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
 				    struct rte_eth_rss_conf *rss_conf);
 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 				      struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+				   struct rte_eth_udp_tunnel *udp_tunnel,
+				   uint8_t count);
+static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+				   struct rte_eth_udp_tunnel *udp_tunnel,
+				   uint8_t count);
+static int i40e_pf_config_vxlan(struct i40e_pf *pf);
+
 
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
@@ -256,6 +264,8 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
 	.reta_query = i40e_dev_rss_reta_query,
 	.rss_hash_update = i40e_dev_rss_hash_update,
 	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
+	.udp_tunnel_add = i40e_dev_udp_tunnel_add,
+	.udp_tunnel_del = i40e_dev_udp_tunnel_del,
 };
 
 static struct eth_driver rte_i40e_pmd = {
@@ -2532,6 +2542,34 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	return 0;
 }
 
+static int
+i40e_vxlan_filters_init(struct i40e_pf *pf)
+{
+	uint8_t filter_index;
+	int ret = 0;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	if (!(pf->flags & I40E_FLAG_VXLAN))
+		return 0;
+
+	/* Init first entry in tunneling UDP table */
+	ret = i40e_aq_add_udp_tunnel(hw, RTE_LIBRTE_TUNNEL_UDP_PORT,
+				     I40E_AQC_TUNNEL_TYPE_VXLAN,
+				     &filter_index, NULL);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add UDP tunnel port %d "
+			    "with index=%d\n", RTE_LIBRTE_TUNNEL_UDP_PORT,
+			    filter_index);
+	} else {
+		pf->vxlan_bitmap |= 1;
+		pf->vxlan_ports[0] = RTE_LIBRTE_TUNNEL_UDP_PORT;
+		PMD_DRV_LOG(INFO, "Added UDP tunnel port %d with "
+			    "index=%d\n", RTE_LIBRTE_TUNNEL_UDP_PORT,
+			    filter_index);
+	}
+
+	return ret;
+}
+
 /* Setup a VSI */
 struct i40e_vsi *
 i40e_vsi_setup(struct i40e_pf *pf,
@@ -3163,6 +3201,12 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
 	uint16_t i;
 
 	i40e_pf_config_mq_rx(pf);
+
+	if (data->dev_conf.tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+		pf->flags |= I40E_FLAG_VXLAN;
+		i40e_pf_config_vxlan(pf);
+	}
+
 	for (i = 0; i < data->nb_rx_queues; i++) {
 		ret = i40e_rx_queue_init(data->rx_queues[i]);
 		if (ret != I40E_SUCCESS) {
@@ -4079,6 +4123,150 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
+{
+	uint8_t i;
+
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		if (pf->vxlan_ports[i] == port)
+			return i;
+	}
+
+	return -1;
+}
+
+static int
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+	int idx, ret;
+	uint8_t filter_idx;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	if (!(pf->flags & I40E_FLAG_VXLAN)) {
+		PMD_DRV_LOG(ERR, "VxLAN tunneling mode is not configured\n");
+		return -EINVAL;
+	}
+
+	idx = i40e_get_vxlan_port_idx(pf, port);
+
+	/* Check if port already exists */
+	if (idx >= 0) {
+		PMD_DRV_LOG(ERR, "Port %d already offloaded\n", port);
+		return -1;
+	}
+
+	/* Now check if there is space to add the new port */
+	idx = i40e_get_vxlan_port_idx(pf, 0);
+	if (idx < 0) {
+		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
+			    "not adding port %d\n", port);
+		return -ENOSPC;
+	}
+
+	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+				     &filter_idx, NULL);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add VxLAN UDP port %d\n", port);
+		return -1;
+	}
+
+	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d\n",
+		    port, filter_idx);
+
+	/* New port: add it and mark its index in the bitmap */
+	pf->vxlan_ports[idx] = port;
+	pf->vxlan_bitmap |= (1 << idx);
+
+	return 0;
+}
+
+static int
+i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+	int idx;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+	idx = i40e_get_vxlan_port_idx(pf, port);
+
+	if (idx < 0) {
+		PMD_DRV_LOG(ERR, "Port %d doesn't exist\n", port);
+		return -1;
+	}
+
+	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete VxLAN UDP port %d\n", port);
+		return -1;
+	}
+
+	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d\n",
+		    port, idx);
+
+	pf->vxlan_ports[idx] = 0;
+	pf->vxlan_bitmap &= ~(1 << idx);
+
+	return 0;
+}
+
+/* Add UDP tunneling port(s) */
+static int
+i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel, uint8_t count)
+{
+	uint16_t i;
+	int ret = 0;
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	for (i = 0; i < count; i++, udp_tunnel++) {
+		switch (udp_tunnel->prot_type) {
+		case RTE_TUNNEL_TYPE_VXLAN:
+			ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+			break;
+
+		case RTE_TUNNEL_TYPE_GENEVE:
+		case RTE_TUNNEL_TYPE_TEREDO:
+			PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.\n");
+			ret = -1;
+			break;
+
+		default:
+			PMD_DRV_LOG(ERR, "Invalid tunnel type\n");
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int
+i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel, uint8_t count)
+{
+	uint16_t i;
+	int ret = 0;
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	for (i = 0; i < count; i++, udp_tunnel++) {
+		switch (udp_tunnel->prot_type) {
+		case RTE_TUNNEL_TYPE_VXLAN:
+			ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
+			break;
+		case RTE_TUNNEL_TYPE_GENEVE:
+		case RTE_TUNNEL_TYPE_TEREDO:
+			PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.\n");
+			ret = -1;
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Invalid tunnel type\n");
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 /* Configure RSS */
 static int
 i40e_pf_config_rss(struct i40e_pf *pf)
@@ -4115,6 +4303,16 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	return i40e_hw_rss_hash_set(hw, &rss_conf);
 }
 
+/* Configure VxLAN */
+static int
+i40e_pf_config_vxlan(struct i40e_pf *pf)
+{
+	if (pf->flags & I40E_FLAG_VXLAN)
+		i40e_vxlan_filters_init(pf);
+
+	return 0;
+}
+
 static int
 i40e_pf_config_mq_rx(struct i40e_pf *pf)
 {
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h
index 64deef2..22d0628 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.h
+++ b/lib/librte_pmd_i40e/i40e_ethdev.h
@@ -60,6 +60,7 @@
 #define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4)
 #define I40E_FLAG_HEADER_SPLIT_ENABLED  (1ULL << 5)
 #define I40E_FLAG_FDIR                  (1ULL << 6)
+#define I40E_FLAG_VXLAN                 (1ULL << 7)
 #define I40E_FLAG_ALL (I40E_FLAG_RSS | \
 		       I40E_FLAG_DCB | \
 		       I40E_FLAG_VMDQ | \
@@ -216,6 +217,10 @@ struct i40e_pf {
 	uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
 	uint16_t vf_nb_qps; /* The number of queue pairs of VF */
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+
+	/* store VxLAN UDP ports */
+	uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+	uint16_t vxlan_bitmap; /* Vxlan bit mask */
 };
 
 enum pending_msg {
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 099699c..abdf406 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -638,6 +638,11 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 			pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 			pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
 			mb->ol_flags = pkt_flags;
+
+			/* reserved is used to store packet type for RX side */
+			mb->reserved = (uint8_t)((qword1 &
+				I40E_RXD_QW1_PTYPE_MASK) >>
+				I40E_RXD_QW1_PTYPE_SHIFT);
 			if (pkt_flags & PKT_RX_RSS_HASH)
 				mb->hash.rss = rte_le_to_cpu_32(\
 					rxdp->wb.qword0.hi_dword.rss);
@@ -873,6 +878,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+		rxm->reserved = (uint8_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
+				I40E_RXD_QW1_PTYPE_SHIFT);
 		rxm->ol_flags = pkt_flags;
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			rxm->hash.rss =
@@ -1027,6 +1034,9 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+		first_seg->reserved = (uint8_t)((qword1 &
+			I40E_RXD_QW1_PTYPE_MASK) >>
+			I40E_RXD_QW1_PTYPE_SHIFT);
 		first_seg->ol_flags = pkt_flags;
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			rxm->hash.rss =
-- 
1.7.7.6
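
For reference, a minimal sketch (illustrative only, not part of the patch) of
how an application might consume the packet type that the RX functions above
store in mbuf->reserved; the helper name and the ptype value of interest are
hypothetical:

	#include <rte_mbuf.h>

	/* Count the mbufs in a burst whose hardware packet type, as stored
	 * in m->reserved by the i40e RX path, matches a given ptype value. */
	static unsigned
	app_count_ptype(struct rte_mbuf **pkts, unsigned nb_pkts, uint8_t ptype)
	{
		unsigned i, hits = 0;

		for (i = 0; i < nb_pkts; i++) {
			if (pkts[i]->reserved == ptype)
				hits++;
		}

		return hits;
	}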