From: Jijiang Liu <jijiang.liu@intel.com>
To: dev@dpdk.org
Date: Mon, 21 Dec 2015 13:21:17 +0800
Message-Id: <1450675280-12056-2-git-send-email-jijiang.liu@intel.com>
X-Mailer: git-send-email 1.7.12.2
In-Reply-To: <1450675280-12056-1-git-send-email-jijiang.liu@intel.com>
References: <1450675280-12056-1-git-send-email-jijiang.liu@intel.com>
Subject: [dpdk-dev] [PATCH v6 1/4] vhost/lib: add vhost TX offload capabilities in vhost lib

Add vhost TX offload (CSUM and TSO) capabilities to the vhost lib.

Refer to the feature bits in the Virtual I/O Device (VIRTIO) Version 1.0
specification below:

VIRTIO_NET_F_CSUM (0)
    Device handles packets with partial checksum. This "checksum
    offload" is a common feature on modern network cards.

VIRTIO_NET_F_HOST_TSO4 (11)
    Device can receive TSOv4.

VIRTIO_NET_F_HOST_TSO6 (12)
    Device can receive TSOv6.

To support these features, the following changes are added:

1. Extend the 'VHOST_SUPPORTED_FEATURES' macro to negotiate the offload
   features.

2. Dequeue TX offload: convert the fields in virtio_net_hdr to the
   related fields in the mbuf.
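To illustrate point 2, here is a minimal, hypothetical consumer sketch
(not part of this patch): the function name forward_from_guest, the
burst size, and the port/queue arguments are illustrative only, and it
assumes the NIC TX queue was configured with checksum/TSO offload. It
only shows that mbufs returned by rte_vhost_dequeue_burst() now carry
the guest's offload requests, so a switching application can hand them
straight to a NIC TX queue.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_virtio_net.h>

#define BURST_SIZE 32

static void
forward_from_guest(struct virtio_net *dev, uint16_t queue_id,
		struct rte_mempool *mbuf_pool, uint8_t port, uint16_t txq)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, nb_tx;

	/* queue_id refers to the guest TX virtqueue being drained. */
	nb_rx = rte_vhost_dequeue_burst(dev, queue_id, mbuf_pool,
			pkts, BURST_SIZE);

	/*
	 * ol_flags (PKT_TX_TCP_CKSUM, PKT_TX_TCP_SEG, ...), l2_len,
	 * l3_len, l4_len and tso_segsz are already filled from the
	 * guest's virtio_net_hdr by the dequeue path, so no further
	 * per-packet parsing is needed here.
	 */
	nb_tx = rte_eth_tx_burst(port, txq, pkts, nb_rx);

	/* Free any packets the NIC TX queue could not accept. */
	while (nb_tx < nb_rx)
		rte_pktmbuf_free(pkts[nb_tx++]);
}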
Signed-off-by: Jijiang Liu <jijiang.liu@intel.com>
---
 lib/librte_vhost/vhost_rxtx.c |  103 +++++++++++++++++++++++++++++++++++++++++
 lib/librte_vhost/virtio-net.c |    6 ++-
 2 files changed, 108 insertions(+), 1 deletions(-)

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 9322ce6..47d5f85 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -37,7 +37,12 @@
 
 #include <rte_mbuf.h>
 #include <rte_memcpy.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
 #include <rte_virtio_net.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
 
 #include "vhost-net.h"
 
@@ -568,6 +573,97 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
 	return virtio_dev_rx(dev, queue_id, pkts, count);
 }
 
+static void
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+{
+	struct ipv4_hdr *ipv4_hdr;
+	struct ipv6_hdr *ipv6_hdr;
+	void *l3_hdr = NULL;
+	struct ether_hdr *eth_hdr;
+	uint16_t ethertype;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+	m->l2_len = sizeof(struct ether_hdr);
+	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (ethertype == ETHER_TYPE_VLAN) {
+		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+
+		m->l2_len += sizeof(struct vlan_hdr);
+		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	}
+
+	l3_hdr = (char *)eth_hdr + m->l2_len;
+
+	switch (ethertype) {
+	case ETHER_TYPE_IPv4:
+		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+		*l4_proto = ipv4_hdr->next_proto_id;
+		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		m->ol_flags |= PKT_TX_IPV4;
+		break;
+	case ETHER_TYPE_IPv6:
+		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+		*l4_proto = ipv6_hdr->proto;
+		m->l3_len = sizeof(struct ipv6_hdr);
+		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		m->ol_flags |= PKT_TX_IPV6;
+		break;
+	default:
+		m->l3_len = 0;
+		*l4_proto = 0;
+		break;
+	}
+}
+
+static inline void __attribute__((always_inline))
+vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+{
+	uint16_t l4_proto = 0;
+	void *l4_hdr = NULL;
+	struct tcp_hdr *tcp_hdr = NULL;
+
+	parse_ethernet(m, &l4_proto, &l4_hdr);
+	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
+			switch (hdr->csum_offset) {
+			case (offsetof(struct tcp_hdr, cksum)):
+				if (l4_proto == IPPROTO_TCP)
+					m->ol_flags |= PKT_TX_TCP_CKSUM;
+				break;
+			case (offsetof(struct udp_hdr, dgram_cksum)):
+				if (l4_proto == IPPROTO_UDP)
+					m->ol_flags |= PKT_TX_UDP_CKSUM;
+				break;
+			case (offsetof(struct sctp_hdr, cksum)):
+				if (l4_proto == IPPROTO_SCTP)
+					m->ol_flags |= PKT_TX_SCTP_CKSUM;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		case VIRTIO_NET_HDR_GSO_TCPV4:
+		case VIRTIO_NET_HDR_GSO_TCPV6:
+			tcp_hdr = (struct tcp_hdr *)l4_hdr;
+			m->ol_flags |= PKT_TX_TCP_SEG;
+			m->tso_segsz = hdr->gso_size;
+			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			break;
+		default:
+			RTE_LOG(WARNING, VHOST_DATA,
+				"unsupported gso type %u.\n", hdr->gso_type);
+			break;
+		}
+	}
+}
+
 uint16_t
 rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -576,11 +672,13 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	struct vhost_virtqueue *vq;
 	struct vring_desc *desc;
 	uint64_t vb_addr = 0;
+	uint64_t vb_net_hdr_addr = 0;
 	uint32_t head[MAX_PKT_BURST];
 	uint32_t used_idx;
 	uint32_t i;
 	uint16_t free_entries, entry_success = 0;
 	uint16_t avail_idx;
+	struct virtio_net_hdr *hdr = NULL;
 
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
 		RTE_LOG(ERR, VHOST_DATA,
@@ -632,6 +730,9 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 
 		desc = &vq->desc[head[entry_success]];
 
+		vb_net_hdr_addr = gpa_to_vva(dev, desc->addr);
+		hdr = (struct virtio_net_hdr *)((uintptr_t)vb_net_hdr_addr);
+
 		/* Discard first buffer as it is the virtio header */
 		if (desc->flags & VRING_DESC_F_NEXT) {
 			desc = &vq->desc[desc->next];
@@ -770,6 +871,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 			break;
 
 		m->nb_segs = seg_num;
+		if ((hdr->flags != 0) || (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE))
+			vhost_dequeue_offload(hdr, m);
 
 		pkts[entry_success] = m;
 		vq->last_used_idx++;
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 14278de..81bd309 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -77,7 +77,11 @@ static struct virtio_net_config_ll *ll_root;
 				(VHOST_SUPPORTS_MQ) | \
 				(1ULL << VIRTIO_F_VERSION_1) | \
 				(1ULL << VHOST_F_LOG_ALL) | \
-				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
+				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
+				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+				(1ULL << VIRTIO_NET_F_CSUM))
+
 
 static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
 
-- 
1.7.7.6
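A note on the negotiation change above: since VHOST_SUPPORTED_FEATURES
now advertises CSUM/TSO by default, an application that cannot consume
partially checksummed or GSO packets can still opt out before any guest
connects, using the existing rte_vhost_feature_disable() API. A small
hypothetical sketch (the helper name disable_guest_tx_offload is
illustrative and not part of this series):

#include <linux/virtio_net.h>
#include <rte_virtio_net.h>

/*
 * Hypothetical helper (not part of this patch): clear the newly
 * advertised offload bits before negotiation, e.g. when the egress
 * NIC offers no TX checksum/TSO offload.
 */
static void
disable_guest_tx_offload(void)
{
	uint64_t mask = (1ULL << VIRTIO_NET_F_CSUM) |
			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
			(1ULL << VIRTIO_NET_F_HOST_TSO6);

	rte_vhost_feature_disable(mask);
}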