DPDK patches and discussions
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu <yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/5] vhost: reuse packed ring functions
Date: Mon, 21 Sep 2020 14:48:34 +0800
Message-ID: <20200921064837.15957-3-yong.liu@intel.com>
In-Reply-To: <20200921064837.15957-1-yong.liu@intel.com>

Move the parse_ethernet, offload and extbuf helper functions into the
header file so that the vhost vectorized path can reuse them.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
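
For illustration, a minimal sketch of how a vectorized dequeue path could
consume these helpers once they are exported through vhost.h. The caller
name and flow below are assumptions for the example, not part of this
series:

	/* Hypothetical caller, not part of this patch: a batched dequeue
	 * routine reusing the helpers now shared via vhost.h.
	 */
	static __rte_always_inline int
	vhost_dequeue_one_vec(struct virtio_net *dev, struct rte_mempool *mp,
			      struct virtio_net_hdr *hdr, uint32_t pkt_len,
			      struct rte_mbuf **pkt)
	{
		*pkt = virtio_dev_pktmbuf_alloc(dev, mp, pkt_len);
		if (unlikely(*pkt == NULL))
			return -ENOMEM;

		/* ... vectorized descriptor copy fills the mbuf here ... */

		if (virtio_net_with_host_offload(dev))
			vhost_dequeue_offload(hdr, *pkt);

		return 0;
	}

Since the helpers stay __rte_always_inline in the header, a caller like
this pays no function-call overhead for the shared code.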

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b556eb3bf6..5a5c945551 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -20,6 +20,10 @@
 #include <rte_rwlock.h>
 #include <rte_malloc.h>
 
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
 #include "rte_vhost.h"
 #include "rte_vdpa.h"
 #include "rte_vdpa_dev.h"
@@ -905,4 +909,215 @@ put_zmbuf(struct zcopy_mbuf *zmbuf)
 	zmbuf->in_use = 0;
 }
 
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
+static __rte_always_inline void
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	void *l3_hdr = NULL;
+	struct rte_ether_hdr *eth_hdr;
+	uint16_t ethertype;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+	m->l2_len = sizeof(struct rte_ether_hdr);
+	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		struct rte_vlan_hdr *vlan_hdr =
+			(struct rte_vlan_hdr *)(eth_hdr + 1);
+
+		m->l2_len += sizeof(struct rte_vlan_hdr);
+		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	}
+
+	l3_hdr = (char *)eth_hdr + m->l2_len;
+
+	switch (ethertype) {
+	case RTE_ETHER_TYPE_IPV4:
+		ipv4_hdr = l3_hdr;
+		*l4_proto = ipv4_hdr->next_proto_id;
+		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		m->ol_flags |= PKT_TX_IPV4;
+		break;
+	case RTE_ETHER_TYPE_IPV6:
+		ipv6_hdr = l3_hdr;
+		*l4_proto = ipv6_hdr->proto;
+		m->l3_len = sizeof(struct rte_ipv6_hdr);
+		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		m->ol_flags |= PKT_TX_IPV6;
+		break;
+	default:
+		m->l3_len = 0;
+		*l4_proto = 0;
+		*l4_hdr = NULL;
+		break;
+	}
+}
+
+static __rte_always_inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+	if (dev->features &
+			((1ULL << VIRTIO_NET_F_CSUM) |
+			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
+			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
+		return true;
+
+	return false;
+}
+
+static __rte_always_inline void
+vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+{
+	uint16_t l4_proto = 0;
+	void *l4_hdr = NULL;
+	struct rte_tcp_hdr *tcp_hdr = NULL;
+
+	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+		return;
+
+	parse_ethernet(m, &l4_proto, &l4_hdr);
+	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
+			switch (hdr->csum_offset) {
+			case (offsetof(struct rte_tcp_hdr, cksum)):
+				if (l4_proto == IPPROTO_TCP)
+					m->ol_flags |= PKT_TX_TCP_CKSUM;
+				break;
+			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
+				if (l4_proto == IPPROTO_UDP)
+					m->ol_flags |= PKT_TX_UDP_CKSUM;
+				break;
+			case (offsetof(struct rte_sctp_hdr, cksum)):
+				if (l4_proto == IPPROTO_SCTP)
+					m->ol_flags |= PKT_TX_SCTP_CKSUM;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		case VIRTIO_NET_HDR_GSO_TCPV4:
+		case VIRTIO_NET_HDR_GSO_TCPV6:
+			tcp_hdr = l4_hdr;
+			m->ol_flags |= PKT_TX_TCP_SEG;
+			m->tso_segsz = hdr->gso_size;
+			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			break;
+		case VIRTIO_NET_HDR_GSO_UDP:
+			m->ol_flags |= PKT_TX_UDP_SEG;
+			m->tso_segsz = hdr->gso_size;
+			m->l4_len = sizeof(struct rte_udp_hdr);
+			break;
+		default:
+			VHOST_LOG_DATA(WARNING,
+				"unsupported gso type %u.\n", hdr->gso_type);
+			break;
+		}
+	}
+}
+
+static void
+virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
+{
+	rte_free(opaque);
+}
+
+static int
+virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
+{
+	struct rte_mbuf_ext_shared_info *shinfo = NULL;
+	uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
+	uint16_t buf_len;
+	rte_iova_t iova;
+	void *buf;
+
+	/* Try to use pkt buffer to store shinfo to reduce the amount of memory
+	 * required, otherwise store shinfo in the new buffer.
+	 */
+	if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
+		shinfo = rte_pktmbuf_mtod(pkt,
+					  struct rte_mbuf_ext_shared_info *);
+	else {
+		total_len += sizeof(*shinfo) + sizeof(uintptr_t);
+		total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
+	}
+
+	if (unlikely(total_len > UINT16_MAX))
+		return -ENOSPC;
+
+	buf_len = total_len;
+	buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	/* Initialize shinfo */
+	if (shinfo) {
+		shinfo->free_cb = virtio_dev_extbuf_free;
+		shinfo->fcb_opaque = buf;
+		rte_mbuf_ext_refcnt_set(shinfo, 1);
+	} else {
+		shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
+					      virtio_dev_extbuf_free, buf);
+		if (unlikely(shinfo == NULL)) {
+			rte_free(buf);
+			VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+			return -1;
+		}
+	}
+
+	iova = rte_malloc_virt2iova(buf);
+	rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
+	rte_pktmbuf_reset_headroom(pkt);
+
+	return 0;
+}
+
+/*
+ * Allocate a host supported pktmbuf.
+ */
+static __rte_always_inline struct rte_mbuf *
+virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
+			 uint32_t data_len)
+{
+	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
+
+	if (unlikely(pkt == NULL)) {
+		VHOST_LOG_DATA(ERR,
+			"Failed to allocate memory for mbuf.\n");
+		return NULL;
+	}
+
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return pkt;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return pkt;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return pkt;
+
+	/* Data doesn't fit into the buffer and the host supports
+	 * only linear buffers
+	 */
+	rte_pktmbuf_free(pkt);
+
+	return NULL;
+}
+
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index bd9303c8a9..6107662685 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -32,12 +32,6 @@ rxvq_is_mergeable(struct virtio_net *dev)
 	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
 }
 
-static  __rte_always_inline bool
-virtio_net_is_inorder(struct virtio_net *dev)
-{
-	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
-}
-
 static bool
 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
@@ -1804,121 +1798,6 @@ rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
 	return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
 }
 
-static inline bool
-virtio_net_with_host_offload(struct virtio_net *dev)
-{
-	if (dev->features &
-			((1ULL << VIRTIO_NET_F_CSUM) |
-			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
-			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
-			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
-			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
-		return true;
-
-	return false;
-}
-
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
-{
-	struct rte_ipv4_hdr *ipv4_hdr;
-	struct rte_ipv6_hdr *ipv6_hdr;
-	void *l3_hdr = NULL;
-	struct rte_ether_hdr *eth_hdr;
-	uint16_t ethertype;
-
-	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-
-	m->l2_len = sizeof(struct rte_ether_hdr);
-	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
-
-	if (ethertype == RTE_ETHER_TYPE_VLAN) {
-		struct rte_vlan_hdr *vlan_hdr =
-			(struct rte_vlan_hdr *)(eth_hdr + 1);
-
-		m->l2_len += sizeof(struct rte_vlan_hdr);
-		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
-	}
-
-	l3_hdr = (char *)eth_hdr + m->l2_len;
-
-	switch (ethertype) {
-	case RTE_ETHER_TYPE_IPV4:
-		ipv4_hdr = l3_hdr;
-		*l4_proto = ipv4_hdr->next_proto_id;
-		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
-		m->ol_flags |= PKT_TX_IPV4;
-		break;
-	case RTE_ETHER_TYPE_IPV6:
-		ipv6_hdr = l3_hdr;
-		*l4_proto = ipv6_hdr->proto;
-		m->l3_len = sizeof(struct rte_ipv6_hdr);
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
-		m->ol_flags |= PKT_TX_IPV6;
-		break;
-	default:
-		m->l3_len = 0;
-		*l4_proto = 0;
-		*l4_hdr = NULL;
-		break;
-	}
-}
-
-static __rte_always_inline void
-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
-{
-	uint16_t l4_proto = 0;
-	void *l4_hdr = NULL;
-	struct rte_tcp_hdr *tcp_hdr = NULL;
-
-	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
-		return;
-
-	parse_ethernet(m, &l4_proto, &l4_hdr);
-	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
-			switch (hdr->csum_offset) {
-			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
-					m->ol_flags |= PKT_TX_TCP_CKSUM;
-				break;
-			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
-					m->ol_flags |= PKT_TX_UDP_CKSUM;
-				break;
-			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
-					m->ol_flags |= PKT_TX_SCTP_CKSUM;
-				break;
-			default:
-				break;
-			}
-		}
-	}
-
-	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
-		case VIRTIO_NET_HDR_GSO_TCPV4:
-		case VIRTIO_NET_HDR_GSO_TCPV6:
-			tcp_hdr = l4_hdr;
-			m->ol_flags |= PKT_TX_TCP_SEG;
-			m->tso_segsz = hdr->gso_size;
-			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
-			break;
-		case VIRTIO_NET_HDR_GSO_UDP:
-			m->ol_flags |= PKT_TX_UDP_SEG;
-			m->tso_segsz = hdr->gso_size;
-			m->l4_len = sizeof(struct rte_udp_hdr);
-			break;
-		default:
-			VHOST_LOG_DATA(WARNING,
-				"unsupported gso type %u.\n", hdr->gso_type);
-			break;
-		}
-	}
-}
-
 static __rte_noinline void
 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
 		struct buf_vector *buf_vec)
@@ -2145,96 +2024,6 @@ get_zmbuf(struct vhost_virtqueue *vq)
 	return NULL;
 }
 
-static void
-virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
-{
-	rte_free(opaque);
-}
-
-static int
-virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
-{
-	struct rte_mbuf_ext_shared_info *shinfo = NULL;
-	uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
-	uint16_t buf_len;
-	rte_iova_t iova;
-	void *buf;
-
-	/* Try to use pkt buffer to store shinfo to reduce the amount of memory
-	 * required, otherwise store shinfo in the new buffer.
-	 */
-	if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
-		shinfo = rte_pktmbuf_mtod(pkt,
-					  struct rte_mbuf_ext_shared_info *);
-	else {
-		total_len += sizeof(*shinfo) + sizeof(uintptr_t);
-		total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
-	}
-
-	if (unlikely(total_len > UINT16_MAX))
-		return -ENOSPC;
-
-	buf_len = total_len;
-	buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
-	if (unlikely(buf == NULL))
-		return -ENOMEM;
-
-	/* Initialize shinfo */
-	if (shinfo) {
-		shinfo->free_cb = virtio_dev_extbuf_free;
-		shinfo->fcb_opaque = buf;
-		rte_mbuf_ext_refcnt_set(shinfo, 1);
-	} else {
-		shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
-					      virtio_dev_extbuf_free, buf);
-		if (unlikely(shinfo == NULL)) {
-			rte_free(buf);
-			VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
-			return -1;
-		}
-	}
-
-	iova = rte_malloc_virt2iova(buf);
-	rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
-	rte_pktmbuf_reset_headroom(pkt);
-
-	return 0;
-}
-
-/*
- * Allocate a host supported pktmbuf.
- */
-static __rte_always_inline struct rte_mbuf *
-virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
-			 uint32_t data_len)
-{
-	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
-
-	if (unlikely(pkt == NULL)) {
-		VHOST_LOG_DATA(ERR,
-			"Failed to allocate memory for mbuf.\n");
-		return NULL;
-	}
-
-	if (rte_pktmbuf_tailroom(pkt) >= data_len)
-		return pkt;
-
-	/* attach an external buffer if supported */
-	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
-		return pkt;
-
-	/* check if chained buffers are allowed */
-	if (!dev->linearbuf)
-		return pkt;
-
-	/* Data doesn't fit into the buffer and the host supports
-	 * only linear buffers
-	 */
-	rte_pktmbuf_free(pkt);
-
-	return NULL;
-}
-
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
-- 
2.17.1

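A note on virtio_dev_extbuf_alloc() above: when the mbuf's tailroom can
hold struct rte_mbuf_ext_shared_info, shinfo is kept in the mbuf itself
and the external area only needs headroom plus data; otherwise space for
shinfo is reserved at the tail of the external buffer and carved out by
rte_pktmbuf_ext_shinfo_init_helper(). A standalone sketch of that
fallback path, assuming only that pkt is a freshly allocated mbuf (the
4096-byte size is an arbitrary example value):

	/* Illustration only: attach an external data area to an mbuf,
	 * letting the DPDK helper place shinfo at the buffer's tail.
	 */
	static int
	example_attach_extbuf(struct rte_mbuf *pkt)
	{
		uint16_t buf_len = 4096;
		struct rte_mbuf_ext_shared_info *shinfo;
		void *buf;

		buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
		if (buf == NULL)
			return -ENOMEM;

		shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
						virtio_dev_extbuf_free, buf);
		if (shinfo == NULL) {
			rte_free(buf);
			return -1;
		}

		/* The helper shrank buf_len: shinfo now occupies the
		 * buffer's tail, the rest is usable packet data.
		 */
		rte_pktmbuf_attach_extbuf(pkt, buf, rte_malloc_virt2iova(buf),
					  buf_len, shinfo);
		rte_pktmbuf_reset_headroom(pkt);

		return 0;
	}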

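On the header-length arithmetic in parse_ethernet() and
vhost_dequeue_offload(): IPv4 and TCP both encode their header length in
32-bit words, hence the multiply-by-four. For IPv4, the IHL is the low
nibble of version_ihl, so (version_ihl & 0x0f) * 4; a minimal header has
IHL 5, i.e. 20 bytes. For TCP, the length sits in the high nibble of
data_off, and (data_off & 0xf0) >> 2 is shorthand for
((data_off >> 4) & 0x0f) * 4; with no options data_off is 0x50, giving
(0x50 & 0xf0) >> 2 = 20 bytes.
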
Thread overview: 36+ messages
2020-08-19  3:24 [dpdk-dev] [PATCH v1 0/5] vhost add vectorized data path Marvin Liu
2020-08-19  3:24 ` [dpdk-dev] [PATCH v1 1/5] vhost: " Marvin Liu
2020-09-21  6:48   ` [dpdk-dev] [PATCH v2 0/5] vhost " Marvin Liu
2020-09-21  6:48     ` [dpdk-dev] [PATCH v2 1/5] vhost: " Marvin Liu
2020-09-21  6:48     ` Marvin Liu [this message]
2020-09-21  6:48     ` [dpdk-dev] [PATCH v2 3/5] vhost: prepare memory regions addresses Marvin Liu
2020-10-06 15:06       ` Maxime Coquelin
2020-09-21  6:48     ` [dpdk-dev] [PATCH v2 4/5] vhost: add packed ring vectorized dequeue Marvin Liu
2020-10-06 14:59       ` Maxime Coquelin
2020-10-08  7:05         ` Liu, Yong
2020-10-06 15:18       ` Maxime Coquelin
2020-10-09  7:59         ` Liu, Yong
2020-09-21  6:48     ` [dpdk-dev] [PATCH v2 5/5] vhost: add packed ring vectorized enqueue Marvin Liu
2020-10-06 15:00       ` Maxime Coquelin
2020-10-08  7:09         ` Liu, Yong
2020-10-06 13:34     ` [dpdk-dev] [PATCH v2 0/5] vhost add vectorized data path Maxime Coquelin
2020-10-08  6:20       ` Liu, Yong
2020-10-09  8:14   ` [dpdk-dev] [PATCH v3 " Marvin Liu
2020-10-09  8:14     ` [dpdk-dev] [PATCH v3 1/5] vhost: " Marvin Liu
2020-10-09  8:14     ` [dpdk-dev] [PATCH v3 2/5] vhost: reuse packed ring functions Marvin Liu
2020-10-09  8:14     ` [dpdk-dev] [PATCH v3 3/5] vhost: prepare memory regions addresses Marvin Liu
2020-10-09  8:14     ` [dpdk-dev] [PATCH v3 4/5] vhost: add packed ring vectorized dequeue Marvin Liu
2020-10-09  8:14     ` [dpdk-dev] [PATCH v3 5/5] vhost: add packed ring vectorized enqueue Marvin Liu
2020-10-12  8:21     ` [dpdk-dev] [PATCH v3 0/5] vhost add vectorized data path Maxime Coquelin
2020-10-12  9:10       ` Liu, Yong
2020-10-12  9:57         ` Maxime Coquelin
2020-10-12 13:24           ` Liu, Yong
2020-10-15 15:28       ` Liu, Yong
2020-10-15 15:35         ` Maxime Coquelin
2020-08-19  3:24 ` [dpdk-dev] [PATCH v1 2/5] vhost: reuse packed ring functions Marvin Liu
2020-08-19  3:24 ` [dpdk-dev] [PATCH v1 3/5] vhost: prepare memory regions addresses Marvin Liu
2020-08-19  3:24 ` [dpdk-dev] [PATCH v1 4/5] vhost: add packed ring vectorized dequeue Marvin Liu
2020-09-18 13:44   ` Maxime Coquelin
2020-09-21  6:26     ` Liu, Yong
2020-09-21  7:47       ` Liu, Yong
2020-08-19  3:24 ` [dpdk-dev] [PATCH v1 5/5] vhost: add packed ring vectorized enqueue Marvin Liu
