From: Tetsuya Mukawa <mukawa@igel.co.jp>
To: dev@dpdk.org
Subject: [dpdk-dev] [RFC PATCH] lib/librte_vhost: cleanup white spaces, tabs and indents
Date: Mon, 17 Nov 2014 15:06:04 +0900
Message-ID: <1416204364-4149-1-git-send-email-mukawa@igel.co.jp>
In-Reply-To: <1416014087-22499-1-git-send-email-huawei.xie@intel.com>
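
Clean up whitespace across lib/librte_vhost: remove trailing spaces and
space-before-tab sequences, and drop redundant blank lines. No functional
change.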

---
 lib/librte_vhost/rte_virtio_net.h             |  4 +--
 lib/librte_vhost/vhost-cuse/vhost-net-cdev.c  |  4 +--
 lib/librte_vhost/vhost-cuse/virtio-net-cdev.c |  8 ++---
 lib/librte_vhost/vhost-user/fd_man.c          | 13 ++++----
 lib/librte_vhost/vhost-user/fd_man.h          |  2 +-
 lib/librte_vhost/vhost-user/vhost-net-user.c  | 37 +++++++++++-----------
 lib/librte_vhost/vhost-user/virtio-net-user.c | 44 +++++++++++++--------------
 lib/librte_vhost/vhost_rxtx.c                 |  2 +-
 lib/librte_vhost/virtio-net.c                 | 10 +++---
 9 files changed, 61 insertions(+), 63 deletions(-)

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 7a05dab..7d7d001 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -140,12 +140,12 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
 }
 
 /**
- *  Disable features in feature_mask. Returns 0 on success.
+ * Disable features in feature_mask. Returns 0 on success.
  */
 int rte_vhost_feature_disable(uint64_t feature_mask);
 
 /**
- *  Enable features in feature_mask. Returns 0 on success.
+ * Enable features in feature_mask. Returns 0 on success.
  */
 int rte_vhost_feature_enable(uint64_t feature_mask);
 
diff --git a/lib/librte_vhost/vhost-cuse/vhost-net-cdev.c b/lib/librte_vhost/vhost-cuse/vhost-net-cdev.c
index 4671643..688ec00 100644
--- a/lib/librte_vhost/vhost-cuse/vhost-net-cdev.c
+++ b/lib/librte_vhost/vhost-cuse/vhost-net-cdev.c
@@ -329,7 +329,7 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 		} else {
 			int fd;
 			file = *(const struct vhost_vring_file *)in_buf;
-			LOG_DEBUG(VHOST_CONFIG, 
+			LOG_DEBUG(VHOST_CONFIG,
 				"kick/call idx:%d fd:%d\n", file.index, file.fd);
 			if ((fd = eventfd_copy(file.fd, ctx.pid)) < 0){
 				fuse_reply_ioctl(req, -1, NULL, 0);
@@ -338,7 +338,7 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 			if (cmd == VHOST_SET_VRING_KICK) {
 				VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
 			}
-			else { 
+			else {
 				VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
 			}
 		}
diff --git a/lib/librte_vhost/vhost-cuse/virtio-net-cdev.c b/lib/librte_vhost/vhost-cuse/virtio-net-cdev.c
index 5c16aa5..7381140 100644
--- a/lib/librte_vhost/vhost-cuse/virtio-net-cdev.c
+++ b/lib/librte_vhost/vhost-cuse/virtio-net-cdev.c
@@ -288,7 +288,7 @@ cuse_set_mem_table(struct vhost_device_ctx ctx, const struct vhost_memory *mem_r
 			base_address =
 				regions[idx].userspace_address;
 			/* Map VM memory file */
-			if (host_memory_map(ctx.pid, base_address, 
+			if (host_memory_map(ctx.pid, base_address,
 				&mapped_address, &mapped_size) != 0) {
 				return -1;
 			}
@@ -297,18 +297,18 @@ cuse_set_mem_table(struct vhost_device_ctx ctx, const struct vhost_memory *mem_r
 
 	/* Check that we have a valid base address. */
 	if (base_address == 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, 
+		RTE_LOG(ERR, VHOST_CONFIG,
 			"Failed to find base address of qemu memory file.\n");
 		return -1;
 	}
 
 	for (idx = 0; idx < nregions; idx++) {
-		regions[idx].address_offset = 
+		regions[idx].address_offset =
 			mapped_address - base_address +
 			regions[idx].userspace_address -
 			regions[idx].guest_phys_address;
 	}
-	
+
 	ops->set_mem_table(ctx, &regions[0], nregions);
 	return 0;
 }
diff --git a/lib/librte_vhost/vhost-user/fd_man.c b/lib/librte_vhost/vhost-user/fd_man.c
index c7fd3f2..cbc656b 100644
--- a/lib/librte_vhost/vhost-user/fd_man.c
+++ b/lib/librte_vhost/vhost-user/fd_man.c
@@ -15,7 +15,7 @@
  * Returns the index in the fdset for a fd.
  * If fd is -1, it means to search for a free entry.
  * @return
- *   Index for the fd, or -1 if fd isn't in the fdset.
+ *  Index for the fd, or -1 if fd isn't in the fdset.
  */
 static int
 fdset_find_fd(struct fdset *pfdset, int fd)
@@ -23,8 +23,8 @@ fdset_find_fd(struct fdset *pfdset, int fd)
 	int i;
 
 	for (i = 0; i < pfdset->num && pfdset->fd[i].fd != fd; i++);
-		
-	return i ==  pfdset->num ? -1 : i;
+
+	return i == pfdset->num ? -1 : i;
 }
 
 static int
@@ -35,7 +35,7 @@ fdset_find_free_slot(struct fdset *pfdset)
 }
 
 static void
-fdset_add_fd(struct fdset  *pfdset, int idx, int fd, fd_cb rcb, 
+fdset_add_fd(struct fdset *pfdset, int idx, int fd, fd_cb rcb,
 		fd_cb wcb, uint64_t dat)
 {
 	struct fdentry *pfdentry = &pfdset->fd[idx];
@@ -111,7 +111,7 @@ fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, uint64_t dat)
 }
 
 /**
- *  Unregister the fd from the fdset.
+ * Unregister the fd from the fdset.
  */
 void
 fdset_del(struct fdset *pfdset, int fd)
@@ -148,11 +148,10 @@ fdset_event_dispatch(struct fdset *pfdset)
 
 		for (i = 0; i < num; i++) {
 			pfdentry = &pfdset->fd[i];
-			if (FD_ISSET(pfdentry->fd, &rfds)) 
+			if (FD_ISSET(pfdentry->fd, &rfds))
 				pfdentry->rcb(pfdentry->fd, pfdentry->dat);
 			if (FD_ISSET(pfdentry->fd, &wfds))
 				pfdentry->wcb(pfdentry->fd, pfdentry->dat);
 		}
-		
 	}
 }
diff --git a/lib/librte_vhost/vhost-user/fd_man.h b/lib/librte_vhost/vhost-user/fd_man.h
index 57cc81d..8df17b4 100644
--- a/lib/librte_vhost/vhost-user/fd_man.h
+++ b/lib/librte_vhost/vhost-user/fd_man.h
@@ -15,7 +15,7 @@ struct fdentry {
 
 struct fdset {
 	struct fdentry fd[MAX_FDS];
-	int num;	
+	int num;
 };
 
 
diff --git a/lib/librte_vhost/vhost-user/vhost-net-user.c b/lib/librte_vhost/vhost-user/vhost-net-user.c
index 34450f4..0b100ba 100644
--- a/lib/librte_vhost/vhost-user/vhost-net-user.c
+++ b/lib/librte_vhost/vhost-user/vhost-net-user.c
@@ -106,7 +106,7 @@ uds_socket(const char *path)
 	ret = listen(sockfd, 1);
 	if (ret == -1)
 		goto err;
-	
+
 	return sockfd;
 
 err:
@@ -129,7 +129,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 
 	iov.iov_base = buf;
 	iov.iov_len  = buflen;
-	
+
 	msgh.msg_iov = &iov;
 	msgh.msg_iovlen = 1;
 	msgh.msg_control = control;
@@ -148,7 +148,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 
 	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
 		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
-		if ( (cmsg->cmsg_level == SOL_SOCKET) && 
+		if ( (cmsg->cmsg_level == SOL_SOCKET) &&
 			(cmsg->cmsg_type == SCM_RIGHTS)) {
 			memcpy(fds, CMSG_DATA(cmsg), fdsize);
 			break;
@@ -162,14 +162,14 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg)
 {
 	int ret;
 
-	ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE, 
+	ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
 		msg->fds, VHOST_MEMORY_MAX_NREGIONS);
 	if (ret <= 0)
 		return ret;
 
 	if (msg->size) {
 		if (msg->size > sizeof(msg->payload)) {
-			RTE_LOG(ERR, VHOST_CONFIG, 
+			RTE_LOG(ERR, VHOST_CONFIG,
 				"%s: invalid size:%d\n", __func__, msg->size);
 			return -1;
 		}
@@ -182,7 +182,7 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg)
 		}
 	}
 
-	return ret; 
+	return ret;
 }
 
 static int
@@ -200,7 +200,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 	iov.iov_len = buflen;
 	msgh.msg_iov = &iov;
 	msgh.msg_iovlen = 1;
-	
+
 	if (fds && fd_num > 0) {
 		msgh.msg_control = control;
 		msgh.msg_controllen = sizeof(control);
@@ -222,7 +222,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 		RTE_LOG(ERR, VHOST_CONFIG,  "sendmsg error\n");
 		return -1;
 	}
-	
+
 	return 0;
 }
 
@@ -233,15 +233,15 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
 
 	msg->flags &= ~VHOST_USER_VERSION_MASK;
         msg->flags |= VHOST_USER_VERSION;
-        msg->flags |= VHOST_USER_REPLY_MASK;	
+        msg->flags |= VHOST_USER_REPLY_MASK;
 
-	ret = send_fd_message(sockfd, (char *)msg, 
+	ret = send_fd_message(sockfd, (char *)msg,
 		VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
-	
+
 	return ret;
 }
 
-/* call back when there is new connection.  */
+/* call back when there is new connection. */
 static void
 vserver_new_vq_conn(int fd, uint64_t dat)
 {
@@ -251,7 +251,7 @@ vserver_new_vq_conn(int fd, uint64_t dat)
 	struct vhost_device_ctx vdev_ctx = { 0 };
 
 	conn_fd = accept(fd, NULL, NULL);
-	RTE_LOG(INFO, VHOST_CONFIG, 
+	RTE_LOG(INFO, VHOST_CONFIG,
 		"%s: new connection is %d\n", __func__, conn_fd);
 	if (conn_fd < 0)
 		return;
@@ -259,8 +259,8 @@ vserver_new_vq_conn(int fd, uint64_t dat)
 	fh = ops->new_device(vdev_ctx);
 	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", fh);
 
-	fdset_add(&vserver->fdset, 
-		conn_fd, vserver_message_handler, NULL, fh);	
+	fdset_add(&vserver->fdset,
+		conn_fd, vserver_message_handler, NULL, fh);
 }
 
 /* callback when there is message on the connfd */
@@ -277,7 +277,7 @@ vserver_message_handler(int connfd, uint64_t dat)
 	ret = read_vhost_message(connfd, &msg);
 	if (ret < 0) {
 		printf("vhost read message failed\n");
-	
+
 		/*TODO: cleanup */
 		close(connfd);
 		fdset_del(&g_vhost_server->fdset, connfd);
@@ -286,7 +286,7 @@ vserver_message_handler(int connfd, uint64_t dat)
 		return;
 	} else if (ret == 0) {
 		/*TODO: cleanup */
-		RTE_LOG(INFO, VHOST_CONFIG, 
+		RTE_LOG(INFO, VHOST_CONFIG,
 			"vhost peer closed\n");
 		close(connfd);
 		fdset_del(&g_vhost_server->fdset, connfd);
@@ -296,7 +296,7 @@ vserver_message_handler(int connfd, uint64_t dat)
 	}
 	if (msg.request > VHOST_USER_MAX) {
 		/*TODO: cleanup */
-		RTE_LOG(INFO, VHOST_CONFIG, 
+		RTE_LOG(INFO, VHOST_CONFIG,
 			"vhost read incorrect message\n");
 		close(connfd);
 		fdset_del(&g_vhost_server->fdset, connfd);
@@ -363,7 +363,6 @@ vserver_message_handler(int connfd, uint64_t dat)
 
 	default:
 		break;
-	
 	}
 }
 
diff --git a/lib/librte_vhost/vhost-user/virtio-net-user.c b/lib/librte_vhost/vhost-user/virtio-net-user.c
index f38e6cc..4103977 100644
--- a/lib/librte_vhost/vhost-user/virtio-net-user.c
+++ b/lib/librte_vhost/vhost-user/virtio-net-user.c
@@ -65,7 +65,7 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
 	}
 
 	for (idx = 0; idx < memory.nregions;  idx++) {
-		uint64_t size = memory.regions[idx].userspace_addr - 
+		uint64_t size = memory.regions[idx].userspace_addr -
 			base_address + memory.regions[idx].memory_size;
 		if (mem_size < size)
 			mem_size = size;
@@ -75,28 +75,28 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
 	 * here we assume qemu will map only one file for memory allocation,
 	 * we only use fds[0] with offset 0.
 	 */
-	mapped_address = (uint64_t)(uintptr_t)mmap(NULL, mem_size, 
+	mapped_address = (uint64_t)(uintptr_t)mmap(NULL, mem_size,
 		PROT_READ | PROT_WRITE, MAP_SHARED, pmsg->fds[0], 0);
 
 	if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
 		RTE_LOG(ERR, VHOST_CONFIG, " mmap qemu guest failed.\n");
 		return -1;
 	}
-			
+
 	for (idx = 0; idx < memory.nregions; idx++) {
-		regions[idx].guest_phys_address = 
+		regions[idx].guest_phys_address =
 			memory.regions[idx].guest_phys_addr;
-		regions[idx].guest_phys_address_end = 
+		regions[idx].guest_phys_address_end =
 			memory.regions[idx].guest_phys_addr +
 			memory.regions[idx].memory_size;
 		regions[idx].memory_size = memory.regions[idx].memory_size;
-		regions[idx].userspace_address = 
+		regions[idx].userspace_address =
 			memory.regions[idx].userspace_addr;
 
-		regions[idx].address_offset = mapped_address - base_address + 
+		regions[idx].address_offset = mapped_address - base_address +
 			regions[idx].userspace_address -
 			regions[idx].guest_phys_address;
-		LOG_DEBUG(VHOST_CONFIG, 
+		LOG_DEBUG(VHOST_CONFIG,
 			"REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n",
 			idx,
 			(void *)(uintptr_t)regions[idx].guest_phys_address,
@@ -129,28 +129,28 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
 
 
 	for (idx = 0; idx < memory.nregions; idx++) {
-		regions[idx].guest_phys_address = 
+		regions[idx].guest_phys_address =
 			memory.regions[idx].guest_phys_addr;
-		regions[idx].guest_phys_address_end = 
+		regions[idx].guest_phys_address_end =
 			memory.regions[idx].guest_phys_addr +
 			memory.regions[idx].memory_size;
 		regions[idx].memory_size = memory.regions[idx].memory_size;
-		regions[idx].userspace_address = 
+		regions[idx].userspace_address =
 			memory.regions[idx].userspace_addr;
 /*
-		mapped_address = (uint64_t)(uintptr_t)mmap(NULL, 
-			regions[idx].memory_size, 
-			PROT_READ | PROT_WRITE, MAP_SHARED, 
-			pmsg->fds[idx], 
+		mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
+			regions[idx].memory_size,
+			PROT_READ | PROT_WRITE, MAP_SHARED,
+			pmsg->fds[idx],
 			memory.regions[idx].mmap_offset);
 */
 
 /* This is ugly */
-		mapped_address = (uint64_t)(uintptr_t)mmap(NULL, 
+		mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
 			regions[idx].memory_size +
-				memory.regions[idx].mmap_offset, 
-			PROT_READ | PROT_WRITE, MAP_SHARED, 
-			pmsg->fds[idx], 
+				memory.regions[idx].mmap_offset,
+			PROT_READ | PROT_WRITE, MAP_SHARED,
+			pmsg->fds[idx],
 			0);
 		printf("mapped to %p\n", (void *)mapped_address);
 
@@ -165,7 +165,7 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
 
 		regions[idx].address_offset = mapped_address -
 			regions[idx].guest_phys_address;
-		LOG_DEBUG(VHOST_CONFIG, 
+		LOG_DEBUG(VHOST_CONFIG,
 			"REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n",
 			idx,
 			(void *)(uintptr_t)regions[idx].guest_phys_address,
@@ -189,7 +189,7 @@ user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
 
 	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 	file.fd = pmsg->fds[0];
-	RTE_LOG(INFO, VHOST_CONFIG, 
+	RTE_LOG(INFO, VHOST_CONFIG,
 		"vring call idx:%d file:%d\n", file.index, file.fd);
 	ops->set_vring_call(ctx, &file);
 }
@@ -202,7 +202,7 @@ user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
 
 	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 	file.fd = pmsg->fds[0];
-	RTE_LOG(INFO, VHOST_CONFIG, 
+	RTE_LOG(INFO, VHOST_CONFIG,
 		"vring kick idx:%d file:%d\n", file.index, file.fd);
 	ops->set_vring_kick(ctx, &file);
 }
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 8ff0301..3a33eb0 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -217,7 +217,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
 	if (vq->last_used_idx == avail_idx)
 		return 0;
 
-	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") %s(%d->%d)\n", 
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") %s(%d->%d)\n",
 		dev->device_fh, __func__, vq->last_used_idx, avail_idx);
 
 	/* Prefetch available ring to retrieve head indexes. */
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 516e743..30661e3 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -82,7 +82,7 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
 		if ((qemu_va >= region->userspace_address) &&
 			(qemu_va <= region->userspace_address +
 			region->memory_size)) {
-			vhost_va = qemu_va +  region->guest_phys_address + 
+			vhost_va = qemu_va + region->guest_phys_address +
 				region->address_offset -
 				region->userspace_address;
 			break;
@@ -476,7 +476,7 @@ set_mem_table(struct vhost_device_ctx ctx,
 		if (mem->regions[regionidx].guest_phys_address == 0x0) {
 			mem->base_address =
 				mem->regions[regionidx].userspace_address;
-			mem->mapped_address = 
+			mem->mapped_address =
 				mem->regions[regionidx].address_offset;
 		}
 	}
@@ -602,7 +602,7 @@ get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
 	state->num = dev->virtqueue[state->index]->last_used_idx;
 
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
-		RTE_LOG(INFO, VHOST_CONFIG, 
+		RTE_LOG(INFO, VHOST_CONFIG,
 			"get_vring_base message is for release\n");
 		notify_ops->destroy_device(dev);
 		/*
@@ -626,7 +626,7 @@ get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
 		close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
 	dev->virtqueue[VIRTIO_TXQ]->callfd = -1;
 	/* We don't cleanup callfd here as we willn't get CALLFD again */
-	
+
 	dev->virtqueue[VIRTIO_RXQ]->desc = NULL;
 	dev->virtqueue[VIRTIO_RXQ]->avail = NULL;
 	dev->virtqueue[VIRTIO_RXQ]->used = NULL;
@@ -650,7 +650,7 @@ virtio_is_ready(struct virtio_net *dev, int index)
 	/* mq support in future.*/
 	vq1 = dev->virtqueue[index];
 	vq2 = dev->virtqueue[index ^ 1];
-	if (vq1 && vq2 && vq1->desc && vq2->desc && 
+	if (vq1 && vq2 && vq1->desc && vq2->desc &&
 		(vq1->kickfd > 0) && (vq1->callfd > 0) &&
 		(vq2->kickfd > 0) && (vq2->callfd > 0)) {
 		LOG_DEBUG(VHOST_CONFIG, "virtio is ready for processing.\n");
-- 
1.9.1
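
Several of the hunks above touch the address_offset bookkeeping that vhost
uses to turn a guest physical address (GPA) into a host virtual address (VVA)
in one step. Below is a minimal standalone sketch of that translation,
assuming a single made-up region; the struct and field names only loosely
mirror those in the diff, and region_sketch, gpa_to_vva_sketch and all the
constants are hypothetical, not part of the patch or the DPDK API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the per-region bookkeeping in the diff:
 * address_offset is precomputed so that vva = gpa + address_offset. */
struct region_sketch {
	uint64_t guest_phys_address;     /* start of region in guest physical space */
	uint64_t guest_phys_address_end; /* one past the last guest physical byte */
	uint64_t address_offset;         /* host mapping base minus guest_phys_address */
};

/* Translate a guest physical address to a host virtual address,
 * returning 0 when the address falls outside the region. */
static uint64_t
gpa_to_vva_sketch(const struct region_sketch *r, uint64_t guest_pa)
{
	if (guest_pa >= r->guest_phys_address &&
	    guest_pa < r->guest_phys_address_end)
		return guest_pa + r->address_offset;
	return 0;
}

int
main(void)
{
	/* Made-up region: 2 MB of guest memory starting at GPA 0x100000,
	 * mmap()ed in the host at 0x7f0000000000. */
	struct region_sketch r = {
		.guest_phys_address     = 0x100000,
		.guest_phys_address_end = 0x100000 + 0x200000,
		.address_offset         = 0x7f0000000000ULL - 0x100000,
	};

	printf("vva = %p\n",
		(void *)(uintptr_t)gpa_to_vva_sketch(&r, 0x101000));
	return 0;
}

Because set_mem_table() precomputes one offset per region, the data path
pays only a range check plus an addition per translation.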


Thread overview: 6+ messages
2014-11-15  1:14 [dpdk-dev] [PATCH RFC] lib/librte_vhost: vhost-user Huawei Xie
2014-11-17  6:04 ` Tetsuya Mukawa
2014-11-17  6:11   ` Tetsuya Mukawa
2014-11-17  6:06 ` Tetsuya Mukawa [this message]
2014-11-17  6:07 ` [dpdk-dev] [RFC PATCH 1/2] lib/librte_vhost: change macro name of include guard Tetsuya Mukawa
2014-11-17  6:07   ` [dpdk-dev] [RFC PATCH 2/2] lib/librte_vhost: Add device abstraction layer Tetsuya Mukawa
