DPDK patches and discussions
From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
	konstantin.ananyev@intel.com, thomas@monjalon.net,
	qi.z.zhang@intel.com
Subject: [dpdk-dev] [PATCH v2 3/7] eal/bsdapp: add interrupt thread
Date: Tue, 26 Jun 2018 11:53:14 +0100
Message-ID: <efc55c28d1d7698fcc838f032af45d68c9289384.1530009564.git.anatoly.burakov@intel.com>
In-Reply-To: <cover.1530009564.git.anatoly.burakov@intel.com>

Add an interrupt thread to FreeBSD. It is largely a copy of the
Linuxapp interrupt thread, with a few key differences:

* Use kevent instead of epoll (a minimal illustrative sketch of the
  kqueue pattern follows the sign-off below)
* Do not recreate the event queue when adding/removing interrupt
  sources; add/remove them to/from the queue on the fly instead
* No support for UIO/VFIO handles

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
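As background for the commit message above, here is a minimal standalone
sketch of the kqueue pattern the new interrupt thread relies on (for
illustration only, not part of the diff below). A pipe stands in for an
interrupt source fd: EV_ADD registers it, a blocking kevent() call waits
for it, and EV_DELETE removes it from the live queue, roughly mirroring
rte_intr_callback_register(), eal_intr_thread_main() and
rte_intr_callback_unregister() in this patch.

/* minimal kqueue sketch - a pipe stands in for an interrupt source fd */
#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>

int
main(void)
{
	struct kevent ke, ev;
	char buf[16];
	int pfd[2], kq, n;

	if (pipe(pfd) < 0 || (kq = kqueue()) < 0)
		return 1;

	/* register the read end for EVFILT_READ, as the register path does */
	EV_SET(&ke, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0)
		return 1;

	/* simulate an interrupt firing */
	write(pfd[1], "x", 1);

	/* block until an event is ready, as the interrupt thread does */
	n = kevent(kq, NULL, 0, &ev, 1, NULL);
	if (n > 0)
		read((int)ev.ident, buf, sizeof(buf)); /* drain before callbacks */

	/* remove the fd from the queue on the fly, as unregister does */
	EV_SET(&ke, pfd[0], EVFILT_READ, EV_DELETE, 0, 0, NULL);
	kevent(kq, &ke, 1, NULL, 0, NULL);

	close(kq);
	return 0;
}

Unlike the epoll-based Linuxapp implementation, no event set has to be
rebuilt when sources change; EV_ADD and EV_DELETE operate directly on
the existing queue.
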
 lib/librte_eal/bsdapp/eal/eal_interrupts.c | 436 ++++++++++++++++++++-
 1 file changed, 418 insertions(+), 18 deletions(-)

diff --git a/lib/librte_eal/bsdapp/eal/eal_interrupts.c b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
index 290d53ab9..d0db6c6ff 100644
--- a/lib/librte_eal/bsdapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
@@ -1,51 +1,451 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
  */
 
+#include <string.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/queue.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
+
 #include "eal_private.h"
 
+#define MAX_INTR_EVENTS 16
+
+/**
+ * union buffer for reading on different devices
+ */
+union rte_intr_read_buffer {
+	char charbuf[16];                /* for others */
+};
+
+TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
+TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
+
+struct rte_intr_callback {
+	TAILQ_ENTRY(rte_intr_callback) next;
+	rte_intr_callback_fn cb_fn;  /**< callback address */
+	void *cb_arg;                /**< parameter for callback */
+};
+
+struct rte_intr_source {
+	TAILQ_ENTRY(rte_intr_source) next;
+	struct rte_intr_handle intr_handle; /**< interrupt handle */
+	struct rte_intr_cb_list callbacks;  /**< user callbacks */
+	uint32_t active;
+};
+
+/* global spinlock for interrupt data operation */
+static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* interrupt sources list */
+static struct rte_intr_source_list intr_sources;
+
+/* interrupt handling thread */
+static pthread_t intr_thread;
+
+static volatile int kq = -1;
+
+static int
+intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
+{
+	ke->filter = EVFILT_READ;
+	ke->ident = ih->fd;
+
+	return 0;
+}
+
 int
 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
-			rte_intr_callback_fn cb,
-			void *cb_arg)
+		rte_intr_callback_fn cb, void *cb_arg)
 {
-	RTE_SET_USED(intr_handle);
-	RTE_SET_USED(cb);
-	RTE_SET_USED(cb_arg);
+	struct rte_intr_callback *callback = NULL;
+	struct rte_intr_source *src = NULL;
+	int ret = 0, add_event = 0;
 
-	return -ENOTSUP;
+	/* first do parameter checking */
+	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
+		RTE_LOG(ERR, EAL,
+			"Registering with invalid input parameter\n");
+		return -EINVAL;
+	}
+	if (kq < 0) {
+		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
+		return -ENODEV;
+	}
+
+	/* allocate a new interrupt callback entity */
+	callback = calloc(1, sizeof(*callback));
+	if (callback == NULL) {
+		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+		return -ENOMEM;
+	}
+	callback->cb_fn = cb;
+	callback->cb_arg = cb_arg;
+
+	rte_spinlock_lock(&intr_lock);
+
+	/* check if there is at least one callback registered for the fd */
+	TAILQ_FOREACH(src, &intr_sources, next) {
+		if (src->intr_handle.fd == intr_handle->fd) {
+			/* no event registered for this source yet */
+			if (TAILQ_EMPTY(&src->callbacks))
+				add_event = 1;
+
+			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+			ret = 0;
+			break;
+		}
+	}
+
+	/* no existing callbacks for this - add new source */
+	if (src == NULL) {
+		src = calloc(1, sizeof(*src));
+		if (src == NULL) {
+			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+			ret = -ENOMEM;
+			goto fail;
+		} else {
+			src->intr_handle = *intr_handle;
+			TAILQ_INIT(&src->callbacks);
+			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+			TAILQ_INSERT_TAIL(&intr_sources, src, next);
+			add_event = 1;
+			ret = 0;
+		}
+	}
+
+	/* add events to the queue */
+	if (add_event) {
+		struct kevent ke;
+
+		memset(&ke, 0, sizeof(ke));
+		ke.flags = EV_ADD; /* mark for addition to the queue */
+
+		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
+			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
+			ret = -ENODEV;
+			goto fail;
+		}
+
+		/**
+		 * add the intr file descriptor to the wait list.
+		 */
+		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
+			RTE_LOG(ERR, EAL, "Error adding fd %d kevent, %s\n",
+				src->intr_handle.fd, strerror(errno));
+			ret = -errno;
+			goto fail;
+		}
+	}
+	rte_spinlock_unlock(&intr_lock);
+
+	return ret;
+fail:
+	/* clean up */
+	if (src != NULL) {
+		TAILQ_REMOVE(&(src->callbacks), callback, next);
+		if (TAILQ_EMPTY(&(src->callbacks))) {
+			TAILQ_REMOVE(&intr_sources, src, next);
+			free(src);
+		}
+	}
+	free(callback);
+	rte_spinlock_unlock(&intr_lock);
+	return ret;
 }
 
 int
 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
-			rte_intr_callback_fn cb,
-			void *cb_arg)
+		rte_intr_callback_fn cb_fn, void *cb_arg)
 {
-	RTE_SET_USED(intr_handle);
-	RTE_SET_USED(cb);
-	RTE_SET_USED(cb_arg);
+	int ret;
+	struct rte_intr_source *src;
+	struct rte_intr_callback *cb, *next;
 
-	return -ENOTSUP;
+	/* do parameter checking first */
+	if (intr_handle == NULL || intr_handle->fd < 0) {
+		RTE_LOG(ERR, EAL,
+		"Unregistering with invalid input parameter\n");
+		return -EINVAL;
+	}
+	if (kq < 0) {
+		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
+		return -ENODEV;
+	}
+
+	rte_spinlock_lock(&intr_lock);
+
+	/* check if an interrupt source exists for this fd */
+	TAILQ_FOREACH(src, &intr_sources, next)
+		if (src->intr_handle.fd == intr_handle->fd)
+			break;
+
+	/* No interrupt source registered for the fd */
+	if (src == NULL) {
+		ret = -ENOENT;
+
+	/* interrupt source has some active callbacks right now. */
+	} else if (src->active != 0) {
+		ret = -EAGAIN;
+
+	/* ok to remove. */
+	} else {
+		struct kevent ke;
+
+		ret = 0;
+
+		/* remove it from the kqueue */
+		memset(&ke, 0, sizeof(ke));
+		ke.flags = EV_DELETE; /* mark for deletion from the queue */
+
+		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
+			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
+			ret = -ENODEV;
+			goto out;
+		}
+
+		/**
+		 * remove the intr file descriptor from the wait list.
+		 */
+		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
+			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
+				src->intr_handle.fd, strerror(errno));
+			ret = -errno;
+			goto out;
+		}
+
+		/* walk through the callbacks and remove all that match. */
+		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+			next = TAILQ_NEXT(cb, next);
+			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
+					cb->cb_arg == cb_arg)) {
+				TAILQ_REMOVE(&src->callbacks, cb, next);
+				free(cb);
+				ret++;
+			}
+		}
+
+		/* all callbacks for that source are removed. */
+		if (TAILQ_EMPTY(&src->callbacks)) {
+			TAILQ_REMOVE(&intr_sources, src, next);
+			free(src);
+		}
+	}
+out:
+	rte_spinlock_unlock(&intr_lock);
+
+	return ret;
 }
 
 int
-rte_intr_enable(const struct rte_intr_handle *intr_handle __rte_unused)
+rte_intr_enable(const struct rte_intr_handle *intr_handle)
 {
-	return -ENOTSUP;
+	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+		return 0;
+
+	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+		return -1;
+
+	switch (intr_handle->type) {
+	/* not used at this moment */
+	case RTE_INTR_HANDLE_ALARM:
+		return -1;
+	/* not used at this moment */
+	case RTE_INTR_HANDLE_DEV_EVENT:
+		return -1;
+	/* unknown handle type */
+	default:
+		RTE_LOG(ERR, EAL,
+			"Unknown handle type of fd %d\n",
+					intr_handle->fd);
+		return -1;
+	}
+
+	return 0;
 }
 
 int
-rte_intr_disable(const struct rte_intr_handle *intr_handle __rte_unused)
+rte_intr_disable(const struct rte_intr_handle *intr_handle)
 {
-	return -ENOTSUP;
+	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+		return 0;
+
+	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+		return -1;
+
+	switch (intr_handle->type) {
+	/* not used at this moment */
+	case RTE_INTR_HANDLE_ALARM:
+		return -1;
+	/* not used at this moment */
+	case RTE_INTR_HANDLE_DEV_EVENT:
+		return -1;
+	/* unknown handle type */
+	default:
+		RTE_LOG(ERR, EAL,
+			"Unknown handle type of fd %d\n",
+					intr_handle->fd);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+eal_intr_process_interrupts(struct kevent *events, int nfds)
+{
+	struct rte_intr_callback active_cb;
+	union rte_intr_read_buffer buf;
+	struct rte_intr_callback *cb;
+	struct rte_intr_source *src;
+	bool call = false;
+	int n, bytes_read;
+
+	for (n = 0; n < nfds; n++) {
+		int event_fd = events[n].ident;
+
+		rte_spinlock_lock(&intr_lock);
+		TAILQ_FOREACH(src, &intr_sources, next)
+			if (src->intr_handle.fd == event_fd)
+				break;
+		if (src == NULL) {
+			rte_spinlock_unlock(&intr_lock);
+			continue;
+		}
+
+		/* mark this interrupt source as active and release the lock. */
+		src->active = 1;
+		rte_spinlock_unlock(&intr_lock);
+
+		/* set the length to be read for different handle types */
+		switch (src->intr_handle.type) {
+		case RTE_INTR_HANDLE_ALARM:
+			bytes_read = 0;
+			call = true;
+			break;
+		case RTE_INTR_HANDLE_VDEV:
+		case RTE_INTR_HANDLE_EXT:
+			bytes_read = 0;
+			call = true;
+			break;
+		case RTE_INTR_HANDLE_DEV_EVENT:
+			bytes_read = 0;
+			call = true;
+			break;
+		default:
+			bytes_read = 1;
+			break;
+		}
+
+		if (bytes_read > 0) {
+			/**
+			 * read out to clear the ready-to-be-read flag
+			 * for the next kevent wait.
+			 */
+			bytes_read = read(event_fd, &buf, bytes_read);
+			if (bytes_read < 0) {
+				if (errno == EINTR || errno == EWOULDBLOCK)
+					continue;
+
+				RTE_LOG(ERR, EAL, "Error reading from file "
+					"descriptor %d: %s\n",
+					event_fd,
+					strerror(errno));
+			} else if (bytes_read == 0)
+				RTE_LOG(ERR, EAL, "Read nothing from file "
+					"descriptor %d\n", event_fd);
+			else
+				call = true;
+		}
+
+		/* grab the lock again to call callbacks and update status. */
+		rte_spinlock_lock(&intr_lock);
+
+		if (call) {
+			/* Finally, call all callbacks. */
+			TAILQ_FOREACH(cb, &src->callbacks, next) {
+
+				/* make a copy and unlock. */
+				active_cb = *cb;
+				rte_spinlock_unlock(&intr_lock);
+
+				/* call the actual callback */
+				active_cb.cb_fn(active_cb.cb_arg);
+
+				/* get the lock back. */
+				rte_spinlock_lock(&intr_lock);
+			}
+		}
+
+		/* we are done with this interrupt source, release it. */
+		src->active = 0;
+		rte_spinlock_unlock(&intr_lock);
+	}
+}
+
+static void *
+eal_intr_thread_main(void *arg __rte_unused)
+{
+	struct kevent events[MAX_INTR_EVENTS];
+	int nfds;
+
+	/* host thread, never break out */
+	for (;;) {
+		/* do not change anything, just wait */
+		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
+
+		/* kevent failed */
+		if (nfds < 0) {
+			if (errno == EINTR)
+				continue;
+			RTE_LOG(ERR, EAL,
+				"kevent wait failed: %s\n", strerror(errno));
+			break;
+		}
+		/* kevent timeout, will never happen here */
+		else if (nfds == 0)
+			continue;
+
+		/* kevent has at least one fd ready to read */
+		eal_intr_process_interrupts(events, nfds);
+	}
+	close(kq);
+	kq = -1;
+	return NULL;
 }
 
 int
 rte_eal_intr_init(void)
 {
-	return 0;
+	int ret = 0;
+
+	/* init the global interrupt source head */
+	TAILQ_INIT(&intr_sources);
+
+	kq = kqueue();
+	if (kq < 0) {
+		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
+		return -1;
+	}
+
+	/* create the host thread to wait for and handle interrupts */
+	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
+			eal_intr_thread_main, NULL);
+	if (ret != 0) {
+		rte_errno = -ret;
+		RTE_LOG(ERR, EAL,
+			"Failed to create thread for interrupt handling\n");
+	}
+
+	return ret;
 }
 
 int
-- 
2.17.1
