From: Spike Du <spiked@nvidia.com>
To: <matan@nvidia.com>, <viacheslavo@nvidia.com>, <orika@nvidia.com>,
<thomas@monjalon.net>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>
Subject: [PATCH v3 2/7] common/mlx5: share interrupt management
Date: Tue, 24 May 2022 18:20:36 +0300
Message-ID: <20220524152041.737154-3-spiked@nvidia.com>
In-Reply-To: <20220524152041.737154-1-spiked@nvidia.com>
There is a lot of duplicated code for creating and initializing
rte_intr_handle. Add a new mlx5_os API to do this and replace all
PMD-related code with this API.
Signed-off-by: Spike Du <spiked@nvidia.com>
---
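Usage sketch for reviewers: the create helper folds the previous
rte_intr_instance_alloc() / fcntl(O_NONBLOCK) / rte_intr_fd_set() /
rte_intr_type_set() / rte_intr_callback_register() sequence into one call,
and the destroy helper retries -EAGAIN from rte_intr_callback_unregister()
before freeing the handle. The callback, fd and argument names below are
placeholders and the include path is illustrative:

#include <stdbool.h>
#include <rte_interrupts.h>
#include "mlx5_common_os.h"	/* illustrative include path */

/* Placeholder event callback. */
static void
my_intr_cb(void *cb_arg)
{
	(void)cb_arg; /* Handle the device event here. */
}

static struct rte_intr_handle *
my_handler_install(int fd, void *arg)
{
	/*
	 * Allocate a shared handle, make fd non-blocking, set the type to
	 * RTE_INTR_HANDLE_EXT and register the callback in one call.
	 * On failure NULL is returned and rte_errno is set.
	 */
	return mlx5_os_interrupt_handler_create(RTE_INTR_INSTANCE_F_SHARED,
						true, fd, my_intr_cb, arg);
}

static void
my_handler_uninstall(struct rte_intr_handle *ih, void *arg)
{
	/* Unregister the callback (retrying on -EAGAIN) and free the handle. */
	mlx5_os_interrupt_handler_destroy(ih, my_intr_cb, arg);
}
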
drivers/common/mlx5/linux/mlx5_common_os.c | 131 ++++++++++++++++++
drivers/common/mlx5/linux/mlx5_common_os.h | 11 ++
drivers/common/mlx5/version.map | 2 +
drivers/common/mlx5/windows/mlx5_common_os.h | 24 ++++
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 71 ----------
drivers/net/mlx5/linux/mlx5_os.c | 132 ++++---------------
drivers/net/mlx5/linux/mlx5_socket.c | 53 +-------
drivers/net/mlx5/mlx5.h | 2 -
drivers/net/mlx5/mlx5_txpp.c | 28 +---
drivers/net/mlx5/windows/mlx5_ethdev_os.c | 22 ----
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 48 +------
11 files changed, 217 insertions(+), 307 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_common_os.c b/drivers/common/mlx5/linux/mlx5_common_os.c
index d40cfd5cd1..f10a981a37 100644
--- a/drivers/common/mlx5/linux/mlx5_common_os.c
+++ b/drivers/common/mlx5/linux/mlx5_common_os.c
@@ -11,6 +11,7 @@
#endif
#include <dirent.h>
#include <net/if.h>
+#include <fcntl.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
@@ -964,3 +965,133 @@ mlx5_os_wrapped_mkey_destroy(struct mlx5_pmd_wrapped_mr *pmd_mr)
claim_zero(mlx5_glue->dereg_mr(pmd_mr->obj));
memset(pmd_mr, 0, sizeof(*pmd_mr));
}
+
+/**
+ * Helper to create and initialize an rte_intr_handle.
+ *
+ * @param[in] mode
+ * Whether the interrupt instance can be shared between primary and
+ * secondary processes.
+ * @param[in] set_fd_nonblock
+ * Whether to set fd to O_NONBLOCK.
+ * @param[in] fd
+ * File descriptor to set in the created intr_handle.
+ * @param[in] cb
+ * Callback to register for intr_handle.
+ * @param[in] cb_arg
+ * Callback argument for cb.
+ *
+ * @return
+ * - Interrupt handle on success.
+ * - NULL on failure, with rte_errno set.
+ */
+struct rte_intr_handle *
+mlx5_os_interrupt_handler_create(int mode, bool set_fd_nonblock, int fd,
+ rte_intr_callback_fn cb, void *cb_arg)
+{
+ struct rte_intr_handle *tmp_intr_handle;
+ int ret, flags;
+
+ tmp_intr_handle = rte_intr_instance_alloc(mode);
+ if (!tmp_intr_handle) {
+ rte_errno = ENOMEM;
+ goto err;
+ }
+ if (set_fd_nonblock) {
+ flags = fcntl(fd, F_GETFL);
+ ret = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret) {
+ rte_errno = errno;
+ goto err;
+ }
+ }
+ ret = rte_intr_fd_set(tmp_intr_handle, fd);
+ if (ret)
+ goto err;
+ ret = rte_intr_type_set(tmp_intr_handle, RTE_INTR_HANDLE_EXT);
+ if (ret)
+ goto err;
+ ret = rte_intr_callback_register(tmp_intr_handle, cb, cb_arg);
+ if (ret) {
+ rte_errno = -ret;
+ goto err;
+ }
+ return tmp_intr_handle;
+err:
+ if (tmp_intr_handle)
+ rte_intr_instance_free(tmp_intr_handle);
+ return NULL;
+}
+
+/* Safe unregistration for interrupt callback. */
+static void
+mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
+ rte_intr_callback_fn cb_fn, void *cb_arg)
+{
+ uint64_t twait = 0;
+ uint64_t start = 0;
+
+ do {
+ int ret;
+
+ ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
+ if (ret >= 0)
+ return;
+ if (ret != -EAGAIN) {
+ DRV_LOG(INFO, "failed to unregister interrupt"
+ " handler (error: %d)", ret);
+ MLX5_ASSERT(false);
+ return;
+ }
+ if (twait) {
+ struct timespec onems;
+
+ /* Wait one millisecond and try again. */
+ onems.tv_sec = 0;
+ onems.tv_nsec = NS_PER_S / MS_PER_S;
+ nanosleep(&onems, 0);
+ /* Check whether one second elapsed. */
+ if ((rte_get_timer_cycles() - start) <= twait)
+ continue;
+ } else {
+ /*
+ * We get the amount of timer ticks for one second.
+ * If this amount elapsed it means we spent one
+ * second in waiting. This branch is executed once
+ * on first iteration.
+ */
+ twait = rte_get_timer_hz();
+ MLX5_ASSERT(twait);
+ }
+ /*
+ * Timeout elapsed, show a message (once a second) and retry.
+ * We have no other acceptable option here: if we ignored
+ * the unregistering return code, the handler would not
+ * be unregistered, the fd would be closed and we might get a
+ * crash. Waiting and logging in the loop seems to be the
+ * least bad choice.
+ */
+ DRV_LOG(INFO, "Retrying to unregister interrupt handler");
+ start = rte_get_timer_cycles();
+ } while (true);
+}
+
+/**
+ * Helper to unregister the callback and destroy an rte_intr_handle.
+ *
+ * @param[in] intr_handle
+ * Interrupt handle to destroy.
+ * @param[in] cb
+ * Callback registered on intr_handle.
+ * @param[in] cb_arg
+ * Callback argument for cb.
+ *
+ */
+void
+mlx5_os_interrupt_handler_destroy(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *cb_arg)
+{
+ if (rte_intr_fd_get(intr_handle) >= 0)
+ mlx5_intr_callback_unregister(intr_handle, cb, cb_arg);
+ rte_intr_instance_free(intr_handle);
+}
diff --git a/drivers/common/mlx5/linux/mlx5_common_os.h b/drivers/common/mlx5/linux/mlx5_common_os.h
index 27f1192205..479bb3c7cb 100644
--- a/drivers/common/mlx5/linux/mlx5_common_os.h
+++ b/drivers/common/mlx5/linux/mlx5_common_os.h
@@ -15,6 +15,7 @@
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>
+#include <rte_interrupts.h>
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
@@ -299,4 +300,14 @@ __rte_internal
int
mlx5_get_device_guid(const struct rte_pci_addr *dev, uint8_t *guid, size_t len);
+__rte_internal
+struct rte_intr_handle *
+mlx5_os_interrupt_handler_create(int mode, bool set_fd_nonblock, int fd,
+ rte_intr_callback_fn cb, void *cb_arg);
+
+__rte_internal
+void
+mlx5_os_interrupt_handler_destroy(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *cb_arg);
+
#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index a23a30a6c0..413dec14ab 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -153,5 +153,7 @@ INTERNAL {
mlx5_mr_mempool2mr_bh;
mlx5_mr_mempool_populate_cache;
+ mlx5_os_interrupt_handler_create; # WINDOWS_NO_EXPORT
+ mlx5_os_interrupt_handler_destroy; # WINDOWS_NO_EXPORT
local: *;
};
diff --git a/drivers/common/mlx5/windows/mlx5_common_os.h b/drivers/common/mlx5/windows/mlx5_common_os.h
index ee7973f1ec..e9e9108127 100644
--- a/drivers/common/mlx5/windows/mlx5_common_os.h
+++ b/drivers/common/mlx5/windows/mlx5_common_os.h
@@ -9,6 +9,7 @@
#include <sys/types.h>
#include <rte_errno.h>
+#include <rte_interrupts.h>
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
@@ -253,4 +254,27 @@ void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
__rte_internal
int mlx5_os_umem_dereg(void *pumem);
+static inline struct rte_intr_handle *
+mlx5_os_interrupt_handler_create(int mode, bool set_fd_nonblock, int fd,
+ rte_intr_callback_fn cb, void *cb_arg)
+{
+ (void)mode;
+ (void)set_fd_nonblock;
+ (void)fd;
+ (void)cb;
+ (void)cb_arg;
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+static inline void
+mlx5_os_interrupt_handler_destroy(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *cb_arg)
+{
+ (void)intr_handle;
+ (void)cb;
+ (void)cb_arg;
+}
+
+
#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 8fe73f1adb..a276b2ba4f 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -881,77 +881,6 @@ mlx5_dev_interrupt_handler(void *cb_arg)
}
}
-/*
- * Unregister callback handler safely. The handler may be active
- * while we are trying to unregister it, in this case code -EAGAIN
- * is returned by rte_intr_callback_unregister(). This routine checks
- * the return code and tries to unregister handler again.
- *
- * @param handle
- * interrupt handle
- * @param cb_fn
- * pointer to callback routine
- * @cb_arg
- * opaque callback parameter
- */
-void
-mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
- rte_intr_callback_fn cb_fn, void *cb_arg)
-{
- /*
- * Try to reduce timeout management overhead by not calling
- * the timer related routines on the first iteration. If the
- * unregistering succeeds on first call there will be no
- * timer calls at all.
- */
- uint64_t twait = 0;
- uint64_t start = 0;
-
- do {
- int ret;
-
- ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
- if (ret >= 0)
- return;
- if (ret != -EAGAIN) {
- DRV_LOG(INFO, "failed to unregister interrupt"
- " handler (error: %d)", ret);
- MLX5_ASSERT(false);
- return;
- }
- if (twait) {
- struct timespec onems;
-
- /* Wait one millisecond and try again. */
- onems.tv_sec = 0;
- onems.tv_nsec = NS_PER_S / MS_PER_S;
- nanosleep(&onems, 0);
- /* Check whether one second elapsed. */
- if ((rte_get_timer_cycles() - start) <= twait)
- continue;
- } else {
- /*
- * We get the amount of timer ticks for one second.
- * If this amount elapsed it means we spent one
- * second in waiting. This branch is executed once
- * on first iteration.
- */
- twait = rte_get_timer_hz();
- MLX5_ASSERT(twait);
- }
- /*
- * Timeout elapsed, show message (once a second) and retry.
- * We have no other acceptable option here, if we ignore
- * the unregistering return code the handler will not
- * be unregistered, fd will be closed and we may get the
- * crush. Hanging and messaging in the loop seems not to be
- * the worst choice.
- */
- DRV_LOG(INFO, "Retrying to unregister interrupt handler");
- start = rte_get_timer_cycles();
- } while (true);
-}
-
/**
* Handle DEVX interrupts from the NIC.
* This function is probably called from the DPDK host thread.
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a821153b35..0741028dab 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -2494,40 +2494,6 @@ mlx5_os_net_cleanup(void)
mlx5_pmd_socket_uninit();
}
-static int
-mlx5_os_dev_shared_handler_install_lsc(struct mlx5_dev_ctx_shared *sh)
-{
- int nlsk_fd, flags, ret;
-
- nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
- if (nlsk_fd < 0) {
- DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
- rte_strerror(rte_errno));
- return -1;
- }
- flags = fcntl(nlsk_fd, F_GETFL);
- ret = fcntl(nlsk_fd, F_SETFL, flags | O_NONBLOCK);
- if (ret != 0) {
- DRV_LOG(ERR, "Failed to make Netlink event socket non-blocking: %s",
- strerror(errno));
- rte_errno = errno;
- goto error;
- }
- rte_intr_type_set(sh->intr_handle_nl, RTE_INTR_HANDLE_EXT);
- rte_intr_fd_set(sh->intr_handle_nl, nlsk_fd);
- if (rte_intr_callback_register(sh->intr_handle_nl,
- mlx5_dev_interrupt_handler_nl,
- sh) != 0) {
- DRV_LOG(ERR, "Failed to register Netlink events interrupt");
- rte_intr_fd_set(sh->intr_handle_nl, -1);
- goto error;
- }
- return 0;
-error:
- close(nlsk_fd);
- return -1;
-}
-
/**
* Install shared asynchronous device events handler.
* This function is implemented to support event sharing
@@ -2539,76 +2505,47 @@ mlx5_os_dev_shared_handler_install_lsc(struct mlx5_dev_ctx_shared *sh)
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
- int ret;
- int flags;
struct ibv_context *ctx = sh->cdev->ctx;
+ int nlsk_fd;
- sh->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
- if (sh->intr_handle == NULL) {
- DRV_LOG(ERR, "Fail to allocate intr_handle");
- rte_errno = ENOMEM;
+ sh->intr_handle = mlx5_os_interrupt_handler_create
+ (RTE_INTR_INSTANCE_F_SHARED, true,
+ ctx->async_fd, mlx5_dev_interrupt_handler, sh);
+ if (!sh->intr_handle) {
+ DRV_LOG(ERR, "Failed to allocate intr_handle.");
return;
}
- rte_intr_fd_set(sh->intr_handle, -1);
-
- flags = fcntl(ctx->async_fd, F_GETFL);
- ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
- if (ret) {
- DRV_LOG(INFO, "failed to change file descriptor async event"
- " queue");
- } else {
- rte_intr_fd_set(sh->intr_handle, ctx->async_fd);
- rte_intr_type_set(sh->intr_handle, RTE_INTR_HANDLE_EXT);
- if (rte_intr_callback_register(sh->intr_handle,
- mlx5_dev_interrupt_handler, sh)) {
- DRV_LOG(INFO, "Fail to install the shared interrupt.");
- rte_intr_fd_set(sh->intr_handle, -1);
- }
+ nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
+ if (nlsk_fd < 0) {
+ DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
+ rte_strerror(rte_errno));
+ return;
}
- sh->intr_handle_nl = rte_intr_instance_alloc
- (RTE_INTR_INSTANCE_F_SHARED);
+ sh->intr_handle_nl = mlx5_os_interrupt_handler_create
+ (RTE_INTR_INSTANCE_F_SHARED, true,
+ nlsk_fd, mlx5_dev_interrupt_handler_nl, sh);
if (sh->intr_handle_nl == NULL) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
- rte_errno = ENOMEM;
return;
}
- rte_intr_fd_set(sh->intr_handle_nl, -1);
- if (mlx5_os_dev_shared_handler_install_lsc(sh) < 0) {
- DRV_LOG(INFO, "Fail to install the shared Netlink event handler.");
- rte_intr_fd_set(sh->intr_handle_nl, -1);
- }
if (sh->cdev->config.devx) {
#ifdef HAVE_IBV_DEVX_ASYNC
- sh->intr_handle_devx =
- rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
- if (!sh->intr_handle_devx) {
- DRV_LOG(ERR, "Fail to allocate intr_handle");
- rte_errno = ENOMEM;
- return;
- }
- rte_intr_fd_set(sh->intr_handle_devx, -1);
+ struct mlx5dv_devx_cmd_comp *devx_comp;
+
sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
- struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
+ devx_comp = sh->devx_comp;
if (!devx_comp) {
DRV_LOG(INFO, "failed to allocate devx_comp.");
return;
}
- flags = fcntl(devx_comp->fd, F_GETFL);
- ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
- if (ret) {
- DRV_LOG(INFO, "failed to change file descriptor"
- " devx comp");
+ sh->intr_handle_devx = mlx5_os_interrupt_handler_create
+ (RTE_INTR_INSTANCE_F_SHARED, true,
+ devx_comp->fd,
+ mlx5_dev_interrupt_handler_devx, sh);
+ if (!sh->intr_handle_devx) {
+ DRV_LOG(ERR, "Failed to allocate intr_handle.");
return;
}
- rte_intr_fd_set(sh->intr_handle_devx, devx_comp->fd);
- rte_intr_type_set(sh->intr_handle_devx,
- RTE_INTR_HANDLE_EXT);
- if (rte_intr_callback_register(sh->intr_handle_devx,
- mlx5_dev_interrupt_handler_devx, sh)) {
- DRV_LOG(INFO, "Fail to install the devx shared"
- " interrupt.");
- rte_intr_fd_set(sh->intr_handle_devx, -1);
- }
#endif /* HAVE_IBV_DEVX_ASYNC */
}
}
@@ -2624,24 +2561,13 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
- int nlsk_fd;
-
- if (rte_intr_fd_get(sh->intr_handle) >= 0)
- mlx5_intr_callback_unregister(sh->intr_handle,
- mlx5_dev_interrupt_handler, sh);
- rte_intr_instance_free(sh->intr_handle);
- nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);
- if (nlsk_fd >= 0) {
- mlx5_intr_callback_unregister
- (sh->intr_handle_nl, mlx5_dev_interrupt_handler_nl, sh);
- close(nlsk_fd);
- }
- rte_intr_instance_free(sh->intr_handle_nl);
+ mlx5_os_interrupt_handler_destroy(sh->intr_handle,
+ mlx5_dev_interrupt_handler, sh);
+ mlx5_os_interrupt_handler_destroy(sh->intr_handle_nl,
+ mlx5_dev_interrupt_handler_nl, sh);
#ifdef HAVE_IBV_DEVX_ASYNC
- if (rte_intr_fd_get(sh->intr_handle_devx) >= 0)
- rte_intr_callback_unregister(sh->intr_handle_devx,
- mlx5_dev_interrupt_handler_devx, sh);
- rte_intr_instance_free(sh->intr_handle_devx);
+ mlx5_os_interrupt_handler_destroy(sh->intr_handle_devx,
+ mlx5_dev_interrupt_handler_devx, sh);
if (sh->devx_comp)
mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
#endif
diff --git a/drivers/net/mlx5/linux/mlx5_socket.c b/drivers/net/mlx5/linux/mlx5_socket.c
index 4882e5fa2f..0e01aff0e7 100644
--- a/drivers/net/mlx5/linux/mlx5_socket.c
+++ b/drivers/net/mlx5/linux/mlx5_socket.c
@@ -133,51 +133,6 @@ mlx5_pmd_socket_handle(void *cb __rte_unused)
fclose(file);
}
-/**
- * Install interrupt handler.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @return
- * 0 on success, a negative errno value otherwise.
- */
-static int
-mlx5_pmd_interrupt_handler_install(void)
-{
- MLX5_ASSERT(server_socket != -1);
-
- server_intr_handle =
- rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
- if (server_intr_handle == NULL) {
- DRV_LOG(ERR, "Fail to allocate intr_handle");
- return -ENOMEM;
- }
- if (rte_intr_fd_set(server_intr_handle, server_socket))
- return -rte_errno;
-
- if (rte_intr_type_set(server_intr_handle, RTE_INTR_HANDLE_EXT))
- return -rte_errno;
-
- return rte_intr_callback_register(server_intr_handle,
- mlx5_pmd_socket_handle, NULL);
-}
-
-/**
- * Uninstall interrupt handler.
- */
-static void
-mlx5_pmd_interrupt_handler_uninstall(void)
-{
- if (server_socket != -1) {
- mlx5_intr_callback_unregister(server_intr_handle,
- mlx5_pmd_socket_handle,
- NULL);
- }
- rte_intr_fd_set(server_intr_handle, 0);
- rte_intr_type_set(server_intr_handle, RTE_INTR_HANDLE_UNKNOWN);
- rte_intr_instance_free(server_intr_handle);
-}
-
/**
* Initialise the socket to communicate with external tools.
*
@@ -224,7 +179,10 @@ mlx5_pmd_socket_init(void)
strerror(errno));
goto remove;
}
- if (mlx5_pmd_interrupt_handler_install()) {
+ server_intr_handle = mlx5_os_interrupt_handler_create
+ (RTE_INTR_INSTANCE_F_PRIVATE, false,
+ server_socket, mlx5_pmd_socket_handle, NULL);
+ if (server_intr_handle == NULL) {
DRV_LOG(WARNING, "cannot register interrupt handler for mlx5 socket: %s",
strerror(errno));
goto remove;
@@ -248,7 +206,8 @@ mlx5_pmd_socket_uninit(void)
{
if (server_socket == -1)
return;
- mlx5_pmd_interrupt_handler_uninstall();
+ mlx5_os_interrupt_handler_destroy(server_intr_handle,
+ mlx5_pmd_socket_handle, NULL);
claim_zero(close(server_socket));
server_socket = -1;
MKSTR(path, MLX5_SOCKET_PATH, getpid());
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 305edffe71..7ebb2cc961 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1682,8 +1682,6 @@ int mlx5_sysfs_switch_info(unsigned int ifindex,
struct mlx5_switch_info *info);
void mlx5_translate_port_name(const char *port_name_in,
struct mlx5_switch_info *port_info_out);
-void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
- rte_intr_callback_fn cb_fn, void *cb_arg);
int mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
char *ifname);
int mlx5_get_module_info(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index fe74317fe8..f853a67f58 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -741,11 +741,8 @@ mlx5_txpp_interrupt_handler(void *cb_arg)
static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
- if (!rte_intr_fd_get(sh->txpp.intr_handle))
- return;
- mlx5_intr_callback_unregister(sh->txpp.intr_handle,
- mlx5_txpp_interrupt_handler, sh);
- rte_intr_instance_free(sh->txpp.intr_handle);
+ mlx5_os_interrupt_handler_destroy(sh->txpp.intr_handle,
+ mlx5_txpp_interrupt_handler, sh);
}
/* Attach interrupt handler and fires first request to Rearm Queue. */
@@ -769,23 +766,12 @@ mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
rte_errno = errno;
return -rte_errno;
}
- sh->txpp.intr_handle =
- rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
- if (sh->txpp.intr_handle == NULL) {
- DRV_LOG(ERR, "Fail to allocate intr_handle");
- return -ENOMEM;
- }
fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
- if (rte_intr_fd_set(sh->txpp.intr_handle, fd))
- return -rte_errno;
-
- if (rte_intr_type_set(sh->txpp.intr_handle, RTE_INTR_HANDLE_EXT))
- return -rte_errno;
-
- if (rte_intr_callback_register(sh->txpp.intr_handle,
- mlx5_txpp_interrupt_handler, sh)) {
- rte_intr_fd_set(sh->txpp.intr_handle, 0);
- DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
+ sh->txpp.intr_handle = mlx5_os_interrupt_handler_create
+ (RTE_INTR_INSTANCE_F_SHARED, false,
+ fd, mlx5_txpp_interrupt_handler, sh);
+ if (!sh->txpp.intr_handle) {
+ DRV_LOG(ERR, "Fail to allocate intr_handle");
return -rte_errno;
}
/* Subscribe CQ event to the event channel controlled by the driver. */
diff --git a/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/drivers/net/mlx5/windows/mlx5_ethdev_os.c
index f97526580d..88d8213f55 100644
--- a/drivers/net/mlx5/windows/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/windows/mlx5_ethdev_os.c
@@ -140,28 +140,6 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-/*
- * Unregister callback handler safely. The handler may be active
- * while we are trying to unregister it, in this case code -EAGAIN
- * is returned by rte_intr_callback_unregister(). This routine checks
- * the return code and tries to unregister handler again.
- *
- * @param handle
- * interrupt handle
- * @param cb_fn
- * pointer to callback routine
- * @cb_arg
- * opaque callback parameter
- */
-void
-mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
- rte_intr_callback_fn cb_fn, void *cb_arg)
-{
- RTE_SET_USED(handle);
- RTE_SET_USED(cb_fn);
- RTE_SET_USED(cb_arg);
-}
-
/**
* DPDK callback to get flow control status.
*
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..fd447cc650 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -93,22 +93,10 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
- int ret = -EAGAIN;
-
- if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
- while (ret == -EAGAIN) {
- ret = rte_intr_callback_unregister(virtq->intr_handle,
- mlx5_vdpa_virtq_kick_handler, virtq);
- if (ret == -EAGAIN) {
- DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
- rte_intr_fd_get(virtq->intr_handle),
- virtq->index);
- usleep(MLX5_VDPA_INTR_RETRIES_USEC);
- }
- }
- rte_intr_fd_set(virtq->intr_handle, -1);
- }
- rte_intr_instance_free(virtq->intr_handle);
+ int ret;
+
+ mlx5_os_interrupt_handler_destroy(virtq->intr_handle,
+ mlx5_vdpa_virtq_kick_handler, virtq);
if (virtq->virtq) {
ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
if (ret)
@@ -365,35 +353,13 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
virtq->priv = priv;
rte_write32(virtq->index, priv->virtq_db_addr);
/* Setup doorbell mapping. */
- virtq->intr_handle =
- rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+ virtq->intr_handle = mlx5_os_interrupt_handler_create(
+ RTE_INTR_INSTANCE_F_SHARED, false,
+ vq.kickfd, mlx5_vdpa_virtq_kick_handler, virtq);
if (virtq->intr_handle == NULL) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
goto error;
}
-
- if (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))
- goto error;
-
- if (rte_intr_fd_get(virtq->intr_handle) == -1) {
- DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
- } else {
- if (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))
- goto error;
-
- if (rte_intr_callback_register(virtq->intr_handle,
- mlx5_vdpa_virtq_kick_handler,
- virtq)) {
- rte_intr_fd_set(virtq->intr_handle, -1);
- DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
- index);
- goto error;
- } else {
- DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
- rte_intr_fd_get(virtq->intr_handle),
- index);
- }
- }
/* Subscribe virtq error event. */
virtq->version++;
cookie = ((uint64_t)virtq->version << 32) + index;
--
2.27.0