* [PATCH v3] net/iavf: fix error of virtchnl command
@ 2022-10-08 8:11 Yiding Zhou
0 siblings, 0 replies; 4+ messages in thread
From: Yiding Zhou @ 2022-10-08 8:11 UTC (permalink / raw)
To: dev; +Cc: jingjing.wu, beilei.xing, Yiding Zhou, stable
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands will not be received.
This commit starts a thread to handle all events to fix this issue.
Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: stable@dpdk.org
Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
v3: fix CI error
---
drivers/net/iavf/iavf.h | 2 +
drivers/net/iavf/iavf_ethdev.c | 5 ++
drivers/net/iavf/iavf_vchnl.c | 134 ++++++++++++++++++++++++++++++++-
3 files changed, 140 insertions(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
}
int iavf_check_api_version(struct iavf_adapter *adapter);
int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
+ if (iavf_dev_event_handler_init())
+ goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
iavf_dev_close(dev);
+ iavf_dev_event_handler_fini();
+
return 0;
}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0fa2617cd2..6284a5b125 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
* Copyright(c) 2017 Intel Corporation
*/
+#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
@@ -11,6 +12,7 @@
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
+#include <rte_os_shim.h>
#include <rte_debug.h>
#include <rte_alarm.h>
@@ -27,6 +29,136 @@
#define MAX_TRY_TIMES 2000
#define ASQ_DELAY_MS 1
+#define MAX_EVENT_PENDING 16
+
+struct iavf_arq_event_element {
+ TAILQ_ENTRY(iavf_arq_event_element) next;
+ struct rte_eth_dev *dev;
+ enum rte_eth_event_type event;
+ void *param;
+};
+
+struct iavf_event_handler {
+ rte_atomic32_t ndev;
+ pthread_t tid;
+ int fd[2];
+ pthread_mutex_t lock;
+ TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+ .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+ struct iavf_event_handler *handler = &event_handler;
+ TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+ while (true) {
+ char unused[MAX_EVENT_PENDING];
+ ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+ if (nr <= 0)
+ break;
+
+ TAILQ_INIT(&pending);
+ pthread_mutex_lock(&handler->lock);
+ TAILQ_CONCAT(&pending, &handler->pending, next);
+ pthread_mutex_unlock(&handler->lock);
+
+ struct iavf_arq_event_element *pos, *save_next;
+ TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+ TAILQ_REMOVE(&pending, pos, next);
+ rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
+ rte_free(pos);
+ }
+ }
+ return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event,
+ void *param)
+{
+ struct iavf_event_handler *handler = &event_handler;
+ char notify_byte;
+ struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 0);
+ if (!elem)
+ return;
+
+ elem->dev = dev;
+ elem->event = event;
+ elem->param = param;
+
+ pthread_mutex_lock(&handler->lock);
+ TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+ pthread_mutex_unlock(&handler->lock);
+
+ RTE_SET_USED(write(handler->fd[1], ¬ify_byte, 1));
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+ struct iavf_event_handler *handler = &event_handler;
+
+ if (rte_atomic32_add_return(&handler->ndev, 1) != 1)
+ return 0;
+#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
+ int err = _pipe(handler->fd, MAX_EVENT_PENDING, _O_BINARY);
+#else
+ int err = pipe(handler->fd);
+#endif
+ if (err != 0) {
+ rte_atomic32_dec(&handler->ndev);
+ return -1;
+ }
+
+ TAILQ_INIT(&handler->pending);
+ pthread_mutex_init(&handler->lock, NULL);
+
+ if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
+ NULL, iavf_dev_event_handle, NULL)) {
+ rte_atomic32_dec(&handler->ndev);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+iavf_dev_event_handler_fini(void)
+{
+ struct iavf_event_handler *handler = &event_handler;
+
+ if (!rte_atomic32_dec_and_test(&handler->ndev))
+ return;
+
+ RTE_SET_USED(pthread_cancel(handler->tid));
+ close(handler->fd[0]);
+ close(handler->fd[1]);
+ handler->fd[0] = -1;
+ handler->fd[1] = -1;
+
+ pthread_join(handler->tid, NULL);
+ pthread_mutex_destroy(&handler->lock);
+
+ struct iavf_arq_event_element *pos, *save_next;
+ TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
+ TAILQ_REMOVE(&handler->pending, pos, next);
+ rte_free(pos);
+ }
+}
+
static uint32_t
iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
{
@@ -293,7 +425,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
vf->link_speed = iavf_convert_link_speed(speed);
}
iavf_dev_link_update(dev, 0);
- rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
--
2.34.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH] net/iavf: fix error of virtchnl command
@ 2022-09-19 6:06 Yiding Zhou
2022-10-08 8:48 ` [PATCH v3] " Yiding Zhou
0 siblings, 1 reply; 4+ messages in thread
From: Yiding Zhou @ 2022-09-19 6:06 UTC (permalink / raw)
To: dev; +Cc: jingjing.wu, beilei.xing, Yiding Zhou, stable
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands will not be received.
This commit starts a thread to handle all events to fix this issue.
Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: stable@dpdk.org
Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
drivers/net/iavf/iavf.h | 2 +
drivers/net/iavf/iavf_ethdev.c | 5 ++
drivers/net/iavf/iavf_vchnl.c | 130 ++++++++++++++++++++++++++++++++-
3 files changed, 136 insertions(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..7c215e1797 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -417,6 +417,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
}
int iavf_check_api_version(struct iavf_adapter *adapter);
int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..a584b8918d 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2629,6 +2629,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
+ if (iavf_dev_event_handler_init())
+ goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2783,6 +2786,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
iavf_dev_close(dev);
+ iavf_dev_event_handler_fini();
+
return 0;
}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..5f3dd8779e 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,8 @@
* Copyright(c) 2017 Intel Corporation
*/
+#include <unistd.h>
+#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
@@ -27,6 +29,132 @@
#define MAX_TRY_TIMES 2000
#define ASQ_DELAY_MS 1
+struct iavf_arq_event_element {
+ TAILQ_ENTRY(iavf_arq_event_element) next;
+ struct rte_eth_dev *dev;
+ enum rte_eth_event_type event;
+ void *param;
+};
+
+struct iavf_event_handler {
+ rte_atomic32_t ndev;
+ pthread_t tid;
+ int fd[2];
+ pthread_mutex_t lock;
+ TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+ .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+ struct iavf_event_handler *handler = &event_handler;
+ TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+ while (true) {
+ char unused;
+ int nr = read(handler->fd[0], &unused, 1);
+ if (nr <= 0)
+ break;
+
+ TAILQ_INIT(&pending);
+ pthread_mutex_lock(&handler->lock);
+ TAILQ_CONCAT(&pending, &handler->pending, next);
+ pthread_mutex_unlock(&handler->lock);
+
+ struct iavf_arq_event_element *pos, *save_next;
+ TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+ TAILQ_REMOVE(&pending, pos, next);
+ rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
+ rte_free(pos);
+ }
+ }
+ return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event,
+ void *param)
+{
+ struct iavf_event_handler *handler = &event_handler;
+ char notify_byte;
+ struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 0);
+ if (!elem)
+ return;
+
+ elem->dev = dev;
+ elem->event = event;
+ elem->param = param;
+
+ pthread_mutex_lock(&handler->lock);
+ TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+ pthread_mutex_unlock(&handler->lock);
+
+ write(handler->fd[1], ¬ify_byte, 1);
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+ struct iavf_event_handler *handler = &event_handler;
+
+ if (rte_atomic32_add_return(&handler->ndev, 1) != 1)
+ return 0;
+
+ if (pipe(handler->fd)) {
+ rte_atomic32_dec(&handler->ndev);
+ return -1;
+ }
+
+ TAILQ_INIT(&handler->pending);
+ pthread_mutex_init(&handler->lock, NULL);
+
+ int flags = fcntl(handler->fd[1], F_GETFL);
+ fcntl(handler->fd[1], F_SETFL, flags | O_NONBLOCK);
+ if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
+ NULL, iavf_dev_event_handle, NULL)) {
+ rte_atomic32_dec(&handler->ndev);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+iavf_dev_event_handler_fini(void)
+{
+ struct iavf_event_handler *handler = &event_handler;
+
+ if (!rte_atomic32_dec_and_test(&handler->ndev))
+ return;
+
+ pthread_cancel(handler->tid);
+ close(handler->fd[0]);
+ close(handler->fd[1]);
+ handler->fd[0] = -1;
+ handler->fd[1] = -1;
+
+ pthread_join(handler->tid, NULL);
+ pthread_mutex_destroy(&handler->lock);
+
+ struct iavf_arq_event_element *pos, *save_next;
+ TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
+ TAILQ_REMOVE(&handler->pending, pos, next);
+ rte_free(pos);
+ }
+}
+
static uint32_t
iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
{
@@ -293,7 +421,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
vf->link_speed = iavf_convert_link_speed(speed);
}
iavf_dev_link_update(dev, 0);
- rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
--
2.34.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH v3] net/iavf: fix error of virtchnl command
2022-09-19 6:06 [PATCH] " Yiding Zhou
@ 2022-10-08 8:48 ` Yiding Zhou
2022-10-09 6:03 ` Zhang, Qi Z
0 siblings, 1 reply; 4+ messages in thread
From: Yiding Zhou @ 2022-10-08 8:48 UTC (permalink / raw)
To: dev; +Cc: jingjing.wu, beilei.xing, Yiding Zhou, stable
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands will not be received.
This commit starts a thread to handle all events to fix this issue.
Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: stable@dpdk.org
Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
v3: fix CI errors
---
drivers/net/iavf/iavf.h | 2 +
drivers/net/iavf/iavf_ethdev.c | 5 ++
drivers/net/iavf/iavf_vchnl.c | 136 ++++++++++++++++++++++++++++++++-
3 files changed, 142 insertions(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
}
int iavf_check_api_version(struct iavf_adapter *adapter);
int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
+ if (iavf_dev_event_handler_init())
+ goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
iavf_dev_close(dev);
+ iavf_dev_event_handler_fini();
+
return 0;
}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0fa2617cd2..35ceccaedd 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
* Copyright(c) 2017 Intel Corporation
*/
+#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
@@ -11,6 +12,7 @@
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
+#include <rte_os_shim.h>
#include <rte_debug.h>
#include <rte_alarm.h>
@@ -27,6 +29,138 @@
#define MAX_TRY_TIMES 2000
#define ASQ_DELAY_MS 1
+#define MAX_EVENT_PENDING 16
+
+struct iavf_arq_event_element {
+ TAILQ_ENTRY(iavf_arq_event_element) next;
+ struct rte_eth_dev *dev;
+ enum rte_eth_event_type event;
+ void *param;
+};
+
+struct iavf_event_handler {
+ rte_atomic32_t ndev;
+ pthread_t tid;
+ int fd[2];
+ pthread_mutex_t lock;
+ TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+ .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+ struct iavf_event_handler *handler = &event_handler;
+ TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+ while (true) {
+ char unused[MAX_EVENT_PENDING];
+ ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+ if (nr <= 0)
+ break;
+
+ TAILQ_INIT(&pending);
+ pthread_mutex_lock(&handler->lock);
+ TAILQ_CONCAT(&pending, &handler->pending, next);
+ pthread_mutex_unlock(&handler->lock);
+
+ struct iavf_arq_event_element *pos, *save_next;
+ TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+ TAILQ_REMOVE(&pending, pos, next);
+ rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
+ rte_free(pos);
+ }
+ }
+ return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event,
+ void *param)
+{
+ struct iavf_event_handler *handler = &event_handler;
+ char notify_byte;
+ struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 0);
+ if (!elem)
+ return;
+
+ elem->dev = dev;
+ elem->event = event;
+ elem->param = param;
+
+ pthread_mutex_lock(&handler->lock);
+ TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+ pthread_mutex_unlock(&handler->lock);
+
+ ssize_t nw = write(handler->fd[1], ¬ify_byte, 1);
+ RTE_SET_USED(nw);
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+ struct iavf_event_handler *handler = &event_handler;
+
+ if (rte_atomic32_add_return(&handler->ndev, 1) != 1)
+ return 0;
+#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
+ int err = _pipe(handler->fd, MAX_EVENT_PENDING, _O_BINARY);
+#else
+ int err = pipe(handler->fd);
+#endif
+ if (err != 0) {
+ rte_atomic32_dec(&handler->ndev);
+ return -1;
+ }
+
+ TAILQ_INIT(&handler->pending);
+ pthread_mutex_init(&handler->lock, NULL);
+
+ if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
+ NULL, iavf_dev_event_handle, NULL)) {
+ rte_atomic32_dec(&handler->ndev);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+iavf_dev_event_handler_fini(void)
+{
+ struct iavf_event_handler *handler = &event_handler;
+
+ if (!rte_atomic32_dec_and_test(&handler->ndev))
+ return;
+
+ int unused = pthread_cancel(handler->tid);
+ RTE_SET_USED(unused);
+ close(handler->fd[0]);
+ close(handler->fd[1]);
+ handler->fd[0] = -1;
+ handler->fd[1] = -1;
+
+ pthread_join(handler->tid, NULL);
+ pthread_mutex_destroy(&handler->lock);
+
+ struct iavf_arq_event_element *pos, *save_next;
+ TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
+ TAILQ_REMOVE(&handler->pending, pos, next);
+ rte_free(pos);
+ }
+}
+
static uint32_t
iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
{
@@ -293,7 +427,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
vf->link_speed = iavf_convert_link_speed(speed);
}
iavf_dev_link_update(dev, 0);
- rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
--
2.34.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* RE: [PATCH v3] net/iavf: fix error of virtchnl command
2022-10-08 8:48 ` [PATCH v3] " Yiding Zhou
@ 2022-10-09 6:03 ` Zhang, Qi Z
2022-10-10 2:02 ` Zhou, YidingX
0 siblings, 1 reply; 4+ messages in thread
From: Zhang, Qi Z @ 2022-10-09 6:03 UTC (permalink / raw)
To: Zhou, YidingX, dev; +Cc: Wu, Jingjing, Xing, Beilei, Zhou, YidingX, stable
> -----Original Message-----
> From: Yiding Zhou <yidingx.zhou@intel.com>
> Sent: Saturday, October 8, 2022 4:49 PM
> To: dev@dpdk.org
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhou, YidingX <yidingx.zhou@intel.com>;
> stable@dpdk.org
> Subject: [PATCH v3] net/iavf: fix error of virtchnl command
...
> When the device is bonded, bond pmd will register callback for LSC event.
> This callback will execute some virtchnl commands in eal-intr-thread to
> reinitialize the device with interrupts disabled. In this case, responses to all
> commands will not be received.
>
> This commit starts a thread to handle all events to fix this issue.
>
> Fixes: 48de41ca11f0 ("net/avf: enable link status update")
> CC: stable@dpdk.org
>
> Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
> ---
> +
> static uint32_t
> iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed) { @@ -
> 293,7 +427,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev,
> uint8_t *msg,
> vf->link_speed = iavf_convert_link_speed(speed);
> }
> iavf_dev_link_update(dev, 0);
> - rte_eth_dev_callback_process(dev,
> RTE_ETH_EVENT_INTR_LSC, NULL);
> + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
If we decide to raise an etherdev event from a separate thread, is it better to do this for all events ?
^ permalink raw reply [flat|nested] 4+ messages in thread
* RE: [PATCH v3] net/iavf: fix error of virtchnl command
2022-10-09 6:03 ` Zhang, Qi Z
@ 2022-10-10 2:02 ` Zhou, YidingX
0 siblings, 0 replies; 4+ messages in thread
From: Zhou, YidingX @ 2022-10-10 2:02 UTC (permalink / raw)
To: Zhang, Qi Z, dev; +Cc: Wu, Jingjing, Xing, Beilei, stable
> > This commit starts a thread to handle all events to fix this issue.
> >
> > Fixes: 48de41ca11f0 ("net/avf: enable link status update")
> > CC: stable@dpdk.org
> >
> > Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
> > ---
> > +
> > static uint32_t
> > iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed) {
> > @@ -
> > 293,7 +427,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev,
> > uint8_t *msg,
> > vf->link_speed = iavf_convert_link_speed(speed);
> > }
> > iavf_dev_link_update(dev, 0);
> > - rte_eth_dev_callback_process(dev,
> > RTE_ETH_EVENT_INTR_LSC, NULL);
> > + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
>
> If we decide to raise an etherdev event from a separate thread, is it better to
> do this for all events ?
Since there are no bug reports for other events, I have not handle them in event thread, I will follow your comments in v4.
Thanks~
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2022-10-10 2:02 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-10-08 8:11 [PATCH v3] net/iavf: fix error of virtchnl command Yiding Zhou
-- strict thread matches above, loose matches on Subject: below --
2022-09-19 6:06 [PATCH] " Yiding Zhou
2022-10-08 8:48 ` [PATCH v3] " Yiding Zhou
2022-10-09 6:03 ` Zhang, Qi Z
2022-10-10 2:02 ` Zhou, YidingX
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).