From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com,
Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v7 5/6] net/idpf: add alarm to support handle vchnl message
Date: Wed, 8 Feb 2023 07:34:00 +0000
Message-ID: <20230208073401.2468579-6-mingxia.liu@intel.com>
In-Reply-To: <20230208073401.2468579-1-mingxia.liu@intel.com>
Register a periodic alarm handler to receive and handle virtual channel
messages from the control plane, covering both link change events and
responses to pending commands.
Refine the link status update to report the link state and speed carried
in link events.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.h | 5 +
drivers/common/idpf/idpf_common_virtchnl.c | 33 ++--
drivers/common/idpf/idpf_common_virtchnl.h | 6 +
drivers/common/idpf/version.map | 2 +
drivers/net/idpf/idpf_ethdev.c | 169 ++++++++++++++++++++-
drivers/net/idpf/idpf_ethdev.h | 2 +
6 files changed, 195 insertions(+), 22 deletions(-)
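
Note: below is a minimal, self-contained sketch (not driver code) of the
periodic-alarm pattern this patch relies on: rte_eal_alarm_set() is
one-shot, so the callback polls for mailbox messages and re-arms itself,
and teardown cancels it. The names example_adapter/example_poll_mailbox
and the interval are illustrative placeholders only.

/*
 * Sketch of the alarm-driven polling loop, assuming a hypothetical
 * example_adapter instead of the real idpf adapter structures.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#define EXAMPLE_ALARM_INTERVAL_US 50000 /* 50 ms, mirrors IDPF_ALARM_INTERVAL */

struct example_adapter {
	unsigned int polls; /* how many times the handler ran */
};

static void
example_poll_mailbox(struct example_adapter *ad)
{
	/* The real driver would call idpf_vc_ctlq_recv() here and dispatch
	 * link-change events or command responses. */
	ad->polls++;
}

static void
example_alarm_handler(void *param)
{
	struct example_adapter *ad = param;

	example_poll_mailbox(ad);

	/* Re-arm: the alarm is one-shot, so the handler schedules itself
	 * again to get periodic behaviour. */
	rte_eal_alarm_set(EXAMPLE_ALARM_INTERVAL_US, example_alarm_handler, ad);
}

int
main(int argc, char **argv)
{
	struct example_adapter ad = { .polls = 0 };

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Start the periodic handler (device init would do this). */
	rte_eal_alarm_set(EXAMPLE_ALARM_INTERVAL_US, example_alarm_handler, &ad);

	rte_delay_us_sleep(5 * EXAMPLE_ALARM_INTERVAL_US);

	/* Stop it on teardown (device de-init would do this). */
	rte_eal_alarm_cancel(example_alarm_handler, &ad);
	printf("handler ran %u times\n", ad.polls);

	rte_eal_cleanup();
	return 0;
}
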
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 7abc4d2a3a..364a60221a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -118,6 +118,11 @@ struct idpf_vport {
bool tx_use_avx512;
struct virtchnl2_vport_stats eth_stats_offset;
+
+ void *dev;
+ /* Event from idpf */
+ bool link_up;
+ uint32_t link_speed;
};
/* Message type read in virtual channel from PF */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 10cfa33704..99d9efbb7c 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -202,25 +202,6 @@ idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
switch (args->ops) {
case VIRTCHNL_OP_VERSION:
case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_HASH:
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_STATS:
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_GET_RSS_HASH:
- case VIRTCHNL2_OP_GET_RSS_LUT:
/* for init virtchnl ops, need to poll the response */
err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
clear_cmd(adapter);
@@ -1111,3 +1092,17 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+
+int
+idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ struct idpf_ctlq_msg *q_msg)
+{
+ return idpf_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ u16 *buff_count, struct idpf_dma_mem **buffs)
+{
+ return idpf_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 205d1a932d..d479d93c8e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -58,4 +58,10 @@ __rte_internal
int idpf_vc_rss_lut_get(struct idpf_vport *vport);
__rte_internal
int idpf_vc_rss_hash_get(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ struct idpf_ctlq_msg *q_msg);
+__rte_internal
+int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ u16 *buff_count, struct idpf_dma_mem **buffs);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index e31f6ff4d9..70334a1b03 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -38,6 +38,8 @@ INTERNAL {
idpf_vc_api_version_check;
idpf_vc_caps_get;
idpf_vc_cmd_execute;
+ idpf_vc_ctlq_post_rx_buffs;
+ idpf_vc_ctlq_recv;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 11f0ca0085..751c0d8717 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -9,6 +9,7 @@
#include <rte_memzone.h>
#include <rte_dev.h>
#include <errno.h>
+#include <rte_alarm.h>
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
@@ -83,14 +84,51 @@ static int
idpf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
+ struct idpf_vport *vport = dev->data->dev_private;
struct rte_eth_link new_link;
memset(&new_link, 0, sizeof(new_link));
- new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ switch (vport->link_speed) {
+ case RTE_ETH_SPEED_NUM_10M:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
+ break;
+ case RTE_ETH_SPEED_NUM_100M:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ break;
+ case RTE_ETH_SPEED_NUM_1G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
+ break;
+ case RTE_ETH_SPEED_NUM_10G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ break;
+ case RTE_ETH_SPEED_NUM_20G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
+ break;
+ case RTE_ETH_SPEED_NUM_25G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
+ break;
+ case RTE_ETH_SPEED_NUM_40G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
+ break;
+ case RTE_ETH_SPEED_NUM_50G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
+ break;
+ case RTE_ETH_SPEED_NUM_100G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
+ break;
+ case RTE_ETH_SPEED_NUM_200G:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_200G;
+ break;
+ default:
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ }
+
new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
- new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- RTE_ETH_LINK_SPEED_FIXED);
+ new_link.link_status = vport->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
+ new_link.link_autoneg = (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) ?
+ RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
return rte_eth_linkstatus_set(dev, &new_link);
}
@@ -891,6 +929,127 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
return ret;
}
+static struct idpf_vport *
+idpf_find_vport(struct idpf_adapter_ext *adapter, uint32_t vport_id)
+{
+ struct idpf_vport *vport = NULL;
+ int i;
+
+ for (i = 0; i < adapter->cur_vport_nb; i++) {
+ vport = adapter->vports[i];
+ if (vport->vport_id != vport_id)
+ continue;
+ else
+ return vport;
+ }
+
+ return vport;
+}
+
+static void
+idpf_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+{
+ struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)vport->dev;
+
+ if (msglen < sizeof(struct virtchnl2_event)) {
+ PMD_DRV_LOG(ERR, "Error event");
+ return;
+ }
+
+ switch (vc_event->event) {
+ case VIRTCHNL2_EVENT_LINK_CHANGE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
+ vport->link_up = !!(vc_event->link_status);
+ vport->link_speed = vc_event->link_speed;
+ idpf_dev_link_update(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, " unknown event received %u", vc_event->event);
+ break;
+ }
+}
+
+static void
+idpf_handle_virtchnl_msg(struct idpf_adapter_ext *adapter_ex)
+{
+ struct idpf_adapter *adapter = &adapter_ex->base;
+ struct idpf_dma_mem *dma_mem = NULL;
+ struct idpf_hw *hw = &adapter->hw;
+ struct virtchnl2_event *vc_event;
+ struct idpf_ctlq_msg ctlq_msg;
+ enum idpf_mbx_opc mbx_op;
+ struct idpf_vport *vport;
+ enum virtchnl_ops vc_op;
+ uint16_t pending = 1;
+ int ret;
+
+ while (pending) {
+ ret = idpf_vc_ctlq_recv(hw->arq, &pending, &ctlq_msg);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from virtual channel, ret: %d", ret);
+ return;
+ }
+
+ rte_memcpy(adapter->mbx_resp, ctlq_msg.ctx.indirect.payload->va,
+ IDPF_DFLT_MBX_BUF_SIZE);
+
+ mbx_op = rte_le_to_cpu_16(ctlq_msg.opcode);
+ vc_op = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
+ adapter->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
+
+ switch (mbx_op) {
+ case idpf_mbq_opc_send_msg_to_peer_pf:
+ if (vc_op == VIRTCHNL2_OP_EVENT) {
+ if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
+ PMD_DRV_LOG(ERR, "Error event");
+ return;
+ }
+ vc_event = (struct virtchnl2_event *)adapter->mbx_resp;
+ vport = idpf_find_vport(adapter_ex, vc_event->vport_id);
+ if (!vport) {
+ PMD_DRV_LOG(ERR, "Can't find vport.");
+ return;
+ }
+ idpf_handle_event_msg(vport, adapter->mbx_resp,
+ ctlq_msg.data_len);
+ } else {
+ if (vc_op == adapter->pend_cmd)
+ notify_cmd(adapter, adapter->cmd_retval);
+ else
+ PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
+ adapter->pend_cmd, vc_op);
+
+ PMD_DRV_LOG(DEBUG, " Virtual channel response is received,"
+ "opcode = %d", vc_op);
+ }
+ goto post_buf;
+ default:
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", mbx_op);
+ }
+ }
+
+post_buf:
+ if (ctlq_msg.data_len)
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ else
+ pending = 0;
+
+ ret = idpf_vc_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
+ if (ret && dma_mem)
+ idpf_free_dma_mem(hw, dma_mem);
+}
+
+static void
+idpf_dev_alarm_handler(void *param)
+{
+ struct idpf_adapter_ext *adapter = param;
+
+ idpf_handle_virtchnl_msg(adapter);
+
+ rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
+}
+
static int
idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adapter)
{
@@ -913,6 +1072,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
goto err_adapter_init;
}
+ rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
+
adapter->max_vport_nb = adapter->base.caps.max_vports;
adapter->vports = rte_zmalloc("vports",
@@ -996,6 +1157,7 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
vport->adapter = &adapter->base;
vport->sw_idx = param->idx;
vport->devarg_id = param->devarg_id;
+ vport->dev = dev;
memset(&create_vport_info, 0, sizeof(create_vport_info));
ret = idpf_vport_info_init(vport, &create_vport_info);
@@ -1065,6 +1227,7 @@ idpf_find_adapter_ext(struct rte_pci_device *pci_dev)
static void
idpf_adapter_ext_deinit(struct idpf_adapter_ext *adapter)
{
+ rte_eal_alarm_cancel(idpf_dev_alarm_handler, adapter);
idpf_adapter_deinit(&adapter->base);
rte_free(adapter->vports);
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 839a2bd82c..3c2c932438 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -53,6 +53,8 @@
#define IDPF_ADAPTER_NAME_LEN (PCI_PRI_STR_SIZE + 1)
+#define IDPF_ALARM_INTERVAL 50000 /* us */
+
struct idpf_vport_param {
struct idpf_adapter_ext *adapter;
uint16_t devarg_id; /* arg id from user */
--
2.25.1
Thread overview: 63+ messages
2022-12-16 9:36 [PATCH 0/7] add idpf pmd enhancement features Mingxia Liu
2022-12-16 9:37 ` [PATCH 1/7] common/idpf: add hw statistics Mingxia Liu
2022-12-16 9:37 ` [PATCH 2/7] common/idpf: add RSS set/get ops Mingxia Liu
2022-12-16 9:37 ` [PATCH 3/7] common/idpf: support single q scatter RX datapath Mingxia Liu
2022-12-16 9:37 ` [PATCH 4/7] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2022-12-16 9:37 ` [PATCH 5/7] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2022-12-16 9:37 ` [PATCH 6/7] common/idpf: add xstats ops Mingxia Liu
2022-12-16 9:37 ` [PATCH 7/7] common/idpf: update mbuf_alloc_failed multi-thread process Mingxia Liu
2023-01-11 7:15 ` [PATCH 0/6] add idpf pmd enhancement features Mingxia Liu
2023-01-11 7:15 ` [PATCH v2 1/6] common/idpf: add hw statistics Mingxia Liu
2023-01-11 7:15 ` [PATCH v2 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-01-11 7:15 ` [PATCH v2 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-01-11 7:15 ` [PATCH v2 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-01-11 7:15 ` [PATCH v2 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-01-11 7:15 ` [PATCH v2 6/6] common/idpf: add xstats ops Mingxia Liu
2023-01-18 7:14 ` [PATCH v3 0/6] add idpf pmd enhancement features Mingxia Liu
2023-01-18 7:14 ` [PATCH v3 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-01 8:48 ` Wu, Jingjing
2023-02-01 12:34 ` Liu, Mingxia
2023-01-18 7:14 ` [PATCH v3 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-02 3:28 ` Wu, Jingjing
2023-02-07 3:10 ` Liu, Mingxia
2023-01-18 7:14 ` [PATCH v3 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-02 3:45 ` Wu, Jingjing
2023-02-02 7:19 ` Liu, Mingxia
2023-01-18 7:14 ` [PATCH v3 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-01-18 7:14 ` [PATCH v3 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-02 4:23 ` Wu, Jingjing
2023-02-02 7:39 ` Liu, Mingxia
2023-02-02 8:46 ` Wu, Jingjing
2023-01-18 7:14 ` [PATCH v3 6/6] common/idpf: add xstats ops Mingxia Liu
2023-02-07 9:56 ` [PATCH v4 0/6] add idpf pmd enhancement features Mingxia Liu
2023-02-07 9:56 ` [PATCH v4 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-07 9:56 ` [PATCH v4 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-07 9:56 ` [PATCH v4 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-07 9:56 ` [PATCH v4 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-07 9:57 ` [PATCH v4 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-07 9:57 ` [PATCH v4 6/6] common/idpf: add xstats ops Mingxia Liu
2023-02-07 10:08 ` [PATCH v4 0/6] add idpf pmd enhancement features Mingxia Liu
2023-02-07 10:08 ` [PATCH v5 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-07 10:16 ` [PATCH v6 0/6] add idpf pmd enhancement features Mingxia Liu
2023-02-07 10:16 ` [PATCH v6 1/6] common/idpf: add hw statistics Mingxia Liu
2023-02-08 2:00 ` Zhang, Qi Z
2023-02-08 8:28 ` Liu, Mingxia
2023-02-07 10:16 ` [PATCH v6 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-07 10:16 ` [PATCH v6 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-07 10:16 ` [PATCH v6 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-07 10:16 ` [PATCH v6 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-07 10:16 ` [PATCH v6 6/6] common/idpf: add xstats ops Mingxia Liu
2023-02-08 0:28 ` [PATCH v6 0/6] add idpf pmd enhancement features Wu, Jingjing
2023-02-08 7:33 ` [PATCH v7 " Mingxia Liu
2023-02-08 7:33 ` [PATCH v7 1/6] net/idpf: add hw statistics Mingxia Liu
2023-02-08 7:33 ` [PATCH v7 2/6] net/idpf: add RSS set/get ops Mingxia Liu
2023-02-08 7:33 ` [PATCH v7 3/6] net/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-08 7:33 ` [PATCH v7 4/6] net/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-08 7:34 ` Mingxia Liu [this message]
2023-02-08 7:34 ` [PATCH v7 6/6] net/idpf: add xstats ops Mingxia Liu
2023-02-08 9:32 ` [PATCH v7 0/6] add idpf pmd enhancement features Zhang, Qi Z
2023-02-07 10:08 ` [PATCH v5 2/6] common/idpf: add RSS set/get ops Mingxia Liu
2023-02-07 10:08 ` [PATCH v5 3/6] common/idpf: support single q scatter RX datapath Mingxia Liu
2023-02-07 10:08 ` [PATCH v5 4/6] common/idpf: add rss_offload hash in singleq rx Mingxia Liu
2023-02-07 10:08 ` [PATCH v5 5/6] common/idpf: add alarm to support handle vchnl message Mingxia Liu
2023-02-07 10:08 ` [PATCH v5 6/6] common/idpf: add xstats ops Mingxia Liu