From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v6 19/19] common/idpf: refine API name for virtual channel functions
Date: Fri, 3 Feb 2023 09:43:40 +0000 [thread overview]
Message-ID: <20230203094340.8103-20-beilei.xing@intel.com> (raw)
In-Reply-To: <20230203094340.8103-1-beilei.xing@intel.com>
From: Beilei Xing <beilei.xing@intel.com>
This patch refines the API names for all virtual channel functions.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 24 ++++----
drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++-----------
drivers/common/idpf/idpf_common_virtchnl.h | 36 +++++------
drivers/common/idpf/version.map | 38 ++++++------
drivers/net/idpf/idpf_ethdev.c | 10 ++--
drivers/net/idpf/idpf_rxtx.c | 12 ++--
6 files changed, 95 insertions(+), 95 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index f17b7736ae..6c5f10a8ce 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -104,7 +104,7 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
uint16_t ptype_recvd = 0;
int ret;
- ret = idpf_vc_query_ptype_info(adapter);
+ ret = idpf_vc_ptype_info_query(adapter);
if (ret != 0) {
DRV_LOG(ERR, "Fail to query packet type information");
return ret;
@@ -115,7 +115,7 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
return -ENOMEM;
while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
- ret = idpf_vc_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+ ret = idpf_vc_one_msg_read(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
if (ret != 0) {
DRV_LOG(ERR, "Fail to get packet type information");
@@ -333,13 +333,13 @@ idpf_adapter_init(struct idpf_adapter *adapter)
goto err_mbx_resp;
}
- ret = idpf_vc_check_api_version(adapter);
+ ret = idpf_vc_api_version_check(adapter);
if (ret != 0) {
DRV_LOG(ERR, "Failed to check api version");
goto err_check_api;
}
- ret = idpf_vc_get_caps(adapter);
+ ret = idpf_vc_caps_get(adapter);
if (ret != 0) {
DRV_LOG(ERR, "Failed to get capabilities");
goto err_check_api;
@@ -382,7 +382,7 @@ idpf_vport_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info;
int i, type, ret;
- ret = idpf_vc_create_vport(vport, create_vport_info);
+ ret = idpf_vc_vport_create(vport, create_vport_info);
if (ret != 0) {
DRV_LOG(ERR, "Failed to create vport.");
goto err_create_vport;
@@ -483,7 +483,7 @@ idpf_vport_init(struct idpf_vport *vport,
rte_free(vport->rss_key);
vport->rss_key = NULL;
err_rss_key:
- idpf_vc_destroy_vport(vport);
+ idpf_vc_vport_destroy(vport);
err_create_vport:
return ret;
}
@@ -500,7 +500,7 @@ idpf_vport_deinit(struct idpf_vport *vport)
vport->dev_data = NULL;
- idpf_vc_destroy_vport(vport);
+ idpf_vc_vport_destroy(vport);
return 0;
}
@@ -509,19 +509,19 @@ idpf_vport_rss_config(struct idpf_vport *vport)
{
int ret;
- ret = idpf_vc_set_rss_key(vport);
+ ret = idpf_vc_rss_key_set(vport);
if (ret != 0) {
DRV_LOG(ERR, "Failed to configure RSS key");
return ret;
}
- ret = idpf_vc_set_rss_lut(vport);
+ ret = idpf_vc_rss_lut_set(vport);
if (ret != 0) {
DRV_LOG(ERR, "Failed to configure RSS lut");
return ret;
}
- ret = idpf_vc_set_rss_hash(vport);
+ ret = idpf_vc_rss_hash_set(vport);
if (ret != 0) {
DRV_LOG(ERR, "Failed to configure RSS hash");
return ret;
@@ -589,7 +589,7 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
}
vport->qv_map = qv_map;
- ret = idpf_vc_config_irq_map_unmap(vport, nb_rx_queues, true);
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
if (ret != 0) {
DRV_LOG(ERR, "config interrupt mapping failed");
goto config_irq_map_err;
@@ -608,7 +608,7 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
- idpf_vc_config_irq_map_unmap(vport, nb_rx_queues, false);
+ idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, false);
rte_free(vport->qv_map);
vport->qv_map = NULL;
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 299caa19f1..50e2ade89e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -159,7 +159,7 @@ idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
#define ASQ_DELAY_MS 10
int
-idpf_vc_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
+idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
uint8_t *buf)
{
int err = 0;
@@ -183,7 +183,7 @@ idpf_vc_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_le
}
int
-idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
+idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
{
int err = 0;
int i = 0;
@@ -218,7 +218,7 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
case VIRTCHNL2_OP_ALLOC_VECTORS:
case VIRTCHNL2_OP_DEALLOC_VECTORS:
/* for init virtchnl ops, need to poll the response */
- err = idpf_vc_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
+ err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
clear_cmd(adapter);
break;
case VIRTCHNL2_OP_GET_PTYPE_INFO:
@@ -251,7 +251,7 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
}
int
-idpf_vc_check_api_version(struct idpf_adapter *adapter)
+idpf_vc_api_version_check(struct idpf_adapter *adapter)
{
struct virtchnl2_version_info version, *pver;
struct idpf_cmd_info args;
@@ -267,7 +267,7 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0) {
DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL_OP_VERSION");
@@ -291,7 +291,7 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
}
int
-idpf_vc_get_caps(struct idpf_adapter *adapter)
+idpf_vc_caps_get(struct idpf_adapter *adapter)
{
struct virtchnl2_get_capabilities caps_msg;
struct idpf_cmd_info args;
@@ -341,7 +341,7 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0) {
DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
@@ -354,7 +354,7 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
}
int
-idpf_vc_create_vport(struct idpf_vport *vport,
+idpf_vc_vport_create(struct idpf_vport *vport,
struct virtchnl2_create_vport *create_vport_info)
{
struct idpf_adapter *adapter = vport->adapter;
@@ -378,7 +378,7 @@ idpf_vc_create_vport(struct idpf_vport *vport,
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0) {
DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
@@ -390,7 +390,7 @@ idpf_vc_create_vport(struct idpf_vport *vport,
}
int
-idpf_vc_destroy_vport(struct idpf_vport *vport)
+idpf_vc_vport_destroy(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_vport vc_vport;
@@ -406,7 +406,7 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
@@ -414,7 +414,7 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)
}
int
-idpf_vc_set_rss_key(struct idpf_vport *vport)
+idpf_vc_rss_key_set(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_rss_key *rss_key;
@@ -439,7 +439,7 @@ idpf_vc_set_rss_key(struct idpf_vport *vport)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
@@ -448,7 +448,7 @@ idpf_vc_set_rss_key(struct idpf_vport *vport)
}
int
-idpf_vc_set_rss_lut(struct idpf_vport *vport)
+idpf_vc_rss_lut_set(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_rss_lut *rss_lut;
@@ -473,7 +473,7 @@ idpf_vc_set_rss_lut(struct idpf_vport *vport)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
@@ -482,7 +482,7 @@ idpf_vc_set_rss_lut(struct idpf_vport *vport)
}
int
-idpf_vc_set_rss_hash(struct idpf_vport *vport)
+idpf_vc_rss_hash_set(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_rss_hash rss_hash;
@@ -500,7 +500,7 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
@@ -508,7 +508,7 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
}
int
-idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
+idpf_vc_irq_map_unmap_config(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_queue_vector_maps *map_info;
@@ -539,7 +539,7 @@ idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map
args.in_args_size = len;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
map ? "MAP" : "UNMAP");
@@ -549,7 +549,7 @@ idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map
}
int
-idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
+idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_alloc_vectors *alloc_vec;
@@ -569,7 +569,7 @@ idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
args.in_args_size = len;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
@@ -579,7 +579,7 @@ idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
}
int
-idpf_vc_dealloc_vectors(struct idpf_vport *vport)
+idpf_vc_vectors_dealloc(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_alloc_vectors *alloc_vec;
@@ -598,7 +598,7 @@ idpf_vc_dealloc_vectors(struct idpf_vport *vport)
args.in_args_size = len;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
@@ -634,7 +634,7 @@ idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
args.in_args_size = len;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
on ? "ENABLE" : "DISABLE");
@@ -644,7 +644,7 @@ idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
}
int
-idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
+idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
bool rx, bool on)
{
uint32_t type;
@@ -688,7 +688,7 @@ idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
#define IDPF_RXTX_QUEUE_CHUNKS_NUM 2
int
-idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
+idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_del_ena_dis_queues *queue_select;
@@ -746,7 +746,7 @@ idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
args.in_args_size = len;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
enable ? "ENABLE" : "DISABLE");
@@ -756,7 +756,7 @@ idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
}
int
-idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
+idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_vport vc_vport;
@@ -771,7 +771,7 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0) {
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
enable ? "ENABLE" : "DISABLE");
@@ -781,7 +781,7 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
}
int
-idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
+idpf_vc_ptype_info_query(struct idpf_adapter *adapter)
{
struct virtchnl2_get_ptype_info *ptype_info;
struct idpf_cmd_info args;
@@ -798,7 +798,7 @@ idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
args.in_args = (uint8_t *)ptype_info;
args.in_args_size = len;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
@@ -808,7 +808,7 @@ idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
#define IDPF_RX_BUF_STRIDE 64
int
-idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
+idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
@@ -887,7 +887,7 @@ idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
rte_free(vc_rxqs);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
@@ -896,7 +896,7 @@ idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
}
int
-idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
+idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_config_tx_queues *vc_txqs = NULL;
@@ -958,7 +958,7 @@ idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_vc_cmd_execute(adapter, &args);
rte_free(vc_txqs);
if (err != 0)
DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 07755d4923..dcd855c08c 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -9,44 +9,44 @@
#include <idpf_common_rxtx.h>
__rte_internal
-int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_vc_api_version_check(struct idpf_adapter *adapter);
__rte_internal
-int idpf_vc_get_caps(struct idpf_adapter *adapter);
+int idpf_vc_caps_get(struct idpf_adapter *adapter);
__rte_internal
-int idpf_vc_create_vport(struct idpf_vport *vport,
+int idpf_vc_vport_create(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
-int idpf_vc_destroy_vport(struct idpf_vport *vport);
+int idpf_vc_vport_destroy(struct idpf_vport *vport);
__rte_internal
-int idpf_vc_set_rss_key(struct idpf_vport *vport);
+int idpf_vc_rss_key_set(struct idpf_vport *vport);
__rte_internal
-int idpf_vc_set_rss_lut(struct idpf_vport *vport);
+int idpf_vc_rss_lut_set(struct idpf_vport *vport);
__rte_internal
-int idpf_vc_set_rss_hash(struct idpf_vport *vport);
+int idpf_vc_rss_hash_set(struct idpf_vport *vport);
__rte_internal
-int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
+int idpf_vc_irq_map_unmap_config(struct idpf_vport *vport,
uint16_t nb_rxq, bool map);
__rte_internal
-int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
+int idpf_vc_cmd_execute(struct idpf_adapter *adapter,
struct idpf_cmd_info *args);
__rte_internal
-int idpf_vc_switch_queue(struct idpf_vport *vport, uint16_t qid,
+int idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
bool rx, bool on);
__rte_internal
-int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
+int idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable);
__rte_internal
-int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable);
__rte_internal
-int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
+int idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors);
__rte_internal
-int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
+int idpf_vc_vectors_dealloc(struct idpf_vport *vport);
__rte_internal
-int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
+int idpf_vc_ptype_info_query(struct idpf_adapter *adapter);
__rte_internal
-int idpf_vc_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
+int idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops,
uint16_t buf_len, uint8_t *buf);
__rte_internal
-int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
+int idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
__rte_internal
-int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
+int idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index e37a40771b..1c35761611 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -31,6 +31,25 @@ INTERNAL {
idpf_qc_tx_thresh_check;
idpf_qc_txq_mbufs_release;
+ idpf_vc_api_version_check;
+ idpf_vc_caps_get;
+ idpf_vc_cmd_execute;
+ idpf_vc_irq_map_unmap_config;
+ idpf_vc_one_msg_read;
+ idpf_vc_ptype_info_query;
+ idpf_vc_queue_switch;
+ idpf_vc_queues_ena_dis;
+ idpf_vc_rss_hash_set;
+ idpf_vc_rss_key_set;
+ idpf_vc_rss_lut_set;
+ idpf_vc_rxq_config;
+ idpf_vc_txq_config;
+ idpf_vc_vectors_alloc;
+ idpf_vc_vectors_dealloc;
+ idpf_vc_vport_create;
+ idpf_vc_vport_destroy;
+ idpf_vc_vport_ena_dis;
+
idpf_vport_deinit;
idpf_vport_info_init;
idpf_vport_init;
@@ -38,24 +57,5 @@ INTERNAL {
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
- idpf_execute_vc_cmd;
- idpf_vc_alloc_vectors;
- idpf_vc_check_api_version;
- idpf_vc_config_irq_map_unmap;
- idpf_vc_config_rxq;
- idpf_vc_config_txq;
- idpf_vc_create_vport;
- idpf_vc_dealloc_vectors;
- idpf_vc_destroy_vport;
- idpf_vc_ena_dis_queues;
- idpf_vc_ena_dis_vport;
- idpf_vc_get_caps;
- idpf_vc_query_ptype_info;
- idpf_vc_read_one_msg;
- idpf_vc_set_rss_hash;
- idpf_vc_set_rss_key;
- idpf_vc_set_rss_lut;
- idpf_vc_switch_queue;
-
local: *;
};
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index b324c0dc83..33f5e90743 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -299,7 +299,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
goto err_vec;
}
- ret = idpf_vc_alloc_vectors(vport, req_vecs_num);
+ ret = idpf_vc_vectors_alloc(vport, req_vecs_num);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
goto err_vec;
@@ -321,7 +321,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
idpf_set_rx_function(dev);
idpf_set_tx_function(dev);
- ret = idpf_vc_ena_dis_vport(vport, true);
+ ret = idpf_vc_vport_ena_dis(vport, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
goto err_vport;
@@ -336,7 +336,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
err_startq:
idpf_vport_irq_unmap_config(vport, dev->data->nb_rx_queues);
err_irq:
- idpf_vc_dealloc_vectors(vport);
+ idpf_vc_vectors_dealloc(vport);
err_vec:
return ret;
}
@@ -349,13 +349,13 @@ idpf_dev_stop(struct rte_eth_dev *dev)
if (vport->stopped == 1)
return 0;
- idpf_vc_ena_dis_vport(vport, false);
+ idpf_vc_vport_ena_dis(vport, false);
idpf_stop_queues(dev);
idpf_vport_irq_unmap_config(vport, dev->data->nb_rx_queues);
- idpf_vc_dealloc_vectors(vport);
+ idpf_vc_vectors_dealloc(vport);
vport->stopped = 1;
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 41e91b16b6..f41783daea 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -566,7 +566,7 @@ idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
dev->data->rx_queues[rx_queue_id];
int err = 0;
- err = idpf_vc_config_rxq(vport, rxq);
+ err = idpf_vc_rxq_config(vport, rxq);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Rx queue %u", rx_queue_id);
return err;
@@ -580,7 +580,7 @@ idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
/* Ready to switch the queue on */
- err = idpf_vc_switch_queue(vport, rx_queue_id, true, true);
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, true);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
@@ -617,7 +617,7 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_config_txq(vport, txq);
+ err = idpf_vc_txq_config(vport, txq);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -631,7 +631,7 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
/* Ready to switch the queue on */
- err = idpf_vc_switch_queue(vport, tx_queue_id, false, true);
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, true);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
@@ -654,7 +654,7 @@ idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- err = idpf_vc_switch_queue(vport, rx_queue_id, true, false);
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -685,7 +685,7 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- err = idpf_vc_switch_queue(vport, tx_queue_id, false, false);
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
--
2.26.2
next prev parent reply other threads:[~2023-02-03 10:12 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <https://patches.dpdk.org/project/dpdk/cover/20230117072626.93796-1-beilei.xing@intel.com/>
2023-01-17 8:06 ` [PATCH v4 00/15] net/idpf: introduce idpf common module beilei.xing
2023-01-17 8:06 ` [PATCH v4 01/15] common/idpf: add adapter structure beilei.xing
2023-01-17 8:06 ` [PATCH v4 02/15] common/idpf: add vport structure beilei.xing
2023-01-17 8:06 ` [PATCH v4 03/15] common/idpf: add virtual channel functions beilei.xing
2023-01-18 4:00 ` Zhang, Qi Z
2023-01-18 4:10 ` Zhang, Qi Z
2023-01-17 8:06 ` [PATCH v4 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-01-17 8:06 ` [PATCH v4 05/15] common/idpf: add vport init/deinit beilei.xing
2023-01-17 8:06 ` [PATCH v4 06/15] common/idpf: add config RSS beilei.xing
2023-01-17 8:06 ` [PATCH v4 07/15] common/idpf: add irq map/unmap beilei.xing
2023-01-31 8:11 ` Wu, Jingjing
2023-01-17 8:06 ` [PATCH v4 08/15] common/idpf: support get packet type beilei.xing
2023-01-17 8:06 ` [PATCH v4 09/15] common/idpf: add vport info initialization beilei.xing
2023-01-31 8:24 ` Wu, Jingjing
2023-01-17 8:06 ` [PATCH v4 10/15] common/idpf: add vector flags in vport beilei.xing
2023-01-17 8:06 ` [PATCH v4 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-01-17 8:06 ` [PATCH v4 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-01-17 8:06 ` [PATCH v4 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-01-17 8:06 ` [PATCH v4 14/15] common/idpf: add vec queue setup beilei.xing
2023-01-17 8:06 ` [PATCH v4 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-02 9:53 ` [PATCH v5 00/15] net/idpf: introduce idpf common module beilei.xing
2023-02-02 9:53 ` [PATCH v5 01/15] common/idpf: add adapter structure beilei.xing
2023-02-02 9:53 ` [PATCH v5 02/15] common/idpf: add vport structure beilei.xing
2023-02-02 9:53 ` [PATCH v5 03/15] common/idpf: add virtual channel functions beilei.xing
2023-02-02 9:53 ` [PATCH v5 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-02 9:53 ` [PATCH v5 05/15] common/idpf: add vport init/deinit beilei.xing
2023-02-02 9:53 ` [PATCH v5 06/15] common/idpf: add config RSS beilei.xing
2023-02-02 9:53 ` [PATCH v5 07/15] common/idpf: add irq map/unmap beilei.xing
2023-02-02 9:53 ` [PATCH v5 08/15] common/idpf: support get packet type beilei.xing
2023-02-02 9:53 ` [PATCH v5 09/15] common/idpf: add vport info initialization beilei.xing
2023-02-02 9:53 ` [PATCH v5 10/15] common/idpf: add vector flags in vport beilei.xing
2023-02-02 9:53 ` [PATCH v5 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-02-02 9:53 ` [PATCH v5 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-02 9:53 ` [PATCH v5 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-02-02 9:53 ` [PATCH v5 14/15] common/idpf: add vec queue setup beilei.xing
2023-02-02 9:53 ` [PATCH v5 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03 9:43 ` [PATCH v6 00/19] net/idpf: introduce idpf common module beilei.xing
2023-02-03 9:43 ` [PATCH v6 01/19] common/idpf: add adapter structure beilei.xing
2023-02-03 9:43 ` [PATCH v6 02/19] common/idpf: add vport structure beilei.xing
2023-02-03 9:43 ` [PATCH v6 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-03 9:43 ` [PATCH v6 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-03 9:43 ` [PATCH v6 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-03 9:43 ` [PATCH v6 06/19] common/idpf: add config RSS beilei.xing
2023-02-03 9:43 ` [PATCH v6 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-03 9:43 ` [PATCH v6 08/19] common/idpf: support get packet type beilei.xing
2023-02-03 9:43 ` [PATCH v6 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-03 9:43 ` [PATCH v6 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-03 9:43 ` [PATCH v6 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-03 9:43 ` [PATCH v6 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-03 9:43 ` [PATCH v6 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-03 9:43 ` [PATCH v6 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-03 9:43 ` [PATCH v6 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03 9:43 ` [PATCH v6 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-03 9:43 ` [PATCH v6 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-03 9:43 ` [PATCH v6 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-03 9:43 ` beilei.xing [this message]
2023-02-06 2:58 ` [PATCH v6 00/19] net/idpf: introduce idpf common module Zhang, Qi Z
2023-02-06 6:16 ` Xing, Beilei
2023-02-06 5:45 ` [PATCH v7 " 00/19] net/idpf: introduce idpf common module beilei.xing
2023-02-06 5:46 ` [PATCH v7 01/19] common/idpf: add adapter structure beilei.xing
2023-02-06 5:46 ` [PATCH v7 02/19] common/idpf: add vport structure beilei.xing
2023-02-06 5:46 ` [PATCH v7 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-06 5:46 ` [PATCH v7 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-06 5:46 ` [PATCH v7 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-06 5:46 ` [PATCH v7 06/19] common/idpf: add config RSS beilei.xing
2023-02-06 5:46 ` [PATCH v7 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-06 5:46 ` [PATCH v7 08/19] common/idpf: support get packet type beilei.xing
2023-02-06 5:46 ` [PATCH v7 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-06 5:46 ` [PATCH v7 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-06 5:46 ` [PATCH v7 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-06 5:46 ` [PATCH v7 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-06 5:46 ` [PATCH v7 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-06 5:46 ` [PATCH v7 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-06 5:46 ` [PATCH v7 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-06 5:46 ` [PATCH v7 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-06 5:46 ` [PATCH v7 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-06 5:46 ` [PATCH v7 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-06 5:46 ` [PATCH v7 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06 13:15 ` [PATCH v7 00/19] net/idpf: introduce idpf common module Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230203094340.8103-20-beilei.xing@intel.com \
--to=beilei.xing@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).