From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com,
Beilei Xing <beilei.xing@intel.com>,
Wenjun Wu <wenjun1.wu@intel.com>
Subject: [PATCH v4 01/15] common/idpf: add adapter structure
Date: Tue, 17 Jan 2023 08:06:08 +0000 [thread overview]
Message-ID: <20230117080622.105657-2-beilei.xing@intel.com> (raw)
In-Reply-To: <20230117080622.105657-1-beilei.xing@intel.com>
From: Beilei Xing <beilei.xing@intel.com>
Add structure idpf_adapter in the common module; the structure includes
some basic fields.
Introduce structure idpf_adapter_ext in the PMD; this structure includes
the extra fields in addition to idpf_adapter.
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.h | 20 ++++++
drivers/net/idpf/idpf_ethdev.c | 91 ++++++++++--------------
drivers/net/idpf/idpf_ethdev.h | 25 +++----
drivers/net/idpf/idpf_rxtx.c | 16 ++---
drivers/net/idpf/idpf_rxtx.h | 4 +-
drivers/net/idpf/idpf_rxtx_vec_avx512.c | 3 +-
drivers/net/idpf/idpf_vchnl.c | 30 ++++----
7 files changed, 99 insertions(+), 90 deletions(-)
create mode 100644 drivers/common/idpf/idpf_common_device.h
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
new file mode 100644
index 0000000000..4f548a7185
--- /dev/null
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_COMMON_DEVICE_H_
+#define _IDPF_COMMON_DEVICE_H_
+
+#include <base/idpf_prototype.h>
+#include <base/virtchnl2.h>
+
+struct idpf_adapter {
+ struct idpf_hw hw;
+ struct virtchnl2_version_info virtchnl_version;
+ struct virtchnl2_get_capabilities caps;
+ volatile uint32_t pend_cmd; /* pending command not finished */
+ uint32_t cmd_retval; /* return value of the cmd response from cp */
+ uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
+};
+
+#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 3f1b77144c..1b13d081a7 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -53,8 +53,8 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_adapter *adapter = vport->adapter;
- dev_info->max_rx_queues = adapter->caps->max_rx_q;
- dev_info->max_tx_queues = adapter->caps->max_tx_q;
+ dev_info->max_rx_queues = adapter->caps.max_rx_q;
+ dev_info->max_tx_queues = adapter->caps.max_tx_q;
dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
dev_info->max_rx_pktlen = vport->max_mtu + IDPF_ETH_OVERHEAD;
@@ -147,7 +147,7 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev,
struct virtchnl2_create_vport *vport_info)
{
struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(vport->adapter);
vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
if (adapter->txq_model == 0) {
@@ -379,7 +379,7 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return -ENOTSUP;
}
- if (adapter->caps->rss_caps != 0 && dev->data->nb_rx_queues != 0) {
+ if (adapter->caps.rss_caps != 0 && dev->data->nb_rx_queues != 0) {
ret = idpf_init_rss(vport);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to init rss");
@@ -420,7 +420,7 @@ idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
/* Rx interrupt disabled, Map interrupt only for writeback */
- /* The capability flags adapter->caps->other_caps should be
+ /* The capability flags adapter->caps.other_caps should be
* compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
* condition should be updated when the FW can return the
* correct flag bits.
@@ -518,9 +518,9 @@ static int
idpf_dev_start(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_adapter *adapter = vport->adapter;
- uint16_t num_allocated_vectors =
- adapter->caps->num_allocated_vectors;
+ struct idpf_adapter *base = vport->adapter;
+ struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(base);
+ uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
uint16_t req_vecs_num;
int ret;
@@ -596,7 +596,7 @@ static int
idpf_dev_close(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(vport->adapter);
idpf_dev_stop(dev);
@@ -728,7 +728,7 @@ parse_bool(const char *key, const char *value, void *args)
}
static int
-idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter,
+idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adapter,
struct idpf_devargs *idpf_args)
{
struct rte_devargs *devargs = pci_dev->device.devargs;
@@ -875,14 +875,14 @@ idpf_init_mbx(struct idpf_hw *hw)
}
static int
-idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
+idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adapter)
{
- struct idpf_hw *hw = &adapter->hw;
+ struct idpf_hw *hw = &adapter->base.hw;
int ret = 0;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->hw_addr_len = pci_dev->mem_resource[0].len;
- hw->back = adapter;
+ hw->back = &adapter->base;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
@@ -902,15 +902,15 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
goto err;
}
- adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
- IDPF_DFLT_MBX_BUF_SIZE, 0);
- if (adapter->mbx_resp == NULL) {
+ adapter->base.mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
+ IDPF_DFLT_MBX_BUF_SIZE, 0);
+ if (adapter->base.mbx_resp == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
ret = -ENOMEM;
goto err_mbx;
}
- ret = idpf_vc_check_api_version(adapter);
+ ret = idpf_vc_check_api_version(&adapter->base);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to check api version");
goto err_api;
@@ -922,21 +922,13 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
goto err_api;
}
- adapter->caps = rte_zmalloc("idpf_caps",
- sizeof(struct virtchnl2_get_capabilities), 0);
- if (adapter->caps == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate idpf_caps memory");
- ret = -ENOMEM;
- goto err_api;
- }
-
- ret = idpf_vc_get_caps(adapter);
+ ret = idpf_vc_get_caps(&adapter->base);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to get capabilities");
- goto err_caps;
+ goto err_api;
}
- adapter->max_vport_nb = adapter->caps->max_vports;
+ adapter->max_vport_nb = adapter->base.caps.max_vports;
adapter->vports = rte_zmalloc("vports",
adapter->max_vport_nb *
@@ -945,7 +937,7 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
if (adapter->vports == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate vports memory");
ret = -ENOMEM;
- goto err_vports;
+ goto err_api;
}
adapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
@@ -962,13 +954,9 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
return ret;
-err_vports:
-err_caps:
- rte_free(adapter->caps);
- adapter->caps = NULL;
err_api:
- rte_free(adapter->mbx_resp);
- adapter->mbx_resp = NULL;
+ rte_free(adapter->base.mbx_resp);
+ adapter->base.mbx_resp = NULL;
err_mbx:
idpf_ctlq_deinit(hw);
err:
@@ -995,7 +983,7 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
};
static uint16_t
-idpf_vport_idx_alloc(struct idpf_adapter *ad)
+idpf_vport_idx_alloc(struct idpf_adapter_ext *ad)
{
uint16_t vport_idx;
uint16_t i;
@@ -1018,13 +1006,13 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_vport_param *param = init_params;
- struct idpf_adapter *adapter = param->adapter;
+ struct idpf_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport vport_req_info;
int ret = 0;
dev->dev_ops = &idpf_eth_dev_ops;
- vport->adapter = adapter;
+ vport->adapter = &adapter->base;
vport->sw_idx = param->idx;
vport->devarg_id = param->devarg_id;
@@ -1085,10 +1073,10 @@ static const struct rte_pci_id pci_id_idpf_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-struct idpf_adapter *
-idpf_find_adapter(struct rte_pci_device *pci_dev)
+struct idpf_adapter_ext *
+idpf_find_adapter_ext(struct rte_pci_device *pci_dev)
{
- struct idpf_adapter *adapter;
+ struct idpf_adapter_ext *adapter;
int found = 0;
if (pci_dev == NULL)
@@ -1110,17 +1098,14 @@ idpf_find_adapter(struct rte_pci_device *pci_dev)
}
static void
-idpf_adapter_rel(struct idpf_adapter *adapter)
+idpf_adapter_rel(struct idpf_adapter_ext *adapter)
{
- struct idpf_hw *hw = &adapter->hw;
+ struct idpf_hw *hw = &adapter->base.hw;
idpf_ctlq_deinit(hw);
- rte_free(adapter->caps);
- adapter->caps = NULL;
-
- rte_free(adapter->mbx_resp);
- adapter->mbx_resp = NULL;
+ rte_free(adapter->base.mbx_resp);
+ adapter->base.mbx_resp = NULL;
rte_free(adapter->vports);
adapter->vports = NULL;
@@ -1131,7 +1116,7 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
struct idpf_vport_param vport_param;
- struct idpf_adapter *adapter;
+ struct idpf_adapter_ext *adapter;
struct idpf_devargs devargs;
char name[RTE_ETH_NAME_MAX_LEN];
int i, retval;
@@ -1143,11 +1128,11 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
idpf_adapter_list_init = true;
}
- adapter = idpf_find_adapter(pci_dev);
+ adapter = idpf_find_adapter_ext(pci_dev);
if (adapter == NULL) {
first_probe = true;
- adapter = rte_zmalloc("idpf_adapter",
- sizeof(struct idpf_adapter), 0);
+ adapter = rte_zmalloc("idpf_adapter_ext",
+ sizeof(struct idpf_adapter_ext), 0);
if (adapter == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate adapter.");
return -ENOMEM;
@@ -1225,7 +1210,7 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int
idpf_pci_remove(struct rte_pci_device *pci_dev)
{
- struct idpf_adapter *adapter = idpf_find_adapter(pci_dev);
+ struct idpf_adapter_ext *adapter = idpf_find_adapter_ext(pci_dev);
uint16_t port_id;
/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index b0746e5041..e956fa989c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -15,6 +15,7 @@
#include "idpf_logs.h"
+#include <idpf_common_device.h>
#include <base/idpf_prototype.h>
#include <base/virtchnl2.h>
@@ -91,7 +92,7 @@ struct idpf_chunks_info {
};
struct idpf_vport_param {
- struct idpf_adapter *adapter;
+ struct idpf_adapter_ext *adapter;
uint16_t devarg_id; /* arg id from user */
uint16_t idx; /* index in adapter->vports[]*/
};
@@ -144,17 +145,11 @@ struct idpf_devargs {
uint16_t req_vport_nb;
};
-struct idpf_adapter {
- TAILQ_ENTRY(idpf_adapter) next;
- struct idpf_hw hw;
- char name[IDPF_ADAPTER_NAME_LEN];
-
- struct virtchnl2_version_info virtchnl_version;
- struct virtchnl2_get_capabilities *caps;
+struct idpf_adapter_ext {
+ TAILQ_ENTRY(idpf_adapter_ext) next;
+ struct idpf_adapter base;
- volatile uint32_t pend_cmd; /* pending command not finished */
- uint32_t cmd_retval; /* return value of the cmd response from ipf */
- uint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */
+ char name[IDPF_ADAPTER_NAME_LEN];
uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
@@ -182,10 +177,12 @@ struct idpf_adapter {
uint64_t time_hw;
};
-TAILQ_HEAD(idpf_adapter_list, idpf_adapter);
+TAILQ_HEAD(idpf_adapter_list, idpf_adapter_ext);
#define IDPF_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
+#define IDPF_ADAPTER_TO_EXT(p) \
+ container_of((p), struct idpf_adapter_ext, base)
/* structure used for sending and checking response of virtchnl ops */
struct idpf_cmd_info {
@@ -234,10 +231,10 @@ atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
return !ret;
}
-struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
+struct idpf_adapter_ext *idpf_find_adapter_ext(struct rte_pci_device *pci_dev);
void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int idpf_vc_check_api_version(struct idpf_adapter *adapter);
-int idpf_get_pkt_type(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter_ext *adapter);
int idpf_vc_get_caps(struct idpf_adapter *adapter);
int idpf_vc_create_vport(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 5aef8ba2b6..4845f2ea0a 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1384,7 +1384,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct idpf_rx_queue *rxq;
const uint32_t *ptype_tbl;
uint8_t status_err0_qw1;
- struct idpf_adapter *ad;
+ struct idpf_adapter_ext *ad;
struct rte_mbuf *rxm;
uint16_t rx_id_bufq1;
uint16_t rx_id_bufq2;
@@ -1398,7 +1398,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_rx = 0;
rxq = rx_queue;
- ad = rxq->adapter;
+ ad = IDPF_ADAPTER_TO_EXT(rxq->adapter);
if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
return nb_rx;
@@ -1407,7 +1407,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_id_bufq1 = rxq->bufq1->rx_next_avail;
rx_id_bufq2 = rxq->bufq2->rx_next_avail;
rx_desc_ring = rxq->rx_ring;
- ptype_tbl = rxq->adapter->ptype_tbl;
+ ptype_tbl = ad->ptype_tbl;
if ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
rxq->hw_register_set = 1;
@@ -1791,7 +1791,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
const uint32_t *ptype_tbl;
uint16_t rx_id, nb_hold;
struct rte_eth_dev *dev;
- struct idpf_adapter *ad;
+ struct idpf_adapter_ext *ad;
uint16_t rx_packet_len;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
@@ -1805,14 +1805,14 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
rxq = rx_queue;
- ad = rxq->adapter;
+ ad = IDPF_ADAPTER_TO_EXT(rxq->adapter);
if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
return nb_rx;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
- ptype_tbl = rxq->adapter->ptype_tbl;
+ ptype_tbl = ad->ptype_tbl;
if ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
rxq->hw_register_set = 1;
@@ -2221,7 +2221,7 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
#ifdef RTE_ARCH_X86
- struct idpf_adapter *ad = vport->adapter;
+ struct idpf_adapter_ext *ad = IDPF_ADAPTER_TO_EXT(vport->adapter);
struct idpf_rx_queue *rxq;
int i;
@@ -2275,7 +2275,7 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
#ifdef RTE_ARCH_X86
- struct idpf_adapter *ad = vport->adapter;
+ struct idpf_adapter_ext *ad = IDPF_ADAPTER_TO_EXT(vport->adapter);
#ifdef CC_AVX512_SUPPORT
struct idpf_tx_queue *txq;
int i;
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 730dc64ebc..047fc03614 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -247,11 +247,11 @@ void idpf_set_tx_function(struct rte_eth_dev *dev);
/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
+idpf_tstamp_convert_32b_64b(struct idpf_adapter_ext *ad, uint32_t flag,
uint32_t in_timestamp)
{
#ifdef RTE_ARCH_X86_64
- struct idpf_hw *hw = &ad->hw;
+ struct idpf_hw *hw = &ad->base.hw;
const uint64_t mask = 0xFFFFFFFF;
uint32_t hi, lo, lo2, delta;
uint64_t ns;
diff --git a/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/drivers/net/idpf/idpf_rxtx_vec_avx512.c
index fb2b6bb53c..efa7cd2187 100644
--- a/drivers/net/idpf/idpf_rxtx_vec_avx512.c
+++ b/drivers/net/idpf/idpf_rxtx_vec_avx512.c
@@ -245,7 +245,8 @@ _idpf_singleq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- const uint32_t *type_table = rxq->adapter->ptype_tbl;
+ struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(rxq->adapter);
+ const uint32_t *type_table = adapter->ptype_tbl;
const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
rxq->mbuf_initializer);
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 14b34619af..ca481bb915 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -311,13 +311,17 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
}
int __rte_cold
-idpf_get_pkt_type(struct idpf_adapter *adapter)
+idpf_get_pkt_type(struct idpf_adapter_ext *adapter)
{
struct virtchnl2_get_ptype_info *ptype_info;
- uint16_t ptype_recvd = 0, ptype_offset, i, j;
+ struct idpf_adapter *base;
+ uint16_t ptype_offset, i, j;
+ uint16_t ptype_recvd = 0;
int ret;
- ret = idpf_vc_query_ptype_info(adapter);
+ base = &adapter->base;
+
+ ret = idpf_vc_query_ptype_info(base);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Fail to query packet type information");
return ret;
@@ -328,7 +332,7 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
return -ENOMEM;
while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
- ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+ ret = idpf_read_one_msg(base, VIRTCHNL2_OP_GET_PTYPE_INFO,
IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Fail to get packet type information");
@@ -515,7 +519,7 @@ idpf_get_pkt_type(struct idpf_adapter *adapter)
free_ptype_info:
rte_free(ptype_info);
- clear_cmd(adapter);
+ clear_cmd(base);
return ret;
}
@@ -577,7 +581,7 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
return err;
}
- rte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));
+ rte_memcpy(&adapter->caps, args.out_buffer, sizeof(caps_msg));
return 0;
}
@@ -740,7 +744,8 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
int
idpf_vc_config_rxqs(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_adapter *base = vport->adapter;
+ struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(base);
struct idpf_rx_queue **rxq =
(struct idpf_rx_queue **)vport->dev_data->rx_queues;
struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
@@ -832,10 +837,10 @@ idpf_vc_config_rxqs(struct idpf_vport *vport)
args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
args.in_args = (uint8_t *)vc_rxqs;
args.in_args_size = size;
- args.out_buffer = adapter->mbx_resp;
+ args.out_buffer = base->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_execute_vc_cmd(base, &args);
rte_free(vc_rxqs);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
@@ -940,7 +945,8 @@ idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
int
idpf_vc_config_txqs(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_adapter *base = vport->adapter;
+ struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(base);
struct idpf_tx_queue **txq =
(struct idpf_tx_queue **)vport->dev_data->tx_queues;
struct virtchnl2_config_tx_queues *vc_txqs = NULL;
@@ -1010,10 +1016,10 @@ idpf_vc_config_txqs(struct idpf_vport *vport)
args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
args.in_args = (uint8_t *)vc_txqs;
args.in_args_size = size;
- args.out_buffer = adapter->mbx_resp;
+ args.out_buffer = base->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
- err = idpf_execute_vc_cmd(adapter, &args);
+ err = idpf_execute_vc_cmd(base, &args);
rte_free(vc_txqs);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
--
2.26.2
next prev parent reply other threads:[~2023-01-17 8:30 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <https://patches.dpdk.org/project/dpdk/cover/20230117072626.93796-1-beilei.xing@intel.com/>
2023-01-17 8:06 ` [PATCH v4 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-01-17 8:06 ` beilei.xing [this message]
2023-01-17 8:06 ` [PATCH v4 02/15] common/idpf: add vport structure beilei.xing
2023-01-17 8:06 ` [PATCH v4 03/15] common/idpf: add virtual channel functions beilei.xing
2023-01-18 4:00 ` Zhang, Qi Z
2023-01-18 4:10 ` Zhang, Qi Z
2023-01-17 8:06 ` [PATCH v4 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-01-17 8:06 ` [PATCH v4 05/15] common/idpf: add vport init/deinit beilei.xing
2023-01-17 8:06 ` [PATCH v4 06/15] common/idpf: add config RSS beilei.xing
2023-01-17 8:06 ` [PATCH v4 07/15] common/idpf: add irq map/unmap beilei.xing
2023-01-31 8:11 ` Wu, Jingjing
2023-01-17 8:06 ` [PATCH v4 08/15] common/idpf: support get packet type beilei.xing
2023-01-17 8:06 ` [PATCH v4 09/15] common/idpf: add vport info initialization beilei.xing
2023-01-31 8:24 ` Wu, Jingjing
2023-01-17 8:06 ` [PATCH v4 10/15] common/idpf: add vector flags in vport beilei.xing
2023-01-17 8:06 ` [PATCH v4 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-01-17 8:06 ` [PATCH v4 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-01-17 8:06 ` [PATCH v4 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-01-17 8:06 ` [PATCH v4 14/15] common/idpf: add vec queue setup beilei.xing
2023-01-17 8:06 ` [PATCH v4 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-02 9:53 ` [PATCH v5 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-02-02 9:53 ` [PATCH v5 01/15] common/idpf: add adapter structure beilei.xing
2023-02-02 9:53 ` [PATCH v5 02/15] common/idpf: add vport structure beilei.xing
2023-02-02 9:53 ` [PATCH v5 03/15] common/idpf: add virtual channel functions beilei.xing
2023-02-02 9:53 ` [PATCH v5 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-02 9:53 ` [PATCH v5 05/15] common/idpf: add vport init/deinit beilei.xing
2023-02-02 9:53 ` [PATCH v5 06/15] common/idpf: add config RSS beilei.xing
2023-02-02 9:53 ` [PATCH v5 07/15] common/idpf: add irq map/unmap beilei.xing
2023-02-02 9:53 ` [PATCH v5 08/15] common/idpf: support get packet type beilei.xing
2023-02-02 9:53 ` [PATCH v5 09/15] common/idpf: add vport info initialization beilei.xing
2023-02-02 9:53 ` [PATCH v5 10/15] common/idpf: add vector flags in vport beilei.xing
2023-02-02 9:53 ` [PATCH v5 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-02-02 9:53 ` [PATCH v5 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-02 9:53 ` [PATCH v5 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-02-02 9:53 ` [PATCH v5 14/15] common/idpf: add vec queue setup beilei.xing
2023-02-02 9:53 ` [PATCH v5 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03 9:43 ` [PATCH v6 00/19] net/idpf: introduce idpf common modle beilei.xing
2023-02-03 9:43 ` [PATCH v6 01/19] common/idpf: add adapter structure beilei.xing
2023-02-03 9:43 ` [PATCH v6 02/19] common/idpf: add vport structure beilei.xing
2023-02-03 9:43 ` [PATCH v6 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-03 9:43 ` [PATCH v6 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-03 9:43 ` [PATCH v6 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-03 9:43 ` [PATCH v6 06/19] common/idpf: add config RSS beilei.xing
2023-02-03 9:43 ` [PATCH v6 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-03 9:43 ` [PATCH v6 08/19] common/idpf: support get packet type beilei.xing
2023-02-03 9:43 ` [PATCH v6 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-03 9:43 ` [PATCH v6 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-03 9:43 ` [PATCH v6 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-03 9:43 ` [PATCH v6 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-03 9:43 ` [PATCH v6 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-03 9:43 ` [PATCH v6 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-03 9:43 ` [PATCH v6 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03 9:43 ` [PATCH v6 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-03 9:43 ` [PATCH v6 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-03 9:43 ` [PATCH v6 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-03 9:43 ` [PATCH v6 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06 2:58 ` [PATCH v6 00/19] net/idpf: introduce idpf common modle Zhang, Qi Z
2023-02-06 6:16 ` Xing, Beilei
2023-02-06 5:45 ` [PATCH v7 " beilei.xing
2023-02-06 5:46 ` [PATCH v7 01/19] common/idpf: add adapter structure beilei.xing
2023-02-06 5:46 ` [PATCH v7 02/19] common/idpf: add vport structure beilei.xing
2023-02-06 5:46 ` [PATCH v7 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-06 5:46 ` [PATCH v7 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-06 5:46 ` [PATCH v7 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-06 5:46 ` [PATCH v7 06/19] common/idpf: add config RSS beilei.xing
2023-02-06 5:46 ` [PATCH v7 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-06 5:46 ` [PATCH v7 08/19] common/idpf: support get packet type beilei.xing
2023-02-06 5:46 ` [PATCH v7 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-06 5:46 ` [PATCH v7 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-06 5:46 ` [PATCH v7 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-06 5:46 ` [PATCH v7 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-06 5:46 ` [PATCH v7 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-06 5:46 ` [PATCH v7 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-06 5:46 ` [PATCH v7 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-06 5:46 ` [PATCH v7 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-06 5:46 ` [PATCH v7 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-06 5:46 ` [PATCH v7 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-06 5:46 ` [PATCH v7 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06 13:15 ` [PATCH v7 00/19] net/idpf: introduce idpf common modle Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230117080622.105657-2-beilei.xing@intel.com \
--to=beilei.xing@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=qi.z.zhang@intel.com \
--cc=wenjun1.wu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).