From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com,
Beilei Xing <beilei.xing@intel.com>,
Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH v5 09/10] net/cpfl: create port representor
Date: Tue, 12 Sep 2023 16:26:39 +0000 [thread overview]
Message-ID: <20230912162640.1439383-10-beilei.xing@intel.com> (raw)
In-Reply-To: <20230912162640.1439383-1-beilei.xing@intel.com>
From: Beilei Xing <beilei.xing@intel.com>
Track representor requests in the allowlist.
Representors will only be created for active vports.
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 117 +++---
drivers/net/cpfl/cpfl_ethdev.h | 39 +-
drivers/net/cpfl/cpfl_representor.c | 581 ++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_representor.h | 26 ++
drivers/net/cpfl/meson.build | 1 +
5 files changed, 715 insertions(+), 49 deletions(-)
create mode 100644 drivers/net/cpfl/cpfl_representor.c
create mode 100644 drivers/net/cpfl/cpfl_representor.h
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 47c4c5c796..375bc8098c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1645,10 +1645,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
}
}
-static int
+int
cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
struct cpfl_vport_id *vport_identity,
- struct cpchnl2_vport_info *vport_info)
+ struct cpchnl2_event_vport_created *vport_created)
{
struct cpfl_vport_info *info = NULL;
int ret;
@@ -1659,7 +1659,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
/* overwrite info */
if (info)
- info->vport_info = *vport_info;
+ info->vport = *vport_created;
goto fini;
}
@@ -1670,7 +1670,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
goto err;
}
- info->vport_info = *vport_info;
+ info->vport = *vport_created;
ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
if (ret < 0) {
@@ -1696,7 +1696,7 @@ cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *
rte_spinlock_lock(&adapter->vport_map_lock);
ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "vport id not exist");
+ PMD_DRV_LOG(ERR, "vport id does not exist");
goto err;
}
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
rte_hash_free(adapter->vport_map_hash);
}
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+ char hname[32];
+
+ snprintf(hname, 32, "%s-repr_al", adapter->name);
+
+ rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+ struct rte_hash_parameters params = {
+ .name = hname,
+ .entries = CPFL_REPR_HASH_ENTRY_NUM,
+ .key_len = sizeof(struct cpfl_repr_id),
+ .hash_func = rte_hash_crc,
+ .socket_id = SOCKET_ID_ANY,
+ };
+
+ adapter->repr_allowlist_hash = rte_hash_create(¶ms);
+
+ if (adapter->repr_allowlist_hash == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+ rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
static int
cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
{
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
goto err_vport_map_init;
}
+ ret = cpfl_repr_allowlist_init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+ goto err_repr_allowlist_init;
+ }
+
rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+ cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
cpfl_vport_map_uninit(adapter);
err_vport_map_init:
idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
return 0;
}
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
- struct cpfl_devargs *devargs = &adapter->devargs;
- int i, j;
-
- /* check and refine repr args */
- for (i = 0; i < devargs->repr_args_num; i++) {
- struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
- /* set default host_id to xeon host */
- if (eth_da->nb_mh_controllers == 0) {
- eth_da->nb_mh_controllers = 1;
- eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
- } else {
- for (j = 0; j < eth_da->nb_mh_controllers; j++) {
- if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
- PMD_INIT_LOG(ERR, "Invalid Host ID %d",
- eth_da->mh_controllers[j]);
- return -EINVAL;
- }
- }
- }
-
- /* set default pf to APF */
- if (eth_da->nb_ports == 0) {
- eth_da->nb_ports = 1;
- eth_da->ports[0] = CPFL_PF_TYPE_APF;
- } else {
- for (j = 0; j < eth_da->nb_ports; j++) {
- if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
- PMD_INIT_LOG(ERR, "Invalid Host ID %d",
- eth_da->ports[j]);
- return -EINVAL;
- }
- }
- }
- }
-
- return 0;
-}
-
static int
cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
{
@@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
{
struct cpfl_adapter_ext *adapter;
int retval;
+ uint16_t port_id;
adapter = rte_zmalloc("cpfl_adapter_ext",
sizeof(struct cpfl_adapter_ext), 0);
@@ -2343,11 +2346,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
retval = cpfl_repr_devargs_process(adapter);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to process repr devargs");
- goto err;
+ goto close_ethdev;
}
+ retval = cpfl_repr_create(pci_dev, adapter);
+ if (retval != 0) {
+ PMD_INIT_LOG(ERR, "Failed to create representors ");
+ goto close_ethdev;
+ }
+
+
return 0;
+close_ethdev:
+ /* Ethdevs created above can be found via RTE_ETH_FOREACH_DEV_OF through rte_device */
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+ rte_eth_dev_close(port_id);
+ }
err:
rte_spinlock_lock(&cpfl_adapter_lock);
TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2374,6 +2389,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
return ret;
}
+ ret = cpfl_repr_create(pci_dev, adapter);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to create representors ");
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b03666f5ea..a4ffd51fb3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
#include "cpfl_logs.h"
#include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
/* Currently, backend supports up to 8 vports */
#define CPFL_MAX_VPORT_NUM 8
@@ -60,11 +61,31 @@
#define IDPF_DEV_ID_CPF 0x1453
#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
+#define CPFL_HOST_ID_NUM 2
+#define CPFL_PF_TYPE_NUM 2
#define CPFL_HOST_ID_HOST 0
#define CPFL_HOST_ID_ACC 1
#define CPFL_PF_TYPE_APF 0
#define CPFL_PF_TYPE_CPF 1
+/* Function IDs on IMC side */
+#define CPFL_HOST0_APF 0
+#define CPFL_ACC_APF_ID 4
+#define CPFL_HOST0_CPF_ID 8
+#define CPFL_ACC_CPF_ID 12
+
+#define CPFL_VPORT_LAN_PF 0
+#define CPFL_VPORT_LAN_VF 1
+
+/* bit[15:14] type
+ * bit[13] host/accelerator core
+ * bit[12] apf/cpf
+ * bit[11:0] vf
+ */
+#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id) \
+ ((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) + \
+ (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))
+
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
uint16_t devarg_id; /* arg id from user */
@@ -104,12 +125,13 @@ struct cpfl_vport_id {
};
struct cpfl_vport_info {
- struct cpchnl2_vport_info vport_info;
+ struct cpchnl2_event_vport_created vport;
bool enabled;
};
enum cpfl_itf_type {
CPFL_ITF_TYPE_VPORT,
+ CPFL_ITF_TYPE_REPRESENTOR,
};
struct cpfl_itf {
@@ -135,6 +157,13 @@ struct cpfl_vport {
bool p2p_manual_bind;
};
+struct cpfl_repr {
+ struct cpfl_itf itf;
+ struct cpfl_repr_id repr_id;
+ struct rte_ether_addr mac_addr;
+ struct cpfl_vport_info *vport_info;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
@@ -152,10 +181,16 @@ struct cpfl_adapter_ext {
rte_spinlock_t vport_map_lock;
struct rte_hash *vport_map_hash;
+
+ rte_spinlock_t repr_lock;
+ struct rte_hash *repr_allowlist_hash;
};
TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+ struct cpfl_vport_id *vport_identity,
+ struct cpchnl2_event_vport_created *vport);
int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
struct cpfl_vport_id *vi,
struct cpchnl2_get_vport_list_response *response);
@@ -170,6 +205,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
container_of((p), struct cpfl_adapter_ext, base)
#define CPFL_DEV_TO_VPORT(dev) \
((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev) \
+ ((struct cpfl_repr *)((dev)->data->dev_private))
#define CPFL_DEV_TO_ITF(dev) \
((struct cpfl_itf *)((dev)->data->dev_private))
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..d2558c39a8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+ struct cpfl_repr_id *repr_id,
+ struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+ return -ENOENT;
+
+ ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+ return ret;
+}
+
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+ struct cpfl_repr_id *repr_id)
+{
+ int ret;
+
+ rte_spinlock_lock(&adapter->repr_lock);
+ if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+ ret = -EEXIST;
+ goto err;
+ }
+
+ ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+ if (ret < 0)
+ goto err;
+
+ rte_spinlock_unlock(&adapter->repr_lock);
+ return 0;
+err:
+ rte_spinlock_unlock(&adapter->repr_lock);
+ return ret;
+}
+
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+ struct rte_eth_devargs *eth_da)
+{
+ struct cpfl_repr_id repr_id;
+ int ret, c, p, v;
+
+ for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+ for (p = 0; p < eth_da->nb_ports; p++) {
+ repr_id.type = eth_da->type;
+ if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+ repr_id.host_id = eth_da->mh_controllers[c];
+ repr_id.pf_id = eth_da->ports[p];
+ repr_id.vf_id = 0;
+ ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+ if (ret == -EEXIST)
+ continue;
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+ "host_id = %d, pf_id = %d.",
+ repr_id.host_id, repr_id.pf_id);
+ return ret;
+ }
+ } else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+ for (v = 0; v < eth_da->nb_representor_ports; v++) {
+ repr_id.host_id = eth_da->mh_controllers[c];
+ repr_id.pf_id = eth_da->ports[p];
+ repr_id.vf_id = eth_da->representor_ports[v];
+ ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+ if (ret == -EEXIST)
+ continue;
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+ "host_id = %d, pf_id = %d, vf_id = %d.",
+ repr_id.host_id,
+ repr_id.pf_id,
+ repr_id.vf_id);
+ return ret;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_devargs *devargs = &adapter->devargs;
+ int ret, i, j;
+
+ /* check and refine repr args */
+ for (i = 0; i < devargs->repr_args_num; i++) {
+ struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+ /* set default host_id to host */
+ if (eth_da->nb_mh_controllers == 0) {
+ eth_da->nb_mh_controllers = 1;
+ eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+ } else {
+ for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+ if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+ PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+ eth_da->mh_controllers[j]);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* set default pf to APF */
+ if (eth_da->nb_ports == 0) {
+ eth_da->nb_ports = 1;
+ eth_da->ports[0] = CPFL_PF_TYPE_APF;
+ } else {
+ for (j = 0; j < eth_da->nb_ports; j++) {
+ if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+ PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+ eth_da->ports[j]);
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+ struct cpfl_repr_id *repr_id)
+{
+ int ret;
+
+ rte_spinlock_lock(&adapter->repr_lock);
+
+ ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist."
+ "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+ repr_id->host_id, repr_id->type,
+ repr_id->pf_id, repr_id->vf_id);
+ goto err;
+ }
+
+ rte_spinlock_unlock(&adapter->repr_lock);
+ return 0;
+err:
+ rte_spinlock_unlock(&adapter->repr_lock);
+ return ret;
+}
+
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+ struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+ eth_dev->data->mac_addrs = NULL;
+
+ cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+ return 0;
+}
+
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+ /* now only 1 RX queue is supported */
+ if (dev->data->nb_rx_queues > 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+ return cpfl_repr_uninit(dev);
+}
+
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+ dev_info->device = ethdev->device;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+ dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+ dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+ dev_info->rx_offload_capa =
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+ dev_info->tx_offload_capa =
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = CPFL_MAX_RING_DESC,
+ .nb_min = CPFL_MIN_RING_DESC,
+ .nb_align = CPFL_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = CPFL_MAX_RING_DESC,
+ .nb_min = CPFL_MIN_RING_DESC,
+ .nb_align = CPFL_ALIGN_RING_DESC,
+ };
+
+ dev_info->switch_info.name = ethdev->device->name;
+ dev_info->switch_info.domain_id = 0; /* the same domain */
+ dev_info->switch_info.port_id = repr->vport_info->vport.info.vsi_id;
+
+ return 0;
+}
+
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ dev->data->dev_started = 0;
+ return 0;
+}
+
+static int
+cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *conf,
+ __rte_unused struct rte_mempool *pool)
+{
+ /* Dummy */
+ return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *conf)
+{
+ /* Dummy */
+ return 0;
+}
+
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+ .dev_start = cpfl_repr_dev_start,
+ .dev_stop = cpfl_repr_dev_stop,
+ .dev_configure = cpfl_repr_dev_configure,
+ .dev_close = cpfl_repr_dev_close,
+ .dev_infos_get = cpfl_repr_dev_info_get,
+
+ .rx_queue_setup = cpfl_repr_rx_queue_setup,
+ .tx_queue_setup = cpfl_repr_tx_queue_setup,
+};
+
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+ struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+ struct cpfl_repr_param *param = init_param;
+ struct cpfl_adapter_ext *adapter = param->adapter;
+
+ repr->repr_id = param->repr_id;
+ repr->vport_info = param->vport_info;
+ repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+ repr->itf.adapter = adapter;
+ repr->itf.data = eth_dev->data;
+
+ eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ eth_dev->data->representor_id =
+ CPFL_REPRESENTOR_ID(repr->repr_id.type,
+ repr->repr_id.host_id,
+ repr->repr_id.pf_id,
+ repr->repr_id.vf_id);
+
+ eth_dev->data->mac_addrs = &repr->mac_addr;
+
+ rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+ return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+ if ((host_id != CPFL_HOST_ID_HOST &&
+ host_id != CPFL_HOST_ID_ACC) ||
+ (pf_id != CPFL_PF_TYPE_APF &&
+ pf_id != CPFL_PF_TYPE_CPF))
+ return -EINVAL;
+
+ static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+ [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+ [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+ [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+ [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+ };
+
+ return func_id_map[host_id][pf_id];
+}
+
+static bool
+cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+ struct cpchnl2_vport_info *info)
+{
+ int func_id;
+
+ if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+ info->func_type == CPFL_VPORT_LAN_PF) {
+ func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+ if (func_id < 0 || func_id != info->pf_id)
+ return false;
+ else
+ return true;
+ } else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+ info->func_type == CPFL_VPORT_LAN_VF) {
+ if (repr_id->vf_id == info->vf_id)
+ return true;
+ }
+
+ return false;
+}
+
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+ const struct cpfl_repr_id *repr_id,
+ struct cpchnl2_get_vport_list_response *response)
+{
+ struct cpfl_vport_id vi;
+ int ret;
+
+ if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+ /* PF */
+ vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+ vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+ vi.vf_id = 0;
+ } else {
+ /* VF */
+ vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+ vi.pf_id = CPFL_HOST0_APF;
+ vi.vf_id = repr_id->vf_id;
+ }
+
+ ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+ return ret;
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+ const struct cpfl_repr_id *repr_id,
+ struct cpchnl2_vport_id *vport_id,
+ struct cpchnl2_get_vport_info_response *response)
+{
+ struct cpfl_vport_id vi;
+ int ret;
+
+ if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+ /* PF */
+ vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+ vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+ vi.vf_id = 0;
+ } else {
+ /* VF */
+ vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+ vi.pf_id = CPFL_HOST0_APF;
+ vi.vf_id = repr_id->vf_id;
+ }
+
+ ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+ return ret;
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+ const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+ struct cpchnl2_get_vport_info_response *response)
+{
+ struct cpfl_vport_id vi;
+ int ret;
+
+ vi.vport_id = vport_id;
+ if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+ /* PF */
+ vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+ vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+ } else {
+ /* VF */
+ vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+ vi.pf_id = CPFL_HOST0_APF;
+ vi.vf_id = repr_id->vf_id;
+ }
+
+ ret = cpfl_vport_info_create(adapter, &vi, (struct cpchnl2_event_vport_created *)response);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+ struct rte_eth_dev *dev;
+ uint32_t iter = 0;
+ const struct cpfl_repr_id *repr_id;
+ const struct cpfl_vport_id *vp_id;
+ struct cpchnl2_get_vport_list_response *vlist_resp;
+ struct cpchnl2_get_vport_info_response vinfo_resp;
+ int ret;
+
+ vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+ if (vlist_resp == NULL)
+ return -ENOMEM;
+
+ rte_spinlock_lock(&adapter->repr_lock);
+
+ while (rte_hash_iterate(adapter->repr_allowlist_hash,
+ (const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+ struct cpfl_vport_info *vi;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ uint32_t iter_iter = 0;
+ int i;
+
+ /* skip representors that have already been created */
+ if (dev != NULL)
+ continue;
+
+ if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+ snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+ pci_dev->name,
+ repr_id->host_id,
+ repr_id->pf_id,
+ repr_id->vf_id);
+ else
+ snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+ pci_dev->name,
+ repr_id->host_id,
+ repr_id->pf_id);
+
+ /* get vport list for the port representor */
+ ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+ repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+ goto err;
+ }
+
+ if (vlist_resp->nof_vports == 0) {
+ PMD_INIT_LOG(WARNING, "No matched vport for representor %s", name);
+ continue;
+ }
+
+ /* get all vport info for the port representor */
+ for (i = 0; i < vlist_resp->nof_vports; i++) {
+ ret = cpfl_repr_vport_info_query(adapter, repr_id,
+ &vlist_resp->vports[i], &vinfo_resp);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+ repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+ vlist_resp->vports[i].vport_id);
+ goto err;
+ }
+
+ ret = cpfl_repr_vport_map_update(adapter, repr_id,
+ vlist_resp->vports[i].vport_id, &vinfo_resp);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to update host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+ repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+ vlist_resp->vports[i].vport_id);
+ goto err;
+ }
+ }
+
+ /* find the matched vport */
+ rte_spinlock_lock(&adapter->vport_map_lock);
+
+ while (rte_hash_iterate(adapter->vport_map_hash,
+ (const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+ struct cpfl_repr_param param;
+
+ if (!cpfl_match_repr_with_vport(repr_id, &vi->vport.info))
+ continue;
+
+ param.adapter = adapter;
+ param.repr_id = *repr_id;
+ param.vport_info = vi;
+
+ ret = rte_eth_dev_create(&pci_dev->device,
+ name,
+ sizeof(struct cpfl_repr),
+ NULL, NULL, cpfl_repr_init,
+ ¶m);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+ rte_spinlock_unlock(&adapter->vport_map_lock);
+ goto err;
+ }
+ break;
+ }
+
+ rte_spinlock_unlock(&adapter->vport_map_lock);
+ }
+
+err:
+ rte_spinlock_unlock(&adapter->repr_lock);
+ rte_free(vlist_resp);
+ return ret;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+struct cpfl_repr_id {
+ uint8_t host_id;
+ uint8_t pf_id;
+ uint8_t type;
+ uint8_t vf_id;
+};
+
+struct cpfl_repr_param {
+ struct cpfl_adapter_ext *adapter;
+ struct cpfl_repr_id repr_id;
+ struct cpfl_vport_info *vport_info;
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 2f0f5d8434..d8b92ae16a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
'cpfl_ethdev.c',
'cpfl_rxtx.c',
'cpfl_vchnl.c',
+ 'cpfl_representor.c',
)
if arch_subdir == 'x86'
--
2.34.1
next prev parent reply other threads:[~2023-09-12 8:09 UTC|newest]
Thread overview: 89+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-09 15:51 [PATCH 00/19] net/cpfl: support " beilei.xing
2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
2023-08-09 15:51 ` [PATCH 02/19] net/cpfl: introduce interface structure beilei.xing
2023-08-09 15:51 ` [PATCH 03/19] net/cpfl: add cp channel beilei.xing
2023-08-09 15:51 ` [PATCH 04/19] net/cpfl: enable vport mapping beilei.xing
2023-08-09 15:51 ` [PATCH 05/19] net/cpfl: parse representor devargs beilei.xing
2023-08-09 15:51 ` [PATCH 06/19] net/cpfl: support probe again beilei.xing
2023-08-09 15:51 ` [PATCH 07/19] net/cpfl: create port representor beilei.xing
2023-08-09 15:51 ` [PATCH 08/19] net/cpfl: support vport list/info get beilei.xing
2023-08-09 15:51 ` [PATCH 09/19] net/cpfl: update vport info before creating representor beilei.xing
2023-08-09 15:51 ` [PATCH 10/19] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-09 15:51 ` [PATCH 11/19] net/cpfl: add exceptional vport beilei.xing
2023-08-09 15:51 ` [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup beilei.xing
2023-08-09 15:51 ` [PATCH 13/19] net/cpfl: support link update for representor beilei.xing
2023-08-09 15:51 ` [PATCH 14/19] net/cpfl: add stats ops " beilei.xing
2023-08-09 15:51 ` [PATCH 15/19] common/idpf: refine inline function beilei.xing
2023-08-09 15:51 ` [PATCH 16/19] net/cpfl: support representor data path beilei.xing
2023-08-09 15:51 ` [PATCH 17/19] net/cpfl: support dispatch process beilei.xing
2023-08-09 15:51 ` [PATCH 18/19] net/cpfl: add dispatch service beilei.xing
2023-08-09 15:51 ` [PATCH 19/19] doc: update release notes for representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
2023-08-16 15:05 ` [PATCH v2 02/12] net/cpfl: introduce interface structure beilei.xing
2023-08-16 15:05 ` [PATCH v2 03/12] net/cpfl: add cp channel beilei.xing
2023-08-16 15:05 ` [PATCH v2 04/12] net/cpfl: enable vport mapping beilei.xing
2023-08-16 15:05 ` [PATCH v2 05/12] net/cpfl: parse representor devargs beilei.xing
2023-08-16 15:05 ` [PATCH v2 06/12] net/cpfl: support probe again beilei.xing
2023-08-16 15:05 ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
2023-09-05 7:35 ` Liu, Mingxia
2023-09-05 8:30 ` Liu, Mingxia
2023-08-16 15:05 ` [PATCH v2 08/12] net/cpfl: support vport list/info get beilei.xing
2023-08-16 15:05 ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
2023-09-06 2:33 ` Liu, Mingxia
2023-08-16 15:05 ` [PATCH v2 10/12] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-16 15:05 ` [PATCH v2 11/12] net/cpfl: support link update for representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
2023-09-06 3:02 ` Liu, Mingxia
2023-09-07 15:15 ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
2023-09-07 15:15 ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
2023-09-07 15:15 ` [PATCH v3 02/11] net/cpfl: introduce interface structure beilei.xing
2023-09-07 15:15 ` [PATCH v3 03/11] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-07 15:15 ` [PATCH v3 04/11] net/cpfl: introduce CP channel API beilei.xing
2023-09-07 15:16 ` [PATCH v3 05/11] net/cpfl: enable vport mapping beilei.xing
2023-09-07 15:16 ` [PATCH v3 06/11] net/cpfl: parse representor devargs beilei.xing
2023-09-07 15:16 ` [PATCH v3 07/11] net/cpfl: support probe again beilei.xing
2023-09-07 15:16 ` [PATCH v3 08/11] net/cpfl: create port representor beilei.xing
2023-09-07 15:16 ` [PATCH v3 09/11] net/cpfl: support vport list/info get beilei.xing
2023-09-07 15:16 ` [PATCH v3 10/11] net/cpfl: update vport info before creating representor beilei.xing
2023-09-07 15:16 ` [PATCH v3 11/11] net/cpfl: support link update for representor beilei.xing
2023-09-08 11:16 ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
2023-09-08 11:16 ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-08 11:16 ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-09 2:08 ` Wu, Jingjing
2023-09-08 11:16 ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-09 2:13 ` Wu, Jingjing
2023-09-08 11:16 ` [PATCH v4 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-08 11:16 ` [PATCH v4 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-08 11:16 ` [PATCH v4 06/10] net/cpfl: parse representor devargs beilei.xing
2023-09-08 11:16 ` [PATCH v4 07/10] net/cpfl: support probe again beilei.xing
2023-09-08 11:16 ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
2023-09-09 2:34 ` Wu, Jingjing
2023-09-08 11:17 ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
2023-09-09 3:04 ` Wu, Jingjing
2023-09-08 11:17 ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-09 3:05 ` Wu, Jingjing
2023-09-12 16:26 ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 16:26 ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 16:26 ` [PATCH v5 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 16:26 ` [PATCH v5 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 16:26 ` [PATCH v5 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 16:26 ` [PATCH v5 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 16:26 ` [PATCH v5 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 16:26 ` [PATCH v5 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 16:26 ` [PATCH v5 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 16:26 ` beilei.xing [this message]
2023-09-12 16:26 ` [PATCH v5 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-12 17:30 ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 17:30 ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 17:30 ` [PATCH v6 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 17:30 ` [PATCH v6 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 17:30 ` [PATCH v6 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 17:30 ` [PATCH v6 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 17:30 ` [PATCH v6 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 17:30 ` [PATCH v6 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 17:30 ` [PATCH v6 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 17:30 ` [PATCH v6 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 17:30 ` [PATCH v6 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-13 1:01 ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
2023-09-13 5:41 ` Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230912162640.1439383-10-beilei.xing@intel.com \
--to=beilei.xing@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=mingxia.liu@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).