From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com,
Beilei Xing <beilei.xing@intel.com>,
Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH v3 05/11] net/cpfl: enable vport mapping
Date: Thu, 7 Sep 2023 15:16:00 +0000
Message-ID: <20230907151606.849612-6-beilei.xing@intel.com>
In-Reply-To: <20230907151606.849612-1-beilei.xing@intel.com>
From: Beilei Xing <beilei.xing@intel.com>
1. Handle cpchnl events for vport create/destroy.
2. Use a hash table to store the vport_id to vport_info mapping.
3. Use a spinlock for thread safety.
A minimal usage sketch of this hash-plus-spinlock pattern follows the diffstat below.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 21 ++++-
drivers/net/cpfl/meson.build | 2 +-
3 files changed, 177 insertions(+), 3 deletions(-)
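Note for reviewers (not part of the commit): the mapping added here follows a
common DPDK pattern, an rte_hash keyed by the vport identity whose stored value
is a heap-allocated info struct, with an rte_spinlock serializing access. The
minimal sketch below shows that pattern in isolation; it assumes rte_eal_init()
has already run, and the demo_* names are hypothetical, not taken from this
driver.

#include <errno.h>
#include <stdint.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>

struct demo_vport_key {
	uint32_t vport_id;
	uint8_t func_type;
	uint8_t pf_id;
	uint16_t vf_id;
};

struct demo_vport_val {
	uint32_t example_field;
};

static struct rte_hash *demo_map;
static rte_spinlock_t demo_lock;

/* Create the hash table and the lock that protects it. */
static int
demo_map_init(void)
{
	struct rte_hash_parameters params = {
		.name = "demo-vport-map",
		.entries = 2048,
		.key_len = sizeof(struct demo_vport_key),
		.hash_func = rte_hash_crc,
		.socket_id = SOCKET_ID_ANY,
	};

	rte_spinlock_init(&demo_lock);
	demo_map = rte_hash_create(&params);
	return demo_map == NULL ? -EINVAL : 0;
}

/* Insert under the lock: the key is copied by the hash, but only the
 * value pointer is stored, so the value must stay allocated until the
 * entry is removed.
 */
static int
demo_map_insert(const struct demo_vport_key *key, uint32_t example_field)
{
	struct demo_vport_val *val;
	int ret;

	rte_spinlock_lock(&demo_lock);
	val = rte_zmalloc(NULL, sizeof(*val), 0);
	if (val == NULL) {
		rte_spinlock_unlock(&demo_lock);
		return -ENOMEM;
	}
	val->example_field = example_field;
	ret = rte_hash_add_key_data(demo_map, key, val);
	if (ret < 0)
		rte_free(val);
	rte_spinlock_unlock(&demo_lock);
	return ret;
}

/* Look up and delete under the lock, then free the stored value. */
static int
demo_map_remove(const struct demo_vport_key *key)
{
	struct demo_vport_val *val = NULL;
	int ret;

	rte_spinlock_lock(&demo_lock);
	ret = rte_hash_lookup_data(demo_map, key, (void **)&val);
	if (ret >= 0)
		rte_hash_del_key(demo_map, key);
	rte_spinlock_unlock(&demo_lock);
	if (ret >= 0)
		rte_free(val);
	return ret < 0 ? ret : 0;
}

The external lock is what makes concurrent writers safe: a default rte_hash is
not multi-writer safe unless created with the RW-concurrency extra flags, so
guarding add/delete (and lookups that dereference the stored pointer) with a
spinlock, as this patch does, is the simpler choice.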
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 6b6e9b37b1..f51aa6e95a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
#include <rte_dev.h>
#include <errno.h>
#include <rte_alarm.h>
+#include <rte_hash_crc.h>
#include "cpfl_ethdev.h"
#include "cpfl_rxtx.h"
@@ -1502,6 +1503,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
}
}
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+ struct cpfl_vport_id *vport_identity,
+ struct cpchnl2_vport_info *vport_info)
+{
+ struct cpfl_vport_info *info = NULL;
+ int ret;
+
+ rte_spinlock_lock(&adapter->vport_map_lock);
+ ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+ if (ret >= 0) {
+ PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+ /* overwrite info */
+ if (info)
+ info->vport_info = *vport_info;
+ goto fini;
+ }
+
+ info = rte_zmalloc(NULL, sizeof(*info), 0);
+ if (info == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ info->vport_info = *vport_info;
+
+ ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+ rte_free(info);
+ goto err;
+ }
+
+fini:
+ rte_spinlock_unlock(&adapter->vport_map_lock);
+ return 0;
+err:
+ rte_spinlock_unlock(&adapter->vport_map_lock);
+ return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+ struct cpfl_vport_info *info;
+ int ret;
+
+ rte_spinlock_lock(&adapter->vport_map_lock);
+ ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "vport id not exist");
+ goto err;
+ }
+
+ rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+ rte_spinlock_unlock(&adapter->vport_map_lock);
+ rte_free(info);
+
+ return 0;
+
+err:
+ rte_spinlock_unlock(&adapter->vport_map_lock);
+ return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+ struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+ struct cpchnl2_vport_info *info;
+ struct cpfl_vport_id vport_identity = { 0 };
+
+ if (msglen < sizeof(struct cpchnl2_event_info)) {
+ PMD_DRV_LOG(ERR, "Error event");
+ return;
+ }
+
+ switch (cpchnl2_event->header.type) {
+ case CPCHNL2_EVENT_VPORT_CREATED:
+ vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+ info = &cpchnl2_event->data.vport_created.info;
+ vport_identity.func_type = info->func_type;
+ vport_identity.pf_id = info->pf_id;
+ vport_identity.vf_id = info->vf_id;
+ if (cpfl_vport_info_create(adapter, &vport_identity, info))
+ PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+ break;
+ case CPCHNL2_EVENT_VPORT_DESTROYED:
+ vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+ vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+ vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+ vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+ if (cpfl_vport_info_destroy(adapter, &vport_identity))
+ PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+ break;
+ }
+}
+
static void
cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
{
@@ -1533,6 +1636,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
if (vc_op == VIRTCHNL2_OP_EVENT) {
cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
ctlq_msg.data_len);
+ } else if (vc_op == CPCHNL2_OP_EVENT) {
+ cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+ ctlq_msg.data_len);
} else {
if (vc_op == base->pend_cmd)
notify_cmd(base, base->cmd_retval);
@@ -1608,6 +1714,48 @@ static struct virtchnl2_get_capabilities req_caps = {
.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
};
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+ char hname[32];
+
+ snprintf(hname, 32, "%s-vport", adapter->name);
+
+ rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+ struct rte_hash_parameters params = {
+ .name = hname,
+ .entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+ .key_len = sizeof(struct cpfl_vport_id),
+ .hash_func = rte_hash_crc,
+ .socket_id = SOCKET_ID_ANY,
+ };
+
+ adapter->vport_map_hash = rte_hash_create(&params);
+
+ if (adapter->vport_map_hash == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+ const void *key = NULL;
+ struct cpfl_vport_info *info;
+ uint32_t iter = 0;
+
+ while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+ rte_free(info);
+
+ rte_hash_free(adapter->vport_map_hash);
+}
+
static int
cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
{
@@ -1632,6 +1780,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
goto err_adapter_init;
}
+ ret = cpfl_vport_map_init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init vport map");
+ goto err_vport_map_init;
+ }
+
rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1656,6 +1810,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+ cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
idpf_adapter_deinit(base);
err_adapter_init:
return ret;
@@ -1885,6 +2041,7 @@ static void
cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
{
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+ cpfl_vport_map_uninit(adapter);
idpf_adapter_deinit(&adapter->base);
rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 53e45035e8..3515fec4f7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
#include <rte_spinlock.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
-#include "cpfl_logs.h"
-
#include <idpf_common_device.h>
#include <idpf_common_virtchnl.h>
#include <base/idpf_prototype.h>
#include <base/virtchnl2.h>
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
/* Currently, backend supports up to 8 vports */
#define CPFL_MAX_VPORT_NUM 8
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
uint32_t rx_buf_qtail_spacing;
};
+struct cpfl_vport_id {
+ uint32_t vport_id;
+ uint8_t func_type;
+ uint8_t pf_id;
+ uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+ struct cpchnl2_vport_info vport_info;
+ bool enabled;
+};
+
enum cpfl_itf_type {
CPFL_ITF_TYPE_VPORT,
CPFL_ITF_TYPE_REPRESENTOR
@@ -128,6 +142,9 @@ struct cpfl_adapter_ext {
uint16_t used_vecs_num;
struct cpfl_devargs devargs;
+
+ rte_spinlock_t vport_map_lock;
+ struct rte_hash *vport_map_hash;
};
TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
subdir_done()
endif
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
sources = files(
'cpfl_ethdev.c',
--
2.34.1