DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 00/19] net/cpfl: support port representor
@ 2023-08-09 15:51 beilei.xing
  2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
                   ` (19 more replies)
  0 siblings, 20 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. code refine for representor support
2. support port representor

Beilei Xing (19):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: add cp channel
  net/cpfl: enable vport mapping
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support vport list/info get
  net/cpfl: update vport info before creating representor
  net/cpfl: refine handle virtual channel message
  net/cpfl: add exceptional vport
  net/cpfl: support representor Rx/Tx queue setup
  net/cpfl: support link update for representor
  net/cpfl: add stats ops for representor
  common/idpf: refine inline function
  net/cpfl: support representor data path
  net/cpfl: support dispatch process
  net/cpfl: add dispatch service
  doc: update release notes for representor

 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/common/idpf/idpf_common_rxtx.c | 246 -------
 drivers/common/idpf/idpf_common_rxtx.h | 246 +++++++
 drivers/common/idpf/version.map        |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h         | 313 +++++++++
 drivers/net/cpfl/cpfl_ethdev.c         | 884 ++++++++++++++++++++---
 drivers/net/cpfl/cpfl_ethdev.h         | 120 +++-
 drivers/net/cpfl/cpfl_representor.c    | 935 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h    |  26 +
 drivers/net/cpfl/cpfl_rxtx.c           | 268 +++++++
 drivers/net/cpfl/cpfl_rxtx.h           |  19 +
 drivers/net/cpfl/cpfl_vchnl.c          |  72 ++
 drivers/net/cpfl/meson.build           |   4 +-
 13 files changed, 2783 insertions(+), 356 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 01/19] net/cpfl: refine devargs parse and process
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 02/19] net/cpfl: introduce interface structure beilei.xing
                   ` (18 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Keep devargs in adapter.
2. Refine handling of the case where no vport is specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++---------------
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter,
-		   struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct cpfl_devargs *cpfl_args = &adapter->devargs;
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret;
 
 	cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
-	/* check parsed devargs */
-	if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-	    adapter->max_vport_nb) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     adapter->max_vport_nb);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-		if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
-				     cpfl_args->req_vports[i], adapter->max_vport_nb - 1);
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been requested",
-				     cpfl_args->req_vports[i]);
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 	adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i;
+
+	/* refine vport number, at least 1 vport */
+	if (devargs->req_vport_nb == 0) {
+		devargs->req_vport_nb = 1;
+		devargs->req_vports[0] = 0;
+	}
+
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + devargs->req_vport_nb >
+	    adapter->max_vport_nb) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     adapter->max_vport_nb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     devargs->req_vports[i], adapter->max_vport_nb - 1);
+			return -EINVAL;
+		}
+
+		if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
+				     devargs->req_vports[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport_param vport_param;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret, i;
+
+	for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = adapter->devargs.req_vports[i];
+		vport_param.idx = cpfl_vport_idx_alloc(adapter);
+		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			break;
+		}
+		snprintf(name, sizeof(name), "net_%s_vport_%d",
+			 pci_dev->device.name,
+			 adapter->devargs.req_vports[i]);
+		ret = rte_eth_dev_create(&pci_dev->device, name,
+					    sizeof(struct cpfl_vport),
+					    NULL, NULL, cpfl_dev_vport_init,
+					    &vport_param);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to create vport %d",
+				    vport_param.devarg_id);
+	}
+
+	return 0;
+}
+
 static int
 cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
-	struct cpfl_vport_param vport_param;
 	struct cpfl_adapter_ext *adapter;
-	struct cpfl_devargs devargs;
-	char name[RTE_ETH_NAME_MAX_LEN];
-	int i, retval;
+	int retval;
 
 	if (!cpfl_adapter_list_init) {
 		rte_spinlock_init(&cpfl_adapter_lock);
@@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
+	retval = cpfl_parse_devargs(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return retval;
+	}
+
 	retval = cpfl_adapter_ext_init(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter.");
@@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next);
 	rte_spinlock_unlock(&cpfl_adapter_lock);
 
-	retval = cpfl_parse_devargs(pci_dev, adapter, &devargs);
+	retval = cpfl_vport_devargs_process(adapter);
 	if (retval != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		PMD_INIT_LOG(ERR, "Failed to process vport devargs");
 		goto err;
 	}
 
-	if (devargs.req_vport_nb == 0) {
-		/* If no vport devarg, create vport 0 by default. */
-		vport_param.adapter = adapter;
-		vport_param.devarg_id = 0;
-		vport_param.idx = cpfl_vport_idx_alloc(adapter);
-		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-			return 0;
-		}
-		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
-			 pci_dev->device.name);
-		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct cpfl_vport),
-					    NULL, NULL, cpfl_dev_vport_init,
-					    &vport_param);
-		if (retval != 0)
-			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-	} else {
-		for (i = 0; i < devargs.req_vport_nb; i++) {
-			vport_param.adapter = adapter;
-			vport_param.devarg_id = devargs.req_vports[i];
-			vport_param.idx = cpfl_vport_idx_alloc(adapter);
-			if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-				break;
-			}
-			snprintf(name, sizeof(name), "cpfl_%s_vport_%d",
-				 pci_dev->device.name,
-				 devargs.req_vports[i]);
-			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct cpfl_vport),
-						    NULL, NULL, cpfl_dev_vport_init,
-						    &vport_param);
-			if (retval != 0)
-				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    vport_param.devarg_id);
-		}
+	retval = cpfl_vport_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create vports.");
+		goto err;
 	}
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e42354f70..b637bf2e45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -115,6 +115,7 @@ struct cpfl_adapter_ext {
 	uint16_t cur_vport_nb;
 
 	uint16_t used_vecs_num;
+	struct cpfl_devargs devargs;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 02/19] net/cpfl: introduce interface structure
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
  2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 03/19] net/cpfl: add cp channel beilei.xing
                   ` (17 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Introduce the cpfl interface structure to distinguish between vport and port
representor.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 16 ++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
+	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..53e45035e8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,19 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+	CPFL_ITF_TYPE_VPORT,
+	CPFL_ITF_TYPE_REPRESENTOR
+};
+
+struct cpfl_itf {
+	enum cpfl_itf_type type;
+	struct cpfl_adapter_ext *adapter;
+	void *data;
+};
+
 struct cpfl_vport {
+	struct cpfl_itf itf;
 	struct idpf_vport base;
 	struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +136,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev)					\
+	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)				\
+	((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 03/19] net/cpfl: add cp channel
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
  2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
  2023-08-09 15:51 ` [PATCH 02/19] net/cpfl: introduce interface structure beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 04/19] net/cpfl: enable vport mapping beilei.xing
                   ` (16 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add cpchnl header file.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h | 313 +++++++++++++++++++++++++++++++++
 1 file changed, 313 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 0000000000..5633fba15e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief      Command Opcodes
+ *              Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+	/* vport info */
+	CPCHNL2_OP_GET_VPORT_LIST		= 0x8025,
+	CPCHNL2_OP_GET_VPORT_INFO		= 0x8026,
+
+	/* DPHMA Event notifications */
+	CPCHNL2_OP_EVENT			= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT		8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
+
+#define CPCHNL2_FUNC_TYPE_PF		0
+#define CPCHNL2_FUNC_TYPE_SRIOV		1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED	0
+#define CPCHNL2_VPORT_STATUS_ENABLED	1
+#define CPCHNL2_VPORT_STATUS_DISABLED	2
+#define CPCHNL2_VPORT_STATUS_DESTROYED	3
+
+/* Queue Groups Extension */
+/**************************************************/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+struct cpchnl2_queue_chunk {
+	u32 type;	       /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */
+	u32 start_queue_id;
+	u32 num_queues;
+	u8 pad[4];
+};
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+	u16 num_chunks;
+	u8 reserved[6];
+	struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+
+struct cpchnl2_rx_queue_group_info {
+	/* User can ask to update rss_lut size originally allocated
+	 * by CreateVport command. New size will be returned if allocation succeeded,
+	 * otherwise original rss_size from CreateVport will be returned.
+	 */
+	u16 rss_lut_size;
+	u8 pad[6]; /*Future extension purpose*/
+};
+
+struct cpchnl2_tx_queue_group_info {
+	u8 tx_tc; /*TX TC queue group will be connected to*/
+	/* Each group can have its own priority, value 0-7, while each group with unique
+	 * priority is strict priority. It can be single set of queue groups which configured with
+	 * same priority, then they are assumed part of WFQ arbitration group and are expected to be
+	 * assigned with weight.
+	 */
+	u8 priority;
+	u8 is_sp; /*Determines if queue group is expected to be Strict Priority according to its priority*/
+	u8 pad;
+	/* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set.
+	 * The weights of the groups are independent of each other. Possible values: 1-200.
+	 */
+	u16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	u8 pad2[8]; /* Future extension purpose*/
+};
+
+struct cpchnl2_queue_group_id {
+	/* Queue group ID - depended on it's type:
+	 * Data & p2p - is an index which is relative to Vport.
+	 * Config & Mailbox - is an ID which is relative to func.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by host and assigned by Control plane.
+	 */
+	u16 queue_group_id;
+	/* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+	u16 queue_group_type;
+	u8 pad[4];
+};
+
+struct cpchnl2_queue_group_info {
+	/* IN */
+	struct cpchnl2_queue_group_id qg_id;
+
+	/* IN, Number of queues of different types in the group. */
+	u16 num_tx_q;
+	u16 num_tx_complq;
+	u16 num_rx_q;
+	u16 num_rx_bufq;
+
+	struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+	struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+	u8 egress_port;
+	u8 pad[39]; /*Future extension purpose*/
+	struct cpchnl2_queue_grp_chunks chunks;
+};
+
+struct cpchnl2_queue_groups {
+	u16 num_queue_groups; /* Number of queue groups in struct below */
+	u8 pad[6];
+	/* group information , number is determined by param above */
+	struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+	CPCHNL2_FTYPE_LAN_PF = 0,
+	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport_id {
+	u32 vport_id;
+	u16 vport_type;
+	u8 pad[2];
+};
+
+struct cpchnl2_func_id {
+	/* Function type: 0 - LAN PF, 1 -  LAN VF, Rest - "reserved" */
+	u8 func_type;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs
+	 * and 8-12 CPFs are valid
+	 */
+	u8 pf_id;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	u8 pad[4];
+};
+
+/* Note! Do not change the fields and especially their order as should eventually
+ * be aligned to 32bit. Must match the virtchnl structure definition.
+ * If should change, change also the relevant FAS and virtchnl code, under permission.
+ */
+struct cpchnl2_vport_info {
+	u16 vport_index;
+	/* VSI index, global indexing aligned to HW.
+	 * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command.
+	 * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev".
+	 */
+	u16 vsi_id;
+	u8 vport_status;	/* enum cpchnl2_vport_status */
+	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	u8 func_type;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 CPFs are valid. */
+	u8 pf_id;
+	u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */
+	/* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */
+	u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS];
+	u16 vmrl_id;
+	/* Indicates if IMC created SEM MAC rule for this Vport.
+	 * Currently this is done by IMC for all Vport of type "Default" only,
+	 * but can be different in the future.
+	 */
+	u8 sem_mac_rule_exist;
+	/* Bitmask to inform which TC is valid.
+	 * 0x1 << TCnum. 1b: valid else 0.
+	 * Driven by Node Policy on system level, then Sysetm level TCs are
+	 * reported to IDPF and it can enable Vport level TCs on TX according
+	 * to Syetm enabled ones.
+	 * If TC aware mode - bit set for valid TC. otherwise =1 (only bit 0 is set. represents the VSI
+	 */
+	u8 tx_tc_bitmask;
+	/* For each valid TC, TEID of VPORT node over TC in TX LAN WS.
+	 * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID
+	 */
+	u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT];
+	/* For each valid TC, bandwidth in mbps.
+	 * Default BW per Vport is from Node policy
+	 * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth
+	 */
+	u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT];
+	/* From Node Policy. */
+	u16 max_mtu;
+	u16 default_rx_qid;	/* Default LAN RX Queue ID */
+	u16 vport_flags; /* see: VPORT_FLAGS */
+	u8 egress_port;
+	u8 pad_reserved[5];
+};
+
+/*
+ * CPCHNL2_OP_GET_VPORT_LIST
+ */
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ */
+struct cpchnl2_get_vport_list_request {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u8 pad[4];
+};
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to other PE types
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12 CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ * @param nof_vports Number of vports created on the function
+ * @param vports array of the IDs and types. vport ID is elative to its func (PF/VF). same as in Create Vport
+ * vport_type: Aligned to VirtChnl types: Default, SIOV, etc.
+ */
+struct cpchnl2_get_vport_list_response {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u16 nof_vports;
+	u8 pad[2];
+	struct cpchnl2_vport_id vports[];
+};
+
+/*
+ * CPCHNL2_OP_GET_VPORT_INFO
+ */
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request
+ * @param vport a structure containing vport_id (relative to function) and type
+ * @param func a structure containing function type, pf_id, vf_id
+ */
+struct cpchnl2_get_vport_info_request {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response
+ * @param vport a structure containing vport_id (relative to function) and type to get info for
+ * @param info a structure all the information for a given vport
+ * @param queue_groups a structure containing all the queue groups of the given vport
+ */
+struct cpchnl2_get_vport_info_response {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_vport_info info;
+	struct cpchnl2_queue_groups queue_groups;
+};
+
+ /* Cpchnl events
+  * Sends event message to inform the peer of notification that may affect it.
+  * No direct response is expected from the peer, though it may generate other
+  * messages in response to this one.
+  */
+enum cpchnl2_event {
+	CPCHNL2_EVENT_UNKNOWN = 0,
+	CPCHNL2_EVENT_VPORT_CREATED,
+	CPCHNL2_EVENT_VPORT_DESTROYED,
+	CPCHNL2_EVENT_VPORT_ENABLED,
+	CPCHNL2_EVENT_VPORT_DISABLED,
+	CPCHNL2_PKG_EVENT,
+	CPCHNL2_EVENT_ADD_QUEUE_GROUPS,
+	CPCHNL2_EVENT_DEL_QUEUE_GROUPS,
+	CPCHNL2_EVENT_ADD_QUEUES,
+	CPCHNL2_EVENT_DEL_QUEUES
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_CREATED
+ */
+struct cpchnl2_event_vport_created {
+	struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_info info; /* Vport configuration info */
+	struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_DESTROYED
+ */
+struct cpchnl2_event_vport_destroyed {
+	/* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+
+struct cpchnl2_event_info {
+	struct {
+		s32 type;		/* See enum cpchnl2_event */
+		uint8_t reserved[4];	/* Reserved */
+	} header;
+	union {
+		struct cpchnl2_event_vport_created vport_created;
+		struct cpchnl2_event_vport_destroyed vport_destroyed;
+	} data;
+};
+
+#endif /* _CPFL_CPCHNL_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 04/19] net/cpfl: enable vport mapping
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (2 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 03/19] net/cpfl: add cp channel beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 05/19] net/cpfl: parse representor devargs beilei.xing
                   ` (15 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Handle cpchnl event for vport create/destroy
2. Use hash table to store vport_id to vport_info mapping
3. Use spinlock for thread safe.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  21 ++++-
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..17a69c16fe 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include <rte_dev.h>
 #include <errno.h>
 #include <rte_alarm.h>
+#include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1492,6 +1493,108 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
 	}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vport_identity,
+		       struct cpchnl2_vport_info *vport_info)
+{
+	struct cpfl_vport_info *info = NULL;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret >= 0) {
+		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+		/* overwrite info */
+		if (info)
+			info->vport_info = *vport_info;
+		goto fini;
+	}
+
+	info = rte_zmalloc(NULL, sizeof(*info), 0);
+	if (info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	info->vport_info = *vport_info;
+
+	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+		rte_free(info);
+		goto err;
+	}
+
+fini:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+	struct cpfl_vport_info *info;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "vport id not exist");
+		goto err;
+	}
+
+	rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	rte_free(info);
+
+	return 0;
+
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+	struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+	struct cpchnl2_vport_info *info;
+	struct cpfl_vport_id vport_identity = { 0 };
+
+	if (msglen < sizeof(struct cpchnl2_event_info)) {
+		PMD_DRV_LOG(ERR, "Error event");
+		return;
+	}
+
+	switch (cpchnl2_event->header.type) {
+	case CPCHNL2_EVENT_VPORT_CREATED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+		info = &cpchnl2_event->data.vport_created.info;
+		vport_identity.func_type = info->func_type;
+		vport_identity.pf_id = info->pf_id;
+		vport_identity.vf_id = info->vf_id;
+		if (cpfl_vport_info_create(adapter, &vport_identity, info))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+		break;
+	case CPCHNL2_EVENT_VPORT_DESTROYED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+		vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+		vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+		vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+		if (cpfl_vport_info_destroy(adapter, &vport_identity))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+		break;
+	}
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 				}
 				cpfl_handle_event_msg(vport, base->mbx_resp,
 						      ctlq_msg.data_len);
+			} else if (vc_op == CPCHNL2_OP_EVENT) {
+				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+							     ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
@@ -1610,6 +1716,48 @@ static struct virtchnl2_get_capabilities req_caps = {
 	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
 };
 
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-vport", adapter->name);
+
+	rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = adapter->name,
+		.entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_vport_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->vport_map_hash = rte_hash_create(&params);
+
+	if (adapter->vport_map_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+	const void *key = NULL;
+	struct cpfl_vport_map_info *info;
+	uint32_t iter = 0;
+
+	while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+		rte_free(info);
+
+	rte_hash_free(adapter->vport_map_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1634,6 +1782,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_adapter_init;
 	}
 
+	ret = cpfl_vport_map_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init vport map");
+		goto err_vport_map_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1658,6 +1812,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
 	idpf_adapter_deinit(base);
 err_adapter_init:
 	return ret;
@@ -1887,6 +2043,7 @@ static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
 
 	rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 53e45035e8..3515fec4f7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
 #include <rte_spinlock.h>
 #include <rte_ethdev.h>
 #include <rte_kvargs.h>
+#include <rte_hash.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 
-#include "cpfl_logs.h"
-
 #include <idpf_common_device.h>
 #include <idpf_common_virtchnl.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
 
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct cpfl_vport_id {
+	uint32_t vport_id;
+	uint8_t func_type;
+	uint8_t pf_id;
+	uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+	struct cpchnl2_vport_info vport_info;
+	bool enabled;
+};
+
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
 	CPFL_ITF_TYPE_REPRESENTOR
@@ -128,6 +142,9 @@ struct cpfl_adapter_ext {
 
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
+
+	rte_spinlock_t vport_map_lock;
+	struct rte_hash *vport_map_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
     subdir_done()
 endif
 
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 05/19] net/cpfl: parse representor devargs
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (3 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 04/19] net/cpfl: enable vport mapping beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 06/19] net/cpfl: support probe again beilei.xing
                   ` (14 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : xeon (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- xeon > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if the default values are omitted.

representor=c0pf0
  -- xeon > apf
     same as pf0 if the default value is omitted.

representor=c1pf0
  -- acc > apf

Multiple representor devargs are supported,
e.g. create 4 representors for 4 VFs on the xeon APF and one
representor for the acc APF:

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 179 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |   8 ++
 2 files changed, 187 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 17a69c16fe..a820528a0d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
+#include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val)
+{
+	uint16_t i;
+
+	for (i = 0; i < *len_list; i++) {
+		if (list[i] == val)
+			return 0;
+	}
+	if (*len_list >= max_list)
+		return -1;
+	list[(*len_list)++] = val;
+	return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+	const uint16_t max_list)
+{
+	uint16_t lo, hi, val;
+	int result, n = 0;
+	const char *pos = str;
+
+	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+	if (result == 1) {
+		if (enlist(list, len_list, max_list, lo) != 0)
+			return NULL;
+	} else if (result == 2) {
+		if (lo > hi)
+			return NULL;
+		for (val = lo; val <= hi; val++) {
+			if (enlist(list, len_list, max_list, val) != 0)
+				return NULL;
+		}
+	} else {
+		return NULL;
+	}
+	return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list)
+{
+	const char *pos = str;
+
+	if (*pos == '[')
+		pos++;
+	while (1) {
+		pos = process_range(pos, list, len_list, max_list);
+		if (pos == NULL)
+			return NULL;
+		if (*pos != ',') /* end of list */
+			break;
+		pos++;
+	}
+	if (*str == '[' && *pos != ']')
+		return NULL;
+	if (*pos == ']')
+		pos++;
+	return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+	struct cpfl_devargs *devargs = args;
+	struct rte_eth_devargs *eth_da;
+	const char *str = value;
+
+	if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+		return -EINVAL;
+
+	eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+	if (str[0] == 'c') {
+		str += 1;
+		str = process_list(str, eth_da->mh_controllers,
+				&eth_da->nb_mh_controllers,
+				RTE_DIM(eth_da->mh_controllers));
+		if (str == NULL)
+			goto done;
+	}
+	if (str[0] == 'p' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_PF;
+		str += 2;
+		str = process_list(str, eth_da->ports,
+				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
+		if (str == NULL || str[0] == '\0')
+			goto done;
+	} else if (eth_da->nb_mh_controllers > 0) {
+		/* 'c' must followed by 'pf'. */
+		str = NULL;
+		goto done;
+	}
+	if (str[0] == 'v' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+		str += 2;
+	} else if (str[0] == 's' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_SF;
+		str += 2;
+	} else {
+		/* 'pf' must followed by 'vf' or 'sf'. */
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+			str = NULL;
+			goto done;
+		}
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+	}
+	str = process_list(str, eth_da->representor_ports,
+		&eth_da->nb_representor_ports,
+		RTE_DIM(eth_da->representor_ports));
+done:
+	if (str == NULL) {
+		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
+		return -1;
+	}
+
+	devargs->repr_args_num++;
+
+	return 0;
+}
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	cpfl_args->repr_args_num = 0;
+	ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args);
+
+	if (ret != 0)
+		goto fail;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2087,6 +2218,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
+static int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2165,6 +2338,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto err;
 	}
 
+	retval = cpfl_repr_devargs_process(adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
+		goto err;
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 3515fec4f7..9c4d8d3ea1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -60,16 +60,24 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_HOST	0
+#define CPFL_HOST_ID_ACC	1
+#define CPFL_PF_TYPE_APF	0
+#define CPFL_PF_TYPE_CPF	1
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
+#define CPFL_REPR_ARG_NUM_MAX	4
 /* Struct used when parse driver specific devargs */
 struct cpfl_devargs {
 	uint16_t req_vports[CPFL_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
+	uint8_t repr_args_num;
+	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
 };
 
 struct p2p_queue_chunks_info {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 06/19] net/cpfl: support probe again
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (4 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 05/19] net/cpfl: parse representor devargs beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 07/19] net/cpfl: create port representor beilei.xing
                   ` (13 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Only the representor devargs will be parsed when the device is probed again.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++-------
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a820528a0d..09015fbb08 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
 	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+	CPFL_REPRESENTOR,
+	NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
 	RTE_ETH_SPEED_NUM_NONE,
 	RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (devargs == NULL)
 		return 0;
 
-	kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+	kvlist = rte_kvargs_parse(devargs->args,
+			first ? cpfl_valid_args_first : cpfl_valid_args_again);
 	if (kvlist == NULL) {
 		PMD_INIT_LOG(ERR, "invalid kvargs key");
 		return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
+	if (!first)
+		return 0;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	       struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
 
-	if (!cpfl_adapter_list_init) {
-		rte_spinlock_init(&cpfl_adapter_lock);
-		TAILQ_INIT(&cpfl_adapter_list);
-		cpfl_adapter_list_init = true;
-	}
-
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
 	if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
-	retval = cpfl_parse_devargs(pci_dev, adapter);
+	retval = cpfl_parse_devargs(pci_dev, adapter, true);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_parse_devargs(pci_dev, adapter, false);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return ret;
+	}
+
+	ret = cpfl_repr_devargs_process(adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	struct cpfl_adapter_ext *adapter;
+
+	if (!cpfl_adapter_list_init) {
+		rte_spinlock_init(&cpfl_adapter_lock);
+		TAILQ_INIT(&cpfl_adapter_list);
+		cpfl_adapter_list_init = true;
+	}
+
+	adapter = cpfl_find_adapter_ext(pci_dev);
+
+	if (adapter == NULL)
+		return cpfl_pci_probe_first(pci_dev);
+	else
+		return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
 	.id_table	= pci_id_cpfl_map,
-	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING |
+			  RTE_PCI_DRV_PROBE_AGAIN,
 	.probe		= cpfl_pci_probe,
 	.remove		= cpfl_pci_remove,
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 07/19] net/cpfl: create port representor
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (5 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 06/19] net/cpfl: support probe again beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 08/19] net/cpfl: support vport list/info get beilei.xing
                   ` (12 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Track representor requests in a whitelist.
A representor will only be created for an active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 107 ++++---
 drivers/net/cpfl/cpfl_ethdev.h      |  34 +++
 drivers/net/cpfl/cpfl_representor.c | 448 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 573 insertions(+), 43 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 09015fbb08..949a2c8069 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_whitelist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_wl", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_whitelist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_whitelist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr whitelist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_repr_whitelist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_whitelist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_whitelist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor whitelist");
+		goto err_repr_whitelist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_whitelist_uninit(adapter);
+err_repr_whitelist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2343,11 +2346,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2374,6 +2389,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9c4d8d3ea1..1f5c3a39b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,32 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define HOST0_APF	0
+#define HOST1_APF	1
+#define HOST2_APF	2
+#define HOST3_APF	3
+#define ACC_APF_ID	4
+#define IMC_APF_ID	5
+#define HOST0_NVME_ID	6
+#define ACC_NVME_ID	7
+#define HOST0_CPF_ID	8
+#define HOST1_CPF_ID	9
+#define HOST2_CPF_ID	10
+#define HOST3_CPF_ID	11
+#define ACC_CPF_ID	12
+#define IMC_IPF_ID	13
+#define ATE_CPF_ID	14
+#define ACC_LCE_ID	15
+#define IMC_MBX_EFD_ID	0
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -136,6 +158,13 @@ struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -153,6 +182,9 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_whitelist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -163,6 +195,8 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..4d91d7311d
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+static int
+cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_whitelist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_whitelist_hash, repr_id, dev);
+
+	return ret;
+}
+
+static int
+cpfl_repr_whitelist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_whitelist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_whitelist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_whitelist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to whitelist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_whitelist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to whitelist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_repr_whitelist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_whitelist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from whitelist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_whitelist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain*/
+	dev_info->switch_info.port_id = repr->vport_info->vport_info.vsi_id;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+};
+
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+	/* bit[15:14] type
+	 * bit[13] xeon/acc
+	 * bit[12] apf/cpf
+	 * bit[11:0] vf
+	 */
+	eth_dev->data->representor_id =
+		(uint16_t)(repr->repr_id.type << 14 |
+			   repr->repr_id.host_id << 13 |
+			   repr->repr_id.pf_id << 12 |
+			   repr->repr_id.vf_id);
+
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_whitelist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+static bool
+match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+		      struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == 0) {
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		if (func_id < 0)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == 1) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_whitelist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		bool matched;
+
+		/* skip representor already be created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* find a matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		matched = false;
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!match_repr_with_vport(repr_id, &vi->vport_info))
+				continue;
+
+			matched = true;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				return ret;
+			}
+			break;
+		}
+
+		/* warning if no match vport detected */
+		if (!matched)
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s "
+					      "creation will be deferred when vport is detected",
+					      name);
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+struct cpfl_repr_id {
+	uint8_t host_id;
+	uint8_t pf_id;
+	uint8_t type;
+	uint8_t vf_id;
+};
+
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info;
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..1d963e5fd1 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 08/19] net/cpfl: support vport list/info get
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (6 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 07/19] net/cpfl: create port representor beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 09/19] net/cpfl: update vport info before creating representor beilei.xing
                   ` (11 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
CPCHNL2_OP_CPF_GET_VPORT_INFO.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 ++++
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 1f5c3a39b8..4b8c0da632 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -189,6 +189,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 0000000000..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include <idpf_common_virtchnl.h>
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpchnl2_get_vport_list_request request;
+	struct idpf_cmd_info args;
+	int ret;
+
+	/* Build the vport-list request addressing the given function. */
+	memset(&request, 0, sizeof(request));
+	request.func_type = vi->func_type;
+	request.pf_id = vi->pf_id;
+	request.vf_id = vi->vf_id;
+
+	/* Issue CPCHNL2_OP_GET_VPORT_LIST over the mailbox. */
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	ret = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST");
+		return ret;
+	}
+
+	/* Response length is variable; hand back the full mailbox buffer. */
+	rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+	return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+		       struct cpchnl2_vport_id *vport_id,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpchnl2_get_vport_info_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	/* Zero the request first so reserved/padding bytes are not sent
+	 * uninitialized to the control plane (keeps this consistent with
+	 * cpfl_cc_vport_list_get).
+	 */
+	memset(&request, 0, sizeof(request));
+	request.vport.vport_id = vport_id->vport_id;
+	request.vport.vport_type = vport_id->vport_type;
+	request.func.func_type = vi->func_type;
+	request.func.pf_id = vi->pf_id;
+	request.func.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO");
+		return err;
+	}
+
+	/* Fixed-size response: copy only what the caller provided room for. */
+	rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1d963e5fd1..fb075c6860 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_representor.c',
+        'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 09/19] net/cpfl: update vport info before creating representor
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (7 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 08/19] net/cpfl: support vport list/info get beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 10/19] net/cpfl: refine handle virtual channel message beilei.xing
                   ` (10 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Get port representor's vport list and update vport_map_hash
before creating the port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      |   2 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   3 +
 drivers/net/cpfl/cpfl_representor.c | 124 ++++++++++++++++++++++++++++
 3 files changed, 128 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 949a2c8069..fc0ebc6fb7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1633,7 +1633,7 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
 		       struct cpchnl2_vport_info *vport_info)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 4b8c0da632..9cc96839ed 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -189,6 +189,9 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_vport_info *vport_info);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d91d7311d..dcc01d0669 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -368,6 +368,86 @@ match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 	return false;
 }
 
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF representor */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF representor */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	/* Fetch the vport list for the represented function. */
+	return cpfl_cc_vport_list_get(adapter, &vi, response);
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF representor */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF representor */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	/* Fetch detailed info for one vport of the represented function. */
+	return cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	/* Zero the whole struct first: vi ends up as a hash key in
+	 * vport_map_hash, so every byte must be defined. The PF branch
+	 * below does not set vf_id (the query helpers set it to 0), and
+	 * padding bytes would otherwise be uninitialized.
+	 */
+	memset(&vi, 0, sizeof(vi));
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, &response->info);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
 int
 cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -375,8 +455,14 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 	uint32_t iter = 0;
 	const struct cpfl_repr_id *repr_id;
 	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
 	int ret;
 
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
 	rte_spinlock_lock(&adapter->repr_lock);
 
 	while (rte_hash_iterate(adapter->repr_whitelist_hash,
@@ -385,6 +471,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 		char name[RTE_ETH_NAME_MAX_LEN];
 		uint32_t iter_iter = 0;
 		bool matched;
+		int i;
 
 		/* skip representor already be created */
 		if (dev != NULL)
@@ -402,6 +489,41 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 				 repr_id->host_id,
 				 repr_id->pf_id);
 
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			rte_spinlock_unlock(&adapter->repr_lock);
+			rte_free(vlist_resp);
+			return ret;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				rte_free(vlist_resp);
+				return ret;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update  host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				rte_free(vlist_resp);
+				return ret;
+			}
+		}
+
 		/* find a matched vport */
 		rte_spinlock_lock(&adapter->vport_map_lock);
 
@@ -428,6 +550,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
 				rte_spinlock_unlock(&adapter->vport_map_lock);
 				rte_spinlock_unlock(&adapter->repr_lock);
+				rte_free(vlist_resp);
 				return ret;
 			}
 			break;
@@ -443,6 +566,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 	}
 
 	rte_spinlock_unlock(&adapter->repr_lock);
+	rte_free(vlist_resp);
 
 	return 0;
 }
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 10/19] net/cpfl: refine handle virtual channel message
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (8 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 09/19] net/cpfl: update vport info before creating representor beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 11/19] net/cpfl: add exceptional vport beilei.xing
                   ` (9 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Refine handling of virtual channel event messages.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 46 ++++++++++++++++------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index fc0ebc6fb7..88c1479f3a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1591,40 +1591,50 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-	struct idpf_vport *vport = NULL;
+	struct cpfl_vport *vport = NULL;
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = &adapter->vports[i]->base;
-		if (vport->vport_id != vport_id)
+		vport = adapter->vports[i];
+		if (vport->base.vport_id != vport_id)
 			continue;
 		else
 			return vport;
 	}
 
-	return vport;
+	return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-	struct rte_eth_dev_data *data = vport->dev_data;
-	struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+	struct cpfl_vport *vport;
+	struct rte_eth_dev_data *data;
+	struct rte_eth_dev *dev;
 
 	if (msglen < sizeof(struct virtchnl2_event)) {
 		PMD_DRV_LOG(ERR, "Error event");
 		return;
 	}
 
+	vport = cpfl_find_vport(adapter, vc_event->vport_id);
+	if (!vport) {
+		PMD_DRV_LOG(ERR, "Can't find vport.");
+		return;
+	}
+
+	data = vport->itf.data;
+	dev = &rte_eth_devices[data->port_id];
+
 	switch (vc_event->event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-		vport->link_up = !!(vc_event->link_status);
-		vport->link_speed = vc_event->link_speed;
+		vport->base.link_up = !!(vc_event->link_status);
+		vport->base.link_speed = vc_event->link_speed;
 		cpfl_dev_link_update(dev, 0);
 		break;
 	default:
@@ -1741,10 +1751,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 	struct idpf_adapter *base = &adapter->base;
 	struct idpf_dma_mem *dma_mem = NULL;
 	struct idpf_hw *hw = &base->hw;
-	struct virtchnl2_event *vc_event;
 	struct idpf_ctlq_msg ctlq_msg;
 	enum idpf_mbx_opc mbx_op;
-	struct idpf_vport *vport;
 	uint16_t pending = 1;
 	uint32_t vc_op;
 	int ret;
@@ -1766,18 +1774,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 		switch (mbx_op) {
 		case idpf_mbq_opc_send_msg_to_peer_pf:
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
-				if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
-					PMD_DRV_LOG(ERR, "Error event");
-					return;
-				}
-				vc_event = (struct virtchnl2_event *)base->mbx_resp;
-				vport = cpfl_find_vport(adapter, vc_event->vport_id);
-				if (!vport) {
-					PMD_DRV_LOG(ERR, "Can't find vport.");
-					return;
-				}
-				cpfl_handle_event_msg(vport, base->mbx_resp,
-						      ctlq_msg.data_len);
+				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
+							    ctlq_msg.data_len);
 			} else if (vc_op == CPCHNL2_OP_EVENT) {
 				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
 							     ctlq_msg.data_len);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 11/19] net/cpfl: add exceptional vport
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (9 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 10/19] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup beilei.xing
                   ` (8 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

This patch creates an exceptional vport when a port representor is configured.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 107 ++++++++++++++++++++++++++++++---
 drivers/net/cpfl/cpfl_ethdev.h |   8 +++
 drivers/net/cpfl/cpfl_rxtx.c   |  16 +++++
 drivers/net/cpfl/cpfl_rxtx.h   |   7 +++
 4 files changed, 131 insertions(+), 7 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 88c1479f3a..f674d93050 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1023,8 +1023,13 @@ cpfl_dev_start(struct rte_eth_dev *dev)
 		goto err_startq;
 	}
 
-	cpfl_set_rx_function(dev);
-	cpfl_set_tx_function(dev);
+	if (cpfl_vport->exceptional) {
+		dev->rx_pkt_burst = cpfl_dummy_recv_pkts;
+		dev->tx_pkt_burst = cpfl_dummy_xmit_pkts;
+	} else {
+		cpfl_set_rx_function(dev);
+		cpfl_set_tx_function(dev);
+	}
 
 	ret = idpf_vc_vport_ena_dis(vport, true);
 	if (ret != 0) {
@@ -1098,13 +1103,15 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	if (!cpfl_vport->exceptional) {
+		adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
+		adapter->cur_vport_nb--;
+		adapter->vports[vport->sw_idx] = NULL;
+	}
+
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
-
-	adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
-	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
-	adapter->vports[vport->sw_idx] = NULL;
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -1621,6 +1628,11 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is exceptional vport */
+	if (adapter->exceptional_vport &&
+	    adapter->exceptional_vport->base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -2192,6 +2204,56 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	return ret;
 }
 
+static int
+cpfl_exceptional_vport_init(struct rte_eth_dev *dev, void *init_params)
+{
+	struct cpfl_vport *cpfl_vport = CPFL_DEV_TO_VPORT(dev);
+	struct idpf_vport *vport = &cpfl_vport->base;
+	struct cpfl_adapter_ext *adapter = init_params;
+	/* for sending create vport virtchnl msg prepare */
+	struct virtchnl2_create_vport create_vport_info;
+	int ret = 0;
+
+	dev->dev_ops = &cpfl_eth_dev_ops;
+	vport->adapter = &adapter->base;
+
+	memset(&create_vport_info, 0, sizeof(create_vport_info));
+	ret = idpf_vport_info_init(vport, &create_vport_info);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to init exceptional vport req_info.");
+		goto err;
+	}
+
+	ret = idpf_vport_init(vport, &create_vport_info, dev->data);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to init exceptional vport.");
+		goto err;
+	}
+
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
+	/* Use the stdbool literal for the bool field (not the TRUE macro),
+	 * matching the driver's other bool assignments.
+	 */
+	cpfl_vport->exceptional = true;
+
+	dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR, "Cannot allocate mac_addr for exceptional vport.");
+		ret = -ENOMEM;
+		goto err_mac_addrs;
+	}
+
+	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
+			    &dev->data->mac_addrs[0]);
+
+	adapter->exceptional_vport = cpfl_vport;
+
+	return 0;
+
+err_mac_addrs:
+	/* Unwind the vport init; mac_addrs allocation failed. */
+	idpf_vport_deinit(vport);
+err:
+	return ret;
+}
+
 static const struct rte_pci_id pci_id_cpfl_map[] = {
 	{ RTE_PCI_DEVICE(IDPF_INTEL_VENDOR_ID, IDPF_DEV_ID_CPF) },
 	{ .vendor_id = 0, /* sentinel */ },
@@ -2299,6 +2361,23 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 	return 0;
 }
 
+static int
+cpfl_exceptional_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret;
+
+	/* One exceptional vport per PCI device, named after it. */
+	snprintf(name, sizeof(name), "cpfl_%s_exceptional_vport", pci_dev->name);
+	ret = rte_eth_dev_create(&pci_dev->device, name,
+				 sizeof(struct cpfl_vport),
+				 NULL, NULL, cpfl_exceptional_vport_init,
+				 adapter);
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Failed to create exceptional vport");
+	return ret;
+}
+
 static int
 cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
@@ -2347,13 +2426,19 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 		goto close_ethdev;
 	}
 
+	if (adapter->devargs.repr_args_num > 0) {
+		retval = cpfl_exceptional_vport_create(pci_dev, adapter);
+		if (retval != 0) {
+			PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
+			goto close_ethdev;
+		}
+	}
 	retval = cpfl_repr_create(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to create representors ");
 		goto close_ethdev;
 	}
 
-
 	return 0;
 
 close_ethdev:
@@ -2387,6 +2472,14 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	if (adapter->exceptional_vport == NULL && adapter->devargs.repr_args_num > 0) {
+		ret = cpfl_exceptional_vport_create(pci_dev, adapter);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
+			return ret;
+		}
+	}
+
 	ret = cpfl_repr_create(pci_dev, adapter);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to create representors ");
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9cc96839ed..b0fb05c7b9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -156,6 +156,11 @@ struct cpfl_vport {
 	struct idpf_rx_queue *p2p_rx_bufq;
 	struct idpf_tx_queue *p2p_tx_complq;
 	bool p2p_manual_bind;
+
+	/* exceptional vport */
+	bool exceptional;  /* this vport is for exceptional one */
+	uint32_t dispatch_service_id;
+	uint32_t dispatch_core_id;
 };
 
 struct cpfl_repr {
@@ -180,6 +185,9 @@ struct cpfl_adapter_ext {
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
 
+	/* exceptional vport and exceptional queues */
+	struct cpfl_vport *exceptional_vport;
+
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 2ef6871a85..df6a8c1940 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1409,6 +1409,22 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+uint16_t
+cpfl_dummy_recv_pkts(__rte_unused void *queue,
+		     __rte_unused struct rte_mbuf **rx_pkts,
+		     __rte_unused uint16_t nb_pkts)
+{
+	/* Placeholder Rx burst for the exceptional vport: the parameter is
+	 * an Rx array, so name it rx_pkts (was misleadingly tx_pkts).
+	 * Always reports zero packets received.
+	 */
+	return 0;
+}
+
+uint16_t
+cpfl_dummy_xmit_pkts(__rte_unused void *queue,
+		     __rte_unused struct rte_mbuf **tx_pkts,
+		     __rte_unused uint16_t nb_pkts)
+{
+	/* Placeholder Tx burst for the exceptional vport: always reports
+	 * zero packets transmitted, so the caller keeps ownership of the
+	 * mbufs.
+	 */
+	return 0;
+}
+
 void
 cpfl_set_rx_function(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index aacd087b56..914a0485b5 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -117,4 +117,11 @@ int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
 int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
 int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
 				   bool rx, bool on);
+uint16_t cpfl_dummy_recv_pkts(void *queue,
+			      struct rte_mbuf **tx_pkts,
+			      uint16_t nb_pkts);
+
+uint16_t cpfl_dummy_xmit_pkts(void *queue,
+			      struct rte_mbuf **tx_pkts,
+			      uint16_t nb_pkts);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (10 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 11/19] net/cpfl: add exceptional vport beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 13/19] net/cpfl: support link update for representor beilei.xing
                   ` (7 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support Rx/Tx queue setup for port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  11 +++
 drivers/net/cpfl/cpfl_representor.c | 126 ++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b0fb05c7b9..8a8721bbe9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -170,6 +170,17 @@ struct cpfl_repr {
 	struct cpfl_vport_info *vport_info;
 };
 
+struct cpfl_repr_rx_queue {
+	struct cpfl_repr *repr;
+	struct rte_mempool *mb_pool;
+	struct rte_ring *rx_ring;
+};
+
+struct cpfl_repr_tx_queue {
+	struct cpfl_repr *repr;
+	struct cpfl_tx_queue *txq;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index dcc01d0669..19c7fb4cb9 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -285,12 +285,138 @@ cpfl_repr_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_repr_rx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_id,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 struct rte_mempool *pool)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(dev);
+	struct cpfl_repr_rx_queue *rxq;
+	char ring_name[RTE_RING_NAMESIZE];
+	struct rte_ring *rx_ring;
+
+	if (!(dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+
+	/* rte_ring_create requires a power-of-2 size within driver bounds. */
+	if (!RTE_IS_POWER_OF_2(nb_desc) ||
+	    nb_desc > CPFL_MAX_RING_DESC ||
+	    nb_desc < CPFL_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "nb_desc should be <= %u, >= %u and a power of 2",
+			     CPFL_MAX_RING_DESC, CPFL_MIN_RING_DESC);
+		return -EINVAL;
+	}
+
+	/* Free memory if needed */
+	rxq = dev->data->rx_queues[queue_id];
+	if (rxq) {
+		rte_ring_free(rxq->rx_ring);
+		rte_free(rxq);
+		dev->data->rx_queues[queue_id] = NULL;
+	}
+
+	/* Allocate rx queue data structure */
+	rxq = rte_zmalloc_socket("cpfl representor rx queue",
+				 sizeof(struct cpfl_repr_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!rxq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for representor rx queue");
+		return -ENOMEM;
+	}
+
+	/* use rte_ring as rx queue of representor */
+	if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF)
+		snprintf(ring_name, sizeof(ring_name), "cpfl_repr_c%dpf%dvf%d_rx",
+			 repr->repr_id.host_id, repr->repr_id.pf_id, repr->repr_id.vf_id);
+	else
+		snprintf(ring_name, sizeof(ring_name), "cpfl_repr_c%dpf%d_rx",
+			 repr->repr_id.host_id, repr->repr_id.pf_id);
+	/* Ring names are global; refuse to reuse one that already exists. */
+	rx_ring = rte_ring_lookup(ring_name);
+	if (rx_ring) {
+		PMD_INIT_LOG(ERR, "rte_ring %s is occupied.", ring_name);
+		rte_free(rxq);
+		return -EEXIST;
+	}
+
+	rx_ring = rte_ring_create(ring_name, nb_desc, socket_id,
+				  RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!rx_ring) {
+		PMD_INIT_LOG(ERR, "Failed to create ring %s.", ring_name);
+		rte_free(rxq);
+		return -EINVAL;
+	}
+
+	rxq->mb_pool = pool;
+	rxq->repr = repr;
+	rxq->rx_ring = rx_ring;
+	dev->data->rx_queues[queue_id] = rxq;
+
+	return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+	struct cpfl_repr_tx_queue *txq;
+	struct cpfl_vport *vport;
+
+	if (!(dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+
+	/* The exceptional vport provides the underlying HW Tx queue; check
+	 * it exists before allocating anything, so a failure does not leave
+	 * a half-initialized queue registered in dev->data.
+	 */
+	vport = adapter->exceptional_vport;
+	if (!vport) {
+		PMD_INIT_LOG(ERR, "No default vport is created for exceptional path");
+		return -ENODEV;
+	}
+
+	/* Free the previous queue if the slot is occupied. Note: this must
+	 * clear tx_queues (the original cleared rx_queues by mistake).
+	 */
+	txq = dev->data->tx_queues[queue_id];
+	if (txq) {
+		rte_free(txq);
+		dev->data->tx_queues[queue_id] = NULL;
+	}
+	txq = rte_zmalloc_socket("cpfl representor tx queue",
+				 sizeof(struct cpfl_repr_tx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!txq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for representor tx queue");
+		return -ENOMEM;
+	}
+	/* use vport HW queue to transmit, no need to allocate
+	 * a ring for it
+	 */
+	txq->repr = repr;
+	/* TODO: need to select the hw txq when multi txqs are there.
+	 * Now just use the default queue 0
+	 */
+	txq->txq = ((struct rte_eth_dev_data *)vport->itf.data)->tx_queues[0];
+	dev->data->tx_queues[queue_id] = txq;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
 	.dev_configure		= cpfl_repr_dev_configure,
 	.dev_close		= cpfl_repr_dev_close,
 	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
+	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 };
 
 static int
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 13/19] net/cpfl: support link update for representor
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (11 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 14/19] net/cpfl: add stats ops " beilei.xing
                   ` (6 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add link update ops for representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  1 +
 drivers/net/cpfl/cpfl_representor.c | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8a8721bbe9..7813b9173e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -168,6 +168,7 @@ struct cpfl_repr {
 	struct cpfl_repr_id repr_id;
 	struct rte_ether_addr mac_addr;
 	struct cpfl_vport_info *vport_info;
+	bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_repr_rx_queue {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 19c7fb4cb9..862464602f 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -408,6 +408,23 @@ cpfl_repr_tx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+		      __rte_unused int wait_to_complete)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
+
+	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+	dev_link->link_status = repr->func_up ?
+			RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -417,6 +434,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+	.link_update		= cpfl_repr_link_update,
 };
 
 static int
@@ -431,6 +449,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
 	repr->itf.adapter = adapter;
 	repr->itf.data = eth_dev->data;
+	if (repr->vport_info->vport_info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 14/19] net/cpfl: add stats ops for representor
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (12 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 13/19] net/cpfl: support link update for representor beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 15/19] common/idpf: refine inline function beilei.xing
                   ` (5 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support stats_get and stats_reset ops for port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  8 +++++
 drivers/net/cpfl/cpfl_representor.c | 54 +++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7813b9173e..33e810408b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -171,15 +171,23 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_repr_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+};
+
 struct cpfl_repr_rx_queue {
 	struct cpfl_repr *repr;
 	struct rte_mempool *mb_pool;
 	struct rte_ring *rx_ring;
+	struct cpfl_repr_stats stats; /* Statistics */
 };
 
 struct cpfl_repr_tx_queue {
 	struct cpfl_repr *repr;
 	struct cpfl_tx_queue *txq;
+	struct cpfl_repr_stats stats; /* Statistics */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 862464602f..79cb7f76d4 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -425,6 +425,58 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+idpf_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct cpfl_repr_tx_queue *txq;
+	struct cpfl_repr_rx_queue *rxq;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		stats->opackets += __atomic_load_n(&txq->stats.packets, __ATOMIC_RELAXED);
+		stats->obytes += __atomic_load_n(&txq->stats.bytes, __ATOMIC_RELAXED);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		stats->ipackets += __atomic_load_n(&rxq->stats.packets, __ATOMIC_RELAXED);
+		stats->ibytes += __atomic_load_n(&rxq->stats.bytes, __ATOMIC_RELAXED);
+		stats->ierrors += __atomic_load_n(&rxq->stats.errors, __ATOMIC_RELAXED);
+	}
+	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+	return 0;
+}
+
+static int
+idpf_repr_stats_reset(struct rte_eth_dev *dev)
+{
+	struct cpfl_repr_tx_queue *txq;
+	struct cpfl_repr_rx_queue *rxq;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		__atomic_store_n(&txq->stats.packets, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&txq->stats.bytes, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&txq->stats.errors, 0, __ATOMIC_RELAXED);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		__atomic_store_n(&rxq->stats.packets, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&rxq->stats.bytes, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&rxq->stats.errors, 0, __ATOMIC_RELAXED);
+	}
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -435,6 +487,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 	.link_update		= cpfl_repr_link_update,
+	.stats_get		= idpf_repr_stats_get,
+	.stats_reset		= idpf_repr_stats_reset,
 };
 
 static int
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 15/19] common/idpf: refine inline function
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (13 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 14/19] net/cpfl: add stats ops " beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 16/19] net/cpfl: support representor data path beilei.xing
                   ` (4 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Move some static inline functions to header file.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_rxtx.c | 246 -------------------------
 drivers/common/idpf/idpf_common_rxtx.h | 246 +++++++++++++++++++++++++
 drivers/common/idpf/version.map        |   3 +
 3 files changed, 249 insertions(+), 246 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..50465e76ea 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -442,188 +442,6 @@ idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
 	return 0;
 }
 
-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
-/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
-static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
-			    uint32_t in_timestamp)
-{
-#ifdef RTE_ARCH_X86_64
-	struct idpf_hw *hw = &ad->hw;
-	const uint64_t mask = 0xFFFFFFFF;
-	uint32_t hi, lo, lo2, delta;
-	uint64_t ns;
-
-	if (flag != 0) {
-		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
-			       PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-		lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-		hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-		/*
-		 * On typical system, the delta between lo and lo2 is ~1000ns,
-		 * so 10000 seems a large-enough but not overly-big guard band.
-		 */
-		if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
-			lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-		else
-			lo2 = lo;
-
-		if (lo2 < lo) {
-			lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-			hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-		}
-
-		ad->time_hw = ((uint64_t)hi << 32) | lo;
-	}
-
-	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
-	if (delta > (mask / 2)) {
-		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
-		ns = ad->time_hw - delta;
-	} else {
-		ns = ad->time_hw + delta;
-	}
-
-	return ns;
-#else /* !RTE_ARCH_X86_64 */
-	RTE_SET_USED(ad);
-	RTE_SET_USED(flag);
-	RTE_SET_USED(in_timestamp);
-	return 0;
-#endif /* RTE_ARCH_X86_64 */
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S				\
-	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \
-	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \
-	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \
-	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
-
-static inline uint64_t
-idpf_splitq_rx_csum_offload(uint8_t err)
-{
-	uint64_t flags = 0;
-
-	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
-		return flags;
-
-	if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
-		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
-			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
-		return flags;
-	}
-
-	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
-		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
-	else
-		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
-
-	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
-		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
-	else
-		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
-
-	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
-		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
-
-	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
-		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
-	else
-		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
-
-	return flags;
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
-#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
-#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
-
-static inline uint64_t
-idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
-			   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
-{
-	uint8_t status_err0_qw0;
-	uint64_t flags = 0;
-
-	status_err0_qw0 = rx_desc->status_err0_qw0;
-
-	if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
-		flags |= RTE_MBUF_F_RX_RSS_HASH;
-		mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
-				IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
-			((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
-			 IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
-			((uint32_t)(rx_desc->hash3) <<
-			 IDPF_RX_FLEX_DESC_ADV_HASH3_S);
-	}
-
-	return flags;
-}
-
-static void
-idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
-{
-	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
-	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
-	uint16_t nb_refill = rx_bufq->rx_free_thresh;
-	uint16_t nb_desc = rx_bufq->nb_rx_desc;
-	uint16_t next_avail = rx_bufq->rx_tail;
-	struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
-	uint64_t dma_addr;
-	uint16_t delta;
-	int i;
-
-	if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
-		return;
-
-	rx_buf_ring = rx_bufq->rx_ring;
-	delta = nb_desc - next_avail;
-	if (unlikely(delta < nb_refill)) {
-		if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
-			for (i = 0; i < delta; i++) {
-				rx_buf_desc = &rx_buf_ring[next_avail + i];
-				rx_bufq->sw_ring[next_avail + i] = nmb[i];
-				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
-				rx_buf_desc->hdr_addr = 0;
-				rx_buf_desc->pkt_addr = dma_addr;
-			}
-			nb_refill -= delta;
-			next_avail = 0;
-			rx_bufq->nb_rx_hold -= delta;
-		} else {
-			__atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
-					   nb_desc - next_avail, __ATOMIC_RELAXED);
-			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
-			       rx_bufq->port_id, rx_bufq->queue_id);
-			return;
-		}
-	}
-
-	if (nb_desc - next_avail >= nb_refill) {
-		if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
-			for (i = 0; i < nb_refill; i++) {
-				rx_buf_desc = &rx_buf_ring[next_avail + i];
-				rx_bufq->sw_ring[next_avail + i] = nmb[i];
-				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
-				rx_buf_desc->hdr_addr = 0;
-				rx_buf_desc->pkt_addr = dma_addr;
-			}
-			next_avail += nb_refill;
-			rx_bufq->nb_rx_hold -= nb_refill;
-		} else {
-			__atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
-					   nb_desc - next_avail, __ATOMIC_RELAXED);
-			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
-			       rx_bufq->port_id, rx_bufq->queue_id);
-		}
-	}
-
-	IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
-
-	rx_bufq->rx_tail = next_avail;
-}
-
 uint16_t
 idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts)
@@ -749,70 +567,6 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
-static inline void
-idpf_split_tx_free(struct idpf_tx_queue *cq)
-{
-	volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
-	volatile struct idpf_splitq_tx_compl_desc *txd;
-	uint16_t next = cq->tx_tail;
-	struct idpf_tx_entry *txe;
-	struct idpf_tx_queue *txq;
-	uint16_t gen, qid, q_head;
-	uint16_t nb_desc_clean;
-	uint8_t ctype;
-
-	txd = &compl_ring[next];
-	gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
-	       IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
-	if (gen != cq->expected_gen_id)
-		return;
-
-	ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
-		 IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
-	qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
-	       IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
-	q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
-	txq = cq->txqs[qid - cq->tx_start_qid];
-
-	switch (ctype) {
-	case IDPF_TXD_COMPLT_RE:
-		/* clean to q_head which indicates be fetched txq desc id + 1.
-		 * TODO: need to refine and remove the if condition.
-		 */
-		if (unlikely(q_head % 32)) {
-			TX_LOG(ERR, "unexpected desc (head = %u) completion.",
-			       q_head);
-			return;
-		}
-		if (txq->last_desc_cleaned > q_head)
-			nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
-				q_head;
-		else
-			nb_desc_clean = q_head - txq->last_desc_cleaned;
-		txq->nb_free += nb_desc_clean;
-		txq->last_desc_cleaned = q_head;
-		break;
-	case IDPF_TXD_COMPLT_RS:
-		/* q_head indicates sw_id when ctype is 2 */
-		txe = &txq->sw_ring[q_head];
-		if (txe->mbuf != NULL) {
-			rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = NULL;
-		}
-		break;
-	default:
-		TX_LOG(ERR, "unknown completion type.");
-		return;
-	}
-
-	if (++next == cq->nb_tx_desc) {
-		next = 0;
-		cq->expected_gen_id ^= 1;
-	}
-
-	cq->tx_tail = next;
-}
-
 /* Check if the context descriptor is needed for TX offloading */
 static inline uint16_t
 idpf_calc_context_desc(uint64_t flags)
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 6cb83fc0a6..a53335616a 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -229,6 +229,252 @@ struct idpf_txq_ops {
 extern int idpf_timestamp_dynfield_offset;
 extern uint64_t idpf_timestamp_dynflag;
 
+static inline void
+idpf_split_tx_free(struct idpf_tx_queue *cq)
+{
+	volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
+	volatile struct idpf_splitq_tx_compl_desc *txd;
+	uint16_t next = cq->tx_tail;
+	struct idpf_tx_entry *txe;
+	struct idpf_tx_queue *txq;
+	uint16_t gen, qid, q_head;
+	uint16_t nb_desc_clean;
+	uint8_t ctype;
+
+	txd = &compl_ring[next];
+	gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+	       IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+	if (gen != cq->expected_gen_id)
+		return;
+
+	ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+		 IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+	qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+	       IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+	q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
+	txq = cq->txqs[qid - cq->tx_start_qid];
+
+	switch (ctype) {
+	case IDPF_TXD_COMPLT_RE:
+		/* clean to q_head which indicates be fetched txq desc id + 1.
+		 * TODO: need to refine and remove the if condition.
+		 */
+		if (unlikely(q_head % 32)) {
+			TX_LOG(ERR, "unexpected desc (head = %u) completion.",
+			       q_head);
+			return;
+		}
+		if (txq->last_desc_cleaned > q_head)
+			nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
+				q_head;
+		else
+			nb_desc_clean = q_head - txq->last_desc_cleaned;
+		txq->nb_free += nb_desc_clean;
+		txq->last_desc_cleaned = q_head;
+		break;
+	case IDPF_TXD_COMPLT_RS:
+		/* q_head indicates sw_id when ctype is 2 */
+		txe = &txq->sw_ring[q_head];
+		if (txe->mbuf != NULL) {
+			rte_pktmbuf_free_seg(txe->mbuf);
+			txe->mbuf = NULL;
+		}
+		break;
+	default:
+		TX_LOG(ERR, "unknown completion type.");
+		return;
+	}
+
+	if (++next == cq->nb_tx_desc) {
+		next = 0;
+		cq->expected_gen_id ^= 1;
+	}
+
+	cq->tx_tail = next;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S				\
+	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+	uint64_t flags = 0;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
+		return flags;
+
+	if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+			   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+	uint8_t status_err0_qw0;
+	uint64_t flags = 0;
+
+	status_err0_qw0 = rx_desc->status_err0_qw0;
+
+	if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
+		flags |= RTE_MBUF_F_RX_RSS_HASH;
+		mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
+				IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
+			((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+			 IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
+			((uint32_t)(rx_desc->hash3) <<
+			 IDPF_RX_FLEX_DESC_ADV_HASH3_S);
+	}
+
+	return flags;
+}
+
+#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
+/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
+static inline uint64_t
+idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
+			    uint32_t in_timestamp)
+{
+#ifdef RTE_ARCH_X86_64
+	struct idpf_hw *hw = &ad->hw;
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t hi, lo, lo2, delta;
+	uint64_t ns;
+
+	if (flag != 0) {
+		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
+			       PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+		lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+		hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+		/*
+		 * On typical system, the delta between lo and lo2 is ~1000ns,
+		 * so 10000 seems a large-enough but not overly-big guard band.
+		 */
+		if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
+			lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+		else
+			lo2 = lo;
+
+		if (lo2 < lo) {
+			lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+			hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+		}
+
+		ad->time_hw = ((uint64_t)hi << 32) | lo;
+	}
+
+	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+		ns = ad->time_hw - delta;
+	} else {
+		ns = ad->time_hw + delta;
+	}
+
+	return ns;
+#else /* !RTE_ARCH_X86_64 */
+	RTE_SET_USED(ad);
+	RTE_SET_USED(flag);
+	RTE_SET_USED(in_timestamp);
+	return 0;
+#endif /* RTE_ARCH_X86_64 */
+}
+
+static inline void
+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
+{
+	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
+	volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
+	uint16_t nb_refill = rx_bufq->rx_free_thresh;
+	uint16_t nb_desc = rx_bufq->nb_rx_desc;
+	uint16_t next_avail = rx_bufq->rx_tail;
+	struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
+	uint64_t dma_addr;
+	uint16_t delta;
+	int i;
+
+	if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
+		return;
+
+	rx_buf_ring = rx_bufq->rx_ring;
+	delta = nb_desc - next_avail;
+	if (unlikely(delta < nb_refill)) {
+		if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
+			for (i = 0; i < delta; i++) {
+				rx_buf_desc = &rx_buf_ring[next_avail + i];
+				rx_bufq->sw_ring[next_avail + i] = nmb[i];
+				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+				rx_buf_desc->hdr_addr = 0;
+				rx_buf_desc->pkt_addr = dma_addr;
+			}
+			nb_refill -= delta;
+			next_avail = 0;
+			rx_bufq->nb_rx_hold -= delta;
+		} else {
+			__atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
+					   nb_desc - next_avail, __ATOMIC_RELAXED);
+			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+			       rx_bufq->port_id, rx_bufq->queue_id);
+			return;
+		}
+	}
+
+	if (nb_desc - next_avail >= nb_refill) {
+		if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
+			for (i = 0; i < nb_refill; i++) {
+				rx_buf_desc = &rx_buf_ring[next_avail + i];
+				rx_bufq->sw_ring[next_avail + i] = nmb[i];
+				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+				rx_buf_desc->hdr_addr = 0;
+				rx_buf_desc->pkt_addr = dma_addr;
+			}
+			next_avail += nb_refill;
+			rx_bufq->nb_rx_hold -= nb_refill;
+		} else {
+			__atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
+					   nb_desc - next_avail, __ATOMIC_RELAXED);
+			RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+			       rx_bufq->port_id, rx_bufq->queue_id);
+		}
+	}
+
+	IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
+
+	rx_bufq->rx_tail = next_avail;
+}
+
 __rte_internal
 int idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh);
 __rte_internal
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0729f6b912..8a637b3a0d 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -74,5 +74,8 @@ INTERNAL {
 	idpf_vport_rss_config;
 	idpf_vport_stats_update;
 
+	idpf_timestamp_dynfield_offset;
+	idpf_timestamp_dynflag;
+
 	local: *;
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 16/19] net/cpfl: support representor data path
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (14 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 15/19] common/idpf: refine inline function beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 17/19] net/cpfl: support dispatch process beilei.xing
                   ` (3 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add Rx/Tx burst for port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_representor.c |  83 +++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c        | 121 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h        |   4 +
 3 files changed, 208 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 79cb7f76d4..51b70ea346 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -491,6 +491,87 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.stats_reset		= idpf_repr_stats_reset,
 };
 
+#define MAX_IDPF_REPRENSENTOR_BURST  128
+static uint16_t
+cpfl_repr_rx_burst(void *rxq,
+		   struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct cpfl_repr_rx_queue *rx_queue = rxq;
+	struct rte_ring *ring = rx_queue->rx_ring;
+	struct rte_mbuf *mbuf[MAX_IDPF_REPRENSENTOR_BURST] = {NULL};
+	unsigned int nb_recv;
+	uint16_t i;
+
+	if (unlikely(!ring))
+		return 0;
+
+	nb_recv = rte_ring_dequeue_burst(ring, (void **)mbuf,
+					 RTE_MIN(nb_pkts, MAX_IDPF_REPRENSENTOR_BURST), NULL);
+	for (i = 0; i < nb_recv; i++) {
+		if (mbuf[i]->pool != rx_queue->mb_pool) {
+			/* need copy if mpools used for vport and representor queue are different */
+			rx_pkts[i] = rte_pktmbuf_copy(mbuf[i], rx_queue->mb_pool, 0, UINT32_MAX);
+			rte_pktmbuf_free(mbuf[i]);
+		} else {
+			rx_pkts[i] = mbuf[i];
+		}
+	}
+
+	__atomic_fetch_add(&rx_queue->stats.packets, nb_recv, __ATOMIC_RELAXED);
+	/* TODO: bytes stats */
+	return nb_recv;
+}
+
+static uint16_t
+cpfl_get_vsi_from_vf_representor(struct cpfl_repr *repr)
+{
+	return repr->vport_info->vport_info.vsi_id;
+}
+
+static uint16_t
+cpfl_repr_tx_burst(void *txq,
+		   struct rte_mbuf **tx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct cpfl_repr_tx_queue *tx_queue = txq;
+	struct idpf_tx_queue *hw_txq = &tx_queue->txq->base;
+	struct cpfl_repr *repr;
+	uint16_t vsi_id;
+	uint16_t nb;
+
+	if (unlikely(!tx_queue->txq))
+		return 0;
+
+	repr = tx_queue->repr;
+
+	if (!hw_txq) {
+		PMD_INIT_LOG(ERR, "No Queue associated with representor host_id: %d, %s %d",
+			     repr->repr_id.host_id,
+			     (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? "vf" : "pf",
+			     (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? repr->repr_id.vf_id :
+			     repr->repr_id.pf_id);
+		return 0;
+	}
+
+	if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) {
+		vsi_id = cpfl_get_vsi_from_vf_representor(repr);
+	} else {
+		/* TODO: RTE_ETH_REPRESENTOR_PF */
+		PMD_INIT_LOG(ERR, "Get vsi from pf representor is not supported.");
+		return 0;
+	}
+
+	rte_spinlock_lock(&tx_queue->txq->lock);
+	nb = cpfl_xmit_pkts_to_vsi(tx_queue->txq, tx_pkts, nb_pkts, vsi_id);
+	rte_spinlock_unlock(&tx_queue->txq->lock);
+
+	__atomic_fetch_add(&tx_queue->stats.packets, nb, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tx_queue->stats.errors, nb, __ATOMIC_RELAXED);
+	/* TODO: bytes stats */
+	return nb;
+}
+
 static int
 cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 {
@@ -507,6 +588,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+	eth_dev->rx_pkt_burst = cpfl_repr_rx_burst;
+	eth_dev->tx_pkt_burst = cpfl_repr_tx_burst;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
 	/* bit[15:14] type
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index df6a8c1940..882efe04cf 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -616,6 +616,9 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	txq->ops = &def_txq_ops;
 	cpfl_vport->nb_data_txq++;
 	txq->q_set = true;
+
+	rte_spinlock_init(&cpfl_txq->lock);
+
 	dev->data->tx_queues[queue_idx] = cpfl_txq;
 
 	return 0;
@@ -1409,6 +1412,124 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+static inline void
+cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
+		       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+	uint16_t cmd_dtype;
+
+	/* Use TX Native TSO Context Descriptor to carry VSI
+	 * so TSO is not supported
+	 */
+	if (is_vsi) {
+		cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+			IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI;
+		ctx_desc->tso.qw0.mss_rt =
+			rte_cpu_to_le_16((uint16_t)vsi_id &
+				 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+	} else {
+		cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+			IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK;
+	}
+
+	ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+}
+
+/* Transmit pkts to destination VSI,
+ * much similar as idpf_splitq_xmit_pkts
+ */
+uint16_t
+cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *cpfl_txq, struct rte_mbuf **tx_pkts,
+		      uint16_t nb_pkts, uint16_t vsi_id)
+{
+	volatile struct idpf_flex_tx_sched_desc *txr;
+	volatile struct idpf_flex_tx_sched_desc *txd;
+	volatile union idpf_flex_tx_ctx_desc *ctx_desc;
+	struct idpf_tx_entry *sw_ring;
+	struct idpf_tx_entry *txe, *txn;
+	uint16_t nb_used, tx_id, sw_id;
+	struct idpf_tx_queue *txq;
+	struct rte_mbuf *tx_pkt;
+	uint16_t nb_to_clean;
+	uint16_t nb_tx = 0;
+
+	if (unlikely(!cpfl_txq))
+		return nb_tx;
+
+	txq = &cpfl_txq->base;
+	if (unlikely(!txq) || unlikely(!txq->q_started))
+		return nb_tx;
+
+	txr = txq->desc_ring;
+	sw_ring = txq->sw_ring;
+	tx_id = txq->tx_tail;
+	sw_id = txq->sw_tail;
+	txe = &sw_ring[sw_id];
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = tx_pkts[nb_tx];
+
+		if (txq->nb_free <= txq->free_thresh) {
+			/* TODO: Need to refine, refer to idpf_splitq_xmit_pkts */
+			nb_to_clean = 2 * txq->rs_thresh;
+			while (nb_to_clean--)
+				idpf_split_tx_free(txq->complq);
+		}
+
+		if (txq->nb_free < tx_pkt->nb_segs + 1)
+			break;
+		/* need context desc carry target vsi, no TSO support. */
+		nb_used = tx_pkt->nb_segs + 1;
+
+		/* context descriptor prepare*/
+		ctx_desc = (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+		cpfl_set_tx_switch_ctx(vsi_id, true, ctx_desc);
+		tx_id++;
+		if (tx_id == txq->nb_tx_desc)
+			tx_id = 0;
+
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+			txe->mbuf = tx_pkt;
+
+			/* Setup TX descriptor */
+			txd->buf_addr =
+				rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+			txd->qw1.cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+			txd->qw1.rxr_bufsize = tx_pkt->data_len;
+			txd->qw1.compl_tag = sw_id;
+			tx_id++;
+			if (tx_id == txq->nb_tx_desc)
+				tx_id = 0;
+			sw_id = txe->next_id;
+			txe = txn;
+			tx_pkt = tx_pkt->next;
+		} while (tx_pkt);
+
+		/* fill the last descriptor with End of Packet (EOP) bit */
+		txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;
+
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+
+		if (txq->nb_used >= 32) {
+			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+			/* Update txq RE bit counters */
+			txq->nb_used = 0;
+		}
+	}
+
+	/* update the tail pointer if any packets were processed */
+	if (likely(nb_tx)) {
+		IDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+		txq->tx_tail = tx_id;
+		txq->sw_tail = sw_id;
+	}
+	return nb_tx;
+}
+
 uint16_t
 cpfl_dummy_recv_pkts(__rte_unused void *queue,
 		     __rte_unused struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 914a0485b5..463ab73323 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -72,6 +72,7 @@ struct cpfl_txq_hairpin_info {
 struct cpfl_tx_queue {
 	struct idpf_tx_queue base;
 	struct cpfl_txq_hairpin_info hairpin_info;
+	rte_spinlock_t lock;
 };
 
 static inline uint16_t
@@ -124,4 +125,7 @@ uint16_t cpfl_dummy_recv_pkts(void *queue,
 uint16_t cpfl_dummy_xmit_pkts(void *queue,
 			      struct rte_mbuf **tx_pkts,
 			      uint16_t nb_pkts);
+uint16_t cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *txq,
+			       struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts, uint16_t vsi_id);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 17/19] net/cpfl: support dispatch process
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (15 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 16/19] net/cpfl: support representor data path beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 18/19] net/cpfl: add dispatch service beilei.xing
                   ` (2 subsequent siblings)
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add the cpfl_packets_dispatch function to support the dispatch process.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      |  39 ++++++++-
 drivers/net/cpfl/cpfl_ethdev.h      |   1 +
 drivers/net/cpfl/cpfl_representor.c |  80 +++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c        | 131 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h        |   8 ++
 5 files changed, 257 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f674d93050..8569a0b81d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -129,6 +129,13 @@ static const struct rte_cpfl_xstats_name_off rte_cpfl_stats_strings[] = {
 
 #define CPFL_NB_XSTATS			RTE_DIM(rte_cpfl_stats_strings)
 
+static const struct rte_mbuf_dynfield cpfl_source_metadata_param = {
+	.name = "cpfl_source_metadata",
+	.size = sizeof(uint16_t),
+	.align = __alignof__(uint16_t),
+	.flags = 0,
+};
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
@@ -2382,7 +2389,7 @@ static int
 cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
-	int retval;
+	int retval, offset;
 	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
@@ -2432,7 +2439,22 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 			PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
 			goto close_ethdev;
 		}
+
+		/* register dynfield to carry src_vsi
+		 * TODO: is this a waste to use dynfield? Can we redefine a recv func like
+		 * below to carry src vsi directly by src_vsi[]?
+		 * idpf_exceptioanl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		 * uint16_t src_vsi[], uint16_t nb_pkts)
+		 */
+		offset = rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+		if (unlikely(offset == -1)) {
+			retval = -rte_errno;
+			PMD_INIT_LOG(ERR, "source metadata is disabled in mbuf");
+			goto close_ethdev;
+		}
+		cpfl_dynfield_source_metadata_offset = offset;
 	}
+
 	retval = cpfl_repr_create(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to create representors ");
@@ -2458,7 +2480,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 static int
 cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
-	int ret;
+	int ret, offset;
 
 	ret = cpfl_parse_devargs(pci_dev, adapter, false);
 	if (ret != 0) {
@@ -2478,6 +2500,19 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 			PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
 			return ret;
 		}
+
+		/* register dynfield to carry src_vsi
+		 * TODO: is this a waste to use dynfield? Can we redefine a recv func like
+		 * below to carry src vsi directly by src_vsi[]?
+		 * idpf_exceptioanl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		 * uint16_t src_vsi[], uint16_t nb_pkts)
+		 */
+		offset = rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+		if (unlikely(offset == -1)) {
+			PMD_INIT_LOG(ERR, "source metadata is disabled in mbuf");
+			return -rte_errno;
+		}
+		cpfl_dynfield_source_metadata_offset = offset;
 	}
 
 	ret = cpfl_repr_create(pci_dev, adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 33e810408b..5bd6f930b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -227,6 +227,7 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_packets_dispatch(void *arg);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 51b70ea346..a781cff403 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,7 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_ethdev.h"
 
 static int
 cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
@@ -853,3 +854,82 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 
 	return 0;
 }
+
+static struct cpfl_repr *
+cpfl_get_repr_by_vsi(struct cpfl_adapter_ext *adapter,
+		     uint16_t vsi_id)
+{
+	const struct cpfl_repr_id *repr_id;
+	struct rte_eth_dev *dev;
+	struct cpfl_repr *repr;
+	uint32_t iter = 0;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_whitelist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		if (dev == NULL)
+			continue;
+
+		repr = CPFL_DEV_TO_REPR(dev);
+		if (repr->vport_info->vport_info.vsi_id == vsi_id) {
+			rte_spinlock_unlock(&adapter->repr_lock);
+			return repr;
+		}
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return NULL;
+}
+
+#define PKT_DISPATCH_BURST  32
+/* Function to dispath packets to representors' rx rings */
+int
+cpfl_packets_dispatch(void *arg)
+{
+	struct rte_eth_dev *dev = arg;
+	struct cpfl_vport *vport = dev->data->dev_private;
+	struct cpfl_adapter_ext *adapter = vport->itf.adapter;
+	struct cpfl_rx_queue **rxq =
+		(struct cpfl_rx_queue **)dev->data->rx_queues;
+	struct rte_mbuf *pkts_burst[PKT_DISPATCH_BURST];
+	struct cpfl_repr *repr;
+	struct rte_eth_dev_data *dev_data;
+	struct cpfl_repr_rx_queue *repr_rxq;
+	uint16_t src_vsi;
+	uint32_t nb_rx, nb_enq;
+	uint8_t i, j;
+
+	if (dev->data->dev_started == 0) {
+		/* skip if excpetional vport is not started*/
+		return 0;
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		nb_rx = cpfl_splitq_recv_pkts(rxq[i], pkts_burst, PKT_DISPATCH_BURST);
+		for (j = 0; j < nb_rx; j++) {
+			src_vsi = *CPFL_MBUF_SOURCE_METADATA(pkts_burst[j]);
+			/* Get the repr according to source vsi */
+			repr = cpfl_get_repr_by_vsi(adapter, src_vsi);
+			if (unlikely(!repr)) {
+				rte_pktmbuf_free(pkts_burst[j]);
+				continue;
+			}
+			dev_data = (struct rte_eth_dev_data *)repr->itf.data;
+			if (unlikely(!dev_data->dev_started || !dev_data->rx_queue_state[0])) {
+				rte_pktmbuf_free(pkts_burst[j]);
+				continue;
+			}
+			repr_rxq = (struct cpfl_repr_rx_queue *)
+				(((struct rte_eth_dev_data *)repr->itf.data)->rx_queues[0]);
+			if (unlikely(!repr_rxq || !repr_rxq->rx_ring)) {
+				rte_pktmbuf_free(pkts_burst[j]);
+				continue;
+			}
+			nb_enq = rte_ring_enqueue_bulk(repr_rxq->rx_ring,
+						       (void *)&pkts_burst[j], 1, NULL);
+			if (!nb_enq) /* enqueue fails, just free it */
+				rte_pktmbuf_free(pkts_burst[j]);
+		}
+	}
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 882efe04cf..a931b5ec12 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1412,6 +1412,137 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+int cpfl_dynfield_source_metadata_offset = -1;
+
+uint16_t
+cpfl_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		      uint16_t nb_pkts)
+{
+	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
+	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+	uint16_t pktlen_gen_bufq_id;
+	struct idpf_rx_queue *rxq;
+	const uint32_t *ptype_tbl;
+	uint8_t status_err0_qw1;
+	struct idpf_adapter *ad;
+	struct rte_mbuf *rxm;
+	uint16_t rx_id_bufq1;
+	uint16_t rx_id_bufq2;
+	uint64_t pkt_flags;
+	uint16_t pkt_len;
+	uint16_t bufq_id;
+	uint16_t gen_id;
+	uint16_t rx_id;
+	uint16_t nb_rx;
+	uint64_t ts_ns;
+
+	nb_rx = 0;
+	rxq = rx_queue;
+	ad = rxq->adapter;
+
+	if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
+		return nb_rx;
+
+	rx_id = rxq->rx_tail;
+	rx_id_bufq1 = rxq->bufq1->rx_next_avail;
+	rx_id_bufq2 = rxq->bufq2->rx_next_avail;
+	rx_desc_ring = rxq->rx_ring;
+	ptype_tbl = rxq->adapter->ptype_tbl;
+
+	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
+		rxq->hw_register_set = 1;
+
+	while (nb_rx < nb_pkts) {
+		rx_desc = &rx_desc_ring[rx_id];
+
+		pktlen_gen_bufq_id =
+			rte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);
+		gen_id = (pktlen_gen_bufq_id &
+			  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
+			VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;
+		if (gen_id != rxq->expected_gen_id)
+			break;
+
+		pkt_len = (pktlen_gen_bufq_id &
+			   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>
+			VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;
+		if (pkt_len == 0)
+			RX_LOG(ERR, "Packet length is 0");
+
+		rx_id++;
+		if (unlikely(rx_id == rxq->nb_rx_desc)) {
+			rx_id = 0;
+			rxq->expected_gen_id ^= 1;
+		}
+
+		bufq_id = (pktlen_gen_bufq_id &
+			   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>
+			VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;
+		if (bufq_id == 0) {
+			rxm = rxq->bufq1->sw_ring[rx_id_bufq1];
+			rx_id_bufq1++;
+			if (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))
+				rx_id_bufq1 = 0;
+			rxq->bufq1->nb_rx_hold++;
+		} else {
+			rxm = rxq->bufq2->sw_ring[rx_id_bufq2];
+			rx_id_bufq2++;
+			if (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))
+				rx_id_bufq2 = 0;
+			rxq->bufq2->nb_rx_hold++;
+		}
+
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rxm->next = NULL;
+		rxm->nb_segs = 1;
+		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		rxm->packet_type =
+			ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
+				   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
+				  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
+
+		status_err0_qw1 = rx_desc->status_err0_qw1;
+		pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+		pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
+		if (idpf_timestamp_dynflag > 0 &&
+		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {
+			/* timestamp */
+			ts_ns = idpf_tstamp_convert_32b_64b(ad,
+							    rxq->hw_register_set,
+							    rte_le_to_cpu_32(rx_desc->ts_high));
+			rxq->hw_register_set = 0;
+			*RTE_MBUF_DYNFIELD(rxm,
+					   idpf_timestamp_dynfield_offset,
+					   rte_mbuf_timestamp_t *) = ts_ns;
+			rxm->ol_flags |= idpf_timestamp_dynflag;
+		}
+
+		if (likely(cpfl_dynfield_source_metadata_offset != -1))
+			*CPFL_MBUF_SOURCE_METADATA(rxm) =
+				rte_le_to_cpu_16(rx_desc->fmd4);
+
+		rxm->ol_flags |= pkt_flags;
+
+		rx_pkts[nb_rx++] = rxm;
+	}
+
+	if (nb_rx > 0) {
+		rxq->rx_tail = rx_id;
+		if (rx_id_bufq1 != rxq->bufq1->rx_next_avail)
+			rxq->bufq1->rx_next_avail = rx_id_bufq1;
+		if (rx_id_bufq2 != rxq->bufq2->rx_next_avail)
+			rxq->bufq2->rx_next_avail = rx_id_bufq2;
+
+		idpf_split_rx_bufq_refill(rxq->bufq1);
+		idpf_split_rx_bufq_refill(rxq->bufq2);
+	}
+
+	return nb_rx;
+}
+
 static inline void
 cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
 		       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 463ab73323..39e5e115d6 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -81,6 +81,11 @@ cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
 	return start_qid + offset;
 }
 
+extern int cpfl_dynfield_source_metadata_offset;
+
+#define CPFL_MBUF_SOURCE_METADATA(m)					\
+	RTE_MBUF_DYNFIELD((m), cpfl_dynfield_source_metadata_offset, uint16_t *)
+
 static inline uint64_t
 cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
 {
@@ -128,4 +133,7 @@ uint16_t cpfl_dummy_xmit_pkts(void *queue,
 uint16_t cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *txq,
 			       struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts, uint16_t vsi_id);
+uint16_t cpfl_splitq_recv_pkts(void *rx_queue,
+			       struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 18/19] net/cpfl: add dispatch service
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (16 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 17/19] net/cpfl: support dispatch process beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-09 15:51 ` [PATCH 19/19] doc: update release notes for representor beilei.xing
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add dispatch service for port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 129 +++++++++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 8569a0b81d..8dbc175749 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -11,6 +11,7 @@
 #include <errno.h>
 #include <rte_alarm.h>
 #include <rte_hash_crc.h>
+#include <rte_service_component.h>
 
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
@@ -136,6 +137,107 @@ static const struct rte_mbuf_dynfield cpfl_source_metadata_param = {
 	.flags = 0,
 };
 
+static int
+cpfl_dispatch_service_register(struct rte_eth_dev *dev)
+{
+	struct cpfl_vport *vport = dev->data->dev_private;
+	struct rte_service_spec service_params;
+	uint32_t service_core_list[RTE_MAX_LCORE];
+	uint32_t num_service_cores;
+	uint32_t service_core_id;
+	int ret;
+
+	num_service_cores = rte_service_lcore_count();
+	if (num_service_cores <= 0) {
+		PMD_DRV_LOG(ERR, "Fail to register dispatch service, no service core found.");
+		return -ENOTSUP;
+	}
+
+	ret = rte_service_lcore_list(service_core_list, num_service_cores);
+	if (ret <= 0) {
+		PMD_DRV_LOG(ERR, "Fail to get service core list");
+		return -ENOTSUP;
+	}
+	/* use the first lcore by default */
+	service_core_id = service_core_list[0];
+
+	memset(&service_params, 0, sizeof(struct rte_service_spec));
+	snprintf(service_params.name, sizeof(service_params.name), "Dispatch service");
+	service_params.callback = cpfl_packets_dispatch;
+	service_params.callback_userdata = dev;
+	service_params.capabilities = 0;
+	service_params.socket_id = rte_lcore_to_socket_id(service_core_id);
+
+	ret = rte_service_component_register(&service_params, &vport->dispatch_service_id);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to register %s component", service_params.name);
+		return ret;
+	}
+
+	ret = rte_service_map_lcore_set(vport->dispatch_service_id, service_core_id, 1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to map service %s to lcore %d",
+			    service_params.name, service_core_id);
+		return ret;
+	}
+
+	vport->dispatch_core_id = service_core_id;
+
+	return 0;
+}
+
+static void
+cpfl_dispatch_service_unregister(struct rte_eth_dev *dev)
+{
+	struct cpfl_vport *vport = dev->data->dev_private;
+
+	PMD_DRV_LOG(DEBUG, "Unregister service %s",
+		    rte_service_get_name(vport->dispatch_service_id));
+	rte_service_map_lcore_set(vport->dispatch_service_id,
+				  vport->dispatch_core_id, 0);
+	rte_service_component_unregister(vport->dispatch_service_id);
+}
+
+static int
+cpfl_dispatch_service_start(struct rte_eth_dev *dev)
+{
+	struct cpfl_vport *vport = dev->data->dev_private;
+	int ret;
+
+	ret = rte_service_component_runstate_set(vport->dispatch_service_id, 1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to start %s component",
+			    rte_service_get_name(vport->dispatch_service_id));
+		return ret;
+	}
+	ret = rte_service_runstate_set(vport->dispatch_service_id, 1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to start service %s",
+			    rte_service_get_name(vport->dispatch_service_id));
+		return ret;
+	}
+	return 0;
+}
+
+static void
+cpfl_dispatch_service_stop(struct rte_eth_dev *dev)
+{
+	struct cpfl_vport *vport = dev->data->dev_private;
+	int ret;
+
+	/* Service core may be shared and don't stop it here*/
+
+	ret = rte_service_runstate_set(vport->dispatch_service_id, 0);
+	if (ret)
+		PMD_DRV_LOG(WARNING, "Fail to stop service %s",
+			    rte_service_get_name(vport->dispatch_service_id));
+
+	ret = rte_service_component_runstate_set(vport->dispatch_service_id, 0);
+	if (ret)
+		PMD_DRV_LOG(WARNING, "Fail to stop %s component",
+			    rte_service_get_name(vport->dispatch_service_id));
+}
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
@@ -1031,6 +1133,14 @@ cpfl_dev_start(struct rte_eth_dev *dev)
 	}
 
 	if (cpfl_vport->exceptional) {
+		/* No pkt_burst function setting on exceptional vport,
+		 * start dispatch service instead
+		 */
+		if (cpfl_dispatch_service_start(dev)) {
+			PMD_DRV_LOG(ERR, "Fail to start Dispatch service on %s",
+				    dev->device->name);
+			goto err_serv_start;
+		}
 		dev->rx_pkt_burst = cpfl_dummy_recv_pkts;
 		dev->tx_pkt_burst = cpfl_dummy_xmit_pkts;
 	} else {
@@ -1050,6 +1160,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
 	return 0;
 
 err_vport:
+	cpfl_dispatch_service_stop(dev);
+err_serv_start:
 	cpfl_stop_queues(dev);
 err_startq:
 	idpf_vport_irq_unmap_config(vport, dev->data->nb_rx_queues);
@@ -1070,6 +1182,10 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_vc_vport_ena_dis(vport, false);
 
+	if (cpfl_vport->exceptional)
+		/* Stop dispatch service when dev stop */
+		cpfl_dispatch_service_stop(dev);
+
 	cpfl_stop_queues(dev);
 
 	idpf_vport_irq_unmap_config(vport, dev->data->nb_rx_queues);
@@ -1114,6 +1230,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 		adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
 		adapter->cur_vport_nb--;
 		adapter->vports[vport->sw_idx] = NULL;
+	} else {
+		/* unregister idpf dispatch service on exceptional vport */
+		cpfl_dispatch_service_unregister(dev);
+		adapter->exceptional_vport = NULL;
 	}
 
 	idpf_vport_deinit(vport);
@@ -2253,8 +2373,17 @@ cpfl_exceptional_vport_init(struct rte_eth_dev *dev, void *init_params)
 
 	adapter->exceptional_vport = cpfl_vport;
 
+	/* register dispatch service on exceptional vport */
+	ret = cpfl_dispatch_service_register(dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to register dispatch service.");
+		goto err_serv_reg;
+	}
+
 	return 0;
 
+err_serv_reg:
+	rte_free(dev->data->mac_addrs);
 err_mac_addrs:
 	idpf_vport_deinit(vport);
 err:
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 19/19] doc: update release notes for representor
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (17 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 18/19] net/cpfl: add dispatch service beilei.xing
@ 2023-08-09 15:51 ` beilei.xing
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
  19 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-09 15:51 UTC (permalink / raw)
  To: jingjing.wu, mingxia.liu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add support for port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/rel_notes/release_23_11.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 6b4dd21fd0..688bee4d6d 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -55,6 +55,9 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -------------
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 00/12] net/cpfl: support port representor
  2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
                   ` (18 preceding siblings ...)
  2023-08-09 15:51 ` [PATCH 19/19] doc: update release notes for representor beilei.xing
@ 2023-08-16 15:05 ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
                     ` (12 more replies)
  19 siblings, 13 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. code refine for representor support
2. support port representor

v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (12):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: add cp channel
  net/cpfl: enable vport mapping
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support vport list/info get
  net/cpfl: update vport info before creating representor
  net/cpfl: refine handle virtual channel message
  net/cpfl: support link update for representor
  net/cpfl: support Rx/Tx queue setup for representor

 doc/guides/nics/cpfl.rst               |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h         | 321 +++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c         | 619 +++++++++++++++++++++----
 drivers/net/cpfl/cpfl_ethdev.h         |  92 +++-
 drivers/net/cpfl/cpfl_representor.c    | 619 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h    |  26 ++
 drivers/net/cpfl/cpfl_vchnl.c          |  72 +++
 drivers/net/cpfl/meson.build           |   4 +-
 9 files changed, 1686 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 01/12] net/cpfl: refine devargs parse and process
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 02/12] net/cpfl: introduce interface structure beilei.xing
                     ` (11 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Keep devargs in adapter.
2. Refine handling of the case in which no vport is specified in devargs.
3. Separate devargs parse and devargs process

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++---------------
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter,
-		   struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct cpfl_devargs *cpfl_args = &adapter->devargs;
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret;
 
 	cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
-	/* check parsed devargs */
-	if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-	    adapter->max_vport_nb) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     adapter->max_vport_nb);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-		if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
-				     cpfl_args->req_vports[i], adapter->max_vport_nb - 1);
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been requested",
-				     cpfl_args->req_vports[i]);
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 	adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i;
+
+	/* refine vport number, at least 1 vport */
+	if (devargs->req_vport_nb == 0) {
+		devargs->req_vport_nb = 1;
+		devargs->req_vports[0] = 0;
+	}
+
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + devargs->req_vport_nb >
+	    adapter->max_vport_nb) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     adapter->max_vport_nb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     devargs->req_vports[i], adapter->max_vport_nb - 1);
+			return -EINVAL;
+		}
+
+		if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
+				     devargs->req_vports[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport_param vport_param;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret, i;
+
+	for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = adapter->devargs.req_vports[i];
+		vport_param.idx = cpfl_vport_idx_alloc(adapter);
+		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			break;
+		}
+		snprintf(name, sizeof(name), "net_%s_vport_%d",
+			 pci_dev->device.name,
+			 adapter->devargs.req_vports[i]);
+		ret = rte_eth_dev_create(&pci_dev->device, name,
+					    sizeof(struct cpfl_vport),
+					    NULL, NULL, cpfl_dev_vport_init,
+					    &vport_param);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to create vport %d",
+				    vport_param.devarg_id);
+	}
+
+	return 0;
+}
+
 static int
 cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
-	struct cpfl_vport_param vport_param;
 	struct cpfl_adapter_ext *adapter;
-	struct cpfl_devargs devargs;
-	char name[RTE_ETH_NAME_MAX_LEN];
-	int i, retval;
+	int retval;
 
 	if (!cpfl_adapter_list_init) {
 		rte_spinlock_init(&cpfl_adapter_lock);
@@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
+	retval = cpfl_parse_devargs(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return retval;
+	}
+
 	retval = cpfl_adapter_ext_init(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter.");
@@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next);
 	rte_spinlock_unlock(&cpfl_adapter_lock);
 
-	retval = cpfl_parse_devargs(pci_dev, adapter, &devargs);
+	retval = cpfl_vport_devargs_process(adapter);
 	if (retval != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		PMD_INIT_LOG(ERR, "Failed to process vport devargs");
 		goto err;
 	}
 
-	if (devargs.req_vport_nb == 0) {
-		/* If no vport devarg, create vport 0 by default. */
-		vport_param.adapter = adapter;
-		vport_param.devarg_id = 0;
-		vport_param.idx = cpfl_vport_idx_alloc(adapter);
-		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-			return 0;
-		}
-		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
-			 pci_dev->device.name);
-		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct cpfl_vport),
-					    NULL, NULL, cpfl_dev_vport_init,
-					    &vport_param);
-		if (retval != 0)
-			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-	} else {
-		for (i = 0; i < devargs.req_vport_nb; i++) {
-			vport_param.adapter = adapter;
-			vport_param.devarg_id = devargs.req_vports[i];
-			vport_param.idx = cpfl_vport_idx_alloc(adapter);
-			if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-				break;
-			}
-			snprintf(name, sizeof(name), "cpfl_%s_vport_%d",
-				 pci_dev->device.name,
-				 devargs.req_vports[i]);
-			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct cpfl_vport),
-						    NULL, NULL, cpfl_dev_vport_init,
-						    &vport_param);
-			if (retval != 0)
-				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    vport_param.devarg_id);
-		}
+	retval = cpfl_vport_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create vports.");
+		goto err;
 	}
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e42354f70..b637bf2e45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -115,6 +115,7 @@ struct cpfl_adapter_ext {
 	uint16_t cur_vport_nb;
 
 	uint16_t used_vecs_num;
+	struct cpfl_devargs devargs;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 02/12] net/cpfl: introduce interface structure
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
  2023-08-16 15:05   ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 03/12] net/cpfl: add cp channel beilei.xing
                     ` (10 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Introduce the cpfl interface structure to distinguish vport and port
representor.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 16 ++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
+	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..53e45035e8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,19 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+	CPFL_ITF_TYPE_VPORT,
+	CPFL_ITF_TYPE_REPRESENTOR
+};
+
+struct cpfl_itf {
+	enum cpfl_itf_type type;
+	struct cpfl_adapter_ext *adapter;
+	void *data;
+};
+
 struct cpfl_vport {
+	struct cpfl_itf itf;
 	struct idpf_vport base;
 	struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +136,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev)					\
+	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)				\
+	((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 03/12] net/cpfl: add cp channel
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
  2023-08-16 15:05   ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
  2023-08-16 15:05   ` [PATCH v2 02/12] net/cpfl: introduce interface structure beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 04/12] net/cpfl: enable vport mapping beilei.xing
                     ` (9 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add cpchnl header file.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h | 321 +++++++++++++++++++++++++++++++++
 1 file changed, 321 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 0000000000..c95fad57b6
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief      Command Opcodes
+ *              Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+	/* vport info */
+	CPCHNL2_OP_GET_VPORT_LIST		= 0x8025,
+	CPCHNL2_OP_GET_VPORT_INFO		= 0x8026,
+
+	/* DPHMA Event notifications */
+	CPCHNL2_OP_EVENT			= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT		8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
+
+#define CPCHNL2_FUNC_TYPE_PF		0
+#define CPCHNL2_FUNC_TYPE_SRIOV		1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED	0
+#define CPCHNL2_VPORT_STATUS_ENABLED	1
+#define CPCHNL2_VPORT_STATUS_DISABLED	2
+#define CPCHNL2_VPORT_STATUS_DESTROYED	3
+
+/* Queue Groups Extension */
+/**************************************************/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+struct cpchnl2_queue_chunk {
+	u32 type;	       /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */
+	u32 start_queue_id;
+	u32 num_queues;
+	u8 pad[4];
+};
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+	u16 num_chunks;
+	u8 reserved[6];
+	struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+
+struct cpchnl2_rx_queue_group_info {
+	/* User can ask to update rss_lut size originally allocated
+	 * by CreateVport command. New size will be returned if allocation succeeded,
+	 * otherwise original rss_size from CreateVport will be returned.
+	 */
+	u16 rss_lut_size;
+	u8 pad[6]; /*Future extension purpose*/
+};
+
+struct cpchnl2_tx_queue_group_info {
+	u8 tx_tc; /*TX TC queue group will be connected to*/
+	/* Each group can have its own priority, value 0-7, while each group with unique
+	 * priority is strict priority. It can be single set of queue groups which configured with
+	 * same priority, then they are assumed part of WFQ arbitration group and are expected to be
+	 * assigned with weight.
+	 */
+	u8 priority;
+	/* Determines if queue group is expected to be Strict Priority according to its priority */
+	u8 is_sp;
+	u8 pad;
+	/* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set.
+	 * The weights of the groups are independent of each other. Possible values: 1-200.
+	 */
+	u16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	u8 pad2[8]; /* Future extension purpose*/
+};
+
+struct cpchnl2_queue_group_id {
+	/* Queue group ID - depended on it's type:
+	 * Data & p2p - is an index which is relative to Vport.
+	 * Config & Mailbox - is an ID which is relative to func.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by host and assigned by Control plane.
+	 */
+	u16 queue_group_id;
+	/* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+	u16 queue_group_type;
+	u8 pad[4];
+};
+
+struct cpchnl2_queue_group_info {
+	/* IN */
+	struct cpchnl2_queue_group_id qg_id;
+
+	/* IN, Number of queues of different types in the group. */
+	u16 num_tx_q;
+	u16 num_tx_complq;
+	u16 num_rx_q;
+	u16 num_rx_bufq;
+
+	struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+	struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+	u8 egress_port;
+	u8 pad[39]; /*Future extension purpose*/
+	struct cpchnl2_queue_grp_chunks chunks;
+};
+
+struct cpchnl2_queue_groups {
+	u16 num_queue_groups; /* Number of queue groups in struct below */
+	u8 pad[6];
+	/* group information , number is determined by param above */
+	struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+	CPCHNL2_FTYPE_LAN_PF = 0,
+	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport_id {
+	u32 vport_id;
+	u16 vport_type;
+	u8 pad[2];
+};
+
+struct cpchnl2_func_id {
+	/* Function type: 0 - LAN PF, 1 -  LAN VF, Rest - "reserved" */
+	u8 func_type;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs
+	 * and 8-12 CPFs are valid
+	 */
+	u8 pf_id;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	u8 pad[4];
+};
+
+/* Note! Do not change the fields and especially their order as should eventually
+ * be aligned to 32bit. Must match the virtchnl structure definition.
+ * If should change, change also the relevant FAS and virtchnl code, under permission.
+ */
+struct cpchnl2_vport_info {
+	u16 vport_index;
+	/* VSI index, global indexing aligned to HW.
+	 * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command.
+	 * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev".
+	 */
+	u16 vsi_id;
+	u8 vport_status;	/* enum cpchnl2_vport_status */
+	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	u8 func_type;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	/* Always relevant, indexing is according to LAN PE 0-15,
+	 * while only 0-4 APFs and 8-12 CPFs are valid.
+	 */
+	u8 pf_id;
+	u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */
+	/* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */
+	u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS];
+	u16 vmrl_id;
+	/* Indicates if IMC created SEM MAC rule for this Vport.
+	 * Currently this is done by IMC for all Vport of type "Default" only,
+	 * but can be different in the future.
+	 */
+	u8 sem_mac_rule_exist;
+	/* Bitmask to inform which TC is valid.
+	 * 0x1 << TCnum. 1b: valid else 0.
+	 * Driven by Node Policy on system level, then Sysetm level TCs are
+	 * reported to IDPF and it can enable Vport level TCs on TX according
+	 * to Syetm enabled ones.
+	 * If TC aware mode - bit set for valid TC.
+	 * otherwise =1 (only bit 0 is set. represents the VSI
+	 */
+	u8 tx_tc_bitmask;
+	/* For each valid TC, TEID of VPORT node over TC in TX LAN WS.
+	 * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID
+	 */
+	u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT];
+	/* For each valid TC, bandwidth in mbps.
+	 * Default BW per Vport is from Node policy
+	 * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth
+	 */
+	u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT];
+	/* From Node Policy. */
+	u16 max_mtu;
+	u16 default_rx_qid;	/* Default LAN RX Queue ID */
+	u16 vport_flags; /* see: VPORT_FLAGS */
+	u8 egress_port;
+	u8 pad_reserved[5];
+};
+
+/*
+ * CPCHNL2_OP_GET_VPORT_LIST
+ */
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ */
+struct cpchnl2_get_vport_list_request {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u8 pad[4];
+};
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ *        other PE types
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ * @param nof_vports Number of vports created on the function
+ * @param vports array of the IDs and types. vport ID is elative to its func (PF/VF). same as in
+ *        Create Vport
+ * vport_type: Aligned to VirtChnl types: Default, SIOV, etc.
+ */
+struct cpchnl2_get_vport_list_response {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u16 nof_vports;
+	u8 pad[2];
+	struct cpchnl2_vport_id vports[];
+};
+
+/*
+ * CPCHNL2_OP_GET_VPORT_INFO
+ */
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request
+ * @param vport a structure containing vport_id (relative to function) and type
+ * @param func a structure containing function type, pf_id, vf_id
+ */
+struct cpchnl2_get_vport_info_request {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response
+ * @param vport a structure containing vport_id (relative to function) and type to get info for
+ * @param info a structure all the information for a given vport
+ * @param queue_groups a structure containing all the queue groups of the given vport
+ */
+struct cpchnl2_get_vport_info_response {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_vport_info info;
+	struct cpchnl2_queue_groups queue_groups;
+};
+
+ /* Cpchnl events
+  * Sends event message to inform the peer of notification that may affect it.
+  * No direct response is expected from the peer, though it may generate other
+  * messages in response to this one.
+  */
+enum cpchnl2_event {
+	CPCHNL2_EVENT_UNKNOWN = 0,
+	CPCHNL2_EVENT_VPORT_CREATED,
+	CPCHNL2_EVENT_VPORT_DESTROYED,
+	CPCHNL2_EVENT_VPORT_ENABLED,
+	CPCHNL2_EVENT_VPORT_DISABLED,
+	CPCHNL2_PKG_EVENT,
+	CPCHNL2_EVENT_ADD_QUEUE_GROUPS,
+	CPCHNL2_EVENT_DEL_QUEUE_GROUPS,
+	CPCHNL2_EVENT_ADD_QUEUES,
+	CPCHNL2_EVENT_DEL_QUEUES
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_CREATED
+ */
+struct cpchnl2_event_vport_created {
+	struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_info info; /* Vport configuration info */
+	struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_DESTROYED
+ */
+struct cpchnl2_event_vport_destroyed {
+	/* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+
+struct cpchnl2_event_info {
+	struct {
+		s32 type;		/* See enum cpchnl2_event */
+		uint8_t reserved[4];	/* Reserved */
+	} header;
+	union {
+		struct cpchnl2_event_vport_created vport_created;
+		struct cpchnl2_event_vport_destroyed vport_destroyed;
+	} data;
+};
+
+#endif /* _CPFL_CPCHNL_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 04/12] net/cpfl: enable vport mapping
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (2 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 03/12] net/cpfl: add cp channel beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 05/12] net/cpfl: parse representor devargs beilei.xing
                     ` (8 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Handle cpchnl events for vport create/destroy.
2. Use a hash table to store the vport_id to vport_info mapping.
3. Use a spinlock for thread safety.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  21 ++++-
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..17a69c16fe 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include <rte_dev.h>
 #include <errno.h>
 #include <rte_alarm.h>
+#include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1492,6 +1493,108 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
 	}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vport_identity,
+		       struct cpchnl2_vport_info *vport_info)
+{
+	struct cpfl_vport_info *info = NULL;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret >= 0) {
+		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+		/* overwrite info */
+		if (info)
+			info->vport_info = *vport_info;
+		goto fini;
+	}
+
+	info = rte_zmalloc(NULL, sizeof(*info), 0);
+	if (info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	info->vport_info = *vport_info;
+
+	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+		rte_free(info);
+		goto err;
+	}
+
+fini:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+	struct cpfl_vport_info *info;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "vport id not exist");
+		goto err;
+	}
+
+	rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	rte_free(info);
+
+	return 0;
+
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+	struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+	struct cpchnl2_vport_info *info;
+	struct cpfl_vport_id vport_identity = { 0 };
+
+	if (msglen < sizeof(struct cpchnl2_event_info)) {
+		PMD_DRV_LOG(ERR, "Error event");
+		return;
+	}
+
+	switch (cpchnl2_event->header.type) {
+	case CPCHNL2_EVENT_VPORT_CREATED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+		info = &cpchnl2_event->data.vport_created.info;
+		vport_identity.func_type = info->func_type;
+		vport_identity.pf_id = info->pf_id;
+		vport_identity.vf_id = info->vf_id;
+		if (cpfl_vport_info_create(adapter, &vport_identity, info))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+		break;
+	case CPCHNL2_EVENT_VPORT_DESTROYED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+		vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+		vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+		vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+		if (cpfl_vport_info_destroy(adapter, &vport_identity))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+		break;
+	}
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 				}
 				cpfl_handle_event_msg(vport, base->mbx_resp,
 						      ctlq_msg.data_len);
+			} else if (vc_op == CPCHNL2_OP_EVENT) {
+				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+							     ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
@@ -1610,6 +1716,48 @@ static struct virtchnl2_get_capabilities req_caps = {
 	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
 };
 
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-vport", adapter->name);
+
+	rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = adapter->name,
+		.entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_vport_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->vport_map_hash = rte_hash_create(&params);
+
+	if (adapter->vport_map_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+	const void *key = NULL;
+	struct cpfl_vport_map_info *info;
+	uint32_t iter = 0;
+
+	while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+		rte_free(info);
+
+	rte_hash_free(adapter->vport_map_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1634,6 +1782,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_adapter_init;
 	}
 
+	ret = cpfl_vport_map_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init vport map");
+		goto err_vport_map_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1658,6 +1812,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
 	idpf_adapter_deinit(base);
 err_adapter_init:
 	return ret;
@@ -1887,6 +2043,7 @@ static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
 
 	rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 53e45035e8..3515fec4f7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
 #include <rte_spinlock.h>
 #include <rte_ethdev.h>
 #include <rte_kvargs.h>
+#include <rte_hash.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 
-#include "cpfl_logs.h"
-
 #include <idpf_common_device.h>
 #include <idpf_common_virtchnl.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
 
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct cpfl_vport_id {
+	uint32_t vport_id;
+	uint8_t func_type;
+	uint8_t pf_id;
+	uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+	struct cpchnl2_vport_info vport_info;
+	bool enabled;
+};
+
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
 	CPFL_ITF_TYPE_REPRESENTOR
@@ -128,6 +142,9 @@ struct cpfl_adapter_ext {
 
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
+
+	rte_spinlock_t vport_map_lock;
+	struct rte_hash *vport_map_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
     subdir_done()
 endif
 
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 05/12] net/cpfl: parse representor devargs
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (3 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 04/12] net/cpfl: enable vport mapping beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 06/12] net/cpfl: support probe again beilei.xing
                     ` (7 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : xeon (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- xeon > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if omit default value.

representor=c0pf0
  -- xeon> apf
     same as pf0 if omit default value.

representor=c1pf0
  -- acc > apf

Multiple representor devargs are supported,
e.g. create 4 representors for 4 vfs on the xeon APF and one
representor for the acc APF:

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/nics/cpfl.rst               |  36 +++++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c         | 179 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h         |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..ff0a183f78 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor`` the user can specify
+  which functions to create port representors.
+
+  Format is::
+
+    [[c<controller_id>]pf<pf_id>]vf<vf_id>
+
+  Controller_id 0 is Xeon (default), while 1 is ACC.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default value can be omitted.
+
+  Create 4 representors for 4 vfs on xeon APF::
+
+    -a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on ACC::
+
+    -a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on xeon APF and one representor for acc CPF::
+
+    -a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 ------------------------------
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 4411bb32c1..83826c8896 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -72,6 +72,9 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 17a69c16fe..a820528a0d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
+#include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val)
+{
+	uint16_t i;
+
+	for (i = 0; i < *len_list; i++) {
+		if (list[i] == val)
+			return 0;
+	}
+	if (*len_list >= max_list)
+		return -1;
+	list[(*len_list)++] = val;
+	return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+	const uint16_t max_list)
+{
+	uint16_t lo, hi, val;
+	int result, n = 0;
+	const char *pos = str;
+
+	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+	if (result == 1) {
+		if (enlist(list, len_list, max_list, lo) != 0)
+			return NULL;
+	} else if (result == 2) {
+		if (lo > hi)
+			return NULL;
+		for (val = lo; val <= hi; val++) {
+			if (enlist(list, len_list, max_list, val) != 0)
+				return NULL;
+		}
+	} else {
+		return NULL;
+	}
+	return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list)
+{
+	const char *pos = str;
+
+	if (*pos == '[')
+		pos++;
+	while (1) {
+		pos = process_range(pos, list, len_list, max_list);
+		if (pos == NULL)
+			return NULL;
+		if (*pos != ',') /* end of list */
+			break;
+		pos++;
+	}
+	if (*str == '[' && *pos != ']')
+		return NULL;
+	if (*pos == ']')
+		pos++;
+	return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+	struct cpfl_devargs *devargs = args;
+	struct rte_eth_devargs *eth_da;
+	const char *str = value;
+
+	if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+		return -EINVAL;
+
+	eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+	if (str[0] == 'c') {
+		str += 1;
+		str = process_list(str, eth_da->mh_controllers,
+				&eth_da->nb_mh_controllers,
+				RTE_DIM(eth_da->mh_controllers));
+		if (str == NULL)
+			goto done;
+	}
+	if (str[0] == 'p' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_PF;
+		str += 2;
+		str = process_list(str, eth_da->ports,
+				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
+		if (str == NULL || str[0] == '\0')
+			goto done;
+	} else if (eth_da->nb_mh_controllers > 0) {
+		/* 'c' must followed by 'pf'. */
+		str = NULL;
+		goto done;
+	}
+	if (str[0] == 'v' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+		str += 2;
+	} else if (str[0] == 's' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_SF;
+		str += 2;
+	} else {
+		/* 'pf' must followed by 'vf' or 'sf'. */
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+			str = NULL;
+			goto done;
+		}
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+	}
+	str = process_list(str, eth_da->representor_ports,
+		&eth_da->nb_representor_ports,
+		RTE_DIM(eth_da->representor_ports));
+done:
+	if (str == NULL) {
+		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
+		return -1;
+	}
+
+	devargs->repr_args_num++;
+
+	return 0;
+}
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	cpfl_args->repr_args_num = 0;
+	ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args);
+
+	if (ret != 0)
+		goto fail;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2087,6 +2218,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
+static int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2165,6 +2338,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto err;
 	}
 
+	retval = cpfl_repr_devargs_process(adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
+		goto err;
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 3515fec4f7..9c4d8d3ea1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -60,16 +60,24 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_HOST	0
+#define CPFL_HOST_ID_ACC	1
+#define CPFL_PF_TYPE_APF	0
+#define CPFL_PF_TYPE_CPF	1
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
+#define CPFL_REPR_ARG_NUM_MAX	4
 /* Struct used when parse driver specific devargs */
 struct cpfl_devargs {
 	uint16_t req_vports[CPFL_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
+	uint8_t repr_args_num;
+	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
 };
 
 struct p2p_queue_chunks_info {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 06/12] net/cpfl: support probe again
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (4 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 05/12] net/cpfl: parse representor devargs beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
                     ` (6 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Only representor will be parsed for probe again.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++-------
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a820528a0d..09015fbb08 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
 	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+	CPFL_REPRESENTOR,
+	NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
 	RTE_ETH_SPEED_NUM_NONE,
 	RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (devargs == NULL)
 		return 0;
 
-	kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+	kvlist = rte_kvargs_parse(devargs->args,
+			first ? cpfl_valid_args_first : cpfl_valid_args_again);
 	if (kvlist == NULL) {
 		PMD_INIT_LOG(ERR, "invalid kvargs key");
 		return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
+	if (!first)
+		return 0;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	       struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
 
-	if (!cpfl_adapter_list_init) {
-		rte_spinlock_init(&cpfl_adapter_lock);
-		TAILQ_INIT(&cpfl_adapter_list);
-		cpfl_adapter_list_init = true;
-	}
-
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
 	if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
-	retval = cpfl_parse_devargs(pci_dev, adapter);
+	retval = cpfl_parse_devargs(pci_dev, adapter, true);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return retval;
 }
 
+/* Handle an RTE_PCI_DRV_PROBE_AGAIN probe of an already-initialized
+ * adapter: only representor devargs are parsed (first == false) and
+ * turned into allowlist entries.
+ * Returns 0 on success, negative errno on parse/process failure.
+ */
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_parse_devargs(pci_dev, adapter, false);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return ret;
+	}
+
+	ret = cpfl_repr_devargs_process(adapter);
+	if (ret != 0) {
+		/* fixed typo: "reprenstor" -> "representor" */
+		PMD_INIT_LOG(ERR, "Failed to process representor devargs");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* PCI probe entry point. Dispatches between a first-time probe (adapter
+ * creation) and a probe-again (representor-only devargs handling) based
+ * on whether the device is already tracked in cpfl_adapter_list.
+ */
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	struct cpfl_adapter_ext *adapter;
+
+	/* One-time, process-wide init of the global adapter list. */
+	if (!cpfl_adapter_list_init) {
+		rte_spinlock_init(&cpfl_adapter_lock);
+		TAILQ_INIT(&cpfl_adapter_list);
+		cpfl_adapter_list_init = true;
+	}
+
+	adapter = cpfl_find_adapter_ext(pci_dev);
+
+	if (adapter == NULL)
+		return cpfl_pci_probe_first(pci_dev);
+	else
+		return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
 	.id_table	= pci_id_cpfl_map,
-	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING |
+			  RTE_PCI_DRV_PROBE_AGAIN,
 	.probe		= cpfl_pci_probe,
 	.remove		= cpfl_pci_remove,
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 07/12] net/cpfl: create port representor
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (5 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 06/12] net/cpfl: support probe again beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-09-05  7:35     ` Liu, Mingxia
  2023-09-05  8:30     ` Liu, Mingxia
  2023-08-16 15:05   ` [PATCH v2 08/12] net/cpfl: support vport list/info get beilei.xing
                     ` (5 subsequent siblings)
  12 siblings, 2 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Track representor requests in an allowlist.
A representor will only be created for an active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 107 ++++---
 drivers/net/cpfl/cpfl_ethdev.h      |  34 +++
 drivers/net/cpfl/cpfl_representor.c | 448 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 573 insertions(+), 43 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 09015fbb08..08daade7ac 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+/* Create the per-adapter representor allowlist: a hash keyed by
+ * struct cpfl_repr_id whose data pointer is the created ethdev
+ * (NULL until the representor is actually instantiated).
+ * Also initializes the spinlock that protects the allowlist.
+ * Returns 0 on success, -EINVAL if the hash cannot be created.
+ */
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_wl", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_allowlist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_allowlist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Release the representor allowlist hash (rte_hash_free accepts NULL). */
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_allowlist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+		goto err_repr_allowlist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2343,11 +2346,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2374,6 +2389,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9c4d8d3ea1..d4d9727a80 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,32 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define HOST0_APF	0
+#define HOST1_APF	1
+#define HOST2_APF	2
+#define HOST3_APF	3
+#define ACC_APF_ID	4
+#define IMC_APF_ID	5
+#define HOST0_NVME_ID	6
+#define ACC_NVME_ID	7
+#define HOST0_CPF_ID	8
+#define HOST1_CPF_ID	9
+#define HOST2_CPF_ID	10
+#define HOST3_CPF_ID	11
+#define ACC_CPF_ID	12
+#define IMC_IPF_ID	13
+#define ATE_CPF_ID	14
+#define ACC_LCE_ID	15
+#define IMC_MBX_EFD_ID	0
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -136,6 +158,13 @@ struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+/* Private data of a representor ethdev. */
+struct cpfl_repr {
+	struct cpfl_itf itf;            /* common interface header; type = CPFL_ITF_TYPE_REPRESENTOR */
+	struct cpfl_repr_id repr_id;    /* (host, pf, type, vf) identity used as allowlist key */
+	struct rte_ether_addr mac_addr; /* storage backing dev->data->mac_addrs */
+	struct cpfl_vport_info *vport_info; /* matched vport entry from adapter->vport_map_hash */
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -153,6 +182,9 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_allowlist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -163,6 +195,8 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..29da56dda0
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+/* Attach an ethdev pointer to an existing allowlist entry.
+ * Returns -ENOENT if the representor id was never allowlisted,
+ * otherwise the rte_hash_add_key_data() result.
+ * NOTE(review): this helper takes no lock itself — presumably the
+ * caller holds adapter->repr_lock (cpfl_repr_create does); confirm.
+ */
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+	return ret;
+}
+
+/* Add a representor id to the allowlist (no ethdev attached yet).
+ * Takes adapter->repr_lock. Returns 0 on success, -EEXIST if the id
+ * is already present, or a negative rte_hash error.
+ */
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Expand one parsed "representor=" devargs spec into allowlist entries:
+ * the cross product of requested controllers x PFs (x VFs for VF
+ * representors). Entries that already exist (-EEXIST) are skipped
+ * silently so repeated probes are idempotent.
+ * Returns 0 on success, negative errno on allowlist failure.
+ */
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Validate and normalize all parsed representor devargs, then expand
+ * each spec into allowlist entries.
+ * Defaults: host = CPFL_HOST_ID_HOST (xeon), pf = CPFL_PF_TYPE_APF.
+ * Returns 0 on success, -EINVAL on an out-of-range host/PF id, or the
+ * error from cpfl_repr_devargs_process_one().
+ */
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					/* fixed copy-paste: this branch validates
+					 * the PF id, not the host id
+					 */
+					PMD_INIT_LOG(ERR, "Invalid PF ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Remove a representor id from the allowlist under repr_lock.
+ * Returns 0 on success, negative rte_hash error (logged) otherwise.
+ */
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Tear down a representor ethdev. mac_addrs points into the
+ * cpfl_repr structure itself (see cpfl_repr_init), so it is detached
+ * here rather than freed by the ethdev layer; the id is then removed
+ * from the adapter allowlist.
+ */
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+/* Representor configure: reject more than one Rx queue.
+ * NOTE(review): nb_tx_queues is not validated here even though
+ * dev_info reports max_tx_queues = 1 — confirm whether that is
+ * enforced elsewhere.
+ */
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* dev_close simply delegates to the representor uninit path. */
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+/* Report device capabilities for a representor: a single Rx/Tx queue,
+ * one MAC address, and the offload/descriptor limits shared with the
+ * cpfl vport path. The switch info port id is the vsi id of the
+ * matched vport.
+ */
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	/* Default queue thresholds mirror the regular cpfl vport values. */
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain*/
+	dev_info->switch_info.port_id = repr->vport_info->vport_info.vsi_id;
+
+	return 0;
+}
+
+/* Start the representor: only software queue state is flipped here;
+ * no hardware programming is done in this function.
+ */
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/* Stop the representor: mirror of dev_start — mark all queues stopped
+ * and clear dev_started. Software state only.
+ */
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+/* Minimal ethdev ops implemented for representors in this patch. */
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+};
+
+/* rte_eth_dev_create() init callback for a representor. Copies the
+ * identity from the init param, wires up ops, encodes a unique
+ * representor_id, assigns a random MAC (stored inside struct
+ * cpfl_repr, hence detached again in cpfl_repr_uninit) and records
+ * the ethdev pointer in the allowlist.
+ */
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+	/* bit[15:14] type
+	 * bit[13] xeon/acc
+	 * bit[12] apf/cpf
+	 * bit[11:0] vf
+	 */
+	eth_dev->data->representor_id =
+		(uint16_t)(repr->repr_id.type << 14 |
+			   repr->repr_id.host_id << 13 |
+			   repr->repr_id.pf_id << 12 |
+			   repr->repr_id.vf_id);
+
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+/* Map a (host_id, pf_id) pair to the function id used on the IMC side
+ * (HOST0_APF / HOST0_CPF_ID / ACC_APF_ID / ACC_CPF_ID).
+ * Returns the function id, or -EINVAL for ids outside the supported
+ * host/ACC x APF/CPF matrix.
+ */
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+/* Check whether a requested representor id describes the given vport.
+ * PF representor: translate (host_id, pf_id) to the IMC function id
+ * and require it to match the vport's owning PF.
+ * VF representor: require matching vf_id.
+ */
+static bool
+match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+		      struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == CPCHNL2_FUNC_TYPE_PF) {
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		/* Fix: the original returned true for any valid func id
+		 * without comparing it against the vport's pf_id, so every
+		 * PF representor matched every PF vport.
+		 */
+		if (func_id < 0 || func_id != info->pf_id)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == CPCHNL2_FUNC_TYPE_SRIOV) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+/* Instantiate representor ethdevs for every allowlisted id that has a
+ * matching active vport. Holds repr_lock for the whole allowlist
+ * iteration and vport_map_lock while scanning the vport map for each
+ * candidate. Ids without a matching vport are left in the allowlist
+ * (data == NULL) so creation can be retried when the vport appears.
+ * Returns 0 on success or the rte_eth_dev_create() error.
+ */
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_allowlist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		bool matched;
+
+		/* skip representor already be created */
+		if (dev != NULL)
+			continue;
+
+		/* ethdev name encodes controller/pf (and vf for VF reprs) */
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* find a matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		matched = false;
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!match_repr_with_vport(repr_id, &vi->vport_info))
+				continue;
+
+			matched = true;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			/* cpfl_repr_init fills in the new ethdev and updates
+			 * the allowlist entry to point at it
+			 */
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				return ret;
+			}
+			break;
+		}
+
+		/* warning if no match vport detected */
+		if (!matched)
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s "
+					      "creation will be deferred when vport is detected",
+					      name);
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+/* Identity of a representor; used as the allowlist hash key. */
+struct cpfl_repr_id {
+	uint8_t host_id; /* CPFL_HOST_ID_HOST or CPFL_HOST_ID_ACC */
+	uint8_t pf_id;   /* CPFL_PF_TYPE_APF or CPFL_PF_TYPE_CPF */
+	uint8_t type;    /* RTE_ETH_REPRESENTOR_PF or _VF */
+	uint8_t vf_id;   /* VF index; 0 for PF representors */
+};
+
+/* Parameters handed to cpfl_repr_init() via rte_eth_dev_create(). */
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info; /* matched vport map entry */
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..1d963e5fd1 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 08/12] net/cpfl: support vport list/info get
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (6 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
                     ` (4 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
CPCHNL2_OP_CPF_GET_VPORT_INFO.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 ++++
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d4d9727a80..4f6944d00a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -189,6 +189,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 0000000000..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include <idpf_common_virtchnl.h>
+
+/* Issue CPCHNL2_OP_GET_VPORT_LIST for the function identified by @vi
+ * and copy the reply into @response.
+ * NOTE(review): IDPF_DFLT_MBX_BUF_SIZE bytes are copied out — the
+ * caller's response buffer must be at least that large; confirm.
+ * Returns 0 on success, the idpf_vc_cmd_execute() error otherwise.
+ */
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpchnl2_get_vport_list_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.func_type = vi->func_type;
+	request.pf_id = vi->pf_id;
+	request.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+	return 0;
+}
+
+/* Issue CPCHNL2_OP_GET_VPORT_INFO for @vport_id owned by the function
+ * identified by @vi and copy the reply into @response.
+ * Returns 0 on success, the idpf_vc_cmd_execute() error otherwise.
+ */
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+		       struct cpchnl2_vport_id *vport_id,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpchnl2_get_vport_info_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	/* Zero the request first (matching cpfl_cc_vport_list_get) so no
+	 * uninitialized stack bytes, e.g. struct padding, are sent to the
+	 * control plane.
+	 */
+	memset(&request, 0, sizeof(request));
+	request.vport.vport_id = vport_id->vport_id;
+	request.vport.vport_type = vport_id->vport_type;
+	request.func.func_type = vi->func_type;
+	request.func.pf_id = vi->pf_id;
+	request.func.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1d963e5fd1..fb075c6860 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_representor.c',
+        'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 09/12] net/cpfl: update vport info before creating representor
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (7 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 08/12] net/cpfl: support vport list/info get beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-09-06  2:33     ` Liu, Mingxia
  2023-08-16 15:05   ` [PATCH v2 10/12] net/cpfl: refine handle virtual channel message beilei.xing
                     ` (3 subsequent siblings)
  12 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Get port representor's vport list and update vport_map_hash
before creating the port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      |   2 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   3 +
 drivers/net/cpfl/cpfl_representor.c | 124 ++++++++++++++++++++++++++++
 3 files changed, 128 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 08daade7ac..e552387cfe 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1633,7 +1633,7 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
 		       struct cpchnl2_vport_info *vport_info)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 4f6944d00a..cc7f43fc3e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -189,6 +189,9 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_vport_info *vport_info);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 29da56dda0..ed2d1fff17 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -368,6 +368,86 @@ match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 	return false;
 }
 
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, &response->info);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
 int
 cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -375,8 +455,14 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 	uint32_t iter = 0;
 	const struct cpfl_repr_id *repr_id;
 	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
 	int ret;
 
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
 	rte_spinlock_lock(&adapter->repr_lock);
 
 	while (rte_hash_iterate(adapter->repr_allowlist_hash,
@@ -385,6 +471,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 		char name[RTE_ETH_NAME_MAX_LEN];
 		uint32_t iter_iter = 0;
 		bool matched;
+		int i;
 
 		/* skip representor already be created */
 		if (dev != NULL)
@@ -402,6 +489,41 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 				 repr_id->host_id,
 				 repr_id->pf_id);
 
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			rte_spinlock_unlock(&adapter->repr_lock);
+			rte_free(vlist_resp);
+			return ret;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				rte_free(vlist_resp);
+				return ret;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update  host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				rte_free(vlist_resp);
+				return ret;
+			}
+		}
+
 		/* find a matched vport */
 		rte_spinlock_lock(&adapter->vport_map_lock);
 
@@ -428,6 +550,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
 				rte_spinlock_unlock(&adapter->vport_map_lock);
 				rte_spinlock_unlock(&adapter->repr_lock);
+				rte_free(vlist_resp);
 				return ret;
 			}
 			break;
@@ -443,6 +566,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 	}
 
 	rte_spinlock_unlock(&adapter->repr_lock);
+	rte_free(vlist_resp);
 
 	return 0;
 }
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 10/12] net/cpfl: refine handle virtual channel message
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (8 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 11/12] net/cpfl: support link update for representor beilei.xing
                     ` (2 subsequent siblings)
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Refine handling of virtual channel event messages.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 46 ++++++++++++++++------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e552387cfe..330a865e3c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1591,40 +1591,50 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-	struct idpf_vport *vport = NULL;
+	struct cpfl_vport *vport = NULL;
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = &adapter->vports[i]->base;
-		if (vport->vport_id != vport_id)
+		vport = adapter->vports[i];
+		if (vport->base.vport_id != vport_id)
 			continue;
 		else
 			return vport;
 	}
 
-	return vport;
+	return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-	struct rte_eth_dev_data *data = vport->dev_data;
-	struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+	struct cpfl_vport *vport;
+	struct rte_eth_dev_data *data;
+	struct rte_eth_dev *dev;
 
 	if (msglen < sizeof(struct virtchnl2_event)) {
 		PMD_DRV_LOG(ERR, "Error event");
 		return;
 	}
 
+	vport = cpfl_find_vport(adapter, vc_event->vport_id);
+	if (!vport) {
+		PMD_DRV_LOG(ERR, "Can't find vport.");
+		return;
+	}
+
+	data = vport->itf.data;
+	dev = &rte_eth_devices[data->port_id];
+
 	switch (vc_event->event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-		vport->link_up = !!(vc_event->link_status);
-		vport->link_speed = vc_event->link_speed;
+		vport->base.link_up = !!(vc_event->link_status);
+		vport->base.link_speed = vc_event->link_speed;
 		cpfl_dev_link_update(dev, 0);
 		break;
 	default:
@@ -1741,10 +1751,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 	struct idpf_adapter *base = &adapter->base;
 	struct idpf_dma_mem *dma_mem = NULL;
 	struct idpf_hw *hw = &base->hw;
-	struct virtchnl2_event *vc_event;
 	struct idpf_ctlq_msg ctlq_msg;
 	enum idpf_mbx_opc mbx_op;
-	struct idpf_vport *vport;
 	uint16_t pending = 1;
 	uint32_t vc_op;
 	int ret;
@@ -1766,18 +1774,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 		switch (mbx_op) {
 		case idpf_mbq_opc_send_msg_to_peer_pf:
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
-				if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
-					PMD_DRV_LOG(ERR, "Error event");
-					return;
-				}
-				vc_event = (struct virtchnl2_event *)base->mbx_resp;
-				vport = cpfl_find_vport(adapter, vc_event->vport_id);
-				if (!vport) {
-					PMD_DRV_LOG(ERR, "Can't find vport.");
-					return;
-				}
-				cpfl_handle_event_msg(vport, base->mbx_resp,
-						      ctlq_msg.data_len);
+				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
+							    ctlq_msg.data_len);
 			} else if (vc_op == CPCHNL2_OP_EVENT) {
 				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
 							     ctlq_msg.data_len);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 11/12] net/cpfl: support link update for representor
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (9 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 10/12] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-08-16 15:05   ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
  12 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add link update ops for representor.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  1 +
 drivers/net/cpfl/cpfl_representor.c | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index cc7f43fc3e..55bd119423 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -163,6 +163,7 @@ struct cpfl_repr {
 	struct cpfl_repr_id repr_id;
 	struct rte_ether_addr mac_addr;
 	struct cpfl_vport_info *vport_info;
+	bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index ed2d1fff17..5b5c959727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -285,12 +285,31 @@ cpfl_repr_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+		      __rte_unused int wait_to_complete)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
+
+	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+	dev_link->link_status = repr->func_up ?
+			RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
 	.dev_configure		= cpfl_repr_dev_configure,
 	.dev_close		= cpfl_repr_dev_close,
 	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.link_update		= cpfl_repr_link_update,
 };
 
 static int
@@ -305,6 +324,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
 	repr->itf.adapter = adapter;
 	repr->itf.data = eth_dev->data;
+	if (repr->vport_info->vport_info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup for representor
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (10 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 11/12] net/cpfl: support link update for representor beilei.xing
@ 2023-08-16 15:05   ` beilei.xing
  2023-09-06  3:02     ` Liu, Mingxia
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
  12 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-08-16 15:05 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add dummy Rx/Tx queue setup functions for representor.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_representor.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 5b5c959727..58e0d91d97 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -285,6 +285,29 @@ cpfl_repr_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+idpf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 __rte_unused struct rte_mempool *pool)
+{
+	/* Dummy */
+	return 0;
+}
+
+static int
+idpf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	/* Dummy */
+	return 0;
+}
+
 static int
 cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 		      __rte_unused int wait_to_complete)
@@ -309,6 +332,9 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_close		= cpfl_repr_dev_close,
 	.dev_infos_get		= cpfl_repr_dev_info_get,
 
+	.rx_queue_setup		= idpf_repr_rx_queue_setup,
+	.tx_queue_setup		= idpf_repr_tx_queue_setup,
+
 	.link_update		= cpfl_repr_link_update,
 };
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v2 07/12] net/cpfl: create port representor
  2023-08-16 15:05   ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
@ 2023-09-05  7:35     ` Liu, Mingxia
  2023-09-05  8:30     ` Liu, Mingxia
  1 sibling, 0 replies; 89+ messages in thread
From: Liu, Mingxia @ 2023-09-05  7:35 UTC (permalink / raw)
  To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Zhang, Qi Z



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Wednesday, August 16, 2023 11:06 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v2 07/12] net/cpfl: create port representor
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Track representor request in a whitelist.
> Representor will only be created for active vport.
> 
> Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c      | 107 ++++---
>  drivers/net/cpfl/cpfl_ethdev.h      |  34 +++
>  drivers/net/cpfl/cpfl_representor.c | 448 ++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_representor.h |  26 ++
>  drivers/net/cpfl/meson.build        |   1 +
>  5 files changed, 573 insertions(+), 43 deletions(-)  create mode 100644
> drivers/net/cpfl/cpfl_representor.c
>  create mode 100644 drivers/net/cpfl/cpfl_representor.h
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index
> 9c4d8d3ea1..d4d9727a80 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -21,6 +21,7 @@
> 
>  #include "cpfl_logs.h"
>  #include "cpfl_cpchnl.h"
> +#include "cpfl_representor.h"
> 
>  /* Currently, backend supports up to 8 vports */
>  #define CPFL_MAX_VPORT_NUM	8
> @@ -60,11 +61,32 @@
>  #define IDPF_DEV_ID_CPF			0x1453
>  #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
> 
> +#define CPFL_HOST_ID_NUM	2
> +#define CPFL_PF_TYPE_NUM	2
>  #define CPFL_HOST_ID_HOST	0
>  #define CPFL_HOST_ID_ACC	1
>  #define CPFL_PF_TYPE_APF	0
>  #define CPFL_PF_TYPE_CPF	1
> 
[Liu, Mingxia] Better to use enum.

> +/* Function IDs on IMC side */
> +#define HOST0_APF	0
> +#define HOST1_APF	1
> +#define HOST2_APF	2
> +#define HOST3_APF	3
> +#define ACC_APF_ID	4
> +#define IMC_APF_ID	5
> +#define HOST0_NVME_ID	6
> +#define ACC_NVME_ID	7
> +#define HOST0_CPF_ID	8
> +#define HOST1_CPF_ID	9
> +#define HOST2_CPF_ID	10
> +#define HOST3_CPF_ID	11
> +#define ACC_CPF_ID	12
> +#define IMC_IPF_ID	13
> +#define ATE_CPF_ID	14
> +#define ACC_LCE_ID	15
[Liu, Mingxia] Better to use enum.

> +#define IMC_MBX_EFD_ID	0
> +
>  struct cpfl_vport_param {
>  	struct cpfl_adapter_ext *adapter;
>  	uint16_t devarg_id; /* arg id from user */ @@ -136,6 +158,13 @@
> struct cpfl_vport {
>  	bool p2p_manual_bind;
>  };
> 

^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v2 07/12] net/cpfl: create port representor
  2023-08-16 15:05   ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
  2023-09-05  7:35     ` Liu, Mingxia
@ 2023-09-05  8:30     ` Liu, Mingxia
  1 sibling, 0 replies; 89+ messages in thread
From: Liu, Mingxia @ 2023-09-05  8:30 UTC (permalink / raw)
  To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Zhang, Qi Z



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Wednesday, August 16, 2023 11:06 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v2 07/12] net/cpfl: create port representor
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Track representor request in a whitelist.
> Representor will only be created for active vport.
> 
> Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c      | 107 ++++---
>  drivers/net/cpfl/cpfl_ethdev.h      |  34 +++
>  drivers/net/cpfl/cpfl_representor.c | 448 ++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_representor.h |  26 ++
>  drivers/net/cpfl/meson.build        |   1 +
>  5 files changed, 573 insertions(+), 43 deletions(-)  create mode 100644
> drivers/net/cpfl/cpfl_representor.c
>  create mode 100644 drivers/net/cpfl/cpfl_representor.h
> 
> +static int
> +cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param) {
> +	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
> +	struct cpfl_repr_param *param = init_param;
> +	struct cpfl_adapter_ext *adapter = param->adapter;
> +
> +	repr->repr_id = param->repr_id;
> +	repr->vport_info = param->vport_info;
> +	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
> +	repr->itf.adapter = adapter;
> +	repr->itf.data = eth_dev->data;
> +
> +	eth_dev->dev_ops = &cpfl_repr_dev_ops;
> +
> +	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
> +	/* bit[15:14] type
> +	 * bit[13] xeon/acc
> +	 * bit[12] apf/cpf
> +	 * bit[11:0] vf
> +	 */
> +	eth_dev->data->representor_id =
> +		(uint16_t)(repr->repr_id.type << 14 |
> +			   repr->repr_id.host_id << 13 |
> +			   repr->repr_id.pf_id << 12 |
> +			   repr->repr_id.vf_id);
> +
[Liu, Mingxia] How about using a macro for this, e.g.:
#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id)\
  ((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) + (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))

> +
> +static bool
> +match_repr_with_vport(const struct cpfl_repr_id *repr_id,
> +		      struct cpchnl2_vport_info *info) {
> +	int func_id;
> +
> +	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
> +	    info->func_type == 0) {
> +		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
> +		if (func_id < 0)
> +			return false;
> +		else
> +			return true;
> +	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
> +		   info->func_type == 1) {
[Liu, Mingxia] For better readability, the func_type values 0 and 1 should be replaced by named macros.



^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v2 09/12] net/cpfl: update vport info before creating representor
  2023-08-16 15:05   ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
@ 2023-09-06  2:33     ` Liu, Mingxia
  0 siblings, 0 replies; 89+ messages in thread
From: Liu, Mingxia @ 2023-09-06  2:33 UTC (permalink / raw)
  To: Xing, Beilei, Wu, Jingjing; +Cc: dev



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Wednesday, August 16, 2023 11:06 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v2 09/12] net/cpfl: update vport info before creating representor
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Get port representor's vport list and update vport_map_hash before creating the
> port representor.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c      |   2 +-
>  drivers/net/cpfl/cpfl_ethdev.h      |   3 +
>  drivers/net/cpfl/cpfl_representor.c | 124 ++++++++++++++++++++++++++++
>  3 files changed, 128 insertions(+), 1 deletion(-)

>  int
>  cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext
> *adapter)  { @@ -375,8 +455,14 @@ cpfl_repr_create(struct rte_pci_device
> *pci_dev, struct cpfl_adapter_ext *adapte
>  	uint32_t iter = 0;
>  	const struct cpfl_repr_id *repr_id;
>  	const struct cpfl_vport_id *vp_id;
> +	struct cpchnl2_get_vport_list_response *vlist_resp;
> +	struct cpchnl2_get_vport_info_response vinfo_resp;
>  	int ret;
> 
> +	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
> +	if (vlist_resp == NULL)
> +		return -ENOMEM;
> +
>  	rte_spinlock_lock(&adapter->repr_lock);
> 
>  	while (rte_hash_iterate(adapter->repr_allowlist_hash,
> @@ -385,6 +471,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct
> cpfl_adapter_ext *adapte
>  		char name[RTE_ETH_NAME_MAX_LEN];
>  		uint32_t iter_iter = 0;
>  		bool matched;
> +		int i;
> 
>  		/* skip representor already be created */
>  		if (dev != NULL)
> @@ -402,6 +489,41 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct
> cpfl_adapter_ext *adapte
>  				 repr_id->host_id,
>  				 repr_id->pf_id);
> 
> +		/* get vport list for the port representor */
> +		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
> +		if (ret != 0) {
> +			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's
> vport list",
> +				     repr_id->host_id, repr_id->pf_id, repr_id-
> >vf_id);
> +			rte_spinlock_unlock(&adapter->repr_lock);
> +			rte_free(vlist_resp);
> +			return ret;
> +		}
> +
> +		/* get all vport info for the port representor */
> +		for (i = 0; i < vlist_resp->nof_vports; i++) {
> +			ret = cpfl_repr_vport_info_query(adapter, repr_id,
> +							 &vlist_resp->vports[i],
> &vinfo_resp);
> +			if (ret != 0) {
> +				PMD_INIT_LOG(ERR, "Failed to get host%d
> pf%d vf%d vport[%d]'s info",
> +					     repr_id->host_id, repr_id->pf_id,
> repr_id->vf_id,
> +					     vlist_resp->vports[i].vport_id);
> +				rte_spinlock_unlock(&adapter->repr_lock);
> +				rte_free(vlist_resp);
> +				return ret;
> +			}
> +
> +			ret = cpfl_repr_vport_map_update(adapter, repr_id,
> +						 vlist_resp->vports[i].vport_id,
> &vinfo_resp);
> +			if (ret != 0) {
> +				PMD_INIT_LOG(ERR, "Failed to update  host%d
> pf%d vf%d vport[%d]'s info to vport_map_hash",
> +					     repr_id->host_id, repr_id->pf_id,
> repr_id->vf_id,
> +					     vlist_resp->vports[i].vport_id);
> +				rte_spinlock_unlock(&adapter->repr_lock);
> +				rte_free(vlist_resp);
> +				return ret;
> +			}
> +		}
> +
>  		/* find a matched vport */
>  		rte_spinlock_lock(&adapter->vport_map_lock);
> 
> @@ -428,6 +550,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct
> cpfl_adapter_ext *adapte
>  				PMD_INIT_LOG(ERR, "Failed to create
> representor %s", name);
>  				rte_spinlock_unlock(&adapter-
> >vport_map_lock);
>  				rte_spinlock_unlock(&adapter->repr_lock);
> +				rte_free(vlist_resp);
>  				return ret;
>  			}
>  			break;
> @@ -443,6 +566,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct
> cpfl_adapter_ext *adapte
>  	}
> 
>  	rte_spinlock_unlock(&adapter->repr_lock);
> +	rte_free(vlist_resp);
> 
[Liu, Mingxia] There are several exit points that perform the same common cleanup:
			rte_spinlock_unlock(&adapter->repr_lock);
			rte_free(vlist_resp);
			return ret;
How about using goto to consolidate them?

>  	return 0;
>  }
> --
> 2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup for representor
  2023-08-16 15:05   ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
@ 2023-09-06  3:02     ` Liu, Mingxia
  0 siblings, 0 replies; 89+ messages in thread
From: Liu, Mingxia @ 2023-09-06  3:02 UTC (permalink / raw)
  To: Xing, Beilei, Wu, Jingjing; +Cc: dev



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Wednesday, August 16, 2023 11:06 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup for representor
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Add dummy Rx/Tx queue setup functions for representor.
> 
> Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/cpfl/cpfl_representor.c | 26 ++++++++++++++++++++++++++
>  1 file changed, 26 insertions(+)
> 
> diff --git a/drivers/net/cpfl/cpfl_representor.c
> b/drivers/net/cpfl/cpfl_representor.c
> index 5b5c959727..58e0d91d97 100644
> --- a/drivers/net/cpfl/cpfl_representor.c
> +++ b/drivers/net/cpfl/cpfl_representor.c
> @@ -285,6 +285,29 @@ cpfl_repr_dev_stop(struct rte_eth_dev *dev)
>  	return 0;
>  }
> 
> +static int
> +idpf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
> +			 __rte_unused uint16_t queue_id,
> +			 __rte_unused uint16_t nb_desc,
> +			 __rte_unused unsigned int socket_id,
> +			 __rte_unused const struct rte_eth_rxconf *conf,
> +			 __rte_unused struct rte_mempool *pool) {
> +	/* Dummy */
> +	return 0;
> +}
> +
> +static int
> +idpf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
> +			 __rte_unused uint16_t queue_id,
> +			 __rte_unused uint16_t nb_desc,
> +			 __rte_unused unsigned int socket_id,
> +			 __rte_unused const struct rte_eth_txconf *conf) {
> +	/* Dummy */
> +	return 0;
> +}
> +
>  static int
>  cpfl_repr_link_update(struct rte_eth_dev *ethdev,
>  		      __rte_unused int wait_to_complete) @@ -309,6 +332,9 @@
> static const struct eth_dev_ops cpfl_repr_dev_ops = {
>  	.dev_close		= cpfl_repr_dev_close,
>  	.dev_infos_get		= cpfl_repr_dev_info_get,
> 
> +	.rx_queue_setup		= idpf_repr_rx_queue_setup,
> +	.tx_queue_setup		= idpf_repr_tx_queue_setup,
> +
[Liu, Mingxia] How about using the cpfl_repr_xxx() naming convention for these functions (instead of the idpf_repr_ prefix)?

>  	.link_update		= cpfl_repr_link_update,
>  };
> 
> --
> 2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 00/11] net/cpfl: support port representor
  2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
                     ` (11 preceding siblings ...)
  2023-08-16 15:05   ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
@ 2023-09-07 15:15   ` beilei.xing
  2023-09-07 15:15     ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
                       ` (11 more replies)
  12 siblings, 12 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:15 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. code refine for representor support
2. support port representor

v3 changes:
 - Refine commit log.
 - Add macro and enum.
 - Refine doc.
 - Refine error handling.
v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (11):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: refine handle virtual channel message
  net/cpfl: introduce CP channel API
  net/cpfl: enable vport mapping
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support vport list/info get
  net/cpfl: update vport info before creating representor
  net/cpfl: support link update for representor

 doc/guides/nics/cpfl.rst               |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h         | 340 ++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c         | 619 +++++++++++++++++++++----
 drivers/net/cpfl/cpfl_ethdev.h         | 101 +++-
 drivers/net/cpfl/cpfl_representor.c    | 607 ++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h    |  26 ++
 drivers/net/cpfl/cpfl_vchnl.c          |  72 +++
 drivers/net/cpfl/meson.build           |   4 +-
 9 files changed, 1702 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 01/11] net/cpfl: refine devargs parse and process
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
@ 2023-09-07 15:15     ` beilei.xing
  2023-09-07 15:15     ` [PATCH v3 02/11] net/cpfl: introduce interface structure beilei.xing
                       ` (10 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:15 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Keep devargs in adapter.
2. Refine handling the case with no vport be specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++---------------
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter,
-		   struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct cpfl_devargs *cpfl_args = &adapter->devargs;
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret;
 
 	cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
-	/* check parsed devargs */
-	if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-	    adapter->max_vport_nb) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     adapter->max_vport_nb);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-		if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
-				     cpfl_args->req_vports[i], adapter->max_vport_nb - 1);
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been requested",
-				     cpfl_args->req_vports[i]);
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 	adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i;
+
+	/* refine vport number, at least 1 vport */
+	if (devargs->req_vport_nb == 0) {
+		devargs->req_vport_nb = 1;
+		devargs->req_vports[0] = 0;
+	}
+
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + devargs->req_vport_nb >
+	    adapter->max_vport_nb) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     adapter->max_vport_nb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     devargs->req_vports[i], adapter->max_vport_nb - 1);
+			return -EINVAL;
+		}
+
+		if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
+				     devargs->req_vports[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport_param vport_param;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret, i;
+
+	for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = adapter->devargs.req_vports[i];
+		vport_param.idx = cpfl_vport_idx_alloc(adapter);
+		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			break;
+		}
+		snprintf(name, sizeof(name), "net_%s_vport_%d",
+			 pci_dev->device.name,
+			 adapter->devargs.req_vports[i]);
+		ret = rte_eth_dev_create(&pci_dev->device, name,
+					    sizeof(struct cpfl_vport),
+					    NULL, NULL, cpfl_dev_vport_init,
+					    &vport_param);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to create vport %d",
+				    vport_param.devarg_id);
+	}
+
+	return 0;
+}
+
 static int
 cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
-	struct cpfl_vport_param vport_param;
 	struct cpfl_adapter_ext *adapter;
-	struct cpfl_devargs devargs;
-	char name[RTE_ETH_NAME_MAX_LEN];
-	int i, retval;
+	int retval;
 
 	if (!cpfl_adapter_list_init) {
 		rte_spinlock_init(&cpfl_adapter_lock);
@@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
+	retval = cpfl_parse_devargs(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return retval;
+	}
+
 	retval = cpfl_adapter_ext_init(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter.");
@@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next);
 	rte_spinlock_unlock(&cpfl_adapter_lock);
 
-	retval = cpfl_parse_devargs(pci_dev, adapter, &devargs);
+	retval = cpfl_vport_devargs_process(adapter);
 	if (retval != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		PMD_INIT_LOG(ERR, "Failed to process vport devargs");
 		goto err;
 	}
 
-	if (devargs.req_vport_nb == 0) {
-		/* If no vport devarg, create vport 0 by default. */
-		vport_param.adapter = adapter;
-		vport_param.devarg_id = 0;
-		vport_param.idx = cpfl_vport_idx_alloc(adapter);
-		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-			return 0;
-		}
-		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
-			 pci_dev->device.name);
-		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct cpfl_vport),
-					    NULL, NULL, cpfl_dev_vport_init,
-					    &vport_param);
-		if (retval != 0)
-			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-	} else {
-		for (i = 0; i < devargs.req_vport_nb; i++) {
-			vport_param.adapter = adapter;
-			vport_param.devarg_id = devargs.req_vports[i];
-			vport_param.idx = cpfl_vport_idx_alloc(adapter);
-			if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-				break;
-			}
-			snprintf(name, sizeof(name), "cpfl_%s_vport_%d",
-				 pci_dev->device.name,
-				 devargs.req_vports[i]);
-			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct cpfl_vport),
-						    NULL, NULL, cpfl_dev_vport_init,
-						    &vport_param);
-			if (retval != 0)
-				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    vport_param.devarg_id);
-		}
+	retval = cpfl_vport_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create vports.");
+		goto err;
 	}
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e42354f70..b637bf2e45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -115,6 +115,7 @@ struct cpfl_adapter_ext {
 	uint16_t cur_vport_nb;
 
 	uint16_t used_vecs_num;
+	struct cpfl_devargs devargs;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 02/11] net/cpfl: introduce interface structure
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
  2023-09-07 15:15     ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
@ 2023-09-07 15:15     ` beilei.xing
  2023-09-07 15:15     ` [PATCH v3 03/11] net/cpfl: refine handle virtual channel message beilei.xing
                       ` (9 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:15 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Introduce cpfl interface structure to distinguish between vport and port
representor.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 16 ++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
+	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..53e45035e8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,19 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+	CPFL_ITF_TYPE_VPORT,
+	CPFL_ITF_TYPE_REPRESENTOR
+};
+
+struct cpfl_itf {
+	enum cpfl_itf_type type;
+	struct cpfl_adapter_ext *adapter;
+	void *data;
+};
+
 struct cpfl_vport {
+	struct cpfl_itf itf;
 	struct idpf_vport base;
 	struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +136,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev)					\
+	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)				\
+	((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 03/11] net/cpfl: refine handle virtual channel message
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
  2023-09-07 15:15     ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
  2023-09-07 15:15     ` [PATCH v3 02/11] net/cpfl: introduce interface structure beilei.xing
@ 2023-09-07 15:15     ` beilei.xing
  2023-09-07 15:15     ` [PATCH v3 04/11] net/cpfl: introduce CP channel API beilei.xing
                       ` (8 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:15 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Refine handling of virtual channel event messages.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 46 ++++++++++++++++------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..6b6e9b37b1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1450,40 +1450,50 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-	struct idpf_vport *vport = NULL;
+	struct cpfl_vport *vport = NULL;
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = &adapter->vports[i]->base;
-		if (vport->vport_id != vport_id)
+		vport = adapter->vports[i];
+		if (vport->base.vport_id != vport_id)
 			continue;
 		else
 			return vport;
 	}
 
-	return vport;
+	return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-	struct rte_eth_dev_data *data = vport->dev_data;
-	struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+	struct cpfl_vport *vport;
+	struct rte_eth_dev_data *data;
+	struct rte_eth_dev *dev;
 
 	if (msglen < sizeof(struct virtchnl2_event)) {
 		PMD_DRV_LOG(ERR, "Error event");
 		return;
 	}
 
+	vport = cpfl_find_vport(adapter, vc_event->vport_id);
+	if (!vport) {
+		PMD_DRV_LOG(ERR, "Can't find vport.");
+		return;
+	}
+
+	data = vport->itf.data;
+	dev = &rte_eth_devices[data->port_id];
+
 	switch (vc_event->event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-		vport->link_up = !!(vc_event->link_status);
-		vport->link_speed = vc_event->link_speed;
+		vport->base.link_up = !!(vc_event->link_status);
+		vport->base.link_speed = vc_event->link_speed;
 		cpfl_dev_link_update(dev, 0);
 		break;
 	default:
@@ -1498,10 +1508,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 	struct idpf_adapter *base = &adapter->base;
 	struct idpf_dma_mem *dma_mem = NULL;
 	struct idpf_hw *hw = &base->hw;
-	struct virtchnl2_event *vc_event;
 	struct idpf_ctlq_msg ctlq_msg;
 	enum idpf_mbx_opc mbx_op;
-	struct idpf_vport *vport;
 	uint16_t pending = 1;
 	uint32_t vc_op;
 	int ret;
@@ -1523,18 +1531,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 		switch (mbx_op) {
 		case idpf_mbq_opc_send_msg_to_peer_pf:
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
-				if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
-					PMD_DRV_LOG(ERR, "Error event");
-					return;
-				}
-				vc_event = (struct virtchnl2_event *)base->mbx_resp;
-				vport = cpfl_find_vport(adapter, vc_event->vport_id);
-				if (!vport) {
-					PMD_DRV_LOG(ERR, "Can't find vport.");
-					return;
-				}
-				cpfl_handle_event_msg(vport, base->mbx_resp,
-						      ctlq_msg.data_len);
+				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
+							    ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 04/11] net/cpfl: introduce CP channel API
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (2 preceding siblings ...)
  2023-09-07 15:15     ` [PATCH v3 03/11] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-09-07 15:15     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 05/11] net/cpfl: enable vport mapping beilei.xing
                       ` (7 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:15 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

The CPCHNL2 defines the API (v2) used for communication between the
CPF driver and its on-chip management software. The CPFL PMD is a
specific CPF driver to utilize CPCHNL2 for device configuration and
event probing.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +++++++++++++++++++++++++++++++++
 1 file changed, 340 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 0000000000..2eefcbcc10
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief      Command Opcodes
+ *              Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+	/* vport info */
+	CPCHNL2_OP_GET_VPORT_LIST		= 0x8025,
+	CPCHNL2_OP_GET_VPORT_INFO		= 0x8026,
+
+	/* DPHMA Event notifications */
+	CPCHNL2_OP_EVENT			= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT		8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
+
+#define CPCHNL2_FUNC_TYPE_PF		0
+#define CPCHNL2_FUNC_TYPE_SRIOV		1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status */
+#define CPCHNL2_VPORT_STATUS_CREATED	0
+#define CPCHNL2_VPORT_STATUS_ENABLED	1
+#define CPCHNL2_VPORT_STATUS_DISABLED	2
+#define CPCHNL2_VPORT_STATUS_DESTROYED	3
+
+/* Queue Groups Extension */
+/**************************************************/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed for a single vport.
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X	\
+	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+struct cpchnl2_queue_chunk {
+	u32 type;	       /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */
+	u32 start_queue_id;
+	u32 num_queues;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+	u16 num_chunks;
+	u8 reserved[6];
+	struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks);
+
+struct cpchnl2_rx_queue_group_info {
+	/* User can ask to update rss_lut size originally allocated
+	 * by CreateVport command. New size will be returned if allocation succeeded,
+	 * otherwise original rss_size from CreateVport will be returned.
+	 */
+	u16 rss_lut_size;
+	u8 pad[6]; /*Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info);
+
+struct cpchnl2_tx_queue_group_info {
+	u8 tx_tc; /*TX TC queue group will be connected to*/
+	/* Each group can have its own priority, value 0-7, while each group with unique
+	 * priority is strict priority. It can be single set of queue groups which configured with
+	 * same priority, then they are assumed part of WFQ arbitration group and are expected to be
+	 * assigned with weight.
+	 */
+	u8 priority;
+	/* Determines if queue group is expected to be Strict Priority according to its priority */
+	u8 is_sp;
+	u8 pad;
+	/* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set.
+	 * The weights of the groups are independent of each other. Possible values: 1-200.
+	 */
+	u16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	u8 pad2[8]; /* Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info);
+
+struct cpchnl2_queue_group_id {
+	/* Queue group ID - depended on it's type:
+	 * Data & p2p - is an index which is relative to Vport.
+	 * Config & Mailbox - is an ID which is relative to func.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by host and assigned by Control plane.
+	 */
+	u16 queue_group_id;
+	/* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+	u16 queue_group_type;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id);
+
+struct cpchnl2_queue_group_info {
+	/* IN */
+	struct cpchnl2_queue_group_id qg_id;
+
+	/* IN, Number of queues of different types in the group. */
+	u16 num_tx_q;
+	u16 num_tx_complq;
+	u16 num_rx_q;
+	u16 num_rx_bufq;
+
+	struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+	struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+	u8 egress_port;
+	u8 pad[39]; /*Future extension purpose*/
+	struct cpchnl2_queue_grp_chunks chunks;
+};
+CPCHNL2_CHECK_STRUCT_LEN(344, cpchnl2_queue_group_info);
+
+struct cpchnl2_queue_groups {
+	u16 num_queue_groups; /* Number of queue groups in struct below */
+	u8 pad[6];
+	/* group information , number is determined by param above */
+	struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups);
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+	CPCHNL2_FTYPE_LAN_PF = 0,
+	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport_id {
+	u32 vport_id;
+	u16 vport_type;
+	u8 pad[2];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_vport_id);
+
+struct cpchnl2_func_id {
+	/* Function type: 0 - LAN PF, 1 -  LAN VF, Rest - "reserved" */
+	u8 func_type;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs
+	 * and 8-12 CPFs are valid
+	 */
+	u8 pf_id;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_func_id);
+
+/* Note! Do not change the fields and especially their order as should eventually
+ * be aligned to 32bit. Must match the virtchnl structure definition.
+ * If should change, change also the relevant FAS and virtchnl code, under permission.
+ */
+struct cpchnl2_vport_info {
+	u16 vport_index;
+	/* VSI index, global indexing aligned to HW.
+	 * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command.
+	 * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev".
+	 */
+	u16 vsi_id;
+	u8 vport_status;	/* enum cpchnl2_vport_status */
+	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	u8 func_type;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	/* Always relevant, indexing is according to LAN PE 0-15,
+	 * while only 0-4 APFs and 8-12 CPFs are valid.
+	 */
+	u8 pf_id;
+	u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */
+	/* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */
+	u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS];
+	u16 vmrl_id;
+	/* Indicates if IMC created SEM MAC rule for this Vport.
+	 * Currently this is done by IMC for all Vport of type "Default" only,
+	 * but can be different in the future.
+	 */
+	u8 sem_mac_rule_exist;
+	/* Bitmask to inform which TC is valid.
+	 * 0x1 << TCnum. 1b: valid else 0.
+	 * Driven by Node Policy on system level, then System level TCs are
+	 * reported to IDPF and it can enable Vport level TCs on TX according
+	 * to System enabled ones.
+	 * If TC aware mode - bit set for valid TC.
+	 * otherwise = 1 (only bit 0 is set, represents the VSI).
+	 */
+	u8 tx_tc_bitmask;
+	/* For each valid TC, TEID of VPORT node over TC in TX LAN WS.
+	 * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID
+	 */
+	u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT];
+	/* For each valid TC, bandwidth in mbps.
+	 * Default BW per Vport is from Node policy
+	 * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth
+	 */
+	u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT];
+	/* From Node Policy. */
+	u16 max_mtu;
+	u16 default_rx_qid;	/* Default LAN RX Queue ID */
+	u16 vport_flags; /* see: VPORT_FLAGS */
+	u8 egress_port;
+	u8 pad_reserved[5];
+};
+CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_LIST
+ */
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ */
+struct cpchnl2_get_vport_list_request {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ *        other PE types
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ * @param nof_vports Number of vports created on the function
+ * @param vports array of the IDs and types. vport ID is relative to its func (PF/VF). same as in
+ *        Create Vport
+ * vport_type: Aligned to VirtChnl types: Default, SIOV, etc.
+ */
+struct cpchnl2_get_vport_list_response {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u16 nof_vports;
+	u8 pad[2];
+	struct cpchnl2_vport_id vports[];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_response);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_INFO
+ */
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request
+ * @param vport a structure containing vport_id (relative to function) and type
+ * @param func a structure containing function type, pf_id, vf_id
+ */
+struct cpchnl2_get_vport_info_request {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_get_vport_info_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response
+ * @param vport a structure containing vport_id (relative to function) and type to get info for
+ * @param info a structure all the information for a given vport
+ * @param queue_groups a structure containing all the queue groups of the given vport
+ */
+struct cpchnl2_get_vport_info_response {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_vport_info info;
+	struct cpchnl2_queue_groups queue_groups;
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_get_vport_info_response);
+
+ /* Cpchnl events
+  * Sends event message to inform the peer of notification that may affect it.
+  * No direct response is expected from the peer, though it may generate other
+  * messages in response to this one.
+  */
+enum cpchnl2_event {
+	CPCHNL2_EVENT_UNKNOWN = 0,
+	CPCHNL2_EVENT_VPORT_CREATED,
+	CPCHNL2_EVENT_VPORT_DESTROYED,
+	CPCHNL2_EVENT_VPORT_ENABLED,
+	CPCHNL2_EVENT_VPORT_DISABLED,
+	CPCHNL2_PKG_EVENT,
+	CPCHNL2_EVENT_ADD_QUEUE_GROUPS,
+	CPCHNL2_EVENT_DEL_QUEUE_GROUPS,
+	CPCHNL2_EVENT_ADD_QUEUES,
+	CPCHNL2_EVENT_DEL_QUEUES
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_CREATED
+ */
+struct cpchnl2_event_vport_created {
+	struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_info info; /* Vport configuration info */
+	struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_event_vport_created);
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_DESTROYED
+ */
+struct cpchnl2_event_vport_destroyed {
+	/* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_event_vport_destroyed);
+
+struct cpchnl2_event_info {
+	struct {
+		s32 type;		/* See enum cpchnl2_event */
+		uint8_t reserved[4];	/* Reserved */
+	} header;
+	union {
+		struct cpchnl2_event_vport_created vport_created;
+		struct cpchnl2_event_vport_destroyed vport_destroyed;
+	} data;
+};
+
+#endif /* _CPFL_CPCHNL_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 05/11] net/cpfl: enable vport mapping
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (3 preceding siblings ...)
  2023-09-07 15:15     ` [PATCH v3 04/11] net/cpfl: introduce CP channel API beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 06/11] net/cpfl: parse representor devargs beilei.xing
                       ` (6 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Handle cpchnl event for vport create/destroy
2. Use hash table to store vport_id to vport_info mapping
3. Use spinlock for thread safe.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  21 ++++-
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 6b6e9b37b1..f51aa6e95a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include <rte_dev.h>
 #include <errno.h>
 #include <rte_alarm.h>
+#include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1502,6 +1503,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vport_identity,
+		       struct cpchnl2_vport_info *vport_info)
+{
+	struct cpfl_vport_info *info = NULL;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret >= 0) {
+		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+		/* overwrite info */
+		if (info)
+			info->vport_info = *vport_info;
+		goto fini;
+	}
+
+	info = rte_zmalloc(NULL, sizeof(*info), 0);
+	if (info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	info->vport_info = *vport_info;
+
+	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+		rte_free(info);
+		goto err;
+	}
+
+fini:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+	struct cpfl_vport_info *info;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "vport id not exist");
+		goto err;
+	}
+
+	rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	rte_free(info);
+
+	return 0;
+
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+	struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+	struct cpchnl2_vport_info *info;
+	struct cpfl_vport_id vport_identity = { 0 };
+
+	if (msglen < sizeof(struct cpchnl2_event_info)) {
+		PMD_DRV_LOG(ERR, "Error event");
+		return;
+	}
+
+	switch (cpchnl2_event->header.type) {
+	case CPCHNL2_EVENT_VPORT_CREATED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+		info = &cpchnl2_event->data.vport_created.info;
+		vport_identity.func_type = info->func_type;
+		vport_identity.pf_id = info->pf_id;
+		vport_identity.vf_id = info->vf_id;
+		if (cpfl_vport_info_create(adapter, &vport_identity, info))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+		break;
+	case CPCHNL2_EVENT_VPORT_DESTROYED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+		vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+		vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+		vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+		if (cpfl_vport_info_destroy(adapter, &vport_identity))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+		break;
+	}
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1533,6 +1636,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
 				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
 							    ctlq_msg.data_len);
+			} else if (vc_op == CPCHNL2_OP_EVENT) {
+				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+							     ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
@@ -1608,6 +1714,48 @@ static struct virtchnl2_get_capabilities req_caps = {
 	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
 };
 
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-vport", adapter->name);
+
+	rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = adapter->name,
+		.entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_vport_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->vport_map_hash = rte_hash_create(&params);
+
+	if (adapter->vport_map_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+	const void *key = NULL;
+	struct cpfl_vport_map_info *info;
+	uint32_t iter = 0;
+
+	while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+		rte_free(info);
+
+	rte_hash_free(adapter->vport_map_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1632,6 +1780,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_adapter_init;
 	}
 
+	ret = cpfl_vport_map_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init vport map");
+		goto err_vport_map_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1656,6 +1810,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
 	idpf_adapter_deinit(base);
 err_adapter_init:
 	return ret;
@@ -1885,6 +2041,7 @@ static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
 
 	rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 53e45035e8..3515fec4f7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
 #include <rte_spinlock.h>
 #include <rte_ethdev.h>
 #include <rte_kvargs.h>
+#include <rte_hash.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 
-#include "cpfl_logs.h"
-
 #include <idpf_common_device.h>
 #include <idpf_common_virtchnl.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
 
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct cpfl_vport_id {
+	uint32_t vport_id;
+	uint8_t func_type;
+	uint8_t pf_id;
+	uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+	struct cpchnl2_vport_info vport_info;
+	bool enabled;
+};
+
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
 	CPFL_ITF_TYPE_REPRESENTOR
@@ -128,6 +142,9 @@ struct cpfl_adapter_ext {
 
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
+
+	rte_spinlock_t vport_map_lock;
+	struct rte_hash *vport_map_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
     subdir_done()
 endif
 
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 06/11] net/cpfl: parse representor devargs
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (4 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 05/11] net/cpfl: enable vport mapping beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 07/11] net/cpfl: support probe again beilei.xing
                       ` (5 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : host (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- host > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if omit default value.

representor=c0pf0
  -- host > apf
     same as pf0 if omit default value.

representor=c1pf0
  -- accelerator core > apf

multiple representor devargs are supported.
e.g.: create 4 representors for 4 vfs on host APF and one
representor for APF on accelerator core.

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/nics/cpfl.rst               |  36 +++++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c         | 179 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h         |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..83a18c3f2e 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor`` the user can specify
+  which functions to create port representors.
+
+  Format is::
+
+    [[c<controller_id>]pf<pf_id>]vf<vf_id>
+
+  Controller_id 0 is host (default), while 1 is accelerator core.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default value can be omitted.
+
+  Create 4 representors for 4 vfs on host APF::
+
+    -a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on accelerator core::
+
+    -a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on host APF and one representor for CPF on accelerator core::
+
+    -a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 ------------------------------
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..3d9be208d0 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,9 @@ New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f51aa6e95a..1b21134ec1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
+#include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args)
 	return 0;
 }
 
/* Append @val to @list unless it is already present.
 *
 * @return 0 when the value ends up in the list (newly added or already a
 *         member), -1 when the list is full.
 */
static int
enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val)
{
	uint16_t idx;

	/* Duplicates are accepted silently. */
	for (idx = 0; idx < *len_list; idx++) {
		if (list[idx] == val)
			return 0;
	}

	if (*len_list >= max_list)
		return -1;

	list[*len_list] = val;
	*len_list += 1;
	return 0;
}
+
/* Parse a "lo" or "lo-hi" range at @str and enlist every value in it.
 *
 * @return pointer just past the parsed text on success, or NULL on a
 *         malformed range, an inverted bound, or a full list.
 */
static const char *
process_range(const char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	uint16_t lo, hi, cur;
	int matched, consumed = 0;

	/* %n records how many characters were consumed up to that point. */
	matched = sscanf(str, "%hu%n-%hu%n", &lo, &consumed, &hi, &consumed);
	switch (matched) {
	case 1:
		if (enlist(list, len_list, max_list, lo) != 0)
			return NULL;
		break;
	case 2:
		if (lo > hi)
			return NULL;
		for (cur = lo; cur <= hi; cur++) {
			if (enlist(list, len_list, max_list, cur) != 0)
				return NULL;
		}
		break;
	default:
		return NULL;
	}

	return str + consumed;
}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list)
+{
+	const char *pos = str;
+
+	if (*pos == '[')
+		pos++;
+	while (1) {
+		pos = process_range(pos, list, len_list, max_list);
+		if (pos == NULL)
+			return NULL;
+		if (*pos != ',') /* end of list */
+			break;
+		pos++;
+	}
+	if (*str == '[' && *pos != ']')
+		return NULL;
+	if (*pos == ']')
+		pos++;
+	return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+	struct cpfl_devargs *devargs = args;
+	struct rte_eth_devargs *eth_da;
+	const char *str = value;
+
+	if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+		return -EINVAL;
+
+	eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+	if (str[0] == 'c') {
+		str += 1;
+		str = process_list(str, eth_da->mh_controllers,
+				&eth_da->nb_mh_controllers,
+				RTE_DIM(eth_da->mh_controllers));
+		if (str == NULL)
+			goto done;
+	}
+	if (str[0] == 'p' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_PF;
+		str += 2;
+		str = process_list(str, eth_da->ports,
+				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
+		if (str == NULL || str[0] == '\0')
+			goto done;
+	} else if (eth_da->nb_mh_controllers > 0) {
+		/* 'c' must followed by 'pf'. */
+		str = NULL;
+		goto done;
+	}
+	if (str[0] == 'v' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+		str += 2;
+	} else if (str[0] == 's' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_SF;
+		str += 2;
+	} else {
+		/* 'pf' must followed by 'vf' or 'sf'. */
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+			str = NULL;
+			goto done;
+		}
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+	}
+	str = process_list(str, eth_da->representor_ports,
+		&eth_da->nb_representor_ports,
+		RTE_DIM(eth_da->representor_ports));
+done:
+	if (str == NULL) {
+		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
+		return -1;
+	}
+
+	devargs->repr_args_num++;
+
+	return 0;
+}
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	cpfl_args->repr_args_num = 0;
+	ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args);
+
+	if (ret != 0)
+		goto fail;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2085,6 +2216,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
+static int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2163,6 +2336,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto err;
 	}
 
+	retval = cpfl_repr_devargs_process(adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
+		goto err;
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 3515fec4f7..9c4d8d3ea1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -60,16 +60,24 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_HOST	0
+#define CPFL_HOST_ID_ACC	1
+#define CPFL_PF_TYPE_APF	0
+#define CPFL_PF_TYPE_CPF	1
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
+#define CPFL_REPR_ARG_NUM_MAX	4
 /* Struct used when parse driver specific devargs */
 struct cpfl_devargs {
 	uint16_t req_vports[CPFL_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
+	uint8_t repr_args_num;
+	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
 };
 
 struct p2p_queue_chunks_info {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 07/11] net/cpfl: support probe again
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (5 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 06/11] net/cpfl: parse representor devargs beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 08/11] net/cpfl: create port representor beilei.xing
                       ` (4 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Only the representor devargs will be parsed when the device is probed again.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++-------
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1b21134ec1..236347eeb3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
 	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+	CPFL_REPRESENTOR,
+	NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
 	RTE_ETH_SPEED_NUM_NONE,
 	RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (devargs == NULL)
 		return 0;
 
-	kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+	kvlist = rte_kvargs_parse(devargs->args,
+			first ? cpfl_valid_args_first : cpfl_valid_args_again);
 	if (kvlist == NULL) {
 		PMD_INIT_LOG(ERR, "invalid kvargs key");
 		return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
+	if (!first)
+		return 0;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2289,18 +2298,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	       struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
 
-	if (!cpfl_adapter_list_init) {
-		rte_spinlock_init(&cpfl_adapter_lock);
-		TAILQ_INIT(&cpfl_adapter_list);
-		cpfl_adapter_list_init = true;
-	}
-
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
 	if (adapter == NULL) {
@@ -2308,7 +2310,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
-	retval = cpfl_parse_devargs(pci_dev, adapter);
+	retval = cpfl_parse_devargs(pci_dev, adapter, true);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		return retval;
@@ -2353,6 +2355,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_parse_devargs(pci_dev, adapter, false);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return ret;
+	}
+
+	ret = cpfl_repr_devargs_process(adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	struct cpfl_adapter_ext *adapter;
+
+	if (!cpfl_adapter_list_init) {
+		rte_spinlock_init(&cpfl_adapter_lock);
+		TAILQ_INIT(&cpfl_adapter_list);
+		cpfl_adapter_list_init = true;
+	}
+
+	adapter = cpfl_find_adapter_ext(pci_dev);
+
+	if (adapter == NULL)
+		return cpfl_pci_probe_first(pci_dev);
+	else
+		return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2375,7 +2417,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
 	.id_table	= pci_id_cpfl_map,
-	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING |
+			  RTE_PCI_DRV_PROBE_AGAIN,
 	.probe		= cpfl_pci_probe,
 	.remove		= cpfl_pci_remove,
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 08/11] net/cpfl: create port representor
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (6 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 07/11] net/cpfl: support probe again beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 09/11] net/cpfl: support vport list/info get beilei.xing
                       ` (3 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Track representor request in the allowlist.
Representor will only be created for active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 107 ++++---
 drivers/net/cpfl/cpfl_ethdev.h      |  51 ++-
 drivers/net/cpfl/cpfl_representor.c | 470 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 608 insertions(+), 47 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 236347eeb3..6eb387ce96 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1896,6 +1896,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_wl", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_allowlist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_allowlist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
/* Destroy the representor allowlist hash.  Unlike the vport map, no
 * per-entry free is needed: keys are copied into the hash and the stored
 * data are ethdev pointers owned elsewhere.
 */
static void
cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
{
	rte_hash_free(adapter->repr_allowlist_hash);
}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1926,6 +1962,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_allowlist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+		goto err_repr_allowlist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1950,6 +1992,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2225,48 +2269,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2302,6 +2304,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2341,11 +2344,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2372,6 +2387,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9c4d8d3ea1..1a87e5931c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,10 +61,40 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
-#define CPFL_HOST_ID_HOST	0
-#define CPFL_HOST_ID_ACC	1
-#define CPFL_PF_TYPE_APF	0
-#define CPFL_PF_TYPE_CPF	1
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
+
+/* bit[15:14] type
+ * bit[13] xeon/acc
+ * bit[12] apf/cpf
+ * bit[11:0] vf
+ */
+#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id)	\
+	((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) +	\
+	 (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))
+
+enum cpfl_host_id {
+	CPFL_HOST_ID_HOST = 0,
+	CPFL_HOST_ID_ACC,
+};
+
+enum cpfl_pf_type {
+	CPFL_PF_TYPE_APF = 0,
+	CPFL_PF_TYPE_CPF,
+};
+
+/* Function IDs on IMC side */
+enum cpfl_func_id {
+	CPFL_HOST0_APF = 0,
+	CPFL_ACC_APF_ID = 4,
+	CPFL_HOST0_CPF_ID = 8,
+	CPFL_ACC_CPF_ID = 12,
+};
+
+enum cpfl_vport_func_type {
+	CPFL_VPORT_LAN_PF = 0,
+	CPFL_VPORT_LAN_VF,
+};
 
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
@@ -136,6 +167,13 @@ struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -153,6 +191,9 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_allowlist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -163,6 +204,8 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..fd42063f2c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+	return ret;
+}
+
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain*/
+	dev_info->switch_info.port_id = repr->vport_info->vport_info.vsi_id;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+static int
+cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 __rte_unused struct rte_mempool *pool)
+{
+	/* Dummy */
+	return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	/* Dummy */
+	return 0;
+}
+
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
+	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+};
+
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	eth_dev->data->representor_id =
+		CPFL_REPRESENTOR_ID(repr->repr_id.type,
+				    repr->repr_id.host_id,
+				    repr->repr_id.pf_id,
+				    repr->repr_id.vf_id);
+
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+static bool
+cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == CPFL_VPORT_LAN_PF) {
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		if (func_id < 0)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == CPFL_VPORT_LAN_VF) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_allowlist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		bool matched;
+
+		/* skip representor already be created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* find a matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		matched = false;
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!cpfl_match_repr_with_vport(repr_id, &vi->vport_info))
+				continue;
+
+			matched = true;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				return ret;
+			}
+			break;
+		}
+
+		/* warning if no match vport detected */
+		if (!matched)
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s "
+					      "creation will be deferred when vport is detected",
+					      name);
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+struct cpfl_repr_id {
+	uint8_t host_id;
+	uint8_t pf_id;
+	uint8_t type;
+	uint8_t vf_id;
+};
+
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info;
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..1d963e5fd1 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 09/11] net/cpfl: support vport list/info get
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (7 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 08/11] net/cpfl: create port representor beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 10/11] net/cpfl: update vport info before creating representor beilei.xing
                       ` (2 subsequent siblings)
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support cp channel ops CPCHNL2_OP_GET_VPORT_LIST and
CPCHNL2_OP_GET_VPORT_INFO.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 ++++
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 1a87e5931c..4c6653e113 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -198,6 +198,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 0000000000..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include <idpf_common_virtchnl.h>
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpchnl2_get_vport_list_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.func_type = vi->func_type;
+	request.pf_id = vi->pf_id;
+	request.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+	return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+		       struct cpchnl2_vport_id *vport_id,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpchnl2_get_vport_info_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	request.vport.vport_id = vport_id->vport_id;
+	request.vport.vport_type = vport_id->vport_type;
+	request.func.func_type = vi->func_type;
+	request.func.pf_id = vi->pf_id;
+	request.func.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1d963e5fd1..fb075c6860 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_representor.c',
+        'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 10/11] net/cpfl: update vport info before creating representor
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (8 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 09/11] net/cpfl: support vport list/info get beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-07 15:16     ` [PATCH v3 11/11] net/cpfl: support link update for representor beilei.xing
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Get port representor's vport list and update vport_map_hash
before creating the port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      |   2 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   3 +
 drivers/net/cpfl/cpfl_representor.c | 124 +++++++++++++++++++++++++++-
 3 files changed, 124 insertions(+), 5 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 6eb387ce96..330a865e3c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1643,7 +1643,7 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
 		       struct cpchnl2_vport_info *vport_info)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 4c6653e113..8f5b07abc5 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -198,6 +198,9 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_vport_info *vport_info);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index fd42063f2c..0cd92b1351 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -390,6 +390,86 @@ cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 	return false;
 }
 
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, &response->info);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
 int
 cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -397,8 +477,14 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 	uint32_t iter = 0;
 	const struct cpfl_repr_id *repr_id;
 	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
 	int ret;
 
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
 	rte_spinlock_lock(&adapter->repr_lock);
 
 	while (rte_hash_iterate(adapter->repr_allowlist_hash,
@@ -407,6 +493,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 		char name[RTE_ETH_NAME_MAX_LEN];
 		uint32_t iter_iter = 0;
 		bool matched;
+		int i;
 
 		/* skip representor already be created */
 		if (dev != NULL)
@@ -424,6 +511,35 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 				 repr_id->host_id,
 				 repr_id->pf_id);
 
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			goto err;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update  host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+		}
+
 		/* find a matched vport */
 		rte_spinlock_lock(&adapter->vport_map_lock);
 
@@ -449,8 +565,7 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 			if (ret != 0) {
 				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
 				rte_spinlock_unlock(&adapter->vport_map_lock);
-				rte_spinlock_unlock(&adapter->repr_lock);
-				return ret;
+				goto err;
 			}
 			break;
 		}
@@ -464,7 +579,8 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 		rte_spinlock_unlock(&adapter->vport_map_lock);
 	}
 
+err:
 	rte_spinlock_unlock(&adapter->repr_lock);
-
-	return 0;
+	rte_free(vlist_resp);
+	return ret;
 }
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 11/11] net/cpfl: support link update for representor
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (9 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 10/11] net/cpfl: update vport info before creating representor beilei.xing
@ 2023-09-07 15:16     ` beilei.xing
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
  11 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-07 15:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add link update ops for representor.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  1 +
 drivers/net/cpfl/cpfl_representor.c | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8f5b07abc5..d110f26e64 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -172,6 +172,7 @@ struct cpfl_repr {
 	struct cpfl_repr_id repr_id;
 	struct rte_ether_addr mac_addr;
 	struct cpfl_vport_info *vport_info;
+	bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 0cd92b1351..3c0fa957de 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -308,6 +308,23 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+		      __rte_unused int wait_to_complete)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
+
+	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+	dev_link->link_status = repr->func_up ?
+		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -317,6 +334,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+
+	.link_update		= cpfl_repr_link_update,
 };
 
 static int
@@ -331,6 +350,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
 	repr->itf.adapter = adapter;
 	repr->itf.data = eth_dev->data;
+	if (repr->vport_info->vport_info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 00/10] net/cpfl: support port representor
  2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
                       ` (10 preceding siblings ...)
  2023-09-07 15:16     ` [PATCH v3 11/11] net/cpfl: support link update for representor beilei.xing
@ 2023-09-08 11:16     ` beilei.xing
  2023-09-08 11:16       ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
                         ` (10 more replies)
  11 siblings, 11 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. Code refinement to prepare for representor support
2. Support port representor

v4 changes:
 - change the patch order
 - merge two patches
 - revert enum change
v3 changes:
 - Refine commit log.
 - Add macro and enum.
 - Refine doc.
 - Refine error handling.
v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (10):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: refine handle virtual channel message
  net/cpfl: introduce CP channel API
  net/cpfl: enable vport mapping
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: support vport list/info get
  net/cpfl: create port representor
  net/cpfl: support link update for representor

 doc/guides/nics/cpfl.rst               |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h         | 340 ++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c         | 619 +++++++++++++++++++++----
 drivers/net/cpfl/cpfl_ethdev.h         |  91 +++-
 drivers/net/cpfl/cpfl_representor.c    | 607 ++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h    |  26 ++
 drivers/net/cpfl/cpfl_vchnl.c          |  72 +++
 drivers/net/cpfl/meson.build           |   4 +-
 9 files changed, 1692 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 01/10] net/cpfl: refine devargs parse and process
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-08 11:16       ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
                         ` (9 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Keep devargs in the adapter structure.
2. Refine handling of the case where no vport is specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++---------------
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter,
-		   struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct cpfl_devargs *cpfl_args = &adapter->devargs;
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret;
 
 	cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
-	/* check parsed devargs */
-	if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-	    adapter->max_vport_nb) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     adapter->max_vport_nb);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-		if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
-				     cpfl_args->req_vports[i], adapter->max_vport_nb - 1);
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been requested",
-				     cpfl_args->req_vports[i]);
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 	adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i;
+
+	/* refine vport number, at least 1 vport */
+	if (devargs->req_vport_nb == 0) {
+		devargs->req_vport_nb = 1;
+		devargs->req_vports[0] = 0;
+	}
+
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + devargs->req_vport_nb >
+	    adapter->max_vport_nb) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     adapter->max_vport_nb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     devargs->req_vports[i], adapter->max_vport_nb - 1);
+			return -EINVAL;
+		}
+
+		if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
+				     devargs->req_vports[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport_param vport_param;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret, i;
+
+	for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = adapter->devargs.req_vports[i];
+		vport_param.idx = cpfl_vport_idx_alloc(adapter);
+		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			break;
+		}
+		snprintf(name, sizeof(name), "net_%s_vport_%d",
+			 pci_dev->device.name,
+			 adapter->devargs.req_vports[i]);
+		ret = rte_eth_dev_create(&pci_dev->device, name,
+					    sizeof(struct cpfl_vport),
+					    NULL, NULL, cpfl_dev_vport_init,
+					    &vport_param);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to create vport %d",
+				    vport_param.devarg_id);
+	}
+
+	return 0;
+}
+
 static int
 cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
-	struct cpfl_vport_param vport_param;
 	struct cpfl_adapter_ext *adapter;
-	struct cpfl_devargs devargs;
-	char name[RTE_ETH_NAME_MAX_LEN];
-	int i, retval;
+	int retval;
 
 	if (!cpfl_adapter_list_init) {
 		rte_spinlock_init(&cpfl_adapter_lock);
@@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
+	retval = cpfl_parse_devargs(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return retval;
+	}
+
 	retval = cpfl_adapter_ext_init(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter.");
@@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next);
 	rte_spinlock_unlock(&cpfl_adapter_lock);
 
-	retval = cpfl_parse_devargs(pci_dev, adapter, &devargs);
+	retval = cpfl_vport_devargs_process(adapter);
 	if (retval != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		PMD_INIT_LOG(ERR, "Failed to process vport devargs");
 		goto err;
 	}
 
-	if (devargs.req_vport_nb == 0) {
-		/* If no vport devarg, create vport 0 by default. */
-		vport_param.adapter = adapter;
-		vport_param.devarg_id = 0;
-		vport_param.idx = cpfl_vport_idx_alloc(adapter);
-		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-			return 0;
-		}
-		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
-			 pci_dev->device.name);
-		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct cpfl_vport),
-					    NULL, NULL, cpfl_dev_vport_init,
-					    &vport_param);
-		if (retval != 0)
-			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-	} else {
-		for (i = 0; i < devargs.req_vport_nb; i++) {
-			vport_param.adapter = adapter;
-			vport_param.devarg_id = devargs.req_vports[i];
-			vport_param.idx = cpfl_vport_idx_alloc(adapter);
-			if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-				break;
-			}
-			snprintf(name, sizeof(name), "cpfl_%s_vport_%d",
-				 pci_dev->device.name,
-				 devargs.req_vports[i]);
-			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct cpfl_vport),
-						    NULL, NULL, cpfl_dev_vport_init,
-						    &vport_param);
-			if (retval != 0)
-				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    vport_param.devarg_id);
-		}
+	retval = cpfl_vport_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create vports.");
+		goto err;
 	}
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e42354f70..b637bf2e45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -115,6 +115,7 @@ struct cpfl_adapter_ext {
 	uint16_t cur_vport_nb;
 
 	uint16_t used_vecs_num;
+	struct cpfl_devargs devargs;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 02/10] net/cpfl: introduce interface structure
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
  2023-09-08 11:16       ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-09  2:08         ` Wu, Jingjing
  2023-09-08 11:16       ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
                         ` (8 subsequent siblings)
  10 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Introduce the cpfl interface structure to distinguish between vport and port
representor.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 16 ++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
+	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..53e45035e8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,19 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+	CPFL_ITF_TYPE_VPORT,
+	CPFL_ITF_TYPE_REPRESENTOR
+};
+
+struct cpfl_itf {
+	enum cpfl_itf_type type;
+	struct cpfl_adapter_ext *adapter;
+	void *data;
+};
+
 struct cpfl_vport {
+	struct cpfl_itf itf;
 	struct idpf_vport base;
 	struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +136,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev)					\
+	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)				\
+	((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 03/10] net/cpfl: refine handle virtual channel message
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
  2023-09-08 11:16       ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
  2023-09-08 11:16       ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-09  2:13         ` Wu, Jingjing
  2023-09-08 11:16       ` [PATCH v4 04/10] net/cpfl: introduce CP channel API beilei.xing
                         ` (7 subsequent siblings)
  10 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Refine handling of the virtual channel event message.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 46 ++++++++++++++++------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..6b6e9b37b1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1450,40 +1450,50 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-	struct idpf_vport *vport = NULL;
+	struct cpfl_vport *vport = NULL;
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = &adapter->vports[i]->base;
-		if (vport->vport_id != vport_id)
+		vport = adapter->vports[i];
+		if (vport->base.vport_id != vport_id)
 			continue;
 		else
 			return vport;
 	}
 
-	return vport;
+	return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-	struct rte_eth_dev_data *data = vport->dev_data;
-	struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+	struct cpfl_vport *vport;
+	struct rte_eth_dev_data *data;
+	struct rte_eth_dev *dev;
 
 	if (msglen < sizeof(struct virtchnl2_event)) {
 		PMD_DRV_LOG(ERR, "Error event");
 		return;
 	}
 
+	vport = cpfl_find_vport(adapter, vc_event->vport_id);
+	if (!vport) {
+		PMD_DRV_LOG(ERR, "Can't find vport.");
+		return;
+	}
+
+	data = vport->itf.data;
+	dev = &rte_eth_devices[data->port_id];
+
 	switch (vc_event->event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-		vport->link_up = !!(vc_event->link_status);
-		vport->link_speed = vc_event->link_speed;
+		vport->base.link_up = !!(vc_event->link_status);
+		vport->base.link_speed = vc_event->link_speed;
 		cpfl_dev_link_update(dev, 0);
 		break;
 	default:
@@ -1498,10 +1508,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 	struct idpf_adapter *base = &adapter->base;
 	struct idpf_dma_mem *dma_mem = NULL;
 	struct idpf_hw *hw = &base->hw;
-	struct virtchnl2_event *vc_event;
 	struct idpf_ctlq_msg ctlq_msg;
 	enum idpf_mbx_opc mbx_op;
-	struct idpf_vport *vport;
 	uint16_t pending = 1;
 	uint32_t vc_op;
 	int ret;
@@ -1523,18 +1531,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 		switch (mbx_op) {
 		case idpf_mbq_opc_send_msg_to_peer_pf:
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
-				if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
-					PMD_DRV_LOG(ERR, "Error event");
-					return;
-				}
-				vc_event = (struct virtchnl2_event *)base->mbx_resp;
-				vport = cpfl_find_vport(adapter, vc_event->vport_id);
-				if (!vport) {
-					PMD_DRV_LOG(ERR, "Can't find vport.");
-					return;
-				}
-				cpfl_handle_event_msg(vport, base->mbx_resp,
-						      ctlq_msg.data_len);
+				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
+							    ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 04/10] net/cpfl: introduce CP channel API
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (2 preceding siblings ...)
  2023-09-08 11:16       ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-08 11:16       ` [PATCH v4 05/10] net/cpfl: enable vport mapping beilei.xing
                         ` (6 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

The CPCHNL2 defines the API (v2) used for communication between the
CPF driver and its on-chip management software. The CPFL PMD is a
specific CPF driver to utilize CPCHNL2 for device configuration and
event probing.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +++++++++++++++++++++++++++++++++
 1 file changed, 340 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 0000000000..2eefcbcc10
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief      Command Opcodes
+ *              Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+	/* vport info */
+	CPCHNL2_OP_GET_VPORT_LIST		= 0x8025,
+	CPCHNL2_OP_GET_VPORT_INFO		= 0x8026,
+
+	/* DPHMA Event notifications */
+	CPCHNL2_OP_EVENT			= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT		8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
+
+#define CPCHNL2_FUNC_TYPE_PF		0
+#define CPCHNL2_FUNC_TYPE_SRIOV		1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status */
+#define CPCHNL2_VPORT_STATUS_CREATED	0
+#define CPCHNL2_VPORT_STATUS_ENABLED	1
+#define CPCHNL2_VPORT_STATUS_DISABLED	2
+#define CPCHNL2_VPORT_STATUS_DESTROYED	3
+
+/* Queue Groups Extension */
+/**************************************************/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed for a single vport.
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X	\
+	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+struct cpchnl2_queue_chunk {
+	u32 type;	       /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */
+	u32 start_queue_id;
+	u32 num_queues;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+	u16 num_chunks;
+	u8 reserved[6];
+	struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks);
+
+struct cpchnl2_rx_queue_group_info {
+	/* User can ask to update rss_lut size originally allocated
+	 * by CreateVport command. New size will be returned if allocation succeeded,
+	 * otherwise original rss_size from CreateVport will be returned.
+	 */
+	u16 rss_lut_size;
+	u8 pad[6]; /*Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info);
+
+struct cpchnl2_tx_queue_group_info {
+	u8 tx_tc; /*TX TC queue group will be connected to*/
+	/* Each group can have its own priority, value 0-7, while each group with unique
+	 * priority is strict priority. It can be single set of queue groups which configured with
+	 * same priority, then they are assumed part of WFQ arbitration group and are expected to be
+	 * assigned with weight.
+	 */
+	u8 priority;
+	/* Determines if queue group is expected to be Strict Priority according to its priority */
+	u8 is_sp;
+	u8 pad;
+	/* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set.
+	 * The weights of the groups are independent of each other. Possible values: 1-200.
+	 */
+	u16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	u8 pad2[8]; /* Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info);
+
+struct cpchnl2_queue_group_id {
+	/* Queue group ID - depending on its type:
+	 * Data & p2p - is an index which is relative to Vport.
+	 * Config & Mailbox - is an ID which is relative to func.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by host and assigned by Control plane.
+	 */
+	u16 queue_group_id;
+	/* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+	u16 queue_group_type;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id);
+
+struct cpchnl2_queue_group_info {
+	/* IN */
+	struct cpchnl2_queue_group_id qg_id;
+
+	/* IN, Number of queues of different types in the group. */
+	u16 num_tx_q;
+	u16 num_tx_complq;
+	u16 num_rx_q;
+	u16 num_rx_bufq;
+
+	struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+	struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+	u8 egress_port;
+	u8 pad[39]; /*Future extension purpose*/
+	struct cpchnl2_queue_grp_chunks chunks;
+};
+CPCHNL2_CHECK_STRUCT_LEN(344, cpchnl2_queue_group_info);
+
+struct cpchnl2_queue_groups {
+	u16 num_queue_groups; /* Number of queue groups in struct below */
+	u8 pad[6];
+	/* group information , number is determined by param above */
+	struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups);
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+	CPCHNL2_FTYPE_LAN_PF = 0,
+	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport_id {
+	u32 vport_id;
+	u16 vport_type;
+	u8 pad[2];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_vport_id);
+
+struct cpchnl2_func_id {
+	/* Function type: 0 - LAN PF, 1 -  LAN VF, Rest - "reserved" */
+	u8 func_type;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs
+	 * and 8-12 CPFs are valid
+	 */
+	u8 pf_id;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_func_id);
+
+/* Note! Do not change the fields and especially their order as should eventually
+ * be aligned to 32bit. Must match the virtchnl structure definition.
+ * If should change, change also the relevant FAS and virtchnl code, under permission.
+ */
+struct cpchnl2_vport_info {
+	u16 vport_index;
+	/* VSI index, global indexing aligned to HW.
+	 * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command.
+	 * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev".
+	 */
+	u16 vsi_id;
+	u8 vport_status;	/* enum cpchnl2_vport_status */
+	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	u8 func_type;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	/* Always relevant, indexing is according to LAN PE 0-15,
+	 * while only 0-4 APFs and 8-12 CPFs are valid.
+	 */
+	u8 pf_id;
+	u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */
+	/* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */
+	u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS];
+	u16 vmrl_id;
+	/* Indicates if IMC created SEM MAC rule for this Vport.
+	 * Currently this is done by IMC for all Vport of type "Default" only,
+	 * but can be different in the future.
+	 */
+	u8 sem_mac_rule_exist;
+	/* Bitmask to inform which TC is valid.
+	 * 0x1 << TCnum. 1b: valid else 0.
+	 * Driven by Node Policy on system level, then system level TCs are
+	 * reported to IDPF and it can enable Vport level TCs on TX according
+	 * to system enabled ones.
+	 * If TC aware mode - bit set for valid TC.
+	 * otherwise =1 (only bit 0 is set. represents the VSI
+	 */
+	u8 tx_tc_bitmask;
+	/* For each valid TC, TEID of VPORT node over TC in TX LAN WS.
+	 * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID
+	 */
+	u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT];
+	/* For each valid TC, bandwidth in mbps.
+	 * Default BW per Vport is from Node policy
+	 * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth
+	 */
+	u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT];
+	/* From Node Policy. */
+	u16 max_mtu;
+	u16 default_rx_qid;	/* Default LAN RX Queue ID */
+	u16 vport_flags; /* see: VPORT_FLAGS */
+	u8 egress_port;
+	u8 pad_reserved[5];
+};
+CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_LIST
+ */
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ */
+struct cpchnl2_get_vport_list_request {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ *        other PE types
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ * @param nof_vports Number of vports created on the function
+ * @param vports array of the IDs and types. vport ID is relative to its func (PF/VF). same as in
+ *        Create Vport
+ * vport_type: Aligned to VirtChnl types: Default, SIOV, etc.
+ */
+struct cpchnl2_get_vport_list_response {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u16 nof_vports;
+	u8 pad[2];
+	struct cpchnl2_vport_id vports[];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_response);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_INFO
+ */
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request
+ * @param vport a structure containing vport_id (relative to function) and type
+ * @param func a structure containing function type, pf_id, vf_id
+ */
+struct cpchnl2_get_vport_info_request {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_get_vport_info_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response
+ * @param vport a structure containing vport_id (relative to function) and type to get info for
+ * @param info a structure all the information for a given vport
+ * @param queue_groups a structure containing all the queue groups of the given vport
+ */
+struct cpchnl2_get_vport_info_response {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_vport_info info;
+	struct cpchnl2_queue_groups queue_groups;
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_get_vport_info_response);
+
+ /* Cpchnl events
+  * Sends event message to inform the peer of notification that may affect it.
+  * No direct response is expected from the peer, though it may generate other
+  * messages in response to this one.
+  */
+enum cpchnl2_event {
+	CPCHNL2_EVENT_UNKNOWN = 0,
+	CPCHNL2_EVENT_VPORT_CREATED,
+	CPCHNL2_EVENT_VPORT_DESTROYED,
+	CPCHNL2_EVENT_VPORT_ENABLED,
+	CPCHNL2_EVENT_VPORT_DISABLED,
+	CPCHNL2_PKG_EVENT,
+	CPCHNL2_EVENT_ADD_QUEUE_GROUPS,
+	CPCHNL2_EVENT_DEL_QUEUE_GROUPS,
+	CPCHNL2_EVENT_ADD_QUEUES,
+	CPCHNL2_EVENT_DEL_QUEUES
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_CREATED
+ */
+struct cpchnl2_event_vport_created {
+	struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_info info; /* Vport configuration info */
+	struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_event_vport_created);
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_DESTROYED
+ */
+struct cpchnl2_event_vport_destroyed {
+	/* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_event_vport_destroyed);
+
+struct cpchnl2_event_info {
+	struct {
+		s32 type;		/* See enum cpchnl2_event */
+		uint8_t reserved[4];	/* Reserved */
+	} header;
+	union {
+		struct cpchnl2_event_vport_created vport_created;
+		struct cpchnl2_event_vport_destroyed vport_destroyed;
+	} data;
+};
+
+#endif /* _CPFL_CPCHNL_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 05/10] net/cpfl: enable vport mapping
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (3 preceding siblings ...)
  2023-09-08 11:16       ` [PATCH v4 04/10] net/cpfl: introduce CP channel API beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-08 11:16       ` [PATCH v4 06/10] net/cpfl: parse representor devargs beilei.xing
                         ` (5 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Handle cpchnl event for vport create/destroy
2. Use hash table to store vport_id to vport_info mapping
3. Use spinlock for thread safety.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  21 ++++-
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 6b6e9b37b1..f51aa6e95a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include <rte_dev.h>
 #include <errno.h>
 #include <rte_alarm.h>
+#include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1502,6 +1503,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vport_identity,
+		       struct cpchnl2_vport_info *vport_info)
+{
+	struct cpfl_vport_info *info = NULL;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret >= 0) {
+		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+		/* overwrite info */
+		if (info)
+			info->vport_info = *vport_info;
+		goto fini;
+	}
+
+	info = rte_zmalloc(NULL, sizeof(*info), 0);
+	if (info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	info->vport_info = *vport_info;
+
+	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+		rte_free(info);
+		goto err;
+	}
+
+fini:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+	struct cpfl_vport_info *info;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "vport id not exist");
+		goto err;
+	}
+
+	rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	rte_free(info);
+
+	return 0;
+
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+	struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+	struct cpchnl2_vport_info *info;
+	struct cpfl_vport_id vport_identity = { 0 };
+
+	if (msglen < sizeof(struct cpchnl2_event_info)) {
+		PMD_DRV_LOG(ERR, "Error event");
+		return;
+	}
+
+	switch (cpchnl2_event->header.type) {
+	case CPCHNL2_EVENT_VPORT_CREATED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+		info = &cpchnl2_event->data.vport_created.info;
+		vport_identity.func_type = info->func_type;
+		vport_identity.pf_id = info->pf_id;
+		vport_identity.vf_id = info->vf_id;
+		if (cpfl_vport_info_create(adapter, &vport_identity, info))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+		break;
+	case CPCHNL2_EVENT_VPORT_DESTROYED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+		vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+		vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+		vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+		if (cpfl_vport_info_destroy(adapter, &vport_identity))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+		break;
+	}
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1533,6 +1636,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
 				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
 							    ctlq_msg.data_len);
+			} else if (vc_op == CPCHNL2_OP_EVENT) {
+				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+							     ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
@@ -1608,6 +1714,48 @@ static struct virtchnl2_get_capabilities req_caps = {
 	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
 };
 
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-vport", adapter->name);
+
+	rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = adapter->name,
+		.entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_vport_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->vport_map_hash = rte_hash_create(&params);
+
+	if (adapter->vport_map_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+	const void *key = NULL;
+	struct cpfl_vport_map_info *info;
+	uint32_t iter = 0;
+
+	while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+		rte_free(info);
+
+	rte_hash_free(adapter->vport_map_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1632,6 +1780,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_adapter_init;
 	}
 
+	ret = cpfl_vport_map_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init vport map");
+		goto err_vport_map_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1656,6 +1810,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
 	idpf_adapter_deinit(base);
 err_adapter_init:
 	return ret;
@@ -1885,6 +2041,7 @@ static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
 
 	rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 53e45035e8..3515fec4f7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
 #include <rte_spinlock.h>
 #include <rte_ethdev.h>
 #include <rte_kvargs.h>
+#include <rte_hash.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 
-#include "cpfl_logs.h"
-
 #include <idpf_common_device.h>
 #include <idpf_common_virtchnl.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
 
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct cpfl_vport_id {
+	uint32_t vport_id;
+	uint8_t func_type;
+	uint8_t pf_id;
+	uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+	struct cpchnl2_vport_info vport_info;
+	bool enabled;
+};
+
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
 	CPFL_ITF_TYPE_REPRESENTOR
@@ -128,6 +142,9 @@ struct cpfl_adapter_ext {
 
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
+
+	rte_spinlock_t vport_map_lock;
+	struct rte_hash *vport_map_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
     subdir_done()
 endif
 
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 06/10] net/cpfl: parse representor devargs
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (4 preceding siblings ...)
  2023-09-08 11:16       ` [PATCH v4 05/10] net/cpfl: enable vport mapping beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-08 11:16       ` [PATCH v4 07/10] net/cpfl: support probe again beilei.xing
                         ` (4 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : host (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- host > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if omit default value.

representor=c0pf0
  -- host > apf
     same as pf0 if omit default value.

representor=c1pf0
  -- accelerator core > apf

multiple representor devargs are supported.
e.g.: create 4 representors for 4 vfs on host APF and one
representor for APF on accelerator core.

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/nics/cpfl.rst               |  36 +++++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c         | 179 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h         |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..83a18c3f2e 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor``, the user can specify
+  for which functions to create port representors.
+
+  Format is::
+
+    [[c<controller_id>]pf<pf_id>]vf<vf_id>
+
+  Controller_id 0 is host (default), while 1 is accelerator core.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default value can be omitted.
+
+  Create 4 representors for 4 vfs on host APF::
+
+    -a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on accelerator core::
+
+    -a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on host APF and one representor for CPF on accelerator core::
+
+    -a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 ------------------------------
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..3d9be208d0 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,9 @@ New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f51aa6e95a..1b21134ec1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
+#include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val)
+{
+	uint16_t i;
+
+	for (i = 0; i < *len_list; i++) {
+		if (list[i] == val)
+			return 0;
+	}
+	if (*len_list >= max_list)
+		return -1;
+	list[(*len_list)++] = val;
+	return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+	const uint16_t max_list)
+{
+	uint16_t lo, hi, val;
+	int result, n = 0;
+	const char *pos = str;
+
+	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+	if (result == 1) {
+		if (enlist(list, len_list, max_list, lo) != 0)
+			return NULL;
+	} else if (result == 2) {
+		if (lo > hi)
+			return NULL;
+		for (val = lo; val <= hi; val++) {
+			if (enlist(list, len_list, max_list, val) != 0)
+				return NULL;
+		}
+	} else {
+		return NULL;
+	}
+	return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list)
+{
+	const char *pos = str;
+
+	if (*pos == '[')
+		pos++;
+	while (1) {
+		pos = process_range(pos, list, len_list, max_list);
+		if (pos == NULL)
+			return NULL;
+		if (*pos != ',') /* end of list */
+			break;
+		pos++;
+	}
+	if (*str == '[' && *pos != ']')
+		return NULL;
+	if (*pos == ']')
+		pos++;
+	return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+	struct cpfl_devargs *devargs = args;
+	struct rte_eth_devargs *eth_da;
+	const char *str = value;
+
+	if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+		return -EINVAL;
+
+	eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+	if (str[0] == 'c') {
+		str += 1;
+		str = process_list(str, eth_da->mh_controllers,
+				&eth_da->nb_mh_controllers,
+				RTE_DIM(eth_da->mh_controllers));
+		if (str == NULL)
+			goto done;
+	}
+	if (str[0] == 'p' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_PF;
+		str += 2;
+		str = process_list(str, eth_da->ports,
+				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
+		if (str == NULL || str[0] == '\0')
+			goto done;
+	} else if (eth_da->nb_mh_controllers > 0) {
+		/* 'c' must be followed by 'pf'. */
+		str = NULL;
+		goto done;
+	}
+	if (str[0] == 'v' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+		str += 2;
+	} else if (str[0] == 's' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_SF;
+		str += 2;
+	} else {
+		/* 'pf' must be followed by 'vf' or 'sf'. */
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+			str = NULL;
+			goto done;
+		}
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+	}
+	str = process_list(str, eth_da->representor_ports,
+		&eth_da->nb_representor_ports,
+		RTE_DIM(eth_da->representor_ports));
+done:
+	if (str == NULL) {
+		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
+		return -1;
+	}
+
+	devargs->repr_args_num++;
+
+	return 0;
+}
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	cpfl_args->repr_args_num = 0;
+	ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args);
+
+	if (ret != 0)
+		goto fail;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2085,6 +2216,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
+static int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2163,6 +2336,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto err;
 	}
 
+	retval = cpfl_repr_devargs_process(adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
+		goto err;
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 3515fec4f7..9c4d8d3ea1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -60,16 +60,24 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_HOST	0
+#define CPFL_HOST_ID_ACC	1
+#define CPFL_PF_TYPE_APF	0
+#define CPFL_PF_TYPE_CPF	1
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
+#define CPFL_REPR_ARG_NUM_MAX	4
 /* Struct used when parse driver specific devargs */
 struct cpfl_devargs {
 	uint16_t req_vports[CPFL_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
+	uint8_t repr_args_num;
+	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
 };
 
 struct p2p_queue_chunks_info {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 07/10] net/cpfl: support probe again
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (5 preceding siblings ...)
  2023-09-08 11:16       ` [PATCH v4 06/10] net/cpfl: parse representor devargs beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-08 11:16       ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
                         ` (3 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Only the representor devargs will be parsed when the device is probed again.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++-------
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1b21134ec1..236347eeb3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
 	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+	CPFL_REPRESENTOR,
+	NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
 	RTE_ETH_SPEED_NUM_NONE,
 	RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (devargs == NULL)
 		return 0;
 
-	kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+	kvlist = rte_kvargs_parse(devargs->args,
+			first ? cpfl_valid_args_first : cpfl_valid_args_again);
 	if (kvlist == NULL) {
 		PMD_INIT_LOG(ERR, "invalid kvargs key");
 		return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
+	if (!first)
+		return 0;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2289,18 +2298,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	       struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
 
-	if (!cpfl_adapter_list_init) {
-		rte_spinlock_init(&cpfl_adapter_lock);
-		TAILQ_INIT(&cpfl_adapter_list);
-		cpfl_adapter_list_init = true;
-	}
-
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
 	if (adapter == NULL) {
@@ -2308,7 +2310,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
-	retval = cpfl_parse_devargs(pci_dev, adapter);
+	retval = cpfl_parse_devargs(pci_dev, adapter, true);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		return retval;
@@ -2353,6 +2355,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_parse_devargs(pci_dev, adapter, false);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return ret;
+	}
+
+	ret = cpfl_repr_devargs_process(adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	struct cpfl_adapter_ext *adapter;
+
+	if (!cpfl_adapter_list_init) {
+		rte_spinlock_init(&cpfl_adapter_lock);
+		TAILQ_INIT(&cpfl_adapter_list);
+		cpfl_adapter_list_init = true;
+	}
+
+	adapter = cpfl_find_adapter_ext(pci_dev);
+
+	if (adapter == NULL)
+		return cpfl_pci_probe_first(pci_dev);
+	else
+		return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2375,7 +2417,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
 	.id_table	= pci_id_cpfl_map,
-	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING |
+			  RTE_PCI_DRV_PROBE_AGAIN,
 	.probe		= cpfl_pci_probe,
 	.remove		= cpfl_pci_remove,
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 08/10] net/cpfl: support vport list/info get
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (6 preceding siblings ...)
  2023-09-08 11:16       ` [PATCH v4 07/10] net/cpfl: support probe again beilei.xing
@ 2023-09-08 11:16       ` beilei.xing
  2023-09-09  2:34         ` Wu, Jingjing
  2023-09-08 11:17       ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
                         ` (2 subsequent siblings)
  10 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:16 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support cp channel ops CPCHNL2_OP_GET_VPORT_LIST and
CPCHNL2_OP_GET_VPORT_INFO.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 ++++
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9c4d8d3ea1..a501ccde14 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -157,6 +157,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 0000000000..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include <idpf_common_virtchnl.h>
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpchnl2_get_vport_list_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.func_type = vi->func_type;
+	request.pf_id = vi->pf_id;
+	request.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+	return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+		       struct cpchnl2_vport_id *vport_id,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpchnl2_get_vport_info_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	request.vport.vport_id = vport_id->vport_id;
+	request.vport.vport_type = vport_id->vport_type;
+	request.func.func_type = vi->func_type;
+	request.func.pf_id = vi->pf_id;
+	request.func.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..2f0f5d8434 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 09/10] net/cpfl: create port representor
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (7 preceding siblings ...)
  2023-09-08 11:16       ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
@ 2023-09-08 11:17       ` beilei.xing
  2023-09-09  3:04         ` Wu, Jingjing
  2023-09-08 11:17       ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
  10 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Track representor request in the allowlist.
Representor will only be created for active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 109 +++---
 drivers/net/cpfl/cpfl_ethdev.h      |  36 ++
 drivers/net/cpfl/cpfl_representor.c | 586 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 714 insertions(+), 44 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 236347eeb3..330a865e3c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1643,7 +1643,7 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
 		       struct cpchnl2_vport_info *vport_info)
@@ -1896,6 +1896,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_wl", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_allowlist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_allowlist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1926,6 +1962,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_allowlist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+		goto err_repr_allowlist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1950,6 +1992,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2225,48 +2269,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2302,6 +2304,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2341,11 +2344,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2372,6 +2387,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a501ccde14..4937d2c6e3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,31 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define CPFL_HOST0_APF		0
+#define CPFL_ACC_APF_ID		4
+#define CPFL_HOST0_CPF_ID	8
+#define CPFL_ACC_CPF_ID		12
+
+#define CPFL_VPORT_LAN_PF	0
+#define CPFL_VPORT_LAN_VF	1
+
+/* bit[15:14] type
+ * bit[13] host/accelerator core
+ * bit[12] apf/cpf
+ * bit[11:0] vf
+ */
+#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id)	\
+	((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) +	\
+	 (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -136,6 +157,13 @@ struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -153,10 +181,16 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_allowlist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_vport_info *vport_info);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
@@ -171,6 +205,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..0cd92b1351
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+/* Attach @dev to an already-allowlisted representor ID.
+ * Returns -ENOENT if the ID was never allowlisted, otherwise the
+ * rte_hash_add_key_data() result.
+ * No locking here: called from cpfl_repr_init() while
+ * cpfl_repr_create() holds adapter->repr_lock.
+ */
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+	return ret;
+}
+
+/* Allowlist a representor ID (key only; the ethdev is bound later by
+ * cpfl_repr_allowlist_update()).  Returns -EEXIST if the ID is already
+ * present.  Serialized by adapter->repr_lock.
+ */
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Allowlist every (controller, PF[, VF]) combination described by one
+ * parsed "representor" devarg.  IDs already allowlisted (-EEXIST) are
+ * skipped silently; any other failure aborts with its error code.
+ */
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Validate and refine all representor devargs, then allowlist them.
+ * A missing controller defaults to the host (Xeon) side; a missing PF
+ * defaults to APF.  Returns 0 on success, negative errno on invalid
+ * IDs or allowlist failure.
+ */
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					/* fix: this loop validates the PF id,
+					 * not the host id; log message was a
+					 * copy-paste from the controller check.
+					 */
+					PMD_INIT_LOG(ERR, "Invalid Port ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Remove a representor ID from the allowlist.  Serialized by
+ * adapter->repr_lock; logs and returns the rte_hash error on failure.
+ */
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Tear down a representor ethdev.
+ * mac_addrs points at repr->mac_addr inside dev_private (see
+ * cpfl_repr_init()), not at separately allocated memory, so it is
+ * cleared here to keep the ethdev layer from trying to free it.
+ */
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+/* dev_configure ops: representors currently accept at most one Rx queue. */
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* dev_close ops: delegates all cleanup to cpfl_repr_uninit(). */
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+/* dev_infos_get ops: report fixed capabilities for a representor
+ * (single queue pair, CPFL ring/offload limits) and expose the
+ * represented vport's vsi_id as the switch port id.
+ */
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain */
+	dev_info->switch_info.port_id = repr->vport_info->vport_info.vsi_id;
+
+	return 0;
+}
+
+/* dev_start ops: no HW queues to program for a representor yet; just
+ * mark every queue started.
+ * NOTE(review): cpfl_repr_dev_stop() clears dev->data->dev_started but
+ * this op does not set it -- confirm the asymmetry is intended (the
+ * ethdev layer manages the flag after the op returns).
+ */
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/* dev_stop ops: mark every queue stopped and clear dev_started. */
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+/* Placeholder Rx queue setup; real setup arrives with the representor
+ * data-path patches.
+ */
+static int
+cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 __rte_unused struct rte_mempool *pool)
+{
+	/* Dummy */
+	return 0;
+}
+
+/* Placeholder Tx queue setup; real setup arrives with the representor
+ * data-path patches.
+ */
+static int
+cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	/* Dummy */
+	return 0;
+}
+
+/* eth_dev ops table shared by all cpfl port representors. */
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
+	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+};
+
+/* rte_eth_dev_create() callback: initialize a representor ethdev from
+ * cpfl_repr_param, derive the 16-bit representor_id from the
+ * CPFL_REPRESENTOR_ID bit layout, assign a random MAC (mac_addrs
+ * points into dev_private; see cpfl_repr_uninit()), and bind the
+ * ethdev to its allowlist entry.  Runs while cpfl_repr_create() holds
+ * adapter->repr_lock.
+ */
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	eth_dev->data->representor_id =
+		CPFL_REPRESENTOR_ID(repr->repr_id.type,
+				    repr->repr_id.host_id,
+				    repr->repr_id.pf_id,
+				    repr->repr_id.vf_id);
+
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+/* Map (host_id, pf_id) to the function ID used on the IMC side
+ * (CPFL_HOST0_APF/CPFL_ACC_APF_ID/...), or -EINVAL for an
+ * out-of-range pair.
+ */
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+/* Check whether a backend vport belongs to the function a representor
+ * stands for: PF representors match on the IMC function id, VF
+ * representors match on the VF number.
+ */
+static bool
+cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == CPFL_VPORT_LAN_PF) {
+		/* fix: also compare the mapped function id against the
+		 * vport's pf_id; previously any valid (host, pf) pair
+		 * matched every LAN PF vport regardless of which PF it
+		 * actually belonged to.
+		 */
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		if (func_id < 0 || func_id != info->pf_id)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == CPFL_VPORT_LAN_VF) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+/* Query the backend for the vport list of the function a representor
+ * stands for.  PF representors address the function via
+ * cpfl_func_id_get(); VF representors are addressed under HOST0 APF.
+ */
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+	return ret;
+}
+
+/* Query detailed info of one vport belonging to the represented
+ * function (same function addressing as cpfl_repr_vport_list_query()).
+ */
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+	return ret;
+}
+
+/* Insert/update the vport_map_hash entry for one vport of the
+ * represented function so representor creation can match against it.
+ */
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, &response->info);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Create ethdev representors for every allowlisted representor ID that
+ * does not have an ethdev bound yet.  For each pending ID the backend
+ * is queried for its vport list and per-vport info, the vport map hash
+ * is refreshed, and an ethdev is created for the first matching vport.
+ * Returns 0 on success (including "nothing to do"), negative errno on
+ * failure.  Holds adapter->repr_lock for the whole scan.
+ */
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
+	int ret = 0;	/* fix: was uninitialized, but returned as-is at the
+			 * shared exit when the allowlist is empty or every
+			 * entry is already bound (undefined behavior)
+			 */
+
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_allowlist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		bool matched;
+		int i;
+
+		/* skip representors that were already created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			goto err;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+		}
+
+		/* find a matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		matched = false;
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!cpfl_match_repr_with_vport(repr_id, &vi->vport_info))
+				continue;
+
+			matched = true;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				goto err;
+			}
+			break;
+		}
+
+		/* warn if no matching vport was detected */
+		if (!matched)
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s, "
+					      "creation will be deferred when vport is detected",
+					      name);
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+err:	/* shared exit path for both success and failure */
+	rte_spinlock_unlock(&adapter->repr_lock);
+	rte_free(vlist_resp);
+	return ret;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+/* Identity of a port representor; also used as the key of the
+ * adapter's representor allowlist hash.
+ */
+struct cpfl_repr_id {
+	uint8_t host_id;	/* controller: host or accelerator core */
+	uint8_t pf_id;		/* PF type: APF or CPF */
+	uint8_t type;		/* RTE_ETH_REPRESENTOR_PF or _VF */
+	uint8_t vf_id;		/* VF number; valid when type is VF */
+};
+
+/* Parameters handed to the representor init callback via
+ * rte_eth_dev_create().
+ */
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info;
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 2f0f5d8434..d8b92ae16a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v4 10/10] net/cpfl: support link update for representor
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (8 preceding siblings ...)
  2023-09-08 11:17       ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
@ 2023-09-08 11:17       ` beilei.xing
  2023-09-09  3:05         ` Wu, Jingjing
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
  10 siblings, 1 reply; 89+ messages in thread
From: beilei.xing @ 2023-09-08 11:17 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add link update ops for representor.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  1 +
 drivers/net/cpfl/cpfl_representor.c | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 4937d2c6e3..0dd9d4e7f9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -162,6 +162,7 @@ struct cpfl_repr {
 	struct cpfl_repr_id repr_id;
 	struct rte_ether_addr mac_addr;
 	struct cpfl_vport_info *vport_info;
+	bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 0cd92b1351..3c0fa957de 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -308,6 +308,23 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+		      __rte_unused int wait_to_complete)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
+
+	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+	dev_link->link_status = repr->func_up ?
+		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -317,6 +334,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+
+	.link_update		= cpfl_repr_link_update,
 };
 
 static int
@@ -331,6 +350,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
 	repr->itf.adapter = adapter;
 	repr->itf.data = eth_dev->data;
+	if (repr->vport_info->vport_info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v4 02/10] net/cpfl: introduce interface structure
  2023-09-08 11:16       ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
@ 2023-09-09  2:08         ` Wu, Jingjing
  0 siblings, 0 replies; 89+ messages in thread
From: Wu, Jingjing @ 2023-09-09  2:08 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Zhang, Qi Z



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, September 8, 2023 7:17 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v4 02/10] net/cpfl: introduce interface structure
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Introduce cplf interface structure to distinguish vport and port
> representor.
> 
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c |  3 +++
>  drivers/net/cpfl/cpfl_ethdev.h | 16 ++++++++++++++++
>  2 files changed, 19 insertions(+)
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 46b3a52e49..92fe92c00f 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> *init_params)
>  		goto err;
>  	}
> 
> +	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
> +	cpfl_vport->itf.adapter = adapter;
> +	cpfl_vport->itf.data = dev->data;
>  	adapter->vports[param->idx] = cpfl_vport;
>  	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
>  	adapter->cur_vport_nb++;
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index b637bf2e45..53e45035e8 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -86,7 +86,19 @@ struct p2p_queue_chunks_info {
>  	uint32_t rx_buf_qtail_spacing;
>  };
> 
> +enum cpfl_itf_type {
> +	CPFL_ITF_TYPE_VPORT,
> +	CPFL_ITF_TYPE_REPRESENTOR
Defined but not used in this patch, how about move CPFL_ITF_TYPE_REPRESENTOR to the patch that uses it?
> +};
> +
> +struct cpfl_itf {
> +	enum cpfl_itf_type type;
> +	struct cpfl_adapter_ext *adapter;
> +	void *data;
> +};
> +
>  struct cpfl_vport {
> +	struct cpfl_itf itf;
>  	struct idpf_vport base;
>  	struct p2p_queue_chunks_info *p2p_q_chunks_info;
> 
> @@ -124,5 +136,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
>  	RTE_DEV_TO_PCI((eth_dev)->device)
>  #define CPFL_ADAPTER_TO_EXT(p)					\
>  	container_of((p), struct cpfl_adapter_ext, base)
> +#define CPFL_DEV_TO_VPORT(dev)					\
> +	((struct cpfl_vport *)((dev)->data->dev_private))
> +#define CPFL_DEV_TO_ITF(dev)				\
> +	((struct cpfl_itf *)((dev)->data->dev_private))
> 
>  #endif /* _CPFL_ETHDEV_H_ */
> --
> 2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v4 03/10] net/cpfl: refine handle virtual channel message
  2023-09-08 11:16       ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-09-09  2:13         ` Wu, Jingjing
  0 siblings, 0 replies; 89+ messages in thread
From: Wu, Jingjing @ 2023-09-09  2:13 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Zhang, Qi Z

> -static struct idpf_vport *
> +static struct cpfl_vport *
>  cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
>  {
> -	struct idpf_vport *vport = NULL;
> +	struct cpfl_vport *vport = NULL;
>  	int i;
> 
>  	for (i = 0; i < adapter->cur_vport_nb; i++) {
> -		vport = &adapter->vports[i]->base;
> -		if (vport->vport_id != vport_id)
> +		vport = adapter->vports[i];
> +		if (vport->base.vport_id != vport_id)
Check if vport is NULL to ensure the structure access?
>  			continue;
>  		else
>  			return vport;
>  	}
> 
> -	return vport;
> +	return NULL;
>  }

^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v4 08/10] net/cpfl: support vport list/info get
  2023-09-08 11:16       ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
@ 2023-09-09  2:34         ` Wu, Jingjing
  0 siblings, 0 replies; 89+ messages in thread
From: Wu, Jingjing @ 2023-09-09  2:34 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, September 8, 2023 7:17 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v4 08/10] net/cpfl: support vport list/info get
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
> CPCHNL2_OP_CPF_GET_VPORT_INFO.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

Can we merge this patch to previous cpchnl handle one or move ahead before representor is introduced?
 


^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v4 09/10] net/cpfl: create port representor
  2023-09-08 11:17       ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
@ 2023-09-09  3:04         ` Wu, Jingjing
  0 siblings, 0 replies; 89+ messages in thread
From: Wu, Jingjing @ 2023-09-09  3:04 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Zhang, Qi Z

> +		/* warning if no match vport detected */
> +		if (!matched)
> +			PMD_INIT_LOG(WARNING, "No matched vport for
> representor %s "
> +					      "creation will be deferred when
> vport is detected",
> +					      name);
> +
If vport info is responded successfully, what is the case that matched is false? And I did not find the defer process.
> +		rte_spinlock_unlock(&adapter->vport_map_lock);
> +	}
> +
> +err:
> +	rte_spinlock_unlock(&adapter->repr_lock);
> +	rte_free(vlist_resp);
> +	return ret;
> +}

^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v4 10/10] net/cpfl: support link update for representor
  2023-09-08 11:17       ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
@ 2023-09-09  3:05         ` Wu, Jingjing
  0 siblings, 0 replies; 89+ messages in thread
From: Wu, Jingjing @ 2023-09-09  3:05 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, September 8, 2023 7:17 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v4 10/10] net/cpfl: support link update for representor
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Add link update ops for representor.
> 
> Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.h      |  1 +
>  drivers/net/cpfl/cpfl_representor.c | 21 +++++++++++++++++++++
>  2 files changed, 22 insertions(+)
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 4937d2c6e3..0dd9d4e7f9 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -162,6 +162,7 @@ struct cpfl_repr {
>  	struct cpfl_repr_id repr_id;
>  	struct rte_ether_addr mac_addr;
>  	struct cpfl_vport_info *vport_info;
> +	bool func_up; /* If the represented function is up */
>  };
> 
>  struct cpfl_adapter_ext {
> diff --git a/drivers/net/cpfl/cpfl_representor.c
> b/drivers/net/cpfl/cpfl_representor.c
> index 0cd92b1351..3c0fa957de 100644
> --- a/drivers/net/cpfl/cpfl_representor.c
> +++ b/drivers/net/cpfl/cpfl_representor.c
> @@ -308,6 +308,23 @@ cpfl_repr_tx_queue_setup(__rte_unused struct
> rte_eth_dev *dev,
>  	return 0;
>  }
> 
> +static int
> +cpfl_repr_link_update(struct rte_eth_dev *ethdev,
> +		      __rte_unused int wait_to_complete)
> +{
> +	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
> +	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
> +
> +	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
> +		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
> +		return -EINVAL;
> +	}
> +	dev_link->link_status = repr->func_up ?
> +		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
> +
> +	return 0;
> +}
> +
>  static const struct eth_dev_ops cpfl_repr_dev_ops = {
>  	.dev_start		= cpfl_repr_dev_start,
>  	.dev_stop		= cpfl_repr_dev_stop,
> @@ -317,6 +334,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
> 
>  	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
>  	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
> +
> +	.link_update		= cpfl_repr_link_update,
>  };
> 
>  static int
> @@ -331,6 +350,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void
> *init_param)
>  	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
>  	repr->itf.adapter = adapter;
>  	repr->itf.data = eth_dev->data;
> +	if (repr->vport_info->vport_info.vport_status ==
> CPCHNL2_VPORT_STATUS_ENABLED)
> +		repr->func_up = true;
> 
No event process? Think about how the VSI status changes are handled.

>  	eth_dev->dev_ops = &cpfl_repr_dev_ops;
> 
> --
> 2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 00/10] net/cpfl: support port representor
  2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
                         ` (9 preceding siblings ...)
  2023-09-08 11:17       ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
@ 2023-09-12 16:26       ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
                           ` (10 more replies)
  10 siblings, 11 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. code refine for representor support
2. support port representor

v5 changes:
 - refine cpfl_vport_info structure
 - refine cpfl_repr_link_update function
 - refine cpfl_repr_create function
v4 changes:
 - change the patch order
 - merge two patches
 - revert enum change
v3 changes:
 - Refine commit log.
 - Add macro and enum.
 - Refine doc.
 - Refine error handling.
v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (10):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: refine handle virtual channel message
  net/cpfl: introduce CP channel API
  net/cpfl: enable vport mapping
  net/cpfl: support vport list/info get
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support link update for representor

 doc/guides/nics/cpfl.rst               |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h         | 340 +++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c         | 621 ++++++++++++++++++++----
 drivers/net/cpfl/cpfl_ethdev.h         |  91 +++-
 drivers/net/cpfl/cpfl_representor.c    | 632 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h    |  26 +
 drivers/net/cpfl/cpfl_vchnl.c          |  72 +++
 drivers/net/cpfl/meson.build           |   4 +-
 9 files changed, 1719 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 01/10] net/cpfl: refine devargs parse and process
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 02/10] net/cpfl: introduce interface structure beilei.xing
                           ` (9 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Keep devargs in adapter.
2. Refine handling of the case where no vport is specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++---------------
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter,
-		   struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct cpfl_devargs *cpfl_args = &adapter->devargs;
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret;
 
 	cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
-	/* check parsed devargs */
-	if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-	    adapter->max_vport_nb) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     adapter->max_vport_nb);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-		if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
-				     cpfl_args->req_vports[i], adapter->max_vport_nb - 1);
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been requested",
-				     cpfl_args->req_vports[i]);
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 	adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i;
+
+	/* refine vport number, at least 1 vport */
+	if (devargs->req_vport_nb == 0) {
+		devargs->req_vport_nb = 1;
+		devargs->req_vports[0] = 0;
+	}
+
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + devargs->req_vport_nb >
+	    adapter->max_vport_nb) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     adapter->max_vport_nb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     devargs->req_vports[i], adapter->max_vport_nb - 1);
+			return -EINVAL;
+		}
+
+		if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
+				     devargs->req_vports[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport_param vport_param;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret, i;
+
+	for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = adapter->devargs.req_vports[i];
+		vport_param.idx = cpfl_vport_idx_alloc(adapter);
+		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			break;
+		}
+		snprintf(name, sizeof(name), "net_%s_vport_%d",
+			 pci_dev->device.name,
+			 adapter->devargs.req_vports[i]);
+		ret = rte_eth_dev_create(&pci_dev->device, name,
+					    sizeof(struct cpfl_vport),
+					    NULL, NULL, cpfl_dev_vport_init,
+					    &vport_param);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to create vport %d",
+				    vport_param.devarg_id);
+	}
+
+	return 0;
+}
+
 static int
 cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
-	struct cpfl_vport_param vport_param;
 	struct cpfl_adapter_ext *adapter;
-	struct cpfl_devargs devargs;
-	char name[RTE_ETH_NAME_MAX_LEN];
-	int i, retval;
+	int retval;
 
 	if (!cpfl_adapter_list_init) {
 		rte_spinlock_init(&cpfl_adapter_lock);
@@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
+	retval = cpfl_parse_devargs(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return retval;
+	}
+
 	retval = cpfl_adapter_ext_init(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter.");
@@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next);
 	rte_spinlock_unlock(&cpfl_adapter_lock);
 
-	retval = cpfl_parse_devargs(pci_dev, adapter, &devargs);
+	retval = cpfl_vport_devargs_process(adapter);
 	if (retval != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		PMD_INIT_LOG(ERR, "Failed to process vport devargs");
 		goto err;
 	}
 
-	if (devargs.req_vport_nb == 0) {
-		/* If no vport devarg, create vport 0 by default. */
-		vport_param.adapter = adapter;
-		vport_param.devarg_id = 0;
-		vport_param.idx = cpfl_vport_idx_alloc(adapter);
-		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-			return 0;
-		}
-		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
-			 pci_dev->device.name);
-		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct cpfl_vport),
-					    NULL, NULL, cpfl_dev_vport_init,
-					    &vport_param);
-		if (retval != 0)
-			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-	} else {
-		for (i = 0; i < devargs.req_vport_nb; i++) {
-			vport_param.adapter = adapter;
-			vport_param.devarg_id = devargs.req_vports[i];
-			vport_param.idx = cpfl_vport_idx_alloc(adapter);
-			if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-				break;
-			}
-			snprintf(name, sizeof(name), "cpfl_%s_vport_%d",
-				 pci_dev->device.name,
-				 devargs.req_vports[i]);
-			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct cpfl_vport),
-						    NULL, NULL, cpfl_dev_vport_init,
-						    &vport_param);
-			if (retval != 0)
-				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    vport_param.devarg_id);
-		}
+	retval = cpfl_vport_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create vports.");
+		goto err;
 	}
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e42354f70..b637bf2e45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -115,6 +115,7 @@ struct cpfl_adapter_ext {
 	uint16_t cur_vport_nb;
 
 	uint16_t used_vecs_num;
+	struct cpfl_devargs devargs;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 02/10] net/cpfl: introduce interface structure
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
  2023-09-12 16:26         ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 03/10] net/cpfl: refine handle virtual channel message beilei.xing
                           ` (8 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Introduce the cpfl interface structure to distinguish between a vport and
a port representor.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 15 +++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
+	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..feb1edc4b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+	CPFL_ITF_TYPE_VPORT,
+};
+
+struct cpfl_itf {
+	enum cpfl_itf_type type;
+	struct cpfl_adapter_ext *adapter;
+	void *data;
+};
+
 struct cpfl_vport {
+	struct cpfl_itf itf;
 	struct idpf_vport base;
 	struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +135,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev)					\
+	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)				\
+	((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 03/10] net/cpfl: refine handle virtual channel message
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
  2023-09-12 16:26         ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
  2023-09-12 16:26         ` [PATCH v5 02/10] net/cpfl: introduce interface structure beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 04/10] net/cpfl: introduce CP channel API beilei.xing
                           ` (7 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Refine handling of virtual channel event messages.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 48 +++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..31a5822d2c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1450,40 +1450,52 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-	struct idpf_vport *vport = NULL;
+	struct cpfl_vport *vport = NULL;
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = &adapter->vports[i]->base;
-		if (vport->vport_id != vport_id)
+		vport = adapter->vports[i];
+		if (vport == NULL)
+			continue;
+		if (vport->base.vport_id != vport_id)
 			continue;
 		else
 			return vport;
 	}
 
-	return vport;
+	return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-	struct rte_eth_dev_data *data = vport->dev_data;
-	struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+	struct cpfl_vport *vport;
+	struct rte_eth_dev_data *data;
+	struct rte_eth_dev *dev;
 
 	if (msglen < sizeof(struct virtchnl2_event)) {
 		PMD_DRV_LOG(ERR, "Error event");
 		return;
 	}
 
+	vport = cpfl_find_vport(adapter, vc_event->vport_id);
+	if (!vport) {
+		PMD_DRV_LOG(ERR, "Can't find vport.");
+		return;
+	}
+
+	data = vport->itf.data;
+	dev = &rte_eth_devices[data->port_id];
+
 	switch (vc_event->event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-		vport->link_up = !!(vc_event->link_status);
-		vport->link_speed = vc_event->link_speed;
+		vport->base.link_up = !!(vc_event->link_status);
+		vport->base.link_speed = vc_event->link_speed;
 		cpfl_dev_link_update(dev, 0);
 		break;
 	default:
@@ -1498,10 +1510,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 	struct idpf_adapter *base = &adapter->base;
 	struct idpf_dma_mem *dma_mem = NULL;
 	struct idpf_hw *hw = &base->hw;
-	struct virtchnl2_event *vc_event;
 	struct idpf_ctlq_msg ctlq_msg;
 	enum idpf_mbx_opc mbx_op;
-	struct idpf_vport *vport;
 	uint16_t pending = 1;
 	uint32_t vc_op;
 	int ret;
@@ -1523,18 +1533,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 		switch (mbx_op) {
 		case idpf_mbq_opc_send_msg_to_peer_pf:
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
-				if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
-					PMD_DRV_LOG(ERR, "Error event");
-					return;
-				}
-				vc_event = (struct virtchnl2_event *)base->mbx_resp;
-				vport = cpfl_find_vport(adapter, vc_event->vport_id);
-				if (!vport) {
-					PMD_DRV_LOG(ERR, "Can't find vport.");
-					return;
-				}
-				cpfl_handle_event_msg(vport, base->mbx_resp,
-						      ctlq_msg.data_len);
+				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
+							    ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 04/10] net/cpfl: introduce CP channel API
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (2 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 03/10] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 05/10] net/cpfl: enable vport mapping beilei.xing
                           ` (6 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

CPCHNL2 defines the API (v2) used for communication between the
CPF driver and its on-chip management software. The CPFL PMD is a
specific CPF driver that utilizes CPCHNL2 for device configuration and
event probing.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +++++++++++++++++++++++++++++++++
 1 file changed, 340 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 0000000000..2eefcbcc10
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief      Command Opcodes
+ *              Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+	/* vport info */
+	CPCHNL2_OP_GET_VPORT_LIST		= 0x8025,
+	CPCHNL2_OP_GET_VPORT_INFO		= 0x8026,
+
+	/* DPHMA Event notifications */
+	CPCHNL2_OP_EVENT			= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT		8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
+
+#define CPCHNL2_FUNC_TYPE_PF		0
+#define CPCHNL2_FUNC_TYPE_SRIOV		1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED	0
+#define CPCHNL2_VPORT_STATUS_ENABLED	1
+#define CPCHNL2_VPORT_STATUS_DISABLED	2
+#define CPCHNL2_VPORT_STATUS_DESTROYED	3
+
+/* Queue Groups Extension */
+/**************************************************/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X	\
+	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+struct cpchnl2_queue_chunk {
+	u32 type;	       /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */
+	u32 start_queue_id;
+	u32 num_queues;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+	u16 num_chunks;
+	u8 reserved[6];
+	struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks);
+
+struct cpchnl2_rx_queue_group_info {
+	/* User can ask to update rss_lut size originally allocated
+	 * by CreateVport command. New size will be returned if allocation succeeded,
+	 * otherwise original rss_size from CreateVport will be returned.
+	 */
+	u16 rss_lut_size;
+	u8 pad[6]; /*Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info);
+
+struct cpchnl2_tx_queue_group_info {
+	u8 tx_tc; /*TX TC queue group will be connected to*/
+	/* Each group can have its own priority, value 0-7, while each group with unique
+	 * priority is strict priority. It can be single set of queue groups which configured with
+	 * same priority, then they are assumed part of WFQ arbitration group and are expected to be
+	 * assigned with weight.
+	 */
+	u8 priority;
+	/* Determines if queue group is expected to be Strict Priority according to its priority */
+	u8 is_sp;
+	u8 pad;
+	/* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set.
+	 * The weights of the groups are independent of each other. Possible values: 1-200.
+	 */
+	u16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	u8 pad2[8]; /* Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info);
+
+struct cpchnl2_queue_group_id {
+	/* Queue group ID - depended on it's type:
+	 * Data & p2p - is an index which is relative to Vport.
+	 * Config & Mailbox - is an ID which is relative to func.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by host and assigned by Control plane.
+	 */
+	u16 queue_group_id;
+	/* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+	u16 queue_group_type;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id);
+
+struct cpchnl2_queue_group_info {
+	/* IN */
+	struct cpchnl2_queue_group_id qg_id;
+
+	/* IN, Number of queues of different types in the group. */
+	u16 num_tx_q;
+	u16 num_tx_complq;
+	u16 num_rx_q;
+	u16 num_rx_bufq;
+
+	struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+	struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+	u8 egress_port;
+	u8 pad[39]; /*Future extension purpose*/
+	struct cpchnl2_queue_grp_chunks chunks;
+};
+CPCHNL2_CHECK_STRUCT_LEN(344, cpchnl2_queue_group_info);
+
+struct cpchnl2_queue_groups {
+	u16 num_queue_groups; /* Number of queue groups in struct below */
+	u8 pad[6];
+	/* group information , number is determined by param above */
+	struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups);
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+	CPCHNL2_FTYPE_LAN_PF = 0,
+	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport_id {
+	u32 vport_id;
+	u16 vport_type;
+	u8 pad[2];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_vport_id);
+
+struct cpchnl2_func_id {
+	/* Function type: 0 - LAN PF, 1 -  LAN VF, Rest - "reserved" */
+	u8 func_type;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs
+	 * and 8-12 CPFs are valid
+	 */
+	u8 pf_id;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_func_id);
+
+/* Note! Do not change the fields and especially their order as should eventually
+ * be aligned to 32bit. Must match the virtchnl structure definition.
+ * If should change, change also the relevant FAS and virtchnl code, under permission.
+ */
+struct cpchnl2_vport_info {
+	u16 vport_index;
+	/* VSI index, global indexing aligned to HW.
+	 * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command.
+	 * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev".
+	 */
+	u16 vsi_id;
+	u8 vport_status;	/* enum cpchnl2_vport_status */
+	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	u8 func_type;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	/* Always relevant, indexing is according to LAN PE 0-15,
+	 * while only 0-4 APFs and 8-12 CPFs are valid.
+	 */
+	u8 pf_id;
+	u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */
+	/* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */
+	u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS];
+	u16 vmrl_id;
+	/* Indicates if IMC created SEM MAC rule for this Vport.
+	 * Currently this is done by IMC for all Vport of type "Default" only,
+	 * but can be different in the future.
+	 */
+	u8 sem_mac_rule_exist;
+	/* Bitmask to inform which TC is valid.
+	 * 0x1 << TCnum. 1b: valid else 0.
+	 * Driven by Node Policy on system level, then Sysetm level TCs are
+	 * reported to IDPF and it can enable Vport level TCs on TX according
+	 * to Syetm enabled ones.
+	 * If TC aware mode - bit set for valid TC.
+	 * otherwise =1 (only bit 0 is set. represents the VSI
+	 */
+	u8 tx_tc_bitmask;
+	/* For each valid TC, TEID of VPORT node over TC in TX LAN WS.
+	 * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID
+	 */
+	u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT];
+	/* For each valid TC, bandwidth in mbps.
+	 * Default BW per Vport is from Node policy
+	 * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth
+	 */
+	u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT];
+	/* From Node Policy. */
+	u16 max_mtu;
+	u16 default_rx_qid;	/* Default LAN RX Queue ID */
+	u16 vport_flags; /* see: VPORT_FLAGS */
+	u8 egress_port;
+	u8 pad_reserved[5];
+};
+CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_LIST
+ */
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ */
+struct cpchnl2_get_vport_list_request {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ *        other PE types
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ * @param nof_vports Number of vports created on the function
+ * @param vports array of the IDs and types. vport ID is elative to its func (PF/VF). same as in
+ *        Create Vport
+ * vport_type: Aligned to VirtChnl types: Default, SIOV, etc.
+ */
+struct cpchnl2_get_vport_list_response {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u16 nof_vports;
+	u8 pad[2];
+	struct cpchnl2_vport_id vports[];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_response);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_INFO
+ */
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request
+ * @param vport a structure containing vport_id (relative to function) and type
+ * @param func a structure containing function type, pf_id, vf_id
+ */
+struct cpchnl2_get_vport_info_request {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_get_vport_info_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response
+ * @param vport a structure containing vport_id (relative to function) and type to get info for
+ * @param info a structure all the information for a given vport
+ * @param queue_groups a structure containing all the queue groups of the given vport
+ */
+struct cpchnl2_get_vport_info_response {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_vport_info info;
+	struct cpchnl2_queue_groups queue_groups;
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_get_vport_info_response);
+
+ /* Cpchnl events
+  * Sends event message to inform the peer of notification that may affect it.
+  * No direct response is expected from the peer, though it may generate other
+  * messages in response to this one.
+  */
+enum cpchnl2_event {
+	CPCHNL2_EVENT_UNKNOWN = 0,
+	CPCHNL2_EVENT_VPORT_CREATED,
+	CPCHNL2_EVENT_VPORT_DESTROYED,
+	CPCHNL2_EVENT_VPORT_ENABLED,
+	CPCHNL2_EVENT_VPORT_DISABLED,
+	CPCHNL2_PKG_EVENT,
+	CPCHNL2_EVENT_ADD_QUEUE_GROUPS,
+	CPCHNL2_EVENT_DEL_QUEUE_GROUPS,
+	CPCHNL2_EVENT_ADD_QUEUES,
+	CPCHNL2_EVENT_DEL_QUEUES
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_CREATED
+ */
+struct cpchnl2_event_vport_created {
+	struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_info info; /* Vport configuration info */
+	struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_event_vport_created);
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_DESTROYED
+ */
+struct cpchnl2_event_vport_destroyed {
+	/* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_event_vport_destroyed);
+
+struct cpchnl2_event_info {
+	struct {
+		s32 type;		/* See enum cpchnl2_event */
+		uint8_t reserved[4];	/* Reserved */
+	} header;
+	union {
+		struct cpchnl2_event_vport_created vport_created;
+		struct cpchnl2_event_vport_destroyed vport_destroyed;
+	} data;
+};
+
+#endif /* _CPFL_CPCHNL_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 05/10] net/cpfl: enable vport mapping
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (3 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 04/10] net/cpfl: introduce CP channel API beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 06/10] net/cpfl: support vport list/info get beilei.xing
                           ` (5 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Handle cpchnl events for vport create/destroy.
2. Use a hash table to store the vport_id to vport_info mapping.
3. Use a spinlock for thread safety.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  21 ++++-
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 31a5822d2c..ad21f901bb 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include <rte_dev.h>
 #include <errno.h>
 #include <rte_alarm.h>
+#include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1504,6 +1505,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vport_identity,
+		       struct cpchnl2_vport_info *vport_info)
+{
+	struct cpfl_vport_info *info = NULL;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret >= 0) {
+		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+		/* overwrite info */
+		if (info)
+			info->vport_info = *vport_info;
+		goto fini;
+	}
+
+	info = rte_zmalloc(NULL, sizeof(*info), 0);
+	if (info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	info->vport_info = *vport_info;
+
+	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+		rte_free(info);
+		goto err;
+	}
+
+fini:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+	struct cpfl_vport_info *info;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "vport id not exist");
+		goto err;
+	}
+
+	rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	rte_free(info);
+
+	return 0;
+
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+	struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+	struct cpchnl2_vport_info *info;
+	struct cpfl_vport_id vport_identity = { 0 };
+
+	if (msglen < sizeof(struct cpchnl2_event_info)) {
+		PMD_DRV_LOG(ERR, "Error event");
+		return;
+	}
+
+	switch (cpchnl2_event->header.type) {
+	case CPCHNL2_EVENT_VPORT_CREATED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+		info = &cpchnl2_event->data.vport_created.info;
+		vport_identity.func_type = info->func_type;
+		vport_identity.pf_id = info->pf_id;
+		vport_identity.vf_id = info->vf_id;
+		if (cpfl_vport_info_create(adapter, &vport_identity, info))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+		break;
+	case CPCHNL2_EVENT_VPORT_DESTROYED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+		vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+		vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+		vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+		if (cpfl_vport_info_destroy(adapter, &vport_identity))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+		break;
+	}
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
 				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
 							    ctlq_msg.data_len);
+			} else if (vc_op == CPCHNL2_OP_EVENT) {
+				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+							     ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
@@ -1610,6 +1716,48 @@ static struct virtchnl2_get_capabilities req_caps = {
 	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
 };
 
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-vport", adapter->name);
+
+	rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = adapter->name,
+		.entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_vport_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->vport_map_hash = rte_hash_create(&params);
+
+	if (adapter->vport_map_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+	const void *key = NULL;
+	struct cpfl_vport_map_info *info;
+	uint32_t iter = 0;
+
+	while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+		rte_free(info);
+
+	rte_hash_free(adapter->vport_map_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1634,6 +1782,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_adapter_init;
 	}
 
+	ret = cpfl_vport_map_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init vport map");
+		goto err_vport_map_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1658,6 +1812,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
 	idpf_adapter_deinit(base);
 err_adapter_init:
 	return ret;
@@ -1887,6 +2043,7 @@ static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
 
 	rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index feb1edc4b8..de86c49016 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
 #include <rte_spinlock.h>
 #include <rte_ethdev.h>
 #include <rte_kvargs.h>
+#include <rte_hash.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 
-#include "cpfl_logs.h"
-
 #include <idpf_common_device.h>
 #include <idpf_common_virtchnl.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
 
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct cpfl_vport_id {
+	uint32_t vport_id;
+	uint8_t func_type;
+	uint8_t pf_id;
+	uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+	struct cpchnl2_vport_info vport_info;
+	bool enabled;
+};
+
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
 };
@@ -127,6 +141,9 @@ struct cpfl_adapter_ext {
 
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
+
+	rte_spinlock_t vport_map_lock;
+	struct rte_hash *vport_map_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
     subdir_done()
 endif
 
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 06/10] net/cpfl: support vport list/info get
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (4 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 05/10] net/cpfl: enable vport mapping beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 07/10] net/cpfl: parse representor devargs beilei.xing
                           ` (4 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
CPCHNL2_OP_CPF_GET_VPORT_INFO.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 ++++
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index de86c49016..4975c05a55 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -148,6 +148,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 0000000000..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include <idpf_common_virtchnl.h>
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpchnl2_get_vport_list_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.func_type = vi->func_type;
+	request.pf_id = vi->pf_id;
+	request.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+	return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+		       struct cpchnl2_vport_id *vport_id,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpchnl2_get_vport_info_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	request.vport.vport_id = vport_id->vport_id;
+	request.vport.vport_type = vport_id->vport_type;
+	request.func.func_type = vi->func_type;
+	request.func.pf_id = vi->pf_id;
+	request.func.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..2f0f5d8434 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 07/10] net/cpfl: parse representor devargs
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (5 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 06/10] net/cpfl: support vport list/info get beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 08/10] net/cpfl: support probe again beilei.xing
                           ` (3 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : host (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- host > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if the default values are omitted.

representor=c0pf0
  -- host > apf
     same as pf0 if the default value is omitted.

representor=c1pf0
  -- accelerator core > apf

multiple representor devargs are supported.
e.g.: create 4 representors for 4 vfs on host APF and one
representor for APF on accelerator core.

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/nics/cpfl.rst               |  36 +++++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c         | 179 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h         |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..83a18c3f2e 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor``, the user can specify
+  the functions for which to create port representors.
+
+  Format is::
+
+    [[c<controller_id>]pf<pf_id>]vf<vf_id>
+
+  Controller_id 0 is host (default), while 1 is accelerator core.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default values can be omitted.
+
+  Create 4 representors for 4 vfs on host APF::
+
+    -a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on accelerator core::
+
+    -a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on host APF and one representor for CPF on accelerator core::
+
+    -a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 ------------------------------
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..3d9be208d0 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,9 @@ New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ad21f901bb..eb57e355d2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
+#include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val)
+{
+	uint16_t i;
+
+	for (i = 0; i < *len_list; i++) {
+		if (list[i] == val)
+			return 0;
+	}
+	if (*len_list >= max_list)
+		return -1;
+	list[(*len_list)++] = val;
+	return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+	const uint16_t max_list)
+{
+	uint16_t lo, hi, val;
+	int result, n = 0;
+	const char *pos = str;
+
+	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+	if (result == 1) {
+		if (enlist(list, len_list, max_list, lo) != 0)
+			return NULL;
+	} else if (result == 2) {
+		if (lo > hi)
+			return NULL;
+		for (val = lo; val <= hi; val++) {
+			if (enlist(list, len_list, max_list, val) != 0)
+				return NULL;
+		}
+	} else {
+		return NULL;
+	}
+	return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list)
+{
+	const char *pos = str;
+
+	if (*pos == '[')
+		pos++;
+	while (1) {
+		pos = process_range(pos, list, len_list, max_list);
+		if (pos == NULL)
+			return NULL;
+		if (*pos != ',') /* end of list */
+			break;
+		pos++;
+	}
+	if (*str == '[' && *pos != ']')
+		return NULL;
+	if (*pos == ']')
+		pos++;
+	return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+	struct cpfl_devargs *devargs = args;
+	struct rte_eth_devargs *eth_da;
+	const char *str = value;
+
+	if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+		return -EINVAL;
+
+	eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+	if (str[0] == 'c') {
+		str += 1;
+		str = process_list(str, eth_da->mh_controllers,
+				&eth_da->nb_mh_controllers,
+				RTE_DIM(eth_da->mh_controllers));
+		if (str == NULL)
+			goto done;
+	}
+	if (str[0] == 'p' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_PF;
+		str += 2;
+		str = process_list(str, eth_da->ports,
+				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
+		if (str == NULL || str[0] == '\0')
+			goto done;
+	} else if (eth_da->nb_mh_controllers > 0) {
+		/* 'c' must followed by 'pf'. */
+		str = NULL;
+		goto done;
+	}
+	if (str[0] == 'v' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+		str += 2;
+	} else if (str[0] == 's' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_SF;
+		str += 2;
+	} else {
+		/* 'pf' must followed by 'vf' or 'sf'. */
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+			str = NULL;
+			goto done;
+		}
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+	}
+	str = process_list(str, eth_da->representor_ports,
+		&eth_da->nb_representor_ports,
+		RTE_DIM(eth_da->representor_ports));
+done:
+	if (str == NULL) {
+		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
+		return -1;
+	}
+
+	devargs->repr_args_num++;
+
+	return 0;
+}
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	cpfl_args->repr_args_num = 0;
+	ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args);
+
+	if (ret != 0)
+		goto fail;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2087,6 +2218,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
+static int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2165,6 +2338,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto err;
 	}
 
+	retval = cpfl_repr_devargs_process(adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
+		goto err;
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 4975c05a55..b03666f5ea 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -60,16 +60,24 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_HOST	0
+#define CPFL_HOST_ID_ACC	1
+#define CPFL_PF_TYPE_APF	0
+#define CPFL_PF_TYPE_CPF	1
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
+#define CPFL_REPR_ARG_NUM_MAX	4
 /* Struct used when parse driver specific devargs */
 struct cpfl_devargs {
 	uint16_t req_vports[CPFL_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
+	uint8_t repr_args_num;
+	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
 };
 
 struct p2p_queue_chunks_info {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 08/10] net/cpfl: support probe again
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (6 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 07/10] net/cpfl: parse representor devargs beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 09/10] net/cpfl: create port representor beilei.xing
                           ` (2 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Only the representor devargs will be parsed when probing again.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++-------
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index eb57e355d2..47c4c5c796 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
 	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+	CPFL_REPRESENTOR,
+	NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
 	RTE_ETH_SPEED_NUM_NONE,
 	RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (devargs == NULL)
 		return 0;
 
-	kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+	kvlist = rte_kvargs_parse(devargs->args,
+			first ? cpfl_valid_args_first : cpfl_valid_args_again);
 	if (kvlist == NULL) {
 		PMD_INIT_LOG(ERR, "invalid kvargs key");
 		return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
+	if (!first)
+		return 0;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	       struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
 
-	if (!cpfl_adapter_list_init) {
-		rte_spinlock_init(&cpfl_adapter_lock);
-		TAILQ_INIT(&cpfl_adapter_list);
-		cpfl_adapter_list_init = true;
-	}
-
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
 	if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
-	retval = cpfl_parse_devargs(pci_dev, adapter);
+	retval = cpfl_parse_devargs(pci_dev, adapter, true);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_parse_devargs(pci_dev, adapter, false);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return ret;
+	}
+
+	ret = cpfl_repr_devargs_process(adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	struct cpfl_adapter_ext *adapter;
+
+	if (!cpfl_adapter_list_init) {
+		rte_spinlock_init(&cpfl_adapter_lock);
+		TAILQ_INIT(&cpfl_adapter_list);
+		cpfl_adapter_list_init = true;
+	}
+
+	adapter = cpfl_find_adapter_ext(pci_dev);
+
+	if (adapter == NULL)
+		return cpfl_pci_probe_first(pci_dev);
+	else
+		return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
 	.id_table	= pci_id_cpfl_map,
-	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING |
+			  RTE_PCI_DRV_PROBE_AGAIN,
 	.probe		= cpfl_pci_probe,
 	.remove		= cpfl_pci_remove,
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 09/10] net/cpfl: create port representor
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (7 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 08/10] net/cpfl: support probe again beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 16:26         ` [PATCH v5 10/10] net/cpfl: support link update for representor beilei.xing
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Track representor requests in the allowlist.
A representor will only be created for an active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 117 +++---
 drivers/net/cpfl/cpfl_ethdev.h      |  39 +-
 drivers/net/cpfl/cpfl_representor.c | 581 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 715 insertions(+), 49 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 47c4c5c796..375bc8098c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1645,10 +1645,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
-		       struct cpchnl2_vport_info *vport_info)
+		       struct cpchnl2_event_vport_created *vport_created)
 {
 	struct cpfl_vport_info *info = NULL;
 	int ret;
@@ -1659,7 +1659,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
 		/* overwrite info */
 		if (info)
-			info->vport_info = *vport_info;
+			info->vport = *vport_created;
 		goto fini;
 	}
 
@@ -1670,7 +1670,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		goto err;
 	}
 
-	info->vport_info = *vport_info;
+	info->vport = *vport_created;
 
 	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
 	if (ret < 0) {
@@ -1696,7 +1696,7 @@ cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *
 	rte_spinlock_lock(&adapter->vport_map_lock);
 	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
 	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "vport id not exist");
+		PMD_DRV_LOG(ERR, "vport id does not exist");
 		goto err;
 	}
 
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_al", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_allowlist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_allowlist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_allowlist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+		goto err_repr_allowlist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2343,11 +2346,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2374,6 +2389,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b03666f5ea..a4ffd51fb3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,31 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define CPFL_HOST0_APF		0
+#define CPFL_ACC_APF_ID		4
+#define CPFL_HOST0_CPF_ID	8
+#define CPFL_ACC_CPF_ID		12
+
+#define CPFL_VPORT_LAN_PF	0
+#define CPFL_VPORT_LAN_VF	1
+
+/* bit[15:14] type
+ * bit[13] host/accelerator core
+ * bit[12] apf/cpf
+ * bit[11:0] vf
+ */
+#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id)	\
+	((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) +	\
+	 (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -104,12 +125,13 @@ struct cpfl_vport_id {
 };
 
 struct cpfl_vport_info {
-	struct cpchnl2_vport_info vport_info;
+	struct cpchnl2_event_vport_created vport;
 	bool enabled;
 };
 
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
+	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
 struct cpfl_itf {
@@ -135,6 +157,13 @@ struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -152,10 +181,16 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_allowlist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_event_vport_created *vport);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
@@ -170,6 +205,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..d2558c39a8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+	return ret;
+}
+
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain */
+	dev_info->switch_info.port_id = repr->vport_info->vport.info.vsi_id;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+static int
+cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 __rte_unused struct rte_mempool *pool)
+{
+	/* Dummy */
+	return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	/* Dummy */
+	return 0;
+}
+
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
+	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+};
+
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	eth_dev->data->representor_id =
+		CPFL_REPRESENTOR_ID(repr->repr_id.type,
+				    repr->repr_id.host_id,
+				    repr->repr_id.pf_id,
+				    repr->repr_id.vf_id);
+
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+static bool
+cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == CPFL_VPORT_LAN_PF) {
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		if (func_id < 0 || func_id != info->pf_id)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == CPFL_VPORT_LAN_VF) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, (struct cpchnl2_event_vport_created *)response);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
+	int ret;
+
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_allowlist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		int i;
+
+		/* skip representors that have already been created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			goto err;
+		}
+
+		if (vlist_resp->nof_vports == 0) {
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s", name);
+			continue;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update  host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+		}
+
+		/* find the matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!cpfl_match_repr_with_vport(repr_id, &vi->vport.info))
+				continue;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				goto err;
+			}
+			break;
+		}
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	rte_free(vlist_resp);
+	return ret;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+struct cpfl_repr_id {
+	uint8_t host_id;
+	uint8_t pf_id;
+	uint8_t type;
+	uint8_t vf_id;
+};
+
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info;
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 2f0f5d8434..d8b92ae16a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v5 10/10] net/cpfl: support link update for representor
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (8 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 09/10] net/cpfl: create port representor beilei.xing
@ 2023-09-12 16:26         ` beilei.xing
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 16:26 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add link update ops for representor.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  1 +
 drivers/net/cpfl/cpfl_representor.c | 89 +++++++++++++++++++++++------
 2 files changed, 71 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a4ffd51fb3..d0dcc0cc05 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -162,6 +162,7 @@ struct cpfl_repr {
 	struct cpfl_repr_id repr_id;
 	struct rte_ether_addr mac_addr;
 	struct cpfl_vport_info *vport_info;
+	bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index d2558c39a8..4d15a26c80 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -308,6 +308,72 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+		      int wait_to_complete)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+	struct cpchnl2_get_vport_info_response response;
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+
+	if (wait_to_complete) {
+		if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
+			/* PF */
+			vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+			vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id);
+			vi.vf_id = 0;
+		} else {
+			/* VF */
+			vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+			vi.pf_id = CPFL_HOST0_APF;
+			vi.vf_id = repr->repr_id.vf_id;
+		}
+		ret = cpfl_cc_vport_info_get(adapter, &repr->vport_info->vport.vport,
+					     &vi, &response);
+		if (ret < 0) {
+			PMD_INIT_LOG(ERR, "Fail to get vport info.");
+			return ret;
+		}
+
+		if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+			repr->func_up = true;
+		else
+			repr->func_up = false;
+	}
+
+	dev_link->link_status = repr->func_up ?
+		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -317,6 +383,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+
+	.link_update		= cpfl_repr_link_update,
 };
 
 static int
@@ -331,6 +399,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
 	repr->itf.adapter = adapter;
 	repr->itf.data = eth_dev->data;
+	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
@@ -349,25 +419,6 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
 }
 
-static int
-cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
-{
-	if ((host_id != CPFL_HOST_ID_HOST &&
-	     host_id != CPFL_HOST_ID_ACC) ||
-	    (pf_id != CPFL_PF_TYPE_APF &&
-	     pf_id != CPFL_PF_TYPE_CPF))
-		return -EINVAL;
-
-	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
-		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
-		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
-		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
-		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
-	};
-
-	return func_id_map[host_id][pf_id];
-}
-
 static bool
 cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 			   struct cpchnl2_vport_info *info)
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 00/10] net/cpfl: support port representor
  2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
                           ` (9 preceding siblings ...)
  2023-09-12 16:26         ` [PATCH v5 10/10] net/cpfl: support link update for representor beilei.xing
@ 2023-09-12 17:30         ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
                             ` (10 more replies)
  10 siblings, 11 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. code refine for representor support
2. support port representor

v6 changes:
 - move some change from 08/10 to 06/10 patch
v5 changes:
 - refine cpfl_vport_info structure
 - refine cpfl_repr_link_update function
 - refine cpfl_repr_create function
v4 changes:
 - change the patch order
 - merge two patches
 - revert enum change
v3 changes:
 - Refine commit log.
 - Add macro and enum.
 - Refine doc.
 - Refine error handling.
v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (10):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: refine handle virtual channel message
  net/cpfl: introduce CP channel API
  net/cpfl: enable vport mapping
  net/cpfl: support vport list/info get
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support link update for representor

 doc/guides/nics/cpfl.rst               |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h         | 340 +++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c         | 621 ++++++++++++++++++++----
 drivers/net/cpfl/cpfl_ethdev.h         |  91 +++-
 drivers/net/cpfl/cpfl_representor.c    | 632 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h    |  26 +
 drivers/net/cpfl/cpfl_vchnl.c          |  72 +++
 drivers/net/cpfl/meson.build           |   4 +-
 9 files changed, 1719 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 01/10] net/cpfl: refine devargs parse and process
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 02/10] net/cpfl: introduce interface structure beilei.xing
                             ` (9 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Keep devargs in adapter.
2. Refine handling the case with no vport be specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++++++++++++++++++---------------
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter,
-		   struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct cpfl_devargs *cpfl_args = &adapter->devargs;
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret;
 
 	cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
-	/* check parsed devargs */
-	if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-	    adapter->max_vport_nb) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     adapter->max_vport_nb);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-		if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
-				     cpfl_args->req_vports[i], adapter->max_vport_nb - 1);
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been requested",
-				     cpfl_args->req_vports[i]);
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 	adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i;
+
+	/* refine vport number, at least 1 vport */
+	if (devargs->req_vport_nb == 0) {
+		devargs->req_vport_nb = 1;
+		devargs->req_vports[0] = 0;
+	}
+
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + devargs->req_vport_nb >
+	    adapter->max_vport_nb) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     adapter->max_vport_nb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     devargs->req_vports[i], adapter->max_vport_nb - 1);
+			return -EINVAL;
+		}
+
+		if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
+				     devargs->req_vports[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport_param vport_param;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int ret, i;
+
+	for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = adapter->devargs.req_vports[i];
+		vport_param.idx = cpfl_vport_idx_alloc(adapter);
+		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			break;
+		}
+		snprintf(name, sizeof(name), "net_%s_vport_%d",
+			 pci_dev->device.name,
+			 adapter->devargs.req_vports[i]);
+		ret = rte_eth_dev_create(&pci_dev->device, name,
+					    sizeof(struct cpfl_vport),
+					    NULL, NULL, cpfl_dev_vport_init,
+					    &vport_param);
+		if (ret != 0)
+			PMD_DRV_LOG(ERR, "Failed to create vport %d",
+				    vport_param.devarg_id);
+	}
+
+	return 0;
+}
+
 static int
 cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
-	struct cpfl_vport_param vport_param;
 	struct cpfl_adapter_ext *adapter;
-	struct cpfl_devargs devargs;
-	char name[RTE_ETH_NAME_MAX_LEN];
-	int i, retval;
+	int retval;
 
 	if (!cpfl_adapter_list_init) {
 		rte_spinlock_init(&cpfl_adapter_lock);
@@ -1938,6 +1977,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
+	retval = cpfl_parse_devargs(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return retval;
+	}
+
 	retval = cpfl_adapter_ext_init(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init adapter.");
@@ -1948,49 +1993,16 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	TAILQ_INSERT_TAIL(&cpfl_adapter_list, adapter, next);
 	rte_spinlock_unlock(&cpfl_adapter_lock);
 
-	retval = cpfl_parse_devargs(pci_dev, adapter, &devargs);
+	retval = cpfl_vport_devargs_process(adapter);
 	if (retval != 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		PMD_INIT_LOG(ERR, "Failed to process vport devargs");
 		goto err;
 	}
 
-	if (devargs.req_vport_nb == 0) {
-		/* If no vport devarg, create vport 0 by default. */
-		vport_param.adapter = adapter;
-		vport_param.devarg_id = 0;
-		vport_param.idx = cpfl_vport_idx_alloc(adapter);
-		if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-			return 0;
-		}
-		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
-			 pci_dev->device.name);
-		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct cpfl_vport),
-					    NULL, NULL, cpfl_dev_vport_init,
-					    &vport_param);
-		if (retval != 0)
-			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-	} else {
-		for (i = 0; i < devargs.req_vport_nb; i++) {
-			vport_param.adapter = adapter;
-			vport_param.devarg_id = devargs.req_vports[i];
-			vport_param.idx = cpfl_vport_idx_alloc(adapter);
-			if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
-				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
-				break;
-			}
-			snprintf(name, sizeof(name), "cpfl_%s_vport_%d",
-				 pci_dev->device.name,
-				 devargs.req_vports[i]);
-			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct cpfl_vport),
-						    NULL, NULL, cpfl_dev_vport_init,
-						    &vport_param);
-			if (retval != 0)
-				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    vport_param.devarg_id);
-		}
+	retval = cpfl_vport_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create vports.");
+		goto err;
 	}
 
 	return 0;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e42354f70..b637bf2e45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -115,6 +115,7 @@ struct cpfl_adapter_ext {
 	uint16_t cur_vport_nb;
 
 	uint16_t used_vecs_num;
+	struct cpfl_devargs devargs;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 02/10] net/cpfl: introduce interface structure
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
  2023-09-12 17:30           ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 03/10] net/cpfl: refine handle virtual channel message beilei.xing
                             ` (8 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Introduce the cpfl interface structure to distinguish a vport from a port
representor.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 15 +++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
+	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+	cpfl_vport->itf.adapter = adapter;
+	cpfl_vport->itf.data = dev->data;
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..feb1edc4b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+	CPFL_ITF_TYPE_VPORT,
+};
+
+struct cpfl_itf {
+	enum cpfl_itf_type type;
+	struct cpfl_adapter_ext *adapter;
+	void *data;
+};
+
 struct cpfl_vport {
+	struct cpfl_itf itf;
 	struct idpf_vport base;
 	struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +135,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev)					\
+	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)				\
+	((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 03/10] net/cpfl: refine handle virtual channel message
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
  2023-09-12 17:30           ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
  2023-09-12 17:30           ` [PATCH v6 02/10] net/cpfl: introduce interface structure beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 04/10] net/cpfl: introduce CP channel API beilei.xing
                             ` (7 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Refine the handling of virtual channel event messages.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 48 +++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..31a5822d2c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1450,40 +1450,52 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-	struct idpf_vport *vport = NULL;
+	struct cpfl_vport *vport = NULL;
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = &adapter->vports[i]->base;
-		if (vport->vport_id != vport_id)
+		vport = adapter->vports[i];
+		if (vport == NULL)
+			continue;
+		if (vport->base.vport_id != vport_id)
 			continue;
 		else
 			return vport;
 	}
 
-	return vport;
+	return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-	struct rte_eth_dev_data *data = vport->dev_data;
-	struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+	struct cpfl_vport *vport;
+	struct rte_eth_dev_data *data;
+	struct rte_eth_dev *dev;
 
 	if (msglen < sizeof(struct virtchnl2_event)) {
 		PMD_DRV_LOG(ERR, "Error event");
 		return;
 	}
 
+	vport = cpfl_find_vport(adapter, vc_event->vport_id);
+	if (!vport) {
+		PMD_DRV_LOG(ERR, "Can't find vport.");
+		return;
+	}
+
+	data = vport->itf.data;
+	dev = &rte_eth_devices[data->port_id];
+
 	switch (vc_event->event) {
 	case VIRTCHNL2_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-		vport->link_up = !!(vc_event->link_status);
-		vport->link_speed = vc_event->link_speed;
+		vport->base.link_up = !!(vc_event->link_status);
+		vport->base.link_speed = vc_event->link_speed;
 		cpfl_dev_link_update(dev, 0);
 		break;
 	default:
@@ -1498,10 +1510,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 	struct idpf_adapter *base = &adapter->base;
 	struct idpf_dma_mem *dma_mem = NULL;
 	struct idpf_hw *hw = &base->hw;
-	struct virtchnl2_event *vc_event;
 	struct idpf_ctlq_msg ctlq_msg;
 	enum idpf_mbx_opc mbx_op;
-	struct idpf_vport *vport;
 	uint16_t pending = 1;
 	uint32_t vc_op;
 	int ret;
@@ -1523,18 +1533,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 		switch (mbx_op) {
 		case idpf_mbq_opc_send_msg_to_peer_pf:
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
-				if (ctlq_msg.data_len < sizeof(struct virtchnl2_event)) {
-					PMD_DRV_LOG(ERR, "Error event");
-					return;
-				}
-				vc_event = (struct virtchnl2_event *)base->mbx_resp;
-				vport = cpfl_find_vport(adapter, vc_event->vport_id);
-				if (!vport) {
-					PMD_DRV_LOG(ERR, "Can't find vport.");
-					return;
-				}
-				cpfl_handle_event_msg(vport, base->mbx_resp,
-						      ctlq_msg.data_len);
+				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
+							    ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 04/10] net/cpfl: introduce CP channel API
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (2 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 03/10] net/cpfl: refine handle virtual channel message beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 05/10] net/cpfl: enable vport mapping beilei.xing
                             ` (6 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

The CPCHNL2 defines the API (v2) used for communication between the
CPF driver and its on-chip management software. The CPFL PMD is a
specific CPF driver to utilize CPCHNL2 for device configuration and
event probing.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +++++++++++++++++++++++++++++++++
 1 file changed, 340 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 0000000000..2eefcbcc10
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief      Command Opcodes
+ *              Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+	/* vport info */
+	CPCHNL2_OP_GET_VPORT_LIST		= 0x8025,
+	CPCHNL2_OP_GET_VPORT_INFO		= 0x8026,
+
+	/* DPHMA Event notifications */
+	CPCHNL2_OP_EVENT			= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT		8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
+
+#define CPCHNL2_FUNC_TYPE_PF		0
+#define CPCHNL2_FUNC_TYPE_SRIOV		1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED	0
+#define CPCHNL2_VPORT_STATUS_ENABLED	1
+#define CPCHNL2_VPORT_STATUS_DISABLED	2
+#define CPCHNL2_VPORT_STATUS_DESTROYED	3
+
+/* Queue Groups Extension */
+/**************************************************/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X	\
+	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+struct cpchnl2_queue_chunk {
+	u32 type;	       /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum nsl_lan_queue_type */
+	u32 start_queue_id;
+	u32 num_queues;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+	u16 num_chunks;
+	u8 reserved[6];
+	struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks);
+
+struct cpchnl2_rx_queue_group_info {
+	/* User can ask to update rss_lut size originally allocated
+	 * by CreateVport command. New size will be returned if allocation succeeded,
+	 * otherwise original rss_size from CreateVport will be returned.
+	 */
+	u16 rss_lut_size;
+	u8 pad[6]; /*Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info);
+
+struct cpchnl2_tx_queue_group_info {
+	u8 tx_tc; /*TX TC queue group will be connected to*/
+	/* Each group can have its own priority, value 0-7, while each group with unique
+	 * priority is strict priority. It can be single set of queue groups which configured with
+	 * same priority, then they are assumed part of WFQ arbitration group and are expected to be
+	 * assigned with weight.
+	 */
+	u8 priority;
+	/* Determines if queue group is expected to be Strict Priority according to its priority */
+	u8 is_sp;
+	u8 pad;
+	/* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration set.
+	 * The weights of the groups are independent of each other. Possible values: 1-200.
+	 */
+	u16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	u8 pad2[8]; /* Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info);
+
+struct cpchnl2_queue_group_id {
+	/* Queue group ID - depended on it's type:
+	 * Data & p2p - is an index which is relative to Vport.
+	 * Config & Mailbox - is an ID which is relative to func.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by host and assigned by Control plane.
+	 */
+	u16 queue_group_id;
+	/* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+	u16 queue_group_type;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id);
+
+struct cpchnl2_queue_group_info {
+	/* IN */
+	struct cpchnl2_queue_group_id qg_id;
+
+	/* IN, Number of queues of different types in the group. */
+	u16 num_tx_q;
+	u16 num_tx_complq;
+	u16 num_rx_q;
+	u16 num_rx_bufq;
+
+	struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+	struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+	u8 egress_port;
+	u8 pad[39]; /*Future extension purpose*/
+	struct cpchnl2_queue_grp_chunks chunks;
+};
+CPCHNL2_CHECK_STRUCT_LEN(344, cpchnl2_queue_group_info);
+
+struct cpchnl2_queue_groups {
+	u16 num_queue_groups; /* Number of queue groups in struct below */
+	u8 pad[6];
+	/* group information , number is determined by param above */
+	struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups);
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+	CPCHNL2_FTYPE_LAN_PF = 0,
+	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport_id {
+	u32 vport_id;
+	u16 vport_type;
+	u8 pad[2];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_vport_id);
+
+struct cpchnl2_func_id {
+	/* Function type: 0 - LAN PF, 1 -  LAN VF, Rest - "reserved" */
+	u8 func_type;
+	/* Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs
+	 * and 8-12 CPFs are valid
+	 */
+	u8 pf_id;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_func_id);
+
+/* Note! Do not change the fields and especially their order as should eventually
+ * be aligned to 32bit. Must match the virtchnl structure definition.
+ * If should change, change also the relevant FAS and virtchnl code, under permission.
+ */
+struct cpchnl2_vport_info {
+	u16 vport_index;
+	/* VSI index, global indexing aligned to HW.
+	 * Index of HW VSI is allocated by HMA during "CreateVport" virtChnl command.
+	 * Relevant for VSI backed Vports only, not relevant for vport_type = "Qdev".
+	 */
+	u16 vsi_id;
+	u8 vport_status;	/* enum cpchnl2_vport_status */
+	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	u8 func_type;
+	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
+	u16 vf_id;
+	/* Always relevant, indexing is according to LAN PE 0-15,
+	 * while only 0-4 APFs and 8-12 CPFs are valid.
+	 */
+	u8 pf_id;
+	u8 rss_enabled; /* if RSS is enabled for Vport. Driven by Node Policy. Currently '0' */
+	/* MAC Address assigned for this vport, all 0s for "Qdev" Vport type */
+	u8 mac_addr[CPCHNL2_ETH_LENGTH_OF_ADDRESS];
+	u16 vmrl_id;
+	/* Indicates if IMC created SEM MAC rule for this Vport.
+	 * Currently this is done by IMC for all Vport of type "Default" only,
+	 * but can be different in the future.
+	 */
+	u8 sem_mac_rule_exist;
+	/* Bitmask to inform which TC is valid.
+	 * 0x1 << TCnum. 1b: valid else 0.
+	 * Driven by Node Policy on system level, then Sysetm level TCs are
+	 * reported to IDPF and it can enable Vport level TCs on TX according
+	 * to Syetm enabled ones.
+	 * If TC aware mode - bit set for valid TC.
+	 * otherwise =1 (only bit 0 is set. represents the VSI
+	 */
+	u8 tx_tc_bitmask;
+	/* For each valid TC, TEID of VPORT node over TC in TX LAN WS.
+	 * If TC aware mode - up to 8 TC TEIDs. Otherwise vport_tc_teid[0] shall hold VSI TEID
+	 */
+	u32 vport_tc_teid[CPCHNL2_MAX_TC_AMOUNT];
+	/* For each valid TC, bandwidth in mbps.
+	 * Default BW per Vport is from Node policy
+	 * If TC aware mode -per TC. Otherwise, bandwidth[0] holds VSI bandwidth
+	 */
+	u32 bandwidth[CPCHNL2_MAX_TC_AMOUNT];
+	/* From Node Policy. */
+	u16 max_mtu;
+	u16 default_rx_qid;	/* Default LAN RX Queue ID */
+	u16 vport_flags; /* see: VPORT_FLAGS */
+	u8 egress_port;
+	u8 pad_reserved[5];
+};
+CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_LIST
+ */
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ */
+struct cpchnl2_get_vport_list_request {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
+ * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ *        other PE types
+ * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
+ *        CPFs are valid
+ * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
+ * @param nof_vports Number of vports created on the function
+ * @param vports array of the IDs and types. vport ID is elative to its func (PF/VF). same as in
+ *        Create Vport
+ * vport_type: Aligned to VirtChnl types: Default, SIOV, etc.
+ */
+struct cpchnl2_get_vport_list_response {
+	u8 func_type;
+	u8 pf_id;
+	u16 vf_id;
+	u16 nof_vports;
+	u8 pad[2];
+	struct cpchnl2_vport_id vports[];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_response);
+
+/*
+ * CPCHNL2_OP_GET_VPORT_INFO
+ */
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode request
+ * @param vport a structure containing vport_id (relative to function) and type
+ * @param func a structure containing function type, pf_id, vf_id
+ */
+struct cpchnl2_get_vport_info_request {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_get_vport_info_request);
+
+/**
+ * @brief Used for CPCHNL2_OP_GET_VPORT_INFO opcode response
+ * @param vport a structure containing vport_id (relative to function) and type to get info for
+ * @param info a structure all the information for a given vport
+ * @param queue_groups a structure containing all the queue groups of the given vport
+ */
+struct cpchnl2_get_vport_info_response {
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_vport_info info;
+	struct cpchnl2_queue_groups queue_groups;
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_get_vport_info_response);
+
+ /* Cpchnl events
+  * Sends event message to inform the peer of notification that may affect it.
+  * No direct response is expected from the peer, though it may generate other
+  * messages in response to this one.
+  */
+enum cpchnl2_event {
+	CPCHNL2_EVENT_UNKNOWN = 0,
+	CPCHNL2_EVENT_VPORT_CREATED,
+	CPCHNL2_EVENT_VPORT_DESTROYED,
+	CPCHNL2_EVENT_VPORT_ENABLED,
+	CPCHNL2_EVENT_VPORT_DISABLED,
+	CPCHNL2_PKG_EVENT,
+	CPCHNL2_EVENT_ADD_QUEUE_GROUPS,
+	CPCHNL2_EVENT_DEL_QUEUE_GROUPS,
+	CPCHNL2_EVENT_ADD_QUEUES,
+	CPCHNL2_EVENT_DEL_QUEUES
+};
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_CREATED
+ */
+struct cpchnl2_event_vport_created {
+	struct cpchnl2_vport_id vport; /* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_info info; /* Vport configuration info */
+	struct cpchnl2_queue_groups queue_groups; /* Vport assign queue groups configuration info */
+};
+CPCHNL2_CHECK_STRUCT_LEN(3896, cpchnl2_event_vport_created);
+
+/*
+ * This is for CPCHNL2_EVENT_VPORT_DESTROYED
+ */
+struct cpchnl2_event_vport_destroyed {
+	/* Vport identifier to point to specific Vport */
+	struct cpchnl2_vport_id vport;
+	struct cpchnl2_func_id func;
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_event_vport_destroyed);
+
+struct cpchnl2_event_info {
+	struct {
+		s32 type;		/* See enum cpchnl2_event */
+		uint8_t reserved[4];	/* Reserved */
+	} header;
+	union {
+		struct cpchnl2_event_vport_created vport_created;
+		struct cpchnl2_event_vport_destroyed vport_destroyed;
+	} data;
+};
+
+#endif /* _CPFL_CPCHNL_H_ */
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 05/10] net/cpfl: enable vport mapping
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (3 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 04/10] net/cpfl: introduce CP channel API beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 06/10] net/cpfl: support vport list/info get beilei.xing
                             ` (5 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

1. Handle cpchnl event for vport create/destroy
2. Use hash table to store vport_id to vport_info mapping
3. Use a spinlock for thread safety.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  21 ++++-
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 31a5822d2c..a7a045ace4 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include <rte_dev.h>
 #include <errno.h>
 #include <rte_alarm.h>
+#include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1504,6 +1505,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vport_identity,
+		       struct cpchnl2_event_vport_created *vport_created)
+{
+	struct cpfl_vport_info *info = NULL;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret >= 0) {
+		PMD_DRV_LOG(WARNING, "vport already exist, overwrite info anyway");
+		/* overwrite info */
+		if (info)
+			info->vport = *vport_created;
+		goto fini;
+	}
+
+	info = rte_zmalloc(NULL, sizeof(*info), 0);
+	if (info == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	info->vport = *vport_created;
+
+	ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+		rte_free(info);
+		goto err;
+	}
+
+fini:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id *vport_identity)
+{
+	struct cpfl_vport_info *info;
+	int ret;
+
+	rte_spinlock_lock(&adapter->vport_map_lock);
+	ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, (void **)&info);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "vport id doesn't exist");
+		goto err;
+	}
+
+	rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	rte_free(info);
+
+	return 0;
+
+err:
+	rte_spinlock_unlock(&adapter->vport_map_lock);
+	return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint16_t msglen)
+{
+	struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info *)msg;
+	struct cpchnl2_event_vport_created *vport_created;
+	struct cpfl_vport_id vport_identity = { 0 };
+
+	if (msglen < sizeof(struct cpchnl2_event_info)) {
+		PMD_DRV_LOG(ERR, "Error event");
+		return;
+	}
+
+	switch (cpchnl2_event->header.type) {
+	case CPCHNL2_EVENT_VPORT_CREATED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_created.vport.vport_id;
+		vport_created = &cpchnl2_event->data.vport_created;
+		vport_identity.func_type = vport_created->info.func_type;
+		vport_identity.pf_id = vport_created->info.pf_id;
+		vport_identity.vf_id = vport_created->info.vf_id;
+		if (cpfl_vport_info_create(adapter, &vport_identity, vport_created))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_CREATED");
+		break;
+	case CPCHNL2_EVENT_VPORT_DESTROYED:
+		vport_identity.vport_id = cpchnl2_event->data.vport_destroyed.vport.vport_id;
+		vport_identity.func_type = cpchnl2_event->data.vport_destroyed.func.func_type;
+		vport_identity.pf_id = cpchnl2_event->data.vport_destroyed.func.pf_id;
+		vport_identity.vf_id = cpchnl2_event->data.vport_destroyed.func.vf_id;
+		if (cpfl_vport_info_destroy(adapter, &vport_identity))
+			PMD_DRV_LOG(WARNING, "Failed to handle CPCHNL2_EVENT_VPORT_DESTROY");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " unknown event received %u", cpchnl2_event->header.type);
+		break;
+	}
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 			if (vc_op == VIRTCHNL2_OP_EVENT) {
 				cpfl_handle_vchnl_event_msg(adapter, adapter->base.mbx_resp,
 							    ctlq_msg.data_len);
+			} else if (vc_op == CPCHNL2_OP_EVENT) {
+				cpfl_handle_cpchnl_event_msg(adapter, adapter->base.mbx_resp,
+							     ctlq_msg.data_len);
 			} else {
 				if (vc_op == base->pend_cmd)
 					notify_cmd(base, base->cmd_retval);
@@ -1610,6 +1716,48 @@ static struct virtchnl2_get_capabilities req_caps = {
 	.other_caps = VIRTCHNL2_CAP_WB_ON_ITR
 };
 
+static int
+cpfl_vport_map_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-vport", adapter->name);
+
+	rte_spinlock_init(&adapter->vport_map_lock);
+
+#define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = adapter->name,
+		.entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_vport_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->vport_map_hash = rte_hash_create(&params);
+
+	if (adapter->vport_map_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create vport map hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
+{
+	const void *key = NULL;
+	struct cpfl_vport_map_info *info;
+	uint32_t iter = 0;
+
+	while (rte_hash_iterate(adapter->vport_map_hash, &key, (void **)&info, &iter) >= 0)
+		rte_free(info);
+
+	rte_hash_free(adapter->vport_map_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1634,6 +1782,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_adapter_init;
 	}
 
+	ret = cpfl_vport_map_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init vport map");
+		goto err_vport_map_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1658,6 +1812,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
+err_vport_map_init:
 	idpf_adapter_deinit(base);
 err_adapter_init:
 	return ret;
@@ -1887,6 +2043,7 @@ static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
 
 	rte_free(adapter->vports);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index feb1edc4b8..7d70ee13f2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -10,16 +10,18 @@
 #include <rte_spinlock.h>
 #include <rte_ethdev.h>
 #include <rte_kvargs.h>
+#include <rte_hash.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 
-#include "cpfl_logs.h"
-
 #include <idpf_common_device.h>
 #include <idpf_common_virtchnl.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 
+#include "cpfl_logs.h"
+#include "cpfl_cpchnl.h"
+
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
 
@@ -86,6 +88,18 @@ struct p2p_queue_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct cpfl_vport_id {
+	uint32_t vport_id;
+	uint8_t func_type;
+	uint8_t pf_id;
+	uint16_t vf_id;
+};
+
+struct cpfl_vport_info {
+	struct cpchnl2_event_vport_created vport;
+	bool enabled;
+};
+
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
 };
@@ -127,6 +141,9 @@ struct cpfl_adapter_ext {
 
 	uint16_t used_vecs_num;
 	struct cpfl_devargs devargs;
+
+	rte_spinlock_t vport_map_lock;
+	struct rte_hash *vport_map_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 8d62ebfd77..28167bb81d 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -11,7 +11,7 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
     subdir_done()
 endif
 
-deps += ['common_idpf']
+deps += ['hash', 'common_idpf']
 
 sources = files(
         'cpfl_ethdev.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 06/10] net/cpfl: support vport list/info get
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (4 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 05/10] net/cpfl: enable vport mapping beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 07/10] net/cpfl: parse representor devargs beilei.xing
                             ` (4 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
CPCHNL2_OP_CPF_GET_VPORT_INFO.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 ++++
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7d70ee13f2..eb51a12fac 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -148,6 +148,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpfl_vport_id *vi,
+			   struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p)					\
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 0000000000..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include <idpf_common_virtchnl.h>
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpchnl2_get_vport_list_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.func_type = vi->func_type;
+	request.pf_id = vi->pf_id;
+	request.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_LIST");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+	return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+		       struct cpchnl2_vport_id *vport_id,
+		       struct cpfl_vport_id *vi,
+		       struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpchnl2_get_vport_info_request request;
+	struct idpf_cmd_info args;
+	int err;
+
+	request.vport.vport_id = vport_id->vport_id;
+	request.vport.vport_type = vport_id->vport_type;
+	request.func.func_type = vi->func_type;
+	request.func.pf_id = vi->pf_id;
+	request.func.vf_id = vi->vf_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+	args.in_args = (uint8_t *)&request;
+	args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of CPCHNL2_OP_GET_VPORT_INFO");
+		return err;
+	}
+
+	rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..2f0f5d8434 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 07/10] net/cpfl: parse representor devargs
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (5 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 06/10] net/cpfl: support vport list/info get beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 08/10] net/cpfl: support probe again beilei.xing
                             ` (3 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : host (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- host > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if the default values are omitted.

representor=c0pf0
  -- host > apf
     same as pf0 if the default value is omitted.

representor=c1pf0
  -- accelerator core > apf

multiple representor devargs are supported.
e.g.: create 4 representors for 4 vfs on host APF and one
representor for APF on accelerator core.

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 doc/guides/nics/cpfl.rst               |  36 +++++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c         | 179 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h         |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..83a18c3f2e 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor`` the user can specify
+  which functions to create port representors.
+
+  Format is::
+
+    [[c<controller_id>]pf<pf_id>]vf<vf_id>
+
+  Controller_id 0 is host (default), while 1 is accelerator core.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default value can be omitted.
+
+  Create 4 representors for 4 vfs on host APF::
+
+    -a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+    -a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on accelerator core::
+
+    -a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on host APF and one representor for CPF on accelerator core::
+
+    -a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 ------------------------------
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..3d9be208d0 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,9 @@ New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a7a045ace4..b6fcfe4275 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include <rte_hash_crc.h>
 
 #include "cpfl_ethdev.h"
+#include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t val)
+{
+	uint16_t i;
+
+	for (i = 0; i < *len_list; i++) {
+		if (list[i] == val)
+			return 0;
+	}
+	if (*len_list >= max_list)
+		return -1;
+	list[(*len_list)++] = val;
+	return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+	const uint16_t max_list)
+{
+	uint16_t lo, hi, val;
+	int result, n = 0;
+	const char *pos = str;
+
+	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+	if (result == 1) {
+		if (enlist(list, len_list, max_list, lo) != 0)
+			return NULL;
+	} else if (result == 2) {
+		if (lo > hi)
+			return NULL;
+		for (val = lo; val <= hi; val++) {
+			if (enlist(list, len_list, max_list, val) != 0)
+				return NULL;
+		}
+	} else {
+		return NULL;
+	}
+	return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const uint16_t max_list)
+{
+	const char *pos = str;
+
+	if (*pos == '[')
+		pos++;
+	while (1) {
+		pos = process_range(pos, list, len_list, max_list);
+		if (pos == NULL)
+			return NULL;
+		if (*pos != ',') /* end of list */
+			break;
+		pos++;
+	}
+	if (*str == '[' && *pos != ']')
+		return NULL;
+	if (*pos == ']')
+		pos++;
+	return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+	struct cpfl_devargs *devargs = args;
+	struct rte_eth_devargs *eth_da;
+	const char *str = value;
+
+	if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+		return -EINVAL;
+
+	eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+	if (str[0] == 'c') {
+		str += 1;
+		str = process_list(str, eth_da->mh_controllers,
+				&eth_da->nb_mh_controllers,
+				RTE_DIM(eth_da->mh_controllers));
+		if (str == NULL)
+			goto done;
+	}
+	if (str[0] == 'p' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_PF;
+		str += 2;
+		str = process_list(str, eth_da->ports,
+				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
+		if (str == NULL || str[0] == '\0')
+			goto done;
+	} else if (eth_da->nb_mh_controllers > 0) {
+		/* 'c' must followed by 'pf'. */
+		str = NULL;
+		goto done;
+	}
+	if (str[0] == 'v' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+		str += 2;
+	} else if (str[0] == 's' && str[1] == 'f') {
+		eth_da->type = RTE_ETH_REPRESENTOR_SF;
+		str += 2;
+	} else {
+		/* 'pf' must followed by 'vf' or 'sf'. */
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+			str = NULL;
+			goto done;
+		}
+		eth_da->type = RTE_ETH_REPRESENTOR_VF;
+	}
+	str = process_list(str, eth_da->representor_ports,
+		&eth_da->nb_representor_ports,
+		RTE_DIM(eth_da->representor_ports));
+done:
+	if (str == NULL) {
+		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
+		return -1;
+	}
+
+	devargs->repr_args_num++;
+
+	return 0;
+}
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1431,6 +1556,12 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	cpfl_args->repr_args_num = 0;
+	ret = rte_kvargs_process(kvlist, CPFL_REPRESENTOR, &parse_repr, cpfl_args);
+
+	if (ret != 0)
+		goto fail;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2087,6 +2218,48 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
+static int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2165,6 +2338,12 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto err;
 	}
 
+	retval = cpfl_repr_devargs_process(adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
+		goto err;
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index eb51a12fac..362cad155d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -60,16 +60,24 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_HOST	0
+#define CPFL_HOST_ID_ACC	1
+#define CPFL_PF_TYPE_APF	0
+#define CPFL_PF_TYPE_CPF	1
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
+#define CPFL_REPR_ARG_NUM_MAX	4
 /* Struct used when parse driver specific devargs */
 struct cpfl_devargs {
 	uint16_t req_vports[CPFL_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
+	uint8_t repr_args_num;
+	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
 };
 
 struct p2p_queue_chunks_info {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 08/10] net/cpfl: support probe again
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (6 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 07/10] net/cpfl: parse representor devargs beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 09/10] net/cpfl: create port representor beilei.xing
                             ` (2 subsequent siblings)
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Only the representor devargs will be parsed when probing again.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++++++++++++++++++++++++++-------
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b6fcfe4275..428d87b960 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
 	CPFL_REPRESENTOR,
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+	CPFL_REPRESENTOR,
+	NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
 	RTE_ETH_SPEED_NUM_NONE,
 	RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (devargs == NULL)
 		return 0;
 
-	kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+	kvlist = rte_kvargs_parse(devargs->args,
+			first ? cpfl_valid_args_first : cpfl_valid_args_again);
 	if (kvlist == NULL) {
 		PMD_INIT_LOG(ERR, "invalid kvargs key");
 		return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 	if (ret != 0)
 		goto fail;
 
+	if (!first)
+		return 0;
+
 	ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 				 cpfl_args);
 	if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	       struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
 
-	if (!cpfl_adapter_list_init) {
-		rte_spinlock_init(&cpfl_adapter_lock);
-		TAILQ_INIT(&cpfl_adapter_list);
-		cpfl_adapter_list_init = true;
-	}
-
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
 	if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOMEM;
 	}
 
-	retval = cpfl_parse_devargs(pci_dev, adapter);
+	retval = cpfl_parse_devargs(pci_dev, adapter, true);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_parse_devargs(pci_dev, adapter, false);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+		return ret;
+	}
+
+	ret = cpfl_repr_devargs_process(adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	struct cpfl_adapter_ext *adapter;
+
+	if (!cpfl_adapter_list_init) {
+		rte_spinlock_init(&cpfl_adapter_lock);
+		TAILQ_INIT(&cpfl_adapter_list);
+		cpfl_adapter_list_init = true;
+	}
+
+	adapter = cpfl_find_adapter_ext(pci_dev);
+
+	if (adapter == NULL)
+		return cpfl_pci_probe_first(pci_dev);
+	else
+		return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
 	.id_table	= pci_id_cpfl_map,
-	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING |
+			  RTE_PCI_DRV_PROBE_AGAIN,
 	.probe		= cpfl_pci_probe,
 	.remove		= cpfl_pci_remove,
 };
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 09/10] net/cpfl: create port representor
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (7 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 08/10] net/cpfl: support probe again beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-12 17:30           ` [PATCH v6 10/10] net/cpfl: support link update for representor beilei.xing
  2023-09-13  1:01           ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Qi Zhang

From: Beilei Xing <beilei.xing@intel.com>

Track representor requests in the allowlist.
A representor will only be created for an active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 109 +++---
 drivers/net/cpfl/cpfl_ethdev.h      |  37 ++
 drivers/net/cpfl/cpfl_representor.c | 581 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 710 insertions(+), 44 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 428d87b960..189072ab33 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1645,7 +1645,7 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
 		       struct cpchnl2_event_vport_created *vport_created)
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_al", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_allowlist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_allowlist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_allowlist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+		goto err_repr_allowlist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2343,11 +2346,23 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2374,6 +2389,12 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 362cad155d..a4ffd51fb3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,31 @@
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define CPFL_HOST0_APF		0
+#define CPFL_ACC_APF_ID		4
+#define CPFL_HOST0_CPF_ID	8
+#define CPFL_ACC_CPF_ID		12
+
+#define CPFL_VPORT_LAN_PF	0
+#define CPFL_VPORT_LAN_VF	1
+
+/* bit[15:14] type
+ * bit[13] host/accelerator core
+ * bit[12] apf/cpf
+ * bit[11:0] vf
+ */
+#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id)	\
+	((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) +	\
+	 (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -110,6 +131,7 @@ struct cpfl_vport_info {
 
 enum cpfl_itf_type {
 	CPFL_ITF_TYPE_VPORT,
+	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
 struct cpfl_itf {
@@ -135,6 +157,13 @@ struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -152,10 +181,16 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_allowlist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_event_vport_created *vport);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
@@ -170,6 +205,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..d2558c39a8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+/* Attach the created ethdev pointer to an existing allowlist entry.
+ * Returns -ENOENT if @repr_id was never allowlisted; otherwise the
+ * rte_hash_add_key_data() result.  Called from cpfl_repr_init() while
+ * cpfl_repr_create() holds adapter->repr_lock.
+ */
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	/* key exists: overwrite its data with the new ethdev */
+	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+	return ret;
+}
+
+/* Add @repr_id to the allowlist (no ethdev data yet).
+ * Returns 0 on success, -EEXIST if already present, or a negative
+ * rte_hash error.  Takes adapter->repr_lock internally.
+ */
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Expand one parsed representor devarg into allowlist entries: for every
+ * (controller, port[, vf]) combination build a cpfl_repr_id and add it.
+ * An entry that is already allowlisted (-EEXIST) is silently skipped;
+ * any other allowlist failure aborts and is returned to the caller.
+ */
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				/* PF representor: vf_id unused, forced to 0 */
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+				/* duplicates are not an error */
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				/* VF representor: one entry per requested VF */
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Validate and refine every representor devarg of @adapter, then add all
+ * requested representors to the allowlist.
+ *
+ * Defaults applied when a field is omitted:
+ *  - controller (host_id) defaults to CPFL_HOST_ID_HOST
+ *  - pf defaults to CPFL_PF_TYPE_APF
+ *
+ * Returns 0 on success, -EINVAL on an out-of-range host/PF id, or the
+ * error from cpfl_repr_devargs_process_one().
+ */
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					/* fix copy-paste: this branch validates
+					 * the PF id, not the Host ID
+					 */
+					PMD_INIT_LOG(ERR, "Invalid PF ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Remove @repr_id from the allowlist (used when a representor is
+ * uninitialized).  Takes adapter->repr_lock internally.
+ * Returns 0 on success, or the negative rte_hash_del_key() error.
+ */
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0) {
+		/* fix: add the missing separator between the two
+		 * concatenated string literals of the log message
+		 */
+		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist. "
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Tear down a representor ethdev: detach the MAC array (it points into
+ * struct cpfl_repr, so ethdev must not free it) and drop the allowlist
+ * entry.  Always returns 0.
+ */
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	/* mac_addrs points at repr->mac_addr, not heap memory */
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+/* dev_configure callback: reject configurations with more than one Rx
+ * queue, the only setup currently supported by the representor.
+ */
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* dev_close callback: representor close is a plain uninit. */
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+/* dev_infos_get callback: advertise the representor's fixed capabilities
+ * (single queue pair, single MAC) and expose the represented vport's
+ * vsi_id as the switch port id.
+ */
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* all representors share one switch domain */
+	dev_info->switch_info.port_id = repr->vport_info->vport.info.vsi_id;
+
+	return 0;
+}
+
+/* dev_start callback: no hardware action needed, just flip every queue's
+ * state to started so the ethdev layer reports them correctly.
+ */
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/* dev_stop callback: mirror of dev_start — mark all queues stopped and
+ * clear the started flag.
+ */
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+/* rx_queue_setup callback: placeholder — representor Rx queues need no
+ * setup at this stage of the series, real setup comes in a later patch.
+ */
+static int
+cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 __rte_unused struct rte_mempool *pool)
+{
+	/* Dummy */
+	return 0;
+}
+
+/* tx_queue_setup callback: placeholder, see cpfl_repr_rx_queue_setup(). */
+static int
+cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	/* Dummy */
+	return 0;
+}
+
+/* ethdev ops table installed on every representor in cpfl_repr_init(). */
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
+	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+};
+
+/* rte_eth_dev_create() init callback for a representor ethdev.
+ * @init_param is a struct cpfl_repr_param prepared by cpfl_repr_create().
+ * Fills in the private cpfl_repr, installs ops, assigns a random MAC and
+ * records the ethdev in the allowlist (repr_lock is held by the caller).
+ */
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* unique id encoding type/host/pf/vf, see CPFL_REPRESENTOR_ID */
+	eth_dev->data->representor_id =
+		CPFL_REPRESENTOR_ID(repr->repr_id.type,
+				    repr->repr_id.host_id,
+				    repr->repr_id.pf_id,
+				    repr->repr_id.vf_id);
+
+	/* MAC storage lives inside struct cpfl_repr; freed with it */
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+/* Map a (host_id, pf_id) pair to the device function id used by the CP
+ * channel (CPFL_HOST0_APF, CPFL_ACC_CPF_ID, ...).  Returns -EINVAL for a
+ * combination outside the 2x2 host/PF matrix.
+ */
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+/* Return true if vport @info belongs to the function identified by
+ * @repr_id: PF representors match on the translated function id,
+ * VF representors match on vf_id.  Any type/func_type mismatch (or an
+ * invalid host/pf combination) yields false.
+ */
+static bool
+cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == CPFL_VPORT_LAN_PF) {
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		/* func_id < 0 means an invalid host/pf pair */
+		if (func_id < 0 || func_id != info->pf_id)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == CPFL_VPORT_LAN_VF) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+/* Query the CP for the vport list of the function represented by
+ * @repr_id, filling @response.  Builds the cpfl_vport_id: PF entries
+ * translate (host, pf) to a function id; VF entries are assumed to hang
+ * off HOST0 APF (see also cpfl_repr_vport_info_query).
+ */
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+	return ret;
+}
+
+/* Query the CP for detailed info of one vport (@vport_id) belonging to
+ * the function represented by @repr_id.  Same cpfl_vport_id construction
+ * as cpfl_repr_vport_list_query().
+ */
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+	return ret;
+}
+
+/* Insert/refresh the vport_map_hash entry for @vport_id using the freshly
+ * queried @response.  NOTE(review): the cast assumes
+ * cpchnl2_get_vport_info_response and cpchnl2_event_vport_created share
+ * the same layout for the fields cpfl_vport_info_create() reads — confirm
+ * against cpfl_cpchnl.h.
+ */
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, (struct cpchnl2_event_vport_created *)response);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Create an ethdev for every allowlisted representor that does not yet
+ * have one.  For each pending entry: query the CP for the function's
+ * vport list and per-vport info, refresh vport_map_hash, then bind the
+ * first matching vport via rte_eth_dev_create()/cpfl_repr_init().
+ * Takes adapter->repr_lock for the whole walk and, nested,
+ * adapter->vport_map_lock while scanning the vport map.
+ *
+ * Returns 0 on success (including an empty allowlist), -ENOMEM if the
+ * mailbox buffer cannot be allocated, or the first query/create error.
+ */
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
+	/* fix: ret was uninitialized and returned as-is when the allowlist
+	 * is empty or every entry is skipped
+	 */
+	int ret = 0;
+
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_allowlist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		int i;
+
+		/* skip representors that have already been created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			goto err;
+		}
+
+		if (vlist_resp->nof_vports == 0) {
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s", name);
+			continue;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+		}
+
+		/* find the matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!cpfl_match_repr_with_vport(repr_id, &vi->vport.info))
+				continue;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			/* invokes cpfl_repr_init(), which records the new
+			 * ethdev in the allowlist entry
+			 */
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				goto err;
+			}
+			/* bind only the first matching vport */
+			break;
+		}
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	rte_free(vlist_resp);
+	return ret;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+/* Identity of a represented function; also the key of the allowlist hash. */
+struct cpfl_repr_id {
+	uint8_t host_id; /* CPFL_HOST_ID_HOST or CPFL_HOST_ID_ACC */
+	uint8_t pf_id;   /* CPFL_PF_TYPE_APF or CPFL_PF_TYPE_CPF */
+	uint8_t type;    /* RTE_ETH_REPRESENTOR_PF or _VF */
+	uint8_t vf_id;   /* valid only when type is _VF */
+};
+
+/* Init parameters handed to cpfl_repr_init() via rte_eth_dev_create(). */
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info; /* matched entry from vport_map_hash */
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 2f0f5d8434..d8b92ae16a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v6 10/10] net/cpfl: support link update for representor
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (8 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 09/10] net/cpfl: create port representor beilei.xing
@ 2023-09-12 17:30           ` beilei.xing
  2023-09-13  1:01           ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
  10 siblings, 0 replies; 89+ messages in thread
From: beilei.xing @ 2023-09-12 17:30 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Add link update ops for representor.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  1 +
 drivers/net/cpfl/cpfl_representor.c | 89 +++++++++++++++++++++++------
 2 files changed, 71 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a4ffd51fb3..d0dcc0cc05 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -162,6 +162,7 @@ struct cpfl_repr {
 	struct cpfl_repr_id repr_id;
 	struct rte_ether_addr mac_addr;
 	struct cpfl_vport_info *vport_info;
+	bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index d2558c39a8..4d15a26c80 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -308,6 +308,72 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+/* Map a (host_id, pf_id) pair to the device function id used by the CP
+ * channel.  Returns -EINVAL for a combination outside the 2x2 matrix.
+ * (Moved above cpfl_repr_link_update, which now also uses it.)
+ */
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+/* link_update callback for representors.  The representor's link mirrors
+ * the represented function's vport status: when @wait_to_complete is set
+ * the CP is re-queried and repr->func_up refreshed, otherwise the cached
+ * value is reported.
+ */
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+		      int wait_to_complete)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+	struct rte_eth_link *dev_link = &ethdev->data->dev_link;
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+	struct cpchnl2_get_vport_info_response response;
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+		return -EINVAL;
+	}
+
+	if (wait_to_complete) {
+		if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
+			/* PF */
+			vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+			vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id);
+			vi.vf_id = 0;
+		} else {
+			/* VF */
+			vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+			vi.pf_id = CPFL_HOST0_APF;
+			vi.vf_id = repr->repr_id.vf_id;
+		}
+		ret = cpfl_cc_vport_info_get(adapter, &repr->vport_info->vport.vport,
+					     &vi, &response);
+		if (ret < 0) {
+			PMD_INIT_LOG(ERR, "Fail to get vport info.");
+			return ret;
+		}
+
+		/* cache the freshly queried status for fast-path queries */
+		if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+			repr->func_up = true;
+		else
+			repr->func_up = false;
+	}
+
+	dev_link->link_status = repr->func_up ?
+		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -317,6 +383,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+
+	.link_update		= cpfl_repr_link_update,
 };
 
 static int
@@ -331,6 +399,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
 	repr->itf.adapter = adapter;
 	repr->itf.data = eth_dev->data;
+	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
@@ -349,25 +419,6 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
 }
 
-static int
-cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
-{
-	if ((host_id != CPFL_HOST_ID_HOST &&
-	     host_id != CPFL_HOST_ID_ACC) ||
-	    (pf_id != CPFL_PF_TYPE_APF &&
-	     pf_id != CPFL_PF_TYPE_CPF))
-		return -EINVAL;
-
-	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
-		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
-		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
-		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
-		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
-	};
-
-	return func_id_map[host_id][pf_id];
-}
-
 static bool
 cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 			   struct cpchnl2_vport_info *info)
-- 
2.34.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v6 00/10] net/cpfl: support port representor
  2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
                             ` (9 preceding siblings ...)
  2023-09-12 17:30           ` [PATCH v6 10/10] net/cpfl: support link update for representor beilei.xing
@ 2023-09-13  1:01           ` Wu, Jingjing
  2023-09-13  5:41             ` Zhang, Qi Z
  10 siblings, 1 reply; 89+ messages in thread
From: Wu, Jingjing @ 2023-09-13  1:01 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Wednesday, September 13, 2023 1:30 AM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v6 00/10] net/cpfl: support port representor
> 
> From: Beilei Xing <beilei.xing@intel.com>

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 89+ messages in thread

* RE: [PATCH v6 00/10] net/cpfl: support port representor
  2023-09-13  1:01           ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
@ 2023-09-13  5:41             ` Zhang, Qi Z
  0 siblings, 0 replies; 89+ messages in thread
From: Zhang, Qi Z @ 2023-09-13  5:41 UTC (permalink / raw)
  To: Wu, Jingjing, Xing, Beilei; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Wednesday, September 13, 2023 9:01 AM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>
> Subject: RE: [PATCH v6 00/10] net/cpfl: support port representor
> 
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Wednesday, September 13, 2023 1:30 AM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>
> > Subject: [PATCH v6 00/10] net/cpfl: support port representor
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> 
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi

^ permalink raw reply	[flat|nested] 89+ messages in thread

end of thread, other threads:[~2023-09-13  5:42 UTC | newest]

Thread overview: 89+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
2023-08-09 15:51 ` [PATCH 02/19] net/cpfl: introduce interface structure beilei.xing
2023-08-09 15:51 ` [PATCH 03/19] net/cpfl: add cp channel beilei.xing
2023-08-09 15:51 ` [PATCH 04/19] net/cpfl: enable vport mapping beilei.xing
2023-08-09 15:51 ` [PATCH 05/19] net/cpfl: parse representor devargs beilei.xing
2023-08-09 15:51 ` [PATCH 06/19] net/cpfl: support probe again beilei.xing
2023-08-09 15:51 ` [PATCH 07/19] net/cpfl: create port representor beilei.xing
2023-08-09 15:51 ` [PATCH 08/19] net/cpfl: support vport list/info get beilei.xing
2023-08-09 15:51 ` [PATCH 09/19] net/cpfl: update vport info before creating representor beilei.xing
2023-08-09 15:51 ` [PATCH 10/19] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-09 15:51 ` [PATCH 11/19] net/cpfl: add exceptional vport beilei.xing
2023-08-09 15:51 ` [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup beilei.xing
2023-08-09 15:51 ` [PATCH 13/19] net/cpfl: support link update for representor beilei.xing
2023-08-09 15:51 ` [PATCH 14/19] net/cpfl: add stats ops " beilei.xing
2023-08-09 15:51 ` [PATCH 15/19] common/idpf: refine inline function beilei.xing
2023-08-09 15:51 ` [PATCH 16/19] net/cpfl: support representor data path beilei.xing
2023-08-09 15:51 ` [PATCH 17/19] net/cpfl: support dispatch process beilei.xing
2023-08-09 15:51 ` [PATCH 18/19] net/cpfl: add dispatch service beilei.xing
2023-08-09 15:51 ` [PATCH 19/19] doc: update release notes for representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
2023-08-16 15:05   ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
2023-08-16 15:05   ` [PATCH v2 02/12] net/cpfl: introduce interface structure beilei.xing
2023-08-16 15:05   ` [PATCH v2 03/12] net/cpfl: add cp channel beilei.xing
2023-08-16 15:05   ` [PATCH v2 04/12] net/cpfl: enable vport mapping beilei.xing
2023-08-16 15:05   ` [PATCH v2 05/12] net/cpfl: parse representor devargs beilei.xing
2023-08-16 15:05   ` [PATCH v2 06/12] net/cpfl: support probe again beilei.xing
2023-08-16 15:05   ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
2023-09-05  7:35     ` Liu, Mingxia
2023-09-05  8:30     ` Liu, Mingxia
2023-08-16 15:05   ` [PATCH v2 08/12] net/cpfl: support vport list/info get beilei.xing
2023-08-16 15:05   ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
2023-09-06  2:33     ` Liu, Mingxia
2023-08-16 15:05   ` [PATCH v2 10/12] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-16 15:05   ` [PATCH v2 11/12] net/cpfl: support link update for representor beilei.xing
2023-08-16 15:05   ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
2023-09-06  3:02     ` Liu, Mingxia
2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
2023-09-07 15:15     ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
2023-09-07 15:15     ` [PATCH v3 02/11] net/cpfl: introduce interface structure beilei.xing
2023-09-07 15:15     ` [PATCH v3 03/11] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-07 15:15     ` [PATCH v3 04/11] net/cpfl: introduce CP channel API beilei.xing
2023-09-07 15:16     ` [PATCH v3 05/11] net/cpfl: enable vport mapping beilei.xing
2023-09-07 15:16     ` [PATCH v3 06/11] net/cpfl: parse representor devargs beilei.xing
2023-09-07 15:16     ` [PATCH v3 07/11] net/cpfl: support probe again beilei.xing
2023-09-07 15:16     ` [PATCH v3 08/11] net/cpfl: create port representor beilei.xing
2023-09-07 15:16     ` [PATCH v3 09/11] net/cpfl: support vport list/info get beilei.xing
2023-09-07 15:16     ` [PATCH v3 10/11] net/cpfl: update vport info before creating representor beilei.xing
2023-09-07 15:16     ` [PATCH v3 11/11] net/cpfl: support link update for representor beilei.xing
2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
2023-09-08 11:16       ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-08 11:16       ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-09  2:08         ` Wu, Jingjing
2023-09-08 11:16       ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-09  2:13         ` Wu, Jingjing
2023-09-08 11:16       ` [PATCH v4 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-08 11:16       ` [PATCH v4 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-08 11:16       ` [PATCH v4 06/10] net/cpfl: parse representor devargs beilei.xing
2023-09-08 11:16       ` [PATCH v4 07/10] net/cpfl: support probe again beilei.xing
2023-09-08 11:16       ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
2023-09-09  2:34         ` Wu, Jingjing
2023-09-08 11:17       ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
2023-09-09  3:04         ` Wu, Jingjing
2023-09-08 11:17       ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-09  3:05         ` Wu, Jingjing
2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 16:26         ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 16:26         ` [PATCH v5 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 16:26         ` [PATCH v5 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 16:26         ` [PATCH v5 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 16:26         ` [PATCH v5 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 16:26         ` [PATCH v5 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 16:26         ` [PATCH v5 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 16:26         ` [PATCH v5 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 16:26         ` [PATCH v5 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 16:26         ` [PATCH v5 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 17:30           ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 17:30           ` [PATCH v6 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 17:30           ` [PATCH v6 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 17:30           ` [PATCH v6 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 17:30           ` [PATCH v6 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 17:30           ` [PATCH v6 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 17:30           ` [PATCH v6 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 17:30           ` [PATCH v6 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 17:30           ` [PATCH v6 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 17:30           ` [PATCH v6 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-13  1:01           ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
2023-09-13  5:41             ` Zhang, Qi Z

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).