DPDK patches and discussions
* [PATCH] net/cpfl: update CP channel API
@ 2023-10-11 11:40 beilei.xing
  2023-10-19 10:58 ` [PATCH v2] " beilei.xing
  0 siblings, 1 reply; 4+ messages in thread
From: beilei.xing @ 2023-10-11 11:40 UTC (permalink / raw)
  To: jingjing.wu; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Update the cpchnl2 function type according to the definition in the
MEV 1.0 release.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_cpchnl.h      | 19 ++++++++++---------
 drivers/net/cpfl/cpfl_ethdev.h      |  2 +-
 drivers/net/cpfl/cpfl_representor.c | 20 ++++++++++----------
 3 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
index 2eefcbcc10..667fadcac4 100644
--- a/drivers/net/cpfl/cpfl_cpchnl.h
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -22,9 +22,6 @@ enum cpchnl2_ops {
 
 #define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
 
-#define CPCHNL2_FUNC_TYPE_PF		0
-#define CPCHNL2_FUNC_TYPE_SRIOV		1
-
 /* vport statuses - must match the DB ones - see enum cp_vport_status*/
 #define CPCHNL2_VPORT_STATUS_CREATED	0
 #define CPCHNL2_VPORT_STATUS_ENABLED	1
@@ -136,8 +133,10 @@ CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups);
  * @brief function types
  */
 enum cpchnl2_func_type {
-	CPCHNL2_FTYPE_LAN_PF = 0,
-	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_VF = 0x0,
+	CPCHNL2_FTYPE_LAN_VM = 0x1,
+	CPCHNL2_FTYPE_LAN_PF = 0x2,
+	CPCHNL2_FTYPE_LAN_IMC_BMC = 0x3,
 	CPCHNL2_FTYPE_LAN_MAX
 };
 
@@ -176,7 +175,7 @@ struct cpchnl2_vport_info {
 	 */
 	u16 vsi_id;
 	u8 vport_status;	/* enum cpchnl2_vport_status */
-	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	/* 0 - LAN VF, 2 - LAN PF. Rest - reserved. Can be later expanded to other PEs */
 	u8 func_type;
 	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
 	u16 vf_id;
@@ -216,7 +215,9 @@ struct cpchnl2_vport_info {
 	u16 default_rx_qid;	/* Default LAN RX Queue ID */
 	u16 vport_flags; /* see: VPORT_FLAGS */
 	u8 egress_port;
-	u8 pad_reserved[5];
+	/* Host LAN APF: 0; ACC LAN APF: 4; IMC LAN APF: 5; ACC LAN CPF: 4; IMC LAN CPF: 5 */
+	u8 host_id;
+	u8 pad_reserved[4];
 };
 CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
 
@@ -226,7 +227,7 @@ CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
 
 /**
  * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
- * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param func_type Func type: 0 - LAN_VF, 2 - LAN_PF. Rest - reserved (see enum cpchnl2_func_type)
  * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
  *        CPFs are valid
  * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
@@ -241,7 +242,7 @@ CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request);
 
 /**
  * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
- * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ * @param func_type Func type: 0 - LAN_VF, 2 - LAN_PF. Rest - reserved. Can be later extended to
  *        other PE types
  * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
  *        CPFs are valid
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index efb0eb5251..bb53fca7c0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -296,7 +296,7 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
 
-		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vport_identity.func_type = CPCHNL2_FTYPE_LAN_PF;
 		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
 		vport_identity.pf_id = CPFL_ACC_CPF_ID;
 		vport_identity.vf_id = 0;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index de3b426727..cb253f7af4 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -348,12 +348,12 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	if (wait_to_complete) {
 		if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
 			/* PF */
-			vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+			vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 			vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id);
 			vi.vf_id = 0;
 		} else {
 			/* VF */
-			vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+			vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 			vi.pf_id = CPFL_HOST0_APF;
 			vi.vf_id = repr->repr_id.vf_id;
 		}
@@ -455,14 +455,14 @@ cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 	int func_id;
 
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
-	    info->func_type == CPFL_VPORT_LAN_PF) {
+	    info->func_type == CPCHNL2_FTYPE_LAN_PF) {
 		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 		if (func_id < 0 || func_id != info->pf_id)
 			return false;
 		else
 			return true;
 	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
-		   info->func_type == CPFL_VPORT_LAN_VF) {
+		   info->func_type == CPCHNL2_FTYPE_LAN_VF) {
 		if (repr_id->vf_id == info->vf_id)
 			return true;
 	}
@@ -480,12 +480,12 @@ cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
 
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
 		/* PF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 		vi.vf_id = 0;
 	} else {
 		/* VF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 		vi.pf_id = CPFL_HOST0_APF;
 		vi.vf_id = repr_id->vf_id;
 	}
@@ -506,12 +506,12 @@ cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
 
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
 		/* PF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 		vi.vf_id = 0;
 	} else {
 		/* VF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 		vi.pf_id = CPFL_HOST0_APF;
 		vi.vf_id = repr_id->vf_id;
 	}
@@ -532,11 +532,11 @@ cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
 	vi.vport_id = vport_id;
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
 		/* PF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+	vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 	} else {
 		/* VF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 		vi.pf_id = CPFL_HOST0_APF;
 		vi.vf_id = repr_id->vf_id;
 	}
-- 
2.34.1
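
For illustration only, here is a minimal standalone sketch (not part of the
patch) of the mapping this change applies in cpfl_representor.c: PF
representors now report function type 0x2 and VF representors 0x0. The enum
values are copied from the cpfl_cpchnl.h hunk above; the repr_type enum and
the repr_to_func_type() helper are hypothetical stand-ins for
RTE_ETH_REPRESENTOR_PF/VF and the driver's if/else branches.

#include <stdio.h>

/* Function type values as defined in the cpfl_cpchnl.h hunk above */
enum cpchnl2_func_type {
	CPCHNL2_FTYPE_LAN_VF = 0x0,
	CPCHNL2_FTYPE_LAN_VM = 0x1,
	CPCHNL2_FTYPE_LAN_PF = 0x2,
	CPCHNL2_FTYPE_LAN_IMC_BMC = 0x3,
	CPCHNL2_FTYPE_LAN_MAX
};

/* Hypothetical stand-in for RTE_ETH_REPRESENTOR_PF/VF */
enum repr_type { REPR_PF, REPR_VF };

/* Mirrors the PF/VF branches in cpfl_repr_link_update() and the
 * vport query/update helpers touched by this patch.
 */
static enum cpchnl2_func_type
repr_to_func_type(enum repr_type type)
{
	return type == REPR_PF ? CPCHNL2_FTYPE_LAN_PF : CPCHNL2_FTYPE_LAN_VF;
}

int main(void)
{
	printf("PF representor -> func_type 0x%x\n",
	       (unsigned int)repr_to_func_type(REPR_PF));
	printf("VF representor -> func_type 0x%x\n",
	       (unsigned int)repr_to_func_type(REPR_VF));
	return 0;
}

Note that the removed CPCHNL2_FUNC_TYPE_PF/CPCHNL2_FUNC_TYPE_SRIOV macros
encoded PF as 0 and VF as 1, which no longer matches the MEV 1.0 numbering,
hence the switch of every call site to the enum.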



* RE: [PATCH v2] net/cpfl: update CP channel API
  2023-10-19 10:58 ` [PATCH v2] " beilei.xing
@ 2023-10-19  2:51   ` Wu, Jingjing
  2023-10-19  3:51     ` Zhang, Qi Z
  0 siblings, 1 reply; 4+ messages in thread
From: Wu, Jingjing @ 2023-10-19  2:51 UTC (permalink / raw)
  To: Xing, Beilei, Zhang, Yuying; +Cc: dev



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Thursday, October 19, 2023 6:58 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Yuying
> <yuying.zhang@intel.com>
> Cc: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v2] net/cpfl: update CP channel API
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> Update the cpchnl2 function type according to the definition in the
> MEV 1.0 release.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>

Acked-by: Jingjing Wu <jingjing.wu@intel.com>


* RE: [PATCH v2] net/cpfl: update CP channel API
  2023-10-19  2:51   ` Wu, Jingjing
@ 2023-10-19  3:51     ` Zhang, Qi Z
  0 siblings, 0 replies; 4+ messages in thread
From: Zhang, Qi Z @ 2023-10-19  3:51 UTC (permalink / raw)
  To: Wu, Jingjing, Xing, Beilei, Zhang, Yuying; +Cc: dev



> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Thursday, October 19, 2023 10:51 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Zhang, Yuying
> <yuying.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2] net/cpfl: update CP channel API
> 
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Thursday, October 19, 2023 6:58 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Yuying
> > <yuying.zhang@intel.com>
> > Cc: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>
> > Subject: [PATCH v2] net/cpfl: update CP channel API
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > Update the cpchnl2 function type according to the definition in the
> > MEV 1.0 release.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> 
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


* [PATCH v2] net/cpfl: update CP channel API
  2023-10-11 11:40 [PATCH] net/cpfl: update CP channel API beilei.xing
@ 2023-10-19 10:58 ` beilei.xing
  2023-10-19  2:51   ` Wu, Jingjing
  0 siblings, 1 reply; 4+ messages in thread
From: beilei.xing @ 2023-10-19 10:58 UTC (permalink / raw)
  To: jingjing.wu, yuying.zhang; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

Update the cpchnl2 function type according to the definition in the
MEV 1.0 release.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
v2 change:
 - rename unused enum

 drivers/net/cpfl/cpfl_cpchnl.h      | 19 ++++++++++---------
 drivers/net/cpfl/cpfl_ethdev.h      |  2 +-
 drivers/net/cpfl/cpfl_representor.c | 20 ++++++++++----------
 3 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
index 2eefcbcc10..0c9dfcdbf1 100644
--- a/drivers/net/cpfl/cpfl_cpchnl.h
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -22,9 +22,6 @@ enum cpchnl2_ops {
 
 #define CPCHNL2_ETH_LENGTH_OF_ADDRESS	6
 
-#define CPCHNL2_FUNC_TYPE_PF		0
-#define CPCHNL2_FUNC_TYPE_SRIOV		1
-
 /* vport statuses - must match the DB ones - see enum cp_vport_status*/
 #define CPCHNL2_VPORT_STATUS_CREATED	0
 #define CPCHNL2_VPORT_STATUS_ENABLED	1
@@ -136,8 +133,10 @@ CPCHNL2_CHECK_STRUCT_LEN(3792, cpchnl2_queue_groups);
  * @brief function types
  */
 enum cpchnl2_func_type {
-	CPCHNL2_FTYPE_LAN_PF = 0,
-	CPCHNL2_FTYPE_LAN_VF = 1,
+	CPCHNL2_FTYPE_LAN_VF = 0x0,
+	CPCHNL2_FTYPE_LAN_RSV1 = 0x1,
+	CPCHNL2_FTYPE_LAN_PF = 0x2,
+	CPCHNL2_FTYPE_LAN_RSV2 = 0x3,
 	CPCHNL2_FTYPE_LAN_MAX
 };
 
@@ -176,7 +175,7 @@ struct cpchnl2_vport_info {
 	 */
 	u16 vsi_id;
 	u8 vport_status;	/* enum cpchnl2_vport_status */
-	/* 0 - LAN PF, 1 - LAN VF. Rest - reserved. Can be later expanded to other PEs */
+	/* 0 - LAN VF, 2 - LAN PF. Rest - reserved. Can be later expanded to other PEs */
 	u8 func_type;
 	/* Valid only if "type" above is VF, indexing is relative to PF specified above. */
 	u16 vf_id;
@@ -216,7 +215,9 @@ struct cpchnl2_vport_info {
 	u16 default_rx_qid;	/* Default LAN RX Queue ID */
 	u16 vport_flags; /* see: VPORT_FLAGS */
 	u8 egress_port;
-	u8 pad_reserved[5];
+	/* Host LAN APF: 0; ACC LAN APF: 4; IMC LAN APF: 5; ACC LAN CPF: 4; IMC LAN CPF: 5 */
+	u8 host_id;
+	u8 pad_reserved[4];
 };
 CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
 
@@ -226,7 +227,7 @@ CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info);
 
 /**
  * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode request
- * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved (see enum cpchnl2_func_type)
+ * @param func_type Func type: 0 - LAN_VF, 2 - LAN_PF. Rest - reserved (see enum cpchnl2_func_type)
  * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
  *        CPFs are valid
  * @param vf_id Valid only if "type" above is VF, indexing is relative to PF specified above
@@ -241,7 +242,7 @@ CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_get_vport_list_request);
 
 /**
  * @brief Used for CPCHNL2_OP_GET_VPORT_LIST opcode response
- * @param func_type Func type: 0 - LAN_PF, 1 - LAN_VF. Rest - reserved. Can be later extended to
+ * @param func_type Func type: 0 - LAN_VF, 2 - LAN_PF. Rest - reserved. Can be later extended to
  *        other PE types
  * @param pf_id Always relevant, indexing is according to LAN PE 0-15, while only 0-4 APFs and 8-12
  *        CPFs are valid
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a21701a81d..7a31a376b6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -295,7 +295,7 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
 
-		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vport_identity.func_type = CPCHNL2_FTYPE_LAN_PF;
 		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
 		vport_identity.pf_id = CPFL_ACC_CPF_ID;
 		vport_identity.vf_id = 0;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index f9954efcb9..e2ed9eda04 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -347,12 +347,12 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	if (wait_to_complete) {
 		if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
 			/* PF */
-			vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+			vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 			vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, repr->repr_id.pf_id);
 			vi.vf_id = 0;
 		} else {
 			/* VF */
-			vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+			vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 			vi.pf_id = CPFL_HOST0_APF;
 			vi.vf_id = repr->repr_id.vf_id;
 		}
@@ -454,14 +454,14 @@ cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
 	int func_id;
 
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
-	    info->func_type == CPFL_VPORT_LAN_PF) {
+	    info->func_type == CPCHNL2_FTYPE_LAN_PF) {
 		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 		if (func_id < 0 || func_id != info->pf_id)
 			return false;
 		else
 			return true;
 	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
-		   info->func_type == CPFL_VPORT_LAN_VF) {
+		   info->func_type == CPCHNL2_FTYPE_LAN_VF) {
 		if (repr_id->vf_id == info->vf_id)
 			return true;
 	}
@@ -479,12 +479,12 @@ cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
 
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
 		/* PF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 		vi.vf_id = 0;
 	} else {
 		/* VF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 		vi.pf_id = CPFL_HOST0_APF;
 		vi.vf_id = repr_id->vf_id;
 	}
@@ -505,12 +505,12 @@ cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
 
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
 		/* PF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 		vi.vf_id = 0;
 	} else {
 		/* VF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 		vi.pf_id = CPFL_HOST0_APF;
 		vi.vf_id = repr_id->vf_id;
 	}
@@ -531,11 +531,11 @@ cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
 	vi.vport_id = vport_id;
 	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
 		/* PF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.func_type = CPCHNL2_FTYPE_LAN_PF;
 		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
 	} else {
 		/* VF */
-		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.func_type = CPCHNL2_FTYPE_LAN_VF;
 		vi.pf_id = CPFL_HOST0_APF;
 		vi.vf_id = repr_id->vf_id;
 	}
-- 
2.34.1
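
For illustration only, a small standalone sketch (not part of the patch) of
why the cpfl_cpchnl.h hunk above can add the host_id byte without changing
the 96-byte size enforced by CPCHNL2_CHECK_STRUCT_LEN(96, cpchnl2_vport_info):
the former 5-byte pad is simply split into 1 + 4 bytes. The struct tails
below are illustrative and not the full cpchnl2_vport_info layout.

#include <stdint.h>

/* Tail of the structure before the change: ... egress_port; pad[5] */
struct tail_before {
	uint8_t egress_port;
	uint8_t pad_reserved[5];
};

/* Tail after the change: ... egress_port; host_id; pad[4] */
struct tail_after {
	uint8_t egress_port;
	/* Host LAN APF: 0; ACC LAN APF: 4; IMC LAN APF: 5; ACC/IMC LAN CPF: 4/5 */
	uint8_t host_id;
	uint8_t pad_reserved[4];
};

/* Splitting a 5-byte pad into 1 + 4 bytes leaves the size unchanged,
 * so the existing 96-byte static check still holds.
 */
_Static_assert(sizeof(struct tail_before) == sizeof(struct tail_after),
	       "host_id must fit inside the former padding");

int main(void)
{
	return 0;
}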


