DPDK patches and discussions
* [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK
@ 2019-11-16 14:25 pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 1/7] octeontx: update mbox definition to version 1.1.3 pbhagavatula
                   ` (6 more replies)
  0 siblings, 7 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Sync the octeontx mailbox with the latest available SDK version (10.1.2.x).

Pavan Nikhilesh (7):
  octeontx: update mbox definition to version 1.1.3
  net/octeontx: add application domain validation
  net/octeontx: cleanup redundant mbox structs
  mempool/octeontx: add application domain validation
  event/octeontx: add application domain validation
  net/octeontx: make Rx queue offloads same as dev offloads
  doc: update OcteonTx limitations

 doc/guides/eventdevs/octeontx.rst             |   7 +
 doc/guides/nics/octeontx.rst                  |   7 +
 drivers/common/octeontx/octeontx_mbox.c       | 112 +++++++-
 drivers/common/octeontx/octeontx_mbox.h       |  13 +-
 .../octeontx/rte_common_octeontx_version.map  |   7 +
 drivers/event/octeontx/ssovf_evdev.c          |   5 +-
 drivers/event/octeontx/ssovf_probe.c          |   7 +-
 drivers/event/octeontx/timvf_evdev.c          |  12 +-
 drivers/event/octeontx/timvf_evdev.h          |   8 +-
 drivers/event/octeontx/timvf_probe.c          |  65 +++--
 drivers/mempool/octeontx/octeontx_fpavf.c     |  87 ++++--
 drivers/net/octeontx/base/octeontx_bgx.h      |   3 +
 drivers/net/octeontx/base/octeontx_pkivf.c    |  83 +++++-
 drivers/net/octeontx/base/octeontx_pkivf.h    | 249 +++---------------
 drivers/net/octeontx/base/octeontx_pkovf.c    |  37 ++-
 drivers/net/octeontx/base/octeontx_pkovf.h    |   3 +
 drivers/net/octeontx/octeontx_ethdev.c        |  15 +-
 drivers/net/octeontx/octeontx_ethdev.h        |   1 +
 18 files changed, 418 insertions(+), 303 deletions(-)

--
2.24.0



* [dpdk-dev] [PATCH 1/7] octeontx: update mbox definition to version 1.1.3
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 2/7] net/octeontx: add application domain validation pbhagavatula
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Sync mailbox data structures to version 1.1.3.
Add mailbox version verification and defer initialization of octeontx
devices if the mailbox version mismatches.
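
For illustration, a minimal sketch of a probe path honouring the
deferral (the caller here is hypothetical; only octeontx_mbox_init()
itself is added by this patch):

    /* Sketch: defer device setup when the mailbox handshake fails.
     * octeontx_mbox_init() starts the application domain and checks
     * the kernel mailbox interface version against 1.1.3. */
    static int example_probe(void)
    {
        int ret;

        ret = octeontx_mbox_init();
        if (ret < 0)
            return ret; /* version mismatch: defer initialization */

        /* ... continue with the normal device probe ... */
        return 0;
    }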

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Jerin Jacob Kollanukkaran <jerinj@marvell.com>
---
 drivers/common/octeontx/octeontx_mbox.c       | 97 +++++++++++++++++++
 drivers/common/octeontx/octeontx_mbox.h       |  7 ++
 .../octeontx/rte_common_octeontx_version.map  |  6 ++
 drivers/event/octeontx/ssovf_evdev.c          |  5 +-
 drivers/event/octeontx/ssovf_probe.c          |  2 -
 drivers/mempool/octeontx/octeontx_fpavf.c     |  1 +
 drivers/net/octeontx/base/octeontx_bgx.h      |  3 +
 drivers/net/octeontx/base/octeontx_pkivf.h    | 15 ++-
 8 files changed, 129 insertions(+), 7 deletions(-)

diff --git a/drivers/common/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c
index 880f8a40f..68cb0351f 100644
--- a/drivers/common/octeontx/octeontx_mbox.c
+++ b/drivers/common/octeontx/octeontx_mbox.c
@@ -31,6 +31,7 @@ enum {
 
 struct mbox {
 	int init_once;
+	uint8_t ready;
 	uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
 	uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
 	uint16_t tag_own; /* Last tag which was written to own channel */
@@ -59,6 +60,13 @@ struct mbox_ram_hdr {
 	};
 };
 
+/* MBOX interface version message */
+struct mbox_intf_ver {
+	uint32_t platform:12;
+	uint32_t major:10;
+	uint32_t minor:10;
+};
+
 int octeontx_logtype_mbox;
 
 RTE_INIT(otx_init_log)
@@ -247,3 +255,92 @@ octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
 
 	return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
 }
+
+static int
+octeontx_start_domain(void)
+{
+	struct octeontx_mbox_hdr hdr = {0};
+	int result = -EINVAL;
+
+	hdr.coproc = NO_COPROC;
+	hdr.msg = RM_START_APP;
+
+	result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+	if (result != 0) {
+		mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
+			     result, hdr.res_code);
+		result = -EINVAL;
+	}
+
+	return result;
+}
+
+static int
+octeontx_check_mbox_version(struct mbox_intf_ver app_intf_ver,
+			    struct mbox_intf_ver *intf_ver)
+{
+	struct mbox_intf_ver kernel_intf_ver = {0};
+	struct octeontx_mbox_hdr hdr = {0};
+	int result = 0;
+
+
+	hdr.coproc = NO_COPROC;
+	hdr.msg = RM_INTERFACE_VERSION;
+
+	result = octeontx_mbox_send(&hdr, &app_intf_ver, sizeof(app_intf_ver),
+			&kernel_intf_ver, sizeof(kernel_intf_ver));
+	if (result != sizeof(kernel_intf_ver)) {
+		mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
+			     result, hdr.res_code);
+		result = -EINVAL;
+	}
+
+	if (intf_ver)
+		*intf_ver = kernel_intf_ver;
+
+	if (app_intf_ver.platform != kernel_intf_ver.platform ||
+			app_intf_ver.major != kernel_intf_ver.major ||
+			app_intf_ver.minor != kernel_intf_ver.minor)
+		result = -EINVAL;
+
+	return result;
+}
+
+int
+octeontx_mbox_init(void)
+{
+	const struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
+		.platform = 0x01,
+		.major = 0x01,
+		.minor = 0x03
+	};
+	struct mbox_intf_ver rm_intf_ver = {0};
+	struct mbox *m = &octeontx_mbox;
+	int ret;
+
+	if (m->ready)
+		return 0;
+
+	ret = octeontx_start_domain();
+	if (ret < 0) {
+		m->init_once = 0;
+		return ret;
+	}
+
+	ret = octeontx_check_mbox_version(MBOX_INTERFACE_VERSION,
+					  &rm_intf_ver);
+	if (ret < 0) {
+		mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
+			     rm_intf_ver.platform, rm_intf_ver.major,
+			     rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
+			     MBOX_INTERFACE_VERSION.major,
+			     MBOX_INTERFACE_VERSION.minor);
+		m->init_once = 0;
+		return -EINVAL;
+	}
+
+	m->ready = 1;
+	rte_mb();
+
+	return 0;
+}
diff --git a/drivers/common/octeontx/octeontx_mbox.h b/drivers/common/octeontx/octeontx_mbox.h
index 43fbda282..1f794c7f7 100644
--- a/drivers/common/octeontx/octeontx_mbox.h
+++ b/drivers/common/octeontx/octeontx_mbox.h
@@ -11,6 +11,11 @@
 #define SSOW_BAR4_LEN			(64 * 1024)
 #define SSO_VHGRP_PF_MBOX(x)		(0x200ULL | ((x) << 3))
 
+#define NO_COPROC               0x0
+#define RM_START_APP            0x1
+#define RM_INTERFACE_VERSION    0x2
+
+
 #define MBOX_LOG(level, fmt, args...) \
 	rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
 			"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
@@ -26,9 +31,11 @@ struct octeontx_mbox_hdr {
 	uint16_t vfid;  /* VF index or pf resource index local to the domain */
 	uint8_t coproc; /* Coprocessor id */
 	uint8_t msg;    /* Message id */
+	uint8_t oob;	/* out of band data */
 	uint8_t res_code; /* Functional layer response code */
 };
 
+int octeontx_mbox_init(void);
 int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
 int octeontx_mbox_set_reg(uint8_t *reg);
 int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
diff --git a/drivers/common/octeontx/rte_common_octeontx_version.map b/drivers/common/octeontx/rte_common_octeontx_version.map
index f04b3b7f8..fdc036a62 100644
--- a/drivers/common/octeontx/rte_common_octeontx_version.map
+++ b/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -5,3 +5,9 @@ DPDK_18.05 {
 	octeontx_mbox_set_reg;
 	octeontx_mbox_send;
 };
+
+DPDK_19.08 {
+	global:
+
+	octeontx_mbox_init;
+};
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index e4e7c44ed..f9e93244f 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -77,6 +77,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
 }
 
 struct ssovf_mbox_grp_pri {
+	uint8_t vhgrp_id;
 	uint8_t wgt_left; /* Read only */
 	uint8_t weight;
 	uint8_t affinity;
@@ -95,6 +96,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
 	hdr.msg = SSO_GRP_SET_PRIORITY;
 	hdr.vfid = queue;
 
+	grp.vhgrp_id = queue;
 	grp.weight = 0xff;
 	grp.affinity = 0xff;
 	grp.priority = prio / 32; /* Normalize to 0 to 7 */
@@ -433,7 +435,7 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 	pki_qos.mmask.f_grptag_ok = 1;
 	pki_qos.mmask.f_grptag_bad = 1;
 
-	pki_qos.tag_type = queue_conf->ev.sched_type;
+	pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
 	pki_qos.qos_entry.port_add = 0;
 	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
 	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
@@ -780,6 +782,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
 		return 0;
 	}
 
+	octeontx_mbox_init();
 	ret = ssovf_info(&oinfo);
 	if (ret) {
 		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
index b3db596d4..9252998c1 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -19,8 +19,6 @@
 #define SSO_MAX_VHGRP                     (64)
 #define SSO_MAX_VHWS                      (32)
 
-#define SSO_VHGRP_AQ_THR                  (0x1E0ULL)
-
 struct ssovf_res {
 	uint16_t domain;
 	uint16_t vfid;
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index baabc0152..ec84a5cff 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -507,6 +507,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 	RTE_SET_USED(node_id);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
+	octeontx_mbox_init();
 	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
 	if (object_size > FPA_MAX_OBJ_SIZE) {
 		errno = EINVAL;
diff --git a/drivers/net/octeontx/base/octeontx_bgx.h b/drivers/net/octeontx/base/octeontx_bgx.h
index ff2651497..a9bbaf2c3 100644
--- a/drivers/net/octeontx/base/octeontx_bgx.h
+++ b/drivers/net/octeontx/base/octeontx_bgx.h
@@ -27,6 +27,7 @@
 #define MBOX_BGX_PORT_SET_BP            11
 #define MBOX_BGX_PORT_SET_BCAST         12
 #define MBOX_BGX_PORT_SET_MCAST         13
+#define MBOX_BGX_PORT_SET_MTU		14
 
 /* BGX port configuration parameters: */
 typedef struct octeontx_mbox_bgx_port_conf {
@@ -51,6 +52,8 @@ typedef struct octeontx_mbox_bgx_port_conf {
 typedef struct octeontx_mbox_bgx_port_status {
 	uint8_t link_up;
 	uint8_t bp;
+	uint8_t duplex;
+	uint32_t speed;
 } octeontx_mbox_bgx_port_status_t;
 
 /* BGX port statistics: */
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 764aff53f..7f19a4bb8 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -33,6 +33,9 @@
 #define MBOX_PKI_PORT_RESET_STATS		18
 #define MBOX_PKI_GET_PORT_CONFIG		19
 #define MBOX_PKI_GET_PORT_QOS_CONFIG		20
+#define MBOX_PKI_PORT_ALLOC_QPG			21
+#define MBOX_PKI_PORT_FREE_QPG			22
+#define MBOX_PKI_SET_PORT_CONFIG		23
 
 #define MBOX_PKI_MAX_QOS_ENTRY 64
 
@@ -64,6 +67,7 @@ typedef struct mbox_pki_port_cfg {
 	struct {
 		uint8_t fcs_pres:1;
 		uint8_t fcs_skip:1;
+		uint8_t inst_skip:1;
 		uint8_t parse_mode:1;
 		uint8_t mpls_parse:1;
 		uint8_t inst_hdr_parse:1;
@@ -74,6 +78,7 @@ typedef struct mbox_pki_port_cfg {
 	} mmask;
 	uint8_t fcs_pres;
 	uint8_t fcs_skip;
+	uint8_t inst_skip;
 	uint8_t parse_mode;
 	uint8_t mpls_parse;
 	uint8_t inst_hdr_parse;
@@ -189,6 +194,9 @@ struct mbox_pki_qos_entry {
 	uint16_t gaura;
 	uint8_t grptag_ok;
 	uint8_t grptag_bad;
+	uint8_t ena_red;
+	uint8_t ena_drop;
+	uint8_t tag_type;
 };
 
 /* pki flow/style enable qos */
@@ -201,7 +209,7 @@ typedef struct mbox_pki_port_create_qos {
 	struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
 } mbox_pki_qos_cfg_t;
 
-/* pki flow/style enable qos */
+/* pki flow/style modify qos */
 typedef struct mbox_pki_port_modify_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -214,11 +222,10 @@ typedef struct mbox_pki_port_modify_qos_entry {
 		uint8_t f_grptag_bad:1;
 		uint8_t f_tag_type:1;
 	} mmask;
-	uint8_t tag_type;
 	struct mbox_pki_qos_entry qos_entry;
 } mbox_pki_mod_qos_t;
 
-/* pki flow/style enable qos */
+/* pki flow/style delete qos */
 typedef struct mbox_pki_port_delete_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -372,6 +379,7 @@ struct pki_qos_entry {
 	uint8_t grptag_bad;
 	uint8_t ena_red;
 	uint8_t ena_drop;
+	uint8_t tag_type;
 };
 
 #define PKO_MAX_QOS_ENTRY 64
@@ -405,7 +413,6 @@ typedef struct pki_port_modify_qos_entry {
 		uint8_t f_grptag_bad:1;
 		uint8_t f_tag_type:1;
 	} mmask;
-	uint8_t tag_type;
 	struct pki_qos_entry qos_entry;
 } pki_mod_qos_t;
 
-- 
2.24.0



* [dpdk-dev] [PATCH 2/7] net/octeontx: add application domain validation
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 1/7] octeontx: update mbox definition to version 1.1.3 pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 3/7] net/octeontx: cleanup redundant mbox structs pbhagavatula
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add application domain validation for PKI and PKO VFs.
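
The pattern, roughly (a standalone sketch with simplified types; the
real structures and the BAR0 word-0 encoding are in the diff below):
a VF is used only when the domain read from its BAR0 matches the
application's global domain.

    #include <stdint.h>

    /* Simplified stand-in for the per-VF bookkeeping in the diff. */
    struct vf_res {
        uint16_t domain;
        uint16_t vfid;
    };

    /* Decode domain/vfid the way pkivf_probe() does from BAR0 word 0. */
    static void decode_bar0(uint64_t val, struct vf_res *vf)
    {
        vf->domain = val & 0xffff;
        vf->vfid = (val >> 16) & 0xffff;
    }

    /* Return the index of the first VF in the application domain, -1
     * if the domain owns none (mirrors the loop in the PKI/PKO code). */
    static int find_usable_vf(const struct vf_res *vfs, int n,
                              uint16_t global_domain)
    {
        int i;

        for (i = 0; i < n; i++)
            if (vfs[i].domain == global_domain)
                return i;
        return -1;
    }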

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/common/octeontx/octeontx_mbox.c       | 15 ++++-
 drivers/common/octeontx/octeontx_mbox.h       |  6 +-
 .../octeontx/rte_common_octeontx_version.map  |  1 +
 drivers/event/octeontx/ssovf_probe.c          |  5 +-
 drivers/net/octeontx/base/octeontx_pkivf.c    | 66 +++++++++++++++++--
 drivers/net/octeontx/base/octeontx_pkivf.h    |  8 +--
 drivers/net/octeontx/base/octeontx_pkovf.c    | 37 +++++++++--
 drivers/net/octeontx/base/octeontx_pkovf.h    |  3 +
 drivers/net/octeontx/octeontx_ethdev.c        | 13 +++-
 drivers/net/octeontx/octeontx_ethdev.h        |  1 +
 10 files changed, 135 insertions(+), 20 deletions(-)

diff --git a/drivers/common/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c
index 68cb0351f..2fd253107 100644
--- a/drivers/common/octeontx/octeontx_mbox.c
+++ b/drivers/common/octeontx/octeontx_mbox.c
@@ -35,6 +35,7 @@ struct mbox {
 	uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
 	uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
 	uint16_t tag_own; /* Last tag which was written to own channel */
+	uint16_t domain; /* Domain */
 	rte_spinlock_t lock;
 };
 
@@ -198,7 +199,7 @@ mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
 }
 
 int
-octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
+octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain)
 {
 	struct mbox *m = &octeontx_mbox;
 
@@ -215,13 +216,14 @@ octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
 	if (m->reg != NULL) {
 		rte_spinlock_init(&m->lock);
 		m->init_once = 1;
+		m->domain = domain;
 	}
 
 	return 0;
 }
 
 int
-octeontx_mbox_set_reg(uint8_t *reg)
+octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain)
 {
 	struct mbox *m = &octeontx_mbox;
 
@@ -238,6 +240,7 @@ octeontx_mbox_set_reg(uint8_t *reg)
 	if (m->ram_mbox_base != NULL) {
 		rte_spinlock_init(&m->lock);
 		m->init_once = 1;
+		m->domain = domain;
 	}
 
 	return 0;
@@ -344,3 +347,11 @@ octeontx_mbox_init(void)
 
 	return 0;
 }
+
+uint16_t
+octeontx_get_global_domain(void)
+{
+	struct mbox *m = &octeontx_mbox;
+
+	return m->domain;
+}
diff --git a/drivers/common/octeontx/octeontx_mbox.h b/drivers/common/octeontx/octeontx_mbox.h
index 1f794c7f7..e56719cb8 100644
--- a/drivers/common/octeontx/octeontx_mbox.h
+++ b/drivers/common/octeontx/octeontx_mbox.h
@@ -36,8 +36,10 @@ struct octeontx_mbox_hdr {
 };
 
 int octeontx_mbox_init(void);
-int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
-int octeontx_mbox_set_reg(uint8_t *reg);
+void octeontx_set_global_domain(uint16_t global_domain);
+uint16_t octeontx_get_global_domain(void);
+int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain);
+int octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain);
 int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
 		void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
 
diff --git a/drivers/common/octeontx/rte_common_octeontx_version.map b/drivers/common/octeontx/rte_common_octeontx_version.map
index fdc036a62..2bba7cc93 100644
--- a/drivers/common/octeontx/rte_common_octeontx_version.map
+++ b/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -10,4 +10,5 @@ DPDK_19.08 {
 	global:
 
 	octeontx_mbox_init;
+	octeontx_get_global_domain;
 };
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
index 9252998c1..4da7d1ae4 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -181,7 +181,8 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	sdev.total_ssowvfs++;
 	if (vfid == 0) {
 		ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
-		if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) {
+		if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base,
+						    res->domain)) {
 			mbox_log_err("Invalid Failed to set ram mbox base");
 			return -EINVAL;
 		}
@@ -257,7 +258,7 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	if (vfid == 0) {
 		reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
 		reg += SSO_VHGRP_PF_MBOX(1);
-		if (octeontx_mbox_set_reg(reg)) {
+		if (octeontx_mbox_set_reg(reg, res->domain)) {
 			mbox_log_err("Invalid Failed to set mbox_reg");
 			return -EINVAL;
 		}
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index 1babea0e8..783b2a2e5 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -7,19 +7,50 @@
 #include <rte_eal.h>
 #include <rte_bus_pci.h>
 
+#include "../octeontx_logs.h"
+#include "octeontx_io.h"
 #include "octeontx_pkivf.h"
 
+
+struct octeontx_pkivf {
+	uint8_t		*bar0;
+	uint8_t		status;
+	uint16_t	domain;
+	uint16_t	vfid;
+};
+
+struct octeontx_pki_vf_ctl_s {
+	struct octeontx_pkivf pki[PKI_VF_MAX];
+};
+
+static struct octeontx_pki_vf_ctl_s pki_vf_ctl;
+
 int
 octeontx_pki_port_open(int port)
 {
+	uint16_t global_domain = octeontx_get_global_domain();
 	struct octeontx_mbox_hdr hdr;
-	int res;
+	mbox_pki_port_t port_type = {
+		.port_type = OCTTX_PORT_TYPE_NET,
+	};
+	int i, res;
+
+	/* Check if at least one PKI VF is in the application domain. */
+	for (i = 0; i < PKI_VF_MAX; i++) {
+		if (pki_vf_ctl.pki[i].domain != global_domain)
+			continue;
+		break;
+	}
+
+	if (i == PKI_VF_MAX)
+		return -ENODEV;
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_OPEN;
 	hdr.vfid = port;
 
-	res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &port_type, sizeof(mbox_pki_port_t),
+				 NULL, 0);
 	if (res < 0)
 		return -EACCES;
 	return res;
@@ -113,13 +144,40 @@ octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
 static int
 pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
-	RTE_SET_USED(pci_drv);
-	RTE_SET_USED(pci_dev);
+	struct octeontx_pkivf *res;
+	static uint8_t vf_cnt;
+	uint16_t domain;
+	uint16_t vfid;
+	uint8_t *bar0;
+	uint64_t val;
 
+	RTE_SET_USED(pci_drv);
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	if (pci_dev->mem_resource[0].addr == NULL) {
+		octeontx_log_err("PKI Empty bar[0] %p",
+				 pci_dev->mem_resource[0].addr);
+		return -ENODEV;
+	}
+
+	bar0 = pci_dev->mem_resource[0].addr;
+	val = octeontx_read64(bar0);
+	domain = val & 0xffff;
+	vfid = (val >> 16) & 0xffff;
+
+	if (unlikely(vfid >= PKI_VF_MAX)) {
+		octeontx_log_err("pki: Invalid vfid %d", vfid);
+		return -EINVAL;
+	}
+
+	res = &pki_vf_ctl.pki[vf_cnt++];
+	res->vfid = vfid;
+	res->domain = domain;
+	res->bar0 = bar0;
+
+	octeontx_log_dbg("PKI Domain=%d vfid=%d", res->domain, res->vfid);
 	return 0;
 }
 
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 7f19a4bb8..c2a944404 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -48,6 +48,10 @@ enum  {
 	MBOX_PKI_PARSE_NOTHING = 0x7f
 };
 
+/* PKI maximum constants */
+#define PKI_VF_MAX			(32)
+#define PKI_MAX_PKTLEN			(32768)
+
 /* Interface types: */
 enum {
 	OCTTX_PORT_TYPE_NET, /* Network interface ports */
@@ -231,10 +235,6 @@ typedef struct mbox_pki_port_delete_qos_entry {
 	uint16_t index;
 } mbox_pki_del_qos_t;
 
-/* PKI maximum constants */
-#define PKI_VF_MAX			(1)
-#define PKI_MAX_PKTLEN			(32768)
-
 /* pki pkind parse mode */
 enum  {
 	PKI_PARSE_LA_TO_LG = 0,
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.c b/drivers/net/octeontx/base/octeontx_pkovf.c
index 0a6d64b8e..dacbdd0b4 100644
--- a/drivers/net/octeontx/base/octeontx_pkovf.c
+++ b/drivers/net/octeontx/base/octeontx_pkovf.c
@@ -24,6 +24,8 @@ struct octeontx_pko_iomem {
 };
 
 #define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
+#define PKO_VALID	0x1
+#define PKO_INUSE	0x2
 
 struct octeontx_pko_fc_ctl_s {
 	int64_t buf_cnt;
@@ -33,13 +35,14 @@ struct octeontx_pko_fc_ctl_s {
 struct octeontx_pkovf {
 	uint8_t		*bar0;
 	uint8_t		*bar2;
+	uint8_t		status;
 	uint16_t	domain;
 	uint16_t	vfid;
 };
 
 struct octeontx_pko_vf_ctl_s {
 	rte_spinlock_t lock;
-
+	uint16_t global_domain;
 	struct octeontx_pko_iomem fc_iomem;
 	struct octeontx_pko_fc_ctl_s *fc_ctl;
 	struct octeontx_pkovf pko[PKO_VF_MAX];
@@ -403,7 +406,7 @@ octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
 	curr.lmtline_va = ctl->pko[dq_vf].bar2;
 	curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
 		+ PKO_VF_DQ_OP_SEND((dq), 0));
-	curr.fc_status_va = ctl->fc_ctl + dq;
+	curr.fc_status_va = ctl->fc_ctl + dq_num;
 
 	octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
 			 curr.lmtline_va, curr.ioreg_va,
@@ -431,8 +434,10 @@ octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
 int
 octeontx_pko_vf_count(void)
 {
+	uint16_t global_domain = octeontx_get_global_domain();
 	int vf_cnt;
 
+	pko_vf_ctl.global_domain = global_domain;
 	vf_cnt = 0;
 	while (pko_vf_ctl.pko[vf_cnt].bar0)
 		vf_cnt++;
@@ -440,6 +445,26 @@ octeontx_pko_vf_count(void)
 	return vf_cnt;
 }
 
+size_t
+octeontx_pko_get_vfid(void)
+{
+	size_t vf_cnt = octeontx_pko_vf_count();
+	size_t vf_idx;
+
+
+	for (vf_idx = 0; vf_idx < vf_cnt; vf_idx++) {
+		if (!(pko_vf_ctl.pko[vf_idx].status & PKO_VALID))
+			continue;
+		if (pko_vf_ctl.pko[vf_idx].status & PKO_INUSE)
+			continue;
+
+		pko_vf_ctl.pko[vf_idx].status |= PKO_INUSE;
+		return pko_vf_ctl.pko[vf_idx].vfid;
+	}
+
+	return SIZE_MAX;
+}
+
 int
 octeontx_pko_init_fc(const size_t pko_vf_count)
 {
@@ -467,8 +492,10 @@ octeontx_pko_init_fc(const size_t pko_vf_count)
 
 	/* Configure Flow-Control feature for all DQs of open VFs */
 	for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
-		dq_ix = vf_idx * PKO_VF_NUM_DQ;
+		if (pko_vf_ctl.pko[vf_idx].domain != pko_vf_ctl.global_domain)
+			continue;
 
+		dq_ix = pko_vf_ctl.pko[vf_idx].vfid * PKO_VF_NUM_DQ;
 		vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;
 
 		reg = (pko_vf_ctl.fc_iomem.iova +
@@ -479,6 +506,7 @@ octeontx_pko_init_fc(const size_t pko_vf_count)
 		    (0x1 << 0);		/* ENABLE */
 
 		octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
+		pko_vf_ctl.pko[vf_idx].status = PKO_VALID;
 
 		octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
 				 vf_bar0, (int)vf_idx, reg);
@@ -528,6 +556,7 @@ pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	uint16_t domain;
 	uint8_t *bar0;
 	uint8_t *bar2;
+	static uint8_t vf_cnt;
 	struct octeontx_pkovf *res;
 
 	RTE_SET_USED(pci_drv);
@@ -558,7 +587,7 @@ pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		return -EINVAL;
 	}
 
-	res = &pko_vf_ctl.pko[vfid];
+	res = &pko_vf_ctl.pko[vf_cnt++];
 	res->vfid = vfid;
 	res->domain = domain;
 	res->bar0 = bar0;
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.h b/drivers/net/octeontx/base/octeontx_pkovf.h
index cbd28249f..4208ef880 100644
--- a/drivers/net/octeontx/base/octeontx_pkovf.h
+++ b/drivers/net/octeontx/base/octeontx_pkovf.h
@@ -5,6 +5,8 @@
 #ifndef	__OCTEONTX_PKO_H__
 #define	__OCTEONTX_PKO_H__
 
+#include <octeontx_mbox.h>
+
 /* PKO maximum constants */
 #define	PKO_VF_MAX			(32)
 #define	PKO_VF_NUM_DQ			(8)
@@ -63,6 +65,7 @@ int octeontx_pko_channel_close(int chanid);
 int octeontx_pko_channel_start(int chanid);
 int octeontx_pko_channel_stop(int chanid);
 int octeontx_pko_vf_count(void);
+size_t octeontx_pko_get_vfid(void);
 int octeontx_pko_init_fc(const size_t pko_vf_count);
 void octeontx_pko_fc_free(void);
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 00686ea26..c2258d136 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -308,7 +308,7 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 
 	nic->num_tx_queues = dev->data->nb_tx_queues;
 
-	ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
+	ret = octeontx_pko_channel_open(nic->pko_vfid * PKO_VF_NUM_DQ,
 					nic->num_tx_queues,
 					nic->base_ochan);
 	if (ret) {
@@ -719,7 +719,7 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
 
-	dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
+	dq_num = (nic->pko_vfid * PKO_VF_NUM_DQ) + qidx;
 
 	/* Socket id check */
 	if (socket_id != (unsigned int)SOCKET_ID_ANY &&
@@ -1001,6 +1001,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 			int socket_id)
 {
 	int res;
+	size_t pko_vfid;
 	char octtx_name[OCTEONTX_MAX_NAME_LEN];
 	struct octeontx_nic *nic = NULL;
 	struct rte_eth_dev *eth_dev = NULL;
@@ -1039,7 +1040,15 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 		goto err;
 	}
 	data->dev_private = nic;
+	pko_vfid = octeontx_pko_get_vfid();
 
+	if (pko_vfid == SIZE_MAX) {
+		octeontx_log_err("failed to get pko vfid");
+		res = -ENODEV;
+		goto err;
+	}
+
+	nic->pko_vfid = pko_vfid;
 	nic->port_id = port;
 	nic->evdev = evdev;
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index fd2e99edf..50fae35d9 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -58,6 +58,7 @@ struct octeontx_nic {
 	uint8_t mcast_mode;
 	uint16_t num_tx_queues;
 	uint64_t hwcap;
+	uint8_t pko_vfid;
 	uint8_t link_up;
 	uint8_t	duplex;
 	uint8_t speed;
-- 
2.24.0



* [dpdk-dev] [PATCH 3/7] net/octeontx: cleanup redundant mbox structs
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 1/7] octeontx: update mbox definition to version 1.1.3 pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 2/7] net/octeontx: add application domain validation pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 4/7] mempool/octeontx: add application domain validation pbhagavatula
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Clean up redundant mailbox structures.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/net/octeontx/base/octeontx_pkivf.c |  25 +--
 drivers/net/octeontx/base/octeontx_pkivf.h | 242 +++------------------
 2 files changed, 43 insertions(+), 224 deletions(-)

diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index 783b2a2e5..8ce041955 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -30,9 +30,7 @@ octeontx_pki_port_open(int port)
 {
 	uint16_t global_domain = octeontx_get_global_domain();
 	struct octeontx_mbox_hdr hdr;
-	mbox_pki_port_t port_type = {
-		.port_type = OCTTX_PORT_TYPE_NET,
-	};
+	pki_port_type_t port_type;
 	int i, res;
 
 	/* Check if at least one PKI VF is in the application domain. */
@@ -45,11 +43,12 @@ octeontx_pki_port_open(int port)
 	if (i == PKI_VF_MAX)
 		return -ENODEV;
 
+	port_type.port_type = OCTTX_PORT_TYPE_NET;
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_OPEN;
 	hdr.vfid = port;
 
-	res = octeontx_mbox_send(&hdr, &port_type, sizeof(mbox_pki_port_t),
+	res = octeontx_mbox_send(&hdr, &port_type, sizeof(pki_port_type_t),
 				 NULL, 0);
 	if (res < 0)
 		return -EACCES;
@@ -62,8 +61,8 @@ octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_hash_cfg_t h_cfg = *(mbox_pki_hash_cfg_t *)hash_cfg;
-	int len = sizeof(mbox_pki_hash_cfg_t);
+	pki_hash_cfg_t h_cfg = *(pki_hash_cfg_t *)hash_cfg;
+	int len = sizeof(pki_hash_cfg_t);
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_HASH_CONFIG;
@@ -82,8 +81,8 @@ octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_pktbuf_cfg_t b_cfg = *(mbox_pki_pktbuf_cfg_t *)buf_cfg;
-	int len = sizeof(mbox_pki_pktbuf_cfg_t);
+	pki_pktbuf_cfg_t b_cfg = *(pki_pktbuf_cfg_t *)buf_cfg;
+	int len = sizeof(pki_pktbuf_cfg_t);
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG;
@@ -101,8 +100,8 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_qos_cfg_t q_cfg = *(mbox_pki_qos_cfg_t *)qos_cfg;
-	int len = sizeof(mbox_pki_qos_cfg_t);
+	pki_qos_cfg_t q_cfg = *(pki_qos_cfg_t *)qos_cfg;
+	int len = sizeof(pki_qos_cfg_t);
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_CREATE_QOS;
@@ -122,9 +121,9 @@ octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_errcheck_cfg_t e_cfg;
-	e_cfg = *((mbox_pki_errcheck_cfg_t *)(cfg));
-	int len = sizeof(mbox_pki_errcheck_cfg_t);
+	pki_errchk_cfg_t e_cfg;
+	e_cfg = *((pki_errchk_cfg_t *)(cfg));
+	int len = sizeof(pki_errchk_cfg_t);
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index c2a944404..d541dc3bd 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -39,15 +39,6 @@
 
 #define MBOX_PKI_MAX_QOS_ENTRY 64
 
-/* pki pkind parse mode */
-enum  {
-	MBOX_PKI_PARSE_LA_TO_LG = 0,
-	MBOX_PKI_PARSE_LB_TO_LG = 1,
-	MBOX_PKI_PARSE_LC_TO_LG = 3,
-	MBOX_PKI_PARSE_LG = 0x3f,
-	MBOX_PKI_PARSE_NOTHING = 0x7f
-};
-
 /* PKI maximum constants */
 #define PKI_VF_MAX			(32)
 #define PKI_MAX_PKTLEN			(32768)
@@ -60,189 +51,37 @@ enum {
 	OCTTX_PORT_TYPE_MAX
 };
 
-/* pki port config */
-typedef struct mbox_pki_port_type {
-	uint8_t port_type;
-} mbox_pki_port_t;
-
-/* pki port config */
-typedef struct mbox_pki_port_cfg {
-	uint8_t port_type;
-	struct {
-		uint8_t fcs_pres:1;
-		uint8_t fcs_skip:1;
-		uint8_t inst_skip:1;
-		uint8_t parse_mode:1;
-		uint8_t mpls_parse:1;
-		uint8_t inst_hdr_parse:1;
-		uint8_t fulc_parse:1;
-		uint8_t dsa_parse:1;
-		uint8_t hg2_parse:1;
-		uint8_t hg_parse:1;
-	} mmask;
-	uint8_t fcs_pres;
-	uint8_t fcs_skip;
-	uint8_t inst_skip;
-	uint8_t parse_mode;
-	uint8_t mpls_parse;
-	uint8_t inst_hdr_parse;
-	uint8_t fulc_parse;
-	uint8_t dsa_parse;
-	uint8_t hg2_parse;
-	uint8_t hg_parse;
-} mbox_pki_prt_cfg_t;
-
-/* pki Flow/style packet buffer config */
-typedef struct mbox_pki_port_pktbuf_cfg {
-	uint8_t port_type;
-	struct {
-		uint16_t f_mbuff_size:1;
-		uint16_t f_wqe_skip:1;
-		uint16_t f_first_skip:1;
-		uint16_t f_later_skip:1;
-		uint16_t f_pkt_outside_wqe:1;
-		uint16_t f_wqe_endian:1;
-		uint16_t f_cache_mode:1;
-	} mmask;
-	uint16_t mbuff_size;
-	uint16_t wqe_skip;
-	uint16_t first_skip;
-	uint16_t later_skip;
-	uint8_t pkt_outside_wqe;
-	uint8_t wqe_endian;
-	uint8_t cache_mode;
-} mbox_pki_pktbuf_cfg_t;
-
-/* pki flow/style tag config */
-typedef struct mbox_pki_port_hash_cfg {
-	uint8_t port_type;
-	uint32_t tag_slf:1;
-	uint32_t tag_sle:1;
-	uint32_t tag_sld:1;
-	uint32_t tag_slc:1;
-	uint32_t tag_dlf:1;
-	uint32_t tag_dle:1;
-	uint32_t tag_dld:1;
-	uint32_t tag_dlc:1;
-	uint32_t tag_prt:1;
-	uint32_t tag_vlan0:1;
-	uint32_t tag_vlan1:1;
-	uint32_t tag_ip_pctl:1;
-	uint32_t tag_sync:1;
-	uint32_t tag_spi:1;
-	uint32_t tag_gtp:1;
-	uint32_t tag_vni:1;
-} mbox_pki_hash_cfg_t;
-
-/* pki flow/style errcheck config */
-typedef struct mbox_pki_port_errcheck_cfg {
-	uint8_t port_type;
-	struct {
-		uint32_t f_ip6_udp_opt:1;
-		uint32_t f_lenerr_en:1;
-		uint32_t f_maxerr_en:1;
-		uint32_t f_minerr_en:1;
-		uint32_t f_fcs_chk:1;
-		uint32_t f_fcs_strip:1;
-		uint32_t f_len_lf:1;
-		uint32_t f_len_le:1;
-		uint32_t f_len_ld:1;
-		uint32_t f_len_lc:1;
-		uint32_t f_csum_lf:1;
-		uint32_t f_csum_le:1;
-		uint32_t f_csum_ld:1;
-		uint32_t f_csum_lc:1;
-		uint32_t f_min_frame_len;
-		uint32_t f_max_frame_len;
-	} mmask;
-	uint64_t ip6_udp_opt:1;
-	uint64_t lenerr_en:1;
-	uint64_t maxerr_en:1;
-	uint64_t minerr_en:1;
-	uint64_t fcs_chk:1;
-	uint64_t fcs_strip:1;
-	uint64_t len_lf:1;
-	uint64_t len_le:1;
-	uint64_t len_ld:1;
-	uint64_t len_lc:1;
-	uint64_t csum_lf:1;
-	uint64_t csum_le:1;
-	uint64_t csum_ld:1;
-	uint64_t csum_lc:1;
-	uint64_t min_frame_len;
-	uint64_t max_frame_len;
-} mbox_pki_errcheck_cfg_t;
+/* pki pkind parse mode */
+enum  {
+	PKI_PARSE_LA_TO_LG = 0,
+	PKI_PARSE_LB_TO_LG = 1,
+	PKI_PARSE_LC_TO_LG = 3,
+	PKI_PARSE_LG = 0x3f,
+	PKI_PARSE_NOTHING = 0x7f
+};
 
 /* CACHE MODE*/
 enum {
-	MBOX_PKI_OPC_MODE_STT = 0LL,
-	MBOX_PKI_OPC_MODE_STF = 1LL,
-	MBOX_PKI_OPC_MODE_STF1_STT = 2LL,
-	MBOX_PKI_OPC_MODE_STF2_STT = 3LL
+	PKI_OPC_MODE_STT = 0LL,
+	PKI_OPC_MODE_STF = 1LL,
+	PKI_OPC_MODE_STF1_STT = 2LL,
+	PKI_OPC_MODE_STF2_STT = 3LL
 };
 
 /* PKI QPG QOS*/
 enum {
-	MBOX_PKI_QPG_QOS_NONE = 0,
-	MBOX_PKI_QPG_QOS_VLAN,
-	MBOX_PKI_QPG_QOS_MPLS,
-	MBOX_PKI_QPG_QOS_DSA_SRC,
-	MBOX_PKI_QPG_QOS_DIFFSERV,
-	MBOX_PKI_QPG_QOS_HIGIG,
-};
-
-struct mbox_pki_qos_entry {
-	uint16_t port_add;
-	uint16_t ggrp_ok;
-	uint16_t ggrp_bad;
-	uint16_t gaura;
-	uint8_t grptag_ok;
-	uint8_t grptag_bad;
-	uint8_t ena_red;
-	uint8_t ena_drop;
-	uint8_t tag_type;
+	PKI_QPG_QOS_NONE = 0,
+	PKI_QPG_QOS_VLAN,
+	PKI_QPG_QOS_MPLS,
+	PKI_QPG_QOS_DSA_SRC,
+	PKI_QPG_QOS_DIFFSERV,
+	PKI_QPG_QOS_HIGIG,
 };
 
-/* pki flow/style enable qos */
-typedef struct mbox_pki_port_create_qos {
-	uint8_t port_type;
-	uint8_t qpg_qos;
-	uint8_t num_entry;
-	uint8_t tag_type;
-	uint8_t drop_policy;
-	struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
-} mbox_pki_qos_cfg_t;
-
-/* pki flow/style modify qos */
-typedef struct mbox_pki_port_modify_qos_entry {
-	uint8_t port_type;
-	uint16_t index;
-	struct {
-		uint8_t f_port_add:1;
-		uint8_t f_grp_ok:1;
-		uint8_t f_grp_bad:1;
-		uint8_t f_gaura:1;
-		uint8_t f_grptag_ok:1;
-		uint8_t f_grptag_bad:1;
-		uint8_t f_tag_type:1;
-	} mmask;
-	struct mbox_pki_qos_entry qos_entry;
-} mbox_pki_mod_qos_t;
-
-/* pki flow/style delete qos */
-typedef struct mbox_pki_port_delete_qos_entry {
+/* pki port config */
+typedef struct pki_port_type {
 	uint8_t port_type;
-	uint16_t index;
-} mbox_pki_del_qos_t;
-
-/* pki pkind parse mode */
-enum  {
-	PKI_PARSE_LA_TO_LG = 0,
-	PKI_PARSE_LB_TO_LG = 1,
-	PKI_PARSE_LC_TO_LG = 3,
-	PKI_PARSE_LG = 0x3f,
-	PKI_PARSE_NOTHING = 0x7f
-};
+} pki_port_type_t;
 
 /* pki port config */
 typedef struct pki_port_cfg {
@@ -351,25 +190,6 @@ typedef struct pki_port_errcheck_cfg {
 	uint64_t max_frame_len;
 } pki_errchk_cfg_t;
 
-
-/* CACHE MODE*/
-enum {
-	PKI_OPC_MODE_STT = 0LL,
-	PKI_OPC_MODE_STF = 1LL,
-	PKI_OPC_MODE_STF1_STT = 2LL,
-	PKI_OPC_MODE_STF2_STT = 3LL
-};
-
-/* PKI QPG QOS*/
-enum {
-	PKI_QPG_QOS_NONE = 0,
-	PKI_QPG_QOS_VLAN,
-	PKI_QPG_QOS_MPLS,
-	PKI_QPG_QOS_DSA_SRC,
-	PKI_QPG_QOS_DIFFSERV,
-	PKI_QPG_QOS_HIGIG,
-};
-
 struct pki_qos_entry {
 	uint16_t port_add;
 	uint16_t ggrp_ok;
@@ -422,8 +242,8 @@ octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
-	int len = sizeof(mbox_pki_mod_qos_t);
+	pki_mod_qos_t q_cfg = *(pki_mod_qos_t *)qos_cfg;
+	int len = sizeof(pki_mod_qos_t);
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
@@ -442,8 +262,8 @@ octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
-	int len = sizeof(mbox_pki_del_qos_t);
+	pki_del_qos_t q_cfg = *(pki_del_qos_t *)qos_cfg;
+	int len = sizeof(pki_del_qos_t);
 
 	hdr.coproc = OCTEONTX_PKI_COPROC;
 	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
@@ -462,8 +282,8 @@ octeontx_pki_port_close(int port)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
+	pki_port_type_t ptype;
+	int len = sizeof(pki_port_type_t);
 	memset(&ptype, 0, len);
 	ptype.port_type = OCTTX_PORT_TYPE_NET;
 
@@ -484,8 +304,8 @@ octeontx_pki_port_start(int port)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
+	pki_port_type_t ptype;
+	int len = sizeof(pki_port_type_t);
 	memset(&ptype, 0, len);
 	ptype.port_type = OCTTX_PORT_TYPE_NET;
 
@@ -506,8 +326,8 @@ octeontx_pki_port_stop(int port)
 	struct octeontx_mbox_hdr hdr;
 	int res;
 
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
+	pki_port_type_t ptype;
+	int len = sizeof(pki_port_type_t);
 	memset(&ptype, 0, len);
 	ptype.port_type = OCTTX_PORT_TYPE_NET;
 
-- 
2.24.0



* [dpdk-dev] [PATCH 4/7] mempool/octeontx: add application domain validation
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
                   ` (2 preceding siblings ...)
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 3/7] net/octeontx: cleanup redundant mbox structs pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 5/7] event/octeontx: add application " pbhagavatula
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add application domain validation for OcteonTx FPA VFs.
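
In outline (a simplified sketch, not the exact driver code; field and
helper names follow the diff): gpool handles are now resolved by
scanning for a matching (domain_id, vf_id) pair instead of indexing
the pool table directly, since the table is filled in probe order.

    #include <stddef.h>
    #include <stdint.h>

    /* Minimal stand-in for the driver's per-pool record. */
    struct fpavf_res_sketch {
        uint16_t domain_id;
        uint16_t vf_id;
    };

    /* Mirrors octeontx_get_fpavf(): only entries in the application's
     * domain are visible, and gpool is matched against vf_id. */
    static struct fpavf_res_sketch *
    get_fpavf(struct fpavf_res_sketch *pool, int max, uint16_t gpool,
              uint16_t global_domain)
    {
        int i;

        for (i = 0; i < max; i++) {
            if (pool[i].domain_id != global_domain)
                continue;
            if (pool[i].vf_id != gpool)
                continue;
            return &pool[i];
        }
        return NULL; /* gpool not reachable from this domain */
    }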

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/mempool/octeontx/octeontx_fpavf.c | 86 +++++++++++++++--------
 1 file changed, 58 insertions(+), 28 deletions(-)

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index ec84a5cff..c97267db3 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -119,20 +119,22 @@ RTE_INIT(otx_pool_init_log)
 static int
 octeontx_fpa_gpool_alloc(unsigned int object_size)
 {
+	uint16_t global_domain = octeontx_get_global_domain();
 	struct fpavf_res *res = NULL;
-	uint16_t gpool;
 	unsigned int sz128;
+	int i;
 
 	sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
 
-	for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {
+	for (i = 0; i < FPA_VF_MAX; i++) {
 
 		/* Skip VF that is not mapped Or _inuse */
-		if ((fpadev.pool[gpool].bar0 == NULL) ||
-		    (fpadev.pool[gpool].is_inuse == true))
+		if ((fpadev.pool[i].bar0 == NULL) ||
+		    (fpadev.pool[i].is_inuse == true) ||
+		    (fpadev.pool[i].domain_id != global_domain))
 			continue;
 
-		res = &fpadev.pool[gpool];
+		res = &fpadev.pool[i];
 
 		RTE_ASSERT(res->domain_id != (uint16_t)~0);
 		RTE_ASSERT(res->vf_id != (uint16_t)~0);
@@ -140,15 +142,34 @@ octeontx_fpa_gpool_alloc(unsigned int object_size)
 
 		if (res->sz128 == 0) {
 			res->sz128 = sz128;
+			fpavf_log_dbg("gpool %d blk_sz %d\n", res->vf_id,
+				      sz128);
 
-			fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
-			return gpool;
+			return res->vf_id;
 		}
 	}
 
 	return -ENOSPC;
 }
 
+static __rte_always_inline struct fpavf_res *
+octeontx_get_fpavf(uint16_t gpool)
+{
+	uint16_t global_domain = octeontx_get_global_domain();
+	int i;
+
+	for (i = 0; i < FPA_VF_MAX; i++) {
+		if (fpadev.pool[i].domain_id != global_domain)
+			continue;
+		if (fpadev.pool[i].vf_id != gpool)
+			continue;
+
+		return &fpadev.pool[i];
+	}
+
+	return NULL;
+}
+
 /* lock is taken by caller */
 static __rte_always_inline uintptr_t
 octeontx_fpa_gpool2handle(uint16_t gpool)
@@ -156,8 +177,10 @@ octeontx_fpa_gpool2handle(uint16_t gpool)
 	struct fpavf_res *res = NULL;
 
 	RTE_ASSERT(gpool < FPA_VF_MAX);
+	res = octeontx_get_fpavf(gpool);
+	if (res == NULL)
+		return 0;
 
-	res = &fpadev.pool[gpool];
 	return (uintptr_t)res->bar0 | gpool;
 }
 
@@ -182,7 +205,7 @@ octeontx_fpa_handle_valid(uintptr_t handle)
 			continue;
 
 		/* validate gpool */
-		if (gpool != i)
+		if (gpool != fpadev.pool[i].vf_id)
 			return false;
 
 		res = &fpadev.pool[i];
@@ -212,7 +235,10 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
 	struct octeontx_mbox_fpa_cfg cfg;
 	int ret = -1;
 
-	fpa = &fpadev.pool[gpool];
+	fpa = octeontx_get_fpavf(gpool);
+	if (fpa == NULL)
+		return -EINVAL;
+
 	memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
 			FPA_LN_SIZE;
 
@@ -278,7 +304,11 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index)
 	struct fpavf_res *fpa = NULL;
 	int ret = -1;
 
-	fpa = &fpadev.pool[gpool_index];
+	fpa = octeontx_get_fpavf(gpool_index);
+	if (fpa == NULL) {
+		ret = -EINVAL;
+		goto err;
+	}
 
 	hdr.coproc = FPA_COPROC;
 	hdr.msg = FPA_CONFIGSET;
@@ -422,6 +452,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index)
 static __rte_always_inline int
 octeontx_fpavf_free(unsigned int gpool)
 {
+	struct fpavf_res *res = octeontx_get_fpavf(gpool);
 	int ret = 0;
 
 	if (gpool >= FPA_MAX_POOL) {
@@ -430,7 +461,8 @@ octeontx_fpavf_free(unsigned int gpool)
 	}
 
 	/* Pool is free */
-	fpadev.pool[gpool].is_inuse = false;
+	if (res != NULL)
+		res->is_inuse = false;
 
 err:
 	return ret;
@@ -439,8 +471,10 @@ octeontx_fpavf_free(unsigned int gpool)
 static __rte_always_inline int
 octeontx_gpool_free(uint16_t gpool)
 {
-	if (fpadev.pool[gpool].sz128 != 0) {
-		fpadev.pool[gpool].sz128 = 0;
+	struct fpavf_res *res = octeontx_get_fpavf(gpool);
+
+	if (res && res->sz128 != 0) {
+		res->sz128 = 0;
 		return 0;
 	}
 	return -EINVAL;
@@ -460,8 +494,8 @@ octeontx_fpa_bufpool_block_size(uintptr_t handle)
 
 	/* get the gpool */
 	gpool = octeontx_fpa_bufpool_gpool(handle);
-	res = &fpadev.pool[gpool];
-	return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
+	res = octeontx_get_fpavf(gpool);
+	return res ? FPA_CACHE_LINE_2_OBJSZ(res->sz128) : 0;
 }
 
 int
@@ -722,6 +756,7 @@ octeontx_fpavf_identify(void *bar0)
 	uint16_t domain_id;
 	uint16_t vf_id;
 	uint64_t stack_ln_ptr;
+	static uint16_t vf_idx;
 
 	val = fpavf_read64((void *)((uintptr_t)bar0 +
 				FPA_VF_VHAURA_CNT_THRESHOLD(0)));
@@ -731,23 +766,18 @@ octeontx_fpavf_identify(void *bar0)
 
 	stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
 					FPA_VF_VHPOOL_THRESHOLD(0)));
-	if (vf_id >= FPA_VF_MAX) {
+	if (vf_idx >= FPA_VF_MAX) {
 		fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
-		return -1;
-	}
-
-	if (fpadev.pool[vf_id].is_inuse) {
-		fpavf_log_err("vf_id %d is_inuse\n", vf_id);
-		return -1;
+		return -E2BIG;
 	}
 
-	fpadev.pool[vf_id].domain_id = domain_id;
-	fpadev.pool[vf_id].vf_id = vf_id;
-	fpadev.pool[vf_id].bar0 = bar0;
-	fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+	fpadev.pool[vf_idx].domain_id = domain_id;
+	fpadev.pool[vf_idx].vf_id = vf_id;
+	fpadev.pool[vf_idx].bar0 = bar0;
+	fpadev.pool[vf_idx].stack_ln_ptr = stack_ln_ptr;
 
 	/* SUCCESS */
-	return vf_id;
+	return vf_idx++;
 }
 
 /* FPAVF pcie device aka mempool probe */
-- 
2.24.0



* [dpdk-dev] [PATCH 5/7] event/octeontx: add application domain validation
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
                   ` (3 preceding siblings ...)
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 4/7] mempool/octeontx: add application domain validation pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 6/7] net/octeontx: make Rx queue offloads same as dev offloads pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 7/7] doc: update OcteonTx limitations pbhagavatula
  6 siblings, 0 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add application domain validation for OcteonTx TIM VFs, aka the event timer.
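
The allocation model, sketched with simplified types (the real entry
points are timvf_get_ring()/timvf_release_ring() from the diff below):
a free TIM VF in the application's domain is claimed when the adapter
is created and released when it is freed.

    #include <stdbool.h>
    #include <stdint.h>

    struct tim_ring_sketch {
        bool in_use;
        uint16_t domain;
        uint8_t vfid;
    };

    /* Claim the first free TIM VF in the application domain;
     * UINT8_MAX means none is available. */
    static uint8_t
    get_ring(struct tim_ring_sketch *r, int n, uint16_t global_domain)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (r[i].domain != global_domain || r[i].in_use)
                continue;
            r[i].in_use = true;
            return r[i].vfid;
        }
        return UINT8_MAX;
    }

    /* Release by vfid so the ring can be claimed again. */
    static void
    release_ring(struct tim_ring_sketch *r, int n,
                 uint16_t global_domain, uint8_t vfid)
    {
        int i;

        for (i = 0; i < n; i++)
            if (r[i].domain == global_domain && r[i].vfid == vfid)
                r[i].in_use = false;
    }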

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/octeontx/timvf_evdev.c | 12 ++---
 drivers/event/octeontx/timvf_evdev.h |  8 +---
 drivers/event/octeontx/timvf_probe.c | 65 ++++++++++++++++++----------
 3 files changed, 49 insertions(+), 36 deletions(-)

diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index abbc9a775..caa129087 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -231,17 +231,15 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 {
 	char pool_name[25];
 	int ret;
+	uint8_t tim_ring_id;
 	uint64_t nb_timers;
 	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
 	struct timvf_ring *timr;
-	struct timvf_info tinfo;
 	const char *mempool_ops;
 	unsigned int mp_flags = 0;
 
-	if (timvf_info(&tinfo) < 0)
-		return -ENODEV;
-
-	if (adptr->data->id >= tinfo.total_timvfs)
+	tim_ring_id = timvf_get_ring();
+	if (tim_ring_id == UINT8_MAX)
 		return -ENODEV;
 
 	timr = rte_zmalloc("octeontx_timvf_priv",
@@ -259,7 +257,7 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 	}
 
 	timr->clk_src = (int) rcfg->clk_src;
-	timr->tim_ring_id = adptr->data->id;
+	timr->tim_ring_id = tim_ring_id;
 	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
 	timr->max_tout = rcfg->max_tmo_ns;
 	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
@@ -337,8 +335,10 @@ static int
 timvf_ring_free(struct rte_event_timer_adapter *adptr)
 {
 	struct timvf_ring *timr = adptr->data->adapter_priv;
+
 	rte_mempool_free(timr->chunk_pool);
 	rte_free(timr->bkt);
+	timvf_release_ring(timr->tim_ring_id);
 	rte_free(adptr->data->adapter_priv);
 	return 0;
 }
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 0185593f1..d0e5921db 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -115,11 +115,6 @@
 extern int otx_logtype_timvf;
 static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1;
 
-struct timvf_info {
-	uint16_t domain; /* Domain id */
-	uint8_t total_timvfs; /* Total timvf available in domain */
-};
-
 enum timvf_clk_src {
 	TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 	TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
@@ -196,7 +191,8 @@ bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
 	return rel_bkt & (nb_bkts - 1);
 }
 
-int timvf_info(struct timvf_info *tinfo);
+uint8_t timvf_get_ring(void);
+void timvf_release_ring(uint8_t vfid);
 void *timvf_bar(uint8_t id, uint8_t bar);
 int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
diff --git a/drivers/event/octeontx/timvf_probe.c b/drivers/event/octeontx/timvf_probe.c
index af87625fd..59bba31e8 100644
--- a/drivers/event/octeontx/timvf_probe.c
+++ b/drivers/event/octeontx/timvf_probe.c
@@ -20,6 +20,7 @@
 #define TIM_MAX_RINGS				(64)
 
 struct timvf_res {
+	uint8_t in_use;
 	uint16_t domain;
 	uint16_t vfid;
 	void *bar0;
@@ -34,50 +35,65 @@ struct timdev {
 
 static struct timdev tdev;
 
-int
-timvf_info(struct timvf_info *tinfo)
+uint8_t
+timvf_get_ring(void)
 {
+	uint16_t global_domain = octeontx_get_global_domain();
 	int i;
-	struct ssovf_info info;
 
-	if (tinfo == NULL)
-		return -EINVAL;
+	for (i = 0; i < tdev.total_timvfs; i++) {
+		if (tdev.rings[i].domain != global_domain)
+			continue;
+		if (tdev.rings[i].in_use)
+			continue;
 
-	if (!tdev.total_timvfs)
-		return -ENODEV;
+		tdev.rings[i].in_use = true;
+		return tdev.rings[i].vfid;
+	}
 
-	if (ssovf_info(&info) < 0)
-		return -EINVAL;
+	return UINT8_MAX;
+}
+
+void
+timvf_release_ring(uint8_t tim_ring_id)
+{
+	uint16_t global_domain = octeontx_get_global_domain();
+	int i;
 
 	for (i = 0; i < tdev.total_timvfs; i++) {
-		if (info.domain != tdev.rings[i].domain) {
-			timvf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
-				i, tdev.rings[i].vfid,
-				info.domain, tdev.rings[i].domain,
-				tdev.rings[i].bar0);
-			return -EINVAL;
-		}
+		if (tdev.rings[i].domain != global_domain)
+			continue;
+		if (tdev.rings[i].vfid == tim_ring_id)
+			tdev.rings[i].in_use = false;
 	}
-
-	tinfo->total_timvfs = tdev.total_timvfs;
-	tinfo->domain = info.domain;
-	return 0;
 }
 
 void*
-timvf_bar(uint8_t id, uint8_t bar)
+timvf_bar(uint8_t vfid, uint8_t bar)
 {
+	uint16_t global_domain = octeontx_get_global_domain();
+	struct timvf_res *res = NULL;
+	int i;
+
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return NULL;
 
-	if (id > tdev.total_timvfs)
+	for (i = 0; i < tdev.total_timvfs; i++) {
+		if (tdev.rings[i].domain != global_domain)
+			continue;
+		if (tdev.rings[i].vfid == vfid)
+			res = &tdev.rings[i];
+
+	}
+
+	if (res == NULL)
 		return NULL;
 
 	switch (bar) {
 	case 0:
-		return tdev.rings[id].bar0;
+		return res->bar0;
 	case 4:
-		return tdev.rings[id].bar4;
+		return res->bar4;
 	default:
 		return NULL;
 	}
@@ -118,6 +134,7 @@ timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	res->bar2 = pci_dev->mem_resource[2].addr;
 	res->bar4 = pci_dev->mem_resource[4].addr;
 	res->domain = (val >> 7) & 0xffff;
+	res->in_use = false;
 	tdev.total_timvfs++;
 	rte_wmb();
 
-- 
2.24.0



* [dpdk-dev] [PATCH 6/7] net/octeontx: make Rx queue offloads same as dev offloads
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
                   ` (4 preceding siblings ...)
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 5/7] event/octeontx: add application " pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 7/7] doc: update OcteonTx limitations pbhagavatula
  6 siblings, 0 replies; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Make Rx queue-specific offloads the same as the device Rx offloads.
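
From an application's point of view (a hedged sketch; nothing beyond
the standard rte_eth_dev_info_get() query is assumed), the per-queue
capability fields now mirror the device-level ones:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: after this patch the octeontx PMD reports identical
     * device-level and per-queue offload capabilities. */
    static void show_offload_capa(uint16_t port_id)
    {
        struct rte_eth_dev_info info;

        if (rte_eth_dev_info_get(port_id, &info) != 0)
            return;

        printf("rx capa 0x%" PRIx64 ", per-queue 0x%" PRIx64 "\n",
               info.rx_offload_capa, info.rx_queue_offload_capa);
        printf("tx capa 0x%" PRIx64 ", per-queue 0x%" PRIx64 "\n",
               info.tx_offload_capa, info.tx_queue_offload_capa);
    }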

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/net/octeontx/octeontx_ethdev.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index c2258d136..679803dd4 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -604,6 +604,8 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 
 	dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
 	dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
+	dev_info->rx_queue_offload_capa = OCTEONTX_RX_OFFLOADS;
+	dev_info->tx_queue_offload_capa = OCTEONTX_TX_OFFLOADS;
 
 	return 0;
 }
-- 
2.24.0



* [dpdk-dev] [PATCH 7/7] doc: update OcteonTx limitations
  2019-11-16 14:25 [dpdk-dev] [PATCH 0/7] octeontx: sync with latest SDK pbhagavatula
                   ` (5 preceding siblings ...)
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 6/7] net/octeontx: make Rx queue offloads same as dev offloads pbhagavatula
@ 2019-11-16 14:25 ` pbhagavatula
  2019-11-19  2:43   ` Jerin Jacob
  6 siblings, 1 reply; 9+ messages in thread
From: pbhagavatula @ 2019-11-16 14:25 UTC (permalink / raw)
  To: jerinj, John McNamara, Marko Kovacevic; +Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Update OcteonTx limitations with the max mempool size used.
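
For instance (a hedged sketch; the 131072 cap comes from the doc text
added below, the rest is the standard DPDK mempool API), an
application can clamp its mbuf pool to the documented ceiling:

    #include <rte_common.h>
    #include <rte_mbuf.h>

    /* 128K mbufs: the documented ceiling for OcteonTx pools. */
    #define OCTEONTX_POOL_SZ_MAX 131072u

    static struct rte_mempool *
    create_capped_pool(const char *name, unsigned int requested,
                       int socket)
    {
        unsigned int n = RTE_MIN(requested, OCTEONTX_POOL_SZ_MAX);

        return rte_pktmbuf_pool_create(name, n, 256 /* cache */,
                                       0 /* priv size */,
                                       RTE_MBUF_DEFAULT_BUF_SIZE,
                                       socket);
    }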

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 doc/guides/eventdevs/octeontx.rst | 7 +++++++
 doc/guides/nics/octeontx.rst      | 7 +++++++
 2 files changed, 14 insertions(+)

diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index ab36a36e0..587b7a427 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -139,3 +139,10 @@ follows:
 
 When timvf is used as Event timer adapter event schedule type
 ``RTE_SCHED_TYPE_PARALLEL`` is not supported.
+
+Max mempool size
+~~~~~~~~~~~~~~~~
+
+Max mempool size when using OcteonTx Eventdev (SSO) should be limited to 128K.
+When running dpdk-test-eventdev on OcteonTx the application can limit the
+number of mbufs by using the option ``--pool_sz 131072``.
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
index 3c19c912d..00098a3b2 100644
--- a/doc/guides/nics/octeontx.rst
+++ b/doc/guides/nics/octeontx.rst
@@ -174,3 +174,10 @@ The OCTEON TX SoC family NICs support a maximum of a 32K jumbo frame. The value
 is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
 member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
 up to 32k bytes can still reach the host interface.
+
+Maximum mempool size
+~~~~~~~~~~~~~~~~~~~~
+
+The maximum mempool size supplied to Rx queue setup should be less than 128K.
+When running testpmd on OcteonTx the application can limit the number of mbufs
+by using the option ``--total-num-mbufs=131072``.
-- 
2.24.0



* Re: [dpdk-dev] [PATCH 7/7] doc: update OcteonTx limitations
  2019-11-16 14:25 ` [dpdk-dev] [PATCH 7/7] doc: update OcteonTx limitations pbhagavatula
@ 2019-11-19  2:43   ` Jerin Jacob
  0 siblings, 0 replies; 9+ messages in thread
From: Jerin Jacob @ 2019-11-19  2:43 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Jerin Jacob, John McNamara, Marko Kovacevic, dpdk-dev

On Sat, Nov 16, 2019 at 7:56 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Update OcteonTx limitations with the max mempool size used.

Please squash this patch into the first patch and change all occurrences
of OcteonTx to OCTEON TX.


>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  doc/guides/eventdevs/octeontx.rst | 7 +++++++
>  doc/guides/nics/octeontx.rst      | 7 +++++++
>  2 files changed, 14 insertions(+)
>
> diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
> index ab36a36e0..587b7a427 100644
> --- a/doc/guides/eventdevs/octeontx.rst
> +++ b/doc/guides/eventdevs/octeontx.rst
> @@ -139,3 +139,10 @@ follows:
>
>  When timvf is used as Event timer adapter event schedule type
>  ``RTE_SCHED_TYPE_PARALLEL`` is not supported.
> +
> +Max mempool size
> +~~~~~~~~~~~~~~~~
> +
> +Max mempool size when using OcteonTx Eventdev (SSO) should be limited to 128K.
> +When running dpdk-test-eventdev on OcteonTx the application can limit the
> +number of mbufs by using the option ``--pool_sz 131072``.
> diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
> index 3c19c912d..00098a3b2 100644
> --- a/doc/guides/nics/octeontx.rst
> +++ b/doc/guides/nics/octeontx.rst
> @@ -174,3 +174,10 @@ The OCTEON TX SoC family NICs support a maximum of a 32K jumbo frame. The value
>  is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
>  member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
>  up to 32k bytes can still reach the host interface.
> +
> +Maximum mempool size
> +~~~~~~~~~~~~~~~~~~~~
> +
> +The maximum mempool size supplied to Rx queue setup should be less than 128K.
> +When running testpmd on OcteonTx the application can limit the number of mbufs
> +by using the option ``--total-num-mbufs=131072``.
> --
> 2.24.0
>
