From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, John McNamara <john.mcnamara@intel.com>,
"Marko Kovacevic" <marko.kovacevic@intel.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH v2 1/6] octeontx: update mbox definition to version 1.1.3
Date: Wed, 20 Nov 2019 09:18:02 +0530 [thread overview]
Message-ID: <20191120034808.2760-2-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20191120034808.2760-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Sync mail box data structures to version 1.1.3.
Add mail box version verification and defer initializing octeontx
devices if the mail box version mismatches.
Update the OCTEON TX limitation with the max mempool size used.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Jerin Jacob Kollanukkaran <jerinj@marvell.com>
---
doc/guides/eventdevs/octeontx.rst | 7 ++
doc/guides/nics/octeontx.rst | 7 ++
drivers/common/octeontx/octeontx_mbox.c | 97 +++++++++++++++++++
drivers/common/octeontx/octeontx_mbox.h | 7 ++
.../octeontx/rte_common_octeontx_version.map | 6 ++
drivers/event/octeontx/ssovf_evdev.c | 5 +-
drivers/event/octeontx/ssovf_probe.c | 2 -
drivers/mempool/octeontx/octeontx_fpavf.c | 1 +
drivers/net/octeontx/base/octeontx_bgx.h | 3 +
drivers/net/octeontx/base/octeontx_pkivf.h | 15 ++-
10 files changed, 143 insertions(+), 7 deletions(-)
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index ab36a36e0..9a3646db0 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -139,3 +139,10 @@ follows:
When timvf is used as Event timer adapter event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.
+
+Max mempool size
+~~~~~~~~~~~~~~~~
+
+Max mempool size when using OCTEON TX Eventdev (SSO) should be limited to 128K.
+When running dpdk-test-eventdev on OCTEON TX the application can limit the
+number of mbufs by using the option ``--pool_sz 131072``
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
index 3c19c912d..8fc53810b 100644
--- a/doc/guides/nics/octeontx.rst
+++ b/doc/guides/nics/octeontx.rst
@@ -174,3 +174,10 @@ The OCTEON TX SoC family NICs support a maximum of a 32K jumbo frame. The value
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
up to 32k bytes can still reach the host interface.
+
+Maximum mempool size
+~~~~~~~~~~~~~~~~~~~~
+
+The maximum mempool size supplied to Rx queue setup should be less than 128K.
+When running testpmd on OCTEON TX the application can limit the number of mbufs
+by using the option ``--total-num-mbufs=131072``.
diff --git a/drivers/common/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c
index 880f8a40f..68cb0351f 100644
--- a/drivers/common/octeontx/octeontx_mbox.c
+++ b/drivers/common/octeontx/octeontx_mbox.c
@@ -31,6 +31,7 @@ enum {
struct mbox {
int init_once;
+ uint8_t ready;
uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
uint16_t tag_own; /* Last tag which was written to own channel */
@@ -59,6 +60,13 @@ struct mbox_ram_hdr {
};
};
+/* MBOX interface version message */
+struct mbox_intf_ver {
+ uint32_t platform:12;
+ uint32_t major:10;
+ uint32_t minor:10;
+};
+
int octeontx_logtype_mbox;
RTE_INIT(otx_init_log)
@@ -247,3 +255,92 @@ octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
}
+
+static int
+octeontx_start_domain(void)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ int result = -EINVAL;
+
+ hdr.coproc = NO_COPROC;
+ hdr.msg = RM_START_APP;
+
+ result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (result != 0) {
+ mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
+ result, hdr.res_code);
+ result = -EINVAL;
+ }
+
+ return result;
+}
+
+static int
+octeontx_check_mbox_version(struct mbox_intf_ver app_intf_ver,
+ struct mbox_intf_ver *intf_ver)
+{
+ struct mbox_intf_ver kernel_intf_ver = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int result = 0;
+
+
+ hdr.coproc = NO_COPROC;
+ hdr.msg = RM_INTERFACE_VERSION;
+
+ result = octeontx_mbox_send(&hdr, &app_intf_ver, sizeof(app_intf_ver),
+ &kernel_intf_ver, sizeof(kernel_intf_ver));
+ if (result != sizeof(kernel_intf_ver)) {
+ mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
+ result, hdr.res_code);
+ result = -EINVAL;
+ }
+
+ if (intf_ver)
+ *intf_ver = kernel_intf_ver;
+
+ if (app_intf_ver.platform != kernel_intf_ver.platform ||
+ app_intf_ver.major != kernel_intf_ver.major ||
+ app_intf_ver.minor != kernel_intf_ver.minor)
+ result = -EINVAL;
+
+ return result;
+}
+
+int
+octeontx_mbox_init(void)
+{
+ const struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
+ .platform = 0x01,
+ .major = 0x01,
+ .minor = 0x03
+ };
+ struct mbox_intf_ver rm_intf_ver = {0};
+ struct mbox *m = &octeontx_mbox;
+ int ret;
+
+ if (m->ready)
+ return 0;
+
+ ret = octeontx_start_domain();
+ if (ret < 0) {
+ m->init_once = 0;
+ return ret;
+ }
+
+ ret = octeontx_check_mbox_version(MBOX_INTERFACE_VERSION,
+ &rm_intf_ver);
+ if (ret < 0) {
+ mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
+ rm_intf_ver.platform, rm_intf_ver.major,
+ rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
+ MBOX_INTERFACE_VERSION.major,
+ MBOX_INTERFACE_VERSION.minor);
+ m->init_once = 0;
+ return -EINVAL;
+ }
+
+ m->ready = 1;
+ rte_mb();
+
+ return 0;
+}
diff --git a/drivers/common/octeontx/octeontx_mbox.h b/drivers/common/octeontx/octeontx_mbox.h
index 43fbda282..1f794c7f7 100644
--- a/drivers/common/octeontx/octeontx_mbox.h
+++ b/drivers/common/octeontx/octeontx_mbox.h
@@ -11,6 +11,11 @@
#define SSOW_BAR4_LEN (64 * 1024)
#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+#define NO_COPROC 0x0
+#define RM_START_APP 0x1
+#define RM_INTERFACE_VERSION 0x2
+
+
#define MBOX_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
@@ -26,9 +31,11 @@ struct octeontx_mbox_hdr {
uint16_t vfid; /* VF index or pf resource index local to the domain */
uint8_t coproc; /* Coprocessor id */
uint8_t msg; /* Message id */
+ uint8_t oob; /* out of band data */
uint8_t res_code; /* Functional layer response code */
};
+int octeontx_mbox_init(void);
int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
int octeontx_mbox_set_reg(uint8_t *reg);
int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
diff --git a/drivers/common/octeontx/rte_common_octeontx_version.map b/drivers/common/octeontx/rte_common_octeontx_version.map
index f04b3b7f8..fdc036a62 100644
--- a/drivers/common/octeontx/rte_common_octeontx_version.map
+++ b/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -5,3 +5,9 @@ DPDK_18.05 {
octeontx_mbox_set_reg;
octeontx_mbox_send;
};
+
+DPDK_19.08 {
+ global:
+
+ octeontx_mbox_init;
+};
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index e4e7c44ed..f9e93244f 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -77,6 +77,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
}
struct ssovf_mbox_grp_pri {
+ uint8_t vhgrp_id;
uint8_t wgt_left; /* Read only */
uint8_t weight;
uint8_t affinity;
@@ -95,6 +96,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
hdr.msg = SSO_GRP_SET_PRIORITY;
hdr.vfid = queue;
+ grp.vhgrp_id = queue;
grp.weight = 0xff;
grp.affinity = 0xff;
grp.priority = prio / 32; /* Normalize to 0 to 7 */
@@ -433,7 +435,7 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
pki_qos.mmask.f_grptag_ok = 1;
pki_qos.mmask.f_grptag_bad = 1;
- pki_qos.tag_type = queue_conf->ev.sched_type;
+ pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
pki_qos.qos_entry.port_add = 0;
pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
@@ -780,6 +782,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
return 0;
}
+ octeontx_mbox_init();
ret = ssovf_info(&oinfo);
if (ret) {
ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
index b3db596d4..9252998c1 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -19,8 +19,6 @@
#define SSO_MAX_VHGRP (64)
#define SSO_MAX_VHWS (32)
-#define SSO_VHGRP_AQ_THR (0x1E0ULL)
-
struct ssovf_res {
uint16_t domain;
uint16_t vfid;
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index baabc0152..ec84a5cff 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -507,6 +507,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
RTE_SET_USED(node_id);
RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
+ octeontx_mbox_init();
object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
if (object_size > FPA_MAX_OBJ_SIZE) {
errno = EINVAL;
diff --git a/drivers/net/octeontx/base/octeontx_bgx.h b/drivers/net/octeontx/base/octeontx_bgx.h
index ff2651497..a9bbaf2c3 100644
--- a/drivers/net/octeontx/base/octeontx_bgx.h
+++ b/drivers/net/octeontx/base/octeontx_bgx.h
@@ -27,6 +27,7 @@
#define MBOX_BGX_PORT_SET_BP 11
#define MBOX_BGX_PORT_SET_BCAST 12
#define MBOX_BGX_PORT_SET_MCAST 13
+#define MBOX_BGX_PORT_SET_MTU 14
/* BGX port configuration parameters: */
typedef struct octeontx_mbox_bgx_port_conf {
@@ -51,6 +52,8 @@ typedef struct octeontx_mbox_bgx_port_conf {
typedef struct octeontx_mbox_bgx_port_status {
uint8_t link_up;
uint8_t bp;
+ uint8_t duplex;
+ uint32_t speed;
} octeontx_mbox_bgx_port_status_t;
/* BGX port statistics: */
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 764aff53f..7f19a4bb8 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -33,6 +33,9 @@
#define MBOX_PKI_PORT_RESET_STATS 18
#define MBOX_PKI_GET_PORT_CONFIG 19
#define MBOX_PKI_GET_PORT_QOS_CONFIG 20
+#define MBOX_PKI_PORT_ALLOC_QPG 21
+#define MBOX_PKI_PORT_FREE_QPG 22
+#define MBOX_PKI_SET_PORT_CONFIG 23
#define MBOX_PKI_MAX_QOS_ENTRY 64
@@ -64,6 +67,7 @@ typedef struct mbox_pki_port_cfg {
struct {
uint8_t fcs_pres:1;
uint8_t fcs_skip:1;
+ uint8_t inst_skip:1;
uint8_t parse_mode:1;
uint8_t mpls_parse:1;
uint8_t inst_hdr_parse:1;
@@ -74,6 +78,7 @@ typedef struct mbox_pki_port_cfg {
} mmask;
uint8_t fcs_pres;
uint8_t fcs_skip;
+ uint8_t inst_skip;
uint8_t parse_mode;
uint8_t mpls_parse;
uint8_t inst_hdr_parse;
@@ -189,6 +194,9 @@ struct mbox_pki_qos_entry {
uint16_t gaura;
uint8_t grptag_ok;
uint8_t grptag_bad;
+ uint8_t ena_red;
+ uint8_t ena_drop;
+ uint8_t tag_type;
};
/* pki flow/style enable qos */
@@ -201,7 +209,7 @@ typedef struct mbox_pki_port_create_qos {
struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
} mbox_pki_qos_cfg_t;
-/* pki flow/style enable qos */
+/* pki flow/style modify qos */
typedef struct mbox_pki_port_modify_qos_entry {
uint8_t port_type;
uint16_t index;
@@ -214,11 +222,10 @@ typedef struct mbox_pki_port_modify_qos_entry {
uint8_t f_grptag_bad:1;
uint8_t f_tag_type:1;
} mmask;
- uint8_t tag_type;
struct mbox_pki_qos_entry qos_entry;
} mbox_pki_mod_qos_t;
-/* pki flow/style enable qos */
+/* pki flow/style delete qos */
typedef struct mbox_pki_port_delete_qos_entry {
uint8_t port_type;
uint16_t index;
@@ -372,6 +379,7 @@ struct pki_qos_entry {
uint8_t grptag_bad;
uint8_t ena_red;
uint8_t ena_drop;
+ uint8_t tag_type;
};
#define PKO_MAX_QOS_ENTRY 64
@@ -405,7 +413,6 @@ typedef struct pki_port_modify_qos_entry {
uint8_t f_grptag_bad:1;
uint8_t f_tag_type:1;
} mmask;
- uint8_t tag_type;
struct pki_qos_entry qos_entry;
} pki_mod_qos_t;
--
2.17.1
next prev parent reply other threads:[~2019-11-20 3:48 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-11-20 3:48 [dpdk-dev] [PATCH v2 0/6] octeontx: sync with latest SDK pbhagavatula
2019-11-20 3:48 ` pbhagavatula [this message]
2020-05-01 15:40 ` [dpdk-dev] [PATCH v2 1/6] octeontx: update mbox definition to version 1.1.3 Ferruh Yigit
2019-11-20 3:48 ` [dpdk-dev] [PATCH v2 2/6] net/octeontx: add application domain validation pbhagavatula
2019-11-20 3:48 ` [dpdk-dev] [PATCH v2 3/6] net/octeontx: cleanup redudant mbox structs pbhagavatula
2019-11-20 3:48 ` [dpdk-dev] [PATCH v2 4/6] mempool/octeontx: add application domain validation pbhagavatula
2019-11-20 3:48 ` [dpdk-dev] [PATCH v2 5/6] event/octeontx: add appication " pbhagavatula
2019-11-20 3:48 ` [dpdk-dev] [PATCH v2 6/6] net/octeontx: make Rx queue offloads same as dev offloads pbhagavatula
2019-11-21 2:40 ` [dpdk-dev] [PATCH v2 0/6] octeontx: sync with latest SDK Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20191120034808.2760-2-pbhagavatula@marvell.com \
--to=pbhagavatula@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=john.mcnamara@intel.com \
--cc=marko.kovacevic@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).