From: Jerin Jacob <jerinjacobk@gmail.com>
To: Sathesh Edara <sedara@marvell.com>
Cc: sburla@marvell.com, jerinj@marvell.com,
Radha Mohan Chintakuntla <radhac@marvell.com>,
Veerasenareddy Burru <vburru@marvell.com>,
dev@dpdk.org
Subject: Re: [PATCH v2 08/10] net/octeon_ep: support Mailbox between VF and PF
Date: Fri, 21 Apr 2023 10:30:21 +0530
Message-ID: <CALBAE1NJXJMV6nNQKVi7r7e_f83gAMrWKRg5HryCbfoE-tN=Bw@mail.gmail.com>
In-Reply-To: <20230405142537.1899973-9-sedara@marvell.com>
On Wed, Apr 5, 2023 at 7:56 PM Sathesh Edara <sedara@marvell.com> wrote:
>
> This patch adds the mailbox communication between
> VF and PF and supports the following mailbox
> messages.
> - Get and set MAC address
> - Get link information
> - Get stats
> - Get and set link status
> - Set and get MTU
> - Send notification to PF
>
> Signed-off-by: Sathesh Edara <sedara@marvell.com>
1) Change "Mailbox" to "mailbox" in subject line
2) Please cross-check whether doc/guides/nics/features/octeon_ep.ini needs
to be updated to list these new features.
See doc/guides/nics/features.rst for the list of feature names.
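For reference, the additions under the existing [Features] section of
doc/guides/nics/features/octeon_ep.ini would probably look something like
the sketch below, for whichever of these are not already listed
(illustrative only; please confirm the exact feature names against
features.rst):

    Link status          = Y
    MTU update           = Y
    Unicast MAC filter   = Y
    Basic stats          = Y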
> ---
> drivers/net/octeon_ep/cnxk_ep_vf.c | 1 +
> drivers/net/octeon_ep/cnxk_ep_vf.h | 12 +-
> drivers/net/octeon_ep/meson.build | 1 +
> drivers/net/octeon_ep/otx_ep_common.h | 26 +++
> drivers/net/octeon_ep/otx_ep_ethdev.c | 143 +++++++++++-
> drivers/net/octeon_ep/otx_ep_mbox.c | 309 ++++++++++++++++++++++++++
> drivers/net/octeon_ep/otx_ep_mbox.h | 163 ++++++++++++++
> 7 files changed, 642 insertions(+), 13 deletions(-)
> create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
> create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
>
> diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
> index a437ae68cb..cadb4ecbf9 100644
> --- a/drivers/net/octeon_ep/cnxk_ep_vf.c
> +++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
> @@ -8,6 +8,7 @@
> #include <rte_common.h>
> #include <rte_cycles.h>
> #include <rte_memzone.h>
> +#include "otx_ep_common.h"
> #include "cnxk_ep_vf.h"
>
> static void
> diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
> index 072b38ea15..86277449ea 100644
> --- a/drivers/net/octeon_ep/cnxk_ep_vf.h
> +++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
> @@ -5,7 +5,7 @@
> #define _CNXK_EP_VF_H_
>
> #include <rte_io.h>
> -#include "otx_ep_common.h"
> +
> #define CNXK_CONFIG_XPANSION_BAR 0x38
> #define CNXK_CONFIG_PCIE_CAP 0x70
> #define CNXK_CONFIG_PCIE_DEVCAP 0x74
> @@ -92,6 +92,10 @@
> #define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
> #define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
>
> +#define CNXK_EP_R_MBOX_PF_VF_DATA_START 0x10210
> +#define CNXK_EP_R_MBOX_VF_PF_DATA_START 0x10230
> +#define CNXK_EP_R_MBOX_PF_VF_INT_START 0x10220
> +
> #define CNXK_EP_R_OUT_CNTS(ring) \
> (CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> @@ -125,6 +129,12 @@
> #define CNXK_EP_R_OUT_CNTS_ISM(ring) \
> (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> +#define CNXK_EP_R_MBOX_VF_PF_DATA(ring) \
> + (CNXK_EP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
> +
> +#define CNXK_EP_R_MBOX_PF_VF_INT(ring) \
> + (CNXK_EP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_EP_RING_OFFSET))
> +
> /*------------------ R_OUT Masks ----------------*/
> #define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
> #define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
> diff --git a/drivers/net/octeon_ep/meson.build b/drivers/net/octeon_ep/meson.build
> index a267b60290..e698bf9792 100644
> --- a/drivers/net/octeon_ep/meson.build
> +++ b/drivers/net/octeon_ep/meson.build
> @@ -8,4 +8,5 @@ sources = files(
> 'otx_ep_vf.c',
> 'otx2_ep_vf.c',
> 'cnxk_ep_vf.c',
> + 'otx_ep_mbox.c',
> )
> diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
> index 3beec71968..0bf5454a39 100644
> --- a/drivers/net/octeon_ep/otx_ep_common.h
> +++ b/drivers/net/octeon_ep/otx_ep_common.h
> @@ -4,6 +4,7 @@
> #ifndef _OTX_EP_COMMON_H_
> #define _OTX_EP_COMMON_H_
>
> +#include <rte_spinlock.h>
>
> #define OTX_EP_NW_PKT_OP 0x1220
> #define OTX_EP_NW_CMD_OP 0x1221
> @@ -67,6 +68,9 @@
> #define oct_ep_read64(addr) rte_read64_relaxed((void *)(addr))
> #define oct_ep_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
>
> +/* Mailbox maximum data size */
> +#define MBOX_MAX_DATA_BUF_SIZE 320
> +
> /* Input Request Header format */
> union otx_ep_instr_irh {
> uint64_t u64;
> @@ -488,6 +492,18 @@ struct otx_ep_device {
>
> /* DMA buffer for SDP ISM messages */
> const struct rte_memzone *ism_buffer_mz;
> +
> + /* Mailbox lock */
> + rte_spinlock_t mbox_lock;
> +
> + /* Mailbox data */
> + uint8_t mbox_data_buf[MBOX_MAX_DATA_BUF_SIZE];
> +
> + /* Mailbox data index */
> + int32_t mbox_data_index;
> +
> + /* Mailbox receive message length */
> + int32_t mbox_rcv_message_len;
> };
>
> int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
> @@ -541,6 +557,16 @@ struct otx_ep_buf_free_info {
> #define OTX_EP_CLEAR_SLIST_DBELL 0xFFFFFFFF
> #define OTX_EP_CLEAR_SDP_OUT_PKT_CNT 0xFFFFFFFFF
>
> +/* Max overhead includes
> + * - Ethernet hdr
> + * - CRC
> + * - nested VLANs
> + * - octeon rx info
> + */
> +#define OTX_EP_ETH_OVERHEAD \
> + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
> + (2 * RTE_VLAN_HLEN) + OTX_EP_DROQ_INFO_SIZE)
> +
> /* PCI IDs */
> #define PCI_VENDOR_ID_CAVIUM 0x177D
>
> diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
> index 0f710b1ffa..885fbb475f 100644
> --- a/drivers/net/octeon_ep/otx_ep_ethdev.c
> +++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
> @@ -10,6 +10,7 @@
> #include "otx2_ep_vf.h"
> #include "cnxk_ep_vf.h"
> #include "otx_ep_rxtx.h"
> +#include "otx_ep_mbox.h"
>
> #define OTX_EP_DEV(_eth_dev) \
> ((struct otx_ep_device *)(_eth_dev)->data->dev_private)
> @@ -31,15 +32,24 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
> struct rte_eth_dev_info *devinfo)
> {
> struct otx_ep_device *otx_epvf;
> + int max_rx_pktlen;
>
> otx_epvf = OTX_EP_DEV(eth_dev);
>
> + max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
> + if (!max_rx_pktlen) {
> + otx_ep_err("Failed to get Max Rx packet length");
> + return -EINVAL;
> + }
> +
> devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
> devinfo->max_rx_queues = otx_epvf->max_rx_queues;
> devinfo->max_tx_queues = otx_epvf->max_tx_queues;
>
> devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
> - devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
> + devinfo->max_rx_pktlen = max_rx_pktlen;
> + devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
> + devinfo->min_mtu = RTE_ETHER_MIN_LEN;
> devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
> devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
>
> @@ -54,6 +64,71 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
> return 0;
> }
>
> +static int
> +otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
> +{
> + RTE_SET_USED(wait_to_complete);
> +
> + if (!eth_dev->data->dev_started)
> + return 0;
> + struct rte_eth_link link;
> + int ret = 0;
> +
> + memset(&link, 0, sizeof(link));
> + ret = otx_ep_mbox_get_link_info(eth_dev, &link);
> + if (ret)
> + return -EINVAL;
> + otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
> + link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
> + return rte_eth_linkstatus_set(eth_dev, &link);
> +}
> +
> +static int
> +otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
> +{
> + struct rte_eth_dev_info devinfo;
> + int32_t ret = 0;
> +
> + if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
> + otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
> + return -EPERM;
> + }
> +
> + /* Check if MTU is within the allowed range */
> + if (mtu < devinfo.min_mtu) {
> + otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
> + return -EINVAL;
> + }
> +
> + if (mtu > devinfo.max_mtu) {
> + otx_ep_err("Invalid MTU %u; higher than maximum MTU %u", mtu, devinfo.max_mtu);
> + return -EINVAL;
> + }
> +
> + ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
> + if (ret)
> + return -EINVAL;
> +
> + otx_ep_dbg("MTU is set to %u", mtu);
> +
> + return 0;
> +}
> +
> +static int
> +otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + int ret;
> +
> + ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
> + if (ret)
> + return -EINVAL;
> + otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
> + return 0;
> +}
> +
> static int
> otx_ep_dev_start(struct rte_eth_dev *eth_dev)
> {
> @@ -78,6 +153,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev)
> rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
> }
>
> + otx_ep_dev_link_update(eth_dev, 0);
> otx_ep_info("dev started\n");
>
> return 0;
> @@ -454,6 +530,7 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
> struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
> uint32_t num_queues, q_no;
>
> + otx_ep_mbox_send_dev_exit(eth_dev);
> otx_epvf->fn_list.disable_io_queues(otx_epvf);
> num_queues = otx_epvf->nb_rx_queues;
> for (q_no = 0; q_no < num_queues; q_no++) {
> @@ -482,19 +559,17 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
> }
>
> static int
> -otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
> +otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> {
> - RTE_SET_USED(wait_to_complete);
> -
> - if (!eth_dev->data->dev_started)
> - return 0;
> - struct rte_eth_link link;
> + int ret;
>
> - memset(&link, 0, sizeof(link));
> - link.link_status = RTE_ETH_LINK_UP;
> - link.link_speed = RTE_ETH_SPEED_NUM_10G;
> - link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> - return rte_eth_linkstatus_set(eth_dev, &link);
> + ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
> + if (ret)
> + return -EINVAL;
> + otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + return 0;
> }
>
> /* Define our ethernet definitions */
> @@ -511,6 +586,8 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
> .stats_reset = otx_ep_dev_stats_reset,
> .link_update = otx_ep_dev_link_update,
> .dev_close = otx_ep_dev_close,
> + .mtu_set = otx_ep_dev_mtu_set,
> + .mac_addr_set = otx_ep_dev_set_default_mac_addr,
> };
>
> static int
> @@ -526,6 +603,37 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
> return 0;
> }
>
> +static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + int ret_val;
> +
> + memset(mac_addr, 0, sizeof(struct rte_ether_addr));
> + ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
> + if (!ret_val) {
> + if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
> + otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + rte_eth_random_addr(mac_addr->addr_bytes);
> + otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
> + if (ret_val) {
> + otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + return ret_val;
> + }
> + }
> + otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + } else {
> + otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n",
> + ret_val);
> + return ret_val;
> + }
> + return 0;
> +}
> +
> static int
> otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> {
> @@ -541,6 +649,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> otx_epvf->eth_dev = eth_dev;
> otx_epvf->port_id = eth_dev->data->port_id;
> eth_dev->dev_ops = &otx_ep_eth_dev_ops;
> + rte_spinlock_init(&otx_epvf->mbox_lock);
> eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
> if (eth_dev->data->mac_addrs == NULL) {
> otx_ep_err("MAC addresses memory allocation failed\n");
> @@ -572,6 +681,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> return -EINVAL;
> }
>
> + if (otx_ep_mbox_version_check(eth_dev))
> + return -EINVAL;
> +
> + if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
> + (struct rte_ether_addr *)&vf_mac_addr)) {
> + otx_ep_err("set mac addr failed\n");
> + return -ENODEV;
> + }
> + rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
> +
> return 0;
> }
>
> diff --git a/drivers/net/octeon_ep/otx_ep_mbox.c b/drivers/net/octeon_ep/otx_ep_mbox.c
> new file mode 100644
> index 0000000000..1ad36e14c8
> --- /dev/null
> +++ b/drivers/net/octeon_ep/otx_ep_mbox.c
> @@ -0,0 +1,309 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#include <ethdev_pci.h>
> +#include <rte_ether.h>
> +#include <rte_kvargs.h>
> +
> +#include "otx_ep_common.h"
> +#include "otx_ep_vf.h"
> +#include "otx2_ep_vf.h"
> +#include "cnxk_ep_vf.h"
> +#include "otx_ep_mbox.h"
> +
> +static int
> +__otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
> + union otx_ep_mbox_word cmd,
> + union otx_ep_mbox_word *rsp)
> +{
> + volatile uint64_t reg_val = 0ull;
> + int count = 0;
> +
> + cmd.s.type = OTX_EP_MBOX_TYPE_CMD;
> + otx2_write64(cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
> +
> + /* No response for notification messages */
> + if (!rsp)
> + return 0;
> +
> + for (count = 0; count < OTX_EP_MBOX_TIMEOUT_MS; count++) {
> + rte_delay_ms(1);
> + reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
> + if (reg_val != cmd.u64) {
> + rsp->u64 = reg_val;
> + break;
> + }
> + }
> + if (count == OTX_EP_MBOX_TIMEOUT_MS) {
> + otx_ep_err("mbox send Timeout count:%d\n", count);
> + return OTX_EP_MBOX_TIMEOUT_MS;
> + }
> + if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) {
> + otx_ep_err("mbox received NACK from PF\n");
> + return OTX_EP_MBOX_CMD_STATUS_NACK;
> + }
> +
> + rsp->u64 = reg_val;
> + return 0;
> +}
> +
> +static int
> +otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
> + union otx_ep_mbox_word cmd,
> + union otx_ep_mbox_word *rsp)
> +{
> + int ret;
> +
> + rte_spinlock_lock(&otx_ep->mbox_lock);
> + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, rsp);
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return ret;
> +}
> +
> +static int
> +otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep,
> + enum otx_ep_mbox_opcode opcode,
> + uint8_t *data, int32_t *size)
> +{
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int read_cnt, i = 0, ret;
> + int data_len = 0, tmp_len = 0;
> +
> + rte_spinlock_lock(&otx_ep->mbox_lock);
> + cmd.u64 = 0;
> + cmd.s_data.opcode = opcode;
> + cmd.s_data.frag = 0;
> + /* Send cmd to read data from PF */
> + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("mbox bulk read data request failed\n");
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return ret;
> + }
> + /* PF sends the data length of requested CMD
> + * in ACK
> + */
> + memcpy(&data_len, rsp.s_data.data, sizeof(data_len));
> + tmp_len = data_len;
> + cmd.u64 = 0;
> + rsp.u64 = 0;
> + cmd.s_data.opcode = opcode;
> + cmd.s_data.frag = 1;
> + while (data_len) {
> + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("mbox bulk read data request failed\n");
> + otx_ep->mbox_data_index = 0;
> + memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return ret;
> + }
> + if (data_len > OTX_EP_MBOX_MAX_DATA_SIZE) {
> + data_len -= OTX_EP_MBOX_MAX_DATA_SIZE;
> + read_cnt = OTX_EP_MBOX_MAX_DATA_SIZE;
> + } else {
> + read_cnt = data_len;
> + data_len = 0;
> + }
> + for (i = 0; i < read_cnt; i++) {
> + otx_ep->mbox_data_buf[otx_ep->mbox_data_index] =
> + rsp.s_data.data[i];
> + otx_ep->mbox_data_index++;
> + }
> + cmd.u64 = 0;
> + rsp.u64 = 0;
> + cmd.s_data.opcode = opcode;
> + cmd.s_data.frag = 1;
> + }
> + memcpy(data, otx_ep->mbox_data_buf, tmp_len);
> + *size = tmp_len;
> + otx_ep->mbox_data_index = 0;
> + memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return 0;
> +}
> +
> +int
> +otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret = 0;
> +
> + cmd.u64 = 0;
> + cmd.s_set_mtu.opcode = OTX_EP_MBOX_CMD_SET_MTU;
> + cmd.s_set_mtu.mtu = mtu;
> +
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("set MTU failed\n");
> + return -EINVAL;
> + }
> + otx_ep_dbg("mtu set success mtu %u\n", mtu);
> +
> + return 0;
> +}
> +
> +int
> +otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int i, ret;
> +
> + cmd.u64 = 0;
> + cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_SET_MAC_ADDR;
> + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
> + cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i];
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("set MAC address failed\n");
> + return -EINVAL;
> + }
> + otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
> + __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
> + rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
> + return 0;
> +}
> +
> +int
> +otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int i, ret;
> +
> + cmd.u64 = 0;
> + cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("get MAC address failed\n");
> + return -EINVAL;
> + }
> + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
> + mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i];
> + otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
> + __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
> + return 0;
> +}
> +
> +int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
> + uint8_t *oper_up)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret;
> +
> + cmd.u64 = 0;
> + cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("Get link status failed\n");
> + return -EINVAL;
> + }
> + *oper_up = rsp.s_link_status.status;
> + return 0;
> +}
> +
> +int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev,
> + struct rte_eth_link *link)
> +{
> + int32_t ret, size;
> + struct otx_ep_iface_link_info link_info;
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + memset(&link_info, 0, sizeof(struct otx_ep_iface_link_info));
> + ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO,
> + (uint8_t *)&link_info, (int32_t *)&size);
> + if (ret) {
> + otx_ep_err("Get link info failed\n");
> + return ret;
> + }
> + link->link_status = RTE_ETH_LINK_UP;
> + link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> + link->link_autoneg = (link_info.autoneg ==
> + OTX_EP_LINK_AUTONEG) ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
> +
> + link->link_autoneg = link_info.autoneg;
> + link->link_speed = link_info.speed;
> + return 0;
> +}
> +
> +void
> +otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep)
> +{
> + rte_write64(0x2, (uint8_t *)otx_ep->hw_addr +
> + CNXK_EP_R_MBOX_PF_VF_INT(0));
> +}
> +
> +void
> +otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep)
> +{
> + rte_write64(0x00, (uint8_t *)otx_ep->hw_addr +
> + CNXK_EP_R_MBOX_PF_VF_INT(0));
> +}
> +
> +int
> +otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret;
> +
> + rsp.u64 = 0;
> + cmd.u64 = 0;
> + cmd.s_get_mtu.opcode = OTX_EP_MBOX_CMD_GET_MTU;
> +
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret)
> + return ret;
> + return rsp.s_get_mtu.mtu;
> +}
> +
> +int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret;
> +
> + cmd.u64 = 0;
> + cmd.s_version.opcode = OTX_EP_MBOX_CMD_VERSION;
> + cmd.s_version.version = OTX_EP_MBOX_VERSION;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (!ret)
> + return 0;
> + if (ret == OTX_EP_MBOX_CMD_STATUS_NACK) {
> + otx_ep_err("VF Mbox version:%u is not compatible with PF\n",
> + (uint32_t)cmd.s_version.version);
> + }
> + return ret;
> +}
> +
> +int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + int ret;
> +
> + cmd.u64 = 0;
> + cmd.s_version.opcode = OTX_EP_MBOX_CMD_DEV_REMOVE;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, NULL);
> + return ret;
> +}
> diff --git a/drivers/net/octeon_ep/otx_ep_mbox.h b/drivers/net/octeon_ep/otx_ep_mbox.h
> new file mode 100644
> index 0000000000..9df3c53edd
> --- /dev/null
> +++ b/drivers/net/octeon_ep/otx_ep_mbox.h
> @@ -0,0 +1,163 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#ifndef _OTX_EP_MBOX_H_
> +#define _OTX_EP_MBOX_H_
> +
> +
> +#define OTX_EP_MBOX_VERSION 1
> +
> +enum otx_ep_mbox_opcode {
> + OTX_EP_MBOX_CMD_VERSION,
> + OTX_EP_MBOX_CMD_SET_MTU,
> + OTX_EP_MBOX_CMD_SET_MAC_ADDR,
> + OTX_EP_MBOX_CMD_GET_MAC_ADDR,
> + OTX_EP_MBOX_CMD_GET_LINK_INFO,
> + OTX_EP_MBOX_CMD_GET_STATS,
> + OTX_EP_MBOX_CMD_SET_RX_STATE,
> + OTX_EP_MBOX_CMD_SET_LINK_STATUS,
> + OTX_EP_MBOX_CMD_GET_LINK_STATUS,
> + OTX_EP_MBOX_CMD_GET_MTU,
> + OTX_EP_MBOX_CMD_DEV_REMOVE,
> + OTX_EP_MBOX_CMD_LAST,
> +};
> +
> +enum otx_ep_mbox_word_type {
> + OTX_EP_MBOX_TYPE_CMD,
> + OTX_EP_MBOX_TYPE_RSP_ACK,
> + OTX_EP_MBOX_TYPE_RSP_NACK,
> +};
> +
> +enum otx_ep_mbox_cmd_status {
> + OTX_EP_MBOX_CMD_STATUS_NOT_SETUP = 1,
> + OTX_EP_MBOX_CMD_STATUS_TIMEDOUT = 2,
> + OTX_EP_MBOX_CMD_STATUS_NACK = 3,
> + OTX_EP_MBOX_CMD_STATUS_BUSY = 4
> +};
> +
> +enum otx_ep_mbox_state {
> + OTX_EP_MBOX_STATE_IDLE = 0,
> + OTX_EP_MBOX_STATE_BUSY = 1,
> +};
> +
> +enum otx_ep_link_status {
> + OTX_EP_LINK_STATUS_DOWN,
> + OTX_EP_LINK_STATUS_UP,
> +};
> +
> +enum otx_ep_link_duplex {
> + OTX_EP_LINK_HALF_DUPLEX,
> + OTX_EP_LINK_FULL_DUPLEX,
> +};
> +
> +enum otx_ep_link_autoneg {
> + OTX_EP_LINK_FIXED,
> + OTX_EP_LINK_AUTONEG,
> +};
> +
> +#define OTX_EP_MBOX_TIMEOUT_MS 1200
> +#define OTX_EP_MBOX_MAX_RETRIES 2
> +#define OTX_EP_MBOX_MAX_DATA_SIZE 6
> +#define OTX_EP_MBOX_MAX_DATA_BUF_SIZE 256
> +#define OTX_EP_MBOX_MORE_FRAG_FLAG 1
> +#define OTX_EP_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
> +
> +union otx_ep_mbox_word {
> + uint64_t u64;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t data:48;
> + } s;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t frag:1;
> + uint64_t rsvd:5;
> + uint8_t data[6];
> + } s_data;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t version:48;
> + } s_version;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint8_t mac_addr[6];
> + } s_set_mac;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t mtu:48;
> + } s_set_mtu;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t mtu:48;
> + } s_get_mtu;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t state:1;
> + uint64_t rsvd:53;
> + } s_link_state;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t status:1;
> + uint64_t rsvd:53;
> + } s_link_status;
> +} __rte_packed;
> +
> +/* Hardware interface link state information. */
> +struct otx_ep_iface_link_info {
> + /* Bitmap of Supported link speeds/modes. */
> + uint64_t supported_modes;
> +
> + /* Bitmap of Advertised link speeds/modes. */
> + uint64_t advertised_modes;
> +
> + /* Negotiated link speed in Mbps. */
> + uint32_t speed;
> +
> + /* MTU */
> + uint16_t mtu;
> +
> + /* Autonegotiation state. */
> +#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
> +#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
> + uint8_t autoneg;
> +
> + /* Pause frames setting. */
> +#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
> +#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
> + uint8_t pause;
> +
> + /* Admin state of the link (ifconfig <iface> up/down */
> + uint8_t admin_up;
> +
> + /* Operational state of the link: physical link is up down */
> + uint8_t oper_up;
> +};
> +
> +int otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
> +int otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr);
> +int otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr);
> +int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
> + uint8_t *oper_up);
> +int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev, struct rte_eth_link *link);
> +void otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep);
> +void otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep);
> +int otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev);
> +int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev);
> +int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev);
> +#endif
> --
> 2.31.1
>