From mboxrd@z Thu Jan 1 00:00:00 1970 From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: , Subject: [PATCH v2 13/24] net/cnxk: representor ethdev ops Date: Tue, 19 Dec 2023 23:09:52 +0530 Message-ID: <20231219174003.72901-14-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20231219174003.72901-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20231219174003.72901-1-hkalra@marvell.com> MIME-Version: 1.0 Content-Type: text/plain List-Id: DPDK patches and discussions Implement ethernet device operation callbacks for the port representor PMD. Signed-off-by: Harman Kalra --- drivers/net/cnxk/cnxk_rep.c | 28 +- drivers/net/cnxk/cnxk_rep.h | 35 +++ drivers/net/cnxk/cnxk_rep_msg.h | 8 + drivers/net/cnxk/cnxk_rep_ops.c | 495 ++++++++++++++++++++++++++++++-- 4 files changed, 523 insertions(+), 43 deletions(-) diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c index 3b01856bc8..6e2424db40 100644 --- a/drivers/net/cnxk/cnxk_rep.c +++ b/drivers/net/cnxk/cnxk_rep.c @@
-73,6 +73,8 @@ cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, ui int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev) { + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -80,6 +82,8 @@ cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev) rte_free(ethdev->data->mac_addrs); ethdev->data->mac_addrs = NULL; + rep_dev->parent_dev->repr_cnt.nb_repr_probed--; + return 0; } @@ -369,26 +373,6 @@ cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev) return rc; } -static uint16_t -cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) -{ - PLT_SET_USED(tx_queue); - PLT_SET_USED(tx_pkts); - PLT_SET_USED(nb_pkts); - - return 0; -} - -static uint16_t -cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) -{ - PLT_SET_USED(rx_queue); - PLT_SET_USED(rx_pkts); - PLT_SET_USED(nb_pkts); - - return 0; -} - static int cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params) { @@ -418,8 +402,8 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params) eth_dev->dev_ops = &cnxk_rep_dev_ops; /* Rx/Tx functions stubs to avoid crashing */ - eth_dev->rx_pkt_burst = cnxk_rep_rx_burst; - eth_dev->tx_pkt_burst = cnxk_rep_tx_burst; + eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy; + eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy; /* Only single queues for representor devices */ eth_dev->data->nb_rx_queues = 1; diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index 9172fae641..266dd4a688 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -7,6 +7,13 @@ #ifndef __CNXK_REP_H__ #define __CNXK_REP_H__ +#define CNXK_REP_TX_OFFLOAD_CAPA \ + (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ + RTE_ETH_TX_OFFLOAD_MULTI_SEGS) + +#define CNXK_REP_RX_OFFLOAD_CAPA \ + (RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_RSS_HASH | RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + /* Common ethdev ops */ extern struct eth_dev_ops cnxk_rep_dev_ops; @@ -57,12 +64,33 @@ struct cnxk_rep_dev { uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; }; +/* Inline functions */ +static inline void +cnxk_rep_lock(struct cnxk_rep_dev *rep) +{ + rte_spinlock_lock(&rep->parent_dev->rep_lock); +} + +static inline void +cnxk_rep_unlock(struct cnxk_rep_dev *rep) +{ + rte_spinlock_unlock(&rep->parent_dev->rep_lock); +} + static inline struct cnxk_rep_dev * cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev) { return eth_dev->data->dev_private; } +static __rte_always_inline void +cnxk_rep_pool_buffer_stats(struct rte_mempool *pool) +{ + plt_rep_dbg(" pool %s size %d buffer count in use %d available %d\n", pool->name, + pool->size, rte_mempool_in_use_count(pool), rte_mempool_avail_count(pool)); +} + +/* Prototypes */ int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev); int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev); int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev); @@ -85,5 +113,12 @@ int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats) int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev); int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops); int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id); +int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev); +int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev); +int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr 
*addr); +uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id); +void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id); #endif /* __CNXK_REP_H__ */ diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h index fb84d58848..37953ac74f 100644 --- a/drivers/net/cnxk/cnxk_rep_msg.h +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -19,6 +19,8 @@ typedef enum CNXK_REP_MSG { CNXK_REP_MSG_READY = 0, CNXK_REP_MSG_ACK, CNXK_REP_MSG_EXIT, + /* Ethernet operation msgs */ + CNXK_REP_MSG_ETH_SET_MAC, /* End of messaging sequence */ CNXK_REP_MSG_END, } cnxk_rep_msg_t; @@ -81,6 +83,12 @@ typedef struct cnxk_rep_msg_exit_data { uint16_t data[]; } __rte_packed cnxk_rep_msg_exit_data_t; +/* Ethernet op - set mac */ +typedef struct cnxk_rep_msg_eth_mac_set_meta { + uint16_t portid; + uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; +} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t; + void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type, uint32_t size); void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz, diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c index 67dcc422e3..4b3fe28acc 100644 --- a/drivers/net/cnxk/cnxk_rep_ops.c +++ b/drivers/net/cnxk/cnxk_rep_ops.c @@ -3,25 +3,221 @@ */ #include +#include + +#define MEMPOOL_CACHE_SIZE 256 +#define TX_DESC_PER_QUEUE 512 +#define RX_DESC_PER_QUEUE 256 +#define NB_REP_VDEV_MBUF 1024 + +static uint16_t +cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct cnxk_rep_txq *txq = tx_queue; + struct cnxk_rep_dev *rep_dev; + uint16_t n_tx; + + if (unlikely(!txq)) + return 0; + + rep_dev = txq->rep_dev; + plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid); + n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts, + NIX_TX_OFFLOAD_VLAN_QINQ_F); + return n_tx; +} + +static uint16_t +cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct cnxk_rep_rxq *rxq = rx_queue; + struct cnxk_rep_dev *rep_dev; + uint16_t n_rx; + + if (unlikely(!rxq)) + return 0; + + rep_dev = rxq->rep_dev; + n_rx = cnxk_eswitch_dev_rx_burst(rep_dev->parent_dev, rxq->qid, rx_pkts, nb_pkts); + if (n_rx == 0) + return 0; + + plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid); + return n_rx; +} + +uint16_t +cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + PLT_SET_USED(tx_queue); + PLT_SET_USED(tx_pkts); + PLT_SET_USED(nb_pkts); + + return 0; +} + +uint16_t +cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + PLT_SET_USED(rx_queue); + PLT_SET_USED(rx_pkts); + PLT_SET_USED(nb_pkts); + + return 0; +} int cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) { - PLT_SET_USED(ethdev); + struct rte_eth_link link; PLT_SET_USED(wait_to_complete); + + memset(&link, 0, sizeof(link)); + if (ethdev->data->dev_started) + link.link_status = RTE_ETH_LINK_UP; + else + link.link_status = RTE_ETH_LINK_DOWN; + + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + link.link_autoneg = RTE_ETH_LINK_FIXED; + link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; + + return rte_eth_linkstatus_set(ethdev, &link); +} + +int +cnxk_rep_dev_info_get(struct rte_eth_dev 
*ethdev, struct rte_eth_dev_info *dev_info) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + uint32_t max_rx_pktlen; + + max_rx_pktlen = (roc_nix_max_pkt_len(&rep_dev->parent_dev->nix) + RTE_ETHER_CRC_LEN - + CNXK_NIX_MAX_VTAG_ACT_SIZE); + + dev_info->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN; + dev_info->max_rx_pktlen = max_rx_pktlen; + dev_info->max_mac_addrs = roc_nix_mac_max_entries_get(&rep_dev->parent_dev->nix); + + dev_info->rx_offload_capa = CNXK_REP_RX_OFFLOAD_CAPA; + dev_info->tx_offload_capa = CNXK_REP_TX_OFFLOAD_CAPA; + dev_info->rx_queue_offload_capa = 0; + dev_info->tx_queue_offload_capa = 0; + + /* For the sake of symmetry, max_rx_queues = max_tx_queues */ + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + + /* MTU specifics */ + dev_info->max_mtu = dev_info->max_rx_pktlen - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); + dev_info->min_mtu = dev_info->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; + + /* Switch info specific */ + dev_info->switch_info.name = ethdev->device->name; + dev_info->switch_info.domain_id = rep_dev->switch_domain_id; + dev_info->switch_info.port_id = rep_dev->port_id; + return 0; } int -cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo) +cnxk_rep_representor_info_get(struct rte_eth_dev *ethdev, struct rte_eth_representor_info *info) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + + return cnxk_eswitch_representor_info_get(rep_dev->parent_dev, info); +} + +static int +rep_eth_conf_chk(const struct rte_eth_conf *conf, uint16_t nb_rx_queues) +{ + const struct rte_eth_rss_conf *rss_conf; + int ret = 0; + + if (conf->link_speeds != 0) { + plt_err("specific link speeds not supported"); + ret = -EINVAL; + } + + switch (conf->rxmode.mq_mode) { + case RTE_ETH_MQ_RX_RSS: + if (nb_rx_queues != 1) { + plt_err("Rx RSS is not supported with %u queues", nb_rx_queues); + ret = -EINVAL; + break; + } + + rss_conf = &conf->rx_adv_conf.rss_conf; + if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 || + rss_conf->rss_hf != 0) { + plt_err("Rx RSS configuration is not supported"); + ret = -EINVAL; + } + break; + case RTE_ETH_MQ_RX_NONE: + break; + default: + plt_err("Rx mode MQ modes other than RSS not supported"); + ret = -EINVAL; + break; + } + + if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) { + plt_err("Tx mode MQ modes not supported"); + ret = -EINVAL; + } + + if (conf->lpbk_mode != 0) { + plt_err("loopback not supported"); + ret = -EINVAL; + } + + if (conf->dcb_capability_en != 0) { + plt_err("priority-based flow control not supported"); + ret = -EINVAL; + } + + if (conf->intr_conf.lsc != 0) { + plt_err("link status change interrupt not supported"); + ret = -EINVAL; + } + + if (conf->intr_conf.rxq != 0) { + plt_err("receive queue interrupt not supported"); + ret = -EINVAL; + } + + if (conf->intr_conf.rmv != 0) { + plt_err("remove interrupt not supported"); + ret = -EINVAL; + } + + return ret; +} + +int +cnxk_rep_dev_configure(struct rte_eth_dev *ethdev) +{ + struct rte_eth_dev_data *ethdev_data = ethdev->data; + int rc = -1; + + rc = rep_eth_conf_chk(&ethdev_data->dev_conf, ethdev_data->nb_rx_queues); + if (rc) + goto fail; + + return 0; +fail: + return rc; +} + +int +cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev) { PLT_SET_USED(ethdev); - PLT_SET_USED(devinfo); return 0; } int -cnxk_rep_dev_configure(struct rte_eth_dev *ethdev) +cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev) { PLT_SET_USED(ethdev); return 0; @@ -30,21 +226,73 @@ cnxk_rep_dev_configure(struct
rte_eth_dev *ethdev) int cnxk_rep_dev_start(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc = 0, qid; + + ethdev->rx_pkt_burst = cnxk_rep_rx_burst; + ethdev->tx_pkt_burst = cnxk_rep_tx_burst; + + if (!rep_dev->is_vf_active) + return 0; + + if (!rep_dev->rxq || !rep_dev->txq) { + plt_err("Invalid rxq or txq for representor id %d", rep_dev->rep_id); + rc = -EINVAL; + goto fail; + } + + /* Start rx queues */ + qid = rep_dev->rxq->qid; + rc = cnxk_eswitch_rxq_start(rep_dev->parent_dev, qid); + if (rc) { + plt_err("Failed to start rxq %d, rc=%d", qid, rc); + goto fail; + } + + /* Start tx queues */ + qid = rep_dev->txq->qid; + rc = cnxk_eswitch_txq_start(rep_dev->parent_dev, qid); + if (rc) { + plt_err("Failed to start txq %d, rc=%d", qid, rc); + goto fail; + } + + /* Start rep_xport device only once after first representor gets active */ + if (!rep_dev->parent_dev->repr_cnt.nb_repr_started) { + rc = cnxk_eswitch_nix_rsrc_start(rep_dev->parent_dev); + if (rc) { + plt_err("Failed to start nix dev, rc %d", rc); + goto fail; + } + } + + ethdev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED; + ethdev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED; + + rep_dev->parent_dev->repr_cnt.nb_repr_started++; + return 0; +fail: + return rc; } int cnxk_rep_dev_close(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); - return 0; + return cnxk_rep_dev_uninit(ethdev); } int cnxk_rep_dev_stop(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + + ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy; + ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy; + cnxk_rep_rx_queue_stop(ethdev, 0); + cnxk_rep_tx_queue_stop(ethdev, 0); + rep_dev->parent_dev->repr_cnt.nb_repr_started--; + return 0; } @@ -53,39 +301,189 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16 unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) { - PLT_SET_USED(ethdev); - PLT_SET_USED(rx_queue_id); - PLT_SET_USED(nb_rx_desc); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + struct cnxk_rep_rxq *rxq = NULL; + uint16_t qid = 0; + int rc; + PLT_SET_USED(socket_id); - PLT_SET_USED(rx_conf); - PLT_SET_USED(mb_pool); + /* If no representee assigned, store the respective rxq parameters */ + if (!rep_dev->is_vf_active && !rep_dev->rxq) { + rxq = plt_zmalloc(sizeof(*rxq), RTE_CACHE_LINE_SIZE); + if (!rxq) { + rc = -ENOMEM; + plt_err("Failed to alloc RxQ for rep id %d", rep_dev->rep_id); + goto fail; + } + + rxq->qid = qid; + rxq->nb_desc = nb_rx_desc; + rxq->rep_dev = rep_dev; + rxq->mpool = mb_pool; + rxq->rx_conf = rx_conf; + rep_dev->rxq = rxq; + ethdev->data->rx_queues[rx_queue_id] = NULL; + + return 0; + } + + qid = rep_dev->rep_id; + rc = cnxk_eswitch_rxq_setup(rep_dev->parent_dev, qid, nb_rx_desc, rx_conf, mb_pool); + if (rc) { + plt_err("failed to setup eswitch queue id %d", qid); + goto fail; + } + + rxq = rep_dev->rxq; + if (!rxq) { + plt_err("Invalid RXQ handle for representor port %d rep id %d", rep_dev->port_id, + rep_dev->rep_id); + goto free_queue; + } + + rxq->qid = qid; + ethdev->data->rx_queues[rx_queue_id] = rxq; + ethdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + plt_rep_dbg("representor id %d portid %d rxq id %d", rep_dev->port_id, + ethdev->data->port_id, rxq->qid); + return 0; +free_queue: + cnxk_eswitch_rxq_release(rep_dev->parent_dev, qid); +fail: + return rc; +} + +void 
+cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id) +{ + struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!rxq) + return; + + plt_rep_dbg("Stopping rxq %u", rxq->qid); + + rc = cnxk_eswitch_rxq_stop(rep_dev->parent_dev, rxq->qid); + if (rc) + plt_err("Failed to stop rxq %d, rc=%d", rxq->qid, rc); + + ethdev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } void cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id) { - PLT_SET_USED(ethdev); - PLT_SET_USED(queue_id); + struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!rxq) { + plt_err("Invalid rxq retrieved for rep_id %d", rep_dev->rep_id); + return; + } + + plt_rep_dbg("Releasing rxq %u", rxq->qid); + + rc = cnxk_eswitch_rxq_release(rep_dev->parent_dev, rxq->qid); + if (rc) + plt_err("Failed to release rxq %d, rc=%d", rxq->qid, rc); } int cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - PLT_SET_USED(ethdev); - PLT_SET_USED(tx_queue_id); - PLT_SET_USED(nb_tx_desc); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + struct cnxk_rep_txq *txq = NULL; + int rc = 0, qid = 0; + PLT_SET_USED(socket_id); - PLT_SET_USED(tx_conf); + /* If no representee assigned, store the respective txq parameters */ + if (!rep_dev->is_vf_active && !rep_dev->txq) { + txq = plt_zmalloc(sizeof(*txq), RTE_CACHE_LINE_SIZE); + if (!txq) { + rc = -ENOMEM; + plt_err("failed to alloc txq for rep id %d", rep_dev->rep_id); + goto fail; + } + + txq->qid = qid; + txq->nb_desc = nb_tx_desc; + txq->tx_conf = tx_conf; + txq->rep_dev = rep_dev; + rep_dev->txq = txq; + + ethdev->data->tx_queues[tx_queue_id] = NULL; + + return 0; + } + + qid = rep_dev->rep_id; + rc = cnxk_eswitch_txq_setup(rep_dev->parent_dev, qid, nb_tx_desc, tx_conf); + if (rc) { + plt_err("failed to setup eswitch queue id %d", qid); + goto fail; + } + + txq = rep_dev->txq; + if (!txq) { + plt_err("Invalid TXQ handle for representor port %d rep id %d", rep_dev->port_id, + rep_dev->rep_id); + goto free_queue; + } + + txq->qid = qid; + ethdev->data->tx_queues[tx_queue_id] = txq; + ethdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + plt_rep_dbg("representor id %d portid %d txq id %d", rep_dev->port_id, + ethdev->data->port_id, txq->qid); + return 0; +free_queue: + cnxk_eswitch_txq_release(rep_dev->parent_dev, qid); +fail: + return rc; +} + +void +cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id) +{ + struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!txq) + return; + + plt_rep_dbg("Stopping txq %u", txq->qid); + + rc = cnxk_eswitch_txq_stop(rep_dev->parent_dev, txq->qid); + if (rc) + plt_err("Failed to stop txq %d, rc=%d", txq->qid, rc); + + ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } void cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id) { - PLT_SET_USED(ethdev); - PLT_SET_USED(queue_id); + struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!txq) { + plt_err("Invalid txq retrieved for rep_id %d", rep_dev->rep_id); + return; + } + + plt_rep_dbg("Releasing txq %u", txq->qid); + + rc =
cnxk_eswitch_txq_release(rep_dev->parent_dev, txq->qid); + if (rc) + plt_err("Failed to release txq %d, rc=%d", txq->qid, rc); } int @@ -111,15 +509,70 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **op return 0; } +int +cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta; + cnxk_rep_msg_ack_data_t adata; + uint32_t len = 0, rc; + void *buffer; + size_t size; + + /* If representor not representing any VF, return 0 */ + if (!rep_dev->is_vf_active) + return 0; + + size = CNXK_REP_MSG_MAX_BUFFER_SZ; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + msg_sm_meta.portid = rep_dev->rep_id; + rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN); + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta, + sizeof(cnxk_rep_msg_eth_set_mac_meta_t), + CNXK_REP_MSG_ETH_SET_MAC); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + if (adata.u.sval < 0) { + rc = adata.u.sval; + plt_err("Failed to set mac address, err %d", rc); + goto fail; + } + + rte_free(buffer); + + return 0; +fail: + rte_free(buffer); + return rc; +} + /* CNXK platform representor dev ops */ struct eth_dev_ops cnxk_rep_dev_ops = { .dev_infos_get = cnxk_rep_dev_info_get, + .representor_info_get = cnxk_rep_representor_info_get, .dev_configure = cnxk_rep_dev_configure, .dev_start = cnxk_rep_dev_start, .rx_queue_setup = cnxk_rep_rx_queue_setup, .rx_queue_release = cnxk_rep_rx_queue_release, .tx_queue_setup = cnxk_rep_tx_queue_setup, .tx_queue_release = cnxk_rep_tx_queue_release, + .promiscuous_enable = cnxk_rep_promiscuous_enable, + .promiscuous_disable = cnxk_rep_promiscuous_disable, + .mac_addr_set = cnxk_rep_mac_addr_set, .link_update = cnxk_rep_link_update, .dev_close = cnxk_rep_dev_close, .dev_stop = cnxk_rep_dev_stop, -- 2.18.0
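For context, a minimal usage sketch (not part of the patch) of how an application would exercise the callbacks added above through the public rte_ethdev API: dev_configure, single Rx/Tx queue setup, mac_addr_set, dev_start and link_update. The port_id, mempool, MAC address and descriptor counts are illustrative assumptions; the descriptor counts simply mirror the RX_DESC_PER_QUEUE/TX_DESC_PER_QUEUE defaults defined in cnxk_rep_ops.c, and the representor port is assumed to have been probed via a representor devargs on the eswitch PCI device.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_mempool.h>

/* Hypothetical helper: bring up one cnxk representor port.
 * Exercises cnxk_rep_dev_configure() (only MQ_RX_NONE/MQ_TX_NONE accepted),
 * cnxk_rep_{rx,tx}_queue_setup(), cnxk_rep_mac_addr_set() (proxied to the
 * representee over the rep messaging channel), cnxk_rep_dev_start() and
 * cnxk_rep_link_update() (link state mirrors dev_started).
 */
static int
rep_port_bringup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {0};	/* mq_mode 0 == RTE_ETH_MQ_RX_NONE / RTE_ETH_MQ_TX_NONE */
	struct rte_ether_addr mac = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};
	struct rte_eth_link link;
	int rc;

	/* Representors advertise a single Rx and a single Tx queue */
	rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (rc < 0)
		return rc;

	rc = rte_eth_rx_queue_setup(port_id, 0, 256, rte_eth_dev_socket_id(port_id),
				    NULL, mb_pool);
	if (rc < 0)
		return rc;

	rc = rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id), NULL);
	if (rc < 0)
		return rc;

	/* Sends CNXK_REP_MSG_ETH_SET_MAC to the representee; no-op if no VF is active */
	rc = rte_eth_dev_default_mac_addr_set(port_id, &mac);
	if (rc < 0)
		return rc;

	rc = rte_eth_dev_start(port_id);
	if (rc < 0)
		return rc;

	rc = rte_eth_link_get_nowait(port_id, &link);
	if (rc < 0)
		return rc;

	return link.link_status == RTE_ETH_LINK_UP ? 0 : -EIO;
}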