From: Harman Kalra <hkalra@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>,
Harman Kalra <hkalra@marvell.com>
Cc: <dev@dpdk.org>, <jerinj@marvell.com>
Subject: [PATCH v2 13/24] net/cnxk: representor ethdev ops
Date: Tue, 19 Dec 2023 23:09:52 +0530 [thread overview]
Message-ID: <20231219174003.72901-14-hkalra@marvell.com> (raw)
In-Reply-To: <20231219174003.72901-1-hkalra@marvell.com>
Implement Ethernet device operation callbacks for the
port representors PMD.
Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
drivers/net/cnxk/cnxk_rep.c | 28 +-
drivers/net/cnxk/cnxk_rep.h | 35 +++
drivers/net/cnxk/cnxk_rep_msg.h | 8 +
drivers/net/cnxk/cnxk_rep_ops.c | 495 ++++++++++++++++++++++++++++++--
4 files changed, 523 insertions(+), 43 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index 3b01856bc8..6e2424db40 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -73,6 +73,8 @@ cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, ui
int
cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@@ -80,6 +82,8 @@ cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
rte_free(ethdev->data->mac_addrs);
ethdev->data->mac_addrs = NULL;
+ rep_dev->parent_dev->repr_cnt.nb_repr_probed--;
+
return 0;
}
@@ -369,26 +373,6 @@ cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev)
return rc;
}
-static uint16_t
-cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- PLT_SET_USED(tx_queue);
- PLT_SET_USED(tx_pkts);
- PLT_SET_USED(nb_pkts);
-
- return 0;
-}
-
-static uint16_t
-cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- PLT_SET_USED(rx_queue);
- PLT_SET_USED(rx_pkts);
- PLT_SET_USED(nb_pkts);
-
- return 0;
-}
-
static int
cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
{
@@ -418,8 +402,8 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
eth_dev->dev_ops = &cnxk_rep_dev_ops;
/* Rx/Tx functions stubs to avoid crashing */
- eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
- eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+ eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+ eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
/* Only single queues for representor devices */
eth_dev->data->nb_rx_queues = 1;
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index 9172fae641..266dd4a688 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -7,6 +7,13 @@
#ifndef __CNXK_REP_H__
#define __CNXK_REP_H__
+#define CNXK_REP_TX_OFFLOAD_CAPA \
+ (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CNXK_REP_RX_OFFLOAD_CAPA \
+ (RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_RSS_HASH | RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+
/* Common ethdev ops */
extern struct eth_dev_ops cnxk_rep_dev_ops;
@@ -57,12 +64,33 @@ struct cnxk_rep_dev {
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
};
+/* Inline functions */
+static inline void
+cnxk_rep_lock(struct cnxk_rep_dev *rep)
+{
+ rte_spinlock_lock(&rep->parent_dev->rep_lock);
+}
+
+static inline void
+cnxk_rep_unlock(struct cnxk_rep_dev *rep)
+{
+ rte_spinlock_unlock(&rep->parent_dev->rep_lock);
+}
+
static inline struct cnxk_rep_dev *
cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
{
return eth_dev->data->dev_private;
}
+static __rte_always_inline void
+cnxk_rep_pool_buffer_stats(struct rte_mempool *pool)
+{
+ plt_rep_dbg(" pool %s size %d buffer count in use %d available %d\n", pool->name,
+ pool->size, rte_mempool_in_use_count(pool), rte_mempool_avail_count(pool));
+}
+
+/* Prototypes */
int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev);
int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev);
@@ -85,5 +113,12 @@ int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id);
+int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev);
+int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev);
+int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr);
+uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
+void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
#endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h
index fb84d58848..37953ac74f 100644
--- a/drivers/net/cnxk/cnxk_rep_msg.h
+++ b/drivers/net/cnxk/cnxk_rep_msg.h
@@ -19,6 +19,8 @@ typedef enum CNXK_REP_MSG {
CNXK_REP_MSG_READY = 0,
CNXK_REP_MSG_ACK,
CNXK_REP_MSG_EXIT,
+ /* Ethernet operation msgs */
+ CNXK_REP_MSG_ETH_SET_MAC,
/* End of messaging sequence */
CNXK_REP_MSG_END,
} cnxk_rep_msg_t;
@@ -81,6 +83,12 @@ typedef struct cnxk_rep_msg_exit_data {
uint16_t data[];
} __rte_packed cnxk_rep_msg_exit_data_t;
+/* Ethernet op - set mac */
+typedef struct cnxk_rep_msg_eth_mac_set_meta {
+ uint16_t portid;
+ uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
+} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;
+
void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type,
uint32_t size);
void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz,
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
index 67dcc422e3..4b3fe28acc 100644
--- a/drivers/net/cnxk/cnxk_rep_ops.c
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -3,25 +3,221 @@
*/
#include <cnxk_rep.h>
+#include <cnxk_rep_msg.h>
+
+#define MEMPOOL_CACHE_SIZE 256
+#define TX_DESC_PER_QUEUE 512
+#define RX_DESC_PER_QUEUE 256
+#define NB_REP_VDEV_MBUF 1024
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct cnxk_rep_txq *txq = tx_queue;
+ struct cnxk_rep_dev *rep_dev;
+ uint16_t n_tx;
+
+ if (unlikely(!txq))
+ return 0;
+
+ rep_dev = txq->rep_dev;
+ plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid);
+ n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts,
+ NIX_TX_OFFLOAD_VLAN_QINQ_F);
+ return n_tx;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct cnxk_rep_rxq *rxq = rx_queue;
+ struct cnxk_rep_dev *rep_dev;
+ uint16_t n_rx;
+
+ if (unlikely(!rxq))
+ return 0;
+
+ rep_dev = rxq->rep_dev;
+ n_rx = cnxk_eswitch_dev_rx_burst(rep_dev->parent_dev, rxq->qid, rx_pkts, nb_pkts);
+ if (n_rx == 0)
+ return 0;
+
+ plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid);
+ return n_rx;
+}
+
+uint16_t
+cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ PLT_SET_USED(tx_queue);
+ PLT_SET_USED(tx_pkts);
+ PLT_SET_USED(nb_pkts);
+
+ return 0;
+}
+
+uint16_t
+cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PLT_SET_USED(rx_queue);
+ PLT_SET_USED(rx_pkts);
+ PLT_SET_USED(nb_pkts);
+
+ return 0;
+}
int
cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
- PLT_SET_USED(ethdev);
+ struct rte_eth_link link;
PLT_SET_USED(wait_to_complete);
+
+ memset(&link, 0, sizeof(link));
+ if (ethdev->data->dev_started)
+ link.link_status = RTE_ETH_LINK_UP;
+ else
+ link.link_status = RTE_ETH_LINK_DOWN;
+
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+ return rte_eth_linkstatus_set(ethdev, &link);
+}
+
+int
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)
+{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ uint32_t max_rx_pktlen;
+
+ max_rx_pktlen = (roc_nix_max_pkt_len(&rep_dev->parent_dev->nix) + RTE_ETHER_CRC_LEN -
+ CNXK_NIX_MAX_VTAG_ACT_SIZE);
+
+ dev_info->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
+ dev_info->max_rx_pktlen = max_rx_pktlen;
+ dev_info->max_mac_addrs = roc_nix_mac_max_entries_get(&rep_dev->parent_dev->nix);
+
+ dev_info->rx_offload_capa = CNXK_REP_RX_OFFLOAD_CAPA;
+ dev_info->tx_offload_capa = CNXK_REP_TX_OFFLOAD_CAPA;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_queue_offload_capa = 0;
+
+ /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+
+ /* MTU specifics */
+ dev_info->max_mtu = dev_info->max_rx_pktlen - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+ dev_info->min_mtu = dev_info->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
+
+ /* Switch info specific */
+ dev_info->switch_info.name = ethdev->device->name;
+ dev_info->switch_info.domain_id = rep_dev->switch_domain_id;
+ dev_info->switch_info.port_id = rep_dev->port_id;
+
return 0;
}
int
-cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+cnxk_rep_representor_info_get(struct rte_eth_dev *ethdev, struct rte_eth_representor_info *info)
+{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
+ return cnxk_eswitch_representor_info_get(rep_dev->parent_dev, info);
+}
+
+static int
+rep_eth_conf_chk(const struct rte_eth_conf *conf, uint16_t nb_rx_queues)
+{
+ const struct rte_eth_rss_conf *rss_conf;
+ int ret = 0;
+
+ if (conf->link_speeds != 0) {
+ plt_err("specific link speeds not supported");
+ ret = -EINVAL;
+ }
+
+ switch (conf->rxmode.mq_mode) {
+ case RTE_ETH_MQ_RX_RSS:
+ if (nb_rx_queues != 1) {
+ plt_err("Rx RSS is not supported with %u queues", nb_rx_queues);
+ ret = -EINVAL;
+ break;
+ }
+
+ rss_conf = &conf->rx_adv_conf.rss_conf;
+ if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
+ rss_conf->rss_hf != 0) {
+ plt_err("Rx RSS configuration is not supported");
+ ret = -EINVAL;
+ }
+ break;
+ case RTE_ETH_MQ_RX_NONE:
+ break;
+ default:
+ plt_err("Rx mode MQ modes other than RSS not supported");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
+ plt_err("Tx mode MQ modes not supported");
+ ret = -EINVAL;
+ }
+
+ if (conf->lpbk_mode != 0) {
+ plt_err("loopback not supported");
+ ret = -EINVAL;
+ }
+
+ if (conf->dcb_capability_en != 0) {
+ plt_err("priority-based flow control not supported");
+ ret = -EINVAL;
+ }
+
+ if (conf->intr_conf.lsc != 0) {
+ plt_err("link status change interrupt not supported");
+ ret = -EINVAL;
+ }
+
+ if (conf->intr_conf.rxq != 0) {
+ plt_err("receive queue interrupt not supported");
+ ret = -EINVAL;
+ }
+
+ if (conf->intr_conf.rmv != 0) {
+ plt_err("remove interrupt not supported");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int
+cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+{
+ struct rte_eth_dev_data *ethdev_data = ethdev->data;
+ int rc = -1;
+
+ rc = rep_eth_conf_chk(ðdev_data->dev_conf, ethdev_data->nb_rx_queues);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ return rc;
+}
+
+int
+cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)
{
PLT_SET_USED(ethdev);
- PLT_SET_USED(devinfo);
return 0;
}
int
-cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)
{
PLT_SET_USED(ethdev);
return 0;
@@ -30,21 +226,73 @@ cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
int
cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ int rc = 0, qid;
+
+ ethdev->rx_pkt_burst = cnxk_rep_rx_burst;
+ ethdev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ if (!rep_dev->rxq || !rep_dev->txq) {
+ plt_err("Invalid rxq or txq for representor id %d", rep_dev->rep_id);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* Start rx queues */
+ qid = rep_dev->rxq->qid;
+ rc = cnxk_eswitch_rxq_start(rep_dev->parent_dev, qid);
+ if (rc) {
+ plt_err("Failed to start rxq %d, rc=%d", qid, rc);
+ goto fail;
+ }
+
+ /* Start tx queues */
+ qid = rep_dev->txq->qid;
+ rc = cnxk_eswitch_txq_start(rep_dev->parent_dev, qid);
+ if (rc) {
+ plt_err("Failed to start txq %d, rc=%d", qid, rc);
+ goto fail;
+ }
+
+ /* Start rep_xport device only once after first representor gets active */
+ if (!rep_dev->parent_dev->repr_cnt.nb_repr_started) {
+ rc = cnxk_eswitch_nix_rsrc_start(rep_dev->parent_dev);
+ if (rc) {
+ plt_err("Failed to start nix dev, rc %d", rc);
+ goto fail;
+ }
+ }
+
+ ethdev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
+ ethdev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ rep_dev->parent_dev->repr_cnt.nb_repr_started++;
+
return 0;
+fail:
+ return rc;
}
int
cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
- return 0;
+ return cnxk_rep_dev_uninit(ethdev);
}
int
cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
+ ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+ ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
+ cnxk_rep_rx_queue_stop(ethdev, 0);
+ cnxk_rep_tx_queue_stop(ethdev, 0);
+ rep_dev->parent_dev->repr_cnt.nb_repr_started--;
+
return 0;
}
@@ -53,39 +301,189 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16
unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(rx_queue_id);
- PLT_SET_USED(nb_rx_desc);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ struct cnxk_rep_rxq *rxq = NULL;
+ uint16_t qid = 0;
+ int rc;
+
PLT_SET_USED(socket_id);
- PLT_SET_USED(rx_conf);
- PLT_SET_USED(mb_pool);
+ /* If no representee assigned, store the respective rxq parameters */
+ if (!rep_dev->is_vf_active && !rep_dev->rxq) {
+ rxq = plt_zmalloc(sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+ if (!rxq) {
+ rc = -ENOMEM;
+ plt_err("Failed to alloc RxQ for rep id %d", rep_dev->rep_id);
+ goto fail;
+ }
+
+ rxq->qid = qid;
+ rxq->nb_desc = nb_rx_desc;
+ rxq->rep_dev = rep_dev;
+ rxq->mpool = mb_pool;
+ rxq->rx_conf = rx_conf;
+ rep_dev->rxq = rxq;
+ ethdev->data->rx_queues[rx_queue_id] = NULL;
+
+ return 0;
+ }
+
+ qid = rep_dev->rep_id;
+ rc = cnxk_eswitch_rxq_setup(rep_dev->parent_dev, qid, nb_rx_desc, rx_conf, mb_pool);
+ if (rc) {
+ plt_err("failed to setup eswitch queue id %d", qid);
+ goto fail;
+ }
+
+ rxq = rep_dev->rxq;
+ if (!rxq) {
+ plt_err("Invalid RXQ handle for representor port %d rep id %d", rep_dev->port_id,
+ rep_dev->rep_id);
+ goto free_queue;
+ }
+
+ rxq->qid = qid;
+ ethdev->data->rx_queues[rx_queue_id] = rxq;
+ ethdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ plt_rep_dbg("representor id %d portid %d rxq id %d", rep_dev->port_id,
+ ethdev->data->port_id, rxq->qid);
+
return 0;
+free_queue:
+ cnxk_eswitch_rxq_release(rep_dev->parent_dev, qid);
+fail:
+ return rc;
+}
+
+void
+cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+ struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ int rc;
+
+ if (!rxq)
+ return;
+
+ plt_rep_dbg("Stopping rxq %u", rxq->qid);
+
+ rc = cnxk_eswitch_rxq_stop(rep_dev->parent_dev, rxq->qid);
+ if (rc)
+ plt_err("Failed to stop rxq %d, rc=%d", rc, rxq->qid);
+
+ ethdev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
void
cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(queue_id);
+ struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ int rc;
+
+ if (!rxq) {
+ plt_err("Invalid rxq retrieved for rep_id %d", rep_dev->rep_id);
+ return;
+ }
+
+ plt_rep_dbg("Releasing rxq %u", rxq->qid);
+
+ rc = cnxk_eswitch_rxq_release(rep_dev->parent_dev, rxq->qid);
+ if (rc)
+ plt_err("Failed to release rxq %d, rc=%d", rc, rxq->qid);
}
int
cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(tx_queue_id);
- PLT_SET_USED(nb_tx_desc);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ struct cnxk_rep_txq *txq = NULL;
+ int rc = 0, qid = 0;
+
PLT_SET_USED(socket_id);
- PLT_SET_USED(tx_conf);
+ /* If no representee assigned, store the respective rxq parameters */
+ if (!rep_dev->is_vf_active && !rep_dev->txq) {
+ txq = plt_zmalloc(sizeof(*txq), RTE_CACHE_LINE_SIZE);
+ if (!txq) {
+ rc = -ENOMEM;
+ plt_err("failed to alloc txq for rep id %d", rep_dev->rep_id);
+ goto free_queue;
+ }
+
+ txq->qid = qid;
+ txq->nb_desc = nb_tx_desc;
+ txq->tx_conf = tx_conf;
+ txq->rep_dev = rep_dev;
+ rep_dev->txq = txq;
+
+ ethdev->data->tx_queues[tx_queue_id] = NULL;
+
+ return 0;
+ }
+
+ qid = rep_dev->rep_id;
+ rc = cnxk_eswitch_txq_setup(rep_dev->parent_dev, qid, nb_tx_desc, tx_conf);
+ if (rc) {
+ plt_err("failed to setup eswitch queue id %d", qid);
+ goto fail;
+ }
+
+ txq = rep_dev->txq;
+ if (!txq) {
+ plt_err("Invalid TXQ handle for representor port %d rep id %d", rep_dev->port_id,
+ rep_dev->rep_id);
+ goto free_queue;
+ }
+
+ txq->qid = qid;
+ ethdev->data->tx_queues[tx_queue_id] = txq;
+ ethdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ plt_rep_dbg("representor id %d portid %d txq id %d", rep_dev->port_id,
+ ethdev->data->port_id, txq->qid);
+
return 0;
+free_queue:
+ cnxk_eswitch_txq_release(rep_dev->parent_dev, qid);
+fail:
+ return rc;
+}
+
+void
+cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+ struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ int rc;
+
+ if (!txq)
+ return;
+
+ plt_rep_dbg("Releasing txq %u", txq->qid);
+
+ rc = cnxk_eswitch_txq_stop(rep_dev->parent_dev, txq->qid);
+ if (rc)
+ plt_err("Failed to stop txq %d, rc=%d", rc, txq->qid);
+
+ ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
void
cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(queue_id);
+ struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ int rc;
+
+ if (!txq) {
+ plt_err("Invalid txq retrieved for rep_id %d", rep_dev->rep_id);
+ return;
+ }
+
+ plt_rep_dbg("Releasing txq %u", txq->qid);
+
+ rc = cnxk_eswitch_txq_release(rep_dev->parent_dev, txq->qid);
+ if (rc)
+ plt_err("Failed to release txq %d, rc=%d", rc, txq->qid);
}
int
@@ -111,15 +509,70 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **op
return 0;
}
+int
+cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+ cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;
+ cnxk_rep_msg_ack_data_t adata;
+ uint32_t len = 0, rc;
+ void *buffer;
+ size_t size;
+
+ /* If representor not representing any VF, return 0 */
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+ buffer = plt_zmalloc(size, 0);
+ if (!buffer) {
+ plt_err("Failed to allocate mem");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ cnxk_rep_msg_populate_header(buffer, &len);
+
+ msg_sm_meta.portid = rep_dev->rep_id;
+ rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+ cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,
+ sizeof(cnxk_rep_msg_eth_set_mac_meta_t),
+ CNXK_REP_MSG_ETH_SET_MAC);
+ cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+ rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);
+ if (rc) {
+ plt_err("Failed to process the message, err %d", rc);
+ goto fail;
+ }
+
+ if (adata.u.sval < 0) {
+ rc = adata.u.sval;
+ plt_err("Failed to set mac address, err %d", rc);
+ goto fail;
+ }
+
+ rte_free(buffer);
+
+ return 0;
+fail:
+ rte_free(buffer);
+ return rc;
+}
+
/* CNXK platform representor dev ops */
struct eth_dev_ops cnxk_rep_dev_ops = {
.dev_infos_get = cnxk_rep_dev_info_get,
+ .representor_info_get = cnxk_rep_representor_info_get,
.dev_configure = cnxk_rep_dev_configure,
.dev_start = cnxk_rep_dev_start,
.rx_queue_setup = cnxk_rep_rx_queue_setup,
.rx_queue_release = cnxk_rep_rx_queue_release,
.tx_queue_setup = cnxk_rep_tx_queue_setup,
.tx_queue_release = cnxk_rep_tx_queue_release,
+ .promiscuous_enable = cnxk_rep_promiscuous_enable,
+ .promiscuous_disable = cnxk_rep_promiscuous_disable,
+ .mac_addr_set = cnxk_rep_mac_addr_set,
.link_update = cnxk_rep_link_update,
.dev_close = cnxk_rep_dev_close,
.dev_stop = cnxk_rep_dev_stop,
--
2.18.0
next prev parent reply other threads:[~2023-12-19 17:42 UTC|newest]
Thread overview: 142+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-11 16:34 [PATCH 0/9] net/cnxk: support for port representors Harman Kalra
2023-08-11 16:34 ` [PATCH 1/9] common/cnxk: debug log type for representors Harman Kalra
2023-08-11 16:34 ` [PATCH 2/9] net/cnxk: probing representor ports Harman Kalra
2023-08-11 16:34 ` [PATCH 3/9] common/cnxk: maintaining representor state Harman Kalra
2023-08-11 16:34 ` [PATCH 4/9] net/cnxk: callbacks for " Harman Kalra
2023-08-11 16:34 ` [PATCH 5/9] net/cnxk: add representor control plane Harman Kalra
2023-08-11 16:34 ` [PATCH 6/9] net/cnxk: representor ethdev ops Harman Kalra
2023-08-11 16:34 ` [PATCH 7/9] net/cnxk: representor flow ops Harman Kalra
2023-08-11 16:34 ` [PATCH 8/9] common/cnxk: support represented port for cnxk Harman Kalra
2023-08-11 16:34 ` [PATCH 9/9] net/cnxk: add " Harman Kalra
2023-12-19 17:39 ` [PATCH v2 00/24] net/cnxk: support for port representors Harman Kalra
2023-12-19 17:39 ` [PATCH v2 01/24] common/cnxk: add support for representors Harman Kalra
2023-12-19 17:39 ` [PATCH v2 02/24] net/cnxk: implementing eswitch device Harman Kalra
2024-01-04 12:30 ` Jerin Jacob
2023-12-19 17:39 ` [PATCH v2 03/24] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-01-04 12:34 ` Jerin Jacob
2023-12-19 17:39 ` [PATCH v2 04/24] net/cnxk: eswitch devargs parsing Harman Kalra
2023-12-19 17:39 ` [PATCH v2 05/24] net/cnxk: probing representor ports Harman Kalra
2023-12-19 17:39 ` [PATCH v2 06/24] common/cnxk: common NPC changes for eswitch Harman Kalra
2023-12-19 17:39 ` [PATCH v2 07/24] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-01-04 12:47 ` Jerin Jacob
2023-12-19 17:39 ` [PATCH v2 08/24] net/cnxk: eswitch flow configurations Harman Kalra
2023-12-19 17:39 ` [PATCH v2 09/24] net/cnxk: eswitch fastpath routines Harman Kalra
2023-12-19 17:39 ` [PATCH v2 10/24] net/cnxk: add representor control plane Harman Kalra
2023-12-19 17:39 ` [PATCH v2 11/24] common/cnxk: representee notification callback Harman Kalra
2023-12-19 17:39 ` [PATCH v2 12/24] net/cnxk: handling representee notification Harman Kalra
2023-12-19 17:39 ` Harman Kalra [this message]
2023-12-19 17:39 ` [PATCH v2 14/24] common/cnxk: get representees ethernet stats Harman Kalra
2023-12-19 17:39 ` [PATCH v2 15/24] net/cnxk: ethernet statistic for representor Harman Kalra
2023-12-19 17:39 ` [PATCH v2 16/24] common/cnxk: base support for eswitch VF Harman Kalra
2023-12-19 17:39 ` [PATCH v2 17/24] net/cnxk: eswitch VF as ethernet device Harman Kalra
2023-12-19 17:39 ` [PATCH v2 18/24] common/cnxk: support port representor and represented port Harman Kalra
2023-12-19 17:39 ` [PATCH v2 19/24] net/cnxk: add represented port pattern and action Harman Kalra
2023-12-19 17:39 ` [PATCH v2 20/24] net/cnxk: add port representor " Harman Kalra
2023-12-19 17:40 ` [PATCH v2 21/24] net/cnxk: generalize flow operation APIs Harman Kalra
2023-12-19 17:40 ` [PATCH v2 22/24] net/cnxk: flow create on representor ports Harman Kalra
2023-12-19 17:40 ` [PATCH v2 23/24] net/cnxk: other flow operations Harman Kalra
2023-12-19 17:40 ` [PATCH v2 24/24] doc: port representors in cnxk Harman Kalra
2023-12-20 9:37 ` Thomas Monjalon
2023-12-21 13:28 ` [EXT] " Harman Kalra
2023-12-21 18:33 ` Thomas Monjalon
2024-01-11 6:48 ` Harman Kalra
2024-02-01 13:07 ` [PATCH v3 00/23] net/cnxk: support for port representors Harman Kalra
2024-02-01 13:07 ` [PATCH v3 01/23] common/cnxk: add support for representors Harman Kalra
2024-02-01 13:07 ` [PATCH v3 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-02-01 13:07 ` [PATCH v3 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-02-01 13:07 ` [PATCH v3 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-02-01 13:07 ` [PATCH v3 05/23] net/cnxk: probing representor ports Harman Kalra
2024-02-01 13:07 ` [PATCH v3 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-02-01 13:07 ` [PATCH v3 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-02-01 13:07 ` [PATCH v3 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-02-01 13:07 ` [PATCH v3 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-02-01 13:07 ` [PATCH v3 10/23] net/cnxk: add representor control plane Harman Kalra
2024-02-01 13:07 ` [PATCH v3 11/23] common/cnxk: representee notification callback Harman Kalra
2024-02-01 13:07 ` [PATCH v3 12/23] net/cnxk: handling representee notification Harman Kalra
2024-02-01 13:07 ` [PATCH v3 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-02-01 13:07 ` [PATCH v3 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-02-01 13:07 ` [PATCH v3 15/23] net/cnxk: ethernet statistic for representor Harman Kalra
2024-02-01 13:07 ` [PATCH v3 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-02-01 13:07 ` [PATCH v3 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-02-01 13:07 ` [PATCH v3 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-02-01 13:07 ` [PATCH v3 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-02-01 13:07 ` [PATCH v3 20/23] net/cnxk: add representor " Harman Kalra
2024-02-01 13:07 ` [PATCH v3 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-02-01 13:07 ` [PATCH v3 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-02-01 13:07 ` [PATCH v3 23/23] net/cnxk: other flow operations Harman Kalra
2024-02-27 19:15 ` [PATCH v4 00/23] net/cnxk: support for port representors Harman Kalra
2024-02-27 19:15 ` [PATCH v4 01/23] common/cnxk: add support for representors Harman Kalra
2024-02-27 19:15 ` [PATCH v4 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-03-01 9:31 ` Jerin Jacob
2024-02-27 19:15 ` [PATCH v4 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-02-27 19:15 ` [PATCH v4 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-02-27 19:15 ` [PATCH v4 05/23] net/cnxk: probing representor ports Harman Kalra
2024-02-27 19:15 ` [PATCH v4 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-02-27 19:15 ` [PATCH v4 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-02-27 19:15 ` [PATCH v4 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-02-27 19:15 ` [PATCH v4 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-02-27 19:15 ` [PATCH v4 10/23] net/cnxk: add representor control plane Harman Kalra
2024-02-27 19:15 ` [PATCH v4 11/23] common/cnxk: representee notification callback Harman Kalra
2024-02-27 19:15 ` [PATCH v4 12/23] net/cnxk: handling representee notification Harman Kalra
2024-02-27 19:15 ` [PATCH v4 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-02-27 19:15 ` [PATCH v4 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-02-27 19:15 ` [PATCH v4 15/23] net/cnxk: ethernet statistics for representor Harman Kalra
2024-02-27 19:15 ` [PATCH v4 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-02-27 19:15 ` [PATCH v4 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-02-27 19:15 ` [PATCH v4 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-02-27 19:15 ` [PATCH v4 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-02-27 19:15 ` [PATCH v4 20/23] net/cnxk: add representor " Harman Kalra
2024-02-27 19:15 ` [PATCH v4 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-02-27 19:15 ` [PATCH v4 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-02-27 19:15 ` [PATCH v4 23/23] net/cnxk: other flow operations Harman Kalra
2024-03-01 9:35 ` Jerin Jacob
2024-03-01 19:14 ` [PATCH v5 00/23] net/cnxk: support for port representors Harman Kalra
2024-03-01 19:14 ` [PATCH v5 01/23] common/cnxk: add support for representors Harman Kalra
2024-03-01 19:14 ` [PATCH v5 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-03-01 19:14 ` [PATCH v5 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-03-01 19:14 ` [PATCH v5 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-03-01 19:14 ` [PATCH v5 05/23] net/cnxk: probing representor ports Harman Kalra
2024-03-01 19:14 ` [PATCH v5 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-03-01 19:14 ` [PATCH v5 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-03-01 19:14 ` [PATCH v5 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-03-01 19:14 ` [PATCH v5 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-03-01 19:14 ` [PATCH v5 10/23] net/cnxk: add representor control plane Harman Kalra
2024-03-01 19:14 ` [PATCH v5 11/23] common/cnxk: representee notification callback Harman Kalra
2024-03-01 19:14 ` [PATCH v5 12/23] net/cnxk: handling representee notification Harman Kalra
2024-03-01 19:14 ` [PATCH v5 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-03-01 19:14 ` [PATCH v5 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-03-01 19:14 ` [PATCH v5 15/23] net/cnxk: ethernet statistics for representor Harman Kalra
2024-03-01 19:14 ` [PATCH v5 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-03-01 19:14 ` [PATCH v5 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-03-01 19:14 ` [PATCH v5 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-03-01 19:14 ` [PATCH v5 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-03-01 19:14 ` [PATCH v5 20/23] net/cnxk: add representor " Harman Kalra
2024-03-01 19:14 ` [PATCH v5 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-03-03 14:50 ` Jerin Jacob
2024-03-01 19:14 ` [PATCH v5 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-03-01 19:14 ` [PATCH v5 23/23] net/cnxk: other flow operations Harman Kalra
2024-03-03 17:38 ` [PATCH v6 00/23] net/cnxk: support for port representors Harman Kalra
2024-03-03 17:38 ` [PATCH v6 01/23] common/cnxk: add support for representors Harman Kalra
2024-03-03 17:38 ` [PATCH v6 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-03-03 17:38 ` [PATCH v6 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-03-03 17:38 ` [PATCH v6 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-03-03 17:38 ` [PATCH v6 05/23] net/cnxk: probing representor ports Harman Kalra
2024-03-03 17:38 ` [PATCH v6 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-03-03 17:38 ` [PATCH v6 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-03-03 17:38 ` [PATCH v6 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-03-03 17:38 ` [PATCH v6 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-03-03 17:38 ` [PATCH v6 10/23] net/cnxk: add representor control plane Harman Kalra
2024-03-03 17:38 ` [PATCH v6 11/23] common/cnxk: representee notification callback Harman Kalra
2024-03-03 17:38 ` [PATCH v6 12/23] net/cnxk: handling representee notification Harman Kalra
2024-03-03 17:38 ` [PATCH v6 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-03-03 17:38 ` [PATCH v6 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-03-03 17:38 ` [PATCH v6 15/23] net/cnxk: ethernet statistics for representor Harman Kalra
2024-03-03 17:38 ` [PATCH v6 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-03-03 17:38 ` [PATCH v6 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-03-03 17:38 ` [PATCH v6 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-03-03 17:38 ` [PATCH v6 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-03-03 17:38 ` [PATCH v6 20/23] net/cnxk: add representor " Harman Kalra
2024-03-03 17:38 ` [PATCH v6 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-03-03 17:38 ` [PATCH v6 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-03-03 17:38 ` [PATCH v6 23/23] net/cnxk: other flow operations Harman Kalra
2024-03-04 7:57 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231219174003.72901-14-hkalra@marvell.com \
--to=hkalra@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=ndabilpuram@marvell.com \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).