From: Harman Kalra <hkalra@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
"Kiran Kumar K" <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>, Harman Kalra <hkalra@marvell.com>
Subject: [PATCH 6/9] net/cnxk: representor ethdev ops
Date: Fri, 11 Aug 2023 22:04:16 +0530 [thread overview]
Message-ID: <20230811163419.165790-7-hkalra@marvell.com> (raw)
In-Reply-To: <20230811163419.165790-1-hkalra@marvell.com>
Implement Ethernet device operation callbacks for
the port representors PMD.
Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
drivers/net/cnxk/cnxk_rep.c | 62 +--
drivers/net/cnxk/cnxk_rep.h | 36 ++
drivers/net/cnxk/cnxk_rep_msg.h | 15 +
drivers/net/cnxk/cnxk_rep_ops.c | 655 ++++++++++++++++++++++++++++++--
4 files changed, 713 insertions(+), 55 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index e6f5790adc..5ee7e93ab9 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -13,6 +13,9 @@ struct eth_dev_ops cnxk_rep_dev_ops = {
.rx_queue_release = cnxk_rep_rx_queue_release,
.tx_queue_setup = cnxk_rep_tx_queue_setup,
.tx_queue_release = cnxk_rep_tx_queue_release,
+ .promiscuous_enable = cnxk_rep_promiscuous_enable,
+ .promiscuous_disable = cnxk_rep_promiscuous_disable,
+ .mac_addr_set = cnxk_rep_mac_addr_set,
.link_update = cnxk_rep_link_update,
.dev_close = cnxk_rep_dev_close,
.dev_stop = cnxk_rep_dev_stop,
@@ -24,14 +27,36 @@ struct eth_dev_ops cnxk_rep_dev_ops = {
int
cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
plt_rep_dbg("Representor port:%d uninit", ethdev->data->port_id);
rte_free(ethdev->data->mac_addrs);
ethdev->data->mac_addrs = NULL;
+ rep_xport_vdev_cfg->nb_rep_ports--;
+ /* Once all representors are closed, cleanup rep base vdev config */
+ if (!rep_xport_vdev_cfg->nb_rep_ports) {
+ plt_free(rep_xport_vdev_cfg->q_bmap_mem);
+ plt_free(rep_xport_vdev_cfg->mdevinfo);
+ plt_memzone_free(mz);
+ }
+
return 0;
+fail:
+ return rte_errno;
}
int
@@ -121,26 +146,6 @@ cnxk_init_rep_internal(struct cnxk_eth_dev *pf_dev)
return rc;
}
-static uint16_t
-cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- PLT_SET_USED(tx_queue);
- PLT_SET_USED(tx_pkts);
- PLT_SET_USED(nb_pkts);
-
- return 0;
-}
-
-static uint16_t
-cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- PLT_SET_USED(rx_queue);
- PLT_SET_USED(rx_pkts);
- PLT_SET_USED(nb_pkts);
-
- return 0;
-}
-
static int
cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
{
@@ -152,6 +157,11 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
rep_dev->vf_id = rep_params->vf_id;
rep_dev->switch_domain_id = rep_params->switch_domain_id;
rep_dev->parent_dev = rep_params->parent_dev;
+ rep_dev->u.rxq = UINT16_MAX;
+ rep_dev->u.txq = UINT16_MAX;
+
+ pf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);
+ rep_dev->rep_xport_vdev = pf_dev->rep_xport_vdev;
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = rep_params->vf_id;
@@ -170,11 +180,10 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
eth_dev->dev_ops = &cnxk_rep_dev_ops;
/* Rx/Tx functions stubs to avoid crashing */
- eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
- eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+ eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+ eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
/* Link state. Inherited from PF */
- pf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);
link = &pf_dev->eth_dev->data->dev_link;
eth_dev->data->dev_link.link_speed = link->link_speed;
@@ -325,13 +334,6 @@ cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev
goto err;
}
- /* Launch a thread to handle control messages */
- rc = cnxk_rep_control_thread_launch(pf_dev);
- if (rc) {
- plt_err("Failed to launch message ctrl thread");
- goto err;
- }
-
return 0;
err:
return rc;
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index 8825fa1cf2..2b6403f003 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -6,6 +6,7 @@
#ifndef __CNXK_REP_H__
#define __CNXK_REP_H__
+#define CNXK_REP_XPORT_VDEV_CFG_MZ "rep_xport_vdev_cfg"
#define CNXK_REP_XPORT_VDEV_DEVARGS "role=server"
#define CNXK_REP_XPORT_VDEV_NAME "net_memif"
#define CNXK_REP_VDEV_CTRL_QUEUE 0
@@ -14,6 +15,18 @@
/* Common ethdev ops */
extern struct eth_dev_ops cnxk_rep_dev_ops;
+/* Representor base device configurations */
+typedef struct rep_xport_vdev_cfg_s {
+ struct plt_bitmap *q_map;
+ void *q_bmap_mem;
+ uint8_t nb_rep_ports;
+ uint8_t nb_rep_started;
+ struct rte_mempool *ctrl_chan_pool;
+ struct rte_eth_dev_info *mdevinfo;
+ bool rep_xport_configured;
+} rep_xport_vdev_cfg_t;
+
+/* Representor port configurations */
struct cnxk_rep_dev {
uint16_t vf_id;
uint16_t switch_domain_id;
@@ -22,15 +35,33 @@ struct cnxk_rep_dev {
uint16_t rep_xport_vdev;
bool is_vf_active;
uint16_t pf_func;
+ union {
+ uint16_t rxq;
+ uint16_t txq;
+ uint16_t rep_portid;
+ } u;
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
};
+/* Inline functions */
static inline struct cnxk_rep_dev *
cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
{
return eth_dev->data->dev_private;
}
+static inline struct rte_eth_dev *
+cnxk_rep_xport_eth_dev(uint16_t portid)
+{
+ if (!rte_eth_dev_is_valid_port(portid)) {
+ plt_err("Invalid port_id=%u", portid);
+ return NULL;
+ }
+
+ return &rte_eth_devices[portid];
+}
+
+/* Prototypes */
int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev,
struct rte_eth_devargs *eth_da);
int cnxk_rep_dev_remove(struct rte_eth_dev *pf_ethdev);
@@ -52,5 +83,10 @@ int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev);
int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats);
int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
+int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev);
+int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev);
+int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr);
+uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
#endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h
index a28c63f762..554122d7f8 100644
--- a/drivers/net/cnxk/cnxk_rep_msg.h
+++ b/drivers/net/cnxk/cnxk_rep_msg.h
@@ -19,6 +19,10 @@ typedef enum CNXK_REP_MSG {
CNXK_REP_MSG_READY = 0,
CNXK_REP_MSG_ACK,
CNXK_REP_MSG_EXIT,
+ /* Ethernet operation msgs */
+ CNXK_REP_MSG_ETH_SET_MAC,
+ CNXK_REP_MSG_ETH_STATS_GET,
+ CNXK_REP_MSG_ETH_STATS_CLEAR,
/* End of messaging sequence */
CNXK_REP_MSG_END,
} cnxk_rep_msg_t;
@@ -64,6 +68,17 @@ typedef struct cnxk_rep_msg_exit_data {
uint8_t val;
} __rte_packed cnxk_rep_msg_exit_data_t;
+/* Ethernet op - set mac */
+typedef struct cnxk_rep_msg_eth_mac_set_meta {
+ uint16_t portid;
+ uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
+} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;
+
+/* Ethernet op - get/clear stats */
+typedef struct cnxk_rep_msg_eth_stats_meta {
+ uint16_t portid;
+} __rte_packed cnxk_rep_msg_eth_stats_meta_t;
+
void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type,
uint32_t size);
void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz,
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
index 3f1aab077b..022a5137df 100644
--- a/drivers/net/cnxk/cnxk_rep_ops.c
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -3,6 +3,54 @@
*/
#include <cnxk_rep.h>
+#include <cnxk_rep_msg.h>
+
+#define MEMPOOL_CACHE_SIZE 256
+#define TX_DESC_PER_QUEUE 512
+#define RX_DESC_PER_QUEUE 256
+#define NB_REP_VDEV_MBUF 1024
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct cnxk_rep_dev *rep_dev = tx_queue;
+
+ nb_pkts = rte_eth_tx_burst(rep_dev->rep_xport_vdev, rep_dev->u.txq, tx_pkts, nb_pkts);
+
+ return nb_pkts;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct cnxk_rep_dev *rep_dev = rx_queue;
+
+ nb_pkts = rte_eth_rx_burst(rep_dev->rep_xport_vdev, rep_dev->u.txq, rx_pkts, 32);
+ if (nb_pkts == 0)
+ return 0;
+
+ return nb_pkts;
+}
+
+uint16_t
+cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ PLT_SET_USED(tx_queue);
+ PLT_SET_USED(tx_pkts);
+ PLT_SET_USED(nb_pkts);
+
+ return 0;
+}
+
+uint16_t
+cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PLT_SET_USED(rx_queue);
+ PLT_SET_USED(rx_pkts);
+ PLT_SET_USED(nb_pkts);
+
+ return 0;
+}
int
cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
@@ -13,39 +61,379 @@ cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
}
int
-cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(devinfo);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ struct rte_eth_dev_info mdevinfo;
+ const struct plt_memzone *mz;
+ int rc = 0;
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ mz = plt_memzone_reserve_cache_align(CNXK_REP_XPORT_VDEV_CFG_MZ,
+ sizeof(rep_xport_vdev_cfg_t));
+ if (!mz) {
+ plt_err("Failed to reserve a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ /* Get the rep base vdev devinfo */
+ if (!rep_xport_vdev_cfg->mdevinfo) {
+ rc = rte_eth_dev_info_get(rep_dev->rep_xport_vdev, &mdevinfo);
+ if (rc) {
+ plt_err("Failed to get rep_xport port dev info, err %d", rc);
+ goto fail;
+ }
+ rep_xport_vdev_cfg->mdevinfo = plt_zmalloc(sizeof(struct rte_eth_dev_info), 0);
+ if (!rep_xport_vdev_cfg->mdevinfo) {
+ plt_err("Failed to alloc memory for dev info");
+ goto fail;
+ }
+ rte_memcpy(rep_xport_vdev_cfg->mdevinfo, &mdevinfo,
+ sizeof(struct rte_eth_dev_info));
+ }
+
+ /* Use rep_xport device info */
+ dev_info->max_mac_addrs = rep_xport_vdev_cfg->mdevinfo->max_mac_addrs;
+ dev_info->max_rx_pktlen = rep_xport_vdev_cfg->mdevinfo->max_rx_pktlen;
+ dev_info->min_rx_bufsize = rep_xport_vdev_cfg->mdevinfo->min_rx_bufsize;
+ dev_info->tx_offload_capa = rep_xport_vdev_cfg->mdevinfo->tx_offload_capa;
+
+ /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+
+ /* MTU specifics */
+ dev_info->max_mtu = rep_xport_vdev_cfg->mdevinfo->max_mtu;
+ dev_info->min_mtu = rep_xport_vdev_cfg->mdevinfo->min_mtu;
+
+ /* Switch info specific */
+ dev_info->switch_info.name = ethdev->device->name;
+ dev_info->switch_info.domain_id = rep_dev->switch_domain_id;
+ dev_info->switch_info.port_id = rep_dev->vf_id;
+
return 0;
+fail:
+ return rc;
+}
+
+static inline int
+bitmap_ctzll(uint64_t slab)
+{
+ if (slab == 0)
+ return 0;
+
+ return __builtin_ctzll(slab);
+}
+
+static uint16_t
+alloc_rep_xport_qid(struct plt_bitmap *bmp)
+{
+ uint16_t idx, rc;
+ uint64_t slab;
+ uint32_t pos;
+
+ pos = 0;
+ slab = 0;
+ /* Scan from the beginning */
+ plt_bitmap_scan_init(bmp);
+ /* Scan bitmap to get the free pool */
+ rc = plt_bitmap_scan(bmp, &pos, &slab);
+ /* Empty bitmap */
+ if (rc == 0)
+ return UINT16_MAX;
+
+ idx = pos + bitmap_ctzll(slab);
+ plt_bitmap_clear(bmp, idx);
+ return idx;
+}
+
+static int
+configure_rep_xport_queues_map(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg)
+{
+ int id, rc = 0, q_max;
+ uint32_t bmap_sz;
+ void *bmap_mem;
+
+ q_max = CNXK_MAX_REP_PORTS + 1;
+ /* Return success on no-pci case */
+ if (!q_max)
+ return 0;
+
+ bmap_sz = plt_bitmap_get_memory_footprint(q_max);
+
+ /* Allocate memory for rep_xport queue bitmap */
+ bmap_mem = plt_zmalloc(bmap_sz, RTE_CACHE_LINE_SIZE);
+ if (bmap_mem == NULL) {
+ plt_err("Failed to allocate memory for rep_xport queue bmap");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ rep_xport_vdev_cfg->q_bmap_mem = bmap_mem;
+
+ /* Initialize rep_xport queue bitmap */
+ rep_xport_vdev_cfg->q_map = plt_bitmap_init(q_max, bmap_mem, bmap_sz);
+ if (!rep_xport_vdev_cfg->q_map) {
+ plt_err("Failed to initialize rep_xport queue bitmap");
+ rc = -EIO;
+ goto exit;
+ }
+
+ /* Set all the queues initially */
+ for (id = 0; id < q_max; id++)
+ plt_bitmap_set(rep_xport_vdev_cfg->q_bmap_mem, id);
+
+ return 0;
+exit:
+ return rc;
+}
+
+static uint16_t
+cnxk_rep_eth_dev_count_total(void)
+{
+ uint16_t port, count = 0;
+ struct rte_eth_dev *ethdev;
+
+ RTE_ETH_FOREACH_DEV(port) {
+ ethdev = &rte_eth_devices[port];
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ count++;
+ }
+
+ return count;
+}
+
+static int
+configure_control_channel(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg, uint16_t portid)
+{
+ struct rte_mempool *ctrl_chan_pool = NULL;
+ int rc;
+
+ /* Allocate a qid for control channel */
+ alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);
+
+ /* Create the mbuf pool. */
+ ctrl_chan_pool = rte_pktmbuf_pool_create("rep_xport_ctrl_pool", NB_REP_VDEV_MBUF,
+ MEMPOOL_CACHE_SIZE, RTE_CACHE_LINE_SIZE,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ if (ctrl_chan_pool == NULL) {
+ plt_err("Cannot init mbuf pool");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* Setup a RX queue for control channel */
+ rc = rte_eth_rx_queue_setup(portid, CNXK_REP_VDEV_CTRL_QUEUE, RX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(portid), NULL, ctrl_chan_pool);
+ if (rc < 0) {
+ plt_err("rte_eth_rx_queue_setup:err=%d, port=%u\n", rc, portid);
+ goto fail;
+ }
+
+ /* Setup a TX queue for control channel */
+ rc = rte_eth_tx_queue_setup(portid, CNXK_REP_VDEV_CTRL_QUEUE, TX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(portid), NULL);
+ if (rc < 0) {
+ plt_err("TX queue setup failed, err %d port %d", rc, portid);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg->ctrl_chan_pool = ctrl_chan_pool;
+
+ return 0;
+fail:
+ return rc;
+}
+
+static int
+configure_rep_xport_dev(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg, uint16_t portid)
+{
+ struct rte_eth_dev *rep_xport_ethdev = cnxk_rep_xport_eth_dev(portid);
+ static struct rte_eth_conf port_conf_default;
+ uint16_t nb_rxq, nb_txq, nb_rep_ports;
+ int rc = 0;
+
+ /* If rep_xport port already started, stop it and reconfigure */
+ if (rep_xport_ethdev->data->dev_started)
+ rte_eth_dev_stop(portid);
+
+ /* Get the no of representors probed */
+ nb_rep_ports = cnxk_rep_eth_dev_count_total();
+ if (nb_rep_ports > CNXK_MAX_REP_PORTS) {
+ plt_err("Representors probed %d > Max supported %d", nb_rep_ports,
+ CNXK_MAX_REP_PORTS);
+ goto fail;
+ }
+
+ /* Each queue of rep_xport describes representor port. 1 additional queue is
+ * configured as control channel to configure flows, etc.
+ */
+ nb_rxq = CNXK_MAX_REP_PORTS + 1;
+ nb_txq = CNXK_MAX_REP_PORTS + 1;
+
+ rc = rte_eth_dev_configure(portid, nb_rxq, nb_txq, &port_conf_default);
+ if (rc) {
+ plt_err("Failed to configure rep_xport port: %d", rc);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg->rep_xport_configured = true;
+ rep_xport_vdev_cfg->nb_rep_ports = nb_rep_ports;
+
+ return 0;
+fail:
+ return rc;
}
int
cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+ int rc = -1;
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ mz = plt_memzone_reserve_cache_align(CNXK_REP_XPORT_VDEV_CFG_MZ,
+ sizeof(rep_xport_vdev_cfg_t));
+ if (!mz) {
+ plt_err("Failed to reserve a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ /* Return if rep_xport dev already configured */
+ if (rep_xport_vdev_cfg->rep_xport_configured) {
+ rep_dev->ctrl_chan_pool = rep_xport_vdev_cfg->ctrl_chan_pool;
+ return 0;
+ }
+
+ /* Configure rep_xport pmd */
+ rc = configure_rep_xport_dev(rep_xport_vdev_cfg, rep_dev->rep_xport_vdev);
+ if (rc) {
+ plt_err("Configuring rep_xport port failed");
+ goto free;
+ }
+
+ /* Setup a bitmap for rep_xport queues */
+ rc = configure_rep_xport_queues_map(rep_xport_vdev_cfg);
+ if (rc != 0) {
+ plt_err("Failed to setup rep_xport queue map, err %d", rc);
+ goto free;
+ }
+
+ /* Setup a queue for control channel */
+ rc = configure_control_channel(rep_xport_vdev_cfg, rep_dev->rep_xport_vdev);
+ if (rc != 0) {
+ plt_err("Failed to setup control channel, err %d", rc);
+ goto free;
+ }
+ rep_dev->ctrl_chan_pool = rep_xport_vdev_cfg->ctrl_chan_pool;
+
return 0;
+free:
+ plt_memzone_free(mz);
+fail:
+ return rc;
}
int
-cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)
{
PLT_SET_USED(ethdev);
return 0;
}
int
-cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)
{
PLT_SET_USED(ethdev);
return 0;
}
+int
+cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+ int rc = 0;
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ ethdev->rx_pkt_burst = cnxk_rep_rx_burst;
+ ethdev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+ /* Start rep_xport device only once after first representor gets active */
+ if (!rep_xport_vdev_cfg->nb_rep_started) {
+ rc = rte_eth_dev_start(rep_dev->rep_xport_vdev);
+ if (rc) {
+ plt_err("Rep base vdev portid %d start failed, err %d",
+ rep_dev->rep_xport_vdev, rc);
+ goto fail;
+ }
+
+ /* Launch a thread to handle control messages */
+ rc = cnxk_rep_control_thread_launch(cnxk_eth_pmd_priv(rep_dev->parent_dev));
+ if (rc) {
+ plt_err("Failed to launch message ctrl thread");
+ goto fail;
+ }
+ }
+
+ rep_xport_vdev_cfg->nb_rep_started++;
+
+ return 0;
+fail:
+ return rc;
+}
+
+int
+cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+{
+ return cnxk_rep_dev_uninit(ethdev);
+}
+
int
cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+ ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
+ rep_xport_vdev_cfg->nb_rep_started--;
+
+ /* Stop rep_xport device only after all other devices stopped */
+ if (!rep_xport_vdev_cfg->nb_rep_started)
+ rte_eth_dev_stop(rep_dev->rep_xport_vdev);
+
return 0;
+fail:
+ return rte_errno;
}
int
@@ -53,54 +441,220 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16
unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(rx_queue_id);
- PLT_SET_USED(nb_rx_desc);
- PLT_SET_USED(socket_id);
- PLT_SET_USED(rx_conf);
- PLT_SET_USED(mb_pool);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+ int rc = 0;
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ /* Allocate a qid, if tx queue setup already done use the same qid */
+ if (rep_dev->u.rxq == UINT16_MAX && rep_dev->u.txq == UINT16_MAX)
+ rep_dev->u.rxq = alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);
+ else
+ rep_dev->u.rxq = rep_dev->u.txq;
+
+ /* Setup the RX queue */
+ rc = rte_eth_rx_queue_setup(rep_dev->rep_xport_vdev, rep_dev->u.rxq, nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (rc < 0) {
+ plt_err("rte_eth_rx_queue_setup:err=%d, port=%u\n", rc, rep_dev->rep_xport_vdev);
+ goto fail;
+ }
+
+ ethdev->data->rx_queues[rx_queue_id] = rep_dev;
+ plt_info("Representor id %d portid %d rxq %d", rep_dev->vf_id, ethdev->data->port_id,
+ rep_dev->u.rxq);
+
return 0;
+fail:
+ return rc;
}
void
cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(queue_id);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+ RTE_SET_USED(queue_id);
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ return;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ plt_bitmap_clear(rep_xport_vdev_cfg->q_bmap_mem, rep_dev->u.rxq);
}
int
cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(tx_queue_id);
- PLT_SET_USED(nb_tx_desc);
- PLT_SET_USED(socket_id);
- PLT_SET_USED(tx_conf);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
+ int rc = 0;
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ goto fail;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ /* Allocate a qid, if rx queue setup already done use the same qid */
+ if (rep_dev->u.rxq == UINT16_MAX && rep_dev->u.txq == UINT16_MAX)
+ rep_dev->u.txq = alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);
+ else
+ rep_dev->u.txq = rep_dev->u.rxq;
+
+ /* Setup the TX queue */
+ rc = rte_eth_tx_queue_setup(rep_dev->rep_xport_vdev, rep_dev->u.txq, nb_tx_desc, socket_id,
+ tx_conf);
+ if (rc < 0) {
+ plt_err("TX queue setup failed, err %d port %d", rc, rep_dev->rep_xport_vdev);
+ goto fail;
+ }
+
+ ethdev->data->tx_queues[tx_queue_id] = rep_dev;
+ plt_info("Representor id %d portid %d txq %d", rep_dev->vf_id, ethdev->data->port_id,
+ rep_dev->u.txq);
+
return 0;
+fail:
+ return rc;
}
void
cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
- PLT_SET_USED(ethdev);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+ const struct plt_memzone *mz;
PLT_SET_USED(queue_id);
+
+ mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+ if (!mz) {
+ plt_err("Failed to lookup a memzone, rep id %d, err %d",
+ rep_dev->vf_id, rte_errno);
+ return;
+ }
+
+ rep_xport_vdev_cfg = mz->addr;
+ plt_bitmap_clear(rep_xport_vdev_cfg->q_bmap_mem, rep_dev->u.txq);
+}
+
+static int
+process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)
+{
+ cnxk_rep_msg_eth_stats_meta_t msg_st_meta;
+ uint32_t len = 0, rc;
+ void *buffer;
+ size_t size;
+
+ size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+ buffer = plt_zmalloc(size, 0);
+ if (!buffer) {
+ plt_err("Failed to allocate mem");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ cnxk_rep_msg_populate_header(buffer, &len);
+
+ msg_st_meta.portid = rep_dev->u.rxq;
+ cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,
+ sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);
+ cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+ rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
+ if (rc) {
+ plt_err("Failed to process the message, err %d", rc);
+ goto fail;
+ }
+
+ rte_free(buffer);
+
+ return 0;
+fail:
+ rte_free(buffer);
+ return rc;
}
int
cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(stats);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ struct rte_eth_stats vf_stats;
+ cnxk_rep_msg_ack_data_t adata;
+ int rc;
+
+ /* If representor not representing any active VF, return 0 */
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET);
+ if (rc || adata.u.sval < 0) {
+ if (adata.u.sval < 0)
+ rc = adata.u.sval;
+
+ plt_err("Failed to get stats for vf rep %x, err %d", rep_dev->vf_id, rc);
+ }
+
+ if (adata.size != sizeof(struct rte_eth_stats)) {
+ rc = -EINVAL;
+ plt_err("Incomplete stats received for vf rep %d", rep_dev->vf_id);
+ goto fail;
+ }
+
+ rte_memcpy(&vf_stats, adata.u.data, adata.size);
+
+ stats->q_ipackets[0] = vf_stats.ipackets;
+ stats->q_ibytes[0] = vf_stats.ibytes;
+ stats->ipackets = vf_stats.ipackets;
+ stats->ibytes = vf_stats.ibytes;
+
+ stats->q_opackets[0] = vf_stats.opackets;
+ stats->q_obytes[0] = vf_stats.obytes;
+ stats->opackets = vf_stats.opackets;
+ stats->obytes = vf_stats.obytes;
+
return 0;
+fail:
+ return rc;
}
int
cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
- return 0;
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ cnxk_rep_msg_ack_data_t adata;
+ int rc = 0;
+
+ /* If representor not representing any active VF, return 0 */
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR);
+ if (rc || adata.u.sval < 0) {
+ if (adata.u.sval < 0)
+ rc = adata.u.sval;
+
+ plt_err("Failed to clear stats for vf rep %x, err %d", rep_dev->vf_id, rc);
+ }
+
+ return rc;
}
int
@@ -110,3 +664,54 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **op
PLT_SET_USED(ops);
return 0;
}
+
+int
+cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+{
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+ cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;
+ cnxk_rep_msg_ack_data_t adata;
+ uint32_t len = 0, rc;
+ void *buffer;
+ size_t size;
+
+ /* If representor not representing any VF, return 0 */
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+ buffer = plt_zmalloc(size, 0);
+ if (!buffer) {
+ plt_err("Failed to allocate mem");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ cnxk_rep_msg_populate_header(buffer, &len);
+
+ msg_sm_meta.portid = rep_dev->u.rxq;
+ rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+ cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,
+ sizeof(cnxk_rep_msg_eth_set_mac_meta_t),
+ CNXK_REP_MSG_ETH_SET_MAC);
+ cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+ rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);
+ if (rc) {
+ plt_err("Failed to process the message, err %d", rc);
+ goto fail;
+ }
+
+ if (adata.u.sval < 0) {
+ rc = adata.u.sval;
+ plt_err("Failed to set mac address, err %d", rc);
+ goto fail;
+ }
+
+ rte_free(buffer);
+
+ return 0;
+fail:
+ rte_free(buffer);
+ return rc;
+}
--
2.18.0
next prev parent reply other threads:[~2023-08-11 16:35 UTC|newest]
Thread overview: 142+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-11 16:34 [PATCH 0/9] net/cnxk: support for port representors Harman Kalra
2023-08-11 16:34 ` [PATCH 1/9] common/cnxk: debug log type for representors Harman Kalra
2023-08-11 16:34 ` [PATCH 2/9] net/cnxk: probing representor ports Harman Kalra
2023-08-11 16:34 ` [PATCH 3/9] common/cnxk: maintaining representor state Harman Kalra
2023-08-11 16:34 ` [PATCH 4/9] net/cnxk: callbacks for " Harman Kalra
2023-08-11 16:34 ` [PATCH 5/9] net/cnxk: add representor control plane Harman Kalra
2023-08-11 16:34 ` Harman Kalra [this message]
2023-08-11 16:34 ` [PATCH 7/9] net/cnxk: representor flow ops Harman Kalra
2023-08-11 16:34 ` [PATCH 8/9] common/cnxk: support represented port for cnxk Harman Kalra
2023-08-11 16:34 ` [PATCH 9/9] net/cnxk: add " Harman Kalra
2023-12-19 17:39 ` [PATCH v2 00/24] net/cnxk: support for port representors Harman Kalra
2023-12-19 17:39 ` [PATCH v2 01/24] common/cnxk: add support for representors Harman Kalra
2023-12-19 17:39 ` [PATCH v2 02/24] net/cnxk: implementing eswitch device Harman Kalra
2024-01-04 12:30 ` Jerin Jacob
2023-12-19 17:39 ` [PATCH v2 03/24] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-01-04 12:34 ` Jerin Jacob
2023-12-19 17:39 ` [PATCH v2 04/24] net/cnxk: eswitch devargs parsing Harman Kalra
2023-12-19 17:39 ` [PATCH v2 05/24] net/cnxk: probing representor ports Harman Kalra
2023-12-19 17:39 ` [PATCH v2 06/24] common/cnxk: common NPC changes for eswitch Harman Kalra
2023-12-19 17:39 ` [PATCH v2 07/24] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-01-04 12:47 ` Jerin Jacob
2023-12-19 17:39 ` [PATCH v2 08/24] net/cnxk: eswitch flow configurations Harman Kalra
2023-12-19 17:39 ` [PATCH v2 09/24] net/cnxk: eswitch fastpath routines Harman Kalra
2023-12-19 17:39 ` [PATCH v2 10/24] net/cnxk: add representor control plane Harman Kalra
2023-12-19 17:39 ` [PATCH v2 11/24] common/cnxk: representee notification callback Harman Kalra
2023-12-19 17:39 ` [PATCH v2 12/24] net/cnxk: handling representee notification Harman Kalra
2023-12-19 17:39 ` [PATCH v2 13/24] net/cnxk: representor ethdev ops Harman Kalra
2023-12-19 17:39 ` [PATCH v2 14/24] common/cnxk: get representees ethernet stats Harman Kalra
2023-12-19 17:39 ` [PATCH v2 15/24] net/cnxk: ethernet statistic for representor Harman Kalra
2023-12-19 17:39 ` [PATCH v2 16/24] common/cnxk: base support for eswitch VF Harman Kalra
2023-12-19 17:39 ` [PATCH v2 17/24] net/cnxk: eswitch VF as ethernet device Harman Kalra
2023-12-19 17:39 ` [PATCH v2 18/24] common/cnxk: support port representor and represented port Harman Kalra
2023-12-19 17:39 ` [PATCH v2 19/24] net/cnxk: add represented port pattern and action Harman Kalra
2023-12-19 17:39 ` [PATCH v2 20/24] net/cnxk: add port representor " Harman Kalra
2023-12-19 17:40 ` [PATCH v2 21/24] net/cnxk: generalize flow operation APIs Harman Kalra
2023-12-19 17:40 ` [PATCH v2 22/24] net/cnxk: flow create on representor ports Harman Kalra
2023-12-19 17:40 ` [PATCH v2 23/24] net/cnxk: other flow operations Harman Kalra
2023-12-19 17:40 ` [PATCH v2 24/24] doc: port representors in cnxk Harman Kalra
2023-12-20 9:37 ` Thomas Monjalon
2023-12-21 13:28 ` [EXT] " Harman Kalra
2023-12-21 18:33 ` Thomas Monjalon
2024-01-11 6:48 ` Harman Kalra
2024-02-01 13:07 ` [PATCH v3 00/23] net/cnxk: support for port representors Harman Kalra
2024-02-01 13:07 ` [PATCH v3 01/23] common/cnxk: add support for representors Harman Kalra
2024-02-01 13:07 ` [PATCH v3 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-02-01 13:07 ` [PATCH v3 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-02-01 13:07 ` [PATCH v3 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-02-01 13:07 ` [PATCH v3 05/23] net/cnxk: probing representor ports Harman Kalra
2024-02-01 13:07 ` [PATCH v3 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-02-01 13:07 ` [PATCH v3 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-02-01 13:07 ` [PATCH v3 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-02-01 13:07 ` [PATCH v3 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-02-01 13:07 ` [PATCH v3 10/23] net/cnxk: add representor control plane Harman Kalra
2024-02-01 13:07 ` [PATCH v3 11/23] common/cnxk: representee notification callback Harman Kalra
2024-02-01 13:07 ` [PATCH v3 12/23] net/cnxk: handling representee notification Harman Kalra
2024-02-01 13:07 ` [PATCH v3 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-02-01 13:07 ` [PATCH v3 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-02-01 13:07 ` [PATCH v3 15/23] net/cnxk: ethernet statistic for representor Harman Kalra
2024-02-01 13:07 ` [PATCH v3 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-02-01 13:07 ` [PATCH v3 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-02-01 13:07 ` [PATCH v3 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-02-01 13:07 ` [PATCH v3 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-02-01 13:07 ` [PATCH v3 20/23] net/cnxk: add representor " Harman Kalra
2024-02-01 13:07 ` [PATCH v3 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-02-01 13:07 ` [PATCH v3 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-02-01 13:07 ` [PATCH v3 23/23] net/cnxk: other flow operations Harman Kalra
2024-02-27 19:15 ` [PATCH v4 00/23] net/cnxk: support for port representors Harman Kalra
2024-02-27 19:15 ` [PATCH v4 01/23] common/cnxk: add support for representors Harman Kalra
2024-02-27 19:15 ` [PATCH v4 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-03-01 9:31 ` Jerin Jacob
2024-02-27 19:15 ` [PATCH v4 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-02-27 19:15 ` [PATCH v4 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-02-27 19:15 ` [PATCH v4 05/23] net/cnxk: probing representor ports Harman Kalra
2024-02-27 19:15 ` [PATCH v4 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-02-27 19:15 ` [PATCH v4 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-02-27 19:15 ` [PATCH v4 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-02-27 19:15 ` [PATCH v4 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-02-27 19:15 ` [PATCH v4 10/23] net/cnxk: add representor control plane Harman Kalra
2024-02-27 19:15 ` [PATCH v4 11/23] common/cnxk: representee notification callback Harman Kalra
2024-02-27 19:15 ` [PATCH v4 12/23] net/cnxk: handling representee notification Harman Kalra
2024-02-27 19:15 ` [PATCH v4 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-02-27 19:15 ` [PATCH v4 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-02-27 19:15 ` [PATCH v4 15/23] net/cnxk: ethernet statistics for representor Harman Kalra
2024-02-27 19:15 ` [PATCH v4 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-02-27 19:15 ` [PATCH v4 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-02-27 19:15 ` [PATCH v4 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-02-27 19:15 ` [PATCH v4 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-02-27 19:15 ` [PATCH v4 20/23] net/cnxk: add representor " Harman Kalra
2024-02-27 19:15 ` [PATCH v4 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-02-27 19:15 ` [PATCH v4 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-02-27 19:15 ` [PATCH v4 23/23] net/cnxk: other flow operations Harman Kalra
2024-03-01 9:35 ` Jerin Jacob
2024-03-01 19:14 ` [PATCH v5 00/23] net/cnxk: support for port representors Harman Kalra
2024-03-01 19:14 ` [PATCH v5 01/23] common/cnxk: add support for representors Harman Kalra
2024-03-01 19:14 ` [PATCH v5 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-03-01 19:14 ` [PATCH v5 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-03-01 19:14 ` [PATCH v5 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-03-01 19:14 ` [PATCH v5 05/23] net/cnxk: probing representor ports Harman Kalra
2024-03-01 19:14 ` [PATCH v5 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-03-01 19:14 ` [PATCH v5 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-03-01 19:14 ` [PATCH v5 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-03-01 19:14 ` [PATCH v5 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-03-01 19:14 ` [PATCH v5 10/23] net/cnxk: add representor control plane Harman Kalra
2024-03-01 19:14 ` [PATCH v5 11/23] common/cnxk: representee notification callback Harman Kalra
2024-03-01 19:14 ` [PATCH v5 12/23] net/cnxk: handling representee notification Harman Kalra
2024-03-01 19:14 ` [PATCH v5 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-03-01 19:14 ` [PATCH v5 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-03-01 19:14 ` [PATCH v5 15/23] net/cnxk: ethernet statistics for representor Harman Kalra
2024-03-01 19:14 ` [PATCH v5 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-03-01 19:14 ` [PATCH v5 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-03-01 19:14 ` [PATCH v5 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-03-01 19:14 ` [PATCH v5 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-03-01 19:14 ` [PATCH v5 20/23] net/cnxk: add representor " Harman Kalra
2024-03-01 19:14 ` [PATCH v5 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-03-03 14:50 ` Jerin Jacob
2024-03-01 19:14 ` [PATCH v5 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-03-01 19:14 ` [PATCH v5 23/23] net/cnxk: other flow operations Harman Kalra
2024-03-03 17:38 ` [PATCH v6 00/23] net/cnxk: support for port representors Harman Kalra
2024-03-03 17:38 ` [PATCH v6 01/23] common/cnxk: add support for representors Harman Kalra
2024-03-03 17:38 ` [PATCH v6 02/23] net/cnxk: implementing eswitch device Harman Kalra
2024-03-03 17:38 ` [PATCH v6 03/23] net/cnxk: eswitch HW resource configuration Harman Kalra
2024-03-03 17:38 ` [PATCH v6 04/23] net/cnxk: eswitch devargs parsing Harman Kalra
2024-03-03 17:38 ` [PATCH v6 05/23] net/cnxk: probing representor ports Harman Kalra
2024-03-03 17:38 ` [PATCH v6 06/23] common/cnxk: common NPC changes for eswitch Harman Kalra
2024-03-03 17:38 ` [PATCH v6 07/23] common/cnxk: interface to update VLAN TPID Harman Kalra
2024-03-03 17:38 ` [PATCH v6 08/23] net/cnxk: eswitch flow configurations Harman Kalra
2024-03-03 17:38 ` [PATCH v6 09/23] net/cnxk: eswitch fastpath routines Harman Kalra
2024-03-03 17:38 ` [PATCH v6 10/23] net/cnxk: add representor control plane Harman Kalra
2024-03-03 17:38 ` [PATCH v6 11/23] common/cnxk: representee notification callback Harman Kalra
2024-03-03 17:38 ` [PATCH v6 12/23] net/cnxk: handling representee notification Harman Kalra
2024-03-03 17:38 ` [PATCH v6 13/23] net/cnxk: representor ethdev ops Harman Kalra
2024-03-03 17:38 ` [PATCH v6 14/23] common/cnxk: get representees ethernet stats Harman Kalra
2024-03-03 17:38 ` [PATCH v6 15/23] net/cnxk: ethernet statistics for representor Harman Kalra
2024-03-03 17:38 ` [PATCH v6 16/23] common/cnxk: base support for eswitch VF Harman Kalra
2024-03-03 17:38 ` [PATCH v6 17/23] net/cnxk: eswitch VF as ethernet device Harman Kalra
2024-03-03 17:38 ` [PATCH v6 18/23] common/cnxk: support port representor and represented port Harman Kalra
2024-03-03 17:38 ` [PATCH v6 19/23] net/cnxk: add represented port pattern and action Harman Kalra
2024-03-03 17:38 ` [PATCH v6 20/23] net/cnxk: add representor " Harman Kalra
2024-03-03 17:38 ` [PATCH v6 21/23] net/cnxk: generalise flow operation APIs Harman Kalra
2024-03-03 17:38 ` [PATCH v6 22/23] net/cnxk: flow create on representor ports Harman Kalra
2024-03-03 17:38 ` [PATCH v6 23/23] net/cnxk: other flow operations Harman Kalra
2024-03-04 7:57 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230811163419.165790-7-hkalra@marvell.com \
--to=hkalra@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=ndabilpuram@marvell.com \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link.
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).