From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: thomas@monjalon.net, Gagandeep Singh <g.singh@nxp.com>,
Akhil Goyal <akhil.goyal@nxp.com>
Subject: [dpdk-dev] [PATCH v4 07/14] net/pfe: add device start stop operations
Date: Thu, 10 Oct 2019 12:02:27 +0530
Message-ID: <20191010063234.32568-8-g.singh@nxp.com>
In-Reply-To: <20191010063234.32568-1-g.singh@nxp.com>
This patch adds the device start, stop and close operations,
along with the HIF client register/unregister support they rely on.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
---
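Notes (not part of the commit message): the new dev_ops are reached through
the standard ethdev API. A minimal, purely illustrative application-side
sketch follows; port_id, the helper name and the error handling are
assumptions, not part of this patch, and device configure plus queue setup
(added in the next patch of this series) are assumed to have run already:

    #include <rte_ethdev.h>

    /* Illustrative only: exercise the ops added by this patch. */
    static int run_port(uint16_t port_id)
    {
        /* dev_configure and rx/tx queue setup assumed done here */
        int ret = rte_eth_dev_start(port_id);  /* .dev_start = pfe_eth_open  */
        if (ret < 0)
            return ret;
        /* ... rx/tx burst loop ... */
        rte_eth_dev_stop(port_id);              /* .dev_stop  = pfe_eth_stop  */
        rte_eth_dev_close(port_id);             /* .dev_close = pfe_eth_close */
        return 0;
    }
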
drivers/net/pfe/Makefile | 2 +-
drivers/net/pfe/pfe_eth.h | 1 +
drivers/net/pfe/pfe_ethdev.c | 184 +++++++++++++++++
drivers/net/pfe/pfe_hif.c | 139 +++++++++++++
drivers/net/pfe/pfe_hif.h | 41 ++++
drivers/net/pfe/pfe_hif_lib.c | 362 +++++++++++++++++++++++++++++++++-
drivers/net/pfe/pfe_hif_lib.h | 11 ++
drivers/net/pfe/pfe_mod.h | 1 +
8 files changed, 739 insertions(+), 2 deletions(-)
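A further note on the address arithmetic in hif_lib_client_release_rx_buffers()
below: the HIF Rx descriptors store a raw data pointer, and the owning mbuf is
recovered by walking back over the headroom, the pool private area and the mbuf
header. A rough equivalent of that arithmetic, assuming data_off equals
RTE_PKTMBUF_HEADROOM (as this driver does); desc_data and mbuf_priv_size are
placeholder names used only for illustration:

    /* mempool object layout (simplified):
     *   [struct rte_mbuf][priv area][RTE_PKTMBUF_HEADROOM][packet data ...]
     * The address stored in the HIF BD is (mbuf data pointer - PFE_PKT_HEADER_SZ),
     * so the mbuf header is found by stepping back from it:
     */
    struct rte_mbuf *m = (struct rte_mbuf *)((char *)desc_data
            + PFE_PKT_HEADER_SZ           /* back to the mbuf data pointer */
            - RTE_PKTMBUF_HEADROOM        /* back over the headroom        */
            - mbuf_priv_size              /* back over the pool priv area  */
            - sizeof(struct rte_mbuf));   /* back to the mbuf header       */
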
diff --git a/drivers/net/pfe/Makefile b/drivers/net/pfe/Makefile
index 5c317e10b..91815fc0c 100644
--- a/drivers/net/pfe/Makefile
+++ b/drivers/net/pfe/Makefile
@@ -31,7 +31,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif.c
LDLIBS += -lrte_bus_vdev
LDLIBS += -lrte_bus_dpaa
LDLIBS += -lrte_common_dpaax
-LDLIBS += -lrte_eal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
LDLIBS += -lrte_ethdev -lrte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/pfe/pfe_eth.h b/drivers/net/pfe/pfe_eth.h
index ab739f03c..63e5aca42 100644
--- a/drivers/net/pfe/pfe_eth.h
+++ b/drivers/net/pfe/pfe_eth.h
@@ -55,6 +55,7 @@ struct ls1012a_pfe_platform_data {
struct pfe_eth_priv_s {
struct pfe *pfe;
+ struct hif_client_s client;
int low_tmu_q;
int high_tmu_q;
struct rte_eth_dev *ndev;
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index ac1e608c8..08f3716b0 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -71,6 +71,126 @@ pfe_soc_version_get(void)
fclose(svr_file);
}
+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
+{
+ gpi_enable(priv->GPI_baseaddr);
+ gemac_enable(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+static void
+pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
+ __rte_unused int from_tx, __rte_unused int n_desc)
+{
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+
+ /* Clean HIF and client queue */
+ while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
+ tx_q_num, &flags,
+ HIF_TX_DESC_NT))) {
+ if (mbuf) {
+ mbuf->next = NULL;
+ mbuf->nb_segs = 1;
+ rte_pktmbuf_free(mbuf);
+ }
+ }
+}
+
+
+static void
+pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
+{
+ unsigned int ii;
+
+ for (ii = 0; ii < emac_txq_cnt; ii++)
+ pfe_eth_flush_txQ(priv, ii, 0, 0);
+}
+
+static int
+pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
+{
+ struct pfe_eth_priv_s *priv = data;
+
+ switch (event) {
+ case EVENT_TXDONE_IND:
+ pfe_eth_flush_tx(priv);
+ hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
+ break;
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+pfe_eth_open(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct hif_client_s *client;
+ struct hif_shm *hif_shm;
+ int rc;
+
+ /* Register client driver with HIF */
+ client = &priv->client;
+
+ if (client->pfe) {
+ hif_shm = client->pfe->hif.shm;
+ /* TODO: remove the below if block once the proper cleanup
+ * is added in eth_close
+ */
+ if (!test_bit(PFE_CL_GEM0 + priv->id,
+ &hif_shm->g_client_status[0])) {
+ /* Register client driver with HIF */
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->port_id = dev->data->port_id;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ PFE_PMD_ERR("hif_lib_client_register(%d)"
+ " failed", client->id);
+ goto err0;
+ }
+ }
+ } else {
+ /* Register client driver with HIF */
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->port_id = dev->data->port_id;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ PFE_PMD_ERR("hif_lib_client_register(%d) failed",
+ client->id);
+ goto err0;
+ }
+ }
+ rc = pfe_eth_start(priv);
+
+err0:
+ return rc;
+}
+
static int
pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
{
@@ -105,11 +225,21 @@ pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
}
}
+static void
+pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ gemac_disable(priv->EMAC_baseaddr);
+ gpi_disable(priv->GPI_baseaddr);
+}
+
static void
pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
{
PMD_INIT_FUNC_TRACE();
+ pfe_eth_stop(dev);
/* Close the device file for link status */
pfe_eth_close_cdev(dev->data->dev_private);
@@ -118,6 +248,58 @@ pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
pfe->nb_devs--;
}
+static void
+pfe_eth_close(struct rte_eth_dev *dev)
+{
+ if (!dev)
+ return;
+
+ if (!g_pfe)
+ return;
+
+ pfe_eth_exit(dev, g_pfe);
+
+ if (g_pfe->nb_devs == 0) {
+ pfe_hif_exit(g_pfe);
+ pfe_hif_lib_exit(g_pfe);
+ rte_free(g_pfe);
+ g_pfe = NULL;
+ }
+}
+
+static int
+pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static int
+pfe_eth_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pfe_eth_priv_s *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->id;
+ dev_info->max_mac_addrs = PFE_MAX_MACS;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
+ dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+ if (pfe_svr == SVR_LS1012A_REV1)
+ dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
+ else
+ dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
+
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = pfe_eth_open,
+ .dev_stop = pfe_eth_stop,
+ .dev_close = pfe_eth_close,
+ .dev_configure = pfe_eth_configure,
+ .dev_infos_get = pfe_eth_info,
+};
+
static int
pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
@@ -178,6 +360,8 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
}
eth_dev->data->mtu = 1500;
+ eth_dev->dev_ops = &ops;
+ pfe_eth_stop(eth_dev);
pfe_gemac_init(priv);
eth_dev->data->nb_rx_queues = 1;
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index 28530d12c..39a6ec8d4 100644
--- a/drivers/net/pfe/pfe_hif.c
+++ b/drivers/net/pfe/pfe_hif.c
@@ -43,6 +43,145 @@ pfe_hif_free_descr(struct pfe_hif *hif)
rte_free(hif->descr_baseaddr_v);
}
+/*
+ * pfe_hif_client_register
+ *
+ * This function is used to register a client driver with the HIF driver.
+ *
+ * Return value:
+ * 0 - on successful registration
+ */
+static int
+pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
+ struct hif_client_shm *client_shm)
+{
+ struct hif_client *client = &hif->client[client_id];
+ u32 i, cnt;
+ struct rx_queue_desc *rx_qbase;
+ struct tx_queue_desc *tx_qbase;
+ struct hif_rx_queue *rx_queue;
+ struct hif_tx_queue *tx_queue;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_spinlock_lock(&hif->tx_lock);
+
+ if (test_bit(client_id, &hif->shm->g_client_status[0])) {
+ PFE_PMD_ERR("client %d already registered", client_id);
+ err = -1;
+ goto unlock;
+ }
+
+ memset(client, 0, sizeof(struct hif_client));
+
+ /* Initialize client Rx queues baseaddr, size */
+
+ cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
+ /* Check if the client is requesting more queues than supported */
+ if (cnt > HIF_CLIENT_QUEUES_MAX)
+ cnt = HIF_CLIENT_QUEUES_MAX;
+
+ client->rx_qn = cnt;
+ rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
+ for (i = 0; i < cnt; i++) {
+ rx_queue = &client->rx_q[i];
+ rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
+ rx_queue->size = client_shm->rx_qsize;
+ rx_queue->write_idx = 0;
+ }
+
+ /* Initialize client Tx queues baseaddr, size */
+ cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
+
+ /* Check if the client is requesting more queues than supported */
+ if (cnt > HIF_CLIENT_QUEUES_MAX)
+ cnt = HIF_CLIENT_QUEUES_MAX;
+
+ client->tx_qn = cnt;
+ tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
+ for (i = 0; i < cnt; i++) {
+ tx_queue = &client->tx_q[i];
+ tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
+ tx_queue->size = client_shm->tx_qsize;
+ tx_queue->ack_idx = 0;
+ }
+
+ set_bit(client_id, &hif->shm->g_client_status[0]);
+
+unlock:
+ rte_spinlock_unlock(&hif->tx_lock);
+
+ return err;
+}
+
+/*
+ * pfe_hif_client_unregister
+ *
+ * This function is used to unregister a client from the HIF driver.
+ *
+ */
+static void
+pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Mark client as no longer available (which prevents further packet
+ * receive for this client)
+ */
+ rte_spinlock_lock(&hif->tx_lock);
+
+ if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
+ PFE_PMD_ERR("client %d not registered", client_id);
+
+ rte_spinlock_unlock(&hif->tx_lock);
+ return;
+ }
+
+ clear_bit(client_id, &hif->shm->g_client_status[0]);
+
+ rte_spinlock_unlock(&hif->tx_lock);
+}
+
+void
+hif_process_client_req(struct pfe_hif *hif, int req,
+ int data1, __rte_unused int data2)
+{
+ unsigned int client_id = data1;
+
+ if (client_id >= HIF_CLIENTS_MAX) {
+ PFE_PMD_ERR("client id %d out of bounds", client_id);
+ return;
+ }
+
+ switch (req) {
+ case REQUEST_CL_REGISTER:
+ /* Request to register a client */
+ PFE_PMD_INFO("register client_id %d", client_id);
+ pfe_hif_client_register(hif, client_id, (struct
+ hif_client_shm *)&hif->shm->client[client_id]);
+ break;
+
+ case REQUEST_CL_UNREGISTER:
+ PFE_PMD_INFO("unregister client_id %d", client_id);
+
+ /* Request to unregister a client */
+ pfe_hif_client_unregister(hif, client_id);
+
+ break;
+
+ default:
+ PFE_PMD_ERR("unsupported request %d", req);
+ break;
+ }
+
+ /*
+ * Process client Tx queues.
+ * Currently there is no check for pending Tx.
+ */
+}
+
#if defined(LS1012A_PFE_RESET_WA)
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
diff --git a/drivers/net/pfe/pfe_hif.h b/drivers/net/pfe/pfe_hif.h
index fa3c08cc7..9c4e57730 100644
--- a/drivers/net/pfe/pfe_hif.h
+++ b/drivers/net/pfe/pfe_hif.h
@@ -14,6 +14,12 @@
#define HIF_RX_DESC_NT 64
#define HIF_TX_DESC_NT 2048
+#define HIF_FIRST_BUFFER BIT(0)
+#define HIF_LAST_BUFFER BIT(1)
+#define HIF_DONT_DMA_MAP BIT(2)
+#define HIF_DATA_VALID BIT(3)
+#define HIF_TSO BIT(4)
+
enum {
PFE_CL_GEM0 = 0,
PFE_CL_GEM1,
@@ -63,6 +69,39 @@ struct hif_desc_sw {
u16 flags;
};
+struct hif_hdr {
+ u8 client_id;
+ u8 q_num;
+ u16 client_ctrl;
+ u16 client_ctrl1;
+};
+
+struct __hif_hdr {
+ union {
+ struct hif_hdr hdr;
+ u32 word[2];
+ };
+};
+
+struct hif_ipsec_hdr {
+ u16 sa_handle[2];
+} __packed;
+
+struct pfe_parse {
+ unsigned int packet_type;
+ uint16_t hash;
+ uint16_t parse_incomplete;
+ unsigned long long ol_flags;
+};
+
+/* HIF_CTRL_TX... defines */
+#define HIF_CTRL_TX_CHECKSUM BIT(2)
+
+/* HIF_CTRL_RX... defines */
+#define HIF_CTRL_RX_OFFSET_OFST (24)
+#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
+#define HIF_CTRL_RX_CONTINUED BIT(1)
+
struct pfe_hif {
/* To store registered clients in hif layer */
struct hif_client client[HIF_CLIENTS_MAX];
@@ -99,6 +138,8 @@ struct pfe_hif {
struct rte_device *dev;
};
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
+ data2);
int pfe_hif_init(struct pfe *pfe);
void pfe_hif_exit(struct pfe *pfe);
void pfe_hif_rx_idle(struct pfe_hif *hif);
diff --git a/drivers/net/pfe/pfe_hif_lib.c b/drivers/net/pfe/pfe_hif_lib.c
index 8f8121be1..2012d896a 100644
--- a/drivers/net/pfe/pfe_hif_lib.c
+++ b/drivers/net/pfe/pfe_hif_lib.c
@@ -5,11 +5,371 @@
#include "pfe_logs.h"
#include "pfe_mod.h"
+unsigned int emac_txq_cnt;
+
+/*
+ * @pfe_hif_lib.c
+ * Common functions used by HIF client drivers
+ */
+
+/*HIF shared memory Global variable */
+struct hif_shm ghif_shm;
+
+/* This function sends an indication to the HIF driver
+ *
+ * @param[in] hif HIF context
+ */
+static void
+hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
+ data2)
+{
+ hif_process_client_req(hif, req, data1, data2);
+}
+
+void
+hif_lib_indicate_client(struct hif_client_s *client, int event_type,
+ int qno)
+{
+ if (!client || event_type >= HIF_EVENT_MAX ||
+ qno >= HIF_CLIENT_QUEUES_MAX)
+ return;
+
+ if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
+ client->event_handler(client->priv, event_type, qno);
+}
+
+/* This function releases Rx queue descriptor memory and pre-filled buffers
+ *
+ * @param[in] client hif_client context
+ */
+static void
+hif_lib_client_release_rx_buffers(struct hif_client_s *client)
+{
+ struct rte_mempool *pool;
+ struct rte_pktmbuf_pool_private *mb_priv;
+ struct rx_queue_desc *desc;
+ unsigned int qno, ii;
+ void *buf;
+
+ pool = client->pfe->hif.shm->pool;
+ mb_priv = rte_mempool_get_priv(pool);
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ desc = client->rx_q[qno].base;
+
+ for (ii = 0; ii < client->rx_q[qno].size; ii++) {
+ buf = (void *)desc->data;
+ if (buf) {
+ /* Data pointer to mbuf pointer calculation:
+ * "Data - User private data - headroom - mbufsize"
+ * The actual data pointer given to the HIF BDs was
+ * "mbuf data pointer - PFE_PKT_HEADER_SZ"
+ */
+ buf = buf + PFE_PKT_HEADER_SZ
+ - sizeof(struct rte_mbuf)
+ - RTE_PKTMBUF_HEADROOM
+ - mb_priv->mbuf_priv_size;
+ rte_pktmbuf_free((struct rte_mbuf *)buf);
+ desc->ctrl = 0;
+ }
+ desc++;
+ }
+ }
+ rte_free(client->rx_qbase);
+}
+
+/* This function allocates memory for the Rx queue descriptors and pre-fills
+ * the queues with buffers.
+ * @param[in] client client context
+ * @param[in] q_size size of each Rx queue; all queues are of the same size
+ */
+static int
+hif_lib_client_init_rx_buffers(struct hif_client_s *client,
+ int q_size)
+{
+ struct rx_queue_desc *desc;
+ struct hif_client_rx_queue *queue;
+ unsigned int ii, qno;
+
+ /*Allocate memory for the client queues */
+ client->rx_qbase = rte_malloc(NULL, client->rx_qn * q_size *
+ sizeof(struct rx_queue_desc), RTE_CACHE_LINE_SIZE);
+ if (!client->rx_qbase)
+ goto err;
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ queue = &client->rx_q[qno];
+
+ queue->base = client->rx_qbase + qno * q_size * sizeof(struct
+ rx_queue_desc);
+ queue->size = q_size;
+ queue->read_idx = 0;
+ queue->write_idx = 0;
+ queue->queue_id = 0;
+ queue->port_id = client->port_id;
+ queue->priv = client->priv;
+ PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d", qno,
+ queue->base, queue->size);
+ }
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ queue = &client->rx_q[qno];
+ desc = queue->base;
+
+ for (ii = 0; ii < queue->size; ii++) {
+ desc->ctrl = CL_DESC_OWN;
+ desc++;
+ }
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+
+static void
+hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
+{
+ /*
+ * Check if there are any pending packets. The client must flush the Tx
+ * queues before unregistering, by calling
+ * hif_lib_tx_get_next_complete().
+ *
+ * HIF no longer calls us since we are no longer registered.
+ */
+ if (queue->tx_pending)
+ PFE_PMD_ERR("pending transmit packet");
+}
+
+static void
+hif_lib_client_release_tx_buffers(struct hif_client_s *client)
+{
+ unsigned int qno;
+
+ for (qno = 0; qno < client->tx_qn; qno++)
+ hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
+
+ rte_free(client->tx_qbase);
+}
+
+static int
+hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
+ q_size)
+{
+ struct hif_client_tx_queue *queue;
+ unsigned int qno;
+
+ client->tx_qbase = rte_malloc(NULL, client->tx_qn * q_size *
+ sizeof(struct tx_queue_desc), RTE_CACHE_LINE_SIZE);
+ if (!client->tx_qbase)
+ return 1;
+
+ for (qno = 0; qno < client->tx_qn; qno++) {
+ queue = &client->tx_q[qno];
+
+ queue->base = client->tx_qbase + qno * q_size * sizeof(struct
+ tx_queue_desc);
+ queue->size = q_size;
+ queue->read_idx = 0;
+ queue->write_idx = 0;
+ queue->tx_pending = 0;
+ queue->nocpy_flag = 0;
+ queue->prev_tmu_tx_pkts = 0;
+ queue->done_tmu_tx_pkts = 0;
+ queue->priv = client->priv;
+ queue->queue_id = 0;
+ queue->port_id = client->port_id;
+
+ PFE_PMD_DEBUG("tx queue: %d, base: %p, size: %d", qno,
+ queue->base, queue->size);
+ }
+
+ return 0;
+}
+
+static int
+hif_lib_event_dummy(__rte_unused void *priv,
+ __rte_unused int event_type, __rte_unused int qno)
+{
+ return 0;
+}
+
int
-pfe_hif_lib_init(__rte_unused struct pfe *pfe)
+hif_lib_client_register(struct hif_client_s *client)
{
+ struct hif_shm *hif_shm;
+ struct hif_client_shm *client_shm;
+ int err, i;
+
PMD_INIT_FUNC_TRACE();
+ /*Allocate memory before spin_lock*/
+ if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
+ err = -ENOMEM;
+ goto err_rx;
+ }
+
+ if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
+ err = -ENOMEM;
+ goto err_tx;
+ }
+
+ rte_spinlock_lock(&client->pfe->hif.lock);
+ if (!(client->pfe) || client->id >= HIF_CLIENTS_MAX ||
+ client->pfe->hif_client[client->id]) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ hif_shm = client->pfe->hif.shm;
+
+ if (!client->event_handler)
+ client->event_handler = hif_lib_event_dummy;
+
+ /*Initialize client specific shared memory */
+ client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
+ client_shm->rx_qbase = (unsigned long)client->rx_qbase;
+ client_shm->rx_qsize = client->rx_qsize;
+ client_shm->tx_qbase = (unsigned long)client->tx_qbase;
+ client_shm->tx_qsize = client->tx_qsize;
+ client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
+ (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
+
+ for (i = 0; i < HIF_EVENT_MAX; i++) {
+ client->queue_mask[i] = 0; /*
+ * By default all events are
+ * unmasked
+ */
+ }
+
+ /*Indicate to HIF driver*/
+ hif_lib_indicate_hif(&client->pfe->hif, REQUEST_CL_REGISTER,
+ client->id, 0);
+
+ PFE_PMD_DEBUG("client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d",
+ client, client->id, client->tx_qsize, client->rx_qsize);
+
+ client->cpu_id = -1;
+
+ client->pfe->hif_client[client->id] = client;
+ rte_spinlock_unlock(&client->pfe->hif.lock);
+
+ return 0;
+
+err:
+ rte_spinlock_unlock(&client->pfe->hif.lock);
+ hif_lib_client_release_tx_buffers(client);
+
+err_tx:
+ hif_lib_client_release_rx_buffers(client);
+
+err_rx:
+ return err;
+}
+
+int
+hif_lib_client_unregister(struct hif_client_s *client)
+{
+ struct pfe *pfe = client->pfe;
+ u32 client_id = client->id;
+
+ PFE_PMD_INFO("client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d",
+ client, client->id, client->tx_qsize, client->rx_qsize);
+
+ rte_spinlock_lock(&pfe->hif.lock);
+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
+
+ hif_lib_client_release_tx_buffers(client);
+ hif_lib_client_release_rx_buffers(client);
+ pfe->hif_client[client_id] = NULL;
+ rte_spinlock_unlock(&pfe->hif.lock);
+
+ return 0;
+}
+
+int
+hif_lib_event_handler_start(struct hif_client_s *client, int event,
+ int qno)
+{
+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
+ struct rx_queue_desc *desc = queue->base + queue->read_idx;
+
+ if (event >= HIF_EVENT_MAX || qno >= HIF_CLIENT_QUEUES_MAX) {
+ PFE_PMD_WARN("Unsupported event : %d queue number : %d",
+ event, qno);
+ return -1;
+ }
+
+ test_and_clear_bit(qno, &client->queue_mask[event]);
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+ if (!(desc->ctrl & CL_DESC_OWN))
+ hif_lib_indicate_client(client,
+ EVENT_RX_PKT_IND, qno);
+ break;
+
+ case EVENT_HIGH_RX_WM:
+ case EVENT_TXDONE_IND:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void *
+hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+ unsigned int *flags, __rte_unused int count)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+ struct tx_queue_desc *desc = queue->base + queue->read_idx;
+
+ PFE_DP_LOG(DEBUG, "qno : %d rd_indx: %d pending:%d",
+ qno, queue->read_idx, queue->tx_pending);
+
+ if (!queue->tx_pending)
+ return NULL;
+
+ if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
+ u32 tmu_tx_pkts = 0;
+
+ if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
+ queue->done_tmu_tx_pkts = UINT_MAX -
+ queue->prev_tmu_tx_pkts + tmu_tx_pkts;
+ else
+ queue->done_tmu_tx_pkts = tmu_tx_pkts -
+ queue->prev_tmu_tx_pkts;
+
+ queue->prev_tmu_tx_pkts = tmu_tx_pkts;
+
+ if (!queue->done_tmu_tx_pkts)
+ return NULL;
+ }
+
+ if (desc->ctrl & CL_DESC_OWN)
+ return NULL;
+
+ queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
+ queue->tx_pending--;
+
+ *flags = CL_DESC_GET_FLAGS(desc->ctrl);
+
+ if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
+ queue->done_tmu_tx_pkts--;
+
+ return desc->data;
+}
+
+int
+pfe_hif_lib_init(struct pfe *pfe)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ emac_txq_cnt = EMAC_TXQ_CNT;
+ pfe->hif.shm = &ghif_shm;
+
return 0;
}
diff --git a/drivers/net/pfe/pfe_hif_lib.h b/drivers/net/pfe/pfe_hif_lib.h
index 45fae7d93..25c1a3363 100644
--- a/drivers/net/pfe/pfe_hif_lib.h
+++ b/drivers/net/pfe/pfe_hif_lib.h
@@ -5,6 +5,8 @@
#ifndef _PFE_HIF_LIB_H_
#define _PFE_HIF_LIB_H_
+#include "pfe_hif.h"
+
#define HIF_CL_REQ_TIMEOUT 10
#define GFP_DMA_PFE 0
@@ -158,5 +160,14 @@ extern unsigned int emac_txq_cnt;
int pfe_hif_lib_init(struct pfe *pfe);
void pfe_hif_lib_exit(struct pfe *pfe);
+int hif_lib_client_register(struct hif_client_s *client);
+int hif_lib_client_unregister(struct hif_client_s *client);
+void hif_lib_indicate_client(struct hif_client_s *client, int event, int data);
+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
+ data);
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+ unsigned int *flags, int count);
+int pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool);
+void pfe_hif_shm_clean(struct hif_shm *hif_shm);
#endif /* _PFE_HIF_LIB_H_ */
diff --git a/drivers/net/pfe/pfe_mod.h b/drivers/net/pfe/pfe_mod.h
index 97fbb4891..363feed45 100644
--- a/drivers/net/pfe/pfe_mod.h
+++ b/drivers/net/pfe/pfe_mod.h
@@ -48,6 +48,7 @@ struct pfe {
struct ls1012a_pfe_platform_data platform_data;
struct pfe_hif hif;
struct pfe_eth eth;
+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
int mdio_muxval[PHYID_MAX_VAL];
uint8_t nb_devs;
uint8_t max_intf;
--
2.17.1