DPDK patches and discussions
* [dpdk-dev] [PATCH V2] net/thunderx: add support for hardware first skip feature
@ 2018-06-14  9:36 rkudurumalla
  2018-06-18  4:02 ` Jerin Jacob
  0 siblings, 1 reply; 2+ messages in thread
From: rkudurumalla @ 2018-06-14  9:36 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, jerin.jacob, Rakesh Kudurumalla

This feature is used to create a hole between the HEADROOM
and the actual packet data. The size of the hole is specified
in bytes as a module param to the PMD.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
---
V1: https://dpdk.org/ml/archives/dev/2018-May/103058.html
V2: Includes changes in thunderx.rst to render the output correctly in HTML

 doc/guides/nics/thunderx.rst         | 18 +++++++++
 drivers/net/thunderx/base/nicvf_hw.c | 12 ++++++
 drivers/net/thunderx/base/nicvf_hw.h |  1 +
 drivers/net/thunderx/nicvf_ethdev.c  | 76 +++++++++++++++++++++++++++++++++++-
 drivers/net/thunderx/nicvf_ethdev.h  |  1 +
 drivers/net/thunderx/nicvf_struct.h  |  1 +
 6 files changed, 108 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst
index 2642374..7825b43 100644
--- a/doc/guides/nics/thunderx.rst
+++ b/doc/guides/nics/thunderx.rst
@@ -30,6 +30,7 @@ Features of the ThunderX PMD are:
 - SR-IOV VF
 - NUMA support
 - Multi queue set support (up to 96 queues (12 queue sets)) per port
+- First skip support
 
 Supported ThunderX SoCs
 -----------------------
@@ -312,6 +313,18 @@ We will choose four secondary queue sets from the ending of the list (0002:01:01
 
 The nicvf thunderx driver will make use of attached secondary VFs automatically during the interface configuration stage.
 
+
+Module params
+--------------
+
+    This feature is used to create a hole between HEADROOM and the actual packet data. The size
+    of the hole is specified in bytes as a module param ("skip_data_bytes") to the PMD.
+    This scheme is useful when the application would like to insert a VLAN header without
+    disturbing the HEADROOM.
+
+    Use ``-w pci_id,skip_data_bytes=<number of bytes to skip>``
+
+
 Limitations
 -----------
 
@@ -335,3 +348,8 @@ Maximum packet segments
 The ThunderX SoC family NICs support up to 12 segments per packet when working
 in scatter/gather mode. So, setting MTU will result with ``EINVAL`` when the
 frame size does not fit in the maximum number of segments.
+
+First_skip
+~~~~~~~~~~
+
+The first_skip value must be less than 128 bytes (one cache line) and a multiple of 8 bytes.
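
For illustration, a hedged usage sketch of the new devarg (the PCI address,
core mask and byte count below are examples only, not taken from this patch);
with the EAL -w whitelist option the argument is passed as:

   ./testpmd -c 0xf -n 4 -w 0002:01:00.2,skip_data_bytes=8 -- -i

With this setting the PMD leaves an 8-byte hole between the mbuf HEADROOM and
the start of the received data, which the application can later use, e.g. to
prepend a VLAN header.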
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index ea8092c..b07a293 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -703,6 +703,18 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
 }
 
 void
+nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
+{
+	uint64_t val;
+
+	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+	val &= ~(0xfULL);
+	val |= (num_dwords & 0xf);
+
+	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
 nicvf_apad_config(struct nicvf *nic, bool enable)
 {
 	uint64_t val;
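
For reference, a minimal sketch (illustrative helper, not part of the patch) of
the byte-to-dword mapping that nicvf_first_skip_config() programs into the low
4 bits of NIC_VNIC_RQ_GEN_CFG:

	/* Illustration only: convert a skip length in bytes to register
	 * dwords. The PMD accepts multiples of 8 below 128 bytes, so the
	 * result is 0..15 and fits in the 4-bit field masked above. */
	static int
	skip_bytes_to_dwords(int bytes, uint8_t *dwords)
	{
		if (bytes < 0 || bytes >= 128 || (bytes % 8) != 0)
			return -1; /* invalid; caller falls back to a skip of 0 */
		*dwords = (uint8_t)(bytes / 8);
		return 0;
	}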
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 284d0bd..fd13ea8 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -193,6 +193,7 @@ uint32_t nicvf_qsize_sq_roundup(uint32_t val);
 void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
 
 void nicvf_apad_config(struct nicvf *nic, bool enable);
+void nicvf_first_skip_config(struct nicvf *nic, uint8_t dwords);
 
 int nicvf_rss_config(struct nicvf *nic, uint32_t  qcnt, uint64_t cfg);
 int nicvf_rss_term(struct nicvf *nic);
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 99fcd51..466cb86 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -34,6 +34,8 @@
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
 #include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
 
 #include "base/nicvf_plat.h"
 
@@ -1230,6 +1232,7 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
 {
 	uintptr_t p;
 	struct rte_mbuf mb_def;
+	struct nicvf *nic = rxq->nic;
 
 	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
@@ -1240,7 +1243,7 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
 				offsetof(struct rte_mbuf, data_off) != 6);
 	mb_def.nb_segs = 1;
-	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
 	mb_def.port = rxq->port_id;
 	rte_mbuf_refcnt_set(&mb_def, 1);
 
@@ -1260,9 +1263,19 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint64_t offloads;
+	uint32_t buffsz;
+	struct rte_pktmbuf_pool_private *mbp_priv;
 
 	PMD_INIT_FUNC_TRACE();
 
+	/* First skip check */
+	mbp_priv = rte_mempool_get_priv(mp);
+	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+	if (buffsz < (uint32_t)(nic->skip_bytes)) {
+		PMD_INIT_LOG(ERR, "First skip is larger than the configured buffer size");
+		return -EINVAL;
+	}
+
 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
 		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
 
@@ -1298,6 +1311,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		return -EINVAL;
 	}
 
+
 	/* Check rx_free_thresh upper bound */
 	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
 				rx_conf->rx_free_thresh :
@@ -1498,6 +1512,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 			return -EINVAL;
 		}
 		rxq->mbuf_phys_off -= data_off;
+		rxq->mbuf_phys_off -= nic->skip_bytes;
 
 		if (mbuf_phys_off == 0)
 			mbuf_phys_off = rxq->mbuf_phys_off;
@@ -1978,6 +1993,59 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
+static inline int
+nicvf_set_first_skip(struct rte_eth_dev *dev)
+{
+	int bytes_to_skip = 0;
+	int ret = 0;
+	unsigned int i;
+	struct rte_kvargs *kvlist;
+	static const char *const skip[] = {
+		SKIP_DATA_BYTES,
+		NULL};
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+
+	if (!dev->device->devargs) {
+		nicvf_first_skip_config(nic, 0);
+		return ret;
+	}
+
+	kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
+	if (!kvlist)
+		return -EINVAL;
+
+	if (kvlist->count == 0)
+		goto exit;
+
+	for (i = 0; i != kvlist->count; ++i) {
+		const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
+
+		if (!strcmp(pair->key, SKIP_DATA_BYTES))
+			bytes_to_skip = atoi(pair->value);
+	}
+
+	/* 128 bytes amounts to one cache line */
+	if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
+		if (!(bytes_to_skip % 8)) {
+			nicvf_first_skip_config(nic, (bytes_to_skip / 8));
+			nic->skip_bytes = bytes_to_skip;
+			goto kvlist_free;
+		} else {
+			PMD_INIT_LOG(ERR, "skip_data_bytes should be multiple of 8");
+			ret = -EINVAL;
+			goto exit;
+		}
+	} else {
+		PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
+		ret = -EINVAL;
+		goto exit;
+	}
+exit:
+	nicvf_first_skip_config(nic, 0);
+kvlist_free:
+	rte_kvargs_free(kvlist);
+	return ret;
+}
 static int
 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -2087,6 +2155,11 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 		goto malloc_fail;
 	}
 
+	ret = nicvf_set_first_skip(eth_dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure first skip");
+		goto malloc_fail;
+	}
 	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
 		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
 		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
@@ -2159,3 +2232,4 @@ static struct rte_pci_driver rte_nicvf_pmd = {
 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
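
As a hedged application-side sketch (the function name below is illustrative):
because the Rx queues initialise data_off to RTE_PKTMBUF_HEADROOM plus
skip_bytes, the hole appears as extra mbuf headroom which an application can
consume with rte_pktmbuf_prepend(), e.g. for a VLAN header:

	#include <rte_mbuf.h>

	/* Illustration only: use the extra headroom created by
	 * skip_data_bytes on a received mbuf to prepend a header of
	 * hdr_len bytes without moving the payload. */
	static char *
	use_first_skip_room(struct rte_mbuf *m, uint16_t skip_bytes,
			    uint16_t hdr_len)
	{
		/* headroom equals data_off, i.e. HEADROOM + skip_bytes */
		if (rte_pktmbuf_headroom(m) < RTE_PKTMBUF_HEADROOM + skip_bytes)
			return NULL;
		return rte_pktmbuf_prepend(m, hdr_len);
	}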
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index ea8dccd..9af5088 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -51,6 +51,7 @@
 
 #define VLAN_TAG_SIZE                   4	/* 802.3ac tag */
 
+#define SKIP_DATA_BYTES "skip_data_bytes"
 static inline struct nicvf *
 nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
 {
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index d4a83c3..cf98f7c 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -99,6 +99,7 @@ struct nicvf {
 	struct rte_intr_handle intr_handle;
 	uint8_t cpi_alg;
 	uint16_t mtu;
+	int skip_bytes;
 	bool vlan_filter_en;
 	uint8_t mac_addr[ETHER_ADDR_LEN];
 	/* secondary queue set support */
-- 
2.7.4
