* [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
@ 2020-04-30 6:59 ssardar
2020-05-01 11:33 ` Ferruh Yigit
0 siblings, 1 reply; 5+ messages in thread
From: ssardar @ 2020-04-30 6:59 UTC (permalink / raw)
To: dev
From: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
adding below APIs for axgbe
- axgbe_enable_rx_vlan_stripping: to enable vlan header stripping
- axgbe_disable_rx_vlan_stripping: to disable vlan header stripping
- axgbe_enable_rx_vlan_filtering: to enable vlan filter mode
- axgbe_disable_rx_vlan_filtering: to disable vlan filter mode
- axgbe_update_vlan_hash_table: crc calculation and hash table update
based on vlan values post filter enable
- axgbe_vlan_filter_set: setting of active vlan out of max 4K values before
doing hash update of same
- axgbe_vlan_tpid_set: setting of default tpid values
- axgbe_vlan_offload_set: a top layer function to call strip/filter etc
based on mask values
Signed-off-by: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
---
doc/guides/nics/features/axgbe.ini | 1 +
drivers/net/axgbe/axgbe_common.h | 29 +++++
drivers/net/axgbe/axgbe_dev.c | 171 ++++++++++++++++++++++++++--
drivers/net/axgbe/axgbe_ethdev.c | 173 ++++++++++++++++++++++++++++-
drivers/net/axgbe/axgbe_ethdev.h | 16 +++
drivers/net/axgbe/axgbe_rxtx.c | 63 ++++++++++-
6 files changed, 437 insertions(+), 16 deletions(-)
diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index 0becaa097..b7b4dd992 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -11,6 +11,7 @@ Scattered Rx = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
+VLAN = Y
CRC offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index f48117180..283cbffaa 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -257,6 +257,7 @@
#define MAC_HWF0R 0x011c
#define MAC_HWF1R 0x0120
#define MAC_HWF2R 0x0124
+#define MAC_HWF3R 0x0128
#define MAC_MDIOSCAR 0x0200
#define MAC_MDIOSCCDR 0x0204
#define MAC_MDIOISR 0x0214
@@ -282,6 +283,18 @@
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
+/*VLAN control bit mask*/
+#define AXGBE_VLNCTRL_MASK 0x0000FFFF
+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
+#define VLAN_PRIO_SHIFT 13
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
+#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
+#define VLAN_N_VID 4096
+#define VLAN_TABLE_SIZE 64
+#define VLAN_TABLE_BIT(vlan_id) (1UL << ((vlan_id) & 0x3F))
+#define VLAN_TABLE_IDX(vlan_id) ((vlan_id) >> 6)
+
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
#define MAC_HTR_INC 4
@@ -359,6 +372,10 @@
#define MAC_HWF2R_TXCHCNT_WIDTH 4
#define MAC_HWF2R_TXQCNT_INDEX 6
#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_HWF3R_CBTISEL_INDEX 4
+#define MAC_HWF3R_CBTISEL_WIDTH 1
+#define MAC_HWF3R_NRVF_INDEX 0
+#define MAC_HWF3R_NRVF_WIDTH 3
#define MAC_IER_TSIE_INDEX 12
#define MAC_IER_TSIE_WIDTH 1
#define MAC_ISR_MMCRXIS_INDEX 9
@@ -513,6 +530,8 @@
#define MAC_VLANIR_VLTI_WIDTH 1
#define MAC_VLANIR_CSVL_INDEX 19
#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANIR_VLC_INDEX 16
+#define MAC_VLANIR_VLC_WIDTH 2
#define MAC_VLANTR_DOVLTC_INDEX 20
#define MAC_VLANTR_DOVLTC_WIDTH 1
#define MAC_VLANTR_ERSVLM_INDEX 19
@@ -523,12 +542,18 @@
#define MAC_VLANTR_ETV_WIDTH 1
#define MAC_VLANTR_EVLS_INDEX 21
#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EIVLS_INDEX 21
+#define MAC_VLANTR_EIVLS_WIDTH 2
#define MAC_VLANTR_EVLRXS_INDEX 24
#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_EIVLRXS_INDEX 31
+#define MAC_VLANTR_EIVLRXS_WIDTH 1
#define MAC_VLANTR_VL_INDEX 0
#define MAC_VLANTR_VL_WIDTH 16
#define MAC_VLANTR_VTHM_INDEX 25
#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_EDVLP_INDEX 26
+#define MAC_VLANTR_EDVLP_WIDTH 1
#define MAC_VLANTR_VTIM_INDEX 17
#define MAC_VLANTR_VTIM_WIDTH 1
#define MAC_VR_DEVID_INDEX 8
@@ -537,6 +562,10 @@
#define MAC_VR_SNPSVER_WIDTH 8
#define MAC_VR_USERVER_INDEX 16
#define MAC_VR_USERVER_WIDTH 8
+#define MAC_VLANIR_VLT_INDEX 0
+#define MAC_VLANIR_VLT_WIDTH 16
+#define MAC_VLANTR_ERIVLT_INDEX 27
+#define MAC_VLANTR_ERIVLT_WIDTH 1
/* MMC register offsets */
#define MMC_CR 0x0800
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 5f0f19592..79b913c74 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -8,6 +8,46 @@
#include "axgbe_phy.h"
#include "axgbe_rxtx.h"
+static uint32_t bitrev32(uint32_t x)
+{
+ x = (x >> 16) | (x << 16);
+ x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
+ x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
+ x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
+ x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
+ return x;
+}
+
+/*MSB set bit from 32 to 1*/
+static int get_lastbit_set(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
@@ -407,6 +447,120 @@ static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
}
}
+static int axgbe_enable_rx_vlan_stripping(struct axgbe_port *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+ return 0;
+}
+
+static int axgbe_disable_rx_vlan_stripping(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+ return 0;
+}
+
+static int axgbe_enable_rx_vlan_filtering(struct axgbe_port *pdata)
+{
+ /* Enable VLAN filtering */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+ /* Enable VLAN Hash Table filtering */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+ /* Disable VLAN tag inverse matching */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+ return 0;
+}
+
+static int axgbe_disable_rx_vlan_filtering(struct axgbe_port *pdata)
+{
+ /* Disable VLAN filtering */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+ return 0;
+}
+
+static u32 axgbe_vid_crc32_le(__le16 vid_le)
+{
+ u32 poly = 0xedb88320; /* CRCPOLY_LE */
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_lastbit_set(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+ return crc;
+}
+
+static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
+{
+ u32 crc = 0;
+ u16 vid;
+ __le16 vid_le = 0;
+ u16 vlan_hash_table = 0;
+ unsigned int reg = 0;
+ unsigned long vid_idx, vid_valid;
+
+ /* Generate the VLAN Hash Table value */
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ vid_idx = VLAN_TABLE_IDX(vid);
+ vid_valid = pdata->active_vlans[vid_idx];
+ vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
+ if (vid_valid & 1)
+ PMD_DRV_LOG(DEBUG,
+ "vid:%d pdata->active_vlans[%ld]=0x%lx\n",
+ vid, vid_idx, pdata->active_vlans[vid_idx]);
+ else
+ continue;
+
+ vid_le = rte_cpu_to_le_16(vid);
+ crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
+ vlan_hash_table |= (1 << crc);
+ PMD_DRV_LOG(DEBUG, "crc = %d vlan_hash_table = 0x%x\n",
+ crc, vlan_hash_table);
+ }
+ /* Set the VLAN Hash Table filtering register */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+ reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
+ PMD_DRV_LOG(DEBUG, "vlan_hash_table reg val = 0x%x\n", reg);
+ return 0;
+}
+
static int __axgbe_exit(struct axgbe_port *pdata)
{
unsigned int count = 2000;
@@ -1008,16 +1162,6 @@ static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
}
}
-static uint32_t bitrev32(uint32_t x)
-{
- x = (x >> 16) | (x << 16);
- x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
- x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
- x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
- x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
- return x;
-}
-
static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
{
int i;
@@ -1217,4 +1361,11 @@ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
/* For FLOW ctrl */
hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
+
+ /*vlan*/
+ hw_if->enable_rx_vlan_stripping = axgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = axgbe_disable_rx_vlan_stripping;
+ hw_if->enable_rx_vlan_filtering = axgbe_enable_rx_vlan_filtering;
+ hw_if->disable_rx_vlan_filtering = axgbe_disable_rx_vlan_filtering;
+ hw_if->update_vlan_hash_table = axgbe_update_vlan_hash_table;
}
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 867058845..3ba691475 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -73,6 +73,14 @@ static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int
+axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on);
+static int
+axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid);
+static int
+axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
struct axgbe_xstats {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -214,6 +222,9 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
.rx_descriptor_status = axgbe_dev_rx_descriptor_status,
.tx_descriptor_status = axgbe_dev_tx_descriptor_status,
+ .vlan_filter_set = axgbe_vlan_filter_set,
+ .vlan_tpid_set = axgbe_vlan_tpid_set,
+ .vlan_offload_set = axgbe_vlan_offload_set,
};
static int axgbe_phy_reset(struct axgbe_port *pdata)
@@ -995,6 +1006,9 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
@@ -1003,6 +1017,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
@@ -1255,14 +1271,163 @@ axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned long vid_bit, vid_idx;
+
+ vid_bit = VLAN_TABLE_BIT(vid);
+ vid_idx = VLAN_TABLE_IDX(vid);
+
+ if (on) {
+ PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
+ vid, pdata->eth_dev->device->name);
+ pdata->active_vlans[vid_idx] |= vid_bit;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
+ vid, pdata->eth_dev->device->name);
+ pdata->active_vlans[vid_idx] &= ~vid_bit;
+ }
+ pdata->hw_if.update_vlan_hash_table(pdata);
+ return 0;
+}
+
+static int
+axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ uint32_t reg = 0;
+ uint32_t qinq = 0;
+
+ qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
+ PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
+
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_INNER:
+ PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
+ if (qinq) {
+ if (tpid != 0x8100 && tpid != 0x88a8)
+ PMD_DRV_LOG(ERR,
+ "tag supported 0x8100/0x88A8\n");
+ PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");
+
+ /*Enable Inner VLAN Tag */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
+ reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
+ PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
+
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Inner type not supported in single tag\n");
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
+ if (qinq) {
+ PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
+ /*Enable outer VLAN tag*/
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
+ reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
+ PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
+ reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
+ PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
+ } else {
+ if (tpid != 0x8100 && tpid != 0x88a8)
+ PMD_DRV_LOG(ERR,
+ "tag supported 0x8100/0x88A8\n");
+ }
+ break;
+ case ETH_VLAN_TYPE_MAX:
+ PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
+ break;
+ case ETH_VLAN_TYPE_UNKNOWN:
+ PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
+ break;
+ }
+ return 0;
+}
+
+static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
+{
+ int qinq = 0;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
+ qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
+ PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
+}
+
+static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
+{
+ int qinq = 0;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
+ qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
+ PMD_DRV_LOG(DEBUG, "vlan double tag disable EDVLP:qinq=0x%x\n", qinq);
+}
+
+static int
+axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
+ pdata->eth_dev->device->name);
+ pdata->hw_if.enable_rx_vlan_stripping(pdata);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
+ pdata->eth_dev->device->name);
+ pdata->hw_if.disable_rx_vlan_stripping(pdata);
+ }
+ }
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
+ pdata->eth_dev->device->name);
+ pdata->hw_if.enable_rx_vlan_filtering(pdata);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
+ pdata->eth_dev->device->name);
+ pdata->hw_if.disable_rx_vlan_filtering(pdata);
+ }
+ }
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
+ axgbe_vlan_extend_enable(pdata);
+ /* Set global registers with default ethertype*/
+ axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ RTE_ETHER_TYPE_VLAN);
+ axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ RTE_ETHER_TYPE_VLAN);
+ } else {
+ PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
+ axgbe_vlan_extend_disable(pdata);
+ }
+ }
+ return 0;
+}
+
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
- unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
+ mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);
memset(hw_feat, 0, sizeof(*hw_feat));
@@ -1313,6 +1478,12 @@ static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
AUXSNAPNUM);
+ /* Hardware feature register 3 */
+ hw_feat->tx_q_vlan_tag_ins = AXGMAC_GET_BITS(mac_hfr3,
+ MAC_HWF3R, CBTISEL);
+ hw_feat->no_of_vlan_extn = AXGMAC_GET_BITS(mac_hfr3,
+ MAC_HWF3R, NRVF);
+
/* Translate the Hash Table size into actual number */
switch (hw_feat->hash_table_size) {
case 0:
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index f10ec4a40..5840a313c 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -291,6 +291,13 @@ struct axgbe_hw_if {
int (*config_tx_flow_control)(struct axgbe_port *);
int (*config_rx_flow_control)(struct axgbe_port *);
+ /* vlan */
+ int (*enable_rx_vlan_stripping)(struct axgbe_port *);
+ int (*disable_rx_vlan_stripping)(struct axgbe_port *);
+ int (*enable_rx_vlan_filtering)(struct axgbe_port *);
+ int (*disable_rx_vlan_filtering)(struct axgbe_port *);
+ int (*update_vlan_hash_table)(struct axgbe_port *);
+
int (*exit)(struct axgbe_port *);
};
@@ -425,6 +432,12 @@ struct axgbe_hw_features {
unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
unsigned int pps_out_num; /* Number of PPS outputs */
unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+
+ /* HW Feature Register3 */
+ unsigned int tx_q_vlan_tag_ins; /* Queue/Channel based VLAN tag */
+ /* insertion on Tx Enable */
+ unsigned int no_of_vlan_extn; /* Number of Extended VLAN Tag */
+ /* Filters Enabled */
};
struct axgbe_version_data {
@@ -644,6 +657,9 @@ struct axgbe_port {
unsigned int hash_table_count;
unsigned int uc_hash_mac_addr;
unsigned int uc_hash_table[AXGBE_MAC_HASH_TABLE_SIZE];
+
+ /* Filtering support */
+ unsigned long active_vlans[VLAN_TABLE_SIZE];
};
void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 30c467db7..0027f605f 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -208,7 +208,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
volatile union axgbe_rx_desc *desc;
uint64_t old_dirty = rxq->dirty;
struct rte_mbuf *mbuf, *tmbuf;
- unsigned int err;
+ unsigned int err, etlt;
uint32_t error_status;
uint16_t idx, pidx, pkt_len;
@@ -275,6 +275,23 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Get the RSS hash */
if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+ etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ETLT);
+ if (!err || !etlt) {
+ if (etlt == 0x09 &&
+ (rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)) {
+ mbuf->ol_flags |=
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci =
+ AXGMAC_GET_BITS_LE(desc->write.desc0,
+ RX_NORMAL_DESC0, OVT);
+ } else {
+ mbuf->ol_flags &=
+ ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ mbuf->vlan_tci = 0;
+ }
+ }
pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
PL) - rxq->crc_len;
/* Mbuf populate */
@@ -319,7 +336,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
uint64_t old_dirty = rxq->dirty;
struct rte_mbuf *first_seg = NULL;
struct rte_mbuf *mbuf, *tmbuf;
- unsigned int err;
+ unsigned int err, etlt;
uint32_t error_status;
uint16_t idx, pidx, data_len = 0, pkt_len = 0;
@@ -394,7 +411,23 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
/* Get the RSS hash */
if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
-
+ etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ETLT);
+ if (!err || !etlt) {
+ if (etlt == 0x09 &&
+ (rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)) {
+ mbuf->ol_flags |=
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci =
+ AXGMAC_GET_BITS_LE(desc->write.desc0,
+ RX_NORMAL_DESC0, OVT);
+ } else {
+ mbuf->ol_flags &=
+ ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ mbuf->vlan_tci = 0;
+ }
+ }
/* Mbuf populate */
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->data_len = data_len;
@@ -487,6 +520,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
struct axgbe_tx_queue *txq;
unsigned int tsize;
const struct rte_memzone *tz;
+ uint64_t offloads;
tx_desc = nb_desc;
pdata = dev->data->dev_private;
@@ -506,7 +540,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (!txq)
return -ENOMEM;
txq->pdata = pdata;
-
+ offloads = tx_conf->offloads |
+ txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
txq->nb_desc = tx_desc;
txq->free_thresh = tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
@@ -518,7 +553,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (txq->nb_desc % txq->free_thresh != 0)
txq->vector_disable = 1;
- if (tx_conf->offloads != 0)
+ if (offloads != 0)
txq->vector_disable = 1;
/* Allocate TX ring hardware descriptors */
@@ -735,10 +770,28 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
rte_wmb();
+ if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ /* Mark it as a CONTEXT descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+ /* Set the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VT, mbuf->vlan_tci);
+ /* Indicate this descriptor contains the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ } else {
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+ }
+ rte_wmb();
+
/* Set OWN bit */
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
rte_wmb();
+
/* Save mbuf */
txq->sw_ring[idx] = mbuf;
/* Update current index*/
--
2.17.1
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
2020-04-30 6:59 [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe ssardar
@ 2020-05-01 11:33 ` Ferruh Yigit
2020-05-01 16:15 ` Ferruh Yigit
2020-05-06 5:17 ` Sardar, Shamsher singh
0 siblings, 2 replies; 5+ messages in thread
From: Ferruh Yigit @ 2020-05-01 11:33 UTC (permalink / raw)
To: ssardar, dev
On 4/30/2020 7:59 AM, ssardar@amd.com wrote:
> From: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
>
> adding below APIs for axgbe
> - axgbe_enable_rx_vlan_stripping: to enable vlan header stipping
> - axgbe_disable_rx_vlan_stripping: to disable vlan header stipping
> - axgbe_enable_rx_vlan_filtering: to enable vlan filter mode
> - axgbe_disable_rx_vlan_filtering: to disable vlan filter mode
> - axgbe_update_vlan_hash_table: crc calculation and hash table update
> based on vlan values post filter enable
> - axgbe_vlan_filter_set: setting of active vlan out of max 4K values before
> doing hash update of same
> - axgbe_vlan_tpid_set: setting of default tpid values
> - axgbe_vlan_offload_set: a top layer function to call stip/filter etc
> based on mask values
>
> Signed-off-by: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
<...>
> +static int
> +axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
> +{
> + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
> + struct axgbe_port *pdata = dev->data->dev_private;
> +
> + /* Indicate that VLAN Tx CTAGs come from context descriptors */
> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
> +
> + if (mask & ETH_VLAN_STRIP_MASK) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
> + PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.enable_rx_vlan_stripping(pdata);
> + } else {
> + PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.disable_rx_vlan_stripping(pdata);
> + }
> + }
> + if (mask & ETH_VLAN_FILTER_MASK) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
> + PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.enable_rx_vlan_filtering(pdata);
> + } else {
> + PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.disable_rx_vlan_filtering(pdata);
> + }
> + }
> + if (mask & ETH_VLAN_EXTEND_MASK) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
> + PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
> + axgbe_vlan_extend_enable(pdata);
> + /* Set global registers with default ethertype*/
> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
> + RTE_ETHER_TYPE_VLAN);
> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
> + RTE_ETHER_TYPE_VLAN);
> + } else {
> + PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
> + axgbe_vlan_extend_disable(pdata);
> + }
> + }
Is the intention here to enable/disable QinQ strip? Because the enable/disable
functions talk about qinq; if so, 'ETH_QINQ_STRIP_MASK' should be used.
<...>
> @@ -275,6 +275,23 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> /* Get the RSS hash */
> if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
> mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
> + etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
> + RX_NORMAL_DESC3, ETLT);
> + if (!err || !etlt) {
> + if (etlt == 0x09 &&
> + (rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_VLAN_STRIP)) {
> + mbuf->ol_flags |=
> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
> + mbuf->vlan_tci =
> + AXGMAC_GET_BITS_LE(desc->write.desc0,
> + RX_NORMAL_DESC0, OVT);
I don't know HW capabilities, but if 'etlt == 0x09' means packet has VLAN tag,
you can use it independent from strip, like below, up to you.
if (vlan) {
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->vlan_tci = xxx
if (vlan_stripped) {
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
}
}
<...>
> @@ -487,6 +520,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> struct axgbe_tx_queue *txq;
> unsigned int tsize;
> const struct rte_memzone *tz;
> + uint64_t offloads;
>
> tx_desc = nb_desc;
> pdata = dev->data->dev_private;
> @@ -506,7 +540,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> if (!txq)
> return -ENOMEM;
> txq->pdata = pdata;
> -
> + offloads = tx_conf->offloads |
> + txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
As far as I can see PMD doesn't support queue specific offloads, so
'tx_conf->offloads' should be always 0.
And since there is no queue specific offload and this 'offloads' information is
only used to set burst function, which is again only port bases (this will keep
overwrite same info per each queue), why not do this check in the
'axgbe_dev_configure()' instead of per queue?
And I can see you may hit the problem because of VLAN offload, but the issue
seems generic, not directly related to VLAN support, and this can be a separate
fix I think.
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
2020-05-01 11:33 ` Ferruh Yigit
@ 2020-05-01 16:15 ` Ferruh Yigit
2020-05-06 5:17 ` Sardar, Shamsher singh
1 sibling, 0 replies; 5+ messages in thread
From: Ferruh Yigit @ 2020-05-01 16:15 UTC (permalink / raw)
To: ssardar, dev
On 5/1/2020 12:33 PM, Ferruh Yigit wrote:
> On 4/30/2020 7:59 AM, ssardar@amd.com wrote:
>> From: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
>>
>> adding below APIs for axgbe
>> - axgbe_enable_rx_vlan_stripping: to enable vlan header stipping
>> - axgbe_disable_rx_vlan_stripping: to disable vlan header stipping
>> - axgbe_enable_rx_vlan_filtering: to enable vlan filter mode
>> - axgbe_disable_rx_vlan_filtering: to disable vlan filter mode
>> - axgbe_update_vlan_hash_table: crc calculation and hash table update
>> based on vlan values post filter enable
>> - axgbe_vlan_filter_set: setting of active vlan out of max 4K values before
>> doing hash update of same
>> - axgbe_vlan_tpid_set: setting of default tpid values
>> - axgbe_vlan_offload_set: a top layer function to call stip/filter etc
>> based on mask values
>>
>> Signed-off-by: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
>
> <...>
>
>> +static int
>> +axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>> +{
>> + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
>> + struct axgbe_port *pdata = dev->data->dev_private;
>> +
>> + /* Indicate that VLAN Tx CTAGs come from context descriptors */
>> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
>> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
>> +
>> + if (mask & ETH_VLAN_STRIP_MASK) {
>> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
>> + PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.enable_rx_vlan_stripping(pdata);
>> + } else {
>> + PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.disable_rx_vlan_stripping(pdata);
>> + }
>> + }
>> + if (mask & ETH_VLAN_FILTER_MASK) {
>> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
>> + PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.enable_rx_vlan_filtering(pdata);
>> + } else {
>> + PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.disable_rx_vlan_filtering(pdata);
>> + }
>> + }
>> + if (mask & ETH_VLAN_EXTEND_MASK) {
>> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
>> + PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
>> + axgbe_vlan_extend_enable(pdata);
>> + /* Set global registers with default ethertype*/
>> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
>> + RTE_ETHER_TYPE_VLAN);
>> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
>> + RTE_ETHER_TYPE_VLAN);
>> + } else {
>> + PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
>> + axgbe_vlan_extend_disable(pdata);
>> + }
>> + }
>
>
> Is the intention here to enable disable QinQ stip, because enable/disable
> fucntions talks about qinq, if so 'ETH_QINQ_STRIP_MASK' should be used.
>
> <...>
>> @@ -275,6 +275,23 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> /* Get the RSS hash */
>> if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>> mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>> + etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
>> + RX_NORMAL_DESC3, ETLT);
>> + if (!err || !etlt) {
>> + if (etlt == 0x09 &&
>> + (rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads &
>> + DEV_RX_OFFLOAD_VLAN_STRIP)) {
Also, checking these offload flags only in the '.vlan_offload_set' dev_ops is not
enough; the user may have set these flags but not called '.vlan_offload_set' at
all, which is why the configure() function should do similar checks and configuration.
>> + mbuf->ol_flags |=
>> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
>> + mbuf->vlan_tci =
>> + AXGMAC_GET_BITS_LE(desc->write.desc0,
>> + RX_NORMAL_DESC0, OVT);
>
> I don't know HW capabilities, but if 'etlt == 0x09' means packet has VLAN tag,
> you can use it independent from strip, like below, up to you.
>
> if (vlan) {
> mbuf->ol_flags |= PKT_RX_VLAN;
> mbuf->vlan_tci = xxx
> if (vlan_stripped) {
> mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
> }
> }
>
> <...>
>
>> @@ -487,6 +520,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>> struct axgbe_tx_queue *txq;
>> unsigned int tsize;
>> const struct rte_memzone *tz;
>> + uint64_t offloads;
>>
>> tx_desc = nb_desc;
>> pdata = dev->data->dev_private;
>> @@ -506,7 +540,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>> if (!txq)
>> return -ENOMEM;
>> txq->pdata = pdata;
>> -
>> + offloads = tx_conf->offloads |
>> + txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
>
> As far as I can see PMD doesn't support queue specific offloads, so
> 'tx_conf->offloads' should be always 0.
>
> And since there is no queue specific offload and this 'offloads' information is
> only used to set burst function, which is again only port bases (this will keep
> overwrite same info per each queue), why not do this check in the
> 'axgbe_dev_configure()' instead of per queue?
>
> And I can see you may hit the problem becuase of VLAN offload but the issue
> seems generic, not directly related to VLAN support, and this can be separate
> fix I think.
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
2020-05-01 11:33 ` Ferruh Yigit
2020-05-01 16:15 ` Ferruh Yigit
@ 2020-05-06 5:17 ` Sardar, Shamsher singh
2020-05-06 9:48 ` Ferruh Yigit
1 sibling, 1 reply; 5+ messages in thread
From: Sardar, Shamsher singh @ 2020-05-06 5:17 UTC (permalink / raw)
To: Ferruh Yigit, dev
[AMD Official Use Only - Internal Distribution Only]
Hi Ferruh,
Thanks for knowledge sharing.
Yes, etlt == 0x09 simply indicates "4'b1001: The packet is a type packet with a single CVLAN tag."
And you are right it should be as below and will do changes on same:
if (vlan) {
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->vlan_tci = xxx
if (vlan_stripped) {
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
}
}
But rest of the items will do as incremental development.
1. Like in axgbe_dev_configure() need to check for default setting when system comes up.
2. There are lot of changes need to be add for QinQ support.
Will add all as incremental development work.
Currently working to make other vendor's SFP to work and in parallel above changes will add for upstream.
Hope this should be fine.
Can you please shed some more light on the point below — what type of issues may occur?
"And I can see you may hit the problem because of VLAN offload but the issue seems generic, not directly related to VLAN support, and this can be a separate fix I think."
Thanks & regards
S Shamsher Singh
(+91)7259016141
-----Original Message-----
From: Ferruh Yigit <ferruh.yigit@intel.com>
Sent: Friday, May 1, 2020 5:04 PM
To: Sardar, Shamsher singh <Shamshersingh.Sardar@amd.com>; dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
[CAUTION: External Email]
On 4/30/2020 7:59 AM, ssardar@amd.com wrote:
> From: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
>
> adding below APIs for axgbe
> - axgbe_enable_rx_vlan_stripping: to enable vlan header stipping
> - axgbe_disable_rx_vlan_stripping: to disable vlan header stipping
> - axgbe_enable_rx_vlan_filtering: to enable vlan filter mode
> - axgbe_disable_rx_vlan_filtering: to disable vlan filter mode
> - axgbe_update_vlan_hash_table: crc calculation and hash table update
> based on vlan values post filter enable
> - axgbe_vlan_filter_set: setting of active vlan out of max 4K values
> before doing hash update of same
> - axgbe_vlan_tpid_set: setting of default tpid values
> - axgbe_vlan_offload_set: a top layer function to call stip/filter etc
> based on mask values
>
> Signed-off-by: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
<...>
> +static int
> +axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) {
> + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
> + struct axgbe_port *pdata = dev->data->dev_private;
> +
> + /* Indicate that VLAN Tx CTAGs come from context descriptors */
> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
> +
> + if (mask & ETH_VLAN_STRIP_MASK) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
> + PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.enable_rx_vlan_stripping(pdata);
> + } else {
> + PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.disable_rx_vlan_stripping(pdata);
> + }
> + }
> + if (mask & ETH_VLAN_FILTER_MASK) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
> + PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.enable_rx_vlan_filtering(pdata);
> + } else {
> + PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
> + pdata->eth_dev->device->name);
> + pdata->hw_if.disable_rx_vlan_filtering(pdata);
> + }
> + }
> + if (mask & ETH_VLAN_EXTEND_MASK) {
> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
> + PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
> + axgbe_vlan_extend_enable(pdata);
> + /* Set global registers with default ethertype*/
> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
> + RTE_ETHER_TYPE_VLAN);
> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
> + RTE_ETHER_TYPE_VLAN);
> + } else {
> + PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
> + axgbe_vlan_extend_disable(pdata);
> + }
> + }
Is the intention here to enable/disable QinQ strip? The enable/disable functions talk about QinQ, so if that is the case, 'ETH_QINQ_STRIP_MASK' should be used.
<...>
> @@ -275,6 +275,23 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> /* Get the RSS hash */
> if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
> mbuf->hash.rss =
> rte_le_to_cpu_32(desc->write.desc1);
> + etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
> + RX_NORMAL_DESC3, ETLT);
> + if (!err || !etlt) {
> + if (etlt == 0x09 &&
> + (rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_VLAN_STRIP)) {
> + mbuf->ol_flags |=
> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
> + mbuf->vlan_tci =
> + AXGMAC_GET_BITS_LE(desc->write.desc0,
> + RX_NORMAL_DESC0,
> + OVT);
I don't know HW capabilities, but if 'etlt == 0x09' means packet has VLAN tag, you can use it independent from strip, like below, up to you.
if (vlan) {
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->vlan_tci = xxx
if (vlan_stripped) {
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
}
}
<...>
> @@ -487,6 +520,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> struct axgbe_tx_queue *txq;
> unsigned int tsize;
> const struct rte_memzone *tz;
> + uint64_t offloads;
>
> tx_desc = nb_desc;
> pdata = dev->data->dev_private;
> @@ -506,7 +540,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> if (!txq)
> return -ENOMEM;
> txq->pdata = pdata;
> -
> + offloads = tx_conf->offloads |
> + txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
As far as I can see the PMD doesn't support queue-specific offloads, so 'tx_conf->offloads' should always be 0.
And since there is no queue-specific offload and this 'offloads' information is only used to set the burst function, which again is only port based (this will keep overwriting the same info for each queue), why not do this check in 'axgbe_dev_configure()' instead of per queue?
And I can see you may hit the problem because of VLAN offload, but the issue seems generic, not directly related to VLAN support, and this can be a separate fix I think.
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
2020-05-06 5:17 ` Sardar, Shamsher singh
@ 2020-05-06 9:48 ` Ferruh Yigit
0 siblings, 0 replies; 5+ messages in thread
From: Ferruh Yigit @ 2020-05-06 9:48 UTC (permalink / raw)
To: Sardar, Shamsher singh, dev
On 5/6/2020 6:17 AM, Sardar, Shamsher singh wrote:
> [AMD Official Use Only - Internal Distribution Only]
>
> Hi Ferruh,
> Thanks for knowledge sharing.
> Yes, etlt == 0x09 simply indicates "4'b1001: The packet is a type packet with a single CVLAN tag."
> And you are right it should be as below and will do changes on same:
>
> if (vlan) {
> mbuf->ol_flags |= PKT_RX_VLAN;
> mbuf->vlan_tci = xxx
> if (vlan_stripped) {
> mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
> }
> }
>
> But rest of the items will do as incremental development.
> 1. Like in axgbe_dev_configure() need to check for default setting when system comes up.
OK
> 2. There are lot of changes need to be add for QinQ support.
I just would like to be sure you are not confusing QinQ support with
'DEV_RX_OFFLOAD_VLAN_EXTEND', because 'DEV_RX_OFFLOAD_VLAN_EXTEND' is not very
clearly defined.
'axgbe_vlan_extend_enable()' mentions 'qinq', so it is confusing.
If you are clear on what 'DEV_RX_OFFLOAD_VLAN_EXTEND' is, that is OK. If not, I
suggest dropping the 'ETH_VLAN_EXTEND_MASK' part.
> Will add all as incremental development work.
> Currently working to make other vendor's SFP to work and in parallel above changes will add for upstream.
> Hope this should be fine.
OK
>
> Can you please put some more light on below, what type issues may occur?
> " And I can see you may hit the problem becuase of VLAN offload but the issue seems generic, not directly related to VLAN support, and this can be separate fix I think."
I was referring to the changes in 'axgbe_dev_tx_queue_setup()':
a) Why are those changes done at all?
b) Why are they done in this patch? Should it be a separate fix?
>
> Thanks & regards
> S Shamsher Singh
> (+91)7259016141
>
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Friday, May 1, 2020 5:04 PM
> To: Sardar, Shamsher singh <Shamshersingh.Sardar@amd.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe
>
> [CAUTION: External Email]
>
> On 4/30/2020 7:59 AM, ssardar@amd.com wrote:
>> From: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
>>
>> adding below APIs for axgbe
>> - axgbe_enable_rx_vlan_stripping: to enable vlan header stipping
>> - axgbe_disable_rx_vlan_stripping: to disable vlan header stipping
>> - axgbe_enable_rx_vlan_filtering: to enable vlan filter mode
>> - axgbe_disable_rx_vlan_filtering: to disable vlan filter mode
>> - axgbe_update_vlan_hash_table: crc calculation and hash table update
>> based on vlan values post filter enable
>> - axgbe_vlan_filter_set: setting of active vlan out of max 4K values
>> before doing hash update of same
>> - axgbe_vlan_tpid_set: setting of default tpid values
>> - axgbe_vlan_offload_set: a top layer function to call stip/filter etc
>> based on mask values
>>
>> Signed-off-by: Sardar Shamsher Singh <Shamshersingh.Sardar@amd.com>
>
> <...>
>
>> +static int
>> +axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) {
>> + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
>> + struct axgbe_port *pdata = dev->data->dev_private;
>> +
>> + /* Indicate that VLAN Tx CTAGs come from context descriptors */
>> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
>> + AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
>> +
>> + if (mask & ETH_VLAN_STRIP_MASK) {
>> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
>> + PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.enable_rx_vlan_stripping(pdata);
>> + } else {
>> + PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.disable_rx_vlan_stripping(pdata);
>> + }
>> + }
>> + if (mask & ETH_VLAN_FILTER_MASK) {
>> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
>> + PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.enable_rx_vlan_filtering(pdata);
>> + } else {
>> + PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
>> + pdata->eth_dev->device->name);
>> + pdata->hw_if.disable_rx_vlan_filtering(pdata);
>> + }
>> + }
>> + if (mask & ETH_VLAN_EXTEND_MASK) {
>> + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
>> + PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
>> + axgbe_vlan_extend_enable(pdata);
>> + /* Set global registers with default ethertype*/
>> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
>> + RTE_ETHER_TYPE_VLAN);
>> + axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
>> + RTE_ETHER_TYPE_VLAN);
>> + } else {
>> + PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
>> + axgbe_vlan_extend_disable(pdata);
>> + }
>> + }
>
>
> Is the intention here to enable/disable QinQ strip? The enable/disable functions talk about QinQ, so if that is the case, 'ETH_QINQ_STRIP_MASK' should be used.
>
> <...>
>> @@ -275,6 +275,23 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> /* Get the RSS hash */
>> if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>> mbuf->hash.rss =
>> rte_le_to_cpu_32(desc->write.desc1);
>> + etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
>> + RX_NORMAL_DESC3, ETLT);
>> + if (!err || !etlt) {
>> + if (etlt == 0x09 &&
>> + (rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads &
>> + DEV_RX_OFFLOAD_VLAN_STRIP)) {
>> + mbuf->ol_flags |=
>> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
>> + mbuf->vlan_tci =
>> + AXGMAC_GET_BITS_LE(desc->write.desc0,
>> + RX_NORMAL_DESC0,
>> + OVT);
>
> I don't know HW capabilities, but if 'etlt == 0x09' means packet has VLAN tag, you can use it independent from strip, like below, up to you.
>
> if (vlan) {
> mbuf->ol_flags |= PKT_RX_VLAN;
> mbuf->vlan_tci = xxx
> if (vlan_stripped) {
> mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
> }
> }
>
> <...>
>
>> @@ -487,6 +520,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>> struct axgbe_tx_queue *txq;
>> unsigned int tsize;
>> const struct rte_memzone *tz;
>> + uint64_t offloads;
>>
>> tx_desc = nb_desc;
>> pdata = dev->data->dev_private;
>> @@ -506,7 +540,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>> if (!txq)
>> return -ENOMEM;
>> txq->pdata = pdata;
>> -
>> + offloads = tx_conf->offloads |
>> + txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
>
> As far as I can see the PMD doesn't support queue-specific offloads, so 'tx_conf->offloads' should always be 0.
>
> And since there is no queue-specific offload and this 'offloads' information is only used to set the burst function, which again is only port based (this will keep overwriting the same info for each queue), why not do this check in 'axgbe_dev_configure()' instead of per queue?
>
> And I can see you may hit the problem because of VLAN offload, but the issue seems generic, not directly related to VLAN support, and this can be a separate fix I think.
>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2020-05-06 9:48 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-30 6:59 [dpdk-dev] [PATCH v3] net/axgbe: enabling VLAN support in axgbe ssardar
2020-05-01 11:33 ` Ferruh Yigit
2020-05-01 16:15 ` Ferruh Yigit
2020-05-06 5:17 ` Sardar, Shamsher singh
2020-05-06 9:48 ` Ferruh Yigit
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).