DPDK patches and discussions
* [PATCH] net/sxe: add net driver sxe
@ 2024-08-26  8:10 Jie Liu
  2024-09-06 23:39 ` [PATCH v2] " Jie Liu
  0 siblings, 1 reply; 4+ messages in thread
From: Jie Liu @ 2024-08-26  8:10 UTC (permalink / raw)
  To: anatoly.burakov; +Cc: dev, Jie Liu

Add the complete PMD library and doc build infrastructure,
and claim maintainership of the sxe PMD.

Signed-off-by: Jie Liu <liujie5@linkdatatechnology.com>
---
 MAINTAINERS                                |    6 +
 app/test-pmd/meson.build                   |    3 +
 doc/guides/nics/features/sxe.ini           |   81 +
 doc/guides/nics/features/sxe_vf.ini        |   39 +
 doc/guides/nics/index.rst                  |    1 +
 doc/guides/nics/sxe.rst                    |   71 +
 drivers/net/meson.build                    |    1 +
 drivers/net/sxe/Makefile                   |  105 +
 drivers/net/sxe/base/docker_version        |    4 +
 drivers/net/sxe/base/sxe_common.c          |   66 +
 drivers/net/sxe/base/sxe_common.h          |   15 +
 drivers/net/sxe/base/sxe_compat_platform.h |  143 +
 drivers/net/sxe/base/sxe_compat_version.h  |  304 +
 drivers/net/sxe/base/sxe_dpdk_version.h    |   20 +
 drivers/net/sxe/base/sxe_errno.h           |   61 +
 drivers/net/sxe/base/sxe_hw.c              | 6647 ++++++++++++++++++++
 drivers/net/sxe/base/sxe_hw.h              | 1505 +++++
 drivers/net/sxe/base/sxe_logs.h            |  299 +
 drivers/net/sxe/base/sxe_offload_common.c  |   66 +
 drivers/net/sxe/base/sxe_offload_common.h  |   15 +
 drivers/net/sxe/base/sxe_queue_common.c    |  450 ++
 drivers/net/sxe/base/sxe_queue_common.h    |  236 +
 drivers/net/sxe/base/sxe_rx_common.c       |  349 +
 drivers/net/sxe/base/sxe_rx_common.h       |   24 +
 drivers/net/sxe/base/sxe_tx_common.c       |   49 +
 drivers/net/sxe/base/sxe_tx_common.h       |   12 +
 drivers/net/sxe/base/sxe_types.h           |   40 +
 drivers/net/sxe/base/sxevf_hw.c            | 1057 ++++
 drivers/net/sxe/base/sxevf_hw.h            |  351 ++
 drivers/net/sxe/base/sxevf_regs.h          |  119 +
 drivers/net/sxe/include/drv_msg.h          |   22 +
 drivers/net/sxe/include/readme.txt         |    0
 drivers/net/sxe/include/sxe/mgl/sxe_port.h |   40 +
 drivers/net/sxe/include/sxe/sxe_cli.h      |  213 +
 drivers/net/sxe/include/sxe/sxe_hdc.h      |   43 +
 drivers/net/sxe/include/sxe/sxe_ioctl.h    |   21 +
 drivers/net/sxe/include/sxe/sxe_msg.h      |  139 +
 drivers/net/sxe/include/sxe/sxe_regs.h     | 1276 ++++
 drivers/net/sxe/include/sxe_type.h         |  794 +++
 drivers/net/sxe/include/sxe_version.h      |   32 +
 drivers/net/sxe/meson.build                |   55 +
 drivers/net/sxe/pf/rte_pmd_sxe.h           |   33 +
 drivers/net/sxe/pf/sxe.h                   |  117 +
 drivers/net/sxe/pf/sxe_dcb.c               | 1014 +++
 drivers/net/sxe/pf/sxe_dcb.h               |   99 +
 drivers/net/sxe/pf/sxe_ethdev.c            | 1109 ++++
 drivers/net/sxe/pf/sxe_ethdev.h            |   27 +
 drivers/net/sxe/pf/sxe_filter.c            |  826 +++
 drivers/net/sxe/pf/sxe_filter.h            |  119 +
 drivers/net/sxe/pf/sxe_flow_ctrl.c         |  100 +
 drivers/net/sxe/pf/sxe_flow_ctrl.h         |   16 +
 drivers/net/sxe/pf/sxe_irq.c               |  562 ++
 drivers/net/sxe/pf/sxe_irq.h               |   56 +
 drivers/net/sxe/pf/sxe_main.c              |  326 +
 drivers/net/sxe/pf/sxe_offload.c           |  365 ++
 drivers/net/sxe/pf/sxe_offload.h           |   51 +
 drivers/net/sxe/pf/sxe_phy.c               |  993 +++
 drivers/net/sxe/pf/sxe_phy.h               |  121 +
 drivers/net/sxe/pf/sxe_pmd_hdc.c           |  717 +++
 drivers/net/sxe/pf/sxe_pmd_hdc.h           |   44 +
 drivers/net/sxe/pf/sxe_ptp.c               |  204 +
 drivers/net/sxe/pf/sxe_ptp.h               |   26 +
 drivers/net/sxe/pf/sxe_queue.c             |  856 +++
 drivers/net/sxe/pf/sxe_queue.h             |  147 +
 drivers/net/sxe/pf/sxe_rx.c                | 1567 +++++
 drivers/net/sxe/pf/sxe_rx.h                |  195 +
 drivers/net/sxe/pf/sxe_stats.c             |  593 ++
 drivers/net/sxe/pf/sxe_stats.h             |   79 +
 drivers/net/sxe/pf/sxe_tx.c                | 1069 ++++
 drivers/net/sxe/pf/sxe_tx.h                |   31 +
 drivers/net/sxe/pf/sxe_vf.c                | 1275 ++++
 drivers/net/sxe/pf/sxe_vf.h                |  221 +
 drivers/net/sxe/rte_pmd_sxe_version.map    |   10 +
 drivers/net/sxe/sxe_drv_type.h             |   23 +
 drivers/net/sxe/version.map                |   24 +
 drivers/net/sxe/vf/sxevf.h                 |   44 +
 drivers/net/sxe/vf/sxevf_ethdev.c          |  811 +++
 drivers/net/sxe/vf/sxevf_ethdev.h          |   17 +
 drivers/net/sxe/vf/sxevf_filter.c          |  511 ++
 drivers/net/sxe/vf/sxevf_filter.h          |   79 +
 drivers/net/sxe/vf/sxevf_irq.c             |  455 ++
 drivers/net/sxe/vf/sxevf_irq.h             |   40 +
 drivers/net/sxe/vf/sxevf_main.c            |   94 +
 drivers/net/sxe/vf/sxevf_msg.c             |  646 ++
 drivers/net/sxe/vf/sxevf_msg.h             |  201 +
 drivers/net/sxe/vf/sxevf_offload.c         |   36 +
 drivers/net/sxe/vf/sxevf_offload.h         |   17 +
 drivers/net/sxe/vf/sxevf_queue.c           |  236 +
 drivers/net/sxe/vf/sxevf_queue.h           |   82 +
 drivers/net/sxe/vf/sxevf_rx.c              |  182 +
 drivers/net/sxe/vf/sxevf_rx.h              |   19 +
 drivers/net/sxe/vf/sxevf_stats.c           |  166 +
 drivers/net/sxe/vf/sxevf_stats.h           |   32 +
 drivers/net/sxe/vf/sxevf_tx.c              |   48 +
 drivers/net/sxe/vf/sxevf_tx.h              |   15 +
 95 files changed, 31520 insertions(+)
 create mode 100644 doc/guides/nics/features/sxe.ini
 create mode 100644 doc/guides/nics/features/sxe_vf.ini
 create mode 100644 doc/guides/nics/sxe.rst
 create mode 100644 drivers/net/sxe/Makefile
 create mode 100644 drivers/net/sxe/base/docker_version
 create mode 100644 drivers/net/sxe/base/sxe_common.c
 create mode 100644 drivers/net/sxe/base/sxe_common.h
 create mode 100644 drivers/net/sxe/base/sxe_compat_platform.h
 create mode 100644 drivers/net/sxe/base/sxe_compat_version.h
 create mode 100644 drivers/net/sxe/base/sxe_dpdk_version.h
 create mode 100644 drivers/net/sxe/base/sxe_errno.h
 create mode 100644 drivers/net/sxe/base/sxe_hw.c
 create mode 100644 drivers/net/sxe/base/sxe_hw.h
 create mode 100644 drivers/net/sxe/base/sxe_logs.h
 create mode 100644 drivers/net/sxe/base/sxe_offload_common.c
 create mode 100644 drivers/net/sxe/base/sxe_offload_common.h
 create mode 100644 drivers/net/sxe/base/sxe_queue_common.c
 create mode 100644 drivers/net/sxe/base/sxe_queue_common.h
 create mode 100644 drivers/net/sxe/base/sxe_rx_common.c
 create mode 100644 drivers/net/sxe/base/sxe_rx_common.h
 create mode 100644 drivers/net/sxe/base/sxe_tx_common.c
 create mode 100644 drivers/net/sxe/base/sxe_tx_common.h
 create mode 100644 drivers/net/sxe/base/sxe_types.h
 create mode 100644 drivers/net/sxe/base/sxevf_hw.c
 create mode 100644 drivers/net/sxe/base/sxevf_hw.h
 create mode 100644 drivers/net/sxe/base/sxevf_regs.h
 create mode 100644 drivers/net/sxe/include/drv_msg.h
 create mode 100644 drivers/net/sxe/include/readme.txt
 create mode 100644 drivers/net/sxe/include/sxe/mgl/sxe_port.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_cli.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_hdc.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_ioctl.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_msg.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_regs.h
 create mode 100644 drivers/net/sxe/include/sxe_type.h
 create mode 100644 drivers/net/sxe/include/sxe_version.h
 create mode 100644 drivers/net/sxe/meson.build
 create mode 100644 drivers/net/sxe/pf/rte_pmd_sxe.h
 create mode 100644 drivers/net/sxe/pf/sxe.h
 create mode 100644 drivers/net/sxe/pf/sxe_dcb.c
 create mode 100644 drivers/net/sxe/pf/sxe_dcb.h
 create mode 100644 drivers/net/sxe/pf/sxe_ethdev.c
 create mode 100644 drivers/net/sxe/pf/sxe_ethdev.h
 create mode 100644 drivers/net/sxe/pf/sxe_filter.c
 create mode 100644 drivers/net/sxe/pf/sxe_filter.h
 create mode 100644 drivers/net/sxe/pf/sxe_flow_ctrl.c
 create mode 100644 drivers/net/sxe/pf/sxe_flow_ctrl.h
 create mode 100644 drivers/net/sxe/pf/sxe_irq.c
 create mode 100644 drivers/net/sxe/pf/sxe_irq.h
 create mode 100644 drivers/net/sxe/pf/sxe_main.c
 create mode 100644 drivers/net/sxe/pf/sxe_offload.c
 create mode 100644 drivers/net/sxe/pf/sxe_offload.h
 create mode 100644 drivers/net/sxe/pf/sxe_phy.c
 create mode 100644 drivers/net/sxe/pf/sxe_phy.h
 create mode 100644 drivers/net/sxe/pf/sxe_pmd_hdc.c
 create mode 100644 drivers/net/sxe/pf/sxe_pmd_hdc.h
 create mode 100644 drivers/net/sxe/pf/sxe_ptp.c
 create mode 100644 drivers/net/sxe/pf/sxe_ptp.h
 create mode 100644 drivers/net/sxe/pf/sxe_queue.c
 create mode 100644 drivers/net/sxe/pf/sxe_queue.h
 create mode 100644 drivers/net/sxe/pf/sxe_rx.c
 create mode 100644 drivers/net/sxe/pf/sxe_rx.h
 create mode 100644 drivers/net/sxe/pf/sxe_stats.c
 create mode 100644 drivers/net/sxe/pf/sxe_stats.h
 create mode 100644 drivers/net/sxe/pf/sxe_tx.c
 create mode 100644 drivers/net/sxe/pf/sxe_tx.h
 create mode 100644 drivers/net/sxe/pf/sxe_vf.c
 create mode 100644 drivers/net/sxe/pf/sxe_vf.h
 create mode 100644 drivers/net/sxe/rte_pmd_sxe_version.map
 create mode 100644 drivers/net/sxe/sxe_drv_type.h
 create mode 100644 drivers/net/sxe/version.map
 create mode 100644 drivers/net/sxe/vf/sxevf.h
 create mode 100644 drivers/net/sxe/vf/sxevf_ethdev.c
 create mode 100644 drivers/net/sxe/vf/sxevf_ethdev.h
 create mode 100644 drivers/net/sxe/vf/sxevf_filter.c
 create mode 100644 drivers/net/sxe/vf/sxevf_filter.h
 create mode 100644 drivers/net/sxe/vf/sxevf_irq.c
 create mode 100644 drivers/net/sxe/vf/sxevf_irq.h
 create mode 100644 drivers/net/sxe/vf/sxevf_main.c
 create mode 100644 drivers/net/sxe/vf/sxevf_msg.c
 create mode 100644 drivers/net/sxe/vf/sxevf_msg.h
 create mode 100644 drivers/net/sxe/vf/sxevf_offload.c
 create mode 100644 drivers/net/sxe/vf/sxevf_offload.h
 create mode 100644 drivers/net/sxe/vf/sxevf_queue.c
 create mode 100644 drivers/net/sxe/vf/sxevf_queue.h
 create mode 100644 drivers/net/sxe/vf/sxevf_rx.c
 create mode 100644 drivers/net/sxe/vf/sxevf_rx.h
 create mode 100644 drivers/net/sxe/vf/sxevf_stats.c
 create mode 100644 drivers/net/sxe/vf/sxevf_stats.h
 create mode 100644 drivers/net/sxe/vf/sxevf_tx.c
 create mode 100644 drivers/net/sxe/vf/sxevf_tx.h

diff --git a/MAINTAINERS b/MAINTAINERS
index c5a703b5c0..03adb4036f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -981,6 +981,12 @@ F: drivers/net/sfc/
 F: doc/guides/nics/sfc_efx.rst
 F: doc/guides/nics/features/sfc.ini
 
+Linkdata sxe
+M: Jie Liu <liujie5@linkdatatechnology.com>
+F: drivers/net/sxe/
+F: doc/guides/nics/sxe.rst
+F: doc/guides/nics/features/sxe*.ini
+
 Wangxun ngbe
 M: Jiawen Wu <jiawenwu@trustnetic.com>
 F: drivers/net/ngbe/
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 719f875be0..34ca42bd55 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -72,6 +72,9 @@ endif
 if dpdk_conf.has('RTE_NET_DPAA')
     deps += ['bus_dpaa', 'mempool_dpaa', 'net_dpaa']
 endif
+if dpdk_conf.has('RTE_NET_SXE')
+    deps += 'net_sxe'
+endif
 
 # Driver-specific commands are located in driver directories.
 includes = include_directories('.')
diff --git a/doc/guides/nics/features/sxe.ini b/doc/guides/nics/features/sxe.ini
new file mode 100644
index 0000000000..5a18808ccf
--- /dev/null
+++ b/doc/guides/nics/features/sxe.ini
@@ -0,0 +1,81 @@
+;
+; Supported features of the 'sxe' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link speed configuration = Y
+Link status          = Y
+Link status event    = Y
+Rx interrupt         = Y
+Queue start/stop     = Y
+Power mgmt address monitor = Y
+MTU update           = Y
+Scattered Rx         = Y
+LRO                  = Y
+TSO                  = Y
+Promiscuous mode     = Y
+Allmulticast mode    = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
+VMDq                 = Y
+SR-IOV               = Y
+DCB                  = Y
+VLAN filter          = Y
+Flow control         = Y
+Rate limitation      = Y
+Traffic manager      = Y
+Inline crypto        = Y
+CRC offload          = P
+VLAN offload         = P
+QinQ offload         = P
+L3 checksum offload  = P
+L4 checksum offload  = P
+Inner L3 checksum    = P
+Inner L4 checksum    = P
+Packet type parsing  = Y
+Timesync             = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats          = Y
+Extended stats       = Y
+Stats per queue      = Y
+FW version           = Y
+EEPROM dump          = Y
+Module EEPROM dump   = Y
+Registers dump       = Y
+Multiprocess aware   = Y
+FreeBSD              = Y
+Linux                = Y
+ARMv8                = Y
+LoongArch64          = Y
+rv64                 = Y
+x86-32               = Y
+x86-64               = Y
+
+[rte_flow items]
+eth                  = P
+e_tag                = Y
+fuzzy                = Y
+ipv4                 = Y
+ipv6                 = Y
+nvgre                = Y
+raw                  = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = P
+vxlan                = Y
+
+[rte_flow actions]
+drop                 = Y
+mark                 = Y
+pf                   = Y
+queue                = Y
+rss                  = Y
+security             = Y
+vf                   = Y
diff --git a/doc/guides/nics/features/sxe_vf.ini b/doc/guides/nics/features/sxe_vf.ini
new file mode 100644
index 0000000000..49eaeaaaae
--- /dev/null
+++ b/doc/guides/nics/features/sxe_vf.ini
@@ -0,0 +1,39 @@
+;
+; Supported features of the 'sxe_vf' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status          = Y
+Rx interrupt         = Y
+Power mgmt address monitor = Y
+MTU update           = Y
+Scattered Rx         = Y
+LRO                  = Y
+TSO                  = Y
+Promiscuous mode     = Y
+Allmulticast mode    = Y
+Unicast MAC filter   = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
+VLAN filter          = Y
+Inline crypto        = Y
+CRC offload          = P
+VLAN offload         = P
+QinQ offload         = P
+L3 checksum offload  = P
+L4 checksum offload  = P
+Inner L3 checksum    = P
+Inner L4 checksum    = P
+Packet type parsing  = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats          = Y
+Extended stats       = Y
+Registers dump       = Y
+FreeBSD              = Y
+Linux                = Y
+ARMv8                = Y
+x86-32               = Y
+x86-64               = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index c14bc7988a..ac06a1c72d 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -69,3 +69,4 @@ Network Interface Controller Drivers
     vhost
     virtio
     vmxnet3
+    sxe
diff --git a/doc/guides/nics/sxe.rst b/doc/guides/nics/sxe.rst
new file mode 100644
index 0000000000..93969118be
--- /dev/null
+++ b/doc/guides/nics/sxe.rst
@@ -0,0 +1,71 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright (C), 2022, Linkdata Technology Co., Ltd.
+
+SXE Poll Mode Driver
+======================
+
+The SXE PMD (librte_pmd_sxe) provides poll mode driver support
+for the Linkdata 1160-2X 10GE Ethernet Adapter.
+
+Features
+--------
+- PXE boot
+- PTP (Precision Time Protocol)
+- VMDq (Virtual Machine Device Queues)
+- SR-IOV, max 2 PFs, 63 VFs per PF
+- 128 L2 Ethernet MAC address filters (unicast and multicast)
+- 64 L2 VLAN filters
+- PLDM over MCTP over SMBus
+- 802.1Q VLAN
+- Low Latency Interrupts
+- LRO
+- Promiscuous mode
+- Multicast mode
+- Multiple queues for TX and RX
+- Receive Side Scaling (RSS)
+- MAC/VLAN filtering
+- Packet type information
+- Checksum offload
+- VLAN/QinQ stripping and inserting
+- TSO offload
+- Port hardware statistics
+- Link state information
+- Link flow control
+- Interrupt mode for RX
+- Scatter/gather for TX and RX
+- DCB
+- IEEE 1588
+- FW version
+- Generic flow API
+
+Configuration
+-------------
+
+Dynamic Logging Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One can use the EAL option ``--log-level`` to change the default levels
+of the log types supported by the driver. The option takes an argument
+typically consisting of two parts separated by a colon.
+
+The SXE PMD provides the following log types:
+
+- ``pmd.net.sxe.drv`` (default level is **DEBUG**)
+
+  Affects driver-wide messages unrelated to any particular devices.
+
+- ``pmd.net.sxe.init`` (default level is **DEBUG**)
+
+  Extra logging of the messages during PMD initialization.
+
+- ``pmd.net.sxe.rx`` (default level is **DEBUG**)
+
+  Affects Rx data path messages.
+
+- ``pmd.net.sxe.tx`` (default level is **DEBUG**)
+
+  Affects Tx data path messages.
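+
+For example, to set only the Tx log type to debug level when launching
+``testpmd`` (the PCI address below is illustrative)::
+
+   dpdk-testpmd -a 0000:18:00.0 --log-level='pmd.net.sxe.tx:debug' -- -i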
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index fb6d34b782..4d716d76cd 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -62,6 +62,7 @@ drivers = [
         'vhost',
         'virtio',
         'vmxnet3',
+        'sxe',
 ]
 std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
 std_deps += ['bus_pci']         # very many PMDs depend on PCI, so make std
diff --git a/drivers/net/sxe/Makefile b/drivers/net/sxe/Makefile
new file mode 100644
index 0000000000..5e2870fdc4
--- /dev/null
+++ b/drivers/net/sxe/Makefile
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sxe.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -DSXE_DPDK
+CFLAGS += -DSXE_HOST_DRIVER
+CFLAGS += -DSXE_DPDK_L4_FEATURES
+CFLAGS += -DSXE_DPDK_SRIOV
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_sxe_version.map
+
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER  = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wmissing-prototypes
+
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lpthread
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+
+$(shell cp $(SRCDIR)/pf/* $(SRCDIR))
+$(shell cp $(SRCDIR)/vf/* $(SRCDIR))
+$(shell cp $(SRCDIR)/base/* $(SRCDIR))
+$(shell cp $(SRCDIR)/include/*.h $(SRCDIR))
+$(shell cp $(SRCDIR)/include/sxe/*.h $(SRCDIR))
+$(shell cp $(SRCDIR)/include/sxe/mgl/*.h $(SRCDIR))
+$(warning "file copy done")
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_testpmd.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_offload_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_queue_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_rx_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_hw.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_flow_ctrl.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_offload.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_pmd_hdc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_ptp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_queue.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vf.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_msg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_queue.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_offload.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_SXE_PMD)-include := rte_pmd_sxe.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_SXE_PMD)-include += sxe_dcb.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
diff --git a/drivers/net/sxe/base/docker_version b/drivers/net/sxe/base/docker_version
new file mode 100644
index 0000000000..33ecb22479
--- /dev/null
+++ b/drivers/net/sxe/base/docker_version
@@ -0,0 +1,4 @@
+dpdk_images_v0.1:
+contains only the dpdk source code; source directory: /usr/src/dpdk
+dpdk_images_v0.2:
+contains the dpdk source code plus the native build artifacts under build/
diff --git a/drivers/net/sxe/base/sxe_common.c b/drivers/net/sxe/base/sxe_common.c
new file mode 100644
index 0000000000..62f76ccf3f
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_common.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "sxe_types.h"
+#include "sxe_common.h"
+
+#define SXE_TRACE_ID_COUNT_MASK  0x00000000000000FFLLU
+#define SXE_TRACE_ID_TID_MASK    0x0000000000FFFF00LLU
+#define SXE_TRACE_ID_TIME_MASK   0x00FFFFFFFF000000LLU
+#define SXE_TRACE_ID_FLAG        0xFF00000000000000LLU
+
+#define SXE_TRACE_ID_COUNT_SHIFT 0
+#define SXE_TRACE_ID_TID_SHIFT   8
+#define SXE_TRACE_ID_TIME_SHIFT  24
+
+#define SXE_SEC_TO_MS(sec) ((sec) * 1000ULL)
+#define SXE_SEC_TO_NS(sec) ((sec) * 1000000000ULL)
+
+#define SXE_USEC_PER_MS          1000
+
+static u64 sxe_trace_id;
+
+u64 sxe_time_get_real_ms(void)
+{
+	u64 ms = 0;
+	struct timeval      tv = { 0 };
+	s32 ret = gettimeofday(&tv, NULL);
+	if (ret < 0) {
+		goto l_end;
+	}
+
+	ms = SXE_SEC_TO_MS(tv.tv_sec) + tv.tv_usec / SXE_USEC_PER_MS;
+
+l_end:
+	return ms;
+}
+
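+/*
+ * Trace id layout, per the masks and shifts above:
+ *   bits 63-56  constant flag (0xFF)
+ *   bits 55-24  timestamp in milliseconds
+ *   bits 23-8   thread id
+ *   bits 7-0    counter
+ */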
+u64 sxe_trace_id_gen(void)
+{
+	u64 tid       = getpid() + (pthread_self() << 20);
+	u64 index     = 0;
+	u64 timestamp = sxe_time_get_real_ms();
+
+	sxe_trace_id = (SXE_TRACE_ID_FLAG)
+		| ((timestamp << SXE_TRACE_ID_TIME_SHIFT) & SXE_TRACE_ID_TIME_MASK)
+		| ((tid << SXE_TRACE_ID_TID_SHIFT) & SXE_TRACE_ID_TID_MASK)
+		| ((index << SXE_TRACE_ID_COUNT_SHIFT) & SXE_TRACE_ID_COUNT_MASK);
+	return sxe_trace_id;
+}
+
+void sxe_trace_id_clean(void)
+{
+	sxe_trace_id = 0;
+	return;
+}
+
+u64 sxe_trace_id_get(void)
+{
+	return sxe_trace_id++;
+}
diff --git a/drivers/net/sxe/base/sxe_common.h b/drivers/net/sxe/base/sxe_common.h
new file mode 100644
index 0000000000..43c062b937
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_common.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_DPDK_COMMON_H__
+#define __SXE_DPDK_COMMON_H__
+
+u64 sxe_trace_id_gen(void);
+
+void sxe_trace_id_clean(void);
+
+u64 sxe_trace_id_get(void);
+
+u64 sxe_time_get_real_ms(void);
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_compat_platform.h b/drivers/net/sxe/base/sxe_compat_platform.h
new file mode 100644
index 0000000000..8509f3cf0c
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_compat_platform.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_COMPAT_PLATFORM_H__
+#define __SXE_COMPAT_PLATFORM_H__
+
+#include <rte_cycles.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_common.h>
+
+#include "sxe_types.h"
+
+#define  false 0
+#define  true  1
+
+#ifdef SXE_TEST
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
+#endif
+
+#define __iomem
+#define __force
+
+#define min(a,b)	RTE_MIN(a,b)
+
+#ifdef __has_attribute
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0)
+#endif
+#else
+# define fallthrough do {} while (0)
+#endif
+
+#define __swab32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+#define __swab16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+#ifndef ntohs
+#define ntohs(o) be16_to_cpu(o)
+#endif
+
+#ifndef ntohl
+#define ntohl(o) be32_to_cpu(o)
+#endif
+
+#ifndef htons
+#define htons(o) cpu_to_be16(o)
+#endif
+
+#ifndef htonl
+#define htonl(o) cpu_to_be32(o)
+#endif
+#define mdelay rte_delay_ms
+#define udelay rte_delay_us
+#define usleep_range(min, max) rte_delay_us(min)
+#define msleep(x)             rte_delay_us((x) * 1000)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define BIT(x)	(1UL << (x))
+#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+#define NSEC_PER_SEC	1000000000L
+
+#define ETH_P_1588	0x88F7		
+
+#define VLAN_PRIO_SHIFT		13
+
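+/*
+ * Linux-style bit helpers over an array of 32-bit words: nr >> 5
+ * selects the word and nr & 31 the bit within it. Unlike their
+ * kernel namesakes these are plain, non-atomic operations.
+ */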
+static inline void
+set_bit(unsigned long nr, void *addr)
+{
+	int *m = ((int *)addr) + (nr >> 5);
+	*m |= 1 << (nr & 31);
+}
+
+static inline int
+test_bit(int nr, const void *addr)
+{
+	return (1UL & (((const int *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
+}
+
+static inline void
+clear_bit(unsigned long nr, void *addr)
+{
+	int *m = ((int *)addr) + (nr >> 5);
+	*m &= ~(1 << (nr & 31));
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, void *addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *)addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old & ~mask;
+	return (old & mask) != 0;
+}
+
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+	return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+	rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+static inline u32 sxe_read_addr(const volatile void *addr)
+{
+	return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+static inline void  sxe_write_addr(u32 value, volatile void *addr)
+{
+	rte_write32((rte_cpu_to_le_32(value)), addr);
+	return;
+}
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_compat_version.h b/drivers/net/sxe/base/sxe_compat_version.h
new file mode 100644
index 0000000000..32d1a0862a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_compat_version.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_COMPAT_VERSION_H__
+#define __SXE_COMPAT_VERSION_H__
+
+#include <stdbool.h>
+#include "sxe_dpdk_version.h"
+
+struct rte_eth_dev;
+enum rte_eth_event_type;
+
+int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
+	enum rte_eth_event_type event, void *ret_param);
+
+#ifdef DPDK_19_11_6
+#define ETH_DEV_OPS_HAS_DESC_RELATE
+
+#define __rte_cold __attribute__((cold))
+
+#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX 
+#ifdef RTE_ARCH_ARM64
+#define RTE_ARCH_ARM
+#endif
+
+#else
+
+#define SET_AUTOFILL_QUEUE_XSTATS
+#define PCI_REG_WC_WRITE
+
+#endif
+
+#ifndef PCI_REG_WC_WRITE
+#define rte_write32_wc rte_write32
+#define rte_write32_wc_relaxed rte_write32_relaxed
+#endif
+
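+/*
+ * DPDK 21.11 moved the ETH_, DEV_ and PKT_ flag namespaces under
+ * RTE_ETH_ and RTE_MBUF_F_; when building against the 19.11/20.11
+ * LTS releases, map the new names back onto the old ones.
+ */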
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+
+#define    RTE_ETH_RSS_IPV4                ETH_RSS_IPV4
+#define    RTE_ETH_RSS_NONFRAG_IPV4_TCP    ETH_RSS_NONFRAG_IPV4_TCP
+#define    RTE_ETH_RSS_NONFRAG_IPV4_UDP    ETH_RSS_NONFRAG_IPV4_UDP
+#define    RTE_ETH_RSS_IPV6                ETH_RSS_IPV6
+#define    RTE_ETH_RSS_NONFRAG_IPV6_TCP    ETH_RSS_NONFRAG_IPV6_TCP
+#define    RTE_ETH_RSS_NONFRAG_IPV6_UDP    ETH_RSS_NONFRAG_IPV6_UDP
+#define    RTE_ETH_RSS_IPV6_EX             ETH_RSS_IPV6_EX
+#define    RTE_ETH_RSS_IPV6_TCP_EX         ETH_RSS_IPV6_TCP_EX
+#define    RTE_ETH_RSS_IPV6_UDP_EX         ETH_RSS_IPV6_UDP_EX
+
+
+#define    RTE_ETH_VLAN_TYPE_UNKNOWN       ETH_VLAN_TYPE_UNKNOWN
+#define    RTE_ETH_VLAN_TYPE_INNER         ETH_VLAN_TYPE_INNER
+#define    RTE_ETH_VLAN_TYPE_OUTER         ETH_VLAN_TYPE_OUTER
+#define    RTE_ETH_VLAN_TYPE_MAX           ETH_VLAN_TYPE_MAX
+
+
+#define    RTE_ETH_8_POOLS        ETH_8_POOLS
+#define    RTE_ETH_16_POOLS       ETH_16_POOLS
+#define    RTE_ETH_32_POOLS       ETH_32_POOLS
+#define    RTE_ETH_64_POOLS       ETH_64_POOLS
+
+
+#define RTE_ETH_4_TCS       ETH_4_TCS
+#define RTE_ETH_8_TCS       ETH_8_TCS
+
+
+#define RTE_ETH_MQ_RX_NONE          ETH_MQ_RX_NONE
+#define RTE_ETH_MQ_RX_RSS           ETH_MQ_RX_RSS
+#define RTE_ETH_MQ_RX_DCB           ETH_MQ_RX_DCB
+#define RTE_ETH_MQ_RX_DCB_RSS       ETH_MQ_RX_DCB_RSS
+#define RTE_ETH_MQ_RX_VMDQ_ONLY     ETH_MQ_RX_VMDQ_ONLY
+#define RTE_ETH_MQ_RX_VMDQ_RSS      ETH_MQ_RX_VMDQ_RSS
+#define RTE_ETH_MQ_RX_VMDQ_DCB      ETH_MQ_RX_VMDQ_DCB
+#define RTE_ETH_MQ_RX_VMDQ_DCB_RSS  ETH_MQ_RX_VMDQ_DCB_RSS
+
+
+#define RTE_ETH_MQ_TX_NONE          ETH_MQ_TX_NONE
+#define RTE_ETH_MQ_TX_DCB           ETH_MQ_TX_DCB
+#define RTE_ETH_MQ_TX_VMDQ_DCB      ETH_MQ_TX_VMDQ_DCB
+#define RTE_ETH_MQ_TX_VMDQ_ONLY     ETH_MQ_TX_VMDQ_ONLY
+
+
+#define RTE_ETH_FC_NONE         RTE_FC_NONE
+#define RTE_ETH_FC_RX_PAUSE     RTE_FC_RX_PAUSE
+#define RTE_ETH_FC_TX_PAUSE     RTE_FC_TX_PAUSE
+#define RTE_ETH_FC_FULL         RTE_FC_FULL
+
+
+#define RTE_ETH_MQ_RX_RSS_FLAG      ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG      ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG     ETH_MQ_RX_VMDQ_FLAG
+
+
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       DEV_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       DEV_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        DEV_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        DEV_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO          DEV_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       DEV_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     DEV_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      DEV_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      DEV_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER          DEV_RX_OFFLOAD_SCATTER
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP        DEV_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY         DEV_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC         DEV_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       DEV_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH         DEV_RX_OFFLOAD_RSS_HASH
+
+
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      DEV_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       DEV_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        DEV_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        DEV_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       DEV_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO          DEV_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO          DEV_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      DEV_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    DEV_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      DEV_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     DEV_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   DEV_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    DEV_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      DEV_TX_OFFLOAD_MT_LOCKFREE
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       DEV_TX_OFFLOAD_MULTI_SEGS
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   DEV_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY         DEV_TX_OFFLOAD_SECURITY
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      DEV_TX_OFFLOAD_UDP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       DEV_TX_OFFLOAD_IP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP
+
+
+#define RTE_ETH_LINK_SPEED_AUTONEG      ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED        ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_1G           ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_10G          ETH_LINK_SPEED_10G
+
+#define RTE_ETH_SPEED_NUM_NONE          ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_1G            ETH_SPEED_NUM_1G  
+#define RTE_ETH_SPEED_NUM_10G           ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_UNKNOWN       ETH_SPEED_NUM_UNKNOWN
+
+
+#define RTE_ETH_LINK_HALF_DUPLEX        ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX        ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN               ETH_LINK_DOWN       
+#define RTE_ETH_LINK_UP                 ETH_LINK_UP 
+
+
+#define RTE_ETH_RSS_RETA_SIZE_128       ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RETA_GROUP_SIZE         RTE_RETA_GROUP_SIZE
+
+
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          ETH_DCB_NUM_QUEUES
+
+
+#define RTE_ETH_DCB_PFC_SUPPORT     ETH_DCB_PFC_SUPPORT
+
+
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK      ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK     ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK     ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK      ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX          ETH_VLAN_ID_MAX
+
+
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   ETH_NUM_RECEIVE_MAC_ADDR
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY ETH_VMDQ_NUM_UC_HASH_ARRAY
+
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG      ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC    ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC    ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST  ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST  ETH_VMDQ_ACCEPT_MULTICAST
+
+#define RTE_VLAN_HLEN       4  
+
+
+#define RTE_MBUF_F_RX_VLAN                  PKT_RX_VLAN
+#define RTE_MBUF_F_RX_RSS_HASH              PKT_RX_RSS_HASH
+#define RTE_MBUF_F_RX_FDIR                  PKT_RX_FDIR
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD    PKT_RX_EIP_CKSUM_BAD
+#define RTE_MBUF_F_RX_VLAN_STRIPPED         PKT_RX_VLAN_STRIPPED
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK         PKT_RX_IP_CKSUM_MASK
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN      PKT_RX_IP_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD          PKT_RX_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD         PKT_RX_IP_CKSUM_GOOD
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE         PKT_RX_IP_CKSUM_NONE
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK         PKT_RX_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN      PKT_RX_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD          PKT_RX_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD         PKT_RX_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE         PKT_RX_L4_CKSUM_NONE
+#define RTE_MBUF_F_RX_IEEE1588_PTP          PKT_RX_IEEE1588_PTP
+#define RTE_MBUF_F_RX_IEEE1588_TMST         PKT_RX_IEEE1588_TMST
+#define RTE_MBUF_F_RX_FDIR_ID               PKT_RX_FDIR_ID
+#define RTE_MBUF_F_RX_FDIR_FLX              PKT_RX_FDIR_FLX
+#define RTE_MBUF_F_RX_QINQ_STRIPPED         PKT_RX_QINQ_STRIPPED
+#define RTE_MBUF_F_RX_LRO                   PKT_RX_LRO
+#define RTE_MBUF_F_RX_SEC_OFFLOAD	        PKT_RX_SEC_OFFLOAD
+#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED	PKT_RX_SEC_OFFLOAD_FAILED
+#define RTE_MBUF_F_RX_QINQ                  PKT_RX_QINQ
+
+#define RTE_MBUF_F_TX_SEC_OFFLOAD	        PKT_TX_SEC_OFFLOAD
+#define RTE_MBUF_F_TX_MACSEC                PKT_TX_MACSEC
+#define RTE_MBUF_F_TX_QINQ                  PKT_TX_QINQ
+#define RTE_MBUF_F_TX_TCP_SEG               PKT_TX_TCP_SEG
+#define RTE_MBUF_F_TX_IEEE1588_TMST         PKT_TX_IEEE1588_TMST
+#define RTE_MBUF_F_TX_L4_NO_CKSUM           PKT_TX_L4_NO_CKSUM
+#define RTE_MBUF_F_TX_TCP_CKSUM             PKT_TX_TCP_CKSUM
+#define RTE_MBUF_F_TX_SCTP_CKSUM            PKT_TX_SCTP_CKSUM
+#define RTE_MBUF_F_TX_UDP_CKSUM             PKT_TX_UDP_CKSUM
+#define RTE_MBUF_F_TX_L4_MASK               PKT_TX_L4_MASK
+#define RTE_MBUF_F_TX_IP_CKSUM              PKT_TX_IP_CKSUM
+#define RTE_MBUF_F_TX_IPV4                  PKT_TX_IPV4
+#define RTE_MBUF_F_TX_IPV6                  PKT_TX_IPV6
+#define RTE_MBUF_F_TX_VLAN                  PKT_TX_VLAN
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM        PKT_TX_OUTER_IP_CKSUM
+#define RTE_MBUF_F_TX_OUTER_IPV4            PKT_TX_OUTER_IPV4
+#define RTE_MBUF_F_TX_OUTER_IPV6            PKT_TX_OUTER_IPV6
+
+#define RTE_MBUF_F_TX_OFFLOAD_MASK          PKT_TX_OFFLOAD_MASK
+
+#define RTE_ETH_8_POOLS                     ETH_8_POOLS
+#define RTE_ETH_16_POOLS                    ETH_16_POOLS
+#define RTE_ETH_32_POOLS                    ETH_32_POOLS
+#define RTE_ETH_64_POOLS                    ETH_64_POOLS
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_ETHDEV_DEBUG_RX
+#define RTE_ETHDEV_DEBUG_TX
+#endif
+
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#define rte_eth_fdir_pballoc_type   rte_fdir_pballoc_type
+#define rte_eth_fdir_conf           rte_fdir_conf
+
+#define RTE_ETH_FDIR_PBALLOC_64K   RTE_FDIR_PBALLOC_64K
+#define RTE_ETH_FDIR_PBALLOC_128K  RTE_FDIR_PBALLOC_128K
+#define RTE_ETH_FDIR_PBALLOC_256K  RTE_FDIR_PBALLOC_256K
+#endif
+
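+/*
+ * Since DPDK 21.11 rte_pci_device holds a pointer to an opaque
+ * rte_intr_handle instead of embedding the struct, hence the two
+ * access forms for SXE_PCI_INTR_HANDLE below.
+ */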
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+	(&((pci_dev)->intr_handle))
+
+#define SXE_DEV_FNAV_CONF(dev) \
+	(&((dev)->data->dev_conf.fdir_conf)) 
+#define SXE_GET_FRAME_SIZE(dev) \
+	(dev->data->dev_conf.rxmode.max_rx_pkt_len)
+	
+#elif defined DPDK_21_11_5
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+	((pci_dev)->intr_handle)
+#define SXE_DEV_FNAV_CONF(dev) \
+	(&((dev)->data->dev_conf.fdir_conf)) 
+#define SXE_GET_FRAME_SIZE(dev) \
+	(dev->data->mtu + SXE_ETH_OVERHEAD)
+
+#else
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+	((pci_dev)->intr_handle)
+#define SXE_DEV_FNAV_CONF(dev) \
+	(&((struct sxe_adapter *)(dev)->data->dev_private)->fnav_conf) 
+#define RTE_ADAPTER_HAVE_FNAV_CONF
+#define SXE_GET_FRAME_SIZE(dev) \
+	(dev->data->mtu + SXE_ETH_OVERHEAD)
+
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#define ETH_DEV_OPS_FILTER_CTRL
+#define DEV_RX_JUMBO_FRAME
+#define ETH_DEV_MIRROR_RULE
+#define ETH_DEV_RX_DESC_DONE
+#else
+#define ETH_DEV_OPS_MONITOR
+#endif
+
+#ifdef DPDK_22_11_3
+#define DEV_RX_OFFLOAD_CHECKSUM RTE_ETH_RX_OFFLOAD_CHECKSUM
+#endif
+
+#ifdef DPDK_22_11_3
+#define ETH_DCB_NUM_USER_PRIORITIES RTE_ETH_DCB_NUM_USER_PRIORITIES
+#endif
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_dpdk_version.h b/drivers/net/sxe/base/sxe_dpdk_version.h
new file mode 100644
index 0000000000..902812566a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_dpdk_version.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DPDK_VERSION_H__
+#define __SXE_DPDK_VERSION_H__
+
+#include <rte_version.h>
+
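+/*
+ * Map the RTE_VERSION detected at build time onto one coarse
+ * per-LTS-release macro; the compat headers key their ifdefs off
+ * these instead of raw version numbers.
+ */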
+#if (RTE_VERSION >= RTE_VERSION_NUM(19, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(19, 12, 0, 0))
+	#define DPDK_19_11_6
+#elif (RTE_VERSION >= RTE_VERSION_NUM(20, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(20, 12, 0, 0))
+	#define DPDK_20_11_5
+#elif (RTE_VERSION >= RTE_VERSION_NUM(21, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(21, 12, 0, 0))
+	#define DPDK_21_11_5
+#elif (RTE_VERSION >= RTE_VERSION_NUM(22, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(22, 12, 0, 0))
+	#define DPDK_22_11_3
+#endif
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_errno.h b/drivers/net/sxe/base/sxe_errno.h
new file mode 100644
index 0000000000..e4de8bef29
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_errno.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_ERRNO_H__
+#define __SXE_ERRNO_H__
+
+#define SXE_ERR_MODULE_STANDARD			0
+#define SXE_ERR_MODULE_PF				1
+#define SXE_ERR_MODULE_VF				2
+#define SXE_ERR_MODULE_HDC				3
+
+#define SXE_ERR_MODULE_OFFSET	16
+#define SXE_ERR_MODULE(module, errcode)		\
+	(((module) << SXE_ERR_MODULE_OFFSET) | (errcode))
+#define SXE_ERR_PF(errcode)		SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode)
+#define SXE_ERR_VF(errcode)		SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode)
+#define SXE_ERR_HDC(errcode)	SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode)
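+
+/*
+ * Example encoding: SXE_ERR_PF(2) expands to (1 << 16) | 2 = 0x10002,
+ * module id in the high 16 bits, error code in the low 16 bits.
+ */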
+
+#define SXE_ERR_CONFIG                        EINVAL
+#define SXE_ERR_PARAM                         EINVAL
+#define SXE_ERR_RESET_FAILED                  EPERM
+#define SXE_ERR_NO_SPACE                      ENOSPC
+#define SXE_ERR_FNAV_CMD_INCOMPLETE           EBUSY
+#define SXE_ERR_MBX_LOCK_FAIL                 EBUSY
+#define SXE_ERR_OPRATION_NOT_PERM             EPERM
+#define SXE_ERR_LINK_STATUS_INVALID           EINVAL
+#define SXE_ERR_LINK_SPEED_INVALID            EINVAL
+#define SXE_ERR_DEVICE_NOT_SUPPORTED          EOPNOTSUPP
+#define SXE_ERR_HDC_LOCK_BUSY                 EBUSY
+#define SXE_ERR_HDC_FW_OV_TIMEOUT             ETIMEDOUT
+#define SXE_ERR_MDIO_CMD_TIMEOUT              ETIMEDOUT
+#define SXE_ERR_INVALID_LINK_SETTINGS         EINVAL
+#define SXE_ERR_FNAV_REINIT_FAILED            EIO
+#define SXE_ERR_CLI_FAILED                    EIO
+#define SXE_ERR_MASTER_REQUESTS_PENDING       SXE_ERR_PF(1)
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT       SXE_ERR_PF(2)
+#define SXE_ERR_ENABLE_SRIOV_FAIL             SXE_ERR_PF(3)
+#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT      SXE_ERR_PF(4)
+#define SXE_ERR_SFP_NOT_PERSENT               SXE_ERR_PF(5)
+#define SXE_ERR_PHY_NOT_PERSENT               SXE_ERR_PF(6)
+#define SXE_ERR_PHY_RESET_FAIL                SXE_ERR_PF(7)
+#define SXE_ERR_FC_NOT_NEGOTIATED             SXE_ERR_PF(8)
+#define SXE_ERR_SFF_NOT_SUPPORTED             SXE_ERR_PF(9)
+
+#define SXEVF_ERR_MAC_ADDR_INVALID              EINVAL
+#define SXEVF_ERR_RESET_FAILED                  EIO
+#define SXEVF_ERR_ARGUMENT_INVALID              EINVAL
+#define SXEVF_ERR_NOT_READY                     EBUSY
+#define SXEVF_ERR_POLL_ACK_FAIL                 EIO
+#define SXEVF_ERR_POLL_MSG_FAIL                 EIO
+#define SXEVF_ERR_MBX_LOCK_FAIL                 EBUSY
+#define SXEVF_ERR_REPLY_INVALID                 EINVAL
+#define SXEVF_ERR_IRQ_NUM_INVALID               EINVAL
+#define SXEVF_ERR_PARAM                         EINVAL
+#define SXEVF_ERR_MAILBOX_FAIL                  SXE_ERR_VF(1)
+#define SXEVF_ERR_MSG_HANDLE_ERR                SXE_ERR_VF(2)
+#define SXEVF_ERR_DEVICE_NOT_SUPPORTED          SXE_ERR_VF(3)
+#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT      SXE_ERR_VF(4)
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_hw.c b/drivers/net/sxe/base/sxe_hw.c
new file mode 100644
index 0000000000..14d1d67456
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_hw.c
@@ -0,0 +1,6647 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifdef SXE_PHY_CONFIGURE
+#include <linux/mdio.h>
+#endif
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#include "sxe_pci.h"
+#include "sxe_log.h"
+#include "sxe_debug.h"
+#include "sxe_host_hdc.h"
+#include "sxe_sriov.h"
+#include "sxe_compat.h"
+#else
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+#include "sxe.h"
+
+#include "sxe_hw.h"
+#endif
+
+
+#define SXE_PFMSG_MASK  (0xFF00)
+
+#define SXE_MSGID_MASK  (0xFFFFFFFF)
+
+#define SXE_CTRL_MSG_MASK          (0x700)
+
+#define SXE_RING_WAIT_LOOP        10
+#define SXE_REG_NAME_LEN          16
+#define SXE_DUMP_REG_STRING_LEN   73
+#define SXE_DUMP_REGS_NUM         64
+#define SXE_MAX_RX_DESC_POLL      10
+#define SXE_LPBK_EN               0x00000001
+#define SXE_MACADDR_LOW_4_BYTE    4
+#define SXE_MACADDR_HIGH_2_BYTE   2
+#define SXE_RSS_FIELD_MASK        0xffff0000
+#define SXE_MRQE_MASK             0x0000000f
+
+#define SXE_HDC_DATA_LEN_MAX         256
+
+#define SXE_8_TC_MSB				(0x11111111)
+
+STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg);
+STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value);
+static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value);
+
+#define SXE_WRITE_REG_ARRAY_32(a, reg, offset, value) \
+	sxe_write_reg(a, reg + (offset << 2), value)
+#define SXE_READ_REG_ARRAY_32(a, reg, offset) \
+	sxe_read_reg(a, reg + (offset << 2))
+
+#define SXE_REG_READ(hw, addr)        sxe_read_reg(hw, addr)
+#define SXE_REG_WRITE(hw, reg, value) sxe_write_reg(hw, reg, value)
+#define SXE_WRITE_FLUSH(a) sxe_read_reg(a, SXE_STATUS)
+#define SXE_REG_WRITE_ARRAY(hw, reg, offset, value) \
+	sxe_write_reg(hw, (reg) + ((offset) << 2), (value))
+
+#define SXE_SWAP_32(_value) __swab32((_value))
+
+#define SXE_REG_WRITE_BE32(a, reg, value) \
+	SXE_REG_WRITE((a), (reg), SXE_SWAP_32(ntohl(value)))
+
+#define SXE_SWAP_16(_value) __swab16((_value))
+
+#define SXE_REG64_WRITE(a, reg, value) sxe_write_reg64((a), (reg), (value))
+
+enum sxe_ipsec_table {
+	SXE_IPSEC_IP_TABLE = 0,
+	SXE_IPSEC_SPI_TABLE,
+	SXE_IPSEC_KEY_TABLE,
+};
+
+u32 mac_regs[] = {
+	SXE_COMCTRL,
+	SXE_PCCTRL,
+	SXE_LPBKCTRL,
+	SXE_MAXFS,
+	SXE_VLANCTRL,
+	SXE_VLANID,
+	SXE_LINKS,
+	SXE_HLREG0,
+	SXE_MFLCN,
+	SXE_MACC,
+};
+
+u16 sxe_mac_reg_num_get(void)
+{
+	return ARRAY_SIZE(mac_regs);
+}
+
+
+#ifndef SXE_DPDK 
+
+void sxe_hw_fault_handle(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (test_bit(SXE_HW_FAULT, &hw->state)) {
+		goto l_ret;
+	}
+
+	set_bit(SXE_HW_FAULT, &hw->state);
+
+	LOG_DEV_ERR("sxe nic hw fault\n");
+
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL)) {
+		hw->fault_handle(hw->priv);
+	}
+
+l_ret:
+	return;
+}
+
+static u32 sxe_hw_fault_check(struct sxe_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_REG_READ_RETRY; i++) {
+		value = hw->reg_read(base_addr + SXE_STATUS);
+		if (value != SXE_REG_READ_FAIL) {
+			break;
+		}
+
+		mdelay(3);
+	}
+
+	if (SXE_REG_READ_FAIL == value) {
+		LOG_ERROR_BDF("read registers multiple times failed, ret=%#x\n", value);
+		sxe_hw_fault_handle(hw);
+	} else {
+		value = hw->reg_read(base_addr + reg);
+	}
+
+	return value;
+l_out:
+	return SXE_REG_READ_FAIL;
+}
+
+STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+{
+	u32 value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		value = SXE_REG_READ_FAIL;
+		goto l_ret;
+	}
+
+	value = hw->reg_read(base_addr + reg);
+	if (unlikely(SXE_REG_READ_FAIL == value)) {
+		LOG_ERROR_BDF("reg[0x%x] read failed, ret=%#x\n", reg, value);
+		value = sxe_hw_fault_check(hw, reg);
+	}
+
+l_ret:
+	return value;
+}
+
+STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_ret;
+	}
+
+	hw->reg_write(value, base_addr + reg);
+
+l_ret:
+	return;
+}
+
+#else 
+
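+/*
+ * A read returning SXE_REG_READ_FAIL may be a transient PCIe fault:
+ * re-read the STATUS register to check device liveness and retry up
+ * to SXE_REG_READ_RETRY times, with a short delay between attempts,
+ * before giving up on the value.
+ */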
+STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+
+	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+	if (unlikely(SXE_REG_READ_FAIL == value)) {
+
+		value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
+		if (unlikely(SXE_REG_READ_FAIL != value)) {
+
+			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+		} else {
+			LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
+							reg, SXE_STATUS, value);
+			for (i = 0; i < SXE_REG_READ_RETRY; i++) {
+
+				value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
+				if (unlikely(SXE_REG_READ_FAIL != value)) {
+
+					value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+					LOG_INFO("reg[0x%x] read ok, value=%#x\n",
+									reg, value);
+					break;
+				} else {
+					LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
+							reg, SXE_STATUS, value);
+				}
+
+				mdelay(3);
+			}
+		}
+	}
+
+	return value;
+}
+
+STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
+
+	return;
+}
+#endif
+
+static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value)
+{
+	u8 __iomem *reg_addr = hw->reg_base_addr;
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_ret;
+	}
+
+	writeq(value, reg_addr + reg);
+
+l_ret:
+	return;
+}
+
+
+void sxe_hw_no_snoop_disable(struct sxe_hw *hw)
+{
+	u32 ctrl_ext;
+
+	ctrl_ext = SXE_REG_READ(hw, SXE_CTRL_EXT);
+	ctrl_ext |= SXE_CTRL_EXT_NS_DIS;
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, ctrl_ext);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
+						u8 rar_idx, u8 pool_idx)
+{
+	s32 ret = 0;
+	u32 value;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx > SXE_UC_ENTRY_NUM_MAX) {
+		ret = -SXE_ERR_PARAM;
+		LOG_DEV_ERR("pool_idx:%d rar_idx:%d invalid.\n",
+			  pool_idx, rar_idx);
+		goto l_end;
+	}
+
+	if (pool_idx < 32) {
+		value = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
+		value |= BIT(pool_idx);
+		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), value);
+	} else {
+		value = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
+		value |= BIT(pool_idx - 32);
+		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), value);
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_hw_uc_addr_pool_disable(struct sxe_hw *hw, u8 rar_idx)
+{
+	u32 hi;
+	u32 low;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	hi = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
+	low = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_end;
+	}
+
+	if (!hi && !low) {
+		LOG_DEBUG_BDF("no need clear rar-pool relation register.\n");
+		goto l_end;
+	}
+
+	if (low) {
+		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
+	}
+	if (hi) {
+		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
+	}
+
+
+l_end:
+	return 0;
+}
+
+s32 sxe_hw_nic_reset(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u32 ctrl, i;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	ctrl = SXE_CTRL_RST;
+	ctrl |= SXE_REG_READ(hw, SXE_CTRL);
+	ctrl &= ~SXE_CTRL_GIO_DIS;
+	SXE_REG_WRITE(hw, SXE_CTRL, ctrl);
+
+	SXE_WRITE_FLUSH(hw);
+	usleep_range(1000, 1200);
+
+	for (i = 0; i < 10; i++) {
+		ctrl = SXE_REG_READ(hw, SXE_CTRL);
+		if (!(ctrl & SXE_CTRL_RST_MASK)) {
+			break;
+		}
+		udelay(1);
+	}
+
+	if (ctrl & SXE_CTRL_RST_MASK) {
+		ret = -SXE_ERR_RESET_FAILED;
+		LOG_DEV_ERR("reset polling failed to complete\n");
+	}
+
+	return ret;
+}
+
+void sxe_hw_pf_rst_done_set(struct sxe_hw *hw)
+{
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_CTRL_EXT);
+	value |= SXE_CTRL_EXT_PFRSTD;
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
+
+	return;
+}
+
+static void sxe_hw_regs_flush(struct sxe_hw *hw)
+{
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+static const struct sxe_reg_info sxe_reg_info_tbl[] = {
+
+	{SXE_CTRL, 1, 1, "CTRL"},
+	{SXE_STATUS, 1, 1, "STATUS"},
+	{SXE_CTRL_EXT, 1, 1, "CTRL_EXT"},
+
+	{SXE_EICR, 1, 1, "EICR"},
+
+	{SXE_SRRCTL(0), 16, 0x4, "SRRCTL"},
+	{SXE_RDH(0), 64, 0x40, "RDH"},
+	{SXE_RDT(0), 64, 0x40, "RDT"},
+	{SXE_RXDCTL(0), 64, 0x40, "RXDCTL"},
+	{SXE_RDBAL(0), 64, 0x40, "RDBAL"},
+	{SXE_RDBAH(0), 64, 0x40, "RDBAH"},
+
+	{SXE_TDBAL(0), 32, 0x40, "TDBAL"},
+	{SXE_TDBAH(0), 32, 0x40, "TDBAH"},
+	{SXE_TDLEN(0), 32, 0x40, "TDLEN"},
+	{SXE_TDH(0), 32, 0x40, "TDH"},
+	{SXE_TDT(0), 32, 0x40, "TDT"},
+	{SXE_TXDCTL(0), 32, 0x40, "TXDCTL"},
+
+	{ .name = NULL }
+};
+
+static void sxe_hw_reg_print(struct sxe_hw *hw,
+				const struct sxe_reg_info *reginfo)
+{
+	u32 i, j;
+	s8 *value;
+	u32 first_reg_idx = 0;
+	u32 regs[SXE_DUMP_REGS_NUM];
+	s8 reg_name[SXE_REG_NAME_LEN];
+	s8 buf[SXE_DUMP_REG_STRING_LEN];
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (reginfo->addr) {
+	case SXE_SRRCTL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_SRRCTL(i));
+		}
+		break;
+	case SXE_RDLEN(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_RDLEN(i));
+		}
+		break;
+	case SXE_RDH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_RDH(i));
+		}
+		break;
+	case SXE_RDT(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_RDT(i));
+		}
+		break;
+	case SXE_RXDCTL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_RXDCTL(i));
+		}
+		break;
+	case SXE_RDBAL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_RDBAL(i));
+		}
+		break;
+	case SXE_RDBAH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_RDBAH(i));
+		}
+		break;
+	case SXE_TDBAL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_TDBAL(i));
+		}
+		break;
+	case SXE_TDBAH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_TDBAH(i));
+		}
+		break;
+	case SXE_TDLEN(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_TDLEN(i));
+		}
+		break;
+	case SXE_TDH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_TDH(i));
+		}
+		break;
+	case SXE_TDT(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_TDT(i));
+		}
+		break;
+	case SXE_TXDCTL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+			regs[i] = SXE_REG_READ(hw, SXE_TXDCTL(i));
+		}
+		break;
+	default:
+		LOG_DEV_INFO("%-15s %08x\n",
+			reginfo->name, SXE_REG_READ(hw, reginfo->addr));
+		goto l_end;
+	}
+
+	while (first_reg_idx < SXE_DUMP_REGS_NUM) {
+		value = buf;
+		snprintf(reg_name, SXE_REG_NAME_LEN,
+			"%s[%d-%d]", reginfo->name,
+			first_reg_idx, (first_reg_idx + 7));
+
+		for (j = 0; j < 8; j++) {
+			value += sprintf(value, " %08x", regs[first_reg_idx++]);
+		}
+
+		LOG_DEV_ERR("%-15s%s\n", reg_name, buf);
+	}
+
+l_end:
+	return;
+}
+
+static void sxe_hw_reg_dump(struct sxe_hw *hw)
+{
+	const struct sxe_reg_info *reginfo;
+
+	for (reginfo = (const struct sxe_reg_info *)sxe_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		sxe_hw_reg_print(hw, reginfo);
+	}
+
+	return;
+}
+
+static s32 sxe_hw_status_reg_test(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u32 value, before, after;
+	u32 toggle = 0x7FFFF30F;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	before = SXE_REG_READ(hw, SXE_STATUS);
+	value = (SXE_REG_READ(hw, SXE_STATUS) & toggle);
+	SXE_REG_WRITE(hw, SXE_STATUS, toggle);
+	after = SXE_REG_READ(hw, SXE_STATUS) & toggle;
+	if (value != after) {
+		LOG_MSG_ERR(drv, "failed status register test got: "
+				"0x%08X expected: 0x%08X\n",
+					after, value);
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, SXE_STATUS, before);
+
+l_end:
+	return ret;
+}
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+struct sxe_self_test_reg {
+	u32 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+static const struct sxe_self_test_reg self_test_reg[] = {
+	{ SXE_FCRTL(0),  1,   PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
+	{ SXE_FCRTH(0),  1,   PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
+	{ SXE_PFCTOP,    1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_FCTTV(0),  1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VLNCTRL,   1,   PATTERN_TEST, 0x00000000, 0x00000000 },
+	{ SXE_RDBAL(0),  4,   PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ SXE_RDBAH(0),  4,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RDLEN(0),  4,   PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
+	{ SXE_RXDCTL(0), 4,   WRITE_NO_TEST, 0, SXE_RXDCTL_ENABLE },
+	{ SXE_RDT(0),    4,   PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_RXDCTL(0), 4,   WRITE_NO_TEST, 0, 0 },
+	{ SXE_TDBAL(0),  4,   PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ SXE_TDBAH(0),  4,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_TDLEN(0),  4,   PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ SXE_RXCTRL,    1,   SET_READ_TEST, 0x00000001, 0x00000001 },
+	{ SXE_RAL(0),    16,  TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RAL(0),    16,  TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	{ SXE_MTA(0),    128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ .reg = 0 }
+};
+
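+/*
+ * Register diagnostic helper: each canned pattern is written to the
+ * register through the 'write' mask and read back through 'mask';
+ * 'write' bounds the bits the test may touch, 'mask' the bits expected
+ * to read back. The original register value is restored either way.
+ */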
+static s32 sxe_hw_reg_pattern_test(struct sxe_hw *hw, u32 reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 pat, val, before;
+	struct sxe_adapter *adapter = hw->adapter;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE};
+
+	if (sxe_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = SXE_REG_READ(hw, reg);
+
+		SXE_REG_WRITE(hw, reg, test_pattern[pat] & write);
+		val = SXE_REG_READ(hw, reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			LOG_MSG_ERR(drv, "pattern test reg %04X failed: "
+					"got 0x%08X expected 0x%08X\n",
+				reg, val, (test_pattern[pat] & write & mask));
+			SXE_REG_WRITE(hw, reg, before);
+			ret = -SXE_DIAG_REG_PATTERN_TEST_ERR;
+			goto l_end;
+		}
+
+		SXE_REG_WRITE(hw, reg, before);
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_hw_reg_set_and_check(struct sxe_hw *hw, int reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 val, before;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	before = SXE_REG_READ(hw, reg);
+	SXE_REG_WRITE(hw, reg, write & mask);
+	val = SXE_REG_READ(hw, reg);
+	if ((write & mask) != (val & mask)) {
+		LOG_MSG_ERR(drv, "set/check reg %04X test failed: "
+				"got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
+		SXE_REG_WRITE(hw, reg, before);
+		ret = -SXE_DIAG_CHECK_REG_TEST_ERR;
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, reg, before);
+
+l_end:
+	return ret;
+}
+
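+/*
+ * Walk the self-test table above. The per-entry stride mirrors the
+ * register layout: per-queue registers repeat every 0x40 bytes, 32-bit
+ * table entries every 4 bytes and 64-bit table entries (RAL/RAH pairs)
+ * every 8 bytes, with TABLE64_TEST_HI probing the upper word at reg + 4.
+ */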
+STATIC s32 sxe_hw_regs_test(struct sxe_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	const struct sxe_self_test_reg *test = self_test_reg;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	ret = sxe_hw_status_reg_test(hw);
+	if (ret) {
+		LOG_MSG_ERR(drv, "status register test failed\n");
+		goto l_end;
+	}
+
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case TABLE32_TEST:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 4),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 8),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				ret = sxe_hw_reg_pattern_test(hw,
+					(test->reg + 4) + (i * 8),
+					test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				ret = sxe_hw_reg_set_and_check(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				SXE_REG_WRITE(hw, test->reg + (i * 0x40),
+						test->write);
+				break;
+			default:
+				LOG_ERROR_BDF("reg test mod err, type=%d\n",
+						test->test_type);
+				break;
+			}
+
+			if (ret) {
+				goto l_end;
+			}
+		}
+		test++;
+	}
+
+l_end:
+	return ret;
+}
+
+static const struct sxe_setup_operations sxe_setup_ops = {
+	.regs_dump		= sxe_hw_reg_dump,
+	.reg_read		= sxe_read_reg,
+	.reg_write		= sxe_write_reg,
+	.regs_test		= sxe_hw_regs_test,
+	.reset			= sxe_hw_nic_reset,
+	.regs_flush		= sxe_hw_regs_flush,
+	.pf_rst_done_set	= sxe_hw_pf_rst_done_set,
+	.no_snoop_disable	= sxe_hw_no_snoop_disable,
+};
+
+
+static void sxe_hw_ring_irq_enable(struct sxe_hw *hw, u64 qmask)
+{
+	u32 mask0, mask1;
+
+	mask0 = qmask & 0xFFFFFFFF;
+	mask1 = qmask >> 32;
+
+	if (mask0 && mask1) {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(0), mask0);
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), mask1);
+	} else if (mask0) {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(0), mask0);
+	} else if (mask1) {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), mask1);
+	}
+
+	return;
+}
+
+u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_EICR);
+}
+
+void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EICR, value);
+	return;
+}
+
+u32 sxe_hw_irq_cause_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_EICS);
+}
+
+static void sxe_hw_event_irq_trigger(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_EICS, (SXE_EICS_TCP_TIMER | SXE_EICS_OTHER));
+
+	return;
+}
+
+static void sxe_hw_ring_irq_trigger(struct sxe_hw *hw, u64 eics)
+{
+	u32 mask;
+
+	mask = (eics & 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_EICS_EX(0), mask);
+	mask = (eics >> 32);
+	SXE_REG_WRITE(hw, SXE_EICS_EX(1), mask);
+	return;
+}
+
+void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
+					bool is_msix)
+{
+	if (is_msix) {
+		SXE_REG_WRITE(hw, SXE_EIAM_EX(0), 0xFFFFFFFF);
+		SXE_REG_WRITE(hw, SXE_EIAM_EX(1), 0xFFFFFFFF);
+	} else {
+		SXE_REG_WRITE(hw, SXE_EIAM, SXE_EICS_RTX_QUEUE);
+	}
+
+	return;
+}
+
+void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_GPIE, value);
+
+	return;
+}
+
+u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_GPIE);
+}
+
+static void sxe_hw_set_eitrsel(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EITRSEL, value);
+
+	return;
+}
+
+void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx)
+{
+	u8  allocation;
+	u32 ivar, position;
+
+	allocation = irq_idx | SXE_IVAR_ALLOC_VALID;
+
+	position = (offset & 1) * 8;
+
+	ivar = SXE_REG_READ(hw, SXE_IVAR_MISC);
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXE_REG_WRITE(hw, SXE_IVAR_MISC, ivar);
+
+	return;
+}
+
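+/*
+ * Each 32-bit IVAR register maps two ring pairs: reg_idx >> 1 selects
+ * the register, reg_idx & 1 the 16-bit half; within a half the low byte
+ * holds the Rx entry and the high byte the Tx entry, each tagged with
+ * SXE_IVAR_ALLOC_VALID (bit 7) once assigned.
+ */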
+void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
+						u16 reg_idx, u16 irq_idx)
+{
+	u8  allocation;
+	u32 ivar, position;
+
+	allocation = irq_idx | SXE_IVAR_ALLOC_VALID;
+
+	position = ((reg_idx & 1) * 16) + (8 * is_tx);
+
+	ivar = SXE_REG_READ(hw, SXE_IVAR(reg_idx >> 1));
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXE_REG_WRITE(hw, SXE_IVAR(reg_idx >> 1), ivar);
+
+	return;
+}
+
+void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
+						u16 irq_idx, u32 interval)
+{
+	u32 eitr = interval & SXE_EITR_ITR_MASK;
+
+	eitr |= SXE_EITR_CNT_WDIS;
+
+	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), eitr);
+
+	return;
+}
+
+static void sxe_hw_event_irq_interval_set(struct sxe_hw *hw,
+						u16 irq_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), value);
+
+	return;
+}
+
+void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIAC, value);
+
+	return;
+}
+
+void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIMC, value);
+
+	return;
+}
+
+void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIMS, value);
+
+	return;
+}
+
+void sxe_hw_all_irq_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_EIMC, 0xFFFF0000);
+
+	SXE_REG_WRITE(hw, SXE_EIMC_EX(0), ~0);
+	SXE_REG_WRITE(hw, SXE_EIMC_EX(1), ~0);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void sxe_hw_spp_configure(struct sxe_hw *hw, u32 hw_spp_proc_delay_us)
+{
+	SXE_REG_WRITE(hw, SXE_SPP_PROC,
+			(SXE_REG_READ(hw, SXE_SPP_PROC) &
+			~SXE_SPP_PROC_DELAY_US_MASK) |
+			hw_spp_proc_delay_us);
+
+	return;
+}
+
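+/*
+ * Per-bit interrupt self test: a cause that is left disabled must not
+ * show up in *icr, the same cause must show up once enabled and
+ * triggered, and (when the irq line is not shared) no unrelated cause
+ * may leak in. *icr is updated by the caller's ISR while this polls.
+ */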
+static s32 sxe_hw_irq_test(struct sxe_hw *hw, u32 *icr, bool shared)
+{
+	s32 ret = 0;
+	u32 i, mask;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF);
+	sxe_hw_regs_flush(hw);
+	usleep_range(10000, 20000);
+
+	for (i = 0; i < 10; i++) {
+		mask = BIT(i);
+		if (!shared) {
+			LOG_INFO_BDF("test irq: irq test start\n");
+			*icr = 0;
+			SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF);
+			SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF);
+			sxe_hw_regs_flush(hw);
+			usleep_range(10000, 20000);
+
+			if (*icr & mask) {
+				LOG_ERROR_BDF("test irq: failed, eicr = %x\n", *icr);
+				ret = -SXE_DIAG_DISABLE_IRQ_TEST_ERR;
+				break;
+			}
+			LOG_INFO_BDF("test irq: irq test end\n");
+		}
+
+		LOG_INFO_BDF("test irq: mask irq test start\n");
+		*icr = 0;
+		SXE_REG_WRITE(hw, SXE_EIMS, mask);
+		SXE_REG_WRITE(hw, SXE_EICS, mask);
+		sxe_hw_regs_flush(hw);
+		usleep_range(10000, 20000);
+
+		if (!(*icr & mask)) {
+			LOG_ERROR_BDF("test irq: mask failed, eicr = %x\n", *icr);
+			ret = -SXE_DIAG_ENABLE_IRQ_TEST_ERR;
+			break;
+		}
+		LOG_INFO_BDF("test irq: mask irq test end\n");
+
+		sxe_hw_specific_irq_disable(hw, mask);
+		sxe_hw_regs_flush(hw);
+		usleep_range(10000, 20000);
+
+		if (!shared) {
+			LOG_INFO_BDF("test irq: other irq test start\n");
+			*icr = 0;
+			SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF);
+			SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF);
+			sxe_hw_regs_flush(hw);
+			usleep_range(10000, 20000);
+
+			if (*icr) {
+				LOG_ERROR_BDF("test irq: other irq failed, eicr = %x\n", *icr);
+				ret = -SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR;
+				break;
+			}
+			LOG_INFO_BDF("test irq: other irq test end\n");
+		}
+	}
+
+	sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF);
+	sxe_hw_regs_flush(hw);
+	usleep_range(10000, 20000);
+
+	return ret;
+}
+
+static const struct sxe_irq_operations sxe_irq_ops = {
+	.event_irq_auto_clear_set	= sxe_hw_event_irq_auto_clear_set,
+	.ring_irq_interval_set		= sxe_hw_ring_irq_interval_set,
+	.event_irq_interval_set		= sxe_hw_event_irq_interval_set,
+	.set_eitrsel			= sxe_hw_set_eitrsel,
+	.ring_irq_map			= sxe_hw_ring_irq_map,
+	.event_irq_map			= sxe_hw_event_irq_map,
+	.irq_general_reg_set		= sxe_hw_irq_general_reg_set,
+	.irq_general_reg_get		= sxe_hw_irq_general_reg_get,
+	.ring_irq_auto_disable		= sxe_hw_ring_irq_auto_disable,
+	.pending_irq_read_clear		= sxe_hw_pending_irq_read_clear,
+	.pending_irq_write_clear	= sxe_hw_pending_irq_write_clear,
+	.ring_irq_enable		= sxe_hw_ring_irq_enable,
+	.irq_cause_get			= sxe_hw_irq_cause_get,
+	.event_irq_trigger		= sxe_hw_event_irq_trigger,
+	.ring_irq_trigger		= sxe_hw_ring_irq_trigger,
+	.specific_irq_disable		= sxe_hw_specific_irq_disable,
+	.specific_irq_enable		= sxe_hw_specific_irq_enable,
+	.all_irq_disable		= sxe_hw_all_irq_disable,
+	.spp_configure			= sxe_hw_spp_configure,
+	.irq_test			= sxe_hw_irq_test,
+};
+
+
+u32 sxe_hw_link_speed_get(struct sxe_hw *hw)
+{
+	u32 speed, value;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	value = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	if ((value & SXE_COMCTRL_SPEED_10G) == SXE_COMCTRL_SPEED_10G) {
+		speed = SXE_LINK_SPEED_10GB_FULL;
+	} else if ((value & SXE_COMCTRL_SPEED_1G) == SXE_COMCTRL_SPEED_1G) {
+		speed = SXE_LINK_SPEED_1GB_FULL;
+	} else {
+		speed = SXE_LINK_SPEED_UNKNOWN;
+	}
+
+	LOG_DEBUG_BDF("hw link speed=%x (0x80=10G, 0x20=1G), reg=%x\n",
+			speed, value);
+
+	return speed;
+}
+
+void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed)
+{
+	u32 ctrl;
+
+	ctrl = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	if (speed == SXE_LINK_SPEED_1GB_FULL) {
+		ctrl |= SXE_COMCTRL_SPEED_1G;
+	} else if (speed == SXE_LINK_SPEED_10GB_FULL) {
+		ctrl |= SXE_COMCTRL_SPEED_10G;
+	}
+
+	SXE_REG_WRITE(hw, SXE_COMCTRL, ctrl);
+
+	return;
+}
+
+STATIC bool sxe_hw_1g_link_up_check(struct sxe_hw *hw)
+{
+	return (SXE_REG_READ(hw, SXE_LINKS) & SXE_LINKS_UP) ? true : false;
+}
+
+bool sxe_hw_is_link_state_up(struct sxe_hw *hw)
+{
+	bool ret = false;
+	u32 links_reg, link_speed;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	links_reg  = SXE_REG_READ(hw, SXE_LINKS);
+
+	LOG_DEBUG_BDF("nic link reg: 0x%x\n", links_reg);
+
+	if (links_reg & SXE_LINKS_UP) {
+		ret = true;
+
+		link_speed = sxe_hw_link_speed_get(hw);
+		if ((link_speed == SXE_LINK_SPEED_10GB_FULL) &&
+		    (links_reg & SXE_10G_LINKS_DOWN)) {
+			ret = false;
+		}
+	}
+
+	return ret;
+}
+
+void sxe_hw_mac_pad_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_MACCFG);
+	ctl |= SXE_MACCFG_PAD_EN;
+	SXE_REG_WRITE(hw, SXE_MACCFG, ctl);
+
+	return;
+}
+
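+/*
+ * Link flow control setup. The per-TC low/high watermarks are shifted
+ * left by 9 before being written, so hw->fc.{low,high}_water[] are
+ * presumably kept in 512-byte units; when no high watermark is
+ * configured, a default of (RXPBSIZE - 24KB) / 2 is programmed instead.
+ */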
+s32 sxe_hw_fc_enable(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u8  i;
+	u32 reg;
+	u32 flctrl_val;
+	u32 fcrtl, fcrth;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
+	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
+		       SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	switch (hw->fc.current_mode) {
+	case SXE_FC_NONE:
+		break;
+	case SXE_FC_RX_PAUSE:
+		flctrl_val |= SXE_FCTRL_RFCE_LFC_EN;
+		break;
+	case SXE_FC_TX_PAUSE:
+		flctrl_val |= SXE_FCTRL_TFCE_LFC_EN;
+		break;
+	case SXE_FC_FULL:
+		flctrl_val |= SXE_FCTRL_RFCE_LFC_EN;
+		flctrl_val |= SXE_FCTRL_TFCE_LFC_EN;
+		break;
+	default:
+		LOG_DEV_DEBUG("flow control param set incorrectly\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_ret;
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
+			fcrth = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+			fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1;
+		}
+
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), fcrth);
+	}
+
+	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) {
+		flctrl_val |= (SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
+
+	reg = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg &= ~SXE_PFCTOP_FCOP_MASK;
+	reg |= SXE_PFCTOP_FCT;
+	reg |= SXE_PFCTOP_FCOP_LFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
+
+	reg = hw->fc.pause_time * 0x00010001U;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+l_ret:
+	return ret;
+}
+
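+/*
+ * Advertise the local pause capability in PCS1GANA. The SYM/ASM pause
+ * bits follow the usual 802.3 Clause 37 convention: Rx-only pause and
+ * full flow control advertise SYM+ASM, Tx-only pause advertises ASM
+ * alone.
+ */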
+void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw)
+{
+	u32 reg = 0;
+
+	if (hw->fc.requested_mode == SXE_FC_DEFAULT) {
+		hw->fc.requested_mode = SXE_FC_FULL;
+	}
+
+	reg = SXE_REG_READ(hw, SXE_PCS1GANA);
+
+	switch (hw->fc.requested_mode) {
+	case SXE_FC_NONE:
+		reg &= ~(SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE);
+		break;
+	case SXE_FC_TX_PAUSE:
+		reg |= SXE_PCS1GANA_ASM_PAUSE;
+		reg &= ~SXE_PCS1GANA_SYM_PAUSE;
+		break;
+	case SXE_FC_RX_PAUSE:
+	case SXE_FC_FULL:
+		reg |= SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE;
+		break;
+	default:
+		LOG_ERROR("Flow control param set incorrectly.");
+		break;
+	}
+
+	SXE_REG_WRITE(hw, SXE_PCS1GANA, reg);
+	return;
+}
+
+s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
+{
+	s32 ret = 0;
+	u8  i;
+	u32 reg;
+	u32 flctrl_val;
+	u32 fcrtl, fcrth;
+	struct sxe_adapter *adapter = hw->adapter;
+	u8 rx_en_num;
+
+	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
+	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
+		       SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	switch (hw->fc.current_mode) {
+	case SXE_FC_NONE:
+		rx_en_num = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
+			if (reg & SXE_FCRTH_FCEN) {
+				rx_en_num++;
+			}
+		}
+		if (rx_en_num > 1) {
+			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		}
+
+		break;
+
+	case SXE_FC_RX_PAUSE:
+		flctrl_val |= SXE_FCTRL_RFCE_PFC_EN;
+
+		rx_en_num = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
+			if (reg & SXE_FCRTH_FCEN) {
+				rx_en_num++;
+			}
+		}
+
+		if (rx_en_num > 1) {
+			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		}
+
+		break;
+	case SXE_FC_TX_PAUSE:
+		flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		break;
+	case SXE_FC_FULL:
+		flctrl_val |= SXE_FCTRL_RFCE_PFC_EN;
+		flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		break;
+	default:
+		LOG_DEV_DEBUG("flow control param set incorrectly\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_ret;
+	}
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+	    hw->fc.high_water[tc_idx]) {
+		fcrtl = (hw->fc.low_water[tc_idx] << 9) | SXE_FCRTL_XONE;
+		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), fcrtl);
+		fcrth = (hw->fc.high_water[tc_idx] << 9) | SXE_FCRTH_FCEN;
+	} else {
+		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), 0);
+		fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(tc_idx)) - 24576) >> 1;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTH(tc_idx), fcrth);
+
+	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) {
+		flctrl_val |= (BIT(tc_idx) << 16) & SXE_FCTRL_TFCE_FCEN_MASK;
+		flctrl_val |= (BIT(tc_idx) << 24) & SXE_FCTRL_TFCE_XONE_MASK;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
+
+	reg = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg &= ~SXE_PFCTOP_FCOP_MASK;
+	reg |= SXE_PFCTOP_FCT;
+	reg |= SXE_PFCTOP_FCOP_PFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
+
+	reg = hw->fc.pause_time * 0x00010001U;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+l_ret:
+	return ret;
+}
+
+void sxe_hw_crc_configure(struct sxe_hw *hw)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_PCCTRL);
+
+	ctrl |=  SXE_PCCTRL_TXCE | SXE_PCCTRL_RXCE | SXE_PCCTRL_PCSC_ALL;
+	SXE_REG_WRITE(hw, SXE_PCCTRL, ctrl);
+
+	return;
+}
+
+void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 value;
+
+	value = is_enable ? SXE_LPBK_EN : 0;
+
+	SXE_REG_WRITE(hw, SXE_LPBKCTRL, value);
+
+	return;
+}
+
+void sxe_hw_mac_txrx_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_COMCTRL);
+	ctl |= SXE_COMCTRL_TXEN | SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
+	SXE_REG_WRITE(hw, SXE_COMCTRL, ctl);
+
+	return;
+}
+
+void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame)
+{
+	u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS);
+
+	if (max_frame != (maxfs >> SXE_MAXFS_MFS_SHIFT)) {
+		maxfs &= ~SXE_MAXFS_MFS_MASK;
+		maxfs |= max_frame << SXE_MAXFS_MFS_SHIFT;
+	}
+
+	maxfs |=  SXE_MAXFS_RFSEL | SXE_MAXFS_TFSEL;
+	SXE_REG_WRITE(hw, SXE_MAXFS, maxfs);
+
+	return;
+}
+
+u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw)
+{
+	u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS);
+
+	maxfs &= SXE_MAXFS_MFS_MASK;
+	maxfs >>= SXE_MAXFS_MFS_SHIFT;
+
+	return maxfs;
+}
+
+bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw)
+{
+	bool supported = true;
+	bool link_up = sxe_hw_is_link_state_up(hw);
+	u32  link_speed = sxe_hw_link_speed_get(hw);
+
+	if (link_up) {
+		supported = (link_speed == SXE_LINK_SPEED_1GB_FULL);
+	}
+
+	return supported;
+}
+
+STATIC void sxe_hw_fc_param_init(struct sxe_hw *hw)
+{
+	hw->fc.requested_mode = SXE_FC_FULL;
+	hw->fc.current_mode = SXE_FC_FULL;
+	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
+
+	hw->fc.disable_fc_autoneg = true;
+	return;
+}
+
+void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark)
+{
+	hw->fc.high_water[tc_idx] = mark;
+
+	return;
+}
+
+void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark)
+{
+	hw->fc.low_water[tc_idx] = mark;
+
+	return;
+}
+
+bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw)
+{
+	return hw->fc.disable_fc_autoneg;
+}
+
+void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
+							bool is_disabled)
+{
+	hw->fc.disable_fc_autoneg = is_disabled;
+	return;
+}
+
+static enum sxe_fc_mode sxe_hw_fc_current_mode_get(struct sxe_hw *hw)
+{
+	return hw->fc.current_mode;
+}
+
+static enum sxe_fc_mode sxe_hw_fc_requested_mode_get(struct sxe_hw *hw)
+{
+	return hw->fc.requested_mode;
+}
+
+void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
+						enum sxe_fc_mode mode)
+{
+	hw->fc.requested_mode = mode;
+	return;
+}
+
+static const struct sxe_mac_operations sxe_mac_ops = {
+	.link_up_1g_check			= sxe_hw_1g_link_up_check,
+	.link_state_is_up		= sxe_hw_is_link_state_up,
+	.link_speed_get			= sxe_hw_link_speed_get,
+	.link_speed_set			= sxe_hw_link_speed_set,
+	.pad_enable			= sxe_hw_mac_pad_enable,
+	.crc_configure			= sxe_hw_crc_configure,
+	.loopback_switch		= sxe_hw_loopback_switch,
+	.txrx_enable			= sxe_hw_mac_txrx_enable,
+	.max_frame_set			= sxe_hw_mac_max_frame_set,
+	.max_frame_get			= sxe_hw_mac_max_frame_get,
+	.fc_enable			= sxe_hw_fc_enable,
+	.fc_autoneg_localcap_set	= sxe_fc_autoneg_localcap_set,
+	.fc_tc_high_water_mark_set	= sxe_hw_fc_tc_high_water_mark_set,
+	.fc_tc_low_water_mark_set	= sxe_hw_fc_tc_low_water_mark_set,
+	.fc_param_init			= sxe_hw_fc_param_init,
+	.fc_current_mode_get		= sxe_hw_fc_current_mode_get,
+	.fc_requested_mode_get		= sxe_hw_fc_requested_mode_get,
+	.fc_requested_mode_set		= sxe_hw_fc_requested_mode_set,
+	.is_fc_autoneg_disabled		= sxe_hw_is_fc_autoneg_disabled,
+	.fc_autoneg_disable_set		= sxe_hw_fc_autoneg_disable_set,
+};
+
+u32 sxe_hw_rx_mode_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_FCTRL);
+}
+
+u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx)
+{
+	return SXE_REG_READ(hw, SXE_VMOLR(pool_idx));
+}
+
+void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl)
+{
+	SXE_REG_WRITE(hw, SXE_FCTRL, filter_ctrl);
+	return;
+}
+
+void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
+						u32 vmolr, u16 pool_idx)
+{
+	SXE_REG_WRITE(hw, SXE_VMOLR(pool_idx), vmolr);
+	return;
+}
+
+void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable)
+{
+	u32 rfctl = SXE_REG_READ(hw, SXE_RFCTL);
+
+	rfctl &= ~SXE_RFCTL_LRO_DIS;
+
+	if (!is_enable) {
+		rfctl |= SXE_RFCTL_LRO_DIS;
+	}
+
+	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
+	return;
+}
+
+void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw)
+{
+	u32 rfctl = 0;
+
+	rfctl |= (SXE_RFCTL_NFSW_DIS | SXE_RFCTL_NFSR_DIS);
+	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
+	return;
+}
+
+void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw)
+{
+	u32 rxcsum;
+
+	rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
+	rxcsum |= SXE_RXCSUM_PCSD;
+	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
+	return;
+}
+
+void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr)
+{
+	u32 mac_addr_h, mac_addr_l;
+
+	mac_addr_l = ((u32)mac_addr[5] |
+		    ((u32)mac_addr[4] << 8) |
+		    ((u32)mac_addr[3] << 16) |
+		    ((u32)mac_addr[2] << 24));
+	mac_addr_h = (((u32)mac_addr[1] << 16) |
+		    ((u32)mac_addr[0] << 24));
+
+	SXE_REG_WRITE(hw, SXE_SACONH, mac_addr_h);
+	SXE_REG_WRITE(hw, SXE_SACONL, mac_addr_l);
+
+	return;
+}
+
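+/*
+ * Program a unicast RAR entry. RAL is written and flushed before RAH,
+ * so the address-valid bit (SXE_RAH_AV, carried in RAH) only goes live
+ * once the low half of the address is already in place.
+ */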
+s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
+					u8 *addr, u32 pool_idx)
+{
+	s32 ret = 0;
+	u32 rar_low, rar_high;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		LOG_DEV_DEBUG("RAR rar_idx %u is out of range:%u.\n",
+			rar_idx, SXE_UC_ENTRY_NUM_MAX);
+		ret = -SXE_ERR_PARAM;
+		goto l_end;
+	}
+
+	sxe_hw_uc_addr_pool_enable(hw, rar_idx, pool_idx);
+
+	rar_low = ((u32)addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+
+	rar_high = SXE_REG_READ(hw, SXE_RAH(rar_idx));
+	rar_high &= ~(0x0000FFFF | SXE_RAH_AV);
+	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	rar_high |= SXE_RAH_AV;
+
+	SXE_REG_WRITE(hw, SXE_RAL(rar_idx), rar_low);
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_RAH(rar_idx), rar_high);
+
+	LOG_DEBUG_BDF("rar_idx:%u pool_idx:%u addr:%pM add to rar done\n",
+		rar_idx, pool_idx, addr);
+
+l_end:
+	return ret;
+}
+
+s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index)
+{
+	s32 ret = 0;
+	u32 rar_high;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (index >= SXE_UC_ENTRY_NUM_MAX) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("uc_entry_num:%d index:%u invalid.(err:%d)\n",
+			  SXE_UC_ENTRY_NUM_MAX, index, ret);
+		goto l_end;
+	}
+
+	rar_high = SXE_REG_READ(hw, SXE_RAH(index));
+	rar_high &= ~(0x0000FFFF | SXE_RAH_AV);
+
+	SXE_REG_WRITE(hw, SXE_RAH(index), rar_high);
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_RAL(index), 0);
+
+	sxe_hw_uc_addr_pool_disable(hw, index);
+
+l_end:
+	return ret;
+}
+
+void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
+						u8 index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_MTA(index), value);
+	return;
+}
+
+void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_idx)
+{
+	u32 value = SXE_REG_READ(hw, SXE_MTA(reg_idx));
+
+	value |= BIT(bit_idx);
+
+	LOG_INFO("mta update value:0x%x.\n", value);
+	SXE_REG_WRITE(hw, SXE_MTA(reg_idx), value);
+
+	return;
+}
+
+void sxe_hw_mc_filter_enable(struct sxe_hw *hw)
+{
+	u32 value = SXE_MC_FILTER_TYPE0 | SXE_MCSTCTRL_MFE;
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, value);
+
+	return;
+}
+
+static void sxe_hw_mc_filter_disable(struct sxe_hw *hw)
+{
+	u32 value = SXE_REG_READ(hw, SXE_MCSTCTRL);
+
+	value &= ~SXE_MCSTCTRL_MFE;
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, value);
+
+	return;
+}
+
+void sxe_hw_uc_addr_clear(struct sxe_hw *hw)
+{
+	u32 i;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_uc_addr_pool_disable(hw, 0);
+
+	LOG_DEV_DEBUG("clear uc filter addr register:0-%d\n",
+		   SXE_UC_ENTRY_NUM_MAX - 1);
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_RAL(i), 0);
+		SXE_REG_WRITE(hw, SXE_RAH(i), 0);
+	}
+
+	LOG_DEV_DEBUG("clear %u uta filter addr register\n",
+			SXE_UTA_ENTRY_NUM_MAX);
+	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_UTA(i), 0);
+	}
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, SXE_MC_FILTER_TYPE0);
+
+	LOG_DEV_DEBUG("clear %u mta filter addr register\n",
+			SXE_MTA_ENTRY_NUM_MAX);
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_MTA(i), 0);
+	}
+
+	return;
+}
+
+static void sxe_hw_ethertype_filter_set(struct sxe_hw *hw,
+						u8 filter_type, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), value);
+	return;
+}
+
+void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 default_pool)
+{
+	u32 ctrl;
+
+	ctrl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	ctrl |= SXE_VT_CTL_VT_ENABLE;
+	ctrl &= ~SXE_VT_CTL_POOL_MASK;
+	ctrl |= default_pool << SXE_VT_CTL_POOL_SHIFT;
+	ctrl |= SXE_VT_CTL_REPLEN;
+
+	SXE_REG_WRITE(hw, SXE_VT_CTL, ctrl);
+
+	return;
+}
+
+void sxe_hw_vt_disable(struct sxe_hw *hw)
+{
+	u32 vmdctl;
+
+	vmdctl = SXE_REG_READ(hw, SXE_VT_CTL);
+	vmdctl &= ~SXE_VMD_CTL_POOL_EN;
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vmdctl);
+
+	return;
+}
+
+#ifdef SXE_WOL_CONFIGURE
+
+static void sxe_hw_wol_status_set(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_WUS, ~0);
+
+	return;
+}
+
+static void sxe_hw_wol_mode_set(struct sxe_hw *hw, u32 wol_status)
+{
+	u32 fctrl;
+
+	SXE_REG_WRITE(hw, SXE_WUC, SXE_WUC_PME_EN);
+
+	fctrl = SXE_REG_READ(hw, SXE_FCTRL);
+	fctrl |= SXE_FCTRL_BAM;
+	if (wol_status & SXE_WUFC_MC) {
+		fctrl |= SXE_FCTRL_MPE;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCTRL, fctrl);
+
+	SXE_REG_WRITE(hw, SXE_WUFC, wol_status);
+	sxe_hw_wol_status_set(hw);
+
+	return;
+}
+
+static void sxe_hw_wol_mode_clean(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_WUC, 0);
+	SXE_REG_WRITE(hw, SXE_WUFC, 0);
+
+	return;
+}
+#endif
+
+static const struct sxe_filter_mac_operations sxe_filter_mac_ops = {
+	.rx_mode_get			= sxe_hw_rx_mode_get,
+	.rx_mode_set			= sxe_hw_rx_mode_set,
+	.pool_rx_mode_get		= sxe_hw_pool_rx_mode_get,
+	.pool_rx_mode_set		= sxe_hw_pool_rx_mode_set,
+	.rx_lro_enable			= sxe_hw_rx_lro_enable,
+	.uc_addr_add			= sxe_hw_uc_addr_add,
+	.uc_addr_del			= sxe_hw_uc_addr_del,
+	.uc_addr_clear			= sxe_hw_uc_addr_clear,
+	.fc_mac_addr_set		= sxe_hw_fc_mac_addr_set,
+	.mta_hash_table_set		= sxe_hw_mta_hash_table_set,
+	.mta_hash_table_update		= sxe_hw_mta_hash_table_update,
+
+	.mc_filter_enable		= sxe_hw_mc_filter_enable,
+	.mc_filter_disable		= sxe_hw_mc_filter_disable,
+	.rx_nfs_filter_disable		= sxe_hw_rx_nfs_filter_disable,
+	.ethertype_filter_set		= sxe_hw_ethertype_filter_set,
+	.vt_ctrl_configure		= sxe_hw_vt_ctrl_cfg,
+	.uc_addr_pool_enable		= sxe_hw_uc_addr_pool_enable,
+	.rx_udp_frag_checksum_disable	= sxe_hw_rx_udp_frag_checksum_disable,
+
+#ifdef SXE_WOL_CONFIGURE
+	.wol_mode_set			= sxe_hw_wol_mode_set,
+	.wol_mode_clean			= sxe_hw_wol_mode_clean,
+	.wol_status_set			= sxe_hw_wol_status_set,
+#endif
+
+	.vt_disable                     = sxe_hw_vt_disable,
+};
+
+u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index)
+{
+	return SXE_REG_READ(hw, SXE_VLVF(reg_index));
+}
+
+static void sxe_hw_vlan_pool_filter_write(struct sxe_hw *hw,
+						u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(reg_index), value);
+	return;
+}
+
+static u32 sxe_hw_vlan_pool_filter_bitmap_read(struct sxe_hw *hw,
+							u16 reg_index)
+{
+	return SXE_REG_READ(hw, SXE_VLVFB(reg_index));
+}
+
+static void sxe_hw_vlan_pool_filter_bitmap_write(struct sxe_hw *hw,
+						u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLVFB(reg_index), value);
+	return;
+}
+
+void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
+					u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VFTA(reg_index), value);
+	return;
+}
+
+u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index)
+{
+	return SXE_REG_READ(hw, SXE_VFTA(reg_index));
+}
+
+void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 vlnctrl;
+
+	vlnctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	if (is_enable) {
+		vlnctrl |= SXE_VLNCTRL_VFE;
+	} else {
+		vlnctrl &= ~SXE_VLNCTRL_VFE;
+	}
+
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlnctrl);
+	return;
+}
+
+static void sxe_hw_vlan_untagged_pkts_rcv_switch(struct sxe_hw *hw,
+							u32 vf, bool accept)
+{
+	u32 vmolr = SXE_REG_READ(hw, SXE_VMOLR(vf));
+
+	vmolr |= SXE_VMOLR_BAM;
+	if (accept) {
+		vmolr |= SXE_VMOLR_AUPE;
+	} else {
+		vmolr &= ~SXE_VMOLR_AUPE;
+	}
+
+	LOG_WARN("vf:%u value:0x%x.\n", vf, vmolr);
+	SXE_REG_WRITE(hw, SXE_VMOLR(vf), vmolr);
+	return;
+}
+
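+/*
+ * Scan the VLVF pool filter table from the top down for an entry that
+ * already carries 'vlan' (with VIEN set); otherwise remember the first
+ * empty slot encountered. Returns the slot index, or -SXE_ERR_NO_SPACE
+ * when the table is full or vlvf_bypass forbids taking a new slot; slot
+ * 0 stays reserved for untagged traffic (vlan == 0).
+ */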
+s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+	s32 ret, regindex, first_empty_slot;
+	u32 bits;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (vlan == 0) {
+		ret = 0;
+		goto l_end;
+	}
+
+	first_empty_slot = vlvf_bypass ? -SXE_ERR_NO_SPACE : 0;
+
+	vlan |= SXE_VLVF_VIEN;
+
+	for (regindex = SXE_VLVF_ENTRIES; --regindex;) {
+		bits = SXE_REG_READ(hw, SXE_VLVF(regindex));
+		if (bits == vlan) {
+			ret = regindex;
+			goto l_end;
+		}
+
+		if (!first_empty_slot && !bits) {
+			first_empty_slot = regindex;
+		}
+	}
+
+	if (!first_empty_slot) {
+		LOG_DEV_WARN("no space in VLVF.\n");
+	}
+
+	ret = first_empty_slot ? : -SXE_ERR_NO_SPACE;
+l_end:
+	return ret;
+}
+
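+/*
+ * Combined VFTA/VLVF update: vfta_delta is reduced to the single bit
+ * that actually needs flipping (masked against the current VFTA word)
+ * and the VFTA write is deferred, so that when the last pool drops a
+ * VLAN the VLVF slot and its pool bitmap can be cleared in the same
+ * pass.
+ */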
+s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
+					u32 vid, u32 pool,
+					bool vlan_on, bool vlvf_bypass)
+{
+	s32 ret = 0;
+	u32 regidx, vfta_delta, vfta, bits;
+	s32 vlvf_index;
+
+	LOG_DEBUG("vid: %u, pool: %u, vlan_on: %d, vlvf_bypass: %d",
+		vid, pool, vlan_on, vlvf_bypass);
+
+	if ((vid > 4095) || (pool > 63)) {
+		ret = -SXE_ERR_PARAM;
+		goto l_end;
+	}
+
+	regidx = vid / 32;
+	vfta_delta = BIT(vid % 32);
+	vfta = SXE_REG_READ(hw, SXE_VFTA(regidx));
+
+	vfta_delta &= vlan_on ? ~vfta : vfta;
+	vfta ^= vfta_delta;
+
+	if (!(SXE_REG_READ(hw, SXE_VT_CTL) & SXE_VT_CTL_VT_ENABLE)) {
+		goto vfta_update;
+	}
+
+	vlvf_index = sxe_hw_vlvf_slot_find(hw, vid, vlvf_bypass);
+	if (vlvf_index < 0) {
+		if (vlvf_bypass) {
+			goto vfta_update;
+		}
+
+		ret = vlvf_index;
+		goto l_end;
+	}
+
+	bits = SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32));
+
+	bits |= BIT(pool % 32);
+	if (vlan_on) {
+		goto vlvf_update;
+	}
+
+	bits ^= BIT(pool % 32);
+
+	if (!bits &&
+	    !SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + 1 - pool / 32))) {
+		if (vfta_delta) {
+			SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
+		}
+
+		SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), 0);
+
+		goto l_end;
+	}
+
+	vfta_delta = 0;
+
+vlvf_update:
+	SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), bits);
+	SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), SXE_VLVF_VIEN | vid);
+
+vfta_update:
+	if (vfta_delta) {
+		SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
+	}
+
+l_end:
+	return ret;
+}
+
+void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < SXE_VFT_TBL_SIZE; offset++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(offset), 0);
+	}
+
+	for (offset = 0; offset < SXE_VLVF_ENTRIES; offset++) {
+		SXE_REG_WRITE(hw, SXE_VLVF(offset), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(offset * 2), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(offset * 2 + 1), 0);
+	}
+
+	return;
+}
+
+static const struct sxe_filter_vlan_operations sxe_filter_vlan_ops = {
+	.pool_filter_read		= sxe_hw_vlan_pool_filter_read,
+	.pool_filter_write		= sxe_hw_vlan_pool_filter_write,
+	.pool_filter_bitmap_read	= sxe_hw_vlan_pool_filter_bitmap_read,
+	.pool_filter_bitmap_write	= sxe_hw_vlan_pool_filter_bitmap_write,
+	.filter_array_write		= sxe_hw_vlan_filter_array_write,
+	.filter_array_read		= sxe_hw_vlan_filter_array_read,
+	.filter_array_clear		= sxe_hw_vlan_filter_array_clear,
+	.filter_switch			= sxe_hw_vlan_filter_switch,
+	.untagged_pkts_rcv_switch	= sxe_hw_vlan_untagged_pkts_rcv_switch,
+	.filter_configure		= sxe_hw_vlan_filter_configure,
+};
+
+
+static void sxe_hw_rx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 dbucfg = SXE_REG_READ(hw, SXE_DRXCFG);
+
+	if (is_on) {
+		dbucfg |= SXE_DRXCFG_DBURX_START;
+	} else {
+		dbucfg &= ~SXE_DRXCFG_DBURX_START;
+	}
+
+	SXE_REG_WRITE(hw, SXE_DRXCFG, dbucfg);
+
+	return;
+}
+
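+/*
+ * Rx packet buffer carve-up. With PBA_STRATEGY_WEIGHTED the first half
+ * of the buffers each receive 5/4 of an equal share
+ * (total * 10 / (num_pb * 8)) and the remainder falls through to the
+ * equal split; PBA_STRATEGY_EQUAL divides everything evenly. Unused
+ * buffers are sized to zero.
+ */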
+static void sxe_hw_rx_pkt_buf_size_configure(struct sxe_hw *hw,
+			     u8 num_pb,
+			     u32 headroom,
+			     u16 strategy)
+{
+	u16 total_buf_size = (SXE_RX_PKT_BUF_SIZE - headroom);
+	u32 rx_buf_size;
+	u16 i = 0;
+
+	if (!num_pb) {
+		num_pb = 1;
+	}
+
+	switch (strategy) {
+	case (PBA_STRATEGY_WEIGHTED):
+		rx_buf_size = ((total_buf_size * 5 * 2) / (num_pb * 8));
+		total_buf_size -= rx_buf_size * (num_pb / 2);
+		rx_buf_size <<= SXE_RX_PKT_BUF_SIZE_SHIFT;
+		for (i = 0; i < (num_pb / 2); i++) {
+			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
+		}
+		fallthrough;
+	case (PBA_STRATEGY_EQUAL):
+		rx_buf_size = (total_buf_size / (num_pb - i))
+				<< SXE_RX_PKT_BUF_SIZE_SHIFT;
+		for (; i < num_pb; i++) {
+			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
+	}
+
+	return;
+}
+
+u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb)
+{
+	return SXE_REG_READ(hw, SXE_RXPBSIZE(pb));
+}
+
+void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
+						u8 tcs, bool is_4q_per_pool,
+						bool sriov_enable)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc &= ~SXE_MRQE_MASK;
+
+	if (sriov_enable) {
+		if (tcs > 4) {
+			mrqc |= SXE_MRQC_VMDQRT8TCEN;
+		} else if (tcs > 1) {
+			mrqc |= SXE_MRQC_VMDQRT4TCEN;
+		} else if (is_4q_per_pool) {
+			mrqc |= SXE_MRQC_VMDQRSS32EN;
+		} else {
+			mrqc |= SXE_MRQC_VMDQRSS64EN;
+		}
+	} else {
+		if (tcs > 4) {
+			mrqc |= SXE_MRQC_RTRSS8TCEN;
+		} else if (tcs > 1) {
+			mrqc |= SXE_MRQC_RTRSS4TCEN;
+		} else {
+			mrqc |= SXE_MRQC_RSSEN;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
+static void sxe_hw_rss_hash_pkt_type_set(struct sxe_hw *hw, u32 version)
+{
+	u32 mrqc = 0;
+	u32 rss_field = 0;
+
+	rss_field |= SXE_MRQC_RSS_FIELD_IPV4 |
+		     SXE_MRQC_RSS_FIELD_IPV4_TCP |
+		     SXE_MRQC_RSS_FIELD_IPV6 |
+		     SXE_MRQC_RSS_FIELD_IPV6_TCP;
+
+	if (version == SXE_RSS_IP_VER_4) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+	}
+	if (version == SXE_RSS_IP_VER_6) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+	}
+
+	mrqc |= rss_field;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
+static void sxe_hw_rss_hash_pkt_type_update(struct sxe_hw *hw,
+							u32 version)
+{
+	u32 mrqc;
+
+	mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc |= SXE_MRQC_RSS_FIELD_IPV4
+	      | SXE_MRQC_RSS_FIELD_IPV4_TCP
+	      | SXE_MRQC_RSS_FIELD_IPV6
+	      | SXE_MRQC_RSS_FIELD_IPV6_TCP;
+
+	mrqc &= ~(SXE_MRQC_RSS_FIELD_IPV4_UDP |
+		  SXE_MRQC_RSS_FIELD_IPV6_UDP);
+
+	if (version == SXE_RSS_IP_VER_4) {
+		mrqc |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+	}
+	if (version == SXE_RSS_IP_VER_6) {
+		mrqc |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
+static void sxe_hw_rss_rings_used_set(struct sxe_hw *hw, u32 rss_num,
+						u16 pool, u16 pf_offset)
+{
+	u32 psrtype = 0;
+
+	if (rss_num > 3) {
+		psrtype |= 2u << 29;
+	} else if (rss_num > 1) {
+		psrtype |= 1u << 29;
+	}
+
+	while (pool--) {
+		SXE_REG_WRITE(hw, SXE_PSRTYPE(pf_offset + pool), psrtype);
+	}
+
+	return;
+}
+
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key)
+{
+	u32 i;
+
+	for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) {
+		SXE_REG_WRITE(hw, SXE_RSSRK(i), rss_key[i]);
+	}
+
+	return;
+}
+
+void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RETA(reg_idx >> 2), value);
+	return;
+}
+
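+/*
+ * The redirection table packs four 8-bit queue indices per 32-bit RETA
+ * register, so a register write is only issued on every fourth entry
+ * ((i & 3) == 3).
+ */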
+void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl)
+{
+	u32 i;
+	u32 tbl = 0;
+	u32 indices_multi = 0x1;
+
+	for (i = 0; i < SXE_MAX_RETA_ENTRIES; i++) {
+		tbl |= indices_multi * redir_tbl[i] << (i & 0x3) * 8;
+		if ((i & 3) == 3) {
+			sxe_hw_rss_redir_tbl_reg_write(hw, i, tbl);
+			tbl = 0;
+		}
+	}
+	return;
+}
+
+void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw)
+{
+	u32 rxctrl;
+
+	if (hw->mac.set_lben) {
+		u32 pfdtxgswc = SXE_REG_READ(hw, SXE_PFDTXGSWC);
+		pfdtxgswc |= SXE_PFDTXGSWC_VT_LBEN;
+		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, pfdtxgswc);
+		hw->mac.set_lben = false;
+	}
+
+	rxctrl = SXE_REG_READ(hw, SXE_RXCTRL);
+	rxctrl |= SXE_RXCTRL_RXEN;
+	SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl);
+
+	return;
+}
+
+void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw)
+{
+	u32 rxctrl;
+
+	rxctrl = SXE_REG_READ(hw, SXE_RXCTRL);
+	if (rxctrl & SXE_RXCTRL_RXEN) {
+		u32 pfdtxgswc = SXE_REG_READ(hw, SXE_PFDTXGSWC);
+		if (pfdtxgswc & SXE_PFDTXGSWC_VT_LBEN) {
+			pfdtxgswc &= ~SXE_PFDTXGSWC_VT_LBEN;
+			SXE_REG_WRITE(hw, SXE_PFDTXGSWC, pfdtxgswc);
+			hw->mac.set_lben = true;
+		} else {
+			hw->mac.set_lben = false;
+		}
+		rxctrl &= ~SXE_RXCTRL_RXEN;
+		SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl);
+	}
+
+	return;
+}
+
+static void sxe_hw_rx_func_switch_on(struct sxe_hw *hw)
+{
+	u32 rxctrl;
+
+	rxctrl = SXE_REG_READ(hw, SXE_COMCTRL);
+	rxctrl |= SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
+	SXE_REG_WRITE(hw, SXE_COMCTRL, rxctrl);
+
+	return;
+}
+
+void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 dbucfg;
+
+	dbucfg = SXE_REG_READ(hw, SXE_DTXCFG);
+
+	if (is_on) {
+		dbucfg |= SXE_DTXCFG_DBUTX_START;
+		dbucfg |= SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG;
+		SXE_REG_WRITE(hw, SXE_DTXCFG, dbucfg);
+	} else {
+		dbucfg &= ~SXE_DTXCFG_DBUTX_START;
+		SXE_REG_WRITE(hw, SXE_DTXCFG, dbucfg);
+	}
+
+	return;
+}
+
+void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb)
+{
+	u32 i, tx_pkt_size;
+
+	if (!num_pb) {
+		num_pb = 1;
+	}
+
+	tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb;
+	for (i = 0; i < num_pb; i++) {
+		SXE_REG_WRITE(hw, SXE_TXPBSIZE(i), tx_pkt_size);
+	}
+
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_TXPBSIZE(i), 0);
+	}
+
+	return;
+}
+
+void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 lro_dbu = SXE_REG_READ(hw, SXE_LRODBU);
+
+	if (is_on) {
+		lro_dbu &= ~SXE_LRODBU_LROACKDIS;
+	} else {
+		lro_dbu |= SXE_LRODBU_LROACKDIS;
+	}
+
+	SXE_REG_WRITE(hw, SXE_LRODBU, lro_dbu);
+
+	return;
+}
+
+static void sxe_hw_vf_rx_switch(struct sxe_hw *hw,
+				u32 reg_offset, u32 vf_index, bool is_off)
+{
+	u32 vfre = SXE_REG_READ(hw, SXE_VFRE(reg_offset));
+
+	if (is_off) {
+		vfre &= ~BIT(vf_index);
+	} else {
+		vfre |= BIT(vf_index);
+	}
+
+	SXE_REG_WRITE(hw, SXE_VFRE(reg_offset), vfre);
+
+	return;
+}
+
+STATIC s32 sxe_hw_fnav_wait_init_done(struct sxe_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	for (i = 0; i < SXE_FNAV_INIT_DONE_POLL; i++) {
+		if (SXE_REG_READ(hw, SXE_FNAVCTRL) &
+				   SXE_FNAVCTRL_INIT_DONE) {
+			break;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	if (i >= SXE_FNAV_INIT_DONE_POLL) {
+		LOG_DEV_DEBUG("flow navigator poll time exceeded!\n");
+		ret = -SXE_ERR_FNAV_REINIT_FAILED;
+	}
+
+	return ret;
+}
+
+void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl)
+{
+	u32 fnavctrl_ori;
+	bool is_clear_stat = false;
+
+	SXE_REG_WRITE(hw, SXE_FNAVHKEY, SXE_FNAV_BUCKET_HASH_KEY);
+	SXE_REG_WRITE(hw, SXE_FNAVSKEY, SXE_FNAV_SAMPLE_HASH_KEY);
+
+	fnavctrl_ori = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	if ((fnavctrl_ori & 0x13) != (fnavctrl & 0x13)) {
+		is_clear_stat = true;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_fnav_wait_init_done(hw);
+
+	if (is_clear_stat) {
+		SXE_REG_READ(hw, SXE_FNAVUSTAT);
+		SXE_REG_READ(hw, SXE_FNAVFSTAT);
+		SXE_REG_READ(hw, SXE_FNAVMATCH);
+		SXE_REG_READ(hw, SXE_FNAVMISS);
+		SXE_REG_READ(hw, SXE_FNAVLEN);
+	}
+
+	return;
+}
+
+static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw,
+					u32 fnavctrl, u32 sxe_fnav_mode)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("fnavctrl=0x%x, sxe_fnav_mode=%u\n", fnavctrl, sxe_fnav_mode);
+
+	if ((sxe_fnav_mode != SXE_FNAV_SAMPLE_MODE) &&
+		(sxe_fnav_mode != SXE_FNAV_SPECIFIC_MODE)) {
+		LOG_ERROR_BDF("mode[%u] is an invalid fnav mode, fnav will not work. please use "
+			"SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE\n",
+			sxe_fnav_mode);
+		goto l_end;
+	}
+
+	if (sxe_fnav_mode == SXE_FNAV_SPECIFIC_MODE) {
+		fnavctrl |= SXE_FNAVCTRL_SPECIFIC_MATCH |
+			 (SXE_FNAV_DROP_QUEUE << SXE_FNAVCTRL_DROP_Q_SHIFT);
+	}
+
+	fnavctrl |= (0x6 << SXE_FNAVCTRL_FLEX_SHIFT) |
+		    (0xA << SXE_FNAVCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << SXE_FNAVCTRL_FULL_THRESH_SHIFT);
+
+	sxe_hw_fnav_enable(hw, fnavctrl);
+
+l_end:
+	return 0;
+}
+
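+/*
+ * Build the combined L4 port mask: destination mask in the high
+ * halfword, source mask in the low, then the classic swap sequence
+ * (bits, pairs, nibbles, bytes) bit-reverses each 16-bit half in place,
+ * which is presumably the ordering FNAVTCPM/FNAVUDPM expect.
+ */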
+u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask)
+{
+	u32 mask = ntohs(dst_port_mask);
+
+	mask <<= SXE_FNAVTCPM_DPORTM_SHIFT;
+	mask |= ntohs(src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+static s32 sxe_hw_fnav_vm_pool_mask_get(struct sxe_hw *hw,
+					u8 vm_pool, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (vm_pool & SXE_SAMPLE_VM_POOL_MASK) {
+	case 0x0:
+		*fnavm |= SXE_FNAVM_POOL;
+		fallthrough;
+	case 0x7F:
+		break;
+	default:
+		LOG_DEV_ERR("error on vm pool mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+static s32 sxe_hw_fnav_flow_type_mask_get(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input_mask,
+					u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (input_mask->ntuple.flow_type & SXE_SAMPLE_L4TYPE_MASK) {
+	case 0x0:
+		*fnavm |= SXE_FNAVM_L4P;
+		if (input_mask->ntuple.dst_port ||
+		    input_mask->ntuple.src_port) {
+			LOG_DEV_ERR("error on src/dst port mask\n");
+			ret = -SXE_ERR_CONFIG;
+			goto l_ret;
+		}
+		break;
+	case SXE_SAMPLE_L4TYPE_MASK:
+		break;
+	default:
+		LOG_DEV_ERR("error on flow type mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+l_ret:
+	return ret;
+}
+
+static s32 sxe_hw_fnav_vlan_mask_get(struct sxe_hw *hw,
+					__be16 vlan_id, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (ntohs(vlan_id) & SXE_SAMPLE_VLAN_MASK) {
+	case 0x0000:
+		*fnavm |= SXE_FNAVM_VLANID;
+		fallthrough;
+	case 0x0FFF:
+		*fnavm |= SXE_FNAVM_VLANP;
+		break;
+	case 0xE000:
+		*fnavm |= SXE_FNAVM_VLANID;
+		fallthrough;
+	case 0xEFFF:
+		break;
+	default:
+		LOG_DEV_ERR("error on VLAN mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+static s32 sxe_hw_fnav_flex_bytes_mask_get(struct sxe_hw *hw,
+					__be16 flex_bytes, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch ((__force u16)flex_bytes & SXE_SAMPLE_FLEX_BYTES_MASK) {
+	case 0x0000:
+		*fnavm |= SXE_FNAVM_FLEX;
+		fallthrough;
+	case 0xFFFF:
+		break;
+	default:
+		LOG_DEV_ERR("error on flexible byte mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
+				    union sxe_fnav_rule_info *input_mask)
+{
+	s32 ret;
+	u32 fnavm = SXE_FNAVM_DIPv6;
+	u32 fnavtcpm;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (input_mask->ntuple.bkt_hash) {
+		LOG_DEV_ERR("bucket hash should always be 0 in mask\n");
+	}
+
+	ret = sxe_hw_fnav_vm_pool_mask_get(hw, input_mask->ntuple.vm_pool, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	ret = sxe_hw_fnav_flow_type_mask_get(hw, input_mask, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	ret = sxe_hw_fnav_vlan_mask_get(hw, input_mask->ntuple.vlan_id, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	ret = sxe_hw_fnav_flex_bytes_mask_get(hw, input_mask->ntuple.flex_bytes, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	LOG_DEBUG_BDF("fnavm = 0x%x\n", fnavm);
+	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
+
+	fnavtcpm = sxe_hw_fnav_port_mask_get(input_mask->ntuple.src_port,
+					     input_mask->ntuple.dst_port);
+
+	LOG_DEBUG_BDF("fnavtcpm = 0x%x\n", fnavtcpm);
+	SXE_REG_WRITE(hw, SXE_FNAVTCPM, ~fnavtcpm);
+	SXE_REG_WRITE(hw, SXE_FNAVUDPM, ~fnavtcpm);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIP4M,
+			     ~input_mask->ntuple.src_ip[0]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVDIP4M,
+			     ~input_mask->ntuple.dst_ip[0]);
+
+	return 0;
+
+l_err_config:
+	return -SXE_ERR_CONFIG;
+}
+
+STATIC s32 sxe_hw_fnav_cmd_complete_check(struct sxe_hw *hw,
+							u32 *fnavcmd)
+{
+	u32 i;
+
+	for (i = 0; i < SXE_FNAVCMD_CMD_POLL * 10; i++) {
+		*fnavcmd = SXE_REG_READ(hw, SXE_FNAVCMD);
+		if (!(*fnavcmd & SXE_FNAVCMD_CMD_MASK)) {
+			return 0;
+		}
+
+		udelay(10);
+	}
+
+	return -SXE_ERR_FNAV_CMD_INCOMPLETE;
+}
+
+static void sxe_hw_fnav_filter_ip_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input)
+{
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(0),
+			     input->ntuple.src_ip[0]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(1),
+			     input->ntuple.src_ip[1]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(2),
+			     input->ntuple.src_ip[2]);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPSA, input->ntuple.src_ip[0]);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPDA, input->ntuple.dst_ip[0]);
+
+	return;
+}
+
+static void sxe_hw_fnav_filter_port_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input)
+{
+	u32 fnavport;
+
+	fnavport = be16_to_cpu(input->ntuple.dst_port);
+	fnavport <<= SXE_FNAVPORT_DESTINATION_SHIFT;
+	fnavport |= be16_to_cpu(input->ntuple.src_port);
+	SXE_REG_WRITE(hw, SXE_FNAVPORT, fnavport);
+
+	return;
+}
+
+static void sxe_hw_fnav_filter_vlan_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input)
+{
+	u32 fnavvlan;
+
+	fnavvlan = ntohs(SXE_SWAP_16(input->ntuple.flex_bytes));
+	fnavvlan <<= SXE_FNAVVLAN_FLEX_SHIFT;
+	fnavvlan |= ntohs(input->ntuple.vlan_id);
+	SXE_REG_WRITE(hw, SXE_FNAVVLAN, fnavvlan);
+
+	return;
+}
+
+static void sxe_hw_fnav_filter_bkt_hash_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input,
+					u16 soft_id)
+{
+	u32 fnavhash;
+
+	fnavhash = (__force u32)input->ntuple.bkt_hash;
+	fnavhash |= soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT;
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash);
+
+	return;
+}
+
+static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input,
+					u8 queue)
+{
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavcmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+
+#ifndef SXE_DPDK
+	if (queue == SXE_FNAV_DROP_QUEUE) {
+		fnavcmd |= SXE_FNAVCMD_DROP;
+	}
+#endif
+
+	fnavcmd |= input->ntuple.flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	fnavcmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+	fnavcmd |= (u32)input->ntuple.vm_pool << SXE_FNAVCMD_VT_POOL_SHIFT;
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, fnavcmd);
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator command did not complete!\n");
+	}
+
+	return ret;
+}
+
+s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id, u8 queue)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_fnav_filter_ip_set(hw, input);
+
+	sxe_hw_fnav_filter_port_set(hw, input);
+
+	sxe_hw_fnav_filter_vlan_set(hw, input);
+
+	sxe_hw_fnav_filter_bkt_hash_set(hw, input, soft_id);
+
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_filter_cmd_set(hw, input, queue);
+	if (ret) {
+		LOG_ERROR_BDF("set fnav filter cmd error. ret=%d\n", ret);
+	}
+
+	return ret;
+}
+
+s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id)
+{
+	u32 fnavhash;
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavhash = (__force u32)input->ntuple.bkt_hash;
+	fnavhash |= soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT;
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_QUERY_REM_FILT);
+
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator command did not complete!\n");
+		return ret;
+	}
+
+	if (fnavcmd & SXE_FNAVCMD_FILTER_VALID) {
+		SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash);
+		SXE_WRITE_FLUSH(hw);
+		SXE_REG_WRITE(hw, SXE_FNAVCMD,
+				SXE_FNAVCMD_CMD_REMOVE_FLOW);
+	}
+
+	return 0;
+}
+
+void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
+					  u8 flow_type, u32 hash_value, u8 queue)
+{
+	u32 fnavcmd;
+	u64 fnavhashcmd;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavcmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+	fnavcmd |= (u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	fnavcmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+
+	fnavhashcmd = (u64)fnavcmd << 32;
+	fnavhashcmd |= hash_value;
+	SXE_REG64_WRITE(hw, SXE_FNAVHASH, fnavhashcmd);
+
+	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)fnavhashcmd);
+
+	return;
+}
+
+static u64 sxe_hw_fnav_sample_rule_hash_get(struct sxe_hw *hw,
+					  u8 flow_type, u32 hash_value, u8 queue)
+{
+	u32 fnavcmd;
+	u64 fnavhashcmd;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavcmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+	fnavcmd |= (u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	fnavcmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+
+	fnavhashcmd = (u64)fnavcmd << 32;
+	fnavhashcmd |= hash_value;
+
+	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)fnavhashcmd);
+
+	return fnavhashcmd;
+}
+
+static s32 sxe_hw_fnav_sample_hash_cmd_get(struct sxe_hw *hw,
+					  u8  flow_type,
+					  u32 hash_value,
+					  u8  queue, u64 *hash_cmd)
+{
+	s32 ret = 0;
+	u8 pkg_type;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	pkg_type = flow_type & SXE_SAMPLE_FLOW_TYPE_MASK;
+	switch (pkg_type) {
+	case SXE_SAMPLE_FLOW_TYPE_TCPV4:
+	case SXE_SAMPLE_FLOW_TYPE_UDPV4:
+	case SXE_SAMPLE_FLOW_TYPE_SCTPV4:
+	case SXE_SAMPLE_FLOW_TYPE_TCPV6:
+	case SXE_SAMPLE_FLOW_TYPE_UDPV6:
+	case SXE_SAMPLE_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		LOG_DEV_ERR("error on flow type input\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_end;
+	}
+
+	*hash_cmd = sxe_hw_fnav_sample_rule_hash_get(hw, pkg_type, hash_value, queue);
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_hw_fnav_single_sample_rule_del(struct sxe_hw *hw,
+						u32 hash)
+{
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_REMOVE_FLOW);
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator previous command did not complete, "
+			"aborting table re-initialization.\n");
+	}
+
+	return ret;
+}
+
+s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw)
+{
+	u32 fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavctrl &= ~SXE_FNAVCTRL_INIT_DONE;
+
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator previous command did not complete, "
+			"aborting table re-initialization.\n");
+		goto l_ret;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FNAVFREE, 0);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD,
+			(SXE_REG_READ(hw, SXE_FNAVCMD) |
+			 SXE_FNAVCMD_CLEARHT));
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_FNAVCMD,
+			(SXE_REG_READ(hw, SXE_FNAVCMD) &
+			 ~SXE_FNAVCMD_CLEARHT));
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, 0x00);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_wait_init_done(hw);
+	if (ret) {
+		LOG_ERROR_BDF("flow navigator simple poll time exceeded!\n");
+		goto l_ret;
+	}
+
+	SXE_REG_READ(hw, SXE_FNAVUSTAT);
+	SXE_REG_READ(hw, SXE_FNAVFSTAT);
+	SXE_REG_READ(hw, SXE_FNAVMATCH);
+	SXE_REG_READ(hw, SXE_FNAVMISS);
+	SXE_REG_READ(hw, SXE_FNAVLEN);
+
+l_ret:
+	return ret;
+}
+
+static void sxe_hw_fnav_sample_stats_reinit(struct sxe_hw *hw)
+{
+	SXE_REG_READ(hw, SXE_FNAVUSTAT);
+	SXE_REG_READ(hw, SXE_FNAVFSTAT);
+	SXE_REG_READ(hw, SXE_FNAVMATCH);
+	SXE_REG_READ(hw, SXE_FNAVMISS);
+	SXE_REG_READ(hw, SXE_FNAVLEN);
+
+	return;
+}
+
+static void sxe_hw_ptp_freq_adjust(struct sxe_hw *hw, u32 adj_freq)
+{
+	SXE_REG_WRITE(hw, SXE_TIMADJL, 0);
+	SXE_REG_WRITE(hw, SXE_TIMADJH, adj_freq);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 systiml;
+	u32 systimm;
+	u64 ns;
+
+	systiml = SXE_REG_READ(hw, SXE_SYSTIML);
+	systimm = SXE_REG_READ(hw, SXE_SYSTIMM);
+	ns = SXE_TIME_TO_NS(systiml, systimm);
+
+	LOG_DEBUG_BDF("get ptp hw systime systiml=%u, systimm=%u, ns=%"SXE_PRIU64"\n",
+			systiml, systimm, ns);
+	return ns;
+}
+
+void sxe_hw_ptp_systime_init(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_SYSTIML, 0);
+	SXE_REG_WRITE(hw, SXE_SYSTIMM, 0);
+	SXE_REG_WRITE(hw, SXE_SYSTIMH, 0);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+void sxe_hw_ptp_init(struct sxe_hw *hw)
+{
+	u32 regval;
+	u32 tsctl = SXE_TSCTRL_TSEN |
+	SXE_TSCTRL_VER_2 |
+	SXE_TSCTRL_PTYP_ALL |
+	SXE_TSCTRL_L4_UNICAST;
+
+	regval = SXE_REG_READ(hw, SXE_TSCTRL);
+	regval &= ~SXE_TSCTRL_ONESTEP;
+	regval &= ~SXE_TSCTRL_CSEN;
+	regval |= tsctl;
+	SXE_REG_WRITE(hw, SXE_TSCTRL, regval);
+
+	SXE_REG_WRITE(hw, SXE_TIMINC,
+			SXE_TIMINC_SET(SXE_INCPD, SXE_IV_NS, SXE_IV_SNS));
+
+	return;
+}
+
+void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw)
+{
+	SXE_REG_READ(hw, SXE_RXSTMPH);
+	return;
+}
+
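+/*
+ * Reassemble the Tx timestamp from two indirect TXSTMP_VAL reads: the
+ * low byte of the "seconds" word actually carries the top byte of the
+ * nanosecond field, and only 24 bits of seconds come from the capture,
+ * so the top 8 bits are borrowed from SYSTIMM; the final check guards
+ * against SYSTIMM having wrapped its low 24 bits between capture and
+ * read by stepping the borrowed high part back by one.
+ */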
+void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
+						u32 *ts_sec, u32 *ts_ns)
+{
+	u32 reg_sec;
+	u32 reg_ns;
+	u32 sec_8bit;
+	u32 sec_24bit;
+	u32 systimm;
+	u32 systimm_8bit;
+	u32 systimm_24bit;
+
+	SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC0);
+	reg_ns = SXE_REG_READ(hw, SXE_TXSTMP_VAL);
+	SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC1);
+	reg_sec = SXE_REG_READ(hw, SXE_TXSTMP_VAL);
+	systimm = SXE_REG_READ(hw, SXE_SYSTIMM);
+
+	sec_8bit  = reg_sec & 0x000000FF;
+	sec_24bit = (reg_sec >> 8) & 0x00FFFFFF;
+
+	systimm_24bit = systimm & 0x00FFFFFF;
+	systimm_8bit  = systimm & 0xFF000000;
+
+	*ts_ns  = (sec_8bit << 24) | ((reg_ns & 0xFFFFFF00) >> 8);
+
+	if (unlikely((sec_24bit - systimm_24bit) >= 0x00FFFFF0)) {
+		if (systimm_8bit >= 1) {
+			systimm_8bit -= 1;
+		}
+	}
+
+	*ts_sec = systimm_8bit | sec_24bit;
+	return;
+}
+
+u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 rxtsl;
+	u32 rxtsh;
+	u64 ns;
+
+	rxtsl = SXE_REG_READ(hw, SXE_RXSTMPL);
+	rxtsh = SXE_REG_READ(hw, SXE_RXSTMPH);
+	ns = SXE_TIME_TO_NS(rxtsl, rxtsh);
+
+	LOG_DEBUG_BDF("ptp get rx ptp timestamp low=%u, high=%u, ns=%"SXE_PRIU64"\n",
+			rxtsl, rxtsh, ns);
+	return ns;
+}
+
+bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw)
+{
+	bool rx_tmstamp_valid = false;
+	u32 tsyncrxctl;
+
+	tsyncrxctl = SXE_REG_READ(hw, SXE_TSYNCRXCTL);
+	if (tsyncrxctl & SXE_TSYNCRXCTL_RXTT) {
+		rx_tmstamp_valid = true;
+	}
+
+	return rx_tmstamp_valid;
+}
+
+void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
+					bool is_l2, u32 tsctl, u32 tses)
+{
+	u32 regval;
+
+	if (is_l2) {
+		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588),
+			(SXE_ETQF_FILTER_EN |
+			 SXE_ETQF_1588 |
+			 ETH_P_1588));
+	} else {
+		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588), 0);
+	}
+
+	if (tsctl) {
+		regval = SXE_REG_READ(hw, SXE_TSCTRL);
+		regval |= tsctl;
+		SXE_REG_WRITE(hw, SXE_TSCTRL, regval);
+	}
+
+	SXE_REG_WRITE(hw, SXE_TSES, tses);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TSYNCTXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCTXCTL) |
+			SXE_TSYNCTXCTL_TEN));
+
+	SXE_REG_WRITE(hw, SXE_TSYNCRXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCRXCTL) |
+			SXE_TSYNCRXCTL_REN));
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void sxe_hw_dcb_tc_rss_configure(struct sxe_hw *hw, u16 rss)
+{
+	u32 msb = 0;
+
+	while (rss) {
+		msb++;
+		rss >>= 1;
+	}
+
+	SXE_REG_WRITE(hw, SXE_RQTC, msb * SXE_8_TC_MSB);
+}
+
+static void sxe_hw_tx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
+				 unsigned long timeout)
+{
+	unsigned long wait_delay, delay_interval;
+	int wait_loop;
+	u32 txdctl;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	txdctl &= ~SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+	delay_interval = timeout / 100;
+
+	wait_loop = SXE_MAX_RX_DESC_POLL;
+	wait_delay = delay_interval;
+
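+	/*
+	 * Poll until hardware clears TXDCTL.ENABLE, lengthening the sleep
+	 * on every pass so the total wait approximates the requested
+	 * timeout.
+	 */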
+	while (wait_loop--) {
+		usleep_range(wait_delay, wait_delay + 10);
+		wait_delay += delay_interval * 2;
+		txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+
+		if (!(txdctl & SXE_TXDCTL_ENABLE)) {
+			return;
+		}
+	}
+
+	LOG_MSG_ERR(drv, "register TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void sxe_hw_rx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
+				 unsigned long timeout)
+{
+	unsigned long wait_delay, delay_interval;
+	int wait_loop;
+	u32 rxdctl;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	rxdctl &= ~SXE_RXDCTL_ENABLE;
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+	delay_interval = timeout / 100;
+
+	wait_loop = SXE_MAX_RX_DESC_POLL;
+	wait_delay = delay_interval;
+
+	while (wait_loop--) {
+		usleep_range(wait_delay, wait_delay + 10);
+		wait_delay += delay_interval * 2;
+		rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+
+		if (!(rxdctl & SXE_RXDCTL_ENABLE))
+			return;
+	}
+
+	LOG_MSG_ERR(drv, "register RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static u32 sxe_hw_tx_dbu_fc_status_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_TXPBFCS);
+}
+
+static void sxe_hw_fnav_sample_hash_set(struct sxe_hw *hw, u64 hash)
+{
+	SXE_REG64_WRITE(hw, SXE_FNAVHASH, hash);
+	return;
+}
+
+static const struct sxe_dbu_operations sxe_dbu_ops = {
+	.rx_pkt_buf_size_configure	= sxe_hw_rx_pkt_buf_size_configure,
+	.rx_pkt_buf_switch		= sxe_hw_rx_pkt_buf_switch,
+	.rx_multi_ring_configure	= sxe_hw_rx_multi_ring_configure,
+	.rss_key_set_all		= sxe_hw_rss_key_set_all,
+	.rss_redir_tbl_set_all		= sxe_hw_rss_redir_tbl_set_all,
+	.rx_cap_switch_on		= sxe_hw_rx_cap_switch_on,
+	.rx_cap_switch_off		= sxe_hw_rx_cap_switch_off,
+	.rss_hash_pkt_type_set		= sxe_hw_rss_hash_pkt_type_set,
+	.rss_hash_pkt_type_update	= sxe_hw_rss_hash_pkt_type_update,
+	.rss_rings_used_set		= sxe_hw_rss_rings_used_set,
+	.lro_ack_switch			= sxe_hw_rx_lro_ack_switch,
+
+	.fnav_mode_init			= sxe_hw_fnav_mode_init,
+	.fnav_specific_rule_mask_set	= sxe_hw_fnav_specific_rule_mask_set,
+	.fnav_specific_rule_add		= sxe_hw_fnav_specific_rule_add,
+	.fnav_specific_rule_del		= sxe_hw_fnav_specific_rule_del,
+	.fnav_sample_hash_cmd_get	= sxe_hw_fnav_sample_hash_cmd_get,
+	.fnav_sample_stats_reinit	= sxe_hw_fnav_sample_stats_reinit,
+	.fnav_sample_hash_set		= sxe_hw_fnav_sample_hash_set,
+	.fnav_single_sample_rule_del	= sxe_hw_fnav_single_sample_rule_del,
+
+	.tx_pkt_buf_switch		= sxe_hw_tx_pkt_buf_switch,
+	.tx_pkt_buf_size_configure	= sxe_hw_tx_pkt_buf_size_configure,
+
+	.ptp_init			= sxe_hw_ptp_init,
+	.ptp_freq_adjust		= sxe_hw_ptp_freq_adjust,
+	.ptp_systime_init		= sxe_hw_ptp_systime_init,
+	.ptp_systime_get		= sxe_hw_ptp_systime_get,
+	.ptp_tx_timestamp_get		= sxe_hw_ptp_tx_timestamp_get,
+	.ptp_timestamp_mode_set		= sxe_hw_ptp_timestamp_mode_set,
+	.ptp_timestamp_enable		= sxe_hw_ptp_timestamp_enable,
+	.ptp_rx_timestamp_clear		= sxe_hw_ptp_rx_timestamp_clear,
+	.ptp_rx_timestamp_get		= sxe_hw_ptp_rx_timestamp_get,
+	.ptp_is_rx_timestamp_valid	= sxe_hw_ptp_is_rx_timestamp_valid,
+
+	.dcb_tc_rss_configure		= sxe_hw_dcb_tc_rss_configure,
+	.vf_rx_switch			= sxe_hw_vf_rx_switch,
+	.rx_pkt_buf_size_get		= sxe_hw_rx_pkt_buf_size_get,
+	.rx_func_switch_on		= sxe_hw_rx_func_switch_on,
+
+	.tx_ring_disable		= sxe_hw_tx_ring_disable,
+	.rx_ring_disable		= sxe_hw_rx_ring_disable,
+
+	.tx_dbu_fc_status_get		= sxe_hw_tx_dbu_fc_status_get,
+};
+
+
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw, bool crc_strip_on)
+{
+	u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
+
+	if (crc_strip_on) {
+		rx_dma_ctrl |= SXE_RDRXCTL_CRCSTRIP;
+	} else {
+		rx_dma_ctrl &= ~SXE_RDRXCTL_CRCSTRIP;
+	}
+
+	rx_dma_ctrl &= ~SXE_RDRXCTL_LROFRSTSIZE;
+	SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl);
+	return;
+}
+
+void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw)
+{
+	u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
+
+	rx_dma_ctrl |= SXE_RDRXCTL_LROACKC;
+	SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl);
+	return;
+}
+
+void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx)
+{
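+	/*
+	 * Fixed thresholds: prefetch 0x40 descriptors, descriptor-FIFO
+	 * almost-empty at 0x2; the low-order 0x10 is assumed to be the
+	 * host read threshold in RXDCTL's low bits.
+	 */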
+	u32 rxdctl;
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	rxdctl |= 0x40 << SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT;
+	rxdctl |= 0x2 << SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT;
+	rxdctl |= 0x10;
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+	return;
+}
+
+void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXE_RING_WAIT_LOOP;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	if (is_on) {
+		rxdctl |= SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+		} while (--wait_loop && !(rxdctl & SXE_RXDCTL_ENABLE));
+	} else {
+		rxdctl &= ~SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+		} while (--wait_loop && (rxdctl & SXE_RXDCTL_ENABLE));
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+	if (!wait_loop) {
+		LOG_MSG_ERR(drv, "rx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	if (is_on) {
+		rxdctl |= SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+	} else {
+		rxdctl &= ~SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
+					u8 reg_idx, u32 rdh_value,
+					u32 rdt_value)
+{
+	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), rdh_value);
+	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), rdt_value);
+	return;
+}
+
+static void sxe_hw_rx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), 0);
+
+	return;
+}
+
+static void sxe_hw_rx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), 0);
+
+	return;
+}
+
+void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len, u64 desc_dma_addr,
+					u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDBAL(reg_idx),
+			(desc_dma_addr & DMA_BIT_MASK(32)));
+	SXE_REG_WRITE(hw, SXE_RDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXE_REG_WRITE(hw, SXE_RDLEN(reg_idx), desc_mem_len);
+
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_rx_ring_head_init(hw, reg_idx);
+	sxe_hw_rx_ring_tail_init(hw, reg_idx);
+
+	return;
+}
+
+void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len
+				   )
+{
+	u32 srrctl;
+
+	srrctl = ((header_buf_len << SXE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+			SXE_SRRCTL_BSIZEHDR_MASK);
+	srrctl |= ((pkg_buf_len >> SXE_SRRCTL_BSIZEPKT_SHIFT) &
+			SXE_SRRCTL_BSIZEPKT_MASK);
+
+	SXE_REG_WRITE(hw, SXE_SRRCTL(reg_idx), srrctl);
+
+	return;
+}
+
+void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
+						u8 reg_idx, u32 max_desc)
+{
+	u32 lroctrl;
+	lroctrl = SXE_REG_READ(hw, SXE_LROCTL(reg_idx));
+	lroctrl |= SXE_LROCTL_LROEN;
+	lroctrl |= max_desc;
+	SXE_REG_WRITE(hw, SXE_LROCTL(reg_idx), lroctrl);
+
+	return;
+}
+
+static u32 sxe_hw_rx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+}
+
+static void sxe_hw_dcb_arbiter_set(struct sxe_hw *hw, bool is_enable)
+{
+	u32 rttdcs;
+
+	rttdcs = SXE_REG_READ(hw, SXE_RTTDCS);
+
+	if (is_enable) {
+		rttdcs &= ~SXE_RTTDCS_ARBDIS;
+		rttdcs &= ~SXE_RTTDCS_BPBFSM;
+
+		SXE_REG_WRITE(hw, SXE_RTTDCS, rttdcs);
+	} else {
+		rttdcs |= SXE_RTTDCS_ARBDIS;
+		SXE_REG_WRITE(hw, SXE_RTTDCS, rttdcs);
+	}
+
+	return;
+}
+
+
+static void sxe_hw_tx_multi_ring_configure(struct sxe_hw *hw, u8 tcs,
+				u16 pool_mask, bool sriov_enable, u16 max_txq)
+{
+	u32 mtqc;
+
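+	/*
+	 * MTQC may only be reprogrammed while the TX descriptor arbiter is
+	 * disabled; pick the queue/TC layout from the SR-IOV and DCB mode,
+	 * then re-enable the arbiter.
+	 */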
+	sxe_hw_dcb_arbiter_set(hw, false);
+
+	if (sriov_enable) {
+		mtqc = SXE_MTQC_VT_ENA;
+		if (tcs > SXE_DCB_4_TC)
+			mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+		else if (tcs > SXE_DCB_1_TC)
+			mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+		else if (pool_mask == SXE_4Q_PER_POOL_MASK)
+			mtqc |= SXE_MTQC_32VF;
+		else
+			mtqc |= SXE_MTQC_64VF;
+	} else {
+		if (tcs > SXE_DCB_4_TC) {
+			mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+		} else if (tcs > SXE_DCB_1_TC) {
+			mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+		} else {
+			if (max_txq > 63) {
+				mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+			} else {
+				mtqc = SXE_MTQC_64Q_1PB;
+			}
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, mtqc);
+
+	sxe_hw_dcb_arbiter_set(hw, true);
+
+	return;
+}
+
+void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TDH(reg_idx), 0);
+
+	return;
+}
+
+void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TDT(reg_idx), 0);
+
+	return;
+}
+
+void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), 0);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_TDBAL(reg_idx), (desc_dma_addr & \
+						DMA_BIT_MASK(32)));
+	SXE_REG_WRITE(hw, SXE_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXE_REG_WRITE(hw, SXE_TDLEN(reg_idx), desc_mem_len);
+	sxe_hw_tx_ring_head_init(hw, reg_idx);
+	sxe_hw_tx_ring_tail_init(hw, reg_idx);
+
+	return;
+}
+
+void sxe_hw_tx_desc_thresh_set(
+				struct sxe_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefech_thresh)
+{
+	u32 txdctl = 0;
+
+	txdctl |= (wb_thresh << SXE_TXDCTL_WTHRESH_SHIFT);
+	txdctl |= (host_thresh << SXE_TXDCTL_HTHRESH_SHIFT) | prefech_thresh;
+
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+	return;
+}
+
+void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max)
+{
+	u32 i, value;
+
+	for (i = 0; i < ring_max; i++) {
+		value = SXE_REG_READ(hw, SXE_TXDCTL(i));
+		value &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(i), value);
+
+		value = SXE_REG_READ(hw, SXE_RXDCTL(i));
+		value &= ~SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(i), value);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	usleep_range(1000, 2000);
+
+	return;
+}
+
+void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 wait_loop = SXE_RING_WAIT_LOOP;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u32 txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		} while (--wait_loop && !(txdctl & SXE_TXDCTL_ENABLE));
+	} else {
+		txdctl &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		} while (--wait_loop && (txdctl & SXE_TXDCTL_ENABLE));
+	}
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("tx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+	} else {
+		txdctl &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+	}
+
+	return;
+}
+
+void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
+					u8 num_pb, bool dcb_enable)
+{
+	u32 i, tx_pkt_size, tx_pb_thresh;
+
+	if (!num_pb) {
+		num_pb = 1;
+	}
+
+	tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb;
+	if (dcb_enable) {
+		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_TX_PKT_SIZE_MAX;
+	} else {
+		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_NODCB_TX_PKT_SIZE_MAX;
+	}
+
+	for (i = 0; i < num_pb; i++) {
+		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), tx_pb_thresh);
+	}
+
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), 0);
+	}
+
+	return;
+}
+
+void sxe_hw_tx_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_DMATXCTL);
+	ctl |= SXE_DMATXCTL_TE;
+	SXE_REG_WRITE(hw, SXE_DMATXCTL, ctl);
+
+	return;
+}
+
+static u32 sxe_hw_tx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+}
+
+static void sxe_hw_tx_desc_wb_thresh_clear(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 reg_data;
+
+	reg_data = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	reg_data &= ~SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
+	SXE_WRITE_FLUSH(hw);
+	reg_data &= ~(0x7f << 16);
+	reg_data |= SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
+
+	return;
+}
+
+void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
+					u16 reg_index, bool is_enable)
+{
+	u32 rxdctl;
+
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_index));
+
+	if (is_enable) {
+		rxdctl |= SXE_RXDCTL_VME;
+	} else {
+		rxdctl &= ~SXE_RXDCTL_VME;
+	}
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_index), rxdctl);
+
+	return;
+}
+
+static void sxe_hw_tx_vlan_tag_set(struct sxe_hw *hw,
+				   u16 vid, u16 qos, u32 vf)
+{
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | SXE_VMVIR_VLANA_DEFAULT;
+
+	SXE_REG_WRITE(hw, SXE_VMVIR(vf), vmvir);
+	return;
+}
+
+void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf)
+{
+	SXE_REG_WRITE(hw, SXE_VMVIR(vf), 0);
+	return;
+}
+
+u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf)
+{
+	return SXE_REG_READ(hw, SXE_VMVIR(vf));
+}
+
+void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
+				u8 idx, u32 *head, u32 *tail)
+{
+	*head = SXE_REG_READ(hw, SXE_TDH(idx));
+	*tail = SXE_REG_READ(hw, SXE_TDT(idx));
+
+	return;
+}
+
+void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
+				      u16 *refill,
+				      u16 *max,
+				      u8 *bwg_id,
+				      u8 *prio_type,
+				      u8 *prio_tc,
+				      u8 max_priority)
+{
+	u32    reg;
+	u32    credit_refill;
+	u32    credit_max;
+	u8     i;
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	reg = 0;
+	for (i = 0; i < max_priority; i++) {
+		reg |= (prio_tc[i] << (i * SXE_RTRUP2TC_UP_SHIFT));
+	}
+
+	SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max    = max[i];
+		reg = credit_refill | (credit_max << SXE_RTRPT4C_MCL_SHIFT);
+
+		reg |= (u32)(bwg_id[i]) << SXE_RTRPT4C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_LINK) {
+			reg |= SXE_RTRPT4C_LSP;
+		}
+
+		SXE_REG_WRITE(hw, SXE_RTRPT4C(i), reg);
+	}
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	return;
+}
+
+void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type)
+{
+	u32    reg, max_credits;
+	u8     i;
+
+	for (i = 0; i < 128; i++) {
+		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
+		SXE_REG_WRITE(hw, SXE_RTTDT1C, 0);
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << SXE_RTTDT2C_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << SXE_RTTDT2C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_GROUP) {
+			reg |= SXE_RTTDT2C_GSP;
+		}
+
+		if (prio_type[i] == PRIO_LINK) {
+			reg |= SXE_RTTDT2C_LSP;
+		}
+
+		SXE_REG_WRITE(hw, SXE_RTTDT2C(i), reg);
+	}
+
+	reg = SXE_RTTDCS_TDPAC | SXE_RTTDCS_TDRM;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+	return;
+}
+
+void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type,
+					   u8 *prio_tc,
+					   u8 max_priority)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
+	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT) |
+	      SXE_RTTPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
+
+	reg = 0;
+	for (i = 0; i < max_priority; i++) {
+		reg |= (prio_tc[i] << (i * SXE_RTTUP2TC_UP_SHIFT));
+	}
+
+	SXE_REG_WRITE(hw, SXE_RTTUP2TC, reg);
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << SXE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << SXE_RTTPT2C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_GROUP) {
+			reg |= SXE_RTTPT2C_GSP;
+		}
+
+		if (prio_type[i] == PRIO_LINK) {
+			reg |= SXE_RTTPT2C_LSP;
+		}
+
+		SXE_REG_WRITE(hw, SXE_RTTPT2C(i), reg);
+	}
+
+	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
+	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT);
+	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
+
+	return;
+}
+
+void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
+						u8 pfc_en, u8 *prio_tc,
+						u8 max_priority)
+{
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+	u32 reg_val;
+
+	reg_val = SXE_REG_READ(hw, SXE_FLCTRL);
+
+	reg_val &= ~SXE_FCTRL_TFCE_MASK;
+	reg_val |= SXE_FCTRL_TFCE_PFC_EN;
+
+	reg_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	reg_val &= ~(SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+	reg_val |= (pfc_en << 16) & SXE_FCTRL_TFCE_FCEN_MASK;
+	reg_val |= (pfc_en << 24) & SXE_FCTRL_TFCE_XONE_MASK;
+
+	reg_val &= ~SXE_FCTRL_RFCE_MASK;
+	reg_val |= SXE_FCTRL_RFCE_PFC_EN;
+	SXE_REG_WRITE(hw, SXE_FLCTRL, reg_val);
+
+	reg_val = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg_val &= ~SXE_PFCTOP_FCOP_MASK;
+	reg_val |= SXE_PFCTOP_FCT;
+	reg_val |= SXE_PFCTOP_FCOP_PFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg_val);
+
+	for (i = 0; i < max_priority; i++) {
+		if (prio_tc[i] > max_tc) {
+			max_tc = prio_tc[i];
+		}
+	}
+
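+	/*
+	 * Program per-TC flow-control thresholds: TCs with a PFC-enabled
+	 * priority get the caller-supplied high/low water marks (the << 9
+	 * suggests 512-byte granularity); other TCs get a threshold derived
+	 * from the packet-buffer size with XON/XOFF generation disabled.
+	 */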
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < max_priority; j++) {
+			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
+		} else {
+			reg = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		}
+
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), reg);
+	}
+
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), 0);
+	}
+
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+	return;
+}
+
+static void sxe_hw_dcb_8tc_vmdq_off_stats_configure(struct sxe_hw *hw)
+{
+	u32 reg;
+	u8  i;
+
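+	/*
+	 * Map stats registers onto traffic classes for the 8-TC, VMDq-off
+	 * layout: RQSMR packs four RX queues per register (sixteen
+	 * consecutive queues share a TC), while TQSM reflects the skewed
+	 * TX queue distribution across the eight TCs.
+	 */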
+	for (i = 0; i < 32; i++) {
+		reg = 0x01010101 * (i / 4);
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), reg);
+	}
+
+	for (i = 0; i < 32; i++) {
+		if (i < 8) {
+			reg = 0x00000000;
+		} else if (i < 16) {
+			reg = 0x01010101;
+		} else if (i < 20) {
+			reg = 0x02020202;
+		} else if (i < 24) {
+			reg = 0x03030303;
+		} else if (i < 26) {
+			reg = 0x04040404;
+		} else if (i < 28) {
+			reg = 0x05050505;
+		} else if (i < 30) {
+			reg = 0x06060606;
+		} else {
+			reg = 0x07070707;
+		}
+
+		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
+	}
+
+	return;
+}
+
+static void sxe_hw_dcb_rx_up_tc_map_set(struct sxe_hw *hw, u8 tc)
+{
+	u8 i;
+	u32 reg, rsave;
+
+	reg = SXE_REG_READ(hw, SXE_RTRUP2TC);
+	rsave = reg;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = (reg >> (i * SXE_RTRUP2TC_UP_SHIFT)) &
+				SXE_RTRUP2TC_UP_MASK;
+
+		if (up2tc > tc) {
+			reg &= ~(SXE_RTRUP2TC_UP_MASK <<
+					(i * SXE_RTRUP2TC_UP_SHIFT));
+		}
+	}
+
+	if (reg != rsave) {
+		SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
+	}
+
+	return;
+}
+
+void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
+							bool is_enable)
+{
+	if (is_enable) {
+		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN);
+	} else {
+		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, 0);
+	}
+
+	return;
+}
+
+void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u16 pf_vlan, u8 ring_per_pool)
+{
+	u32 qde = SXE_QDE_ENABLE;
+	u8 i;
+
+	if (pf_vlan) {
+		qde |= SXE_QDE_HIDE_VLAN;
+	}
+
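+	/*
+	 * QDE is an indirect register: each write carries the queue index,
+	 * the WRITE strobe and the drop-enable bits for that queue.
+	 */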
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) {
+		u32 value;
+
+		SXE_WRITE_FLUSH(hw);
+
+		value = i << SXE_QDE_IDX_SHIFT;
+		value |= qde | SXE_QDE_WRITE;
+
+		SXE_REG_WRITE(hw, SXE_QDE, value);
+	}
+
+	return;
+}
+
+u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_VFRE(reg_idx));
+}
+
+void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap)
+{
+	SXE_REG_WRITE(hw, SXE_VFRE(reg_idx), bitmap);
+
+	return;
+}
+
+u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_VFTE(reg_idx));
+}
+
+void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap)
+{
+	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), bitmap);
+
+	return;
+}
+
+void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RTTBCNRM, value);
+
+	return;
+}
+
+void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
+							u32 ring_idx, u32 rate)
+{
+	SXE_REG_WRITE(hw, SXE_RTTDQSEL, ring_idx);
+	SXE_REG_WRITE(hw, SXE_RTTBCNRC, rate);
+
+	return;
+}
+
+void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_index)
+{
+	u32 value = SXE_REG_READ(hw, SXE_VMECM(reg_idx));
+
+	value |= BIT(bit_index);
+
+	SXE_REG_WRITE(hw, SXE_VMECM(reg_idx), value);
+
+	return;
+}
+
+void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
+							u8 vf_idx, bool status)
+{
+	u8 reg_index = vf_idx >> 3;
+	u8 bit_index = vf_idx % 8;
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
+
+	if (status) {
+		value |= BIT(bit_index);
+	} else {
+		value &= ~BIT(bit_index);
+	}
+
+	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
+
+	return;
+}
+
+static void sxe_hw_dcb_rx_up_tc_map_get(struct sxe_hw *hw, u8 *map)
+{
+	u32 reg, i;
+
+	reg = SXE_REG_READ(hw, SXE_RTRUP2TC);
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		map[i] = SXE_RTRUP2TC_UP_MASK &
+			(reg >> (i * SXE_RTRUP2TC_UP_SHIFT));
+	}
+
+	return;
+}
+
+void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable)
+{
+	u32 srrctl = SXE_REG_READ(hw, SXE_SRRCTL(idx));
+
+	if (is_enable) {
+		srrctl |= SXE_SRRCTL_DROP_EN;
+	} else {
+		srrctl &= ~SXE_SRRCTL_DROP_EN;
+	}
+
+	SXE_REG_WRITE(hw, SXE_SRRCTL(idx), srrctl);
+
+	return;
+}
+
+static void sxe_hw_pool_vlan_anti_spoof_set(struct sxe_hw *hw,
+							u8 vf_idx, bool status)
+{
+	u8 reg_index = vf_idx >> 3;
+	u8 bit_index = (vf_idx % 8) + SXE_SPOOF_VLAN_SHIFT;
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
+
+	if (status) {
+		value |= BIT(bit_index);
+	} else {
+		value &= ~BIT(bit_index);
+	}
+
+	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
+
+	return;
+}
+
+static void sxe_hw_vf_tx_desc_addr_clear(struct sxe_hw *hw,
+						u8 vf_idx, u8 ring_per_pool)
+{
+	u8 i;
+
+	for (i = 0; i < ring_per_pool; i++) {
+		SXE_REG_WRITE(hw, SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, i), 0);
+		SXE_REG_WRITE(hw, SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, i), 0);
+	}
+
+	return;
+}
+
+static void sxe_hw_vf_tx_ring_disable(struct sxe_hw *hw,
+						u8 ring_per_pool, u8 vf_idx)
+{
+	u32 ring_idx;
+	u32 reg;
+
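+	/*
+	 * For any ring that still has TXDCTL state, pulse the enable bit on
+	 * and back off; this is assumed to flush pending descriptor
+	 * write-back before the pool is handed back.
+	 */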
+	for (ring_idx = 0; ring_idx < ring_per_pool; ring_idx++) {
+		u32 reg_idx = vf_idx * ring_per_pool + ring_idx;
+		reg = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		if (reg) {
+			reg |= SXE_TXDCTL_ENABLE;
+			SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg);
+			reg &= ~SXE_TXDCTL_ENABLE;
+			SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg);
+		}
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max)
+{
+	u32 i;
+
+	for (i = 0; i < ring_max; i++) {
+		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
+		SXE_REG_WRITE(hw, SXE_RTTBCNRC, 0);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void sxe_hw_tx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
+{
+	u32 value = cpu;
+
+	value <<= SXE_TPH_TXCTRL_CPUID_SHIFT;
+
+	value |= SXE_TPH_TXCTRL_DESC_RRO_EN | \
+		 SXE_TPH_TXCTRL_DATA_RRO_EN | \
+		 SXE_TPH_TXCTRL_DESC_TPH_EN;
+
+	SXE_REG_WRITE(hw, SXE_TPH_TXCTRL(ring_idx), value);
+	return;
+}
+
+static void sxe_hw_rx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
+{
+	u32 value = cpu;
+
+	value <<= SXE_TPH_RXCTRL_CPUID_SHIFT;
+
+	value |= SXE_TPH_RXCTRL_DESC_RRO_EN | \
+		 SXE_TPH_RXCTRL_DATA_TPH_EN | \
+		 SXE_TPH_RXCTRL_DESC_TPH_EN;
+
+	SXE_REG_WRITE(hw, SXE_TPH_RXCTRL(ring_idx), value);
+	return;
+}
+
+static void sxe_hw_tph_switch(struct sxe_hw *hw, bool is_enable)
+{
+	if (is_enable) {
+		SXE_REG_WRITE(hw, SXE_TPH_CTRL, SXE_TPH_CTRL_MODE_CB2);
+	} else {
+		SXE_REG_WRITE(hw, SXE_TPH_CTRL, SXE_TPH_CTRL_DISABLE);
+	}
+
+	return;
+}
+
+static const struct sxe_dma_operations sxe_dma_ops = {
+	.rx_dma_ctrl_init		= sxe_hw_rx_dma_ctrl_init,
+	.rx_ring_switch			= sxe_hw_rx_ring_switch,
+	.rx_ring_switch_not_polling	= sxe_hw_rx_ring_switch_not_polling,
+	.rx_ring_desc_configure		= sxe_hw_rx_ring_desc_configure,
+	.rx_desc_thresh_set		= sxe_hw_rx_desc_thresh_set,
+	.rx_rcv_ctl_configure		= sxe_hw_rx_rcv_ctl_configure,
+	.rx_lro_ctl_configure		= sxe_hw_rx_lro_ctl_configure,
+	.rx_desc_ctrl_get		= sxe_hw_rx_desc_ctrl_get,
+	.rx_dma_lro_ctl_set		= sxe_hw_rx_dma_lro_ctrl_set,
+	.rx_drop_switch			= sxe_hw_rx_drop_switch,
+	.pool_rx_ring_drop_enable	= sxe_hw_pool_rx_ring_drop_enable,
+	.rx_tph_update			= sxe_hw_rx_tph_update,
+
+	.tx_enable			= sxe_hw_tx_enable,
+	.tx_multi_ring_configure	= sxe_hw_tx_multi_ring_configure,
+	.tx_ring_desc_configure		= sxe_hw_tx_ring_desc_configure,
+	.tx_desc_thresh_set		= sxe_hw_tx_desc_thresh_set,
+	.tx_desc_wb_thresh_clear	= sxe_hw_tx_desc_wb_thresh_clear,
+	.tx_ring_switch			= sxe_hw_tx_ring_switch,
+	.tx_ring_switch_not_polling     = sxe_hw_tx_ring_switch_not_polling,
+	.tx_pkt_buf_thresh_configure	= sxe_hw_tx_pkt_buf_thresh_configure,
+	.tx_desc_ctrl_get		= sxe_hw_tx_desc_ctrl_get,
+	.tx_ring_info_get		= sxe_hw_tx_ring_info_get,
+	.tx_tph_update			= sxe_hw_tx_tph_update,
+
+	.tph_switch			= sxe_hw_tph_switch,
+
+	.vlan_tag_strip_switch		= sxe_hw_vlan_tag_strip_switch,
+	.tx_vlan_tag_set		= sxe_hw_tx_vlan_tag_set,
+	.tx_vlan_tag_clear		= sxe_hw_tx_vlan_tag_clear,
+
+	.dcb_rx_bw_alloc_configure	= sxe_hw_dcb_rx_bw_alloc_configure,
+	.dcb_tx_desc_bw_alloc_configure	= sxe_hw_dcb_tx_desc_bw_alloc_configure,
+	.dcb_tx_data_bw_alloc_configure	= sxe_hw_dcb_tx_data_bw_alloc_configure,
+	.dcb_pfc_configure		= sxe_hw_dcb_pfc_configure,
+	.dcb_tc_stats_configure		= sxe_hw_dcb_8tc_vmdq_off_stats_configure,
+	.dcb_rx_up_tc_map_set		= sxe_hw_dcb_rx_up_tc_map_set,
+	.dcb_rx_up_tc_map_get		= sxe_hw_dcb_rx_up_tc_map_get,
+	.dcb_rate_limiter_clear		= sxe_hw_dcb_rate_limiter_clear,
+	.dcb_tx_ring_rate_factor_set	= sxe_hw_dcb_tx_ring_rate_factor_set,
+
+	.vt_pool_loopback_switch	= sxe_hw_vt_pool_loopback_switch,
+	.rx_pool_get			= sxe_hw_rx_pool_bitmap_get,
+	.rx_pool_set			= sxe_hw_rx_pool_bitmap_set,
+	.tx_pool_get			= sxe_hw_tx_pool_bitmap_get,
+	.tx_pool_set			= sxe_hw_tx_pool_bitmap_set,
+
+	.vf_tx_desc_addr_clear		= sxe_hw_vf_tx_desc_addr_clear,
+	.pool_mac_anti_spoof_set	= sxe_hw_pool_mac_anti_spoof_set,
+	.pool_vlan_anti_spoof_set	= sxe_hw_pool_vlan_anti_spoof_set,
+
+	.max_dcb_memory_window_set	= sxe_hw_dcb_max_mem_window_set,
+	.spoof_count_enable		= sxe_hw_spoof_count_enable,
+
+	.vf_tx_ring_disable	        = sxe_hw_vf_tx_ring_disable,
+	.all_ring_disable               = sxe_hw_all_ring_disable,
+	.tx_ring_tail_init 	        = sxe_hw_tx_ring_tail_init,
+};
+
+
+#ifdef SXE_IPSEC_CONFIGURE
+
+static void sxe_hw_ipsec_rx_sa_load(struct sxe_hw *hw, u16 idx,
+					u8 type)
+{
+	u32 reg = SXE_REG_READ(hw, SXE_IPSRXIDX);
+
+	reg &= SXE_RXTXIDX_IPS_EN;
+	reg |= type << SXE_RXIDX_TBL_SHIFT |
+	       idx << SXE_RXTXIDX_IDX_SHIFT |
+	       SXE_RXTXIDX_WRITE;
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void sxe_hw_ipsec_rx_ip_store(struct sxe_hw *hw,
+					     __be32 *ip_addr, u8 ip_len, u8 ip_idx)
+{
+	u8 i;
+
+	for (i = 0; i < ip_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSRXIPADDR(i),
+				(__force u32)cpu_to_le32((__force u32)ip_addr[i]));
+	}
+	SXE_WRITE_FLUSH(hw);
+	sxe_hw_ipsec_rx_sa_load(hw, ip_idx, SXE_IPSEC_IP_TABLE);
+
+	return;
+}
+
+static void sxe_hw_ipsec_rx_spi_store(struct sxe_hw *hw,
+					     __be32 spi, u8 ip_idx, u16 sa_idx)
+{
+	SXE_REG_WRITE(hw, SXE_IPSRXSPI, (__force u32)cpu_to_le32((__force u32)spi));
+
+	SXE_REG_WRITE(hw, SXE_IPSRXIPIDX, ip_idx);
+
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_SPI_TABLE);
+
+	return;
+}
+
+static void sxe_hw_ipsec_rx_key_store(struct sxe_hw *hw,
+			u32 *key,  u8 key_len, u32 salt, u32 mode, u16 sa_idx)
+{
+	u8 i;
+
+	for (i = 0; i < key_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSRXKEY(i),
+				(__force u32)cpu_to_be32(key[(key_len - 1) - i]));
+	}
+
+	SXE_REG_WRITE(hw, SXE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
+	SXE_REG_WRITE(hw, SXE_IPSRXMOD, mode);
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_KEY_TABLE);
+
+	return;
+}
+
+static void sxe_hw_ipsec_tx_sa_load(struct sxe_hw *hw, u16 idx)
+{
+	u32 reg = SXE_REG_READ(hw, SXE_IPSTXIDX);
+
+	reg &= SXE_RXTXIDX_IPS_EN;
+	reg |= idx << SXE_RXTXIDX_IDX_SHIFT | SXE_RXTXIDX_WRITE;
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void sxe_hw_ipsec_tx_key_store(struct sxe_hw *hw, u32 *key,
+						u8 key_len, u32 salt, u16 sa_idx)
+{
+	u8 i;
+
+	for (i = 0; i < key_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSTXKEY(i),
+			(__force u32)cpu_to_be32(key[(key_len - 1) - i]));
+	}
+	SXE_REG_WRITE(hw, SXE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_tx_sa_load(hw, sa_idx);
+
+	return;
+}
+
+static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 tx_empty, rx_empty;
+	u32 limit;
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) & SXE_SECTXSTAT_SECTX_RDY;
+	rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) & SXE_SECRXSTAT_SECRX_RDY;
+	if (tx_empty && rx_empty) {
+		goto l_out;
+	}
+
+	if (!is_linkup) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, SXE_LPBKCTRL_EN);
+
+		SXE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	limit = 20;
+	do {
+		mdelay(10);
+		tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+			   SXE_SECTXSTAT_SECTX_RDY;
+		rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+			   SXE_SECRXSTAT_SECRX_RDY;
+	} while (!(tx_empty && rx_empty) && limit--);
+
+	if (!is_linkup) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0);
+
+		SXE_WRITE_FLUSH(hw);
+	}
+
+l_out:
+	return;
+}
+
+static void sxe_hw_ipsec_engine_start(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 reg;
+
+	sxe_hw_ipsec_sec_data_stop(hw, is_linkup);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXBUFFAF);
+	reg = (reg & 0xfffffc00) | 0x15;
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, 0);
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_STORE_FORWARD);
+
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, SXE_RXTXIDX_IPS_EN);
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, SXE_RXTXIDX_IPS_EN);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void sxe_hw_ipsec_engine_stop(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 reg;
+
+	sxe_hw_ipsec_sec_data_stop(hw, is_linkup);
+
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_SECRX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+bool sxe_hw_ipsec_offload_is_disable(struct sxe_hw *hw)
+{
+	u32 tx_dis = SXE_REG_READ(hw, SXE_SECTXSTAT);
+	u32 rx_dis = SXE_REG_READ(hw, SXE_SECRXSTAT);
+	bool ret = false;
+
+	if ((tx_dis & SXE_SECTXSTAT_SECTX_OFF_DIS) ||
+	    (rx_dis & SXE_SECRXSTAT_SECRX_OFF_DIS)) {
+		ret = true;
+	}
+
+	return ret;
+}
+
+void sxe_hw_ipsec_sa_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
+
+	return;
+}
+
+static const struct sxe_sec_operations sxe_sec_ops = {
+	.ipsec_rx_ip_store		= sxe_hw_ipsec_rx_ip_store,
+	.ipsec_rx_spi_store		= sxe_hw_ipsec_rx_spi_store,
+	.ipsec_rx_key_store		= sxe_hw_ipsec_rx_key_store,
+	.ipsec_tx_key_store		= sxe_hw_ipsec_tx_key_store,
+	.ipsec_sec_data_stop		= sxe_hw_ipsec_sec_data_stop,
+	.ipsec_engine_start		= sxe_hw_ipsec_engine_start,
+	.ipsec_engine_stop		= sxe_hw_ipsec_engine_stop,
+	.ipsec_sa_disable		= sxe_hw_ipsec_sa_disable,
+	.ipsec_offload_is_disable	= sxe_hw_ipsec_offload_is_disable,
+};
+#else
+static const struct sxe_sec_operations sxe_sec_ops = { 0 };
+#endif
+
+
+void sxe_hw_stats_regs_clean(struct sxe_hw *hw)
+{
+	u16 i;
+	for (i = 0; i < 16; i++) {
+		SXE_REG_READ(hw, SXE_QPTC(i));
+		SXE_REG_READ(hw, SXE_QPRC(i));
+		SXE_REG_READ(hw, SXE_QBTC_H(i));
+		SXE_REG_READ(hw, SXE_QBTC_L(i));
+		SXE_REG_READ(hw, SXE_QBRC_H(i));
+		SXE_REG_READ(hw, SXE_QBRC_L(i));
+		SXE_REG_READ(hw, SXE_QPRDC(i));
+	}
+
+	SXE_REG_READ(hw, SXE_RXDGBCH);
+	SXE_REG_READ(hw, SXE_RXDGBCL);
+	SXE_REG_READ(hw, SXE_RXDGPC);
+	SXE_REG_READ(hw, SXE_TXDGPC);
+	SXE_REG_READ(hw, SXE_TXDGBCH);
+	SXE_REG_READ(hw, SXE_TXDGBCL);
+	SXE_REG_READ(hw, SXE_RXDDGPC);
+	SXE_REG_READ(hw, SXE_RXDDGBCH);
+	SXE_REG_READ(hw, SXE_RXDDGBCL);
+	SXE_REG_READ(hw, SXE_RXLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXLPBKGBCH);
+	SXE_REG_READ(hw, SXE_RXLPBKGBCL);
+	SXE_REG_READ(hw, SXE_RXDLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXDLPBKGBCH);
+	SXE_REG_READ(hw, SXE_RXDLPBKGBCL);
+	SXE_REG_READ(hw, SXE_RXTPCIN);
+	SXE_REG_READ(hw, SXE_RXTPCOUT);
+	SXE_REG_READ(hw, SXE_RXPRDDC);
+	SXE_REG_READ(hw, SXE_TXSWERR);
+	SXE_REG_READ(hw, SXE_TXSWITCH);
+	SXE_REG_READ(hw, SXE_TXREPEAT);
+	SXE_REG_READ(hw, SXE_TXDESCERR);
+
+	SXE_REG_READ(hw, SXE_CRCERRS);
+	SXE_REG_READ(hw, SXE_ERRBC);
+	SXE_REG_READ(hw, SXE_RLEC);
+	SXE_REG_READ(hw, SXE_PRC64);
+	SXE_REG_READ(hw, SXE_PRC127);
+	SXE_REG_READ(hw, SXE_PRC255);
+	SXE_REG_READ(hw, SXE_PRC511);
+	SXE_REG_READ(hw, SXE_PRC1023);
+	SXE_REG_READ(hw, SXE_PRC1522);
+	SXE_REG_READ(hw, SXE_GPRC);
+	SXE_REG_READ(hw, SXE_BPRC);
+	SXE_REG_READ(hw, SXE_MPRC);
+	SXE_REG_READ(hw, SXE_GPTC);
+	SXE_REG_READ(hw, SXE_GORCL);
+	SXE_REG_READ(hw, SXE_GORCH);
+	SXE_REG_READ(hw, SXE_GOTCL);
+	SXE_REG_READ(hw, SXE_GOTCH);
+	SXE_REG_READ(hw, SXE_RUC);
+	SXE_REG_READ(hw, SXE_RFC);
+	SXE_REG_READ(hw, SXE_ROC);
+	SXE_REG_READ(hw, SXE_RJC);
+	for (i = 0; i < 8; i++) {
+		SXE_REG_READ(hw, SXE_PRCPF(i));
+	}
+	SXE_REG_READ(hw, SXE_TORL);
+	SXE_REG_READ(hw, SXE_TORH);
+	SXE_REG_READ(hw, SXE_TPR);
+	SXE_REG_READ(hw, SXE_TPT);
+	SXE_REG_READ(hw, SXE_PTC64);
+	SXE_REG_READ(hw, SXE_PTC127);
+	SXE_REG_READ(hw, SXE_PTC255);
+	SXE_REG_READ(hw, SXE_PTC511);
+	SXE_REG_READ(hw, SXE_PTC1023);
+	SXE_REG_READ(hw, SXE_PTC1522);
+	SXE_REG_READ(hw, SXE_MPTC);
+	SXE_REG_READ(hw, SXE_BPTC);
+	for (i = 0; i < 8; i++) {
+		SXE_REG_READ(hw, SXE_PFCT(i));
+	}
+
+	return;
+}
+
+static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u8 i;
+	u64 tx_pfc_num = 0;
+#ifdef SXE_DPDK
+	u64 gotch = 0;
+	u32 retry_cnt = 10;
+#endif
+
+	for (i = 0; i < 8; i++) {
+		stats->prcpf[i] += SXE_REG_READ(hw, SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw, SXE_PFCT(i));
+		stats->pfct[i] += tx_pfc_num;
+		stats->total_tx_pause += tx_pfc_num;
+	}
+
+	stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC);
+	stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) |
+			((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32));
+#ifdef SXE_DPDK
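+	/*
+	 * GOTCH is read-to-clear; keep reading (up to retry_cnt times)
+	 * until it returns zero so the next delta starts from a clean
+	 * register.
+	 */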
+	do {
+		gotch = SXE_REG_READ(hw, SXE_GOTCH);
+		retry_cnt--;
+	} while (gotch != 0 && retry_cnt != 0);
+	if (gotch != 0) {
+		LOG_INFO("GOTCH is not clear!\n");
+	}
+#endif
+
+	return;
+}
+
+void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u8 i;
+	u64 tx_pfc_num = 0;
+	u64 gotch = 0;
+	u32 retry_cnt = 10;
+
+	stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) |
+			((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32));
+	stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC);
+	do {
+		gotch = SXE_REG_READ(hw, SXE_GOTCH);
+		retry_cnt--;
+	} while (gotch != 0 && retry_cnt != 0);
+	if (gotch != 0) {
+		LOG_INFO("GOTCH is not clear!\n");
+	}
+
+	for (i = 0; i < 8; i++) {
+		stats->prcpf[i] += SXE_REG_READ(hw, SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw, SXE_PFCT(i));
+		stats->pfct[i] += tx_pfc_num;
+		stats->total_tx_pause += tx_pfc_num;
+	}
+
+	return;
+}
+
+void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u64 rjc;
+	u32 i, rx_dbu_drop, ring_drop = 0;
+	u64 tpr = 0;
+#ifdef SXE_DPDK
+	u32 retry_cnt = 10;
+	u64 gorch, torh = 0;
+#endif
+
+	for (i = 0; i < 16; i++) {
+		stats->qptc[i] += SXE_REG_READ(hw, SXE_QPTC(i));
+		stats->qprc[i] += SXE_REG_READ(hw, SXE_QPRC(i));
+		ring_drop = SXE_REG_READ(hw, SXE_QPRDC(i));
+		stats->qprdc[i] += ring_drop;
+		stats->hw_rx_no_dma_resources += ring_drop;
+
+		stats->qbtc[i] += ((u64)SXE_REG_READ(hw, SXE_QBTC_H(i)) << 32);
+		SXE_RMB();
+		stats->qbtc[i] += SXE_REG_READ(hw, SXE_QBTC_L(i));
+
+		stats->qbrc[i] += ((u64)SXE_REG_READ(hw, SXE_QBRC_H(i)) << 32);
+		SXE_RMB();
+		stats->qbrc[i] += SXE_REG_READ(hw, SXE_QBRC_L(i));
+	}
+	stats->rxdgbc += ((u64)SXE_REG_READ(hw, SXE_RXDGBCH) << 32) +
+				(SXE_REG_READ(hw, SXE_RXDGBCL));
+
+	stats->rxdgpc += SXE_REG_READ(hw, SXE_RXDGPC);
+	stats->txdgpc += SXE_REG_READ(hw, SXE_TXDGPC);
+	stats->txdgbc += (((u64)SXE_REG_READ(hw, SXE_TXDGBCH) << 32) +
+				SXE_REG_READ(hw, SXE_TXDGBCL));
+
+	stats->rxddpc += SXE_REG_READ(hw, SXE_RXDDGPC);
+	stats->rxddbc += ((u64)SXE_REG_READ(hw, SXE_RXDDGBCH) << 32) +
+				(SXE_REG_READ(hw, SXE_RXDDGBCL));
+
+	stats->rxlpbkpc += SXE_REG_READ(hw, SXE_RXLPBKGPC);
+	stats->rxlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXLPBKGBCH) << 32) +
+			(SXE_REG_READ(hw, SXE_RXLPBKGBCL));
+
+	stats->rxdlpbkpc += SXE_REG_READ(hw, SXE_RXDLPBKGPC);
+	stats->rxdlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXDLPBKGBCH) << 32) +
+				(SXE_REG_READ(hw, SXE_RXDLPBKGBCL));
+	stats->rxtpcing += SXE_REG_READ(hw, SXE_RXTPCIN);
+	stats->rxtpceng += SXE_REG_READ(hw, SXE_RXTPCOUT);
+	stats->prddc += SXE_REG_READ(hw, SXE_RXPRDDC);
+	stats->txswerr += SXE_REG_READ(hw, SXE_TXSWERR);
+	stats->txswitch += SXE_REG_READ(hw, SXE_TXSWITCH);
+	stats->txrepeat += SXE_REG_READ(hw, SXE_TXREPEAT);
+	stats->txdescerr += SXE_REG_READ(hw, SXE_TXDESCERR);
+
+	for (i = 0; i < 8; i++) {
+		stats->dburxtcin[i] += SXE_REG_READ(hw, SXE_DBUDRTCICNT(i));
+		stats->dburxtcout[i] += SXE_REG_READ(hw, SXE_DBUDRTCOCNT(i));
+		stats->dburxgdreecnt[i] += SXE_REG_READ(hw, SXE_DBUDREECNT(i));
+		rx_dbu_drop = SXE_REG_READ(hw, SXE_DBUDROFPCNT(i));
+		stats->dburxdrofpcnt[i] += rx_dbu_drop;
+		stats->dbutxtcin[i] += SXE_REG_READ(hw, SXE_DBUDTTCICNT(i));
+		stats->dbutxtcout[i] += SXE_REG_READ(hw, SXE_DBUDTTCOCNT(i));
+	}
+
+	stats->fnavadd += (SXE_REG_READ(hw, SXE_FNAVUSTAT) & 0xFFFF);
+	stats->fnavrmv += ((SXE_REG_READ(hw, SXE_FNAVUSTAT) >> 16) & 0xFFFF);
+	stats->fnavadderr += (SXE_REG_READ(hw, SXE_FNAVFSTAT) & 0xFFFF);
+	stats->fnavrmverr += ((SXE_REG_READ(hw, SXE_FNAVFSTAT) >> 16) & 0xFFFF);
+	stats->fnavmatch += SXE_REG_READ(hw, SXE_FNAVMATCH);
+	stats->fnavmiss += SXE_REG_READ(hw, SXE_FNAVMISS);
+
+	sxe_hw_stats_seq_get(hw, stats);
+
+	stats->crcerrs += SXE_REG_READ(hw, SXE_CRCERRS);
+	stats->errbc   += SXE_REG_READ(hw, SXE_ERRBC);
+	stats->bprc += SXE_REG_READ(hw, SXE_BPRC);
+	stats->mprc += SXE_REG_READ(hw, SXE_MPRC);
+	stats->roc += SXE_REG_READ(hw, SXE_ROC);
+	stats->prc64 += SXE_REG_READ(hw, SXE_PRC64);
+	stats->prc127 += SXE_REG_READ(hw, SXE_PRC127);
+	stats->prc255 += SXE_REG_READ(hw, SXE_PRC255);
+	stats->prc511 += SXE_REG_READ(hw, SXE_PRC511);
+	stats->prc1023 += SXE_REG_READ(hw, SXE_PRC1023);
+	stats->prc1522 += SXE_REG_READ(hw, SXE_PRC1522);
+	stats->rlec += SXE_REG_READ(hw, SXE_RLEC);
+	stats->mptc += SXE_REG_READ(hw, SXE_MPTC);
+	stats->ruc += SXE_REG_READ(hw, SXE_RUC);
+	stats->rfc += SXE_REG_READ(hw, SXE_RFC);
+
+	rjc = SXE_REG_READ(hw, SXE_RJC);
+	stats->rjc += rjc;
+	stats->roc += rjc;
+
+	tpr = SXE_REG_READ(hw, SXE_TPR);
+	stats->tpr += tpr;
+	stats->tpt += SXE_REG_READ(hw, SXE_TPT);
+	stats->ptc64 += SXE_REG_READ(hw, SXE_PTC64);
+	stats->ptc127 += SXE_REG_READ(hw, SXE_PTC127);
+	stats->ptc255 += SXE_REG_READ(hw, SXE_PTC255);
+	stats->ptc511 += SXE_REG_READ(hw, SXE_PTC511);
+	stats->ptc1023 += SXE_REG_READ(hw, SXE_PTC1023);
+	stats->ptc1522 += SXE_REG_READ(hw, SXE_PTC1522);
+	stats->bptc += SXE_REG_READ(hw, SXE_BPTC);
+
+	stats->gprc += SXE_REG_READ(hw, SXE_GPRC);
+	stats->gorc += (SXE_REG_READ(hw, SXE_GORCL) |
+			((u64)SXE_REG_READ(hw, SXE_GORCH) << 32));
+#ifdef SXE_DPDK
+	do {
+		gorch = SXE_REG_READ(hw, SXE_GORCH);
+		retry_cnt--;
+	} while (gorch != 0 && retry_cnt != 0);
+	if (gorch != 0) {
+		LOG_INFO("GORCH is not clear!\n");
+	}
+#endif
+
+	stats->tor += (SXE_REG_READ(hw, SXE_TORL) |
+			((u64)SXE_REG_READ(hw, SXE_TORH) << 32));
+#ifdef SXE_DPDK
+	retry_cnt = 10;
+	do {
+		torh = SXE_REG_READ(hw, SXE_TORH);
+		retry_cnt--;
+	} while (torh != 0 && retry_cnt != 0);
+	if (torh != 0) {
+		LOG_INFO("TORH is not clear!\n");
+	}
+#endif
+
+#ifdef SXE_DPDK
+	stats->tor -= tpr * RTE_ETHER_CRC_LEN;
+	stats->gptc = stats->total_gptc - stats->total_tx_pause;
+	stats->gotc = stats->total_gotc - stats->total_tx_pause * RTE_ETHER_MIN_LEN
+			- stats->gptc * RTE_ETHER_CRC_LEN;
+#else
+	stats->gptc = stats->total_gptc;
+	stats->gotc = stats->total_gotc;
+#endif
+
+	return;
+}
+
+static u32 sxe_hw_tx_packets_num_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_TXDGPC);
+}
+
+static u32 sxe_hw_unsec_packets_num_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_SSVPC);
+}
+
+static u32 sxe_hw_mac_stats_dump(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size)
+{
+	u32 i;
+	u32 regs_num = buf_size / sizeof(u32);
+
+	for (i = 0; i < regs_num; i++) {
+		regs_buff[i] = SXE_REG_READ(hw, mac_regs[i]);
+	}
+
+	return i;
+}
+
+static u32 sxe_hw_tx_dbu_to_mac_stats(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_DTMPCNT);
+}
+
+static const struct sxe_stat_operations sxe_stat_ops = {
+	.stats_get			= sxe_hw_stats_get,
+	.stats_clear			= sxe_hw_stats_regs_clean,
+	.mac_stats_dump			= sxe_hw_mac_stats_dump,
+	.tx_packets_num_get		= sxe_hw_tx_packets_num_get,
+	.unsecurity_packets_num_get	= sxe_hw_unsec_packets_num_get,
+	.tx_dbu_to_mac_stats		= sxe_hw_tx_dbu_to_mac_stats,
+};
+
+void sxe_hw_mbx_init(struct sxe_hw *hw)
+{
+	hw->mbx.msg_len = SXE_MBX_MSG_NUM;
+	hw->mbx.interval = SXE_MBX_RETRY_INTERVAL;
+	hw->mbx.retry = SXE_MBX_RETRY_COUNT;
+
+	hw->mbx.stats.rcv_msgs   = 0;
+	hw->mbx.stats.send_msgs  = 0;
+	hw->mbx.stats.acks       = 0;
+	hw->mbx.stats.reqs       = 0;
+	hw->mbx.stats.rsts       = 0;
+
+	return;
+}
+
+static bool sxe_hw_vf_irq_check(struct sxe_hw *hw, u32 mask, u32 index)
+{
+	u32 value = SXE_REG_READ(hw, SXE_PFMBICR(index));
+
+	if (value & mask) {
+		SXE_REG_WRITE(hw, SXE_PFMBICR(index), mask);
+		return true;
+	}
+
+	return false;
+}
+
+bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u32 index = vf_idx >> 5;
+	u32 bit = vf_idx % 32;
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_VFLRE(index));
+	if (value & BIT(bit)) {
+		SXE_REG_WRITE(hw, SXE_VFLREC(index), BIT(bit));
+		hw->mbx.stats.rsts++;
+		return true;
+	}
+
+	return false;
+}
+
+bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 index = vf_idx >> 4;
+	u8 bit = vf_idx % 16;
+
+	if (sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFREQ << bit, index)) {
+		hw->mbx.stats.reqs++;
+		return true;
+	}
+
+	return false;
+}
+
+bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 index = vf_idx >> 4;
+	u8 bit = vf_idx % 16;
+
+	if (sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFACK << bit, index)) {
+		hw->mbx.stats.acks++;
+		return true;
+	}
+
+	return false;
+}
+
+static bool sxe_hw_mbx_lock(struct sxe_hw *hw, u8 vf_idx)
+{
+	u32 value;
+	bool ret = false;
+	u32 retry = hw->mbx.retry;
+
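+	/*
+	 * Claim the PF side of the mailbox by setting PFU; ownership only
+	 * counts once the bit reads back as set.
+	 */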
+	while (retry--) {
+		SXE_REG_WRITE(hw, SXE_PFMAILBOX(vf_idx), SXE_PFMAILBOX_PFU);
+
+		value = SXE_REG_READ(hw, SXE_PFMAILBOX(vf_idx));
+		if (value & SXE_PFMAILBOX_PFU) {
+			ret = true;
+			break;
+		}
+
+		udelay(hw->mbx.interval);
+	}
+
+	return ret;
+}
+
+s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+	u8 i;
+	s32 ret = 0;
+	u16 msg_entry;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	if (!sxe_hw_mbx_lock(hw, index)) {
+		ret = -SXE_ERR_MBX_LOCK_FAIL;
+		LOG_ERROR_BDF("vf idx:%d msg_len:%d rcv lock mailbox fail.(err:%d)\n",
+			   index, msg_len, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = SXE_REG_READ(hw, (SXE_PFMBMEM(index) + (i << 2)));
+		LOG_DEBUG_BDF("vf_idx:%u read mbx mem[%u]:0x%x.\n",
+			      index, i, msg[i]);
+	}
+
+	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_ACK);
+	mbx->stats.rcv_msgs++;
+
+l_out:
+	return ret;
+}
+
+s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+	u8 i;
+	s32 ret = 0;
+	u32 old;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (msg_len > mbx->msg_len) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("pf reply msg num:%d exceed limit:%d reply fail.(err:%d)\n",
+			  msg_len, mbx->msg_len, ret);
+		goto l_out;
+	}
+
+	if (!sxe_hw_mbx_lock(hw, index)) {
+		ret = -SXE_ERR_MBX_LOCK_FAIL;
+		LOG_ERROR_BDF("send msg len:%u to vf idx:%u msg[0]:0x%x "
+			   "lock mailbox fail.(err:%d)\n",
+			   msg_len, index, msg[0], ret);
+		goto l_out;
+	}
+
+	old = SXE_REG_READ(hw, (SXE_PFMBMEM(index)));
+	LOG_DEBUG_BDF("original send msg:0x%x. mbx mem[0]:0x%x\n", *msg, old);
+	if (msg[0] & SXE_CTRL_MSG_MASK) {
+		msg[0] |= (old & SXE_MSGID_MASK);
+	} else {
+		msg[0] |= (old & SXE_PFMSG_MASK);
+	}
+
+	for (i = 0; i < msg_len; i++) {
+		SXE_REG_WRITE(hw, (SXE_PFMBMEM(index) + (i << 2)), msg[i]);
+		LOG_DEBUG_BDF("vf_idx:%u write mbx mem[%u]:0x%x.\n",
+			      index, i, msg[i]);
+	}
+
+	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_STS);
+	mbx->stats.send_msgs++;
+
+l_out:
+	return ret;
+}
+
+void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 msg_idx;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	for (msg_idx = 0; msg_idx < hw->mbx.msg_len; msg_idx++) {
+		SXE_REG_WRITE_ARRAY(hw, SXE_PFMBMEM(vf_idx), msg_idx, 0);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+	LOG_INFO_BDF("vf_idx:%u clear mbx mem.\n", vf_idx);
+	return;
+}
+
+static const struct sxe_mbx_operations sxe_mbx_ops = {
+	.init		= sxe_hw_mbx_init,
+
+	.req_check	= sxe_hw_vf_req_check,
+	.ack_check	= sxe_hw_vf_ack_check,
+	.rst_check	= sxe_hw_vf_rst_check,
+
+	.msg_send	= sxe_hw_send_msg_to_vf,
+	.msg_rcv	= sxe_hw_rcv_msg_from_vf,
+
+	.mbx_mem_clear	= sxe_hw_mbx_mem_clear,
+};
+
+void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_GCR_EXT, value);
+
+	return;
+}
+
+static const struct sxe_pcie_operations sxe_pcie_ops = {
+	.vt_mode_set	= sxe_hw_pcie_vt_mode_set,
+};
+
+s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock)
+{
+	u32 val;
+	u16 i;
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+	SXE_WRITE_FLUSH(hw);
+
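+	/*
+	 * Two-stage handshake: poll until the shared SW lock bit clears,
+	 * then expect the PF lock bit to have been granted (presumably by
+	 * firmware); if it has not, drop the SW lock and report busy.
+	 */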
+	for (i = 0; i < trylock; i++) {
+		val = SXE_REG_READ(hw, SXE_HDC_SW_LK) & SXE_HDC_SW_LK_BIT;
+		if (!val) {
+			break;
+		}
+
+		udelay(10);
+	}
+
+	if (i >= trylock) {
+		LOG_ERROR_BDF("hdc is busy, reg: 0x%x\n", val);
+		ret = -SXE_ERR_HDC_LOCK_BUSY;
+		goto l_out;
+	}
+
+	val = SXE_REG_READ(hw, SXE_HDC_PF_LK) & SXE_HDC_PF_LK_BIT;
+	if (!val) {
+		SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+		LOG_ERROR_BDF("get hdc lock fail, reg: 0x%x\n", val);
+		ret = -SXE_ERR_HDC_LOCK_BUSY;
+		goto l_out;
+	}
+
+	hw->hdc.pf_lock_val = val;
+	LOG_DEBUG_BDF("hw[%p]'s port[%u] got pf lock\n", hw, val);
+
+l_out:
+	return ret;
+}
+
+void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	do {
+		SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+		udelay(1);
+		if (!(SXE_REG_READ(hw, SXE_HDC_PF_LK) & hw->hdc.pf_lock_val)) {
+			LOG_DEBUG_BDF("hw[%p]'s port[%u] release pf lock\n", hw,
+				hw->hdc.pf_lock_val);
+			hw->hdc.pf_lock_val = 0;
+			break;
+		}
+	} while ((retry_cnt--) > 0);
+
+	return;
+}
+
+void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_FW_OV, 0);
+}
+
+bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw)
+{
+	bool fw_ov = false;
+
+	if (SXE_REG_READ(hw, SXE_HDC_FW_OV) & SXE_HDC_FW_OV_BIT) {
+		fw_ov = true;
+	}
+
+	return fw_ov;
+}
+
+void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_SW_OV, SXE_HDC_SW_OV_BIT);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, value);
+
+	return;
+}
+
+void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
+						u16 dword_index, u32 value)
+{
+	SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index, value);
+	return;
+}
+
+u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_HDC_PACKET_HEAD0);
+}
+
+u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+						u16 dword_index)
+{
+	return SXE_READ_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index);
+}
+
+u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 status = SXE_REG_READ(hw, SXE_FW_STATUS_REG);
+
+	LOG_DEBUG_BDF("fw status[0x%x]\n", status);
+
+	return status;
+}
+
+void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_DRV_STATUS_REG, value);
+	return;
+}
+
+u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u32 state = SXE_REG_READ(hw, SXE_FW_HDC_STATE_REG);
+
+	LOG_DEBUG_BDF("hdc channel state[0x%x]\n", state);
+
+	return state;
+}
+
+STATIC u32 sxe_hw_hdc_irq_event_get(struct sxe_hw *hw)
+{
+	u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("msi status[0x%x]\n", status);
+
+	return status;
+}
+
+static void sxe_hw_hdc_irq_event_clear(struct sxe_hw *hw, u32 event)
+{
+	u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("msi status[0x%x] and clear bit=[0x%x]\n", status, event);
+
+	status &= ~event;
+	SXE_REG_WRITE(hw, SXE_HDC_MSI_STATUS_REG, status);
+
+	return;
+}
+
+static void sxe_hw_hdc_resource_clean(struct sxe_hw *hw)
+{
+	u16 i;
+
+	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, 0x0);
+	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, 0x0);
+	for (i = 0; i < SXE_HDC_DATA_LEN_MAX; i++) {
+		SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, i, 0x0);
+	}
+
+	return;
+}
+
+static const struct sxe_hdc_operations sxe_hdc_ops = {
+	.pf_lock_get            = sxe_hw_hdc_lock_get,
+	.pf_lock_release        = sxe_hw_hdc_lock_release,
+	.is_fw_over_set         = sxe_hw_hdc_is_fw_over_set,
+	.fw_ack_header_rcv      = sxe_hw_hdc_fw_ack_header_get,
+	.packet_send_done       = sxe_hw_hdc_packet_send_done,
+	.packet_header_send     = sxe_hw_hdc_packet_header_send,
+	.packet_data_dword_send = sxe_hw_hdc_packet_data_dword_send,
+	.packet_data_dword_rcv  = sxe_hw_hdc_packet_data_dword_rcv,
+	.fw_status_get          = sxe_hw_hdc_fw_status_get,
+	.drv_status_set         = sxe_hw_hdc_drv_status_set,
+	.irq_event_get          = sxe_hw_hdc_irq_event_get,
+	.irq_event_clear        = sxe_hw_hdc_irq_event_clear,
+	.fw_ov_clear            = sxe_hw_hdc_fw_ov_clear,
+	.channel_state_get      = sxe_hw_hdc_channel_state_get,
+	.resource_clean         = sxe_hw_hdc_resource_clean,
+};
+
+#ifdef SXE_PHY_CONFIGURE
+#define SXE_MDIO_COMMAND_TIMEOUT 100
+
+static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 phy_data)
+{
+	s32 ret = 0;
+	u32 i, command;
+	struct sxe_adapter *adapter = hw->adapter;
+
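+	/*
+	 * Clause-45 style access: an address cycle latches reg_addr, then a
+	 * separate write cycle pushes the data already staged in MSCD.
+	 */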
+	SXE_REG_WRITE(hw, SXE_MSCD, (u32)phy_data);
+
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+			break;
+		}
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_WRITE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+			break;
+		}
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_hw_phy_reg_read(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 *phy_data)
+{
+	s32 ret = 0;
+	u32 i, data, command;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+			break;
+		}
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy read cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_READ | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
+			break;
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	data = SXE_REG_READ(hw, SXE_MSCD);
+	data >>= MDIO_MSCD_RDATA_SHIFT;
+	*phy_data = (u16)(data);
+
+l_end:
+	return ret;
+}
+
+#define SXE_PHY_REVISION_MASK		0x000F
+#define SXE_PHY_ID_HIGH_5_BIT_MASK	0xFC00
+#define SXE_PHY_ID_HIGH_SHIFT		10
+
+static s32 sxe_hw_phy_id_get(struct sxe_hw *hw, u32 prtad, u32 *id)
+{
+	s32 ret;
+	u16 phy_id_high = 0;
+	u16 phy_id_low = 0;
+
+	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID1, MDIO_MMD_PMAPMD,
+				      &phy_id_low);
+
+	if (ret) {
+		LOG_ERROR("get phy id upper 16 bits failed, prtad=%d\n", prtad);
+		goto l_end;
+	}
+
+	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID2, MDIO_MMD_PMAPMD,
+					&phy_id_high);
+	if (ret) {
+		LOG_ERROR("get phy id lower 4 bits failed, prtad=%d\n", prtad);
+		goto l_end;
+	}
+
+	*id = (u32)((phy_id_high >> SXE_PHY_ID_HIGH_SHIFT) << 16);
+	*id |= (u32)phy_id_low;
+
+l_end:
+	return ret;
+}
+
+s32 sxe_hw_phy_link_cap_get(struct sxe_hw *hw, u32 prtad, u32 *speed)
+{
+	s32 ret;
+	u16 speed_ability;
+
+	*speed = 0;
+	ret = hw->phy.ops->reg_read(hw, prtad, MDIO_SPEED, MDIO_MMD_PMAPMD,
+				      &speed_ability);
+	if (ret) {
+		*speed = 0;
+		LOG_ERROR("get phy link cap failed, ret=%d, prtad=%d\n",
+							ret, prtad);
+		goto l_end;
+	}
+
+	if (speed_ability & MDIO_SPEED_10G) {
+		*speed |= SXE_LINK_SPEED_10GB_FULL;
+	}
+
+	if (speed_ability & MDIO_PMA_SPEED_1000) {
+		*speed |= SXE_LINK_SPEED_1GB_FULL;
+	}
+
+	if (speed_ability & MDIO_PMA_SPEED_100) {
+		*speed |= SXE_LINK_SPEED_100_FULL;
+	}
+
+l_end:
+	return ret;
+}
+
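+/*
+ * Reset the PHY via MDIO_CTRL1 and poll the self-clearing reset bit
+ * for up to 3 seconds (30 iterations x 100 ms).
+ */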
+static s32 sxe_hw_phy_ctrl_reset(struct sxe_hw *hw, u32 prtad)
+{
+	u32 i;
+	s32 ret;
+	u16 ctrl;
+
+	ret = sxe_hw_phy_reg_write(hw, prtad, MDIO_CTRL1,
+			 MDIO_MMD_PHYXS, MDIO_CTRL1_RESET);
+	if (ret) {
+		LOG_ERROR("phy reset failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < 30; i++) {
+		msleep(100);
+		ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_CTRL1,
+					MDIO_MMD_PHYXS, &ctrl);
+		if (ret) {
+			goto l_end;
+		}
+
+		if (!(ctrl & MDIO_CTRL1_RESET)) {
+			udelay(2);
+			break;
+		}
+	}
+
+	if (ctrl & MDIO_CTRL1_RESET) {
+		LOG_DEV_ERR("phy reset polling failed to complete\n");
+		ret = -SXE_ERR_PHY_RESET_FAIL;
+	}
+
+l_end:
+	return ret;
+}
+
+static const struct sxe_phy_operations sxe_phy_hw_ops = {
+	.reg_write	= sxe_hw_phy_reg_write,
+	.reg_read	= sxe_hw_phy_reg_read,
+	.identifier_get	= sxe_hw_phy_id_get,
+	.link_cap_get	= sxe_hw_phy_link_cap_get,
+	.reset		= sxe_hw_phy_ctrl_reset,
+};
+#endif
+
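+/* Bind the per-module operation tables onto the hw handle; PHY ops are
+ * only wired up when SXE_PHY_CONFIGURE is defined.
+ */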
+void sxe_hw_ops_init(struct sxe_hw *hw)
+{
+	hw->setup.ops	= &sxe_setup_ops;
+	hw->irq.ops	= &sxe_irq_ops;
+	hw->mac.ops	= &sxe_mac_ops;
+	hw->dbu.ops	= &sxe_dbu_ops;
+	hw->dma.ops	= &sxe_dma_ops;
+	hw->sec.ops	= &sxe_sec_ops;
+	hw->stat.ops	= &sxe_stat_ops;
+	hw->mbx.ops	= &sxe_mbx_ops;
+	hw->pcie.ops	= &sxe_pcie_ops;
+	hw->hdc.ops	= &sxe_hdc_ops;
+#ifdef SXE_PHY_CONFIGURE
+	hw->phy.ops	= &sxe_phy_hw_ops;
+#endif
+
+	hw->filter.mac.ops	= &sxe_filter_mac_ops;
+	hw->filter.vlan.ops	= &sxe_filter_vlan_ops;
+	return;
+}
+
+u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 rss_key;
+
+	if (reg_idx >= SXE_MAX_RSS_KEY_ENTRIES) {
+		rss_key = 0;
+	} else {
+		rss_key = SXE_REG_READ(hw, SXE_RSSRK(reg_idx));
+	}
+
+	return rss_key;
+}
+
+bool sxe_hw_is_rss_enabled(struct sxe_hw *hw)
+{
+	bool rss_enable = false;
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+	if (mrqc & SXE_MRQC_RSSEN) {
+		rss_enable = true;
+	}
+
+	return rss_enable;
+}
+
+static u32 sxe_hw_mrqc_reg_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_MRQC);
+}
+
+u32 sxe_hw_rss_field_get(struct sxe_hw *hw)
+{
+	u32 mrqc = sxe_hw_mrqc_reg_get(hw);
+	return (mrqc & SXE_RSS_FIELD_MASK);
+}
+
+#ifdef SXE_DPDK
+
+#define SXE_TRAFFIC_CLASS_MAX  8
+
+#define SXE_MR_VLAN_MSB_REG_OFFSET         4
+#define SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET 4
+
+#define SXE_MR_TYPE_MASK                   0x0F
+#define SXE_MR_DST_POOL_OFFSET             8
+
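+/*
+ * Program the per-TC Rx packet buffer size; the buffer is switched off
+ * around the update so the resize takes effect safely. pbsize is
+ * presumably in KB units, shifted into the RXPBSIZE size field.
+ */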
+void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize)
+{
+	u32 rxpbsize = pbsize << SXE_RX_PKT_BUF_SIZE_SHIFT;
+
+	sxe_hw_rx_pkt_buf_switch(hw, false);
+	SXE_REG_WRITE(hw, SXE_RXPBSIZE(tc_idx), rxpbsize);
+	sxe_hw_rx_pkt_buf_switch(hw, true);
+
+	return;
+}
+
+void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools)
+{
+	u16 pbsize;
+	u8 i, nb_tcs;
+	u32 mrqc;
+
+	nb_tcs = SXE_VMDQ_DCB_NUM_QUEUES / num_pools;
+
+	pbsize = (u8)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
+
+	for (i = 0; i < nb_tcs; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
+	}
+
+	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
+	}
+
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
+		SXE_MRQC_VMDQRT8TCEN : SXE_MRQC_VMDQRT4TCEN;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	SXE_REG_WRITE(hw, SXE_RTRPCS, SXE_RTRPCS_RRM);
+
+	return;
+}
+
+static const struct sxe_reg_info sxe_regs_general_group[] = {
+	{SXE_CTRL, 1, 1, "SXE_CTRL"},
+	{SXE_STATUS, 1, 1, "SXE_STATUS"},
+	{SXE_CTRL_EXT, 1, 1, "SXE_CTRL_EXT"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_interrupt_group[] = {
+	{SXE_EICS, 1, 1, "SXE_EICS"},
+	{SXE_EIMS, 1, 1, "SXE_EIMS"},
+	{SXE_EIMC, 1, 1, "SXE_EIMC"},
+	{SXE_EIAC, 1, 1, "SXE_EIAC"},
+	{SXE_EIAM, 1, 1, "SXE_EIAM"},
+	{SXE_EITR(0), 24, 4, "SXE_EITR"},
+	{SXE_IVAR(0), 24, 4, "SXE_IVAR"},
+	{SXE_GPIE, 1, 1, "SXE_GPIE"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_fctl_group[] = {
+	{SXE_PFCTOP, 1, 1, "SXE_PFCTOP"},
+	{SXE_FCRTV, 1, 1, "SXE_FCRTV"},
+	{SXE_TFCS, 1, 1, "SXE_TFCS"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_rxdma_group[] = {
+	{SXE_RDBAL(0), 64, 0x40, "SXE_RDBAL"},
+	{SXE_RDBAH(0), 64, 0x40, "SXE_RDBAH"},
+	{SXE_RDLEN(0), 64, 0x40, "SXE_RDLEN"},
+	{SXE_RDH(0), 64, 0x40, "SXE_RDH"},
+	{SXE_RDT(0), 64, 0x40, "SXE_RDT"},
+	{SXE_RXDCTL(0), 64, 0x40, "SXE_RXDCTL"},
+	{SXE_SRRCTL(0), 16, 0x4, "SXE_SRRCTL"},
+	{SXE_TPH_RXCTRL(0), 16, 4, "SXE_TPH_RXCTRL"},
+	{SXE_RDRXCTL, 1, 1, "SXE_RDRXCTL"},
+	{SXE_RXPBSIZE(0), 8, 4, "SXE_RXPBSIZE"},
+	{SXE_RXCTRL, 1, 1, "SXE_RXCTRL"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_rx_group[] = {
+	{SXE_RXCSUM, 1, 1, "SXE_RXCSUM"},
+	{SXE_RFCTL, 1, 1, "SXE_RFCTL"},
+	{SXE_RAL(0), 16, 8, "SXE_RAL"},
+	{SXE_RAH(0), 16, 8, "SXE_RAH"},
+	{SXE_PSRTYPE(0), 1, 4, "SXE_PSRTYPE"},
+	{SXE_FCTRL, 1, 1, "SXE_FCTRL"},
+	{SXE_VLNCTRL, 1, 1, "SXE_VLNCTRL"},
+	{SXE_MCSTCTRL, 1, 1, "SXE_MCSTCTRL"},
+	{SXE_MRQC, 1, 1, "SXE_MRQC"},
+	{SXE_VMD_CTL, 1, 1, "SXE_VMD_CTL"},
+
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_tx_group[] = {
+	{SXE_TDBAL(0), 32, 0x40, "SXE_TDBAL"},
+	{SXE_TDBAH(0), 32, 0x40, "SXE_TDBAH"},
+	{SXE_TDLEN(0), 32, 0x40, "SXE_TDLEN"},
+	{SXE_TDH(0), 32, 0x40, "SXE_TDH"},
+	{SXE_TDT(0), 32, 0x40, "SXE_TDT"},
+	{SXE_TXDCTL(0), 32, 0x40, "SXE_TXDCTL"},
+	{SXE_TPH_TXCTRL(0), 16, 4, "SXE_TPH_TXCTRL"},
+	{SXE_TXPBSIZE(0), 8, 4, "SXE_TXPBSIZE"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_wakeup_group[] = {
+	{SXE_WUC, 1, 1, "SXE_WUC"},
+	{SXE_WUFC, 1, 1, "SXE_WUFC"},
+	{SXE_WUS, 1, 1, "SXE_WUS"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_dcb_group[] = {
+	{0, 0, 0, ""}
+};
+
+static const struct sxe_reg_info sxe_regs_diagnostic_group[] = {
+
+	{SXE_MFLCN, 1, 1, "SXE_MFLCN"},
+	{0, 0, 0, ""},
+};
+
+static const struct sxe_reg_info *sxe_regs_group[] = {
+				sxe_regs_general_group,
+				sxe_regs_interrupt_group,
+				sxe_regs_fctl_group,
+				sxe_regs_rxdma_group,
+				sxe_regs_rx_group,
+				sxe_regs_tx_group,
+				sxe_regs_wakeup_group,
+				sxe_regs_dcb_group,
+				sxe_regs_diagnostic_group,
+				NULL};
+
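+/* Each register group table above ends with a zero-count sentinel entry. */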
+static u32 sxe_regs_group_count(const struct sxe_reg_info *regs)
+{
+	int i = 0;
+	int count = 0;
+
+	while (regs[i].count) {
+		count += regs[i++].count;
+	}
+
+	return count;
+}
+
+static u32 sxe_hw_regs_group_read(struct sxe_hw *hw,
+				const struct sxe_reg_info *regs,
+				u32 *reg_buf)
+{
+	u32 j, i = 0;
+	int count = 0;
+
+	while (regs[i].count) {
+		for (j = 0; j < regs[i].count; j++) {
+			reg_buf[count + j] = SXE_REG_READ(hw,
+					regs[i].addr + j * regs[i].stride);
+			LOG_INFO("regs=%s, regs_addr=%x, regs_value=%04x\n",
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
+		}
+
+		i++;
+		count += j;
+	}
+
+	return count;
+}
+
+u32 sxe_hw_all_regs_group_num_get(void)
+{
+	u32 i = 0;
+	u32 count = 0;
+	const struct sxe_reg_info *reg_group;
+	const struct sxe_reg_info **reg_set = sxe_regs_group;
+
+	while ((reg_group = reg_set[i++])) {
+		count += sxe_regs_group_count(reg_group);
+	}
+
+	return count;
+}
+
+void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data)
+{
+	u32 count = 0, i = 0;
+	const struct sxe_reg_info *reg_group;
+	const struct sxe_reg_info **reg_set = sxe_regs_group;
+
+	while ((reg_group = reg_set[i++])) {
+		count += sxe_hw_regs_group_read(hw, reg_group, &data[count]);
+	}
+
+	LOG_INFO("read regs cnt=%u, regs num=%u\n",
+				count, sxe_hw_all_regs_group_num_get());
+
+	return;
+}
+
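+/*
+ * Enable virtualization (with replication) and either steer unmatched
+ * traffic to the given default pool or disable default-pool forwarding.
+ */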
+static void sxe_hw_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx)
+{
+	u32 vt_ctl;
+
+	vt_ctl = SXE_VT_CTL_VT_ENABLE | SXE_VT_CTL_REPLEN;
+	if (default_pool_enabled) {
+		vt_ctl |= (default_pool_idx << SXE_VT_CTL_POOL_SHIFT);
+	} else {
+		vt_ctl |= SXE_VT_CTL_DIS_DEFPL;
+	}
+
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl);
+	return;
+}
+
+void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx)
+{
+	sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx);
+	return;
+}
+
+u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx)
+{
+	u32 mask;
+
+	if (idx == 0) {
+		mask = SXE_REG_READ(hw, SXE_EIMS_EX(0));
+	} else {
+		mask = SXE_REG_READ(hw, SXE_EIMS_EX(1));
+	}
+
+	return mask;
+}
+
+void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	if (idx == 0) {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(0), value);
+	} else {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), value);
+	}
+
+	return;
+}
+
+void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
+						u8 *tc_arr)
+{
+	u32 up2tc;
+	u8 i;
+
+	up2tc = 0;
+	for (i = 0; i < MAX_USER_PRIORITY; i++) {
+		up2tc |= ((tc_arr[i] & 0x07) << (i * 3));
+	}
+
+	SXE_REG_WRITE(hw, SXE_RTRUP2TC, up2tc);
+
+	return;
+}
+
+u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_UTA(reg_idx));
+}
+
+void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
+				u8 reg_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_UTA(reg_idx), value);
+
+	return;
+}
+
+u32 sxe_hw_vlan_type_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_VLNCTRL);
+}
+
+void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, value);
+	return;
+}
+
+void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools)
+{
+	u32 vlanctrl;
+	u8 i;
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	SXE_REG_WRITE(hw, SXE_VFRE(0),
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
+
+	return;
+}
+
+void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EXVET, value);
+	return;
+}
+
+u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_DMATXCTL);
+}
+
+void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_DMATXCTL, value);
+	return;
+}
+
+u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_CTRL_EXT);
+}
+
+void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
+	return;
+}
+
+void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RQSMR(idx), value);
+	return;
+}
+
+void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN |
+			(vlan_id & 0xFFF)));
+
+	SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2), pools_map);
+
+	return;
+}
+
+void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_TQSM(idx), value);
+	return;
+}
+
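+/*
+ * DCB Rx path setup: freeze the Rx packet-plane arbiter, pick the
+ * RSS/VMDq MRQC mode for 4 or 8 TCs, program per-queue drop enable,
+ * open the VLAN filter, then re-enable arbitration.
+ */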
+void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
+					u8 sriov_active, u8 tc_num)
+{
+	u32 reg;
+	u32 vlanctrl;
+	u8 i;
+	u32 q;
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	reg = SXE_REG_READ(hw, SXE_MRQC);
+	if (tc_num == 4) {
+		if (is_vt_on) {
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_VMDQRT4TCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_VT_CTL, 0);
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_RTRSS4TCEN;
+		}
+	}
+
+	if (tc_num == 8) {
+		if (is_vt_on) {
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_VMDQRT8TCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_VT_CTL, 0);
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_RTRSS8TCEN;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, reg);
+
+	if (sriov_active == 0) {
+		for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) {
+			SXE_REG_WRITE(hw, SXE_QDE,
+				(SXE_QDE_WRITE |
+				 (q << SXE_QDE_IDX_SHIFT)));
+		}
+	} else {
+		for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) {
+			SXE_REG_WRITE(hw, SXE_QDE,
+				(SXE_QDE_WRITE |
+				 (q << SXE_QDE_IDX_SHIFT) |
+				 SXE_QDE_ENABLE));
+		}
+	}
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	return;
+}
+
+void sxe_hw_fc_status_get(struct sxe_hw *hw,
+					bool *rx_pause_on, bool *tx_pause_on)
+{
+	u32 flctrl;
+
+	flctrl = SXE_REG_READ(hw, SXE_FLCTRL);
+	if (flctrl & (SXE_FCTRL_RFCE_PFC_EN | SXE_FCTRL_RFCE_LFC_EN)) {
+		*rx_pause_on = true;
+	} else {
+		*rx_pause_on = false;
+	}
+
+	if (flctrl & (SXE_FCTRL_TFCE_PFC_EN | SXE_FCTRL_TFCE_LFC_EN)) {
+		*tx_pause_on = true;
+	} else {
+		*tx_pause_on = false;
+	}
+
+	return;
+}
+
+void sxe_hw_fc_base_init(struct sxe_hw *hw)
+{
+	u8 i;
+
+	hw->fc.requested_mode = SXE_FC_NONE;
+	hw->fc.current_mode = SXE_FC_NONE;
+	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
+	hw->fc.disable_fc_autoneg = false;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		hw->fc.low_water[i]  = SXE_FC_DEFAULT_LOW_WATER_MARK;
+		hw->fc.high_water[i] = SXE_FC_DEFAULT_HIGH_WATER_MARK;
+	}
+
+	hw->fc.send_xon = 1;
+	return;
+}
+
+u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
+{
+	return hw->fc.high_water[tc_idx];
+}
+
+u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
+{
+	return hw->fc.low_water[tc_idx];
+}
+
+u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw)
+{
+	return hw->fc.send_xon;
+}
+
+void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon)
+{
+	hw->fc.send_xon = send_xon;
+	return;
+}
+
+u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw)
+{
+	return hw->fc.pause_time;
+}
+
+void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time)
+{
+	hw->fc.pause_time = pause_time;
+	return;
+}
+
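+/*
+ * Reprogram MTQC for the requested TC count with the Tx descriptor
+ * arbiter disabled, then re-enable arbitration.
+ */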
+void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num)
+{
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_RTTDCS);
+	reg |= SXE_RTTDCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+	if (tc_num == 8) {
+		reg = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+	} else {
+		reg = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+	}
+
+	if (is_vt_on) {
+		reg |= SXE_MTQC_VT_ENA;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, reg);
+
+	reg = SXE_REG_READ(hw, SXE_RTTDCS);
+	reg &= ~SXE_RTTDCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+	return;
+}
+
+void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
+							bool is_on)
+{
+	u32 rxcsum;
+
+	rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
+	if (is_on) {
+		rxcsum |= SXE_RXCSUM_IPPCSE;
+	} else {
+		rxcsum &= ~SXE_RXCSUM_IPPCSE;
+	}
+
+	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
+
+	return;
+}
+
+void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+	if (is_on) {
+		mrqc |= SXE_MRQC_RSSEN;
+	} else {
+		mrqc &= ~SXE_MRQC_RSSEN;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
+void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num)
+{
+	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx),
+		pool_num == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+	return;
+}
+
+void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc &= ~SXE_RSS_FIELD_MASK;
+	mrqc |= rss_field;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
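+/*
+ * Map queues onto statistics counters for 4-TC mode; each 32-bit
+ * RQSMR/TQSM register appears to carry one 8-bit counter index per queue.
+ */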
+static void sxe_hw_dcb_4tc_vmdq_off_stats_configure(struct sxe_hw *hw)
+{
+	u32 reg;
+	u8  i;
+
+	for (i = 0; i < 32; i++) {
+		if (i % 8 > 3) {
+			continue;
+		}
+
+		reg = 0x01010101 * (i / 8);
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), reg);
+	}
+	for (i = 0; i < 32; i++) {
+		if (i < 16) {
+			reg = 0x00000000;
+		} else if (i < 24) {
+			reg = 0x01010101;
+		} else if (i < 28) {
+			reg = 0x02020202;
+		} else {
+			reg = 0x03030303;
+		}
+
+		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
+	}
+
+	return;
+}
+
+static void sxe_hw_dcb_4tc_vmdq_on_stats_configure(struct sxe_hw *hw)
+{
+	u8  i;
+
+	for (i = 0; i < 32; i++) {
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), 0x03020100);
+	}
+
+	for (i = 0; i < 32; i++) {
+		SXE_REG_WRITE(hw, SXE_TQSM(i), 0x03020100);
+	}
+
+	return;
+}
+
+void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	sxe_hw_rss_redir_tbl_reg_write(hw, reg_idx, value);
+}
+
+static u32 sxe_hw_rss_redir_tbl_reg_read(struct sxe_hw *hw, u16 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_RETA(reg_idx >> 2));
+}
+
+u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx)
+{
+	return sxe_hw_rss_redir_tbl_reg_read(hw, reg_idx);
+}
+
+void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TIMINC, 0);
+	return;
+}
+
+void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
+					u8 tc_num, bool vmdq_active)
+{
+	if (tc_num == 8 && !vmdq_active) {
+		sxe_hw_dcb_8tc_vmdq_off_stats_configure(hw);
+	} else if (tc_num == 4 && !vmdq_active) {
+		sxe_hw_dcb_4tc_vmdq_off_stats_configure(hw);
+	} else if (tc_num == 4 && vmdq_active) {
+		sxe_hw_dcb_4tc_vmdq_on_stats_configure(hw);
+	}
+
+	return;
+}
+
+void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TSYNCTXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCTXCTL) &
+			~SXE_TSYNCTXCTL_TEN));
+
+	SXE_REG_WRITE(hw, SXE_TSYNCRXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCRXCTL) &
+			~SXE_TSYNCRXCTL_REN));
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		LOG_ERROR_BDF("rar_idx:%d invalid.(err:%d)\n",
+			  rar_idx, SXE_ERR_PARAM);
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
+
+l_end:
+	return;
+}
+
+void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw)
+{
+	u32 mrqc;
+
+	mrqc = SXE_MRQC_VMDQEN;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
+void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx)
+{
+	sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx);
+	return;
+}
+
+void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools, u32 rx_mode)
+{
+	u32 vlanctrl;
+	u8 i;
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	SXE_REG_WRITE(hw, SXE_VFRE(0), 0xFFFFFFFF);
+	if (num_pools == RTE_ETH_64_POOLS) {
+		SXE_REG_WRITE(hw, SXE_VFRE(1), 0xFFFFFFFF);
+	}
+
+	for (i = 0; i < num_pools; i++) {
+		SXE_REG_WRITE(hw, SXE_VMOLR(i), rx_mode);
+	}
+
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_GCR_EXT);
+}
+
+void sxe_rx_fc_threshold_set(struct sxe_hw *hw)
+{
+	u8 i;
+	u32 high;
+
+	for (i = 0; i < SXE_TRAFFIC_CLASS_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		high = SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 32;
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), high);
+	}
+
+	return;
+}
+
+void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN |
+			(vlan_id & SXE_RXD_VLAN_ID_MASK)));
+
+	if (((pools_map >> 32) & 0xFFFFFFFF) == 0) {
+		SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2),
+			(pools_map & 0xFFFFFFFF));
+	} else {
+		SXE_REG_WRITE(hw, SXE_VLVFB((pool_idx * 2 + 1)),
+			((pools_map >> 32) & 0xFFFFFFFF));
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw)
+{
+	u8 i;
+	SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN);
+	for (i = 0; i < SXE_VMTXSW_REGISTER_COUNT; i++) {
+		SXE_REG_WRITE(hw, SXE_VMTXSW(i), 0xFFFFFFFF);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
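+/*
+ * Select the MTQC Tx queue layout from the SR-IOV pool count (or plain
+ * VMDq), keeping the DCB arbiter disabled while the mode is switched.
+ */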
+void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
+				bool vmdq_enable, bool sriov_enable, u16 pools_num)
+{
+	u32 mtqc;
+
+	sxe_hw_dcb_arbiter_set(hw, false);
+
+	if (sriov_enable) {
+		switch (pools_num) {
+		case RTE_ETH_64_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF;
+			break;
+		case RTE_ETH_32_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_32VF;
+			break;
+		case RTE_ETH_16_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_RT_ENA |
+				SXE_MTQC_8TC_8TQ;
+			break;
+		default:
+			mtqc = SXE_MTQC_64Q_1PB;
+		}
+	} else {
+		if (vmdq_enable) {
+			u8 queue_idx;
+			SXE_REG_WRITE(hw, SXE_VFTE(0), UINT32_MAX);
+			SXE_REG_WRITE(hw, SXE_VFTE(1), UINT32_MAX);
+
+			for (queue_idx = 0; queue_idx < SXE_HW_TXRX_RING_NUM_MAX;
+			    queue_idx++) {
+				SXE_REG_WRITE(hw, SXE_QDE,
+					(SXE_QDE_WRITE |
+					(queue_idx << SXE_QDE_IDX_SHIFT)));
+			}
+
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF;
+		} else {
+			mtqc = SXE_MTQC_64Q_1PB;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, mtqc);
+
+	sxe_hw_dcb_arbiter_set(hw, true);
+
+	return;
+}
+
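+/* Enable Rx descriptor drop (QDE) for every ring in the VF's pool. */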
+void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u8 ring_per_pool)
+{
+	u32 value;
+	u8 i;
+
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) {
+		value = SXE_QDE_ENABLE | SXE_QDE_WRITE;
+		SXE_WRITE_FLUSH(hw);
+
+		value |= i << SXE_QDE_IDX_SHIFT;
+
+		SXE_REG_WRITE(hw, SXE_QDE, value);
+	}
+
+	return;
+}
+
+bool sxe_hw_vt_status(struct sxe_hw *hw)
+{
+	bool ret;
+	u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	if (vt_ctl & SXE_VMD_CTL_POOL_EN) {
+		ret = true;
+	} else {
+		ret = false;
+	}
+
+	return ret;
+}
+
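+/*
+ * Program a mirror rule: the low MRCTL bits select the mirror type and
+ * the destination pool sits at SXE_MR_DST_POOL_OFFSET.
+ */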
+void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
+				    u8 mirror_type, u8 dst_pool, bool on)
+{
+	u32 mr_ctl;
+
+	mr_ctl = SXE_REG_READ(hw, SXE_MRCTL(rule_id));
+
+	if (on) {
+		mr_ctl |= mirror_type;
+		mr_ctl &= SXE_MR_TYPE_MASK;
+		mr_ctl |= dst_pool << SXE_MR_DST_POOL_OFFSET;
+	} else {
+		mr_ctl &= ~(mirror_type & SXE_MR_TYPE_MASK);
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), mr_ctl);
+
+	return;
+}
+
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
+{
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), lsb);
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), msb);
+
+	return;
+}
+
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
+{
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), lsb);
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), msb);
+
+	return;
+}
+
+void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id)
+{
+	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), 0);
+
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), 0);
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), 0);
+
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), 0);
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), 0);
+
+	return;
+}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
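+/*
+ * Program one five-tuple filter: SAQF/DAQF hold the source/destination
+ * IPs, SDPQF the L4 ports, FTQF the protocol/priority plus the per-field
+ * compare mask, and L34T_IMIR the target Rx queue.
+ */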
+void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
+					struct sxe_fivetuple_node_info *filter)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	u32 ftqf, sdpqf;
+	u32 l34timir = 0;
+	u8 mask = 0xff;
+
+	i = filter->index;
+
+	sdpqf = (u32)(filter->filter_info.dst_port << SXE_SDPQF_DSTPORT_SHIFT);
+	sdpqf = sdpqf | (filter->filter_info.src_port & SXE_SDPQF_SRCPORT);
+
+	ftqf = (u32)(filter->filter_info.protocol & SXE_FTQF_PROTOCOL_MASK);
+	ftqf |= (u32)((filter->filter_info.priority &
+			SXE_FTQF_PRIORITY_MASK) << SXE_FTQF_PRIORITY_SHIFT);
+
+	if (filter->filter_info.src_ip_mask == 0) {
+		mask &= SXE_FTQF_SOURCE_ADDR_MASK;
+	}
+	if (filter->filter_info.dst_ip_mask == 0) {
+		mask &= SXE_FTQF_DEST_ADDR_MASK;
+	}
+	if (filter->filter_info.src_port_mask == 0) {
+		mask &= SXE_FTQF_SOURCE_PORT_MASK;
+	}
+	if (filter->filter_info.dst_port_mask == 0) {
+		mask &= SXE_FTQF_DEST_PORT_MASK;
+	}
+	if (filter->filter_info.proto_mask == 0) {
+		mask &= SXE_FTQF_PROTOCOL_COMP_MASK;
+	}
+	ftqf |= mask << SXE_FTQF_5TUPLE_MASK_SHIFT;
+	ftqf |= SXE_FTQF_POOL_MASK_EN;
+	ftqf |= SXE_FTQF_QUEUE_ENABLE;
+
+	LOG_DEBUG("add fivetuple filter, index[%u], src_ip[0x%x], dst_ip[0x%x], "
+		"src_port[%u], dst_port[%u], ftqf[0x%x], queue[%u]", i,
+		filter->filter_info.src_ip, filter->filter_info.dst_ip,
+		filter->filter_info.src_port, filter->filter_info.dst_port,
+		ftqf, filter->queue);
+
+	SXE_REG_WRITE(hw, SXE_DAQF(i), filter->filter_info.dst_ip);
+	SXE_REG_WRITE(hw, SXE_SAQF(i), filter->filter_info.src_ip);
+	SXE_REG_WRITE(hw, SXE_SDPQF(i), sdpqf);
+	SXE_REG_WRITE(hw, SXE_FTQF(i), ftqf);
+
+	l34timir |= SXE_L34T_IMIR_RESERVE;
+	l34timir |= (u32)(filter->queue << SXE_L34T_IMIR_QUEUE_SHIFT);
+	SXE_REG_WRITE(hw, SXE_L34T_IMIR(i), l34timir);
+
+	return;
+}
+
+void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index)
+{
+	SXE_REG_WRITE(hw, SXE_DAQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_SAQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_SDPQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_FTQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_L34T_IMIR(reg_index), 0);
+
+	return;
+}
+
+void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
+					u8 reg_index, u16 ethertype, u16 queue)
+{
+	u32 etqf = 0;
+	u32 etqs = 0;
+
+	etqf = SXE_ETQF_FILTER_EN;
+	etqf |= (u32)ethertype;
+	etqs |= (u32)((queue << SXE_ETQS_RX_QUEUE_SHIFT) &
+			SXE_ETQS_RX_QUEUE);
+	etqs |= SXE_ETQS_QUEUE_EN;
+
+	SXE_REG_WRITE(hw, SXE_ETQF(reg_index), etqf);
+	SXE_REG_WRITE(hw, SXE_ETQS(reg_index), etqs);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type)
+{
+	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), 0);
+	SXE_REG_WRITE(hw, SXE_ETQS(filter_type), 0);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority)
+{
+	u32 synqf;
+
+	synqf = (u32)(((queue << SXE_SYN_FILTER_QUEUE_SHIFT) &
+			SXE_SYN_FILTER_QUEUE) | SXE_SYN_FILTER_ENABLE);
+
+	if (priority) {
+		synqf |= SXE_SYN_FILTER_SYNQFP;
+	} else {
+		synqf &= ~SXE_SYN_FILTER_SYNQFP;
+	}
+
+	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_syn_filter_del(struct sxe_hw *hw)
+{
+	u32 synqf;
+
+	synqf = SXE_REG_READ(hw, SXE_SYNQF);
+
+	synqf &= ~(SXE_SYN_FILTER_QUEUE | SXE_SYN_FILTER_ENABLE);
+	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize)
+{
+	s32 i;
+
+	SXE_REG_WRITE(hw, SXE_RXPBSIZE(0), (SXE_REG_READ(hw, SXE_RXPBSIZE(0)) - pbsize));
+	for (i = 1; i < 8; i++) {
+		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
+	}
+
+	return;
+}
+
+void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask)
+{
+	u32 fnavm;
+
+	fnavm = SXE_REG_READ(hw, SXE_FNAVM);
+	if (flex_mask == UINT16_MAX) {
+		fnavm &= ~SXE_FNAVM_FLEX;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
+	return;
+}
+
+void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask)
+{
+	u32 fnavipv6m;
+
+	fnavipv6m = (dst_mask << 16) | src_mask;
+	SXE_REG_WRITE(hw, SXE_FNAVIP6M, ~fnavipv6m);
+
+	return;
+}
+
+s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset)
+{
+	u32 fnavctrl;
+	s32 ret;
+
+	fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	fnavctrl &= ~SXE_FNAVCTRL_FLEX_MASK;
+	fnavctrl |= ((offset >> 1)
+		<< SXE_FNAVCTRL_FLEX_SHIFT);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_wait_init_done(hw);
+	if (ret) {
+		LOG_ERROR("flow director signature poll time exceeded!\n");
+	}
+
+	return ret;
+}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
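+/*
+ * Quiesce the MACsec Tx/Rx data path before (re)configuration; when the
+ * link is down, local loopback is briefly enabled so queued frames can
+ * drain while the security-block ready bits are polled.
+ */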
+static void sxe_macsec_stop_data(struct sxe_hw *hw, bool link)
+{
+	u32 t_rdy, r_rdy;
+	u32 limit;
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+		SXE_SECTXSTAT_SECTX_RDY;
+	r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+		SXE_SECRXSTAT_SECRX_RDY;
+	if (t_rdy && r_rdy)
+		return;
+
+	if (!link) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x1);
+
+		SXE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	limit = 20;
+	do {
+		mdelay(10);
+		t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+			SXE_SECTXSTAT_SECTX_RDY;
+		r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+			SXE_SECRXSTAT_SECRX_RDY;
+	} while (!(t_rdy && r_rdy) && limit--);
+
+	if (!link) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x0);
+		SXE_WRITE_FLUSH(hw);
+	}
+
+	return;
+}
+
+void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc)
+{
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
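+/*
+ * Bring up MACsec: re-enable the security blocks, set the Tx PN threshold
+ * and Rx replay protection, then release the Tx/Rx data path.
+ */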
+void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
+				u32 rx_mode, u32 pn_trh)
+{
+	u32 reg;
+
+	sxe_macsec_stop_data(hw, is_up);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg &= ~SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg &= ~SXE_SECRXCTRL_SECRX_DIS;
+	reg |= SXE_SECRXCTRL_RP;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	reg = tx_mode & SXE_LSECTXCTRL_EN_MASK;
+	reg |= SXE_LSECTXCTRL_AISCI;
+	reg &= ~SXE_LSECTXCTRL_PNTHRSH_MASK;
+	reg |= (pn_trh << SXE_LSECTXCTRL_PNTHRSH_SHIFT);
+	SXE_REG_WRITE(hw, SXE_LSECTXCTRL, reg);
+
+	reg = (rx_mode << SXE_LSECRXCTRL_EN_SHIFT) & SXE_LSECRXCTRL_EN_MASK;
+	reg |= SXE_LSECRXCTRL_RP;
+	reg |= SXE_LSECRXCTRL_DROP_EN;
+	SXE_REG_WRITE(hw, SXE_LSECRXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg &= ~SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg &= ~SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up)
+{
+	u32 reg;
+
+	sxe_macsec_stop_data(hw, is_up);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_SECRX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch)
+{
+	SXE_REG_WRITE(hw, SXE_LSECTXSCL, scl);
+	SXE_REG_WRITE(hw, SXE_LSECTXSCH, sch);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi)
+{
+	u32 reg = sch;
+
+	SXE_REG_WRITE(hw, SXE_LSECRXSCL, scl);
+
+	reg |= (pi << SXE_LSECRXSCH_PI_SHIFT) & SXE_LSECRXSCH_PI_MASK;
+	SXE_REG_WRITE(hw, SXE_LSECRXSCH, reg);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
+	reg &= ~SXE_LSECTXSA_SELSA;
+	reg |= (sa_idx << SXE_LSECTXSA_SELSA_SHIFT) & SXE_LSECTXSA_SELSA;
+	SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_LSECTXPN(sa_idx), pn);
+	for (i = 0; i < 4; i++) {
+		SXE_REG_WRITE(hw, SXE_LSECTXKEY(sa_idx, i), keys[i]);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
+	if (sa_idx == 0) {
+		reg &= ~SXE_LSECTXSA_AN0_MASK;
+		reg |= (an << SXE_LSECTXSA_AN0_SHIFT) & SXE_LSECTXSA_AN0_MASK;
+		reg &= ~SXE_LSECTXSA_SELSA;
+		SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	} else if (sa_idx == 1) {
+		reg &= ~SXE_LSECTXSA_AN1_MASK;
+		reg |= (an << SXE_LSECTXSA_AN1_SHIFT) & SXE_LSECTXSA_AN1_MASK;
+		reg |= SXE_LSECTXSA_SELSA;
+		SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_REG_READ(hw, SXE_LSECRXSA(sa_idx));
+	reg &= ~SXE_LSECRXSA_SAV;
+	reg |= (0 << SXE_LSECRXSA_SAV_SHIFT) & SXE_LSECRXSA_SAV;
+
+	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_LSECRXPN(sa_idx), pn);
+
+	for (i = 0; i < 4; i++) {
+		SXE_REG_WRITE(hw, SXE_LSECRXKEY(sa_idx, i), keys[i]);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+	reg = ((an << SXE_LSECRXSA_AN_SHIFT) & SXE_LSECRXSA_AN_MASK) | SXE_LSECRXSA_SAV;
+	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+#endif
+#endif
diff --git a/drivers/net/sxe/base/sxe_hw.h b/drivers/net/sxe/base/sxe_hw.h
new file mode 100644
index 0000000000..8adc9fc15b
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_hw.h
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_HW_H__
+#define __SXE_HW_H__
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#include <linux/types.h>
+#include <linux/kernel.h>
+#else
+#include "sxe_types.h"
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+#include <inttypes.h>
+#endif
+
+#include "sxe_regs.h"
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#define SXE_PRIU64  "llu"
+#define SXE_PRIX64  "llx"
+#define SXE_PRID64  "lld"
+#define SXE_RMB()     rmb()
+
+#else
+#define SXE_PRIU64  PRIu64
+#define SXE_PRIX64  PRIx64
+#define SXE_PRID64  PRId64
+#define SXE_RMB()     rte_rmb()
+#endif
+
+struct sxe_hw;
+struct sxe_filter_mac;
+struct sxe_fc_info;
+
+#define SXE_MAC_ADDR_LEN 6
+#define SXE_QUEUE_STATS_MAP_REG_NUM 32
+
+#define SXE_FC_DEFAULT_HIGH_WATER_MARK    0x80
+#define SXE_FC_DEFAULT_LOW_WATER_MARK     0x40
+
+#define  SXE_MC_ADDR_EXTRACT_MASK  (0xFFF)
+#define  SXE_MC_ADDR_SHIFT         (5)    
+#define  SXE_MC_ADDR_REG_MASK      (0x7F) 
+#define  SXE_MC_ADDR_BIT_MASK      (0x1F) 
+
+#define SXE_TXTS_POLL_CHECK		3
+#define SXE_TXTS_POLL			5
+#define SXE_TIME_TO_NS(ns, sec)	(((u64)(ns)) + (u64)(((u64)(sec)) * NSEC_PER_SEC))
+
+enum sxe_strict_prio_type {
+	PRIO_NONE = 0, 
+	PRIO_GROUP,    
+	PRIO_LINK      
+};
+
+enum sxe_mc_filter_type {
+	SXE_MC_FILTER_TYPE0 = 0,  
+	SXE_MC_FILTER_TYPE1,      
+	SXE_MC_FILTER_TYPE2,      
+	SXE_MC_FILTER_TYPE3       
+};
+
+#define SXE_POOLS_NUM_MAX 64
+#define SXE_16_POOL 16
+#define SXE_32_POOL 32
+#define SXE_1_RING_PER_POOL 1
+#define SXE_2_RING_PER_POOL 2
+#define SXE_3_RING_PER_POOL 3
+#define SXE_4_RING_PER_POOL 4
+
+#define SXE_DCB_1_TC 1
+#define SXE_DCB_4_TC 4
+#define SXE_DCB_8_TC 8
+
+#define SXE_8Q_PER_POOL_MASK   0x78
+#define SXE_4Q_PER_POOL_MASK   0x7C
+#define SXE_2Q_PER_POOL_MASK   0x7E
+
+#define SXE_VF_NUM_16		16
+#define SXE_VF_NUM_32		32
+
+#define SXE_TX_DESC_EOP_MASK  0x01000000   
+#define SXE_TX_DESC_RS_MASK   0x08000000   
+#define SXE_TX_DESC_STAT_DD   0x00000001   
+#define SXE_TX_DESC_CMD       (SXE_TX_DESC_EOP_MASK | SXE_TX_DESC_RS_MASK)
+#define SXE_TX_DESC_TYPE_DATA 0x00300000   
+#define SXE_TX_DESC_DEXT      0x20000000   
+#define SXE_TX_DESC_IFCS      0x02000000   
+#define SXE_TX_DESC_VLE       0x40000000 
+#define SXE_TX_DESC_TSTAMP    0x00080000 
+#define SXE_TX_DESC_FLAGS     (SXE_TX_DESC_TYPE_DATA | \
+				SXE_TX_DESC_IFCS | \
+				SXE_TX_DESC_DEXT| \
+				SXE_TX_DESC_EOP_MASK)
+#define SXE_TXD_DTYP_CTXT     0x00200000 
+#define SXE_TXD_DCMD_TSE      0x80000000 
+#define SXE_TXD_MAC_LINKSEC   0x00040000 
+#define SXE_TXD_MAC_1588      0x00080000 
+#define SXE_TX_DESC_PAYLEN_SHIFT     14
+#define SXE_TX_OUTERIPCS_SHIFT	17 
+
+#define SXE_TX_POPTS_IXSM   0x01
+#define SXE_TX_POPTS_TXSM   0x02
+#define SXE_TXD_POPTS_SHIFT 8  
+#define SXE_TXD_POPTS_IXSM  (SXE_TX_POPTS_IXSM << SXE_TXD_POPTS_SHIFT)
+#define SXE_TXD_POPTS_TXSM  (SXE_TX_POPTS_TXSM << SXE_TXD_POPTS_SHIFT)
+#define SXE_TXD_POPTS_IPSEC (0x00000400)
+
+#define SXE_TX_CTXTD_DTYP_CTXT      0x00200000 
+#define SXE_TX_CTXTD_TUCMD_IPV6     0x00000000 
+#define SXE_TX_CTXTD_TUCMD_IPV4     0x00000400 
+#define SXE_TX_CTXTD_TUCMD_L4T_UDP  0x00000000 
+#define SXE_TX_CTXTD_TUCMD_L4T_TCP  0x00000800 
+#define SXE_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000 
+#define SXE_TX_CTXTD_TUCMD_L4T_RSV  0x00001800 
+#define SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP   0x00002000 
+#define SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 
+
+#define SXE_TX_CTXTD_L4LEN_SHIFT          8  
+#define SXE_TX_CTXTD_MSS_SHIFT            16 
+#define SXE_TX_CTXTD_MACLEN_SHIFT         9  
+#define SXE_TX_CTXTD_VLAN_SHIFT           16
+#define SXE_TX_CTXTD_VLAN_MASK            0xffff0000
+#define SXE_TX_CTXTD_MACLEN_MASK          0x0000fE00
+#define SXE_TX_CTXTD_OUTER_IPLEN_SHIFT    16 
+#define SXE_TX_CTXTD_TUNNEL_LEN_SHIFT     24 
+
+#define SXE_VLAN_TAG_SIZE     4
+
+#define SXE_RSS_KEY_SIZE                (40)  
+#define SXE_MAX_RSS_KEY_ENTRIES		(10)  
+#define SXE_MAX_RETA_ENTRIES            (128) 
+
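+/* SYSTIME increment layout: increment period in bits [31:24], ns step
+ * from bit 8 up, sub-ns step in the low bits.
+ */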
+#define SXE_TIMINC_IV_NS_SHIFT  8
+#define SXE_TIMINC_INCPD_SHIFT  24
+#define SXE_TIMINC_SET(incpd, iv_ns, iv_sns)   \
+	(((incpd) << SXE_TIMINC_INCPD_SHIFT) | \
+	((iv_ns) << SXE_TIMINC_IV_NS_SHIFT) | (iv_sns))
+
+#define PBA_STRATEGY_EQUAL       (0)    
+#define PBA_STRATEGY_WEIGHTED    (1)	
+#define SXE_PKG_BUF_NUM_MAX               (8)
+#define SXE_HW_TXRX_RING_NUM_MAX 128
+#define SXE_VMDQ_DCB_NUM_QUEUES  SXE_HW_TXRX_RING_NUM_MAX
+#define SXE_RX_PKT_BUF_SIZE      (512)
+
+#define SXE_UC_ENTRY_NUM_MAX   128
+#define SXE_HW_TX_NONE_MODE_Q_NUM 64
+
+#define SXE_MBX_MSG_NUM    16
+#define SXE_MBX_RETRY_INTERVAL   500
+#define SXE_MBX_RETRY_COUNT      2000
+
+#define SXE_VF_UC_ENTRY_NUM_MAX 10
+#define SXE_VF_MC_ENTRY_NUM_MAX 30
+
+#define SXE_UTA_ENTRY_NUM_MAX   128
+#define SXE_MTA_ENTRY_NUM_MAX   128
+#define SXE_HASH_UC_NUM_MAX   4096 
+
+#define  SXE_MAC_ADDR_EXTRACT_MASK  (0xFFF) 
+#define  SXE_MAC_ADDR_SHIFT         (5)     
+#define  SXE_MAC_ADDR_REG_MASK      (0x7F)  
+#define  SXE_MAC_ADDR_BIT_MASK      (0x1F)  
+
+#define  SXE_VFT_TBL_SIZE          (128)   
+#define  SXE_VLAN_ID_SHIFT         (5)     
+#define  SXE_VLAN_ID_REG_MASK      (0x7F)  
+#define  SXE_VLAN_ID_BIT_MASK      (0x1F)  
+
+#define SXE_TX_PBSIZE_MAX    0x00028000 
+#define SXE_TX_PKT_SIZE_MAX  0xA        
+#define SXE_NODCB_TX_PKT_SIZE_MAX 0x14 
+#define SXE_RING_ENABLE_WAIT_LOOP 10
+
+#define VFTA_BLOCK_SIZE 		8
+#define VF_BLOCK_BITS 			(32)
+#define SXE_MAX_MAC_HDR_LEN		127
+#define SXE_MAX_NETWORK_HDR_LEN		511
+#define SXE_MAC_ADDR_LEN		6
+
+#define SXE_FNAV_BUCKET_HASH_KEY    0x3DAD14E2
+#define SXE_FNAV_SAMPLE_HASH_KEY 0x174D3614
+#define SXE_SAMPLE_COMMON_HASH_KEY \
+		(SXE_FNAV_BUCKET_HASH_KEY & SXE_FNAV_SAMPLE_HASH_KEY)
+
+#define SXE_SAMPLE_HASH_MASK		0x7fff
+#define SXE_SAMPLE_L4TYPE_MASK		0x3
+#define SXE_SAMPLE_L4TYPE_UDP		0x1
+#define SXE_SAMPLE_L4TYPE_TCP		0x2
+#define SXE_SAMPLE_L4TYPE_SCTP		0x3
+#define SXE_SAMPLE_L4TYPE_IPV6_MASK	0x4
+#define SXE_SAMPLE_L4TYPE_TUNNEL_MASK	0x10
+#define SXE_SAMPLE_FLOW_TYPE_MASK	0xF
+
+#define SXE_SAMPLE_VM_POOL_MASK		0x7F
+#define SXE_SAMPLE_VLAN_MASK		0xEFFF
+#define SXE_SAMPLE_FLEX_BYTES_MASK	0xFFFF
+
+#define SXE_FNAV_INIT_DONE_POLL               10
+#define SXE_FNAV_DROP_QUEUE                   127
+
+#define MAX_TRAFFIC_CLASS        8
+#define DEF_TRAFFIC_CLASS        1
+
+#define SXE_LINK_SPEED_UNKNOWN   0
+#define SXE_LINK_SPEED_10_FULL   0x0002
+#define SXE_LINK_SPEED_100_FULL  0x0008
+#define SXE_LINK_SPEED_1GB_FULL  0x0020
+#define SXE_LINK_SPEED_10GB_FULL 0x0080
+
+typedef u32 sxe_link_speed;
+#ifdef SXE_TEST
+#define SXE_LINK_MBPS_SPEED_DEFAULT 1000
+#else
+#define SXE_LINK_MBPS_SPEED_DEFAULT 10000
+#endif
+
+#define SXE_LINK_MBPS_SPEED_MIN   (10)
+
+enum sxe_rss_ip_version {
+	SXE_RSS_IP_VER_4 = 4,
+	SXE_RSS_IP_VER_6 = 6,
+};
+
+enum sxe_fnav_mode {
+	SXE_FNAV_SAMPLE_MODE = 1,
+	SXE_FNAV_SPECIFIC_MODE	= 2,
+};
+
+enum sxe_sample_type {
+	SXE_SAMPLE_FLOW_TYPE_IPV4   = 0x0,
+	SXE_SAMPLE_FLOW_TYPE_UDPV4  = 0x1,
+	SXE_SAMPLE_FLOW_TYPE_TCPV4  = 0x2,
+	SXE_SAMPLE_FLOW_TYPE_SCTPV4 = 0x3,
+	SXE_SAMPLE_FLOW_TYPE_IPV6   = 0x4,
+	SXE_SAMPLE_FLOW_TYPE_UDPV6  = 0x5,
+	SXE_SAMPLE_FLOW_TYPE_TCPV6  = 0x6,
+	SXE_SAMPLE_FLOW_TYPE_SCTPV6 = 0x7,
+};
+
+enum {
+	SXE_DIAG_TEST_PASSED                = 0,
+	SXE_DIAG_TEST_BLOCKED               = 1,
+	SXE_DIAG_STATS_REG_TEST_ERR         = 2,
+	SXE_DIAG_REG_PATTERN_TEST_ERR       = 3,
+	SXE_DIAG_CHECK_REG_TEST_ERR         = 4,
+	SXE_DIAG_DISABLE_IRQ_TEST_ERR       = 5,
+	SXE_DIAG_ENABLE_IRQ_TEST_ERR        = 6,
+	SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR = 7,
+	SXE_DIAG_TX_RING_CONFIGURE_ERR      = 8,
+	SXE_DIAG_RX_RING_CONFIGURE_ERR      = 9,
+	SXE_DIAG_ALLOC_SKB_ERR              = 10,
+	SXE_DIAG_LOOPBACK_SEND_TEST_ERR     = 11,
+	SXE_DIAG_LOOPBACK_RECV_TEST_ERR     = 12,
+};
+
+#define SXE_RXD_STAT_DD       0x01    
+#define SXE_RXD_STAT_EOP      0x02    
+#define SXE_RXD_STAT_FLM      0x04    
+#define SXE_RXD_STAT_VP       0x08    
+#define SXE_RXDADV_NEXTP_MASK   0x000FFFF0 
+#define SXE_RXDADV_NEXTP_SHIFT  0x00000004
+#define SXE_RXD_STAT_UDPCS    0x10    
+#define SXE_RXD_STAT_L4CS     0x20    
+#define SXE_RXD_STAT_IPCS     0x40    
+#define SXE_RXD_STAT_PIF      0x80    
+#define SXE_RXD_STAT_CRCV     0x100   
+#define SXE_RXD_STAT_OUTERIPCS  0x100 
+#define SXE_RXD_STAT_VEXT     0x200   
+#define SXE_RXD_STAT_UDPV     0x400   
+#define SXE_RXD_STAT_DYNINT   0x800   
+#define SXE_RXD_STAT_LLINT    0x800   
+#define SXE_RXD_STAT_TSIP     0x08000 
+#define SXE_RXD_STAT_TS       0x10000 
+#define SXE_RXD_STAT_SECP     0x20000 
+#define SXE_RXD_STAT_LB       0x40000 
+#define SXE_RXD_STAT_ACK      0x8000  
+#define SXE_RXD_ERR_CE        0x01    
+#define SXE_RXD_ERR_LE        0x02    
+#define SXE_RXD_ERR_PE        0x08    
+#define SXE_RXD_ERR_OSE       0x10    
+#define SXE_RXD_ERR_USE       0x20    
+#define SXE_RXD_ERR_TCPE      0x40    
+#define SXE_RXD_ERR_IPE       0x80    
+#define SXE_RXDADV_ERR_MASK           0xfff00000 
+#define SXE_RXDADV_ERR_SHIFT          20         
+#define SXE_RXDADV_ERR_OUTERIPER	0x04000000 
+#define SXE_RXDADV_ERR_FCEOFE         0x80000000 
+#define SXE_RXDADV_ERR_FCERR          0x00700000 
+#define SXE_RXDADV_ERR_FNAV_LEN       0x00100000 
+#define SXE_RXDADV_ERR_FNAV_DROP      0x00200000 
+#define SXE_RXDADV_ERR_FNAV_COLL      0x00400000 
+#define SXE_RXDADV_ERR_HBO    0x00800000 
+#define SXE_RXDADV_ERR_CE     0x01000000 
+#define SXE_RXDADV_ERR_LE     0x02000000 
+#define SXE_RXDADV_ERR_PE     0x08000000 
+#define SXE_RXDADV_ERR_OSE    0x10000000 
+#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL  0x08000000 
+#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH    0x10000000 
+#define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED   0x18000000
+#define SXE_RXDADV_ERR_USE    0x20000000 
+#define SXE_RXDADV_ERR_L4E    0x40000000 
+#define SXE_RXDADV_ERR_IPE    0x80000000 
+#define SXE_RXD_VLAN_ID_MASK  0x0FFF  
+#define SXE_RXD_PRI_MASK      0xE000  
+#define SXE_RXD_PRI_SHIFT     13
+#define SXE_RXD_CFI_MASK      0x1000  
+#define SXE_RXD_CFI_SHIFT     12
+#define SXE_RXDADV_LROCNT_MASK        0x001E0000
+#define SXE_RXDADV_LROCNT_SHIFT       17
+
+#define SXE_RXDADV_STAT_DD            SXE_RXD_STAT_DD  
+#define SXE_RXDADV_STAT_EOP           SXE_RXD_STAT_EOP 
+#define SXE_RXDADV_STAT_FLM           SXE_RXD_STAT_FLM 
+#define SXE_RXDADV_STAT_VP            SXE_RXD_STAT_VP  
+#define SXE_RXDADV_STAT_MASK          0x000fffff 
+#define SXE_RXDADV_STAT_TS		0x00010000 
+#define SXE_RXDADV_STAT_SECP		0x00020000 
+
+#define SXE_RXDADV_PKTTYPE_NONE       0x00000000
+#define SXE_RXDADV_PKTTYPE_IPV4       0x00000010 
+#define SXE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 
+#define SXE_RXDADV_PKTTYPE_IPV6       0x00000040 
+#define SXE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 
+#define SXE_RXDADV_PKTTYPE_TCP        0x00000100 
+#define SXE_RXDADV_PKTTYPE_UDP        0x00000200 
+#define SXE_RXDADV_PKTTYPE_SCTP       0x00000400 
+#define SXE_RXDADV_PKTTYPE_NFS        0x00000800 
+#define SXE_RXDADV_PKTTYPE_VXLAN      0x00000800 
+#define SXE_RXDADV_PKTTYPE_TUNNEL     0x00010000 
+#define SXE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 
+#define SXE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 
+#define SXE_RXDADV_PKTTYPE_LINKSEC    0x00004000 
+#define SXE_RXDADV_PKTTYPE_ETQF       0x00008000 
+#define SXE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 
+#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4          
+
+struct sxe_mac_stats {
+	u64 crcerrs;           
+	u64 errbc;             
+	u64 rlec;              
+	u64 prc64;             
+	u64 prc127;            
+	u64 prc255;            
+	u64 prc511;            
+	u64 prc1023;           
+	u64 prc1522;           
+	u64 gprc;              
+	u64 bprc;              
+	u64 mprc;              
+	u64 gptc;              
+	u64 gorc;              
+	u64 gotc;              
+	u64 ruc;               
+	u64 rfc;               
+	u64 roc;               
+	u64 rjc;               
+	u64 tor;               
+	u64 tpr;               
+	u64 tpt;               
+	u64 ptc64;             
+	u64 ptc127;            
+	u64 ptc255;            
+	u64 ptc511;            
+	u64 ptc1023;           
+	u64 ptc1522;           
+	u64 mptc;              
+	u64 bptc;              
+	u64 qprc[16];          
+	u64 qptc[16];          
+	u64 qbrc[16];          
+	u64 qbtc[16];          
+	u64 qprdc[16];         
+	u64 dburxtcin[8];      
+	u64 dburxtcout[8];     
+	u64 dburxgdreecnt[8];  
+	u64 dburxdrofpcnt[8];  
+	u64 dbutxtcin[8];      
+	u64 dbutxtcout[8];     
+	u64 rxdgpc;            
+	u64 rxdgbc;            
+	u64 rxddpc;            
+	u64 rxddbc;            
+	u64 rxtpcing;          
+	u64 rxtpceng;          
+	u64 rxlpbkpc;          
+	u64 rxlpbkbc;          
+	u64 rxdlpbkpc;         
+	u64 rxdlpbkbc;         
+	u64 prddc;             
+	u64 txdgpc;            
+	u64 txdgbc;            
+	u64 txswerr;           
+	u64 txswitch;          
+	u64 txrepeat;          
+	u64 txdescerr;         
+
+	u64 fnavadd;           
+	u64 fnavrmv;           
+	u64 fnavadderr;        
+	u64 fnavrmverr;        
+	u64 fnavmatch;         
+	u64 fnavmiss;          
+	u64 hw_rx_no_dma_resources; 
+	u64 prcpf[8];          
+	u64 pfct[8];           
+	u64 mpc[8];            
+
+	u64 total_tx_pause;    
+	u64 total_gptc;        
+	u64 total_gotc;        
+};
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+enum sxe_fivetuple_protocol {
+	SXE_FILTER_PROTOCOL_TCP = 0,
+	SXE_FILTER_PROTOCOL_UDP,
+	SXE_FILTER_PROTOCOL_SCTP,
+	SXE_FILTER_PROTOCOL_NONE,
+};
+
+struct sxe_fivetuple_filter_info {
+	u32 src_ip;
+	u32 dst_ip;
+	u16 src_port;
+	u16 dst_port;
+	enum sxe_fivetuple_protocol protocol;
+	u8 priority;
+	u8 src_ip_mask:1,
+	   dst_ip_mask:1,
+	   src_port_mask:1,
+	   dst_port_mask:1,
+	   proto_mask:1;
+};
+
+struct sxe_fivetuple_node_info {
+	u16 index;  
+	u16 queue;  
+	struct sxe_fivetuple_filter_info filter_info;
+};
+#endif
+
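+/* Flow-director style n-tuple rule layout; fast_access aliases the tuple
+ * fields so rule data can be walked a dword at a time (e.g. for hashing).
+ */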
+union sxe_fnav_rule_info {
+	struct {
+		u8     vm_pool;
+		u8     flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} ntuple;
+	__be32 fast_access[11];
+};
+
+union sxe_sample_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+void sxe_hw_ops_init(struct sxe_hw *hw);
+
+struct sxe_reg_info {
+	u32 addr;        
+	u32 count;       
+	u32 stride;      
+	const s8 *name;  
+};
+
+struct sxe_setup_operations {
+	s32  (*reset)(struct sxe_hw *);
+	void (*pf_rst_done_set)(struct sxe_hw *);
+	void (*no_snoop_disable)(struct sxe_hw *);
+	u32  (*reg_read)(struct sxe_hw *, u32);
+	void (*reg_write)(struct sxe_hw *, u32, u32);
+	void (*regs_dump)(struct sxe_hw *);
+	void (*regs_flush)(struct sxe_hw *);
+	s32  (*regs_test)(struct sxe_hw *);
+};
+
+struct sxe_hw_setup {
+	const struct sxe_setup_operations *ops;
+};
+
+struct sxe_irq_operations {
+	u32  (*pending_irq_read_clear)(struct sxe_hw *hw);
+	void (*pending_irq_write_clear)(struct sxe_hw *hw, u32 value);
+	void (*irq_general_reg_set)(struct sxe_hw *hw, u32 value);
+	u32  (*irq_general_reg_get)(struct sxe_hw *hw);
+	void (*ring_irq_auto_disable)(struct sxe_hw *hw, bool is_misx);
+	void (*set_eitrsel)(struct sxe_hw *hw, u32 value);
+	void (*ring_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx, u32 interval);
+	void (*event_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx, u32 value);
+	void (*event_irq_auto_clear_set)(struct sxe_hw *hw, u32 value);
+	void (*ring_irq_map)(struct sxe_hw *hw, bool is_tx,
+				u16 reg_idx, u16 irq_idx);
+	void (*event_irq_map)(struct sxe_hw *hw, u8 offset, u16 irq_idx);
+	void (*ring_irq_enable)(struct sxe_hw *hw, u64 qmask);
+	u32  (*irq_cause_get)(struct sxe_hw *hw);
+	void (*event_irq_trigger)(struct sxe_hw *hw);
+	void (*ring_irq_trigger)(struct sxe_hw *hw, u64 eics);
+	void (*specific_irq_disable)(struct sxe_hw *hw, u32 value);
+	void (*specific_irq_enable)(struct sxe_hw *hw, u32 value);
+	void (*all_irq_disable)(struct sxe_hw *hw);
+	void (*spp_configure)(struct sxe_hw *hw, u32 value);
+	s32  (*irq_test)(struct sxe_hw *hw, u32 *icr, bool shared);
+};
+
+struct sxe_irq_info {
+	const struct sxe_irq_operations *ops;
+};
+
+struct sxe_mac_operations {
+	bool (*link_up_1g_check)(struct sxe_hw *);
+	bool (*link_state_is_up)(struct sxe_hw *);
+	u32  (*link_speed_get)(struct sxe_hw *);
+	void (*link_speed_set)(struct sxe_hw *, u32 speed);
+	void (*pad_enable)(struct sxe_hw *);
+	s32  (*fc_enable)(struct sxe_hw *);
+	void (*crc_configure)(struct sxe_hw *);
+	void (*loopback_switch)(struct sxe_hw *, bool);
+	void (*txrx_enable)(struct sxe_hw *hw);
+	void (*max_frame_set)(struct sxe_hw *, u32);
+	u32  (*max_frame_get)(struct sxe_hw *);
+	void (*fc_autoneg_localcap_set)(struct sxe_hw *);
+	void (*fc_tc_high_water_mark_set)(struct sxe_hw *, u8, u32);
+	void (*fc_tc_low_water_mark_set)(struct sxe_hw *, u8, u32);
+	void (*fc_param_init)(struct sxe_hw *);
+	enum sxe_fc_mode (*fc_current_mode_get)(struct sxe_hw *);
+	enum sxe_fc_mode (*fc_requested_mode_get)(struct sxe_hw *);
+	void (*fc_requested_mode_set)(struct sxe_hw *, enum sxe_fc_mode);
+	bool (*is_fc_autoneg_disabled)(struct sxe_hw *);
+	void (*fc_autoneg_disable_set)(struct sxe_hw *, bool);
+};
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+
+struct sxe_mac_info {
+	const struct sxe_mac_operations	*ops;
+	u8   flags;
+	bool set_lben;
+	bool auto_restart;
+};
+
+struct sxe_filter_mac_operations {
+	u32 (*rx_mode_get)(struct sxe_hw *);
+	void (*rx_mode_set)(struct sxe_hw *, u32);
+	u32 (*pool_rx_mode_get)(struct sxe_hw *, u16);
+	void (*pool_rx_mode_set)(struct sxe_hw *, u32, u16);
+	void (*rx_lro_enable)(struct sxe_hw *, bool);
+	void (*rx_udp_frag_checksum_disable)(struct sxe_hw *);
+	s32  (*uc_addr_add)(struct sxe_hw *, u32, u8 *, u32);
+	s32  (*uc_addr_del)(struct sxe_hw *, u32);
+	void (*uc_addr_clear)(struct sxe_hw *);
+	void (*mta_hash_table_set)(struct sxe_hw *hw, u8 index, u32 value);
+	void (*mta_hash_table_update)(struct sxe_hw *hw, u8 reg_idx, u8 bit_idx);
+	void (*fc_mac_addr_set)(struct sxe_hw *hw, u8 *mac_addr);
+
+	void (*mc_filter_enable)(struct sxe_hw *);
+
+	void (*mc_filter_disable)(struct sxe_hw *hw);
+
+	void (*rx_nfs_filter_disable)(struct sxe_hw *);
+	void (*ethertype_filter_set)(struct sxe_hw *, u8, u32);
+
+	void (*vt_ctrl_configure)(struct sxe_hw *hw, u8 num_vfs);
+
+#ifdef SXE_WOL_CONFIGURE
+	void (*wol_mode_set)(struct sxe_hw *hw, u32 wol_status);
+	void (*wol_mode_clean)(struct sxe_hw *hw);
+	void (*wol_status_set)(struct sxe_hw *hw);
+#endif
+
+	void (*vt_disable)(struct sxe_hw *hw);
+
+	s32 (*uc_addr_pool_enable)(struct sxe_hw *hw, u8 rar_idx, u8 pool_idx);
+};
+
+struct sxe_filter_mac {
+	const struct sxe_filter_mac_operations *ops;
+};
+
+struct sxe_filter_vlan_operations {
+	u32 (*pool_filter_read)(struct sxe_hw *, u16);
+	void (*pool_filter_write)(struct sxe_hw *, u16, u32);
+	u32 (*pool_filter_bitmap_read)(struct sxe_hw *, u16);
+	void (*pool_filter_bitmap_write)(struct sxe_hw *, u16, u32);
+	void (*filter_array_write)(struct sxe_hw *, u16, u32);
+	u32  (*filter_array_read)(struct sxe_hw *, u16);
+	void (*filter_array_clear)(struct sxe_hw *);
+	void (*filter_switch)(struct sxe_hw *, bool);
+	void (*untagged_pkts_rcv_switch)(struct sxe_hw *, u32, bool);
+	s32  (*filter_configure)(struct sxe_hw *, u32, u32, bool, bool);
+};
+
+struct sxe_filter_vlan {
+	const struct sxe_filter_vlan_operations *ops;
+};
+
+struct sxe_filter_info {
+	struct sxe_filter_mac  mac;
+	struct sxe_filter_vlan vlan;
+};
+
+struct sxe_dbu_operations {
+	void (*rx_pkt_buf_size_configure)(struct sxe_hw *, u8, u32, u16);
+	void (*rx_pkt_buf_switch)(struct sxe_hw *, bool);
+	void (*rx_multi_ring_configure)(struct sxe_hw *, u8, bool, bool);
+	void (*rss_key_set_all)(struct sxe_hw *, u32 *);
+	void (*rss_redir_tbl_set_all)(struct sxe_hw *, u8 *);
+	void (*rx_cap_switch_on)(struct sxe_hw *);
+	void (*rss_hash_pkt_type_set)(struct sxe_hw *, u32);
+	void (*rss_hash_pkt_type_update)(struct sxe_hw *, u32);
+	void (*rss_rings_used_set)(struct sxe_hw *, u32, u16, u16);
+	void (*lro_ack_switch)(struct sxe_hw *, bool);
+	void (*vf_rx_switch)(struct sxe_hw *, u32, u32, bool);
+
+	s32  (*fnav_mode_init)(struct sxe_hw *, u32, u32);
+	s32  (*fnav_specific_rule_mask_set)(struct sxe_hw *,
+						union sxe_fnav_rule_info *);
+	s32  (*fnav_specific_rule_add)(struct sxe_hw *,
+						union sxe_fnav_rule_info *,
+						u16, u8);
+	s32  (*fnav_specific_rule_del)(struct sxe_hw *,
+					  union sxe_fnav_rule_info *, u16);
+	s32  (*fnav_sample_hash_cmd_get)(struct sxe_hw *,
+						u8, u32, u8, u64 *);
+	void (*fnav_sample_stats_reinit)(struct sxe_hw *hw);
+	void (*fnav_sample_hash_set)(struct sxe_hw *hw, u64 hash);
+	s32  (*fnav_single_sample_rule_del)(struct sxe_hw *, u32);
+
+	void (*ptp_init)(struct sxe_hw *);
+	void (*ptp_freq_adjust)(struct sxe_hw *, u32);
+	void (*ptp_systime_init)(struct sxe_hw *);
+	u64  (*ptp_systime_get)(struct sxe_hw *);
+	void (*ptp_tx_timestamp_get)(struct sxe_hw *, u32 *ts_sec, u32 *ts_ns);
+	void (*ptp_timestamp_mode_set)(struct sxe_hw *, bool, u32, u32);
+	void (*ptp_rx_timestamp_clear)(struct sxe_hw *);
+	u64  (*ptp_rx_timestamp_get)(struct sxe_hw *);
+	bool (*ptp_is_rx_timestamp_valid)(struct sxe_hw *);
+	void (*ptp_timestamp_enable)(struct sxe_hw *);
+
+	void (*tx_pkt_buf_switch)(struct sxe_hw *, bool);
+
+	void (*dcb_tc_rss_configure)(struct sxe_hw *hw, u16 rss_i);
+
+	void (*tx_pkt_buf_size_configure)(struct sxe_hw *, u8);
+
+	void (*rx_cap_switch_off)(struct sxe_hw *);
+	u32  (*rx_pkt_buf_size_get)(struct sxe_hw *, u8);
+	void (*rx_func_switch_on)(struct sxe_hw *hw);
+
+	void (*tx_ring_disable)(struct sxe_hw *, u8, unsigned long);
+	void (*rx_ring_disable)(struct sxe_hw *, u8, unsigned long);
+
+	u32  (*tx_dbu_fc_status_get)(struct sxe_hw *hw);
+};
+
+struct sxe_dbu_info {
+	const struct sxe_dbu_operations	*ops;
+};
+
+struct sxe_dma_operations {
+	void (*rx_dma_ctrl_init)(struct sxe_hw *, bool);
+	void (*rx_ring_disable)(struct sxe_hw *, u8);
+	void (*rx_ring_switch)(struct sxe_hw *, u8, bool);
+	void (*rx_ring_switch_not_polling)(struct sxe_hw *, u8, bool);
+	void (*rx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8);
+	void (*rx_desc_thresh_set)(struct sxe_hw *, u8);
+	void (*rx_rcv_ctl_configure)(struct sxe_hw *, u8, u32, u32);
+	void (*rx_lro_ctl_configure)(struct sxe_hw *, u8, u32);
+	u32  (*rx_desc_ctrl_get)(struct sxe_hw *, u8);
+	void (*rx_dma_lro_ctl_set)(struct sxe_hw *);
+	void (*rx_drop_switch)(struct sxe_hw *, u8, bool);
+	void (*rx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);
+
+	void (*tx_enable)(struct sxe_hw *);
+	void (*tx_multi_ring_configure)(struct sxe_hw *, u8, u16, bool, u16);
+	void (*tx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8);
+	void (*tx_desc_thresh_set)(struct sxe_hw *, u8, u32, u32, u32);
+	void (*tx_ring_switch)(struct sxe_hw *, u8, bool);
+	void (*tx_ring_switch_not_polling)(struct sxe_hw *, u8, bool);
+	void (*tx_pkt_buf_thresh_configure)(struct sxe_hw *, u8, bool);
+	u32  (*tx_desc_ctrl_get)(struct sxe_hw *, u8);
+	void (*tx_ring_info_get)(struct sxe_hw *, u8, u32 *, u32 *);
+	void (*tx_desc_wb_thresh_clear)(struct sxe_hw *, u8);
+
+	void (*vlan_tag_strip_switch)(struct sxe_hw *, u16, bool);
+	void (*tx_vlan_tag_set)(struct sxe_hw *, u16, u16, u32);
+	void (*tx_vlan_tag_clear)(struct sxe_hw *, u32);
+	void (*tx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);
+
+	void (*tph_switch)(struct sxe_hw *hw, bool is_enable);
+
+	void  (*dcb_rx_bw_alloc_configure)(struct sxe_hw *hw,
+				      u16 *refill,
+				      u16 *max,
+				      u8 *bwg_id,
+				      u8 *prio_type,
+				      u8 *prio_tc,
+				      u8 max_priority);
+	void  (*dcb_tx_desc_bw_alloc_configure)(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type);
+	void  (*dcb_tx_data_bw_alloc_configure)(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type,
+					   u8 *prio_tc,
+					   u8 max_priority);
+	void  (*dcb_pfc_configure)(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc,
+				  u8 max_priority);
+	void  (*dcb_tc_stats_configure)(struct sxe_hw *hw);
+	void (*dcb_rx_up_tc_map_set)(struct sxe_hw *hw, u8 tc);
+	void (*dcb_rx_up_tc_map_get)(struct sxe_hw *hw, u8 *map);
+	void (*dcb_rate_limiter_clear)(struct sxe_hw *hw, u8 ring_max);
+
+	void (*vt_pool_loopback_switch)(struct sxe_hw *hw, bool is_enable);
+	u32 (*rx_pool_get)(struct sxe_hw *hw, u8 reg_idx);
+	u32 (*tx_pool_get)(struct sxe_hw *hw, u8 reg_idx);
+	void (*tx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap);
+	void (*rx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap);
+
+	void (*vf_tx_desc_addr_clear)(struct sxe_hw *hw, u8 vf_idx, u8 ring_per_pool);
+	void (*pool_mac_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status);
+	void (*pool_vlan_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status);
+	void (*spoof_count_enable)(struct sxe_hw *hw, u8 reg_idx, u8 bit_index);
+	void (*pool_rx_ring_drop_enable)(struct sxe_hw *hw, u8 vf_idx,
+					u16 pf_vlan, u8 ring_per_pool);
+
+	void (*max_dcb_memory_window_set)(struct sxe_hw *hw, u32 value);
+	void (*dcb_tx_ring_rate_factor_set)(struct sxe_hw *hw, u32 ring_idx, u32 rate);
+
+	void (*vf_tx_ring_disable)(struct sxe_hw *hw, u8 ring_per_pool, u8 vf_idx);
+	void (*all_ring_disable)(struct sxe_hw *hw, u32 ring_max);
+	void (*tx_ring_tail_init)(struct sxe_hw *hw, u8 reg_idx);
+};
+
+struct sxe_dma_info {
+	const struct sxe_dma_operations	*ops;
+};
+
+struct sxe_sec_operations {
+	void (*ipsec_rx_ip_store)(struct sxe_hw *hw, __be32 *ip_addr, u8 ip_len, u8 ip_idx);
+	void (*ipsec_rx_spi_store)(struct sxe_hw *hw, __be32 spi, u8 ip_idx, u16 idx);
+	void (*ipsec_rx_key_store)(struct sxe_hw *hw, u32 *key,  u8 key_len, u32 salt, u32 mode, u16 idx);
+	void (*ipsec_tx_key_store)(struct sxe_hw *hw, u32 *key,  u8 key_len, u32 salt, u16 idx);
+	void (*ipsec_sec_data_stop)(struct sxe_hw *hw, bool is_linkup);
+	void (*ipsec_engine_start)(struct sxe_hw *hw, bool is_linkup);
+	void (*ipsec_engine_stop)(struct sxe_hw *hw, bool is_linkup);
+	bool (*ipsec_offload_is_disable)(struct sxe_hw *hw);
+	void (*ipsec_sa_disable)(struct sxe_hw *hw);
+};
+
+struct sxe_sec_info {
+	const struct sxe_sec_operations *ops;
+};
+
+struct sxe_stat_operations {
+	void (*stats_clear)(struct sxe_hw *);
+	void (*stats_get)(struct sxe_hw *, struct sxe_mac_stats *);
+
+	u32 (*tx_packets_num_get)(struct sxe_hw *hw);
+	u32 (*unsecurity_packets_num_get)(struct sxe_hw *hw);
+	u32  (*mac_stats_dump)(struct sxe_hw *, u32 *, u32);
+	u32  (*tx_dbu_to_mac_stats)(struct sxe_hw *hw);
+};
+
+struct sxe_stat_info {
+	const struct sxe_stat_operations	*ops;
+};
+
+struct sxe_mbx_operations {
+	void (*init)(struct sxe_hw *hw);
+
+	s32 (*msg_send)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index);
+	s32 (*msg_rcv)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index);
+
+	bool (*req_check)(struct sxe_hw *hw, u8 vf_idx);
+	bool (*ack_check)(struct sxe_hw *hw, u8 vf_idx);
+	bool (*rst_check)(struct sxe_hw *hw, u8 vf_idx);
+
+	void (*mbx_mem_clear)(struct sxe_hw *hw, u8 vf_idx);
+};
+
+struct sxe_mbx_stats {
+	u32 send_msgs; /* mailbox messages sent */
+	u32 rcv_msgs;  /* mailbox messages received */
+
+	u32 reqs;      /* mailbox requests seen */
+	u32 acks;      /* mailbox acks seen */
+	u32 rsts;      /* mailbox resets seen */
+};
+
+struct sxe_mbx_info {
+	const struct sxe_mbx_operations *ops;
+	struct sxe_mbx_stats stats;
+	u32 retry;    /* max send retries */
+	u32 interval; /* retry interval */
+	u32 msg_len;  /* mailbox message length */
+};
+
+struct sxe_pcie_operations {
+	void (*vt_mode_set)(struct sxe_hw *hw, u32 value);
+};
+
+struct sxe_pcie_info {
+	const struct sxe_pcie_operations *ops; 
+};
+
+enum sxe_hw_state {
+	SXE_HW_STOP,
+	SXE_HW_FAULT,
+};
+
+enum sxe_fc_mode {
+	SXE_FC_NONE = 0,
+	SXE_FC_RX_PAUSE,
+	SXE_FC_TX_PAUSE,
+	SXE_FC_FULL,
+	SXE_FC_DEFAULT,
+};
+
+struct sxe_fc_info {
+	u32 high_water[MAX_TRAFFIC_CLASS]; /* per-TC flow ctrl high watermark */
+	u32 low_water[MAX_TRAFFIC_CLASS];  /* per-TC flow ctrl low watermark */
+	u16 pause_time;                    /* pause frame quanta */
+	bool strict_ieee;                  /* strict IEEE mode */
+	bool disable_fc_autoneg;           /* disable flow ctrl auto-negotiation */
+	u16 send_xon;                      /* send XON frames */
+	enum sxe_fc_mode current_mode;     /* flow ctrl mode in effect */
+	enum sxe_fc_mode requested_mode;   /* flow ctrl mode requested by user */
+};
+
+struct sxe_fc_nego_mode {
+	u32 adv_sym; /* advertised symmetric pause */
+	u32 adv_asm; /* advertised asymmetric pause */
+	u32 lp_sym;  /* link partner symmetric pause */
+	u32 lp_asm;  /* link partner asymmetric pause */
+};
+
+struct sxe_hdc_operations {
+	s32 (*pf_lock_get)(struct sxe_hw *, u32);
+	void (*pf_lock_release)(struct sxe_hw *, u32);
+	bool (*is_fw_over_set)(struct sxe_hw *);
+	u32 (*fw_ack_header_rcv)(struct sxe_hw *);
+	void (*packet_send_done)(struct sxe_hw *);
+	void (*packet_header_send)(struct sxe_hw *, u32);
+	void (*packet_data_dword_send)(struct sxe_hw *, u16, u32);
+	u32  (*packet_data_dword_rcv)(struct sxe_hw *, u16);
+	u32 (*fw_status_get)(struct sxe_hw *);
+	void (*drv_status_set)(struct sxe_hw *, u32);
+	u32 (*irq_event_get)(struct sxe_hw *);
+	void (*irq_event_clear)(struct sxe_hw *, u32);
+	void (*fw_ov_clear)(struct sxe_hw *);
+	u32 (*channel_state_get)(struct sxe_hw *);
+	void (*resource_clean)(struct sxe_hw *);
+};
+
+struct sxe_hdc_info {
+	u32 pf_lock_val;
+	const struct sxe_hdc_operations	*ops;
+};
+
+struct sxe_phy_operations {
+	s32 (*reg_write)(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 phy_data);
+	s32 (*reg_read)(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 *phy_data);
+	s32 (*identifier_get)(struct sxe_hw *hw, u32 prtad, u32 *id);
+	s32 (*link_cap_get)(struct sxe_hw *hw, u32 prtad, u32 *speed);
+	s32 (*reset)(struct sxe_hw *hw, u32 prtad);
+};
+
+struct sxe_phy_reg_info {
+	const struct sxe_phy_operations	*ops;
+};
+
+struct sxe_hw {
+	u8 __iomem *reg_base_addr; /* mapped BAR register base */
+
+	void *adapter;
+	void *priv;
+	unsigned long state; /* enum sxe_hw_state bits */
+	void (*fault_handle)(void *priv);
+	u32 (*reg_read)(const volatile void *reg);
+	void (*reg_write)(u32 value, volatile void *reg);
+
+	struct sxe_hw_setup  setup;
+	struct sxe_irq_info  irq;
+	struct sxe_mac_info  mac;
+	struct sxe_filter_info filter;
+	struct sxe_dbu_info  dbu;
+	struct sxe_dma_info  dma;
+	struct sxe_sec_info  sec;
+	struct sxe_stat_info stat;
+	struct sxe_fc_info   fc;
+
+	struct sxe_mbx_info mbx;
+	struct sxe_pcie_info pcie;
+	struct sxe_hdc_info  hdc;
+	struct sxe_phy_reg_info phy;
+};
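+
+/*
+ * Illustrative sketch (not taken from the driver itself): every hardware
+ * sub-module is reached through its const ops table, so callers never
+ * touch registers directly. A wrapper is expected to look roughly like:
+ *
+ *	static inline void sxe_rx_ring_on(struct sxe_hw *hw, u8 reg_idx)
+ *	{
+ *		hw->dma.ops->rx_ring_switch(hw, reg_idx, true);
+ *	}
+ *
+ * with all ops pointers filled in beforehand by sxe_hw_ops_init().
+ */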
+
+u16 sxe_mac_reg_num_get(void);
+
+void sxe_hw_fault_handle(struct sxe_hw *hw);
+
+bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw);
+
+void sxe_hw_ops_init(struct sxe_hw *hw);
+
+u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx);
+
+bool sxe_hw_is_rss_enabled(struct sxe_hw *hw);
+
+u32 sxe_hw_rss_field_get(struct sxe_hw *hw);
+
+static inline bool sxe_is_hw_fault(struct sxe_hw *hw)
+{
+	return test_bit(SXE_HW_FAULT, &hw->state);
+}
+
+static inline void sxe_hw_fault_handle_init(struct sxe_hw *hw,
+			void (*handle)(void *), void *priv)
+{
+	hw->priv = priv;
+	hw->fault_handle = handle;
+
+	return;
+}
+
+static inline void sxe_hw_reg_handle_init(struct sxe_hw *hw,
+		u32 (*read)(const volatile void *),
+		void (*write)(u32, volatile void *))
+{
+	hw->reg_read  = read;
+	hw->reg_write = write;
+
+	return;
+}
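+
+/*
+ * Usage sketch (illustrative; the callback names are assumptions): a PMD
+ * is expected to wire the register accessors and the fault callback
+ * before any register access, e.g. at probe time:
+ *
+ *	hw->reg_base_addr = (u8 *)pci_dev->mem_resource[0].addr;
+ *	sxe_hw_reg_handle_init(hw, sxe_read_reg, sxe_write_reg);
+ *	sxe_hw_fault_handle_init(hw, sxe_fault_cb, adapter);
+ *	sxe_hw_ops_init(hw);
+ */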
+
+#ifdef SXE_DPDK
+
+void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats);
+
+void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value);
+
+s32 sxe_hw_nic_reset(struct sxe_hw *hw);
+
+u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw);
+
+void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time);
+
+void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw);
+
+u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx);
+
+u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx);
+
+u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw);
+
+void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon);
+
+u32 sxe_hw_rx_mode_get(struct sxe_hw *hw);
+
+void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl);
+
+void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw);
+
+void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx);
+
+void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
+						u16 reg_idx, u16 irq_idx);
+
+void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
+						u16 irq_idx, u32 interval);
+
+void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_all_irq_disable(struct sxe_hw *hw);
+
+void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
+					bool is_msix);
+
+u32 sxe_hw_irq_cause_get(struct sxe_hw *hw);
+
+void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx);
+
+void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value);
+
+s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
+					u8 *addr, u32 pool_idx);
+
+s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index);
+
+u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
+				u8 reg_idx, u32 value);
+
+void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
+						u8 index, u32 value);
+
+void sxe_hw_mc_filter_enable(struct sxe_hw *hw);
+
+void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
+					u16 reg_index, u32 value);
+
+u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable);
+
+u32 sxe_hw_vlan_type_get(struct sxe_hw *hw);
+
+void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_vlan_ext_vet_write(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
+					u16 reg_index, bool is_enable);
+
+void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw);
+
+u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw);
+
+void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_pf_rst_done_set(struct sxe_hw *hw);
+
+u32 sxe_hw_all_regs_group_num_get(void);
+
+void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data);
+
+s32 sxe_hw_fc_enable(struct sxe_hw *hw);
+
+bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw);
+
+void sxe_hw_fc_status_get(struct sxe_hw *hw,
+					bool *rx_pause_on, bool *tx_pause_on);
+
+void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
+						enum sxe_fc_mode mode);
+
+void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark);
+
+void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark);
+
+void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
+							bool is_disabled);
+
+u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb);
+
+void sxe_hw_ptp_init(struct sxe_hw *hw);
+
+void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
+					bool is_l2, u32 tsctl, u32 tses);
+
+void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw);
+
+void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw);
+
+void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw);
+
+void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw);
+
+bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw);
+
+u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw);
+
+void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
+						u32 *ts_sec, u32 *ts_ns);
+
+u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw);
+
+void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on);
+
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key);
+
+void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field);
+
+void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl);
+
+u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx);
+
+void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
+						u16 reg_idx, u32 value);
+
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw, bool crc_strip_on);
+
+void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame);
+
+void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw);
+
+void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
+							bool is_on);
+
+void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len, u64 desc_dma_addr,
+					u8 reg_idx);
+
+void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len);
+
+void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable);
+
+void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on);
+
+void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw);
+
+void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw);
+
+void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable);
+
+void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
+						u8 reg_idx, u32 max_desc);
+void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable);
+
+void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw);
+
+void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
+				u8 idx, u32 *head, u32 *tail);
+
+void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
+					u8 reg_idx, u32 rdh_value,
+					u32 rdt_value);
+
+u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw);
+
+s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock);
+
+void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt);
+
+bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw);
+
+void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw);
+
+u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw);
+
+void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw);
+
+void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
+						u16 dword_index, u32 value);
+
+u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+						u16 dword_index);
+
+u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw);
+
+u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw);
+
+void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max);
+
+void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_tx_enable(struct sxe_hw *hw);
+
+void sxe_hw_tx_desc_thresh_set(struct sxe_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefetch_thresh);
+
+void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on);
+
+void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb);
+
+void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
+					u8 num_pb, bool dcb_enable);
+
+void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx);
+
+void sxe_hw_mac_txrx_enable(struct sxe_hw *hw);
+
+void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw);
+
+void sxe_hw_mac_pad_enable(struct sxe_hw *hw);
+
+bool sxe_hw_is_link_state_up(struct sxe_hw *hw);
+
+u32 sxe_hw_link_speed_get(struct sxe_hw *hw);
+
+void sxe_hw_fc_base_init(struct sxe_hw *hw);
+
+void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats);
+
+void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value);
+
+void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value);
+
+void sxe_hw_uc_addr_clear(struct sxe_hw *hw);
+
+void sxe_hw_vt_disable(struct sxe_hw *hw);
+
+void sxe_hw_stats_regs_clean(struct sxe_hw *hw);
+
+void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed);
+
+void sxe_hw_crc_configure(struct sxe_hw *hw);
+
+void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw);
+
+void sxe_hw_no_snoop_disable(struct sxe_hw *hw);
+
+void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max);
+
+s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx);
+
+void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools);
+
+void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx);
+
+void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
+						u8 *tc_arr);
+
+void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools);
+
+void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map);
+
+void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
+					u8 sriov_active, u8 pg_tcs);
+
+void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 pg_tcs);
+
+void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num);
+
+void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize);
+
+void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
+					u8 tc_count, bool vmdq_active);
+
+void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
+				      u16 *refill,
+				      u16 *max,
+				      u8 *bwg_id,
+				      u8 *prio_type,
+				      u8 *prio_tc,
+				      u8 max_priority);
+
+void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type);
+
+void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type,
+					   u8 *prio_tc,
+					   u8 max_priority);
+
+void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
+						u8 pfc_en, u8 *prio_tc,
+						u8 max_priority);
+
+void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw);
+
+void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx);
+
+void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools, u32 rx_mode);
+
+void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map);
+
+void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw);
+
+void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
+				bool vmdq_enable, bool sriov_enable, u16 pools_num);
+
+void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
+							u32 ring_idx, u32 rate);
+
+void sxe_hw_mbx_init(struct sxe_hw *hw);
+
+void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 num_vfs);
+
+void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap);
+
+void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap);
+
+void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
+						bool is_enable);
+
+void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx);
+
+s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
+						u8 rar_idx, u8 pool_idx);
+
+void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw);
+
+void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
+							u8 vf_idx, bool status);
+
+void sxe_rx_fc_threshold_set(struct sxe_hw *hw);
+
+void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
+						u8 tcs, bool is_4Q,
+						bool sriov_enable);
+
+void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc);
+
+bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx);
+
+bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx);
+
+bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx);
+
+s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index);
+
+s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index);
+
+void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx);
+
+u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx);
+
+void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
+						u32 vmolr, u16 pool_idx);
+
+void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf);
+
+u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx);
+
+u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u16 pf_vlan, u8 ring_per_pool);
+
+void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_index);
+
+u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf);
+
+bool sxe_hw_vt_status(struct sxe_hw *hw);
+
+s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass);
+
+u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
+
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
+
+void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
+				    u8 mirror_type, u8 dst_pool, bool on);
+
+void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id);
+
+u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw);
+
+void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_idx);
+
+void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u8 ring_per_pool);
+void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr);
+
+void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
+				u32 rx_mode, u32 pn_trh);
+
+void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up);
+
+void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch);
+
+void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi);
+
+void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys);
+
+void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize);
+
+void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask);
+
+void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask);
+
+s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset);
+
+void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
+				struct sxe_fivetuple_node_info *filter);
+
+void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
+					u8 reg_index, u16 ethertype, u16 queue);
+
+void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type);
+
+void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority);
+
+void sxe_hw_syn_filter_del(struct sxe_hw *hw);
+
+#endif /* SXE_DPDK_L4_FEATURES && SXE_DPDK_FILTER_CTRL */
+
+void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl);
+
+s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw);
+
+s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id, u8 queue);
+
+s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id);
+
+void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
+					  u8 flow_type, u32 hash_value, u8 queue);
+
+void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
+						u16 reg_idx, u32 value);
+
+u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask);
+
+s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
+				    union sxe_fnav_rule_info *input_mask);
+
+s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
+					u32 vid, u32 pool,
+					bool vlan_on, bool vlvf_bypass);
+
+void sxe_hw_ptp_systime_init(struct sxe_hw *hw);
+
+#endif /* SXE_DPDK */
+#endif
diff --git a/drivers/net/sxe/base/sxe_logs.h b/drivers/net/sxe/base/sxe_logs.h
new file mode 100644
index 0000000000..510d7aae5c
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_logs.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef _SXE_LOGS_H_
+#define _SXE_LOGS_H_
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+#include "sxe_types.h"
+
+#define LOG_FILE_NAME_LEN     256
+#define LOG_FILE_PATH         "/var/log/"
+#define LOG_FILE_PREFIX       "sxepmd.log"
+
+extern s32 sxe_log_init;
+extern s32 sxe_log_rx;
+extern s32 sxe_log_tx;
+extern s32 sxe_log_drv;
+extern s32 sxe_log_hw;
+
+#define INIT sxe_log_init
+#define RX   sxe_log_rx
+#define TX   sxe_log_tx
+#define HW   sxe_log_hw
+#define DRV  sxe_log_drv
+
+#define UNUSED(x)	(void)(x)
+
+#define TIME(log_time) \
+	do { \
+		struct timeval tv; \
+		struct tm *td; \
+		gettimeofday(&tv, NULL); \
+		td = localtime(&tv.tv_sec); \
+		strftime(log_time, sizeof(log_time), "%Y-%m-%d-%H:%M:%S", td); \
+	} while (0)
+
+#define filename_printf(x) (strrchr((x), '/') ? strrchr((x), '/') + 1 : (x))
+
+#ifdef SXE_DPDK_DEBUG
+#define PMD_LOG_DEBUG(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_DEBUG, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"DEBUG", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_INFO(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_INFO, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"INFO", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_NOTICE(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_NOTICE, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"NOTICE", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_WARN(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_WARNING, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"WARN", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_ERR(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_ERR, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"ERR", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_CRIT(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_CRIT, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"CRIT", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_ALERT(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_ALERT, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"ALERT", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_EMERG(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_EMERG, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			"EMERG", log_time, (unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#else
+#define PMD_LOG_DEBUG(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_DEBUG, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_INFO(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_INFO, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_NOTICE(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_NOTICE, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_WARN(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_WARNING, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_ERR(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_ERR, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_CRIT(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_CRIT, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_ALERT(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_ALERT, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_EMERG(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_EMERG, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#endif
+
+#define PMD_INIT_FUNC_TRACE() PMD_LOG_DEBUG(INIT, " >>")
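+
+/*
+ * Usage sketch (illustrative): the first argument picks one of the log
+ * type ids declared above (INIT/RX/TX/HW/DRV), e.g.
+ *
+ *	PMD_LOG_INFO(INIT, "port %u configured", port_id);
+ *	PMD_LOG_ERR(RX, "queue %u mbuf alloc failed", queue_id);
+ *
+ * The LOG_*_BDF variants below additionally require a local 'adapter'
+ * with a 'name' member to be in scope.
+ */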
+
+#ifdef SXE_DPDK_DEBUG
+#define LOG_DEBUG(fmt, ...) \
+	do { \
+		PMD_LOG_DEBUG(DRV, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_INFO(fmt, ...) \
+	do { \
+		PMD_LOG_INFO(DRV, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_WARN(fmt, ...) \
+	do { \
+		PMD_LOG_WARN(DRV, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_ERROR(fmt, ...) \
+	do { \
+		PMD_LOG_ERR(DRV, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_DEBUG_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_DEBUG(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_INFO_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_INFO(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_WARN_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_WARN(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_ERROR_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_ERR(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	} while (0)
+
+#else
+#define LOG_DEBUG(fmt, ...)
+#define LOG_INFO(fmt, ...)
+#define LOG_WARN(fmt, ...)
+#define LOG_ERROR(fmt, ...)
+#define LOG_DEBUG_BDF(fmt, ...) do { UNUSED(adapter); } while (0)
+#define LOG_INFO_BDF(fmt, ...)  do { UNUSED(adapter); } while (0)
+#define LOG_WARN_BDF(fmt, ...)  do { UNUSED(adapter); } while (0)
+#define LOG_ERROR_BDF(fmt, ...) do { UNUSED(adapter); } while (0)
+#endif
+
+#ifdef SXE_DPDK_DEBUG
+#define LOG_DEV_DEBUG(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_DEV_INFO(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_DEV_WARN(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_DEV_ERR(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_MSG_INFO(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_MSG_WARN(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define LOG_MSG_ERR(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#else
+#define LOG_DEV_DEBUG(fmt, ...) do { UNUSED(adapter); } while (0)
+#define LOG_DEV_INFO(fmt, ...)  do { UNUSED(adapter); } while (0)
+#define LOG_DEV_WARN(fmt, ...)  do { UNUSED(adapter); } while (0)
+#define LOG_DEV_ERR(fmt, ...)   do { UNUSED(adapter); } while (0)
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) do { UNUSED(adapter); } while (0)
+#define LOG_MSG_INFO(msglvl, fmt, ...)  do { UNUSED(adapter); } while (0)
+#define LOG_MSG_WARN(msglvl, fmt, ...)  do { UNUSED(adapter); } while (0)
+#define LOG_MSG_ERR(msglvl, fmt, ...)   do { UNUSED(adapter); } while (0)
+#endif
+
+void sxe_log_stream_init(void);
+
+#endif /* _SXE_LOGS_H_ */
diff --git a/drivers/net/sxe/base/sxe_offload_common.c b/drivers/net/sxe/base/sxe_offload_common.c
new file mode 100644
index 0000000000..a7075b4669
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_offload_common.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_offload_common.h"
+#include "sxe_compat_version.h"
+
+u64 __sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	u64 offloads = 0;
+
+	offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	u64 rx_offload_capa;
+
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+#ifdef DEV_RX_JUMBO_FRAME
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+#endif
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	if (!RTE_ETH_DEV_SRIOV(dev).active) {
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
+	}
+
+	return rx_offload_capa;
+}
+
+u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	u64 tx_offload_capa;
+	RTE_SET_USED(dev);
+
+	tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+		RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
+
+	return tx_offload_capa;
+}
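+
+/*
+ * Illustrative sketch (the callback name is an assumption): these helpers
+ * are meant to back the ethdev capability query, roughly:
+ *
+ *	static int sxe_dev_infos_get(struct rte_eth_dev *dev,
+ *				     struct rte_eth_dev_info *info)
+ *	{
+ *		info->rx_queue_offload_capa = __sxe_rx_queue_offload_capa_get(dev);
+ *		info->rx_offload_capa = __sxe_rx_port_offload_capa_get(dev);
+ *		info->tx_offload_capa = __sxe_tx_port_offload_capa_get(dev);
+ *		return 0;
+ *	}
+ */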
diff --git a/drivers/net/sxe/base/sxe_offload_common.h b/drivers/net/sxe/base/sxe_offload_common.h
new file mode 100644
index 0000000000..20083de2e3
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_offload_common.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_OFFLOAD_COMMON_H__
+#define __SXE_OFFLOAD_COMMON_H__
+
+u64 __sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+#endif /* __SXE_OFFLOAD_COMMON_H__ */
diff --git a/drivers/net/sxe/base/sxe_queue_common.c b/drivers/net/sxe/base/sxe_queue_common.c
new file mode 100644
index 0000000000..eda73c3f79
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_queue_common.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#include "sxe_compat_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_rx.h"
+#include "sxe_tx.h"
+#include "sxe_logs.h"
+#include "sxe_regs.h"
+#include "sxevf_regs.h"
+#include "sxe.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include <rte_vect.h>
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+#include "sxevf.h"
+#endif
+#include "sxe_queue_common.h"
+#include "sxe_queue.h"
+
+static void sxe_tx_queues_clear(struct rte_eth_dev *dev)
+{
+	u16 i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct sxe_tx_queue *txq = dev->data->tx_queues[i];
+
+		if (txq != NULL && txq->ops != NULL) {
+			txq->ops->mbufs_release(txq);
+			txq->ops->init(txq);
+		}
+	}
+
+	return;
+}
+
+static void sxe_rx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	u16 i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct sxe_rx_queue *rxq = dev->data->rx_queues[i];
+
+		if (rxq != NULL) {
+			sxe_rx_queue_mbufs_free(rxq);
+			sxe_rx_queue_init(rx_batch_alloc_allowed, rxq);
+		}
+	}
+
+	return;
+}
+
+s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
+{
+	struct rte_eth_dev *dev = rx_setup->dev;
+	const struct rte_eth_rxconf *rx_conf = rx_setup->rx_conf;
+	u16 queue_idx = rx_setup->queue_idx;
+	u32 socket_id = rx_setup->socket_id;
+	u16 desc_num = rx_setup->desc_num;
+	struct rte_mempool *mp = rx_setup->mp;
+	const struct rte_memzone *rx_mz;
+	struct sxe_rx_queue *rxq;
+	u16 len;
+	u64 offloads;
+	s32 ret = 0;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	struct sxe_adapter *pf_adapter = dev->data->dev_private;
+	struct sxevf_adapter *vf_adapter = dev->data->dev_private;
+#endif
+
+	PMD_INIT_FUNC_TRACE();
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	if (desc_num % SXE_RX_DESC_RING_ALIGN != 0 ||
+			(desc_num > SXE_MAX_RING_DESC) ||
+			(desc_num < SXE_MIN_RING_DESC)) {
+		PMD_LOG_ERR(INIT, "desc_num %u error",desc_num);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		sxe_rx_queue_free(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct sxe_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq == NULL) {
+		PMD_LOG_ERR(INIT, "rxq malloc mem failed");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->mb_pool = mp;
+	rxq->ring_depth = desc_num;
+	rxq->batch_alloc_size = rx_conf->rx_free_thresh;
+	rxq->queue_id = queue_idx;
+	rxq->reg_idx = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	rxq->port_id = dev->data->port_id;
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	} else {
+		rxq->crc_len = 0;
+	}
+
+	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = offloads;
+
+	rxq->pkt_type_mask = SXE_PACKET_TYPE_MASK;
+
+	rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+					SXE_RX_RING_SIZE, SXE_ALIGN, socket_id);
+	if (rx_mz == NULL) {
+		PMD_LOG_ERR(INIT, "rxq malloc desc mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->mz = rx_mz;
+
+	memset(rx_mz->addr, 0, SXE_RX_RING_SIZE);
+
+	if (is_vf) {
+		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
+			SXE_VFRDT(rxq->reg_idx));
+	} else {
+		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
+			SXE_RDT(rxq->reg_idx));
+	}
+
+	rxq->base_addr = rx_mz->iova;
+
+	rxq->desc_ring = (union sxe_rx_data_desc *)rx_mz->addr;
+
+	if (!sxe_check_is_rx_batch_alloc_support(rxq)) {
+		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't support rx batch alloc "
+				"- canceling the feature for the whole port[%d]",
+				rxq->queue_id, rxq->port_id);
+		*rx_setup->rx_batch_alloc_allowed = false;
+	}
+
+	len = desc_num;
+	if (*rx_setup->rx_batch_alloc_allowed) {
+		len += RTE_PMD_SXE_MAX_RX_BURST;
+	}
+
+	rxq->buffer_ring = rte_zmalloc_socket("rxq->sw_ring",
+					  sizeof(struct sxe_rx_buffer) * len,
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->buffer_ring) {
+		PMD_LOG_ERR(INIT, "rxq malloc buffer mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->sc_buffer_ring =
+		rte_zmalloc_socket("rxq->sw_sc_ring",
+				   sizeof(struct sxe_rx_buffer) * len,
+				   RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sc_buffer_ring) {
+		PMD_LOG_ERR(INIT, "rxq malloc sc buffer mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	PMD_LOG_DEBUG(INIT, "buffer_ring=%p sc_buffer_ring=%p desc_ring=%p "
+			    "dma_addr=0x%"SXE_PRIX64,
+		     rxq->buffer_ring, rxq->sc_buffer_ring, rxq->desc_ring,
+		     rxq->base_addr);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	if (!rte_is_power_of_2(desc_num)) {
+		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't meet Vector Rx "
+				    "preconditions - canceling the feature for "
+				    "the whole port[%d]",
+			     rxq->queue_id, rxq->port_id);
+		if (is_vf) {
+			vf_adapter->rx_vec_allowed = false;
+		} else {
+			pf_adapter->rx_vec_allowed = false;
+		}
+	} else {
+		sxe_rxq_vec_setup(rxq);
+	}
+#endif
+
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	sxe_rx_queue_init(*rx_setup->rx_batch_alloc_allowed, rxq);
+
+l_end:
+	return ret;
+}
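+
+/*
+ * Usage sketch (illustrative; 'hw' and the adapter field names are
+ * assumptions): a PF rx_queue_setup callback is expected to pack its
+ * arguments into a struct rx_setup and delegate here:
+ *
+ *	struct rx_setup setup = {
+ *		.dev = dev,
+ *		.queue_idx = queue_idx,
+ *		.desc_num = desc_num,
+ *		.socket_id = socket_id,
+ *		.rx_conf = rx_conf,
+ *		.mp = mp,
+ *		.reg_base_addr = hw->reg_base_addr,
+ *		.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed,
+ *	};
+ *	ret = __sxe_rx_queue_setup(&setup, false);
+ */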
+
+int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf)
+{
+	s32 ret;
+	struct rte_eth_dev *dev = tx_setup->dev;
+	const struct rte_eth_txconf *tx_conf = tx_setup->tx_conf;
+	u16 tx_queue_id = tx_setup->queue_idx;
+	u32 socket_id = tx_setup->socket_id;
+	u16 ring_depth = tx_setup->desc_num;
+	struct sxe_tx_queue *txq;
+	u16 rs_thresh, free_thresh;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = sxe_txq_arg_validate(dev, ring_depth, &rs_thresh,
+					&free_thresh, tx_conf);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] arg validate failed", tx_queue_id);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "tx queue[%d] ring_depth=%d, "
+			"rs_thresh=%d, free_thresh=%d", tx_queue_id,
+			ring_depth, rs_thresh, free_thresh);
+
+	txq = sxe_tx_queue_alloc(dev, tx_queue_id, ring_depth, socket_id);
+	if (!txq) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] resource alloc failed", tx_queue_id);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	txq->ops               = sxe_tx_default_ops_get();
+	txq->ring_depth        = ring_depth;
+	txq->queue_idx         = tx_queue_id;
+	txq->port_id           = dev->data->port_id;
+	txq->pthresh           = tx_conf->tx_thresh.pthresh;
+	txq->hthresh           = tx_conf->tx_thresh.hthresh;
+	txq->wthresh           = tx_conf->tx_thresh.wthresh;
+	txq->rs_thresh         = rs_thresh;
+	txq->free_thresh       = free_thresh;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	txq->reg_idx           = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		tx_queue_id : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + tx_queue_id);
+	txq->offloads          = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	if (is_vf) {
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+					SXE_VFTDT(txq->reg_idx));
+	} else {
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+					SXE_TDT(txq->reg_idx));
+	}
+
+	PMD_LOG_INFO(INIT, "buffer_ring=%p desc_ring=%p dma_addr=0x%"PRIx64,
+		     txq->buffer_ring, txq->desc_ring,
+		     (long unsigned int)txq->base_addr);
+	sxe_tx_function_set(dev, txq);
+
+	txq->ops->init(txq);
+
+	dev->data->tx_queues[tx_queue_id] = txq;
+
+l_end:
+	return ret;
+}
+
+void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+					struct rte_eth_rxq_info *qinfo)
+{
+	struct sxe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->ring_depth;
+
+	qinfo->conf.rx_free_thresh = rxq->batch_alloc_size;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+
+	return;
+}
+
+void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info)
+{
+	struct sxe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	q_info->nb_desc = txq->ring_depth;
+	q_info->conf.tx_thresh.pthresh = txq->pthresh;
+	q_info->conf.tx_thresh.hthresh = txq->hthresh;
+	q_info->conf.tx_thresh.wthresh = txq->wthresh;
+	q_info->conf.tx_free_thresh = txq->free_thresh;
+	q_info->conf.tx_rs_thresh = txq->rs_thresh;
+	q_info->conf.offloads = txq->offloads;
+	q_info->conf.tx_deferred_start = txq->tx_deferred_start;
+
+	return;
+}
+
+s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	int ret;
+	struct sxe_tx_queue *txq = (struct sxe_tx_queue *)tx_queue;
+
+	if (txq->offloads == 0 &&
+		txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
+#ifndef DPDK_19_11_6
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+#endif
+		    (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+		    txq->buffer_ring_vec != NULL)) {
+			ret = sxe_tx_done_cleanup_vec(txq, free_cnt);
+		} else {
+			ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
+		}
+#else
+		ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
+#endif
+	} else {
+		ret = sxe_tx_done_cleanup_full(txq, free_cnt);
+	}
+
+	return ret;
+}
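+
+/*
+ * Illustrative note: this routine backs the rte_eth_tx_done_cleanup()
+ * ethdev op, so an application can reclaim transmitted mbufs with e.g.
+ *
+ *	int n = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
+ *
+ * The fast paths above are taken only when no offloads are enabled and
+ * rs_thresh is at least RTE_PMD_SXE_MAX_TX_BURST.
+ */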
+
+s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
+{
+	struct sxe_rx_buffer *buf_ring = rxq->buffer_ring;
+	s32 ret = 0;
+	u64 dma_addr;
+	u16 i;
+
+	for (i = 0; i < rxq->ring_depth; i++) {
+		volatile union sxe_rx_data_desc *desc;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_LOG_ERR(DRV, "rx mbuf alloc failed queue_id=%u",
+					(unsigned) rxq->queue_id);
+			ret = -ENOMEM;
+			goto l_end;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		desc = &rxq->desc_ring[i];
+		desc->read.hdr_addr = 0;
+		desc->read.pkt_addr = dma_addr;
+		buf_ring[i].mbuf = mbuf;
+	}
+
+l_end:
+	return ret;
+}
+
+void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq)
+{
+	if (rxq != NULL) {
+		sxe_rx_queue_mbufs_free(rxq);
+		rte_free(rxq->buffer_ring);
+		rte_free(rxq->sc_buffer_ring);
+		rte_memzone_free(rxq->mz);
+		rte_free(rxq);
+	}
+	return;
+}
+
+void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq)
+{
+	if (txq != NULL && txq->ops != NULL) {
+		txq->ops->mbufs_release(txq);
+		txq->ops->buffer_ring_free(txq);
+		rte_memzone_free(txq->mz);
+		rte_free(txq);
+	}
+
+	return;
+}
+
+void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_tx_queues_clear(dev);
+
+	sxe_rx_queues_clear(dev, rx_batch_alloc_allowed);
+
+	return;
+}
+
+void __sxe_queues_free(struct rte_eth_dev *dev)
+{
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		__sxe_rx_queue_free(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		__sxe_tx_queue_free(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+
+	return;
+}
+
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+		bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	struct sxe_tx_queue *txq;
+
+	if (eth_dev->data->tx_queues) {
+		txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+		sxe_tx_function_set(eth_dev, txq);
+	} else {
+		PMD_LOG_NOTICE(INIT, "No TX queues configured yet. "
+			     "Using default TX function.");
+	}
+
+	sxe_rx_function_set(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
+	return;
+}
diff --git a/drivers/net/sxe/base/sxe_queue_common.h b/drivers/net/sxe/base/sxe_queue_common.h
new file mode 100644
index 0000000000..a38113b643
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_queue_common.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_QUEUE_COMMON_H__
+#define __SXE_QUEUE_COMMON_H__
+
+#include "sxe_types.h"
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define RTE_PMD_SXE_MAX_RX_BURST 32
+
+enum sxe_ctxt_num {
+	SXE_CTXT_DESC_0    = 0, 
+	SXE_CTXT_DESC_1    = 1, 
+	SXE_CTXT_DESC_NUM  = 2, 
+};
+
+struct rx_setup {
+	struct rte_eth_dev *dev;
+	u16 queue_idx;
+	u16 desc_num;
+	u32 socket_id;
+	const struct rte_eth_rxconf *rx_conf;
+	struct rte_mempool *mp;
+	u8 __iomem *reg_base_addr;
+	bool *rx_batch_alloc_allowed;
+};
+
+struct tx_setup {
+	struct rte_eth_dev *dev;
+	u16 queue_idx;
+	u16 desc_num;
+	u32 socket_id;
+	const struct rte_eth_txconf *tx_conf;
+	u8 __iomem *reg_base_addr;
+};
+
+union sxe_tx_data_desc {
+	struct {
+		__le64 buffer_addr; /* packet buffer DMA address */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd;
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+struct sxe_rx_buffer {
+	struct rte_mbuf *mbuf;
+};
+
+struct sxe_rx_queue_stats {
+	u64 csum_err;
+};
+
+union sxe_rx_data_desc {
+	struct {
+		__le64 pkt_addr; /* packet buffer DMA address */
+		__le64 hdr_addr; /* header buffer DMA address */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS type, pkt type */
+					__le16 hdr_info; /* header split info */
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS hash */
+				struct {
+					__le16 ip_id; /* IP id */
+					__le16 csum;  /* packet checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error; /* extended status/error */
+			__le16 length;       /* packet length */
+			__le16 vlan;         /* VLAN tag */
+		} upper;
+	} wb;
+};
+
+struct sxe_tx_buffer {
+	struct rte_mbuf *mbuf;
+	u16 next_id; /* index of next descriptor in ring */
+	u16 last_id; /* index of last scattered descriptor */
+};
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+struct sxe_tx_buffer_vec {
+	struct rte_mbuf *mbuf; 
+};
+#endif
+
+union sxe_tx_offload {
+	u64 data[2];
+	struct {
+		u64 l2_len:7;     /* L2 header length */
+		u64 l3_len:9;     /* L3 header length */
+		u64 l4_len:8;     /* L4 header length */
+		u64 tso_segsz:16; /* TSO segment size */
+		u64 vlan_tci:16;  /* VLAN tag to insert */
+
+		u64 outer_l3_len:8; /* outer L3 header length */
+		u64 outer_l2_len:8; /* outer L2 header length */
+	};
+};
+
+struct sxe_ctxt_info {
+	u64 flags;  
+	union sxe_tx_offload tx_offload;
+	union sxe_tx_offload tx_offload_mask;
+};
+
+struct sxe_tx_queue {
+	volatile union sxe_tx_data_desc *desc_ring;
+	u64            base_addr;      /* descriptor ring DMA base address */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	union {
+		struct sxe_tx_buffer *buffer_ring;         /* scalar path */
+		struct sxe_tx_buffer_vec *buffer_ring_vec; /* vector path */
+	};
+#else
+	struct sxe_tx_buffer *buffer_ring;
+#endif
+	volatile u32   *tdt_reg_addr;  /* TDT tail register address */
+	u16            ring_depth;     /* number of descriptors */
+	u16            next_to_use;
+	u16            free_thresh;
+
+	u16            rs_thresh;
+
+	u16            desc_used_num;
+	u16            next_to_clean;
+	u16            desc_free_num;
+	u16            next_dd;        /* next desc to check for DD bit */
+	u16            next_rs;        /* next desc to set RS bit on */
+	u16            queue_idx;      /* software queue index */
+	u16            reg_idx;        /* hardware ring register index */
+	u16            port_id;
+	u8             pthresh;        /* prefetch threshold */
+	u8             hthresh;        /* host threshold */
+
+	u8             wthresh;        /* write-back threshold */
+	u64            offloads;
+	u32            ctx_curr;       /* current context descriptor slot */
+	struct sxe_ctxt_info ctx_cache[SXE_CTXT_DESC_NUM];
+	const struct sxe_txq_ops *ops;
+	u8     tx_deferred_start;      /* don't start with dev_start() */
+	const struct rte_memzone *mz;
+};
+
+struct sxe_rx_queue {
+	struct rte_mempool  *mb_pool;
+	volatile union sxe_rx_data_desc *desc_ring;
+	u64  base_addr;                 /* descriptor ring DMA base address */
+	volatile u32   *rdt_reg_addr;   /* RDT tail register address */
+	struct sxe_rx_buffer *buffer_ring;
+	struct sxe_rx_buffer *sc_buffer_ring; /* for scattered rx */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+	u64    mbuf_init_value;
+	u8     is_using_sse;
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+	u16    realloc_num;
+	u16    realloc_start;
+#endif
+#endif
+	u16    ring_depth;           /* number of descriptors */
+	u16    processing_idx;       /* next descriptor to process */
+	u16    hold_num;             /* descriptors held back from hardware */
+	u16    completed_pkts_num;
+	u16    next_ret_pkg;
+	u16    batch_alloc_trigger;
+
+	u16    batch_alloc_size;
+	u16    queue_id;             /* software queue index */
+	u16    reg_idx;              /* hardware ring register index */
+	u16    pkt_type_mask;
+	u16    port_id;
+	u8     crc_len;              /* 0 or RTE_ETHER_CRC_LEN */
+	u8     drop_en;
+	u8     deferred_start;
+	u64    vlan_flags;
+	u64    offloads;
+	struct rte_mbuf fake_mbuf;   /* dummy mbuf used as ring sentinel */
+	struct rte_mbuf *completed_ring[RTE_PMD_SXE_MAX_RX_BURST * 2];
+	const struct rte_memzone *mz;
+	struct sxe_rx_queue_stats rx_stats;
+};
+
+struct sxe_txq_ops {
+	void (*init)(struct sxe_tx_queue *txq);
+	void (*mbufs_release)(struct sxe_tx_queue *txq);
+	void (*buffer_ring_free)(struct sxe_tx_queue *txq);
+};
+
+s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf);
+
+int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf);
+
+void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+					struct rte_eth_rxq_info *qinfo);
+
+void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info);
+
+s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq);
+
+void sxe_rx_queue_free(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void __sxe_queues_free(struct rte_eth_dev *dev);
+
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+				bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#endif /* __SXE_QUEUE_COMMON_H__ */
diff --git a/drivers/net/sxe/base/sxe_rx_common.c b/drivers/net/sxe/base/sxe_rx_common.c
new file mode 100644
index 0000000000..4472058a29
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_rx_common.c
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include <rte_prefetch.h>
+#include <rte_malloc.h>
+
+#include "sxe.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_queue_common.h"
+#include "sxe_vf.h"
+#include "sxe_errno.h"
+#include "sxe_irq.h"
+#include "sxe_rx_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include "rte_vect.h"
+#endif
+
+static inline void sxe_rx_resource_prefetch(u16 next_idx,
+				struct sxe_rx_buffer *buf_ring,
+				volatile union sxe_rx_data_desc *desc_ring)
+{
+	rte_sxe_prefetch(buf_ring[next_idx].mbuf);
+
+	if ((next_idx & 0x3) == 0) {
+		rte_sxe_prefetch(&desc_ring[next_idx]);
+		rte_sxe_prefetch(&buf_ring[next_idx]);
+	}
+
+	return;
+}
+
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev,
+		bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	u16  i, is_using_sse;
+
+	if (sxe_rx_vec_condition_check(dev) ||
+	    !rx_batch_alloc_allowed 
+#ifndef DPDK_19_11_6
+		|| rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128
+#endif
+		) {
+		PMD_LOG_DEBUG(INIT, "Port[%d] doesn't meet Vector Rx "
+				    "preconditions", dev->data->port_id);
+		*rx_vec_allowed = false;
+	}
+#else
+	UNUSED(rx_vec_allowed);
+#endif
+
+	if (dev->data->lro) {
+		if (rx_batch_alloc_allowed) {
+			PMD_LOG_DEBUG(INIT, "LRO is requested. Using a bulk "
+					   "allocation version");
+			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
+		} else {
+			PMD_LOG_DEBUG(INIT, "LRO is requested. Using a single "
+					   "allocation version");
+			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
+		}
+	} else if (dev->data->scattered_rx) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		if (*rx_vec_allowed) {
+			PMD_LOG_DEBUG(INIT, "Using Vector Scattered Rx "
+					    "callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_scattered_pkts_vec_recv;
+		} else
+#endif
+		if (rx_batch_alloc_allowed) {
+			PMD_LOG_DEBUG(INIT, "Using a Scattered with bulk "
+					   "allocation callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
+		} else {
+			PMD_LOG_DEBUG(INIT, "Using Regular (non-vector, "
+					    "single allocation) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
+		}
+	}
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	else if (*rx_vec_allowed) {
+		PMD_LOG_DEBUG(INIT, "Vector rx enabled, please make sure RX "
+				    "burst size no less than %d (port=%d).",
+			     SXE_DESCS_PER_LOOP,
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_pkts_vec_recv;
+	}
+#endif
+	else if (rx_batch_alloc_allowed) {
+		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_batch_alloc_pkts_recv;
+	} else {
+		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are not "
+				"satisfied, or Scattered Rx is requested "
+				"(port=%d).",
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_pkts_recv;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	is_using_sse =
+		(dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv ||
+		dev->rx_pkt_burst == sxe_pkts_vec_recv);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct sxe_rx_queue *rxq = dev->data->rx_queues[i];
+
+		rxq->is_using_sse = is_using_sse;
+	}
+#endif
+
+	return;
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	volatile union sxe_rx_data_desc *desc;
+	struct sxe_rx_queue *rxq = rx_queue;
+	u32 index;
+	s32 is_done = 0;
+
+	LOG_DEBUG("check rx queue[%u], offset desc[%u]\n",
+			rxq->queue_id, offset);
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("offset=%u >= ring depth=%u\n",
+				offset, rxq->ring_depth);
+		goto l_end;
+	}
+
+	index = rxq->processing_idx + offset;
+	if (index >= rxq->ring_depth) {
+		index -= rxq->ring_depth;
+	}
+
+	desc = &rxq->desc_ring[index];
+	is_done = !!(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD));
+
+l_end:
+	return is_done;
+}
+#endif
+
+s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	int ret = RTE_ETH_RX_DESC_AVAIL;
+	struct sxe_rx_queue *rxq = rx_queue;
+	volatile u32 *status;
+	u32 hold_num, desc;
+
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("rx queue[%u] desc status check failed: "
+			"offset=%u >= ring_depth=%u\n",
+			rxq->queue_id, offset, rxq->ring_depth);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	if (rxq->is_using_sse)
+		hold_num = rxq->realloc_num;
+	else
+#endif
+#endif
+		hold_num = rxq->hold_num;
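+	/* Descriptors still held by the driver are never reported as done. */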
+	if (offset >= rxq->ring_depth - hold_num) {
+		ret = RTE_ETH_RX_DESC_UNAVAIL;
+		goto l_end;
+	}
+
+	desc = rxq->processing_idx + offset;
+	if (desc >= rxq->ring_depth) {
+		desc -= rxq->ring_depth;
+	}
+
+	status = &rxq->desc_ring[desc].wb.upper.status_error;
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)) {
+		ret =  RTE_ETH_RX_DESC_DONE;
+	}
+
+l_end:
+	LOG_DEBUG("rx queue[%u] desc status=%d\n", rxq->queue_id, ret);
+	return ret;
+}
+
+u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num)
+{
+	struct sxe_rx_queue *rxq = (struct sxe_rx_queue *)rx_queue;
+	volatile union sxe_rx_data_desc *desc_ring = rxq->desc_ring;
+	volatile union sxe_rx_data_desc *cur_desc;
+	struct sxe_rx_buffer *buff_ring = rxq->buffer_ring;
+	struct sxe_rx_buffer *cur_buf;
+	struct rte_mbuf *cur_mb;
+	struct rte_mbuf *new_mb;
+	union sxe_rx_data_desc rxd;
+	u16 processing_idx = rxq->processing_idx;
+	u64 dma_addr;
+	u32 staterr;
+	u32 pkt_info;
+	u16 done_num = 0;
+	u16 hold_num = 0;
+	u16 pkt_len;
+
+	while (done_num < pkts_num) {
+		cur_desc = &desc_ring[processing_idx];
+		staterr = cur_desc->wb.upper.status_error;
+		if (!(staterr & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+			break;
+		}
+
+		rxd = *cur_desc;
+
+		LOG_DEBUG("port_id=%u queue_id=%u processing_idx=%u "
+			   "staterr=0x%08x pkt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) processing_idx, (unsigned) staterr,
+			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+		if (new_mb == NULL) {
+			LOG_ERROR("RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		hold_num++;
+		cur_buf = &buff_ring[processing_idx];
+		processing_idx++;
+		if (processing_idx == rxq->ring_depth) {
+			processing_idx = 0;
+		}
+
+		sxe_rx_resource_prefetch(processing_idx, buff_ring, desc_ring);
+
+		cur_mb = cur_buf->mbuf;
+		cur_buf->mbuf = new_mb;
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
+		cur_desc->read.hdr_addr = 0;
+		cur_desc->read.pkt_addr = dma_addr;
+
+		cur_mb->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch((char *)cur_mb->buf_addr + cur_mb->data_off);
+		cur_mb->nb_segs = 1;
+		cur_mb->next = NULL;
+		pkt_len = (u16)(rte_le_to_cpu_16(rxd.wb.upper.length) -
+						rxq->crc_len);
+		cur_mb->pkt_len = pkt_len;
+		cur_mb->data_len = pkt_len;
+
+		pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+
+		sxe_rx_mbuf_common_header_fill(rxq, cur_mb, rxd, pkt_info, staterr);
+
+		rx_pkts[done_num++] = cur_mb;
+	}
+
+	rxq->processing_idx = processing_idx;
+
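+	/*
+	 * Batch the tail updates: RDT is only written back once at least
+	 * batch_alloc_size descriptors have been refilled, which limits
+	 * MMIO traffic on the hot path.
+	 */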
+	hold_num = (u16) (hold_num + rxq->hold_num);
+	if (hold_num > rxq->batch_alloc_size) {
+		LOG_DEBUG("port_id=%u queue_id=%u rx_tail=%u "
+			   "num_hold=%u num_done=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) processing_idx, (unsigned) hold_num,
+			   (unsigned) done_num);
+		processing_idx = (u16)((processing_idx == 0) ?
+				(rxq->ring_depth - 1) : (processing_idx - 1));
+		SXE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, processing_idx);
+		hold_num = 0;
+	}
+
+	rxq->hold_num = hold_num;
+	return done_num;
+}
+
+const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	const u32 *ptypes = NULL;
+	static const u32 ptypes_arr[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_TUNNEL_IP,
+		RTE_PTYPE_INNER_L3_IPV6,
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	if (dev->rx_pkt_burst == sxe_pkts_recv ||
+		dev->rx_pkt_burst == sxe_batch_alloc_pkts_recv ||
+		dev->rx_pkt_burst == sxe_single_alloc_lro_pkts_recv ||
+		dev->rx_pkt_burst == sxe_batch_alloc_lro_pkts_recv) {
+		ptypes = ptypes_arr;
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	if (dev->rx_pkt_burst == sxe_pkts_vec_recv ||
+	    dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv) {
+		ptypes = ptypes_arr;
+	}
+#endif
+#endif
+
+l_end:
+	return ptypes;
+}
+
diff --git a/drivers/net/sxe/base/sxe_rx_common.h b/drivers/net/sxe/base/sxe_rx_common.h
new file mode 100644
index 0000000000..b7eb37f54a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_rx_common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_RX_COMMON_H__
+#define __SXE_RX_COMMON_H__
+
+#include "sxe_dpdk_version.h"
+
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset);
+
+u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num);
+
+const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_tx_common.c b/drivers/net/sxe/base/sxe_tx_common.c
new file mode 100644
index 0000000000..a47f90109a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_tx_common.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#endif
+#include <rte_net.h>
+
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue_common.h"
+#include "sxe_tx_common.h"
+
+int __sxe_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	int ret = RTE_ETH_TX_DESC_FULL;
+	u32 desc_idx;
+	struct sxe_tx_queue *txq = tx_queue;
+	volatile u32 *status;
+
+	if (unlikely(offset >= txq->ring_depth)) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	desc_idx = txq->next_to_use + offset;
+
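+	/*
+	 * The DD bit is only written back on descriptors carrying the RS
+	 * flag, i.e. every rs_thresh entries: round up to the next RS
+	 * boundary and wrap (possibly twice, since the rounded index may
+	 * pass the ring end again).
+	 */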
+	desc_idx = ((desc_idx + txq->rs_thresh - 1) / txq->rs_thresh) * txq->rs_thresh;
+	if (desc_idx >= txq->ring_depth) {
+		desc_idx -= txq->ring_depth;
+		if (desc_idx >= txq->ring_depth) {
+			desc_idx -= txq->ring_depth;
+		}
+	}
+
+	status = &txq->desc_ring[desc_idx].wb.status;
+	if (*status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD)) {
+		ret = RTE_ETH_TX_DESC_DONE;
+	}
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/sxe/base/sxe_tx_common.h b/drivers/net/sxe/base/sxe_tx_common.h
new file mode 100644
index 0000000000..2759ef5a7a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_tx_common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_TX_COMMON_H__
+#define __SXE_TX_COMMON_H__
+
+int __sxe_tx_descriptor_status(void *tx_queue, u16 offset);
+
+u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_types.h b/drivers/net/sxe/base/sxe_types.h
new file mode 100644
index 0000000000..966ee230b3
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_DPDK_TYPES_H__
+#define __SXE_DPDK_TYPES_H__
+
+#include <sys/time.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_common.h>
+
+typedef uint8_t		u8;
+typedef uint16_t	u16;
+typedef uint32_t	u32;
+typedef uint64_t	u64;
+
+typedef int8_t		s8;
+typedef int16_t		s16;
+typedef int32_t		s32;
+typedef int64_t		s64;
+
+typedef s8		S8;
+typedef s16		S16;
+typedef s32		S32;
+
+#define __le16  u16
+#define __le32  u32
+#define __le64  u64
+
+#define __be16  u16
+#define __be32  u32
+#define __be64  u64
+
+#endif
diff --git a/drivers/net/sxe/base/sxevf_hw.c b/drivers/net/sxe/base/sxevf_hw.c
new file mode 100644
index 0000000000..75ac9dd25b
--- /dev/null
+++ b/drivers/net/sxe/base/sxevf_hw.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) 
+#include <linux/etherdevice.h>
+
+#include "sxevf_hw.h"
+#include "sxevf_regs.h"
+#include "sxe_log.h"
+#include "sxevf_irq.h"
+#include "sxevf_msg.h"
+#include "sxevf_ring.h"
+#include "sxevf.h"
+#include "sxevf_rx_proc.h"
+#else 
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+#include "sxe_dpdk_version.h"
+#include "sxe_compat_version.h"
+#include "sxevf.h"
+#include "sxevf_hw.h"
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+struct sxevf_adapter;
+#endif
+
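+/* The n == 64 case avoids the undefined behavior of a 64-bit shift by 64. */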
+#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#define DMA_MASK_NONE	0x0ULL
+
+#define  SXEVF_REG_READ_CNT    5
+
+#define SXE_REG_READ_FAIL       0xffffffffU
+
+#define SXEVF_RING_WAIT_LOOP                   (100)
+#define SXEVF_MAX_RX_DESC_POLL                 (10)
+
+#define SXEVF_REG_READ(hw, addr)        sxevf_reg_read(hw, addr)
+#define SXEVF_REG_WRITE(hw, reg, value) sxevf_reg_write(hw, reg, value)
+#define SXEVF_WRITE_FLUSH(a) sxevf_reg_read(a, SXE_VFSTATUS)
+
+#ifndef SXE_DPDK 
+void sxevf_hw_fault_handle(struct sxevf_hw *hw)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (test_bit(SXEVF_HW_FAULT, &hw->state)) {
+		goto l_ret;
+	}
+
+	set_bit(SXEVF_HW_FAULT, &hw->state);
+
+	LOG_DEV_ERR("sxe nic hw fault\n");
+
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL)) {
+		hw->fault_handle(hw->priv);
+	}
+
+l_ret:
+	return;
+}
+
+static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg)
+{
+	u32 value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxevf_adapter *adapter = hw->adapter;
+	u8 i;
+
+	if (reg == SXE_VFSTATUS) {
+		sxevf_hw_fault_handle(hw);
+		return;
+	}
+
+	for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
+		value = hw->reg_read(base_addr + SXE_VFSTATUS);
+
+		if (value != SXEVF_REG_READ_FAIL) {
+			break;
+		}
+
+		mdelay(20);
+	}
+
+	LOG_INFO_BDF("retry done i:%d value:0x%x\n", i, value);
+
+	if (value == SXEVF_REG_READ_FAIL) {
+		sxevf_hw_fault_handle(hw);
+	}
+
+	return;
+}
+
+STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+{
+	u32 value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		value = SXEVF_REG_READ_FAIL;
+		goto l_ret;
+	}
+
+	value = hw->reg_read(base_addr + reg);
+	if (unlikely(SXEVF_REG_READ_FAIL == value)) {
+		LOG_ERROR_BDF("reg[0x%x] read failed, value=%#x\n", reg, value);
+		sxevf_hw_fault_check(hw, reg);
+	}
+
+l_ret:
+	return value;
+}
+
+STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	if (sxevf_is_hw_fault(hw)) {
+		goto l_ret;
+	}
+
+	hw->reg_write(value, base_addr + reg);
+
+l_ret:
+	return;
+}
+
+#else 
+
+STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+
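+	/*
+	 * An all-ones value usually means the PCIe read failed (e.g. the
+	 * device fell off the bus); retry a few times before returning it.
+	 */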
+	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+	if (unlikely(SXEVF_REG_READ_FAIL == value)) {
+		for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
+			LOG_ERROR("reg[0x%x] read failed, value=%#x\n",
+							reg, value);
+			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+			if (value != SXEVF_REG_READ_FAIL) {
+				LOG_INFO("reg[0x%x] read ok, value=%#x\n",
+								reg, value);
+				break;
+			}
+
+			mdelay(3);
+		}
+	}
+
+	return value;
+}
+
+STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
+
+	return;
+}
+#endif
+
+void sxevf_hw_stop(struct sxevf_hw *hw)
+{
+	u8 i;
+	u32 value;
+
+	for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
+		value = SXEVF_REG_READ(hw, SXE_VFRXDCTL(i));
+		if (value & SXE_VFRXDCTL_ENABLE) {
+			value &= ~SXE_VFRXDCTL_ENABLE;
+			SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), value);
+		}
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	SXEVF_REG_WRITE(hw, SXE_VFEIMC, SXEVF_VFEIMC_IRQ_MASK);
+	SXEVF_REG_READ(hw, SXE_VFEICR);
+
+	for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
+		value = SXEVF_REG_READ(hw, SXE_VFTXDCTL(i));
+		if (value & SXE_VFTXDCTL_ENABLE) {
+			value &= ~SXE_VFTXDCTL_ENABLE;
+			SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), value);
+		}
+	}
+
+	return;
+}
+
+void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
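+	/* Mailbox memory is an array of 32-bit words; index << 2 is the byte offset. */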
+	SXEVF_REG_WRITE(hw, SXE_VFMBMEM + (index << 2), msg);
+
+	LOG_DEBUG_BDF("index:%u write mbx mem:0x%x.\n", index, msg);
+
+	return;
+}
+
+u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index)
+{
+	u32 value = SXEVF_REG_READ(hw, SXE_VFMBMEM + (index << 2));
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("index:%u read mbx mem:0x%x.\n", index, value);
+
+	return value;
+}
+
+u32 sxevf_mailbox_read(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFMAILBOX);
+}
+
+void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, value);
+	return;
+}
+
+void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_REQ);
+
+	return;
+}
+
+void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_ACK);
+
+	return;
+}
+
+void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector)
+{
+	u8  allocation;
+	u32 ivar;
+
+	allocation = vector | SXEVF_IVAR_ALLOC_VALID;
+
+	ivar = SXEVF_REG_READ(hw, SXE_VFIVAR_MISC);
+	ivar &= ~0xFF;
+	ivar |= allocation;
+
+	SXEVF_REG_WRITE(hw, SXE_VFIVAR_MISC, ivar);
+
+	return;
+}
+
+void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIMS, value);
+
+	return;
+}
+
+void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIAM, mask);
+	SXEVF_REG_WRITE(hw, SXE_VFEIMS, mask);
+
+	return;
+}
+
+void sxevf_irq_disable(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIAM, 0);
+	SXEVF_REG_WRITE(hw, SXE_VFEIMC, ~0);
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	return;
+}
+
+void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector)
+{
+	u8  allocation;
+	u32 ivar, position;
+
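+	/*
+	 * Each 32-bit IVAR register holds four 8-bit entries: the Rx and
+	 * Tx vectors of two consecutive rings. Compute this entry's bit
+	 * position from the ring parity and the direction.
+	 */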
+	allocation = vector | SXEVF_IVAR_ALLOC_VALID;
+
+	position = ((hw_ring_idx & 1) * 16) + (8 * is_tx);
+
+	ivar = SXEVF_REG_READ(hw, SXE_VFIVAR(hw_ring_idx >> 1));
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXEVF_REG_WRITE(hw, SXE_VFIVAR(hw_ring_idx >> 1), ivar);
+
+	return;
+}
+
+void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval)
+{
+	u32 eitr = interval & SXEVF_EITR_ITR_MASK;
+
+	eitr |= SXEVF_EITR_CNT_WDIS;
+
+	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), eitr);
+
+	return;
+}
+
+static void sxevf_event_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), value);
+
+	return;
+}
+
+static void sxevf_pending_irq_clear(struct sxevf_hw *hw)
+{
+	SXEVF_REG_READ(hw, SXE_VFEICR);
+
+	return;
+}
+
+static void sxevf_ring_irq_trigger(struct sxevf_hw *hw, u64 eics)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEICS, eics);
+
+	return;
+}
+
+static const struct sxevf_irq_operations sxevf_irq_ops = {
+	.ring_irq_interval_set   = sxevf_ring_irq_interval_set,
+	.event_irq_interval_set  = sxevf_event_irq_interval_set,
+	.ring_irq_map         = sxevf_hw_ring_irq_map,
+	.event_irq_map           = sxevf_event_irq_map,
+	.pending_irq_clear       = sxevf_pending_irq_clear,
+	.ring_irq_trigger        = sxevf_ring_irq_trigger,
+	.specific_irq_enable     = sxevf_specific_irq_enable,
+	.irq_enable              = sxevf_irq_enable,
+	.irq_disable             = sxevf_irq_disable,
+};
+
+void sxevf_hw_reset(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFCTRL, SXE_VFCTRL_RST);
+	SXEVF_WRITE_FLUSH(hw);
+
+	return;
+}
+
+STATIC bool sxevf_hw_rst_done(struct sxevf_hw *hw)
+{
+	return !(SXEVF_REG_READ(hw, SXE_VFCTRL) & SXE_VFCTRL_RST);
+}
+
+u32 sxevf_link_state_get(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFLINKS);
+}
+
+static u32 dump_regs[] = {
+	SXE_VFCTRL,
+};
+
+u16 sxevf_reg_dump_num_get(void)
+{
+	return ARRAY_SIZE(dump_regs);
+}
+
+static u32 sxevf_reg_dump(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size)
+{
+	u32 i;
+	u32 regs_num = buf_size / sizeof(u32);
+
+	/* Do not walk past the end of dump_regs if the buffer is larger. */
+	if (regs_num > ARRAY_SIZE(dump_regs)) {
+		regs_num = ARRAY_SIZE(dump_regs);
+	}
+
+	for (i = 0; i < regs_num; i++) {
+		regs_buff[i] = SXEVF_REG_READ(hw, dump_regs[i]);
+	}
+
+	return i;
+}
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+struct sxevf_self_test_reg {
+	u32 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
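+/*
+ * Register self-test table: each entry is checked array_len times at the
+ * stride its test type implies (0x40 for ring registers, 4 or 8 for
+ * table entries); a zero reg terminates the list.
+ */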
+static const struct sxevf_self_test_reg self_test_reg[] = {
+	{ SXE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ SXE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
+	{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, SXEVF_RXDCTL_ENABLE },
+	{ SXE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+	{ SXE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ SXE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ .reg = 0 }
+};
+
+static s32 sxevf_reg_pattern_test(struct sxevf_hw *hw, u32 reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 pat, val, before;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXEVF_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = SXEVF_REG_READ(hw, reg);
+
+		SXEVF_REG_WRITE(hw, reg, test_pattern[pat] & write);
+		val = SXEVF_REG_READ(hw, reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			LOG_MSG_ERR(drv, "pattern test reg %04X failed: "
+					"got 0x%08X expected 0x%08X\n",
+				reg, val, (test_pattern[pat] & write & mask));
+			SXEVF_REG_WRITE(hw, reg, before);
+			ret = -SXEVF_DIAG_REG_PATTERN_TEST_ERR;
+			goto l_end;
+		}
+
+		SXEVF_REG_WRITE(hw, reg, before);
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxevf_reg_set_and_check(struct sxevf_hw *hw, int reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 val, before;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXEVF_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	before = SXEVF_REG_READ(hw, reg);
+	SXEVF_REG_WRITE(hw, reg, write & mask);
+	val = SXEVF_REG_READ(hw, reg);
+	if ((write & mask) != (val & mask)) {
+		LOG_DEV_ERR("set/check reg %04X test failed: "
+				"got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
+		SXEVF_REG_WRITE(hw, reg, before);
+		ret = -SXEVF_DIAG_CHECK_REG_TEST_ERR;
+		goto l_end;
+	}
+
+	SXEVF_REG_WRITE(hw, reg, before);
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxevf_regs_test(struct sxevf_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	const struct sxevf_self_test_reg *test = self_test_reg;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case TABLE32_TEST:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 4),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 8),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				ret = sxevf_reg_pattern_test(hw,
+					(test->reg + 4) + (i * 8),
+					test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				ret = sxevf_reg_set_and_check(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				SXEVF_REG_WRITE(hw, test->reg + (i * 0x40),
+						test->write);
+				break;
+			default:
+				LOG_ERROR_BDF("reg test mod err, type=%d\n",
+						test->test_type);
+				break;
+			}
+
+			if (ret) {
+				goto l_end;
+			}
+
+		}
+		test++;
+	}
+
+l_end:
+	return ret;
+}
+
+static const struct sxevf_setup_operations sxevf_setup_ops = {
+	.reset		= sxevf_hw_reset,
+	.hw_stop	= sxevf_hw_stop,
+	.regs_test	= sxevf_regs_test,
+	.regs_dump	= sxevf_reg_dump,
+	.link_state_get	= sxevf_link_state_get,
+	.reset_done = sxevf_hw_rst_done,
+};
+
+static void sxevf_tx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx),
+			(desc_dma_addr & DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
+	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
+
+	return;
+}
+
+static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDWBAH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDWBAL(reg_idx), 0);
+
+	return;
+}
+
+static void sxevf_tx_desc_thresh_set(
+				struct sxevf_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefetch_thresh)
+{
+	u32 txdctl = 0;
+
+	txdctl |= (wb_thresh << SXEVF_TXDCTL_WTHRESH_SHIFT);
+	txdctl |= (host_thresh << SXEVF_TXDCTL_HTHRESH_SHIFT) |
+			prefetch_thresh;
+
+	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+	return;
+}
+
+void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 wait_loop = SXEVF_MAX_TXRX_DESC_POLL;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXEVF_TXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+		} while (--wait_loop && !(txdctl & SXEVF_TXDCTL_ENABLE));
+	} else {
+		txdctl &= ~SXEVF_TXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+		} while (--wait_loop && (txdctl & SXEVF_TXDCTL_ENABLE));
+	}
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("tx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXEVF_RX_RING_POLL_MAX;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (!hw->reg_base_addr) {
+		goto l_end;
+	}
+
+	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	rxdctl &= ~SXE_VFRXDCTL_ENABLE;
+	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+	do {
+		udelay(10);
+		rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & SXE_VFRXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		LOG_ERROR_BDF("RXDCTL.ENABLE queue %d not cleared while polling\n",
+				reg_idx);
+	}
+
+l_end:
+	return;
+}
+
+void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXEVF_RING_WAIT_LOOP;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	if (is_on) {
+		rxdctl |= SXEVF_RXDCTL_ENABLE | SXEVF_RXDCTL_VME;
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+		} while (--wait_loop && !(rxdctl & SXEVF_RXDCTL_ENABLE));
+	} else {
+		rxdctl &= ~SXEVF_RXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+		} while (--wait_loop && (rxdctl & SXEVF_RXDCTL_ENABLE));
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("rx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRDBAL(reg_idx),
+			(desc_dma_addr & DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXE_VFRDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXE_VFRDLEN(reg_idx), desc_mem_len);
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	SXEVF_REG_WRITE(hw, SXE_VFRDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), 0);
+
+	return;
+}
+
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len, bool drop_en)
+{
+	u32 srrctl = 0;
+
+	if (drop_en) {
+		srrctl = SXEVF_SRRCTL_DROP_EN;
+	}
+
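+	/*
+	 * SRRCTL layout: the header buffer size lands in bits [13:8]; the
+	 * packet buffer size, in 1 KiB units (hence the >> 10), in bits [6:0].
+	 */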
+	srrctl |= ((header_buf_len << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+			SXEVF_SRRCTL_BSIZEHDR_MASK);
+	srrctl |= ((pkg_buf_len >> SXEVF_SRRCTL_BSIZEPKT_SHIFT) &
+			SXEVF_SRRCTL_BSIZEPKT_MASK);
+
+	SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(reg_idx), srrctl);
+
+	return;
+}
+
+static void sxevf_tx_ring_info_get(struct sxevf_hw *hw,
+				u8 idx, u32 *head, u32 *tail)
+{
+	*head = SXEVF_REG_READ(hw, SXE_VFTDH(idx));
+	*tail = SXEVF_REG_READ(hw, SXE_VFTDT(idx));
+
+	return;
+}
+
+static const struct sxevf_dma_operations sxevf_dma_ops = {
+	.tx_ring_desc_configure  = sxevf_tx_ring_desc_configure,
+	.tx_writeback_off      = sxevf_tx_writeback_off,
+	.tx_desc_thresh_set    = sxevf_tx_desc_thresh_set,
+	.tx_ring_switch        = sxevf_tx_ring_switch,
+	.tx_ring_info_get      = sxevf_tx_ring_info_get,
+
+	.rx_disable          = sxevf_rx_disable,
+	.rx_ring_switch      = sxevf_rx_ring_switch,
+	.rx_ring_desc_configure = sxevf_rx_ring_desc_configure,
+	.rx_rcv_ctl_configure  = sxevf_rx_rcv_ctl_configure,
+};
+
+#ifdef SXE_DPDK
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last, cur)                          \
+	{																\
+		u32 latest = SXEVF_REG_READ(hw, reg);				\
+		cur += (latest - last) & UINT_MAX;						\
+		last = latest;											\
+	}
+
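+/*
+ * 36-bit counters are split across LSB/MSB registers; adding 2^36 before
+ * masking down to 36 bits keeps the delta correct across a wraparound.
+ */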
+#define SXEVF_36BIT_COUNTER_UPDATE(lsb, msb, last, cur)                \
+	{																 \
+		u64 new_lsb = SXEVF_REG_READ(hw, lsb);					 \
+		u64 new_msb = SXEVF_REG_READ(hw, msb);					 \
+		u64 latest = ((new_msb << 32) | new_lsb);				 \
+		cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+		last = latest;											 \
+	}
+
+#else
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last_counter, counter)	\
+	{							\
+		u32 current_counter = SXEVF_REG_READ(hw, reg);	\
+		if (current_counter < last_counter)		\
+			counter += 0x100000000LL;		\
+		last_counter = current_counter;			\
+		counter &= 0xFFFFFFFF00000000LL;		\
+		counter |= current_counter;			\
+	}
+
+#define SXEVF_36BIT_COUNTER_UPDATE(reg_lsb, reg_msb, last_counter, counter) \
+	{								 \
+		u64 current_counter_lsb = SXEVF_REG_READ(hw, reg_lsb);	 \
+		u64 current_counter_msb = SXEVF_REG_READ(hw, reg_msb);	 \
+		u64 current_counter = (current_counter_msb << 32) |	 \
+			current_counter_lsb;				 \
+		if (current_counter < last_counter)			 \
+			counter += 0x1000000000LL;			 \
+		last_counter = current_counter;				 \
+		counter &= 0xFFFFFFF000000000LL;			 \
+		counter |= current_counter;				 \
+	}
+#endif
+
+void sxevf_packet_stats_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats)
+{
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPRC, stats->last_vfgprc,
+				stats->vfgprc);
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPTC, stats->last_vfgptc,
+				stats->vfgptc);
+	SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGORC_LSB, SXEVF_VFGORC_MSB,
+				stats->last_vfgorc,
+				stats->vfgorc);
+	SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGOTC_LSB, SXEVF_VFGOTC_MSB,
+				stats->last_vfgotc,
+				stats->vfgotc);
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFMPRC, stats->last_vfmprc,
+				stats->vfmprc);
+
+	return;
+}
+
+void sxevf_stats_init_value_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats)
+{
+	stats->last_vfgprc = SXEVF_REG_READ(hw, SXE_VFGPRC);
+	stats->last_vfgorc = SXEVF_REG_READ(hw, SXE_VFGORC_LSB);
+	stats->last_vfgorc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGORC_MSB))) << 32);
+	stats->last_vfgptc = SXEVF_REG_READ(hw, SXE_VFGPTC);
+	stats->last_vfgotc = SXEVF_REG_READ(hw, SXE_VFGOTC_LSB);
+	stats->last_vfgotc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGOTC_MSB))) << 32);
+	stats->last_vfmprc = SXEVF_REG_READ(hw, SXE_VFMPRC);
+
+	return;
+}
+static const struct sxevf_stat_operations sxevf_stat_ops = {
+	.packet_stats_get	= sxevf_packet_stats_get,
+	.stats_init_value_get	= sxevf_stats_init_value_get,
+};
+
+static void sxevf_rx_max_used_ring_set(struct sxevf_hw *hw, u16 max_rx_ring)
+{
+	u32 rqpl = 0;
+
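+	/*
+	 * Set the RQPL field (bit 29, see SXEVF_PSRTYPE_RQPL_SHIFT) when
+	 * more than one Rx queue is in use.
+	 */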
+	if (max_rx_ring > 1) {
+		rqpl |= BIT(29);
+	}
+
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, rqpl);
+
+	return;
+}
+
+static const struct sxevf_dbu_operations sxevf_dbu_ops = {
+	.rx_max_used_ring_set = sxevf_rx_max_used_ring_set,
+};
+
+static const struct sxevf_mbx_operations sxevf_mbx_ops = {
+	.mailbox_read = sxevf_mailbox_read,
+	.mailbox_write = sxevf_mailbox_write,
+
+	.msg_write = sxevf_msg_write,
+	.msg_read = sxevf_msg_read,
+
+	.pf_req_irq_trigger = sxevf_pf_req_irq_trigger,
+	.pf_ack_irq_trigger = sxevf_pf_ack_irq_trigger,
+};
+
+void sxevf_hw_ops_init(struct sxevf_hw *hw)
+{
+	hw->setup.ops   = &sxevf_setup_ops;
+	hw->irq.ops     = &sxevf_irq_ops;
+	hw->mbx.ops     = &sxevf_mbx_ops;
+	hw->dma.ops     = &sxevf_dma_ops;
+	hw->stat.ops    = &sxevf_stat_ops;
+	hw->dbu.ops     = &sxevf_dbu_ops;
+
+	return;
+}
+
+#ifdef SXE_DPDK
+
+#define SXEVF_RSS_FIELD_MASK        0xffff0000
+#define SXEVF_MRQC_RSSEN            0x00000001
+
+#define SXEVF_RSS_KEY_SIZE          (40)
+#define SXEVF_MAX_RSS_KEY_ENTRIES   (10)
+#define SXEVF_MAX_RETA_ENTRIES      (128)
+
+void sxevf_rxtx_reg_init(struct sxevf_hw *hw)
+{
+	int i;
+	u32 vfsrrctl;
+
+	vfsrrctl = 0x100 << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	vfsrrctl |= 0x800 >> SXEVF_SRRCTL_BSIZEPKT_SHIFT;
+
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, 0);
+
+	for (i = 0; i < 7; i++) {
+		SXEVF_REG_WRITE(hw, SXE_VFRDH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFRDT(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(i), vfsrrctl);
+		SXEVF_REG_WRITE(hw, SXE_VFTDH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDT(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDWBAH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDWBAL(i), 0);
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	return;
+}
+
+u32 sxevf_irq_cause_get(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFEICR);
+}
+
+void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx),
+			(desc_dma_addr & DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
+	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
+
+	return;
+}
+
+void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, value);
+
+	return;
+}
+
+void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
+					u16 reg_index, bool is_enable)
+{
+	u32 vlnctrl;
+
+	vlnctrl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_index));
+
+	if (is_enable) {
+		vlnctrl |= SXEVF_RXDCTL_VME;
+	} else {
+		vlnctrl &= ~SXEVF_RXDCTL_VME;
+	}
+
+	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_index), vlnctrl);
+
+	return;
+}
+
+void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
+				u32 prefetch_thresh, u32 host_thresh, u32 wb_thresh)
+{
+	u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+
+	txdctl |= (prefetch_thresh & SXEVF_TXDCTL_THRESH_MASK);
+	txdctl |= ((host_thresh & SXEVF_TXDCTL_THRESH_MASK) << SXEVF_TXDCTL_HTHRESH_SHIFT);
+	txdctl |= ((wb_thresh & SXEVF_TXDCTL_THRESH_MASK) << SXEVF_TXDCTL_WTHRESH_SHIFT);
+
+	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+	return;
+}
+
+void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), value);
+
+	return;
+}
+
+u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx)
+{
+	return SXEVF_REG_READ(hw, SXE_VFRETA(reg_idx >> 2));
+}
+
+void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRETA(reg_idx >> 2), value);
+	return;
+}
+
+u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx)
+{
+	u32 rss_key;
+
+	if (reg_idx >= SXEVF_MAX_RSS_KEY_ENTRIES) {
+		rss_key = 0;
+	} else {
+		rss_key = SXEVF_REG_READ(hw, SXE_VFRSSRK(reg_idx));
+	}
+
+	return rss_key;
+}
+
+u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+	return (mrqc & SXEVF_RSS_FIELD_MASK);
+}
+
+bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw)
+{
+	bool rss_enable = false;
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+	if (mrqc & SXEVF_MRQC_RSSEN) {
+		rss_enable = true;
+	}
+
+	return rss_enable;
+}
+
+void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key)
+{
+	u32 i;
+
+	for (i = 0; i < SXEVF_MAX_RSS_KEY_ENTRIES; i++) {
+		SXEVF_REG_WRITE(hw, SXE_VFRSSRK(i), rss_key[i]);
+	}
+
+	return;
+}
+
+void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+	if (is_on) {
+		mrqc |= SXEVF_MRQC_RSSEN;
+	} else {
+		mrqc &= ~SXEVF_MRQC_RSSEN;
+	}
+
+	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
+
+	return;
+}
+
+void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+
+	mrqc &= ~SXEVF_RSS_FIELD_MASK;
+	mrqc |= rss_field;
+	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
+
+	return;
+}
+
+u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
+				const struct sxevf_reg_info *regs,
+				u32 *reg_buf)
+{
+	u32 j, i = 0;
+	int count = 0;
+
+	while (regs[i].count) {
+		for (j = 0; j < regs[i].count; j++) {
+			reg_buf[count + j] = SXEVF_REG_READ(hw,
+					regs[i].addr + j * regs[i].stride);
+			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
+		}
+
+		i++;
+		count += j;
+	}
+
+	return count;
+}
+
+#endif
diff --git a/drivers/net/sxe/base/sxevf_hw.h b/drivers/net/sxe/base/sxevf_hw.h
new file mode 100644
index 0000000000..67d711d5b8
--- /dev/null
+++ b/drivers/net/sxe/base/sxevf_hw.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_HW_H__
+#define __SXEVF_HW_H__
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#else
+#include "sxe_compat_platform.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+#endif
+
+#include "sxevf_regs.h"
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#define SXE_PRIU64  "llu"
+#define SXE_PRIX64  "llx"
+#define SXE_PRID64  "lld"
+#else
+#define SXE_PRIU64  PRIu64
+#define SXE_PRIX64  PRIx64
+#define SXE_PRID64  PRId64
+#endif
+
+#define SXEVF_TXRX_RING_NUM_MAX           8  
+#define SXEVF_MAX_TXRX_DESC_POLL          (10)
+#define SXEVF_TX_DESC_PREFETCH_THRESH_32  (32)
+#define SXEVF_TX_DESC_HOST_THRESH_1       (1)
+#define SXEVF_TX_DESC_WRITEBACK_THRESH_8  (8)
+#define SXEVF_TXDCTL_HTHRESH_SHIFT        (8)
+#define SXEVF_TXDCTL_WTHRESH_SHIFT        (16)
+
+#define SXEVF_TXDCTL_THRESH_MASK          (0x7F)
+
+#define SXEVF_RX_RING_POLL_MAX           (10)
+
+#define SXEVF_MAC_HDR_LEN_MAX           (127)
+#define SXEVF_NETWORK_HDR_LEN_MAX       (511)
+
+#define SXEVF_LINK_SPEED_UNKNOWN        0
+#define SXEVF_LINK_SPEED_1GB_FULL	0x0020
+#define SXEVF_LINK_SPEED_10GB_FULL	0x0080
+#define SXEVF_LINK_SPEED_100_FULL	0x0008
+
+#define SXEVF_VFT_TBL_SIZE           (128)   
+#define SXEVF_HW_TXRX_RING_NUM_MAX   (128)   
+
+#define SXEVF_VLAN_TAG_SIZE          (4)
+
+#define SXEVF_HW_UC_ENTRY_NUM_MAX   128
+
+enum {
+	SXEVF_LINK_TO_PHY   = 0,
+	SXEVF_LINK_TO_DOWN,
+	SXEVF_LINK_TO_REINIT,
+};
+
+enum {
+	SXEVF_DIAG_TEST_PASSED			= 0,
+	SXEVF_DIAG_TEST_BLOCKED			= 1,
+	SXEVF_DIAG_REG_PATTERN_TEST_ERR		= 2,
+	SXEVF_DIAG_CHECK_REG_TEST_ERR           = 3,
+};
+
+struct sxevf_hw;
+
+struct sxevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	u64 vfgprc;      
+	u64 vfgptc;      
+	u64 vfgorc;      
+	u64 vfgotc;      
+	u64 vfmprc;      
+
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+void sxevf_hw_ops_init(struct sxevf_hw *hw);
+
+struct sxevf_setup_operations {
+	void (*reset)(struct sxevf_hw *);
+	void (*hw_stop)(struct sxevf_hw *hw);
+	s32  (*regs_test)(struct sxevf_hw *hw);
+	u32  (*link_state_get)(struct sxevf_hw *hw);
+	u32  (*regs_dump)(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size);
+	bool (*reset_done)(struct sxevf_hw *);
+};
+
+struct sxevf_hw_setup {
+	const struct sxevf_setup_operations *ops;
+};
+
+struct sxevf_irq_operations {
+	void (*pending_irq_clear)(struct sxevf_hw *hw);
+	void (*ring_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
+	void (*event_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 value);
+	void (*ring_irq_map)(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 irq_idx);
+	void (*event_irq_map)(struct sxevf_hw *hw, u16 irq_idx);
+	void (*ring_irq_trigger)(struct sxevf_hw *hw, u64 eics);
+	void (*irq_enable)(struct sxevf_hw *hw, u32 mask);
+	void (*specific_irq_enable)(struct sxevf_hw *hw, u32 value);
+	void (*irq_disable)(struct sxevf_hw *hw);
+	void (*irq_off)(struct sxevf_hw *hw);
+};
+
+struct sxevf_irq_info {
+	const struct sxevf_irq_operations *ops;
+};
+
+struct sxevf_mbx_operations {
+
+	u32 (*mailbox_read)(struct sxevf_hw *hw);
+	void (*mailbox_write)(struct sxevf_hw *hw, u32 value);
+
+	void (*msg_write)(struct sxevf_hw *hw, u8 index, u32 msg);
+	u32 (*msg_read)(struct sxevf_hw *hw, u8 index);
+
+	void (*pf_req_irq_trigger)(struct sxevf_hw *hw);
+	void (*pf_ack_irq_trigger)(struct sxevf_hw *hw);
+};
+
+struct sxevf_mbx_stats {
+	u32 send_msgs; 
+	u32 rcv_msgs;  
+
+	u32 reqs;      
+	u32 acks;      
+	u32 rsts;      
+};
+
+struct sxevf_mbx_info {
+	const struct sxevf_mbx_operations *ops; 
+
+	struct sxevf_mbx_stats stats; 
+	u32 msg_len;  
+	u32 retry;    
+	u32 interval; 
+	u32 reg_value; 
+	u32 api_version; 
+};
+
+struct sxevf_dma_operations {
+	void (*tx_ring_desc_configure)(struct sxevf_hw *, u32, u64, u8);
+	void (*tx_writeback_off)(struct sxevf_hw *, u8);
+	void (*tx_desc_thresh_set)(struct sxevf_hw *, u8, u32, u32, u32);
+	void (*tx_ring_switch)(struct sxevf_hw *, u8, bool);
+	void (*tx_desc_wb_flush)(struct sxevf_hw *, u8);
+	void (*tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx,
+					u32 *head, u32 *tail);
+	void (*rx_disable)(struct sxevf_hw *, u8);
+	void (*rx_ring_switch)(struct sxevf_hw *, u8, bool);
+	void (*rx_ring_desc_configure)(struct sxevf_hw *, u32, u64, u8);
+	void (*rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len, bool drop_en);
+};
+
+struct sxevf_dma_info {
+	const struct sxevf_dma_operations *ops;
+};
+
+struct sxevf_stat_operations {
+	void (*packet_stats_get)(struct sxevf_hw *,
+			struct sxevf_hw_stats *);
+	void (*stats_init_value_get)(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats);
+};
+
+struct sxevf_stat_info {
+	const struct sxevf_stat_operations *ops;
+};
+
+struct sxevf_dbu_operations {
+	void (*rx_max_used_ring_set)(struct sxevf_hw *, u16);
+
+};
+
+struct sxevf_dbu_info {
+	const struct sxevf_dbu_operations	*ops;
+};
+
+enum sxevf_hw_state {
+	SXEVF_HW_STOP,
+	SXEVF_HW_FAULT,
+};
+
+struct sxevf_hw {
+	u8 __iomem *reg_base_addr;      
+	void *adapter;
+
+	void *priv;
+	unsigned long state;   
+	void (*fault_handle)(void *priv);
+	u32 (*reg_read)(const volatile void *reg);
+	void (*reg_write)(u32 value, volatile void *reg);
+	s32	board_type;		
+
+	struct sxevf_hw_setup   setup;   
+	struct sxevf_irq_info   irq;     
+	struct sxevf_mbx_info   mbx;     
+
+	struct sxevf_dma_info    dma;    
+	struct sxevf_stat_info   stat;   
+	struct sxevf_dbu_info    dbu;
+};
+
+struct sxevf_reg_info {
+	u32 addr;        
+	u32 count;       
+	u32 stride;      
+	const s8 *name;  
+};
+
+u16 sxevf_reg_dump_num_get(void);
+
+void sxevf_hw_fault_handle(struct sxevf_hw *hw);
+
+static inline bool sxevf_is_hw_fault(struct sxevf_hw *hw)
+{
+	return test_bit(SXEVF_HW_FAULT, &hw->state);
+}
+
+static inline void sxevf_hw_fault_handle_init(struct sxevf_hw *hw,
+			void (*handle)(void *), void *priv)
+{
+	hw->priv = priv;
+	hw->fault_handle = handle;
+
+	return;
+}
+
+static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw,
+		u32 (*read)(const volatile void *),
+		void (*write)(u32, volatile void *))
+{
+	hw->reg_read  = read;
+	hw->reg_write = write;
+
+	return;
+}
+
+#ifdef SXE_DPDK 
+
+void sxevf_irq_disable(struct sxevf_hw *hw);
+
+void sxevf_hw_stop(struct sxevf_hw *hw);
+
+void sxevf_hw_reset(struct sxevf_hw *hw);
+
+void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg);
+
+u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index);
+
+u32 sxevf_mailbox_read(struct sxevf_hw *hw);
+
+void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value);
+
+void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw);
+
+void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw);
+
+void sxevf_rxtx_reg_init(struct sxevf_hw *hw);
+
+void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask);
+
+u32 sxevf_irq_cause_get(struct sxevf_hw *hw);
+
+void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector);
+
+void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector);
+
+void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
+
+void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx);
+
+void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx);
+
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len,
+				   bool drop_en);
+
+void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value);
+
+void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
+					u16 reg_index, bool is_enable);
+
+void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
+				u32 prefetch_thresh, u32 host_thresh, u32 wb_thresh);
+
+void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+
+void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+
+void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value);
+
+void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value);
+
+void sxevf_packet_stats_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats);
+
+void sxevf_stats_init_value_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats);
+
+u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx);
+
+void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
+						u16 reg_idx, u32 value);
+
+u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx);
+
+u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw);
+
+void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field);
+
+void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on);
+
+void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key);
+
+bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw);
+
+u32 sxevf_link_state_get(struct sxevf_hw *hw);
+
+u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
+				const struct sxevf_reg_info *regs,
+				u32 *reg_buf);
+
+#endif 
+#endif
diff --git a/drivers/net/sxe/base/sxevf_regs.h b/drivers/net/sxe/base/sxevf_regs.h
new file mode 100644
index 0000000000..43486db526
--- /dev/null
+++ b/drivers/net/sxe/base/sxevf_regs.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_REGS_H__
+#define __SXEVF_REGS_H__
+
+#define SXEVF_REG_READ_FAIL  0xffffffffU
+#define SXEVF_REG_READ_RETRY 5
+
+#define SXE_VFLINKS_UP		0x00000008
+#define SXE_VFLINKS_SPEED	0x00000006
+#define SXE_VFLINKS_SPEED_10G	0x00000006
+#define SXE_VFLINKS_SPEED_1G	0x00000004
+#define SXE_VFLINKS_SPEED_100	0x00000002
+
+#define SXE_VFCTRL        0x00000
+#define SXE_VFSTATUS      0x00008
+#define SXE_VFLINKS       0x00018
+#define SXE_VFFRTIMER     0x00048
+#define SXE_VFRXMEMWRAP   0x03190
+#define SXE_VFEICR        0x00100
+#define SXE_VFEICS        0x00104
+#define SXE_VFEIMS        0x00108
+#define SXE_VFEIMC        0x0010C
+#define SXE_VFEIAM        0x00114
+#define SXE_VFEITR(x)     (0x00820 + (4 * (x)))
+#define SXE_VFIVAR(x)     (0x00120 + (4 * (x)))
+#define SXE_VFIVAR_MISC    0x00140
+#define SXE_VFRDBAL(x)    (0x01000 + (0x40 * (x)))
+#define SXE_VFRDBAH(x)    (0x01004 + (0x40 * (x)))
+#define SXE_VFRDLEN(x)    (0x01008 + (0x40 * (x)))
+#define SXE_VFRDH(x)      (0x01010 + (0x40 * (x)))
+#define SXE_VFRDT(x)      (0x01018 + (0x40 * (x)))
+#define SXE_VFRXDCTL(x)   (0x01028 + (0x40 * (x)))
+#define SXE_VFSRRCTL(x)   (0x01014 + (0x40 * (x)))
+#define SXE_VFLROCTL(x)   (0x0102C + (0x40 * (x)))
+#define SXE_VFPSRTYPE     0x00300
+#define SXE_VFTDBAL(x)    (0x02000 + (0x40 * (x)))
+#define SXE_VFTDBAH(x)    (0x02004 + (0x40 * (x)))
+#define SXE_VFTDLEN(x)    (0x02008 + (0x40 * (x)))
+#define SXE_VFTDH(x)      (0x02010 + (0x40 * (x)))
+#define SXE_VFTDT(x)      (0x02018 + (0x40 * (x)))
+#define SXE_VFTXDCTL(x)   (0x02028 + (0x40 * (x)))
+#define SXE_VFTDWBAL(x)   (0x02038 + (0x40 * (x)))
+#define SXE_VFTDWBAH(x)   (0x0203C + (0x40 * (x)))
+#define SXE_VFDCA_RXCTRL(x)    (0x0100C + (0x40 * (x)))
+#define SXE_VFDCA_TXCTRL(x)    (0x0200c + (0x40 * (x)))
+#define SXE_VFGPRC        0x0101C
+#define SXE_VFGPTC        0x0201C
+#define SXE_VFGORC_LSB    0x01020
+#define SXE_VFGORC_MSB    0x01024
+#define SXE_VFGOTC_LSB    0x02020
+#define SXE_VFGOTC_MSB    0x02024
+#define SXE_VFMPRC        0x01034
+#define SXE_VFMRQC        0x3000
+#define SXE_VFRSSRK(x)    (0x3100 + ((x) * 4))
+#define SXE_VFRETA(x)     (0x3200 + ((x) * 4))
+
+#define SXEVF_VFEIMC_IRQ_MASK            (7)
+#define SXEVF_IVAR_ALLOC_VALID    (0x80)
+
+#define SXEVF_EITR_CNT_WDIS       (0x80000000)
+#define SXEVF_EITR_ITR_MASK       (0x00000FF8)
+#define SXEVF_EITR_ITR_SHIFT      (2)
+#define SXEVF_EITR_ITR_MAX        (SXEVF_EITR_ITR_MASK >> SXEVF_EITR_ITR_SHIFT)
+
+#define SXE_VFRXDCTL_ENABLE  0x02000000
+#define SXE_VFTXDCTL_ENABLE  0x02000000
+#define SXE_VFCTRL_RST       0x04000000
+
+#define SXEVF_RXDCTL_ENABLE     0x02000000  
+#define SXEVF_RXDCTL_VME	0x40000000  
+
+#define SXEVF_PSRTYPE_RQPL_SHIFT               29 
+
+#define SXEVF_SRRCTL_DROP_EN                   0x10000000
+#define SXEVF_SRRCTL_DESCTYPE_DATA_ONEBUF      0x02000000
+#define SXEVF_SRRCTL_BSIZEPKT_SHIFT            (10)
+#define SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT        (2)
+#define SXEVF_SRRCTL_BSIZEPKT_MASK	       0x0000007F
+#define SXEVF_SRRCTL_BSIZEHDR_MASK	       0x00003F00
+
+#define SXE_VFMAILBOX       0x002FC
+#define SXE_VFMBMEM         0x00200
+
+#define SXE_VFMAILBOX_REQ     0x00000001 
+#define SXE_VFMAILBOX_ACK     0x00000002 
+#define SXE_VFMAILBOX_VFU     0x00000004 
+#define SXE_VFMAILBOX_PFU     0x00000008 
+#define SXE_VFMAILBOX_PFSTS   0x00000010 
+#define SXE_VFMAILBOX_PFACK   0x00000020 
+#define SXE_VFMAILBOX_RSTI    0x00000040 
+#define SXE_VFMAILBOX_RSTD    0x00000080 
+#define SXE_VFMAILBOX_RC_BIT  0x000000B0  
+
+#define SXEVF_TDBAL(_i)      (0x02000 + ((_i) * 0x40))
+#define SXEVF_TDBAH(_i)      (0x02004 + ((_i) * 0x40))
+#define SXEVF_TDLEN(_i)      (0x02008 + ((_i) * 0x40))
+#define SXEVF_TDH(_i)        (0x02010 + ((_i) * 0x40))
+#define SXEVF_TDT(_i)        (0x02018 + ((_i) * 0x40))
+#define SXEVF_TXDCTL(_i)     (0x02028 + ((_i) * 0x40))
+#define SXEVF_TDWBAL(_i)     (0x02038 + ((_i) * 0x40))
+#define SXEVF_TDWBAH(_i)     (0x0203C + ((_i) * 0x40))
+
+#define SXEVF_TXDCTL_SWFLSH  (0x02000000)  
+#define SXEVF_TXDCTL_ENABLE  (0x02000000) 
+
+#define SXEVF_VFGPRC          0x0101C
+#define SXEVF_VFGPTC          0x0201C
+#define SXEVF_VFGORC_LSB      0x01020
+#define SXEVF_VFGORC_MSB      0x01024
+#define SXEVF_VFGOTC_LSB      0x02020
+#define SXEVF_VFGOTC_MSB      0x02024
+#define SXEVF_VFMPRC          0x01034
+
+#define SXEVF_EICR_MASK       0x07
+
+#endif
diff --git a/drivers/net/sxe/include/drv_msg.h b/drivers/net/sxe/include/drv_msg.h
new file mode 100644
index 0000000000..9f06624cc3
--- /dev/null
+++ b/drivers/net/sxe/include/drv_msg.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __DRV_MSG_H__
+#define __DRV_MSG_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_VERSION_LEN 32
+
+typedef struct sxe_version_resp {
+    U8 fw_version[SXE_VERSION_LEN];
+} sxe_version_resp_s;
+
+#endif 
diff --git a/drivers/net/sxe/include/readme.txt b/drivers/net/sxe/include/readme.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/drivers/net/sxe/include/sxe/mgl/sxe_port.h b/drivers/net/sxe/include/sxe/mgl/sxe_port.h
new file mode 100644
index 0000000000..e41cb9e87b
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/mgl/sxe_port.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_PORT_H__
+#define __SXE_PORT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "mgc_types.h"
+#include "ps3_types.h"
+
+typedef enum MglPortCmdSetCode {
+    MGL_CMD_PORT_SET_BASE      = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0),
+    MGL_CMD_PORT_SET_REG       = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1),
+    MGL_CMD_PORT_SET_LED       = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2),
+    MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3),
+    MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4),
+    MGL_CMD_SXE_SOC_RST        = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5),
+    MGL_CMD_SXE_SET_MFGINFO    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6),
+    MGL_CMD_SXE_SET_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7),
+    MGL_CMD_SXE_OPT_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8),
+} MglPortCmdSetCode_e;
+
+typedef enum MglPortCmdGetCode {
+    MGL_CMD_SXE_GET_REG        = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0),
+    MGL_CMD_SXE_GET_SOC_INFO   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1),
+    MGL_CMD_SXE_LOG_EXPORT     = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2),
+    MGL_CMD_SXE_REGS_DUMP      = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3),
+    MGL_CMD_SXE_GET_MFGINFO    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4),
+    MGL_CMD_SXE_MAC_ADDR_GET   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5),
+    MGL_CMD_SXE_GET_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6),
+} MglPortCmdGetCode_e;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_cli.h b/drivers/net/sxe/include/sxe/sxe_cli.h
new file mode 100644
index 0000000000..206cc48542
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_cli.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_CLI_H__
+#define __SXE_CLI_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_VERION_LEN                  (32)
+#define SXE_MAC_NUM                     (128)
+#define SXE_PORT_TRANSCEIVER_LEN        (32)
+#define SXE_PORT_VENDOR_LEN             (32)
+#define SXE_CHIP_TYPE_LEN               (32)
+#define SXE_VPD_SN_LEN                  (16)
+#define SXE_SOC_RST_TIME                (0x93A80)
+#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)
+#define MGC_TERMLOG_INFO_MAX_LEN        (12 * 1024)
+#define SXE_REGS_DUMP_MAX_LEN           (12 * 1024)
+#define SXE_PRODUCT_NAME_LEN            (32)
+
+typedef enum sxe_led_mode {
+    SXE_IDENTIFY_LED_BLINK_ON   = 0,    
+    SXE_IDENTIFY_LED_BLINK_OFF,         
+    SXE_IDENTIFY_LED_ON,                
+    SXE_IDENTIFY_LED_OFF,               
+    SXE_IDENTIFY_LED_RESET,             
+} sxe_led_mode_s;
+
+typedef struct sxe_led_ctrl {
+    U32    mode;      
+    U32    duration;  
+
+} sxe_led_ctrl_s;
+
+typedef struct sxe_led_ctrl_resp {
+    U32    ack;       
+} sxe_led_ctrl_resp_s;
+
+typedef enum PortLinkSpeed {
+    PORT_LINK_NO            = 0,     
+    PORT_LINK_100M          = 1,     
+    PORT_LINK_1G            = 2,     
+    PORT_LINK_10G           = 3,     
+} PortLinkSpeed_e;
+
+typedef struct SysSocInfo {
+    S8     fwVer[SXE_VERION_LEN];        
+    S8     optVer[SXE_VERION_LEN];       
+    U8     socStatus;                    
+    U8     pad[3];
+    S32    socTemp;                      
+    U64    chipId;                       
+    S8     chipType[SXE_CHIP_TYPE_LEN];  
+    S8     pba[SXE_VPD_SN_LEN];          
+    S8     productName[SXE_PRODUCT_NAME_LEN];   
+} SysSocInfo_s;
+
+typedef struct SysPortInfo {
+    U64    mac[SXE_MAC_NUM];         
+    U8     isPortAbs;                
+    U8     linkStat;                 
+    U8     linkSpeed;                
+
+
+    U8     isSfp:1;                                     
+    U8     isGetInfo:1;                                 
+    U8     rvd:6;                                       
+    S8     opticalModTemp;                              
+    U8     pad[3];
+    S8     transceiverType[SXE_PORT_TRANSCEIVER_LEN];   
+    S8     vendorName[SXE_PORT_VENDOR_LEN];             
+    S8     vendorPn[SXE_PORT_VENDOR_LEN];               
+} SysPortInfo_s;
+
+typedef struct SysInfoResp {
+    SysSocInfo_s     socInfo;        
+    SysPortInfo_s    portInfo;       
+} SysInfoResp_s;
+
+typedef enum SfpTempTdMode {
+    SFP_TEMP_THRESHOLD_MODE_ALARM   = 0,
+    SFP_TEMP_THRESHOLD_MODE_WARN,
+} SfpTempTdMode_e;
+
+typedef struct SfpTempTdSet{
+    U8     mode;             
+    U8     pad[3];
+    S8     hthreshold;       
+    S8     lthreshold;       
+} SfpTempTdSet_s;
+
+typedef struct SxeLogExportResp {
+    U16    curLogLen;       
+    U8     isEnd;
+    U8     pad;
+    S32    sessionId;       
+    S8     data[0];
+} SxeLogExportResp_s;
+
+typedef enum SxeLogExportType  {
+    SXE_LOG_EXPORT_REQ    = 0,     
+    SXE_LOG_EXPORT_FIN,            
+    SXE_LOG_EXPORT_ABORT,          
+} SxeLogExportType_e;
+
+typedef struct SxeLogExportReq {
+    U8     isALLlog;       
+    U8     cmdtype;        
+    U8     isBegin;        
+    U8     pad;
+    S32    sessionId;      
+    U32    logLen;         
+} SxeLogExportReq_s;
+
+typedef struct SocRstReq {
+    U32    time;        
+} SocRstReq_s;
+
+typedef struct RegsDumpResp {
+    U32    curdwLen;    
+    U8     data[0];
+} RegsDumpResp_s;
+
+enum {
+    SXE_MFG_PART_NUMBER_LEN   = 8,
+    SXE_MFG_SERIAL_NUMBER_LEN = 16,
+    SXE_MFG_REVISION_LEN      = 4,
+    SXE_MFG_OEM_STR_LEN       = 64,
+    SXE_MFG_SXE_BOARD_ASSEMBLY_LEN  = 32,
+    SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
+    SXE_MFG_SXE_MAC_ADDR_CNT        = 2,
+};
+
+typedef struct sxeMfgInfo {
+    U8 partNumber[SXE_MFG_PART_NUMBER_LEN];      
+    U8 serialNumber [SXE_MFG_SERIAL_NUMBER_LEN]; 
+    U32 mfgDate;                               
+    U8 revision[SXE_MFG_REVISION_LEN];         
+    U32 reworkDate;                            
+    U8 pad[4];
+    U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT];             
+    U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN]; 
+    U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];  
+    U8 extra1[SXE_MFG_OEM_STR_LEN];                    
+    U8 extra2[SXE_MFG_OEM_STR_LEN];                    
+} sxeMfgInfo_t;
+
+typedef struct RegsDumpReq {
+    U32    baseAddr;    
+    U32    dwLen;       
+} RegsDumpReq_s;
+
+typedef enum sxe_pcs_mode {
+    SXE_PCS_MODE_1000BASE_KX_WO = 0, 
+    SXE_PCS_MODE_1000BASE_KX_W,      
+    SXE_PCS_MODE_SGMII,              
+    SXE_PCS_MODE_10GBASE_KR_WO,      
+    SXE_PCS_MODE_AUTO_NEGT_73,       
+    SXE_PCS_MODE_LPBK_PHY_TX2RX,     
+    SXE_PCS_MODE_LPBK_PHY_RX2TX,     
+    SXE_PCS_MODE_LPBK_PCS_RX2TX,     
+    SXE_PCS_MODE_BUTT,               
+} sxe_pcs_mode_e;
+
+typedef enum sxe_remote_fault_mode {
+    SXE_REMOTE_FALUT_NO_ERROR          = 0,
+    SXE_REMOTE_FALUT_OFFLINE,
+    SXE_REMOTE_FALUT_LINK_FAILURE,
+    SXE_REMOTE_FALUT_AUTO_NEGOTIATION,
+    SXE_REMOTE_UNKNOWN,
+} sxe_remote_fault_e;
+
+typedef struct sxe_pcs_cfg {
+    sxe_pcs_mode_e mode;
+    U32 mtu;
+} sxe_pcs_cfg_s;
+
+typedef enum sxe_an_speed {
+    SXE_AN_SPEED_NO_LINK = 0,
+    SXE_AN_SPEED_100M,
+    SXE_AN_SPEED_1G,      
+    SXE_AN_SPEED_10G,     
+    SXE_AN_SPEED_UNKNOWN,
+} sxe_an_speed_e;
+
+typedef enum sxe_phy_pause_cap {
+    SXE_PAUSE_CAP_NO_PAUSE    = 0,   
+    SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,  
+    SXE_PAUSE_CAP_SYMMETRIC_PAUSE,   
+    SXE_PAUSE_CAP_BOTH_PAUSE,        
+    SXE_PAUSE_CAP_UNKNOWN,
+} sxe_phy_pause_cap_e;
+
+typedef enum sxe_phy_duplex_type {
+    SXE_FULL_DUPLEX     = 0,
+    SXE_HALF_DUPLEX     = 1,
+    SXE_UNKNOWN_DUPLEX,
+} sxe_phy_duplex_type_e;
+
+typedef struct sxe_phy_an_cap {
+    sxe_remote_fault_e   remote_fault; 
+    sxe_phy_pause_cap_e  pause_cap;    
+    sxe_phy_duplex_type_e duplex_cap;  
+} sxe_phy_an_cap_s;
+
+typedef struct sxe_an_cap {
+    sxe_phy_an_cap_s local;     
+    sxe_phy_an_cap_s peer;      
+} sxe_an_cap_s;
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_hdc.h b/drivers/net/sxe/include/sxe/sxe_hdc.h
new file mode 100644
index 0000000000..bbdc273bf9
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_hdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_HDC_H__
+#define __SXE_HDC_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
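+/* HDC transfer limits: 16 KB of packet cache, packets of at most 1024
+ * bytes (DWORD_NUM = 256 dwords), and up to 3 retries per transfer.
+ */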
+#define HDC_CACHE_TOTAL_LEN     (16 * 1024)
+#define ONE_PACKET_LEN_MAX      (1024)        
+#define DWORD_NUM               (256)         
+#define HDC_TRANS_RETRY_COUNT   (3)           
+
+
+typedef enum SxeHdcErrnoCode {
+    PKG_OK            = 0,     
+    PKG_ERR_REQ_LEN,           
+    PKG_ERR_RESP_LEN,          
+    PKG_ERR_PKG_SKIP,          
+    PKG_ERR_NODATA,            
+    PKG_ERR_PF_LK,             
+    PKG_ERR_OTHER,
+} SxeHdcErrnoCode_e;
+
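+/* HDC packet header: a single 32-bit word, accessible either through the
+ * bitfields in .head or as the raw dword .dw0. totalLen is 12 bits wide,
+ * so one transaction is limited to 4095 length units.
+ */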
+typedef union HdcHeader {
+    struct {
+        U8 pid:4;          
+        U8 errCode:4;      
+        U8 len;            
+        U16 startPkg:1;    
+        U16 endPkg:1;      
+        U16 isRd:1;        
+        U16 msi:1;         
+        U16 totalLen:12;   
+    } head;
+    U32 dw0;
+} HdcHeader_u;
+
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_ioctl.h b/drivers/net/sxe/include/sxe/sxe_ioctl.h
new file mode 100644
index 0000000000..4f39b0f92c
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_ioctl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef _SXE_IOCTL_H_
+#define _SXE_IOCTL_H_
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+struct SxeIoctlSyncCmd {
+    U64   traceid;
+    void *inData;
+    U32   inLen;
+    void *outData;
+    U32   outLen;
+};
+
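+/* Synchronous host-driver command: _IOWR('M', 1, ...) passes a
+ * SxeIoctlSyncCmd whose inData/inLen and outData/outLen describe the
+ * request and response buffers.
+ */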
+#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct SxeIoctlSyncCmd)
+
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_msg.h b/drivers/net/sxe/include/sxe/sxe_msg.h
new file mode 100644
index 0000000000..3db4e60ce5
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_msg.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_MSG_H__
+#define __SXE_MSG_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_MAC_ADDR_LEN 6
+
+#define SXE_HDC_CMD_HDR_SIZE  sizeof(struct sxe_hdc_cmd_hdr)
+#define SXE_HDC_MSG_HDR_SIZE  sizeof(struct sxe_hdc_drv_cmd_msg)
+
+enum sxe_cmd_type {
+    SXE_CMD_TYPE_CLI,
+    SXE_CMD_TYPE_DRV,
+    SXE_CMD_TYPE_UNKOWN,
+};
+
+typedef struct sxe_hdc_cmd_hdr {
+    U8 cmd_type;       
+    U8 cmd_sub_type;   
+    U8 reserve[6];
+} sxe_hdc_cmd_hdr_s;
+
+typedef enum SxeFWState {
+    SXE_FW_START_STATE_UNDEFINED    = 0x00,   
+    SXE_FW_START_STATE_INIT_BASE    = 0x10,   
+    SXE_FW_START_STATE_SCAN_DEVICE  = 0x20,   
+    SXE_FW_START_STATE_FINISHED     = 0x30,   
+    SXE_FW_START_STATE_UPGRADE      = 0x31,   
+    SXE_FW_RUNNING_STATE_ABNOMAL    = 0x40,   
+    SXE_FW_START_STATE_MASK         = 0xF0,
+} SxeFWState_e;
+
+typedef struct SxeFWStateInfo {
+    U8 socStatus;          
+    char statBuff[32];       
+} SxeFWStateInfo_s;
+
+
+typedef enum MsiEvt {
+    MSI_EVT_SOC_STATUS          = 0x1,
+    MSI_EVT_HDC_FWOV            = 0x2,
+    MSI_EVT_HDC_TIME_SYNC       = 0x4,
+
+    MSI_EVT_MAX                 = 0x80000000,
+} MsiEvt_u;
+
+
+typedef enum SxeFwHdcState {
+    SXE_FW_HDC_TRANSACTION_IDLE = 0x01,
+    SXE_FW_HDC_TRANSACTION_BUSY,
+
+    SXE_FW_HDC_TRANSACTION_ERR,
+} SxeFwHdcState_e;
+
+enum sxe_hdc_cmd_opcode {
+    SXE_CMD_SET_WOL         = 1,
+    SXE_CMD_LED_CTRL,
+    SXE_CMD_SFP_READ,
+    SXE_CMD_SFP_WRITE,
+    SXE_CMD_TX_DIS_CTRL     = 5,
+    SXE_CMD_TINE_SYNC,
+    SXE_CMD_RATE_SELECT,
+    SXE_CMD_R0_MAC_GET,
+    SXE_CMD_LOG_EXPORT,
+    SXE_CMD_FW_VER_GET  = 10,
+    SXE_CMD_PCS_SDS_INIT,         
+    SXE_CMD_AN_SPEED_GET,         
+    SXE_CMD_AN_CAP_GET,           
+    SXE_CMD_GET_SOC_INFO,         
+    SXE_CMD_MNG_RST = 15,         
+
+    SXE_CMD_MAX,
+};
+
+enum sxe_hdc_cmd_errcode {
+    SXE_ERR_INVALID_PARAM = 1,
+};
+
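+/* Driver command message: fixed header (opcode, errcode, length, traceid)
+ * followed by a variable-length body; .length holds the request length on
+ * send and the ack length on receive.
+ */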
+typedef struct sxe_hdc_drv_cmd_msg {
+
+    U16 opcode;
+    U16 errcode;
+    union dataLength {
+        U16 req_len;
+        U16 ack_len;
+    } length;
+    U8 reserve[8];
+    U64 traceid;
+    U8 body[];
+} sxe_hdc_drv_cmd_msg_s;
+
+
+typedef struct sxe_sfp_rw_req {
+    U16 offset;       
+    U16 len;          
+    U8  write_data[];
+} sxe_sfp_rw_req_s;
+
+
+typedef struct sxe_sfp_read_resp {
+    U16 len;     
+    U8  resp[];
+} sxe_sfp_read_resp_s;
+
+typedef enum sxe_sfp_rate {
+    SXE_SFP_RATE_1G     = 0,
+    SXE_SFP_RATE_10G    = 1,
+} sxe_sfp_rate_e;
+
+
+typedef struct sxe_sfp_rate_able {
+    sxe_sfp_rate_e rate;       
+} sxe_sfp_rate_able_s;
+
+
+typedef struct sxe_spp_tx_able {
+    BOOL isDisable;       
+} sxe_spp_tx_able_s;
+
+
+typedef struct sxe_default_mac_addr_resp {
+    U8  addr[SXE_MAC_ADDR_LEN]; 
+} sxe_default_mac_addr_resp_s;
+
+
+typedef struct sxe_mng_rst {
+    BOOL enable;       
+} sxe_mng_rst_s;
+
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_regs.h b/drivers/net/sxe/include/sxe/sxe_regs.h
new file mode 100644
index 0000000000..0652cd4906
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_regs.h
@@ -0,0 +1,1276 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_REGS_H__
+#define __SXE_REGS_H__
+
+#define SXE_LINKSEC_MAX_SC_COUNT 1
+#define SXE_LINKSEC_MAX_SA_COUNT 2
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+
+
+#define SXE_REG_READ_FAIL	0xffffffffU
+#define SXE_REG_READ_RETRY	5
+#ifdef SXE_TEST
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	(1)
+#else
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	(800)
+#endif
+
+
+#define SXE_CTRL	0x00000 
+#define SXE_STATUS	0x00008 
+#define SXE_CTRL_EXT	0x00018 
+
+
+#define SXE_CTRL_LNK_RST	0x00000008
+#define SXE_CTRL_RST		0x04000000
+
+#ifdef SXE_TEST
+#define SXE_CTRL_RST_MASK	(0)
+#define SXE_CTRL_GIO_DIS	(0)
+#else
+#define SXE_CTRL_RST_MASK	(SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+#define SXE_CTRL_GIO_DIS	0x00000004
+#endif
+
+
+#define SXE_STATUS_GIO		0x00080000
+
+
+#define SXE_CTRL_EXT_PFRSTD	0x00004000
+#define SXE_CTRL_EXT_NS_DIS	0x00010000
+#define SXE_CTRL_EXT_DRV_LOAD	0x10000000
+
+
+#define SXE_FCRTL(_i)	(0x03220 + ((_i) * 4))
+#define SXE_FCRTH(_i)	(0x03260 + ((_i) * 4))
+#define SXE_FCCFG	0x03D00
+
+
+#define SXE_FCRTL_XONE		0x80000000
+#define SXE_FCRTH_FCEN		0x80000000
+
+#define SXE_FCCFG_TFCE_802_3X	0x00000008
+#define SXE_FCCFG_TFCE_PRIORITY	0x00000010
+
+
+#define SXE_GCR_EXT           0x11050 
+
+
+#define SXE_GCR_CMPL_TMOUT_MASK		0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms		0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND	0x00010000
+#define SXE_GCR_CAP_VER2		0x00040000
+#define SXE_GCR_EXT_MSIX_EN		0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR	0x40000000
+#define SXE_GCR_EXT_VT_MODE_16		0x00000001
+#define SXE_GCR_EXT_VT_MODE_32		0x00000002
+#define SXE_GCR_EXT_VT_MODE_64		0x00000003
+#define SXE_GCR_EXT_VT_MODE_MASK	0x00000003
+#define SXE_GCR_EXT_SRIOV		(SXE_GCR_EXT_MSIX_EN | \
+					SXE_GCR_EXT_VT_MODE_64)
+
+#define SXE_PCI_DEVICE_STATUS		0x7A
+#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
+#define SXE_PCI_LINK_STATUS		0x82
+#define SXE_PCI_DEVICE_CONTROL2		0x98
+#define SXE_PCI_LINK_WIDTH		0x3F0
+#define SXE_PCI_LINK_WIDTH_1		0x10
+#define SXE_PCI_LINK_WIDTH_2		0x20
+#define SXE_PCI_LINK_WIDTH_4		0x40
+#define SXE_PCI_LINK_WIDTH_8		0x80
+#define SXE_PCI_LINK_SPEED		0xF
+#define SXE_PCI_LINK_SPEED_2500		0x1
+#define SXE_PCI_LINK_SPEED_5000		0x2
+#define SXE_PCI_LINK_SPEED_8000		0x3
+#define SXE_PCI_HEADER_TYPE_REGISTER	0x0E
+#define SXE_PCI_HEADER_TYPE_MULTIFUNC	0x80
+#define SXE_PCI_DEVICE_CONTROL2_16ms	0x0005
+
+#define SXE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define SXE_PCIDEVCTRL2_16_32ms_def	0x0
+#define SXE_PCIDEVCTRL2_50_100us	0x1
+#define SXE_PCIDEVCTRL2_1_2ms		0x2
+#define SXE_PCIDEVCTRL2_16_32ms		0x5
+#define SXE_PCIDEVCTRL2_65_130ms	0x6
+#define SXE_PCIDEVCTRL2_260_520ms	0x9
+#define SXE_PCIDEVCTRL2_1_2s		0xa
+#define SXE_PCIDEVCTRL2_4_8s		0xd
+#define SXE_PCIDEVCTRL2_17_34s		0xe
+
+
+#define SXE_EICR	0x00800
+#define SXE_EICS	0x00808
+#define SXE_EIMS	0x00880
+#define SXE_EIMC	0x00888
+#define SXE_EIAC	0x00810
+#define SXE_EIAM	0x00890
+#define SXE_EITRSEL	0x00894
+#define SXE_GPIE	0x00898
+#define SXE_IVAR(i)	(0x00900 + (i) * 4)
+#define SXE_IVAR_MISC	0x00A00
+#define SXE_EICS_EX(i)	(0x00A90 + (i) * 4)
+#define SXE_EIMS_EX(i)	(0x00AA0 + (i) * 4)
+#define SXE_EIMC_EX(i)	(0x00AB0 + (i) * 4)
+#define SXE_EIAM_EX(i)	(0x00AD0 + (i) * 4)
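+/* EITR registers 0-23 sit at 0x00820 with a 4-byte stride; entries 24 and
+ * above continue at 0x012300.
+ */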
+#define SXE_EITR(i)	(((i) <= 23) ? (0x00820 + ((i) * 4)) : \
+			(0x012300 + (((i) - 24) * 4)))
+
+#define SXE_SPP_PROC	0x00AD8
+#define SXE_SPP_STATE	0x00AF4
+
+
+
+#define SXE_EICR_RTX_QUEUE	0x0000FFFF
+#define SXE_EICR_FLOW_NAV	0x00010000
+#define SXE_EICR_MAILBOX	0x00080000
+#define SXE_EICR_LSC		0x00100000
+#define SXE_EICR_LINKSEC	0x00200000
+#define SXE_EICR_ECC		0x10000000
+#define SXE_EICR_HDC		0x20000000
+#define SXE_EICR_TCP_TIMER	0x40000000
+#define SXE_EICR_OTHER		0x80000000
+
+
+#define SXE_EICS_RTX_QUEUE	SXE_EICR_RTX_QUEUE
+#define SXE_EICS_FLOW_NAV	SXE_EICR_FLOW_NAV 
+#define SXE_EICS_MAILBOX	SXE_EICR_MAILBOX  
+#define SXE_EICS_LSC		SXE_EICR_LSC      
+#define SXE_EICS_ECC		SXE_EICR_ECC      
+#define SXE_EICS_HDC		SXE_EICR_HDC      
+#define SXE_EICS_TCP_TIMER	SXE_EICR_TCP_TIMER
+#define SXE_EICS_OTHER		SXE_EICR_OTHER    
+
+
+#define SXE_EIMS_RTX_QUEUE	SXE_EICR_RTX_QUEUE
+#define SXE_EIMS_FLOW_NAV	SXE_EICR_FLOW_NAV
+#define SXE_EIMS_MAILBOX	SXE_EICR_MAILBOX
+#define SXE_EIMS_LSC		SXE_EICR_LSC
+#define SXE_EIMS_ECC		SXE_EICR_ECC
+#define SXE_EIMS_HDC		SXE_EICR_HDC
+#define SXE_EIMS_TCP_TIMER	SXE_EICR_TCP_TIMER
+#define SXE_EIMS_OTHER		SXE_EICR_OTHER
+#define SXE_EIMS_ENABLE_MASK	(SXE_EIMS_RTX_QUEUE | SXE_EIMS_LSC | \
+					SXE_EIMS_TCP_TIMER | SXE_EIMS_OTHER)
+
+#define SXE_EIMC_FLOW_NAV	SXE_EICR_FLOW_NAV 
+#define SXE_EIMC_LSC		SXE_EICR_LSC      
+#define SXE_EIMC_HDC		SXE_EICR_HDC      
+
+
+#define SXE_GPIE_SPP0_EN	0x00000001
+#define SXE_GPIE_SPP1_EN	0x00000002
+#define SXE_GPIE_SPP2_EN	0x00000004
+#define SXE_GPIE_MSIX_MODE	0x00000010
+#define SXE_GPIE_OCD		0x00000020
+#define SXE_GPIE_EIMEN		0x00000040
+#define SXE_GPIE_EIAME		0x40000000
+#define SXE_GPIE_PBA_SUPPORT	0x80000000
+#define SXE_GPIE_VTMODE_MASK	0x0000C000
+#define SXE_GPIE_VTMODE_16	0x00004000
+#define SXE_GPIE_VTMODE_32	0x00008000
+#define SXE_GPIE_VTMODE_64	0x0000C000
+
+
+#define SXE_IVAR_ALLOC_VALID	0x80
+
+
+#define SXE_EITR_CNT_WDIS	0x80000000
+#define SXE_EITR_ITR_MASK	0x00000FF8
+#define SXE_EITR_ITR_SHIFT	2
+#define SXE_EITR_ITR_MAX	(SXE_EITR_ITR_MASK >> SXE_EITR_ITR_SHIFT)
+
+
+#define SXE_EICR_GPI_SPP0	0x01000000
+#define SXE_EICR_GPI_SPP1	0x02000000
+#define SXE_EICR_GPI_SPP2	0x04000000
+#define SXE_EIMS_GPI_SPP0	SXE_EICR_GPI_SPP0
+#define SXE_EIMS_GPI_SPP1	SXE_EICR_GPI_SPP1
+#define SXE_EIMS_GPI_SPP2	SXE_EICR_GPI_SPP2
+
+
+#define SXE_SPP_PROC_SPP2_TRIGGER	0x00300000
+#define SXE_SPP_PROC_SPP2_TRIGGER_MASK	0xFFCFFFFF
+#define SXE_SPP_PROC_DELAY_US_MASK	0x0000FFFF
+#define SXE_SPP_PROC_DELAY_US		0x00000007
+
+
+#define SXE_IRQ_CLEAR_MASK	0xFFFFFFFF
+
+
+#define SXE_RXCSUM		0x05000
+#define SXE_RFCTL		0x05008
+#define SXE_FCTRL		0x05080
+#define SXE_EXVET               0x05078
+#define SXE_VLNCTRL		0x05088
+#define SXE_MCSTCTRL		0x05090
+#define SXE_ETQF(_i)		(0x05128 + ((_i) * 4))
+#define SXE_ETQS(_i)		(0x0EC00 + ((_i) * 4))
+#define SXE_SYNQF		0x0EC30
+#define SXE_MTA(_i)		(0x05200 + ((_i) * 4))
+#define SXE_UTA(_i)		(0x0F400 + ((_i) * 4))
+#define SXE_VFTA(_i)		(0x0A000 + ((_i) * 4))
+#define SXE_RAL(_i)		(0x0A200 + ((_i) * 8))
+#define SXE_RAH(_i)		(0x0A204 + ((_i) * 8))
+#define SXE_MPSAR_LOW(_i)	(0x0A600 + ((_i) * 8))
+#define SXE_MPSAR_HIGH(_i)	(0x0A604 + ((_i) * 8))
+#define SXE_PSRTYPE(_i)		(0x0EA00 + ((_i) * 4))
+#define SXE_RETA(_i)		(0x0EB00 + ((_i) * 4)) 
+#define SXE_RSSRK(_i)		(0x0EB80 + ((_i) * 4)) 
+#define SXE_RQTC		0x0EC70
+#define SXE_MRQC		0x0EC80
+#define SXE_IEOI		0x0F654
+#define SXE_PL			0x0F658
+#define SXE_LPL			0x0F65C
+
+
+#define SXE_ETQF_CNT			8
+#define SXE_MTA_CNT				128
+#define SXE_UTA_CNT				128
+#define SXE_VFTA_CNT			128
+#define SXE_RAR_CNT				128
+#define SXE_MPSAR_CNT			128
+
+
+#define SXE_EXVET_DEFAULT		0x81000000
+#define SXE_VLNCTRL_DEFAULT		0x8100
+#define SXE_IEOI_DEFAULT		0x060005DC
+#define SXE_PL_DEFAULT			0x3e000016
+#define SXE_LPL_DEFAULT			0x26000000
+
+
+#define SXE_RXCSUM_IPPCSE	0x00001000  
+#define SXE_RXCSUM_PCSD		0x00002000  
+
+
+#define SXE_RFCTL_LRO_DIS	0x00000020
+#define SXE_RFCTL_NFSW_DIS	0x00000040
+#define SXE_RFCTL_NFSR_DIS	0x00000080
+
+
+#define SXE_FCTRL_SBP		0x00000002
+#define SXE_FCTRL_MPE		0x00000100
+#define SXE_FCTRL_UPE		0x00000200
+#define SXE_FCTRL_BAM		0x00000400
+#define SXE_FCTRL_PMCF		0x00001000
+#define SXE_FCTRL_DPF		0x00002000
+
+
+#define SXE_VLNCTRL_VET		0x0000FFFF 
+#define SXE_VLNCTRL_CFI		0x10000000 
+#define SXE_VLNCTRL_CFIEN	0x20000000 
+#define SXE_VLNCTRL_VFE		0x40000000 
+#define SXE_VLNCTRL_VME		0x80000000 
+
+#define SXE_EXVET_VET_EXT_SHIFT              16
+#define SXE_EXTENDED_VLAN	             (1 << 26)
+
+
+#define SXE_MCSTCTRL_MFE	4
+
+#define SXE_ETQF_FILTER_EAPOL	0
+#define SXE_ETQF_FILTER_1588	3
+#define SXE_ETQF_FILTER_FIP	4
+#define SXE_ETQF_FILTER_LLDP	5
+#define SXE_ETQF_FILTER_LACP	6
+#define SXE_ETQF_FILTER_FC	7
+#define SXE_MAX_ETQF_FILTERS	8
+#define SXE_ETQF_1588		0x40000000
+#define SXE_ETQF_FILTER_EN	0x80000000
+#define SXE_ETQF_POOL_ENABLE	BIT(26)
+#define SXE_ETQF_POOL_SHIFT	20
+
+
+#define SXE_ETQS_RX_QUEUE	0x007F0000
+#define SXE_ETQS_RX_QUEUE_SHIFT	16
+#define SXE_ETQS_LLI		0x20000000
+#define SXE_ETQS_QUEUE_EN	0x80000000
+
+
+#define SXE_SYN_FILTER_ENABLE         0x00000001
+#define SXE_SYN_FILTER_QUEUE          0x000000FE
+#define SXE_SYN_FILTER_QUEUE_SHIFT    1
+#define SXE_SYN_FILTER_SYNQFP         0x80000000
+
+
+#define SXE_RAH_VIND_MASK	0x003C0000
+#define SXE_RAH_VIND_SHIFT	18
+#define SXE_RAH_AV		0x80000000
+#define SXE_CLEAR_VMDQ_ALL	0xFFFFFFFF
+
+
+#define SXE_PSRTYPE_TCPHDR	0x00000010
+#define SXE_PSRTYPE_UDPHDR	0x00000020
+#define SXE_PSRTYPE_IPV4HDR	0x00000100
+#define SXE_PSRTYPE_IPV6HDR	0x00000200
+#define SXE_PSRTYPE_L2HDR	0x00001000
+
+
+#define SXE_MRQC_RSSEN                 0x00000001 
+#define SXE_MRQC_MRQE_MASK                    0xF
+#define SXE_MRQC_RT8TCEN               0x00000002
+#define SXE_MRQC_RT4TCEN               0x00000003
+#define SXE_MRQC_RTRSS8TCEN            0x00000004
+#define SXE_MRQC_RTRSS4TCEN            0x00000005
+#define SXE_MRQC_VMDQEN                0x00000008
+#define SXE_MRQC_VMDQRSS32EN           0x0000000A
+#define SXE_MRQC_VMDQRSS64EN           0x0000000B
+#define SXE_MRQC_VMDQRT8TCEN           0x0000000C
+#define SXE_MRQC_VMDQRT4TCEN           0x0000000D
+#define SXE_MRQC_RSS_FIELD_MASK        0xFFFF0000
+#define SXE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
+#define SXE_MRQC_RSS_FIELD_IPV4        0x00020000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX     0x00080000
+#define SXE_MRQC_RSS_FIELD_IPV6        0x00100000
+#define SXE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000
+#define SXE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
+#define SXE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+
+
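+/* Per-queue Rx ring registers: queues 0-63 start at 0x01000 with a 0x40
+ * stride, queues 64 and above continue at 0x0D000.
+ */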
+#define SXE_RDBAL(_i)		(((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+					(0x0D000 + (((_i) - 64) * 0x40)))
+#define SXE_RDBAH(_i)		(((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+					(0x0D004 + (((_i) - 64) * 0x40)))
+#define SXE_RDLEN(_i)		(((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+					(0x0D008 + (((_i) - 64) * 0x40)))
+#define SXE_RDH(_i)		(((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+					(0x0D010 + (((_i) - 64) * 0x40)))
+#define SXE_SRRCTL(_i)		(((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+					(0x0D014 + (((_i) - 64) * 0x40)))
+#define SXE_RDT(_i)		(((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+					(0x0D018 + (((_i) - 64) * 0x40)))
+#define SXE_RXDCTL(_i)		(((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+					(0x0D028 + (((_i) - 64) * 0x40)))
+#define SXE_LROCTL(_i)		(((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+					(0x0D02C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL		0x02F00  
+#define SXE_RXCTRL		0x03000 
+#define SXE_LRODBU 		0x03028  
+#define SXE_RXPBSIZE(_i)	(0x03C00 + ((_i) * 4))
+
+#define SXE_DRXCFG		(0x03C20)
+
+
+#define SXE_RXDCTL_CNT			128
+
+
+#define SXE_RXDCTL_DEFAULT		0x40210
+
+
+#define SXE_SRRCTL_DROP_EN		0x10000000
+#define SXE_SRRCTL_BSIZEPKT_SHIFT	(10)
+#define SXE_SRRCTL_BSIZEHDRSIZE_SHIFT	(2)
+#define SXE_SRRCTL_DESCTYPE_DATA_ONEBUF	0x02000000
+#define SXE_SRRCTL_BSIZEPKT_MASK	0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK	0x00003F00
+
+
+#define SXE_RXDCTL_ENABLE	0x02000000 
+#define SXE_RXDCTL_SWFLSH	0x04000000 
+#define SXE_RXDCTL_VME		0x40000000 
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT	8
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT	16
+
+
+#define SXE_LROCTL_LROEN	0x01
+#define SXE_LROCTL_MAXDESC_1	0x00
+#define SXE_LROCTL_MAXDESC_4	0x04
+#define SXE_LROCTL_MAXDESC_8	0x08
+#define SXE_LROCTL_MAXDESC_16	0x0C
+
+
+#define SXE_RDRXCTL_RDMTS_1_2	0x00000000
+#define SXE_RDRXCTL_RDMTS_EN	0x00200000
+#define SXE_RDRXCTL_CRCSTRIP	0x00000002
+#define SXE_RDRXCTL_PSP		0x00000004
+#define SXE_RDRXCTL_MVMEN	0x00000020
+#define SXE_RDRXCTL_DMAIDONE	0x00000008
+#define SXE_RDRXCTL_AGGDIS	0x00010000
+#define SXE_RDRXCTL_LROFRSTSIZE	0x003E0000
+#define SXE_RDRXCTL_LROLLIDIS	0x00800000
+#define SXE_RDRXCTL_LROACKC	0x02000000
+#define SXE_RDRXCTL_FCOE_WRFIX	0x04000000
+#define SXE_RDRXCTL_MBINTEN	0x10000000
+#define SXE_RDRXCTL_MDP_EN	0x20000000
+#define SXE_RDRXCTL_MPBEN	0x00000010
+
+#define SXE_RDRXCTL_MCEN	0x00000040
+
+
+
+#define SXE_RXCTRL_RXEN		0x00000001
+
+
+#define SXE_LRODBU_LROACKDIS	0x00000080
+
+
+#define SXE_DRXCFG_GSP_ZERO    0x00000002
+#define SXE_DRXCFG_DBURX_START 0x00000001
+
+
+#define SXE_DMATXCTL		0x04A80   
+#define SXE_TDBAL(_i)		(0x06000 + ((_i) * 0x40))
+#define SXE_TDBAH(_i)		(0x06004 + ((_i) * 0x40))
+#define SXE_TDLEN(_i)		(0x06008 + ((_i) * 0x40))
+#define SXE_TDH(_i)		(0x06010 + ((_i) * 0x40))
+#define SXE_TDT(_i)		(0x06018 + ((_i) * 0x40))
+#define SXE_TXDCTL(_i)		(0x06028 + ((_i) * 0x40))
+#define SXE_PVFTDWBAL(p)	(0x06038 + (0x40 * (p)))
+#define SXE_PVFTDWBAH(p)	(0x0603C + (0x40 * (p)))
+#define SXE_TXPBSIZE(_i)	(0x0CC00 + ((_i) * 4))
+#define SXE_TXPBTHRESH(_i)	(0x04950 + ((_i) * 4))
+#define SXE_MTQC		0x08120               
+#define SXE_TXPBFCS		0x0CE00               
+#define SXE_DTXCFG		0x0CE08               
+#define SXE_DTMPCNT		0x0CE98               
+
+
+#define SXE_DMATXCTL_DEFAULT		0x81000000
+
+
+#define SXE_DMATXCTL_TE		0x1       
+#define SXE_DMATXCTL_GDV	0x8       
+#define SXE_DMATXCTL_VT_SHIFT	16        
+#define SXE_DMATXCTL_VT_MASK    0xFFFF0000
+
+
+#define SXE_TXDCTL_HTHRESH_SHIFT 8
+#define SXE_TXDCTL_WTHRESH_SHIFT 16
+#define SXE_TXDCTL_ENABLE     0x02000000
+#define SXE_TXDCTL_SWFLSH     0x04000000
+
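+/* Map a (VF index, ring-within-pool) pair to the flat ring index used by
+ * the PVFTDWBAL/PVFTDWBAH registers: ring_per_pool * vf_idx + vf_ring_idx.
+ */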
+#define SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, vf_ring_idx) \
+		SXE_PVFTDWBAL((ring_per_pool) * (vf_idx) + vf_ring_idx)
+#define SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, vf_ring_idx) \
+		SXE_PVFTDWBAH((ring_per_pool) * (vf_idx) + vf_ring_idx)
+
+
+#define SXE_MTQC_RT_ENA		0x1
+#define SXE_MTQC_VT_ENA		0x2
+#define SXE_MTQC_64Q_1PB	0x0
+#define SXE_MTQC_32VF		0x8
+#define SXE_MTQC_64VF		0x4
+#define SXE_MTQC_8TC_8TQ	0xC
+#define SXE_MTQC_4TC_4TQ	0x8
+
+
+#define SXE_TFCS_PB0_MASK	0x1
+#define SXE_TFCS_PB1_MASK	0x2
+#define SXE_TFCS_PB2_MASK	0x4
+#define SXE_TFCS_PB3_MASK	0x8
+#define SXE_TFCS_PB4_MASK	0x10
+#define SXE_TFCS_PB5_MASK	0x20
+#define SXE_TFCS_PB6_MASK	0x40
+#define SXE_TFCS_PB7_MASK	0x80
+#define SXE_TFCS_PB_MASK	0xff
+
+
+#define SXE_DTXCFG_DBUTX_START	0x00000001   
+#define SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG	0x20
+
+
+#define SXE_RTRPCS		0x02430
+#define SXE_RTRPT4C(_i)		(0x02140 + ((_i) * 4))
+#define SXE_RTRUP2TC		0x03020
+#define SXE_RTTDCS		0x04900
+#define SXE_RTTDQSEL		0x04904
+#define SXE_RTTDT1C		0x04908
+#define SXE_RTTDT2C(_i)		(0x04910 + ((_i) * 4))
+#define SXE_RTTBCNRM		0x04980
+#define SXE_RTTBCNRC		0x04984
+#define SXE_RTTUP2TC		0x0C800
+#define SXE_RTTPCS		0x0CD00
+#define SXE_RTTPT2C(_i)		(0x0CD20 + ((_i) * 4))
+
+
+#define SXE_RTRPCS_RRM		0x00000002
+#define SXE_RTRPCS_RAC		0x00000004
+#define SXE_RTRPCS_ARBDIS	0x00000040
+
+
+#define SXE_RTRPT4C_MCL_SHIFT	12
+#define SXE_RTRPT4C_BWG_SHIFT	9 
+#define SXE_RTRPT4C_GSP		0x40000000
+#define SXE_RTRPT4C_LSP		0x80000000
+
+
+#define SXE_RTRUP2TC_UP_SHIFT 3
+#define SXE_RTRUP2TC_UP_MASK	7
+
+
+#define SXE_RTTDCS_ARBDIS	0x00000040
+#define SXE_RTTDCS_TDPAC	0x00000001
+
+#define SXE_RTTDCS_VMPAC	0x00000002
+
+#define SXE_RTTDCS_TDRM		0x00000010
+#define SXE_RTTDCS_BDPM		0x00400000
+#define SXE_RTTDCS_BPBFSM	0x00800000
+
+#define SXE_RTTDCS_SPEED_CHG	0x80000000
+
+
+#define SXE_RTTDT2C_MCL_SHIFT	12
+#define SXE_RTTDT2C_BWG_SHIFT	9
+#define SXE_RTTDT2C_GSP		0x40000000
+#define SXE_RTTDT2C_LSP		0x80000000
+
+
+#define SXE_RTTBCNRC_RS_ENA		0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK	0x00003FFF
+#define SXE_RTTBCNRC_RF_INT_SHIFT	14
+#define SXE_RTTBCNRC_RF_INT_MASK	\
+			(SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+
+
+#define SXE_RTTUP2TC_UP_SHIFT	3
+
+
+#define SXE_RTTPCS_TPPAC	0x00000020
+
+#define SXE_RTTPCS_ARBDIS	0x00000040
+#define SXE_RTTPCS_TPRM		0x00000100
+#define SXE_RTTPCS_ARBD_SHIFT	22
+#define SXE_RTTPCS_ARBD_DCB	0x4       
+
+
+#define SXE_RTTPT2C_MCL_SHIFT 12
+#define SXE_RTTPT2C_BWG_SHIFT 9
+#define SXE_RTTPT2C_GSP       0x40000000
+#define SXE_RTTPT2C_LSP       0x80000000
+
+
+#define SXE_TPH_CTRL		0x11074
+#define SXE_TPH_TXCTRL(_i)      (0x0600C + ((_i) * 0x40))
+#define SXE_TPH_RXCTRL(_i)	(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+				 (0x0D00C + (((_i) - 64) * 0x40)))
+
+
+#define SXE_TPH_CTRL_ENABLE		0x00000000
+#define SXE_TPH_CTRL_DISABLE		0x00000001
+#define SXE_TPH_CTRL_MODE_CB1		0x00      
+#define SXE_TPH_CTRL_MODE_CB2		0x02      
+
+
+#define SXE_TPH_RXCTRL_DESC_TPH_EN	BIT(5) 
+#define SXE_TPH_RXCTRL_HEAD_TPH_EN	BIT(6) 
+#define SXE_TPH_RXCTRL_DATA_TPH_EN	BIT(7) 
+#define SXE_TPH_RXCTRL_DESC_RRO_EN	BIT(9) 
+#define SXE_TPH_RXCTRL_DATA_WRO_EN	BIT(13)
+#define SXE_TPH_RXCTRL_HEAD_WRO_EN	BIT(15)
+#define SXE_TPH_RXCTRL_CPUID_SHIFT	24     
+
+#define SXE_TPH_TXCTRL_DESC_TPH_EN	BIT(5) 
+#define SXE_TPH_TXCTRL_DESC_RRO_EN	BIT(9) 
+#define SXE_TPH_TXCTRL_DESC_WRO_EN	BIT(11)
+#define SXE_TPH_TXCTRL_DATA_RRO_EN	BIT(13)
+#define SXE_TPH_TXCTRL_CPUID_SHIFT	24     
+
+
+#define SXE_SECTXCTRL		0x08800
+#define SXE_SECTXSTAT		0x08804
+#define SXE_SECTXBUFFAF		0x08808
+#define SXE_SECTXMINIFG		0x08810
+#define SXE_SECRXCTRL		0x08D00
+#define SXE_SECRXSTAT		0x08D04
+#define SXE_LSECTXCTRL            0x08A04
+#define SXE_LSECTXSCL             0x08A08
+#define SXE_LSECTXSCH             0x08A0C
+#define SXE_LSECTXSA              0x08A10
+#define SXE_LSECTXPN(_n)          (0x08A14 + (4 * (_n)))
+#define SXE_LSECTXKEY(_n, _m)     (0x08A1C + ((0x10 * (_n)) + (4 * (_m))))
+#define SXE_LSECRXCTRL            0x08B04
+#define SXE_LSECRXSCL             0x08B08
+#define SXE_LSECRXSCH             0x08B0C
+#define SXE_LSECRXSA(_i)          (0x08B10 + (4 * (_i)))
+#define SXE_LSECRXPN(_i)          (0x08B18 + (4 * (_i)))
+#define SXE_LSECRXKEY(_n, _m)     (0x08B20 + ((0x10 * (_n)) + (4 * (_m))))  
+
+
+#define SXE_SECTXCTRL_SECTX_DIS		0x00000001
+#define SXE_SECTXCTRL_TX_DIS		0x00000002
+#define SXE_SECTXCTRL_STORE_FORWARD	0x00000004
+
+
+#define SXE_SECTXSTAT_SECTX_RDY		0x00000001
+#define SXE_SECTXSTAT_SECTX_OFF_DIS	0x00000002
+#define SXE_SECTXSTAT_ECC_TXERR		0x00000004
+
+
+#define SXE_SECRXCTRL_SECRX_DIS		0x00000001
+#define SXE_SECRXCTRL_RX_DIS		0x00000002
+#define SXE_SECRXCTRL_RP              0x00000080
+
+
+#define SXE_SECRXSTAT_SECRX_RDY		0x00000001
+#define SXE_SECRXSTAT_SECRX_OFF_DIS	0x00000002
+#define SXE_SECRXSTAT_ECC_RXERR		0x00000004
+
+#define SXE_SECTX_DCB_ENABLE_MASK	0x00001F00
+
+#define SXE_LSECTXCTRL_EN_MASK        0x00000003
+#define SXE_LSECTXCTRL_EN_SHIFT       0
+#define SXE_LSECTXCTRL_ES             0x00000010
+#define SXE_LSECTXCTRL_AISCI          0x00000020
+#define SXE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
+#define SXE_LSECTXCTRL_PNTHRSH_SHIFT  8
+#define SXE_LSECTXCTRL_RSV_MASK       0x000000D8
+
+#define SXE_LSECRXCTRL_EN_MASK        0x0000000C
+#define SXE_LSECRXCTRL_EN_SHIFT       2
+#define SXE_LSECRXCTRL_DROP_EN        0x00000010
+#define SXE_LSECRXCTRL_DROP_EN_SHIFT  4
+#define SXE_LSECRXCTRL_PLSH           0x00000040
+#define SXE_LSECRXCTRL_PLSH_SHIFT     6
+#define SXE_LSECRXCTRL_RP             0x00000080
+#define SXE_LSECRXCTRL_RP_SHIFT       7
+#define SXE_LSECRXCTRL_RSV_MASK       0xFFFFFF33
+
+#define SXE_LSECTXSA_AN0_MASK         0x00000003
+#define SXE_LSECTXSA_AN0_SHIFT        0
+#define SXE_LSECTXSA_AN1_MASK         0x0000000C
+#define SXE_LSECTXSA_AN1_SHIFT        2
+#define SXE_LSECTXSA_SELSA            0x00000010
+#define SXE_LSECTXSA_SELSA_SHIFT      4
+#define SXE_LSECTXSA_ACTSA            0x00000020
+
+#define SXE_LSECRXSA_AN_MASK          0x00000003
+#define SXE_LSECRXSA_AN_SHIFT         0
+#define SXE_LSECRXSA_SAV              0x00000004
+#define SXE_LSECRXSA_SAV_SHIFT        2
+#define SXE_LSECRXSA_RETIRED          0x00000010
+#define SXE_LSECRXSA_RETIRED_SHIFT    4
+
+#define SXE_LSECRXSCH_PI_MASK         0xFFFF0000
+#define SXE_LSECRXSCH_PI_SHIFT        16
+
+#define SXE_LSECTXCTRL_DISABLE	0x0
+#define SXE_LSECTXCTRL_AUTH		0x1
+#define SXE_LSECTXCTRL_AUTH_ENCRYPT	0x2
+
+#define SXE_LSECRXCTRL_DISABLE	0x0
+#define SXE_LSECRXCTRL_CHECK		0x1
+#define SXE_LSECRXCTRL_STRICT		0x2
+#define SXE_LSECRXCTRL_DROP		0x3
+#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
+
+
+
+#define SXE_IPSTXIDX		0x08900
+#define SXE_IPSTXSALT		0x08904
+#define SXE_IPSTXKEY(_i)	(0x08908 + (4 * (_i)))
+#define SXE_IPSRXIDX		0x08E00
+#define SXE_IPSRXIPADDR(_i)	(0x08E04 + (4 * (_i)))
+#define SXE_IPSRXSPI		0x08E14
+#define SXE_IPSRXIPIDX		0x08E18
+#define SXE_IPSRXKEY(_i)	(0x08E1C + (4 * (_i)))
+#define SXE_IPSRXSALT		0x08E2C
+#define SXE_IPSRXMOD		0x08E30
+
+
+
+#define SXE_FNAVCTRL		0x0EE00
+#define SXE_FNAVHKEY		0x0EE68
+#define SXE_FNAVSKEY		0x0EE6C
+#define SXE_FNAVDIP4M		0x0EE3C
+#define SXE_FNAVSIP4M		0x0EE40
+#define SXE_FNAVTCPM		0x0EE44
+#define SXE_FNAVUDPM		0x0EE48
+#define SXE_FNAVIP6M		0x0EE74
+#define SXE_FNAVM		0x0EE70
+
+#define SXE_FNAVFREE		0x0EE38
+#define SXE_FNAVLEN		0x0EE4C
+#define SXE_FNAVUSTAT		0x0EE50
+#define SXE_FNAVFSTAT		0x0EE54
+#define SXE_FNAVMATCH		0x0EE58
+#define SXE_FNAVMISS		0x0EE5C
+
+#define SXE_FNAVSIPv6(_i)	(0x0EE0C + ((_i) * 4))
+#define SXE_FNAVIPSA		0x0EE18
+#define SXE_FNAVIPDA		0x0EE1C
+#define SXE_FNAVPORT		0x0EE20
+#define SXE_FNAVVLAN		0x0EE24
+#define SXE_FNAVHASH		0x0EE28
+#define SXE_FNAVCMD		0x0EE2C
+
+
+#define SXE_FNAVCTRL_FLEX_SHIFT			16
+#define SXE_FNAVCTRL_MAX_LENGTH_SHIFT		24
+#define SXE_FNAVCTRL_FULL_THRESH_SHIFT		28
+#define SXE_FNAVCTRL_DROP_Q_SHIFT		8
+#define SXE_FNAVCTRL_PBALLOC_64K		0x00000001
+#define SXE_FNAVCTRL_PBALLOC_128K		0x00000002
+#define SXE_FNAVCTRL_PBALLOC_256K		0x00000003
+#define SXE_FNAVCTRL_INIT_DONE			0x00000008
+#define SXE_FNAVCTRL_SPECIFIC_MATCH		0x00000010
+#define SXE_FNAVCTRL_REPORT_STATUS		0x00000020
+#define SXE_FNAVCTRL_REPORT_STATUS_ALWAYS	0x00000080
+
+#define SXE_FNAVCTRL_FLEX_MASK			(0x1F << SXE_FNAVCTRL_FLEX_SHIFT)
+
+#define SXE_FNAVTCPM_DPORTM_SHIFT		16
+
+#define SXE_FNAVM_VLANID			0x00000001
+#define SXE_FNAVM_VLANP				0x00000002
+#define SXE_FNAVM_POOL				0x00000004
+#define SXE_FNAVM_L4P				0x00000008
+#define SXE_FNAVM_FLEX				0x00000010
+#define SXE_FNAVM_DIPv6				0x00000020
+
+#define SXE_FNAVPORT_DESTINATION_SHIFT		16
+#define SXE_FNAVVLAN_FLEX_SHIFT			16
+#define SXE_FNAVHASH_SIG_SW_INDEX_SHIFT		16
+
+#define SXE_FNAVCMD_CMD_MASK			0x00000003
+#define SXE_FNAVCMD_CMD_ADD_FLOW		0x00000001
+#define SXE_FNAVCMD_CMD_REMOVE_FLOW		0x00000002
+#define SXE_FNAVCMD_CMD_QUERY_REM_FILT		0x00000003
+#define SXE_FNAVCMD_FILTER_VALID		0x00000004
+#define SXE_FNAVCMD_FILTER_UPDATE		0x00000008
+#define SXE_FNAVCMD_IPv6DMATCH			0x00000010
+#define SXE_FNAVCMD_L4TYPE_UDP			0x00000020
+#define SXE_FNAVCMD_L4TYPE_TCP			0x00000040
+#define SXE_FNAVCMD_L4TYPE_SCTP			0x00000060
+#define SXE_FNAVCMD_IPV6			0x00000080
+#define SXE_FNAVCMD_CLEARHT			0x00000100
+#define SXE_FNAVCMD_DROP			0x00000200
+#define SXE_FNAVCMD_INT				0x00000400
+#define SXE_FNAVCMD_LAST			0x00000800
+#define SXE_FNAVCMD_COLLISION			0x00001000
+#define SXE_FNAVCMD_QUEUE_EN			0x00008000
+#define SXE_FNAVCMD_FLOW_TYPE_SHIFT		5
+#define SXE_FNAVCMD_RX_QUEUE_SHIFT		16
+#define SXE_FNAVCMD_RX_TUNNEL_FILTER_SHIFT	23
+#define SXE_FNAVCMD_VT_POOL_SHIFT		24
+#define SXE_FNAVCMD_CMD_POLL			10
+#define SXE_FNAVCMD_TUNNEL_FILTER		0x00800000
+
+
+#define SXE_LXOFFRXCNT		0x041A8
+#define SXE_PXOFFRXCNT(_i)	(0x04160 + ((_i) * 4))
+
+#define SXE_EPC_GPRC		0x050E0
+#define SXE_RXDGPC              0x02F50
+#define SXE_RXDGBCL             0x02F54
+#define SXE_RXDGBCH             0x02F58
+#define SXE_RXDDGPC             0x02F5C
+#define SXE_RXDDGBCL            0x02F60
+#define SXE_RXDDGBCH            0x02F64
+#define SXE_RXLPBKGPC           0x02F68
+#define SXE_RXLPBKGBCL          0x02F6C
+#define SXE_RXLPBKGBCH          0x02F70
+#define SXE_RXDLPBKGPC          0x02F74
+#define SXE_RXDLPBKGBCL         0x02F78
+#define SXE_RXDLPBKGBCH         0x02F7C
+
+#define SXE_RXTPCIN             0x02F88
+#define SXE_RXTPCOUT            0x02F8C
+#define SXE_RXPRDDC             0x02F9C
+
+#define SXE_TXDGPC		0x087A0
+#define SXE_TXDGBCL             0x087A4
+#define SXE_TXDGBCH             0x087A8
+#define SXE_TXSWERR             0x087B0
+#define SXE_TXSWITCH            0x087B4
+#define SXE_TXREPEAT            0x087B8
+#define SXE_TXDESCERR           0x087BC
+#define SXE_MNGPRC		0x040B4
+#define SXE_MNGPDC		0x040B8
+#define SXE_RQSMR(_i)		(0x02300 + ((_i) * 4))   
+#define SXE_TQSM(_i)		(0x08600 + ((_i) * 4))   
+#define SXE_QPRC(_i)		(0x01030 + ((_i) * 0x40))
+#define SXE_QBRC_L(_i)		(0x01034 + ((_i) * 0x40))
+#define SXE_QBRC_H(_i)		(0x01038 + ((_i) * 0x40))
+
+
+#define SXE_QPRDC(_i)		(0x01430 + ((_i) * 0x40))
+#define SXE_QPTC(_i)		(0x08680 + ((_i) * 0x4))
+#define SXE_QBTC_L(_i)		(0x08700 + ((_i) * 0x8)) 
+#define SXE_QBTC_H(_i)		(0x08704 + ((_i) * 0x8)) 
+#define SXE_SSVPC		0x08780                  
+#define SXE_MNGPTC		0x0CF90
+#define SXE_MPC(_i)		(0x03FA0 + ((_i) * 4))
+
+#define SXE_DBUDRTCICNT(_i)	(0x03C6C + ((_i) * 4))
+#define SXE_DBUDRTCOCNT(_i)	(0x03C8C + ((_i) * 4))
+#define SXE_DBUDRBDPCNT(_i)	(0x03D20 + ((_i) * 4))
+#define SXE_DBUDREECNT(_i)	(0x03D40 + ((_i) * 4))
+#define SXE_DBUDROFPCNT(_i)	(0x03D60 + ((_i) * 4))
+#define SXE_DBUDTTCICNT(_i)	(0x0CE54 + ((_i) * 4))
+#define SXE_DBUDTTCOCNT(_i)	(0x0CE74 + ((_i) * 4))
+
+
+
+#define SXE_WUC                       0x05800
+#define SXE_WUFC                      0x05808
+#define SXE_WUS                       0x05810
+#define SXE_IP6AT(_i)                 (0x05880 + ((_i) * 4))   
+
+
+#define SXE_IP6AT_CNT                 4
+
+
+#define SXE_WUC_PME_EN                0x00000002
+#define SXE_WUC_PME_STATUS            0x00000004
+#define SXE_WUC_WKEN                  0x00000010
+#define SXE_WUC_APME                  0x00000020
+
+
+#define SXE_WUFC_LNKC                 0x00000001
+#define SXE_WUFC_MAG                  0x00000002
+#define SXE_WUFC_EX                   0x00000004
+#define SXE_WUFC_MC                   0x00000008
+#define SXE_WUFC_BC                   0x00000010
+#define SXE_WUFC_ARP                  0x00000020
+#define SXE_WUFC_IPV4                 0x00000040
+#define SXE_WUFC_IPV6                 0x00000080
+#define SXE_WUFC_MNG                  0x00000100
+
+
+
+
+#define SXE_TSCTRL              0x14800
+#define SXE_TSES                0x14804
+#define SXE_TSYNCTXCTL          0x14810
+#define SXE_TSYNCRXCTL          0x14820
+#define SXE_RXSTMPL             0x14824
+#define SXE_RXSTMPH             0x14828
+#define SXE_SYSTIML             0x14840
+#define SXE_SYSTIMM             0x14844
+#define SXE_SYSTIMH             0x14848
+#define SXE_TIMADJL             0x14850
+#define SXE_TIMADJH             0x14854
+#define SXE_TIMINC              0x14860
+
+
+#define SXE_TSYNCTXCTL_TXTT     0x0001
+#define SXE_TSYNCTXCTL_TEN      0x0010
+
+
+#define SXE_TSYNCRXCTL_RXTT     0x0001
+#define SXE_TSYNCRXCTL_REN      0x0010
+
+
+#define SXE_TSCTRL_TSSEL        0x00001
+#define SXE_TSCTRL_TSEN         0x00002
+#define SXE_TSCTRL_VER_2        0x00010
+#define SXE_TSCTRL_ONESTEP      0x00100
+#define SXE_TSCTRL_CSEN         0x01000
+#define SXE_TSCTRL_PTYP_ALL     0x00C00
+#define SXE_TSCTRL_L4_UNICAST   0x08000
+
+
+#define SXE_TSES_TXES                   0x00200
+#define SXE_TSES_RXES                   0x00800
+#define SXE_TSES_TXES_V1_SYNC           0x00000
+#define SXE_TSES_TXES_V1_DELAY_REQ      0x00100
+#define SXE_TSES_TXES_V1_ALL            0x00200
+#define SXE_TSES_RXES_V1_SYNC           0x00000
+#define SXE_TSES_RXES_V1_DELAY_REQ      0x00400
+#define SXE_TSES_RXES_V1_ALL            0x00800
+#define SXE_TSES_TXES_V2_ALL            0x00200
+#define SXE_TSES_RXES_V2_ALL            0x00800
+
+#define SXE_IV_SNS              0
+#define SXE_IV_NS               8
+#define SXE_INCPD               0
+#define SXE_BASE_INCVAL         8
+
+
+#define SXE_VT_CTL		0x051B0
+#define SXE_PFMAILBOX(_i)	(0x04B00 + (4 * (_i)))
+
+#define SXE_PFMBICR(_i)		(0x00710 + (4 * (_i)))
+#define SXE_VFLRE(i)		((((i) & 1)) ? 0x001C0 : 0x00600)
+#define SXE_VFLREC(i)		(0x00700 + ((i) * 4))
+#define SXE_VFRE(_i)		(0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i)		(0x08110 + ((_i) * 4))
+#define SXE_QDE			(0x02F04)             
+#define SXE_SPOOF(_i)		(0x08200 + (_i) * 4)
+#define SXE_PFDTXGSWC		0x08220
+#define SXE_VMVIR(_i)		(0x08000 + ((_i) * 4))
+#define SXE_VMOLR(_i)		(0x0F000 + ((_i) * 4))
+#define SXE_VLVF(_i)		(0x0F100 + ((_i) * 4))
+#define SXE_VLVFB(_i)		(0x0F200 + ((_i) * 4))
+#define SXE_MRCTL(_i)		(0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i)	        (0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i)		(0x0F630 + ((_i) * 4))
+#define SXE_VMECM(_i)		(0x08790 + ((_i) * 4))
+#define SXE_PFMBMEM(_i)		(0x13000 + (64 * (_i)))
+
+
+#define SXE_VMOLR_CNT			64
+#define SXE_VLVF_CNT			64
+#define SXE_VLVFB_CNT			128
+#define SXE_MRCTL_CNT			4
+#define SXE_VMRVLAN_CNT			8
+#define SXE_VMRVM_CNT			8
+#define SXE_SPOOF_CNT			8
+#define SXE_VMVIR_CNT			64
+#define SXE_VFRE_CNT			2
+
+
+#define SXE_VMVIR_VLANA_MASK	0xC0000000
+#define SXE_VMVIR_VLAN_VID_MASK	0x00000FFF
+#define SXE_VMVIR_VLAN_UP_MASK	0x0000E000
+
+
+#define SXE_MRCTL_VPME  0x01
+
+#define SXE_MRCTL_UPME  0x02
+
+#define SXE_MRCTL_DPME  0x04
+
+#define SXE_MRCTL_VLME  0x08
+
+
+#define SXE_VT_CTL_DIS_DEFPL  0x20000000
+#define SXE_VT_CTL_REPLEN     0x40000000
+#define SXE_VT_CTL_VT_ENABLE  0x00000001 
+#define SXE_VT_CTL_POOL_SHIFT 7
+#define SXE_VT_CTL_POOL_MASK  (0x3F << SXE_VT_CTL_POOL_SHIFT)
+
+
+#define SXE_PFMAILBOX_STS         0x00000001
+#define SXE_PFMAILBOX_ACK         0x00000002
+#define SXE_PFMAILBOX_VFU         0x00000004
+#define SXE_PFMAILBOX_PFU         0x00000008
+#define SXE_PFMAILBOX_RVFU        0x00000010
+
+
+#define SXE_PFMBICR_VFREQ         0x00000001
+#define SXE_PFMBICR_VFACK         0x00010000
+#define SXE_PFMBICR_VFREQ_MASK    0x0000FFFF
+#define SXE_PFMBICR_VFACK_MASK    0xFFFF0000
+
+
+#define SXE_QDE_ENABLE		(0x00000001)
+#define SXE_QDE_HIDE_VLAN	(0x00000002)
+#define SXE_QDE_IDX_MASK	(0x00007F00)
+#define SXE_QDE_IDX_SHIFT	(8)
+#define SXE_QDE_WRITE		(0x00010000)
+
+
+
+#define SXE_SPOOF_VLAN_SHIFT  (8)
+
+
+#define SXE_PFDTXGSWC_VT_LBEN	0x1 
+
+
+#define SXE_VMVIR_VLANA_DEFAULT 0x40000000
+#define SXE_VMVIR_VLANA_NEVER   0x80000000
+
+
+#define SXE_VMOLR_UPE		0x00400000
+#define SXE_VMOLR_VPE		0x00800000
+#define SXE_VMOLR_AUPE		0x01000000
+#define SXE_VMOLR_ROMPE		0x02000000
+#define SXE_VMOLR_ROPE		0x04000000
+#define SXE_VMOLR_BAM		0x08000000
+#define SXE_VMOLR_MPE		0x10000000
+
+
+#define SXE_VLVF_VIEN         0x80000000 
+#define SXE_VLVF_ENTRIES      64
+#define SXE_VLVF_VLANID_MASK  0x00000FFF
+
+
+#define SXE_HDC_HOST_BASE       0x16000
+#define SXE_HDC_SW_LK           (SXE_HDC_HOST_BASE + 0x00)
+#define SXE_HDC_PF_LK           (SXE_HDC_HOST_BASE + 0x04)
+#define SXE_HDC_SW_OV           (SXE_HDC_HOST_BASE + 0x08)
+#define SXE_HDC_FW_OV           (SXE_HDC_HOST_BASE + 0x0C)
+#define SXE_HDC_PACKET_HEAD0    (SXE_HDC_HOST_BASE + 0x10)
+
+#define SXE_HDC_PACKET_DATA0    (SXE_HDC_HOST_BASE + 0x20)
+
+
+#define SXE_HDC_MSI_STATUS_REG  0x17000
+#define SXE_FW_STATUS_REG       0x17004
+#define SXE_DRV_STATUS_REG      0x17008
+#define SXE_FW_HDC_STATE_REG    0x1700C
+#define SXE_R0_MAC_ADDR_RAL     0x17010
+#define SXE_R0_MAC_ADDR_RAH     0x17014
+#define SXE_CRC_STRIP_REG		0x17018
+
+
+#define SXE_HDC_SW_LK_BIT       0x0001
+#define SXE_HDC_PF_LK_BIT       0x0003
+#define SXE_HDC_SW_OV_BIT       0x0001
+#define SXE_HDC_FW_OV_BIT       0x0001
+#define SXE_HDC_RELEASE_SW_LK   0x0000
+
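+/* HDC length fields are stored minus-one in the register; convert in both
+ * directions.
+ */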
+#define SXE_HDC_LEN_TO_REG(n)        ((n) - 1)
+#define SXE_HDC_LEN_FROM_REG(n)      ((n) + 1)
+
+
+#define SXE_RX_PKT_BUF_SIZE_SHIFT    10
+#define SXE_TX_PKT_BUF_SIZE_SHIFT    10
+
+#define SXE_RXIDX_TBL_SHIFT           1
+#define SXE_RXTXIDX_IPS_EN            0x00000001
+#define SXE_RXTXIDX_IDX_SHIFT         3
+#define SXE_RXTXIDX_READ              0x40000000
+#define SXE_RXTXIDX_WRITE             0x80000000
+
+
+#define SXE_KEEP_CRC_EN		      0x00000001
+
+
+#define SXE_VMD_CTL			0x0581C
+
+
+#define SXE_VMD_CTL_POOL_EN		0x00000001
+#define SXE_VMD_CTL_POOL_FILTER		0x00000002
+
+
+#define SXE_FLCTRL                    0x14300
+#define SXE_PFCTOP                    0x14304
+#define SXE_FCTTV0                    0x14310
+#define SXE_FCTTV(_i)                (SXE_FCTTV0 + ((_i) * 4))
+#define SXE_FCRTV                     0x14320
+#define SXE_TFCS                      0x14324
+
+
+#define SXE_FCTRL_TFCE_MASK           0x0018
+#define SXE_FCTRL_TFCE_LFC_EN         0x0008
+#define SXE_FCTRL_TFCE_PFC_EN         0x0010
+#define SXE_FCTRL_TFCE_DPF_EN         0x0020
+#define SXE_FCTRL_RFCE_MASK           0x0300
+#define SXE_FCTRL_RFCE_LFC_EN         0x0100
+#define SXE_FCTRL_RFCE_PFC_EN         0x0200
+
+#define SXE_FCTRL_TFCE_FCEN_MASK      0x00FF0000
+#define SXE_FCTRL_TFCE_XONE_MASK      0xFF000000
+
+
+#define SXE_PFCTOP_FCT               0x8808
+#define SXE_PFCTOP_FCOP_MASK         0xFFFF0000
+#define SXE_PFCTOP_FCOP_PFC          0x01010000
+#define SXE_PFCTOP_FCOP_LFC          0x00010000
+
+
+#define SXE_COMCTRL                   0x14400
+#define SXE_PCCTRL                    0x14404
+#define SXE_LPBKCTRL                  0x1440C
+#define SXE_MAXFS                     0x14410
+#define SXE_SACONH                    0x14420
+#define SXE_SACONL                    0x14424
+#define SXE_VLANCTRL                  0x14430
+#define SXE_VLANID                    0x14434
+#define SXE_LINKS                     0x14454
+#define SXE_FPGA_SDS_STS	      0x14704
+#define SXE_MSCA                      0x14500
+#define SXE_MSCD                      0x14504
+
+#define SXE_HLREG0                    0x04240
+#define SXE_MFLCN                     0x04294
+#define SXE_MACC                      0x04330
+
+#define SXE_PCS1GLSTA                 0x0420C
+#define SXE_PCS1GANA                  0x04850
+#define SXE_PCS1GANLP                 0x04854
+
+
+#define SXE_LPBKCTRL_EN               0x00000001
+
+
+#define SXE_MAC_ADDR_SACONH_SHIFT     32
+#define SXE_MAC_ADDR_SACONL_MASK      0xFFFFFFFF
+
+
+#define SXE_PCS1GLSTA_AN_COMPLETE     0x10000
+#define SXE_PCS1GLSTA_AN_PAGE_RX      0x20000
+#define SXE_PCS1GLSTA_AN_TIMED_OUT    0x40000
+#define SXE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define SXE_PCS1GLSTA_AN_ERROR_RWS    0x100000
+
+#define SXE_PCS1GANA_SYM_PAUSE        0x100
+#define SXE_PCS1GANA_ASM_PAUSE        0x80 
+
+
+#define SXE_LKSTS_PCS_LKSTS_UP        0x00000001
+#define SXE_LINK_UP_TIME              90
+#define SXE_AUTO_NEG_TIME             45
+
+
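+/* MSCA (MDIO command) field layout, per the masks below: bits 0-15 address,
+ * 16-20 device type, 21-25 PHY address, 26-27 opcode, 28-29 ST code.
+ */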
+#define SXE_MSCA_NP_ADDR_MASK      0x0000FFFF
+#define SXE_MSCA_NP_ADDR_SHIFT     0
+#define SXE_MSCA_DEV_TYPE_MASK     0x001F0000
+#define SXE_MSCA_DEV_TYPE_SHIFT    16        
+#define SXE_MSCA_PHY_ADDR_MASK     0x03E00000
+#define SXE_MSCA_PHY_ADDR_SHIFT    21        
+#define SXE_MSCA_OP_CODE_MASK      0x0C000000
+#define SXE_MSCA_OP_CODE_SHIFT     26        
+#define SXE_MSCA_ADDR_CYCLE        0x00000000
+#define SXE_MSCA_WRITE             0x04000000
+#define SXE_MSCA_READ              0x0C000000
+#define SXE_MSCA_READ_AUTOINC      0x08000000
+#define SXE_MSCA_ST_CODE_MASK      0x30000000
+#define SXE_MSCA_ST_CODE_SHIFT     28        
+#define SXE_MSCA_NEW_PROTOCOL      0x00000000
+#define SXE_MSCA_OLD_PROTOCOL      0x10000000
+#define SXE_MSCA_BYPASSRA_C45      0x40000000
+#define SXE_MSCA_MDI_CMD_ON_PROG   0x80000000
+
+
+#define MDIO_MSCD_RDATA_LEN        16
+#define MDIO_MSCD_RDATA_SHIFT      16
+
+
+#define SXE_CRCERRS                   0x14A04
+#define SXE_ERRBC                     0x14A10
+#define SXE_RLEC                      0x14A14
+#define SXE_PRC64                     0x14A18
+#define SXE_PRC127                    0x14A1C
+#define SXE_PRC255                    0x14A20
+#define SXE_PRC511                    0x14A24
+#define SXE_PRC1023                   0x14A28
+#define SXE_PRC1522                   0x14A2C
+#define SXE_BPRC                      0x14A30
+#define SXE_MPRC                      0x14A34
+#define SXE_GPRC                      0x14A38
+#define SXE_GORCL                     0x14A3C
+#define SXE_GORCH                     0x14A40
+#define SXE_RUC                       0x14A44
+#define SXE_RFC                       0x14A48
+#define SXE_ROC                       0x14A4C
+#define SXE_RJC                       0x14A50
+#define SXE_TORL                      0x14A54
+#define SXE_TORH                      0x14A58
+#define SXE_TPR                       0x14A5C
+#define SXE_PRCPF(_i)                 (0x14A60 + ((_i) * 4))
+#define SXE_GPTC                      0x14B00
+#define SXE_GOTCL                     0x14B04
+#define SXE_GOTCH                     0x14B08
+#define SXE_TPT                       0x14B0C
+#define SXE_PTC64                     0x14B10
+#define SXE_PTC127                    0x14B14
+#define SXE_PTC255                    0x14B18
+#define SXE_PTC511                    0x14B1C
+#define SXE_PTC1023                   0x14B20
+#define SXE_PTC1522                   0x14B24
+#define SXE_MPTC                      0x14B28
+#define SXE_BPTC                      0x14B2C
+#define SXE_PFCT(_i)                  (0x14B30 + ((_i) * 4))
+
+#define SXE_MACCFG                    0x0CE04
+#define SXE_MACCFG_PAD_EN             0x00000001
+
+
+#define SXE_COMCTRL_TXEN	      0x0001        
+#define SXE_COMCTRL_RXEN	      0x0002        
+#define SXE_COMCTRL_EDSEL	      0x0004        
+#define SXE_COMCTRL_SPEED_1G	      0x0200        
+#define SXE_COMCTRL_SPEED_10G	      0x0300        
+
+
+#define SXE_PCCTRL_TXCE		      0x0001        
+#define SXE_PCCTRL_RXCE		      0x0002        
+#define SXE_PCCTRL_PEN		      0x0100        
+#define SXE_PCCTRL_PCSC_ALL	      0x30000       
+
+
+#define SXE_MAXFS_TFSEL		      0x0001        
+#define SXE_MAXFS_RFSEL		      0x0002        
+#define SXE_MAXFS_MFS_MASK	      0xFFFF0000    
+#define SXE_MAXFS_MFS		      0x40000000    
+#define SXE_MAXFS_MFS_SHIFT	      16            
+
+
+#define SXE_LINKS_UP 	              0x00000001    
+
+#define SXE_10G_LINKS_DOWN            0x00000006
+
+
+#define SXE_LINK_SPEED_UNKNOWN        0             
+#define SXE_LINK_SPEED_10_FULL        0x0002        
+#define SXE_LINK_SPEED_100_FULL       0x0008        
+#define SXE_LINK_SPEED_1GB_FULL       0x0020        
+#define SXE_LINK_SPEED_10GB_FULL      0x0080        
+
+
+#define SXE_HLREG0_TXCRCEN            0x00000001  
+#define SXE_HLREG0_RXCRCSTRP          0x00000002  
+#define SXE_HLREG0_JUMBOEN            0x00000004  
+#define SXE_HLREG0_TXPADEN            0x00000400  
+#define SXE_HLREG0_TXPAUSEEN          0x00001000  
+#define SXE_HLREG0_RXPAUSEEN          0x00004000  
+#define SXE_HLREG0_LPBK               0x00008000  
+#define SXE_HLREG0_MDCSPD             0x00010000  
+#define SXE_HLREG0_CONTMDC            0x00020000  
+#define SXE_HLREG0_CTRLFLTR           0x00040000  
+#define SXE_HLREG0_PREPEND            0x00F00000  
+#define SXE_HLREG0_PRIPAUSEEN         0x01000000  
+#define SXE_HLREG0_RXPAUSERECDA       0x06000000  
+#define SXE_HLREG0_RXLNGTHERREN       0x08000000  
+#define SXE_HLREG0_RXPADSTRIPEN       0x10000000  
+
+#define SXE_MFLCN_PMCF                0x00000001  
+#define SXE_MFLCN_DPF                 0x00000002  
+#define SXE_MFLCN_RPFCE               0x00000004  
+#define SXE_MFLCN_RFCE                0x00000008  
+#define SXE_MFLCN_RPFCE_MASK	      0x00000FF4  
+#define SXE_MFLCN_RPFCE_SHIFT         4
+
+#define SXE_MACC_FLU                  0x00000001
+#define SXE_MACC_FSV_10G              0x00030000
+#define SXE_MACC_FS                   0x00040000
+
+#define SXE_DEFAULT_FCPAUSE           0xFFFF
+
+
+#define SXE_SAQF(_i)		(0x0E000 + ((_i) * 4)) 
+#define SXE_DAQF(_i)		(0x0E200 + ((_i) * 4)) 
+#define SXE_SDPQF(_i)		(0x0E400 + ((_i) * 4)) 
+#define SXE_FTQF(_i)		(0x0E600 + ((_i) * 4)) 
+#define SXE_L34T_IMIR(_i)	(0x0E800 + ((_i) * 4)) 
+
+#define SXE_MAX_FTQF_FILTERS		128
+#define SXE_FTQF_PROTOCOL_MASK		0x00000003
+#define SXE_FTQF_PROTOCOL_TCP		0x00000000
+#define SXE_FTQF_PROTOCOL_UDP		0x00000001
+#define SXE_FTQF_PROTOCOL_SCTP		2
+#define SXE_FTQF_PRIORITY_MASK		0x00000007
+#define SXE_FTQF_PRIORITY_SHIFT		2
+#define SXE_FTQF_POOL_MASK		0x0000003F
+#define SXE_FTQF_POOL_SHIFT		8
+#define SXE_FTQF_5TUPLE_MASK_MASK	0x0000001F
+#define SXE_FTQF_5TUPLE_MASK_SHIFT	25
+#define SXE_FTQF_SOURCE_ADDR_MASK	0x1E
+#define SXE_FTQF_DEST_ADDR_MASK		0x1D
+#define SXE_FTQF_SOURCE_PORT_MASK	0x1B
+#define SXE_FTQF_DEST_PORT_MASK		0x17
+#define SXE_FTQF_PROTOCOL_COMP_MASK	0x0F
+#define SXE_FTQF_POOL_MASK_EN		0x40000000
+#define SXE_FTQF_QUEUE_ENABLE		0x80000000
+
+#define SXE_SDPQF_DSTPORT		0xFFFF0000
+#define SXE_SDPQF_DSTPORT_SHIFT		16
+#define SXE_SDPQF_SRCPORT		0x0000FFFF
+
+#define SXE_L34T_IMIR_SIZE_BP		0x00001000
+#define SXE_L34T_IMIR_RESERVE		0x00080000
+#define SXE_L34T_IMIR_LLI			0x00100000
+#define SXE_L34T_IMIR_QUEUE			0x0FE00000
+#define SXE_L34T_IMIR_QUEUE_SHIFT	21
+
+#define SXE_VMTXSW(_i)                (0x05180 + ((_i) * 4))   
+#define SXE_VMTXSW_REGISTER_COUNT     2
+
+#define SXE_TXSTMP_SEL		0x14510  
+#define SXE_TXSTMP_VAL		0x1451c  
+
+#define SXE_TXTS_MAGIC0		0x005a005900580057
+#define SXE_TXTS_MAGIC1		0x005e005d005c005b
+
+#endif
diff --git a/drivers/net/sxe/include/sxe_type.h b/drivers/net/sxe/include/sxe_type.h
new file mode 100644
index 0000000000..433385a0c9
--- /dev/null
+++ b/drivers/net/sxe/include/sxe_type.h
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_TYPE_H__
+#define __SXE_TYPE_H__
+
+#define SXE_TXD_CMD_EOP   0x01000000  
+#define SXE_TXD_CMD_RS    0x08000000  
+#define SXE_TXD_STAT_DD   0x00000001  
+
+#define SXE_TXD_CMD       (SXE_TXD_CMD_EOP | SXE_TXD_CMD_RS)
+
+
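+/* Advanced descriptors: software fills the .read view; hardware overwrites
+ * the same 16 bytes in the .wb (write-back) view on completion.
+ */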
+typedef union sxe_adv_tx_desc {
+    struct {
+        U64 buffer_addr;
+        U32 cmd_type_len;
+        U32 olinfo_status;
+    } read;
+    struct {
+        U64 rsvd;
+        U32 nxtseq_seed;
+        U32 status;
+    } wb;
+} sxe_adv_tx_desc_u;
+
+typedef union sxe_adv_rx_desc {
+    struct {
+        U64 pkt_addr;
+        U64 hdr_addr;
+    } read;
+    struct {
+        struct {
+            union {
+                U32 data;
+                struct {
+                    U16 pkt_info;
+                    U16 hdr_info;
+                } hs_rss;
+            } lo_dword;
+            union {
+                U32 rss;
+                struct {
+                    U16 ip_id;
+                    U16 csum;
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            U32 status_error;
+            U16 length;
+            U16 vlan;
+        } upper;
+    } wb;
+} sxe_adv_rx_desc_u;
+
+#define SXE_RXD_STAT_DD    0x01  
+#define SXE_RXD_STAT_EOP   0x02  
+
+
+#define PCI_VENDOR_ID_STARS		 0x1FF2
+#define SXE_DEV_ID_FPGA			 0x1160
+
+
+#define SXE_CTRL      0x00000
+#define SXE_STATUS    0x00008
+#define SXE_CTRL_EXT  0x00018
+#define SXE_ESDP      0x00020
+#define SXE_EODSDP    0x00028
+
+#define SXE_I2CCTL_8259X	0x00028
+#define SXE_I2CCTL_X540	SXE_I2CCTL_8259X
+#define SXE_I2CCTL_X550	0x15F5C
+#define SXE_I2CCTL_X550EM_x	SXE_I2CCTL_X550
+#define SXE_I2CCTL_X550EM_a	SXE_I2CCTL_X550
+#define SXE_I2CCTL(_hw)	SXE_BY_MAC((_hw), I2CCTL)
+
+#define SXE_LEDCTL    0x00200
+#define SXE_FRTIMER   0x00048
+#define SXE_TCPTIMER  0x0004C
+#define SXE_CORESPARE 0x00600
+#define SXE_EXVET     0x05078
+
+
+#define SXE_EICR      0x00800
+#define SXE_EICS      0x00808
+#define SXE_EIMS      0x00880
+#define SXE_EIMC      0x00888
+#define SXE_EIAC      0x00810
+#define SXE_EIAM      0x00890
+#define SXE_EICR_EX(_i)   (0x00A80 + (_i) * 4)
+#define SXE_EICS_EX(_i)   (0x00A90 + (_i) * 4)
+#define SXE_EIMS_EX(_i)   (0x00AA0 + (_i) * 4)
+#define SXE_EIMC_EX(_i)   (0x00AB0 + (_i) * 4)
+#define SXE_EIAM_EX(_i)   (0x00AD0 + (_i) * 4)
+
+
+#define SXE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+			 (0x0D000 + (((_i) - 64) * 0x40)))
+#define SXE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+			 (0x0D004 + (((_i) - 64) * 0x40)))
+#define SXE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+			 (0x0D008 + (((_i) - 64) * 0x40)))
+#define SXE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+			 (0x0D010 + (((_i) - 64) * 0x40)))
+#define SXE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+			 (0x0D018 + (((_i) - 64) * 0x40)))
+#define SXE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+			 (0x0D028 + (((_i) - 64) * 0x40)))
+#define SXE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+			 (0x0D02C + (((_i) - 64) * 0x40)))
+#define SXE_RSCDBU     0x03028
+#define SXE_RDDCC      0x02F20
+#define SXE_RXMEMWRAP  0x03190
+#define SXE_STARCTRL   0x03024
+
+#define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+			 (0x0D014 + (((_i) - 64) * 0x40)))
+
+#define SXE_DCA_RXCTRL(_i)    (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+			 (0x0D00C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL           0x02F00
+#define SXE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))    
+#define SXE_DRXCFG	0x03C20
+#define SXE_RXCTRL    0x03000
+#define SXE_DROPEN    0x03D04
+#define SXE_RXPBSIZE_SHIFT 10
+#define SXE_DRXCFG_GSP_ZERO    0x00000002
+#define SXE_DRXCFG_DBURX_START 0x00000001
+
+
+#define SXE_RXCSUM    0x05000
+#define SXE_RFCTL     0x05008
+#define SXE_DRECCCTL  0x02F08
+#define SXE_DRECCCTL_DISABLE 0
+
+
+#define SXE_MTA(_i)   (0x05200 + ((_i) * 4))
+#define SXE_RAL(_i)   (0x0A200 + ((_i) * 8))
+#define SXE_RAH(_i)   (0x0A204 + ((_i) * 8))
+#define SXE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define SXE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+
+
+#define SXE_PSRTYPE(_i)    (0x0EA00 + ((_i) * 4))
+
+
+#define SXE_VFTA(_i)  (0x0A000 + ((_i) * 4))
+
+
+#define SXE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define SXE_FCTRL     0x05080
+#define SXE_VLNCTRL   0x05088
+#define SXE_MCSTCTRL  0x05090
+#define SXE_MRQC      0x0EC80
+#define SXE_SAQF(_i)  (0x0E000 + ((_i) * 4)) 
+#define SXE_DAQF(_i)  (0x0E200 + ((_i) * 4)) 
+#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4)) 
+#define SXE_FTQF(_i)  (0x0E600 + ((_i) * 4)) 
+#define SXE_ETQF(_i)  (0x05128 + ((_i) * 4)) 
+#define SXE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) 
+#define SXE_SYNQF     0x0EC30                 
+#define SXE_RQTC      0x0EC70
+#define SXE_MTQC      0x08120
+#define SXE_VLVF(_i)  (0x0F100 + ((_i) * 4)) 
+#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4)) 
+#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4)) 
+#define SXE_PFFLPL     0x050B0
+#define SXE_PFFLPH     0x050B4
+#define SXE_VT_CTL         0x051B0
+#define SXE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i)))   
+#define SXE_PFMBMEM(_i)    (0x13000 + (64 * (_i)))  
+#define SXE_PFMBICR(_i)    (0x00710 + (4 * (_i)))   
+#define SXE_PFMBIMR(_i)    (0x00720 + (4 * (_i)))   
+#define SXE_VFRE(_i)       (0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i)       (0x08110 + ((_i) * 4))
+#define SXE_VMECM(_i)      (0x08790 + ((_i) * 4))
+#define SXE_QDE            0x2F04
+#define SXE_VMTXSW(_i)     (0x05180 + ((_i) * 4))   
+#define SXE_VMOLR(_i)      (0x0F000 + ((_i) * 4))    
+#define SXE_UTA(_i)        (0x0F400 + ((_i) * 4))
+#define SXE_MRCTL(_i)      (0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i)      (0x0F630 + ((_i) * 4))
+#define SXE_WQBR_RX(_i)    (0x2FB0 + ((_i) * 4))    
+#define SXE_WQBR_TX(_i)    (0x8130 + ((_i) * 4))    
+#define SXE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4))   
+#define SXE_RXFECCERR0         0x051B8
+#define SXE_LLITHRESH 0x0EC90
+#define SXE_IMIR(_i)  (0x05A80 + ((_i) * 4))         
+#define SXE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))
+#define SXE_IMIRVP    0x0EC60
+#define SXE_VMD_CTL   0x0581C
+#define SXE_RETA(_i)  (0x0EB00 + ((_i) * 4))        
+#define SXE_ERETA(_i)	(0x0EE80 + ((_i) * 4))     
+#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4))       
+
+
+#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40))  
+#define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define SXE_TDH(_i)   (0x06010 + ((_i) * 0x40))
+#define SXE_TDT(_i)   (0x06018 + ((_i) * 0x40))
+#define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define SXE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define SXE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define SXE_DTXCTL    0x07E00
+
+#define SXE_DMATXCTL      0x04A80
+#define SXE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4))  
+#define SXE_PFDTXGSWC     0x08220
+#define SXE_DTXMXSZRQ     0x08100
+#define SXE_DTXTCPFLGL    0x04A88
+#define SXE_DTXTCPFLGH    0x04A8C
+#define SXE_LBDRPEN       0x0CA00
+#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4))  
+
+#define SXE_DMATXCTL_TE       0x1   
+#define SXE_DMATXCTL_NS       0x2   
+#define SXE_DMATXCTL_GDV      0x8   
+#define SXE_DMATXCTL_MDP_EN   0x20  
+#define SXE_DMATXCTL_MBINTEN  0x40  
+#define SXE_DMATXCTL_VT_SHIFT 16    
+
+#define SXE_PFDTXGSWC_VT_LBEN 0x1   
+
+
+#define SXE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
+#define SXE_TIPG      0x0CB00
+#define SXE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4))  
+#define SXE_DTXCFG	0x0CE08
+#define SXE_MNGTXMAP  0x0CD10
+#define SXE_TIPG_FIBER_DEFAULT 3
+#define SXE_TXPBSIZE_SHIFT    10
+#define SXE_DTXCFG_DBUTX_START  0x00000001
+
+
+#define SXE_RTRPCS      0x02430
+#define SXE_RTTDCS      0x04900
+#define SXE_RTTDCS_ARBDIS     0x00000040   
+#define SXE_RTTPCS      0x0CD00
+#define SXE_RTRUP2TC    0x03020
+#define SXE_RTTUP2TC    0x0C800
+#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4))  
+#define SXE_TXLLQ(_i)   (0x082E0 + ((_i) * 4))  
+#define SXE_RTRPT4S(_i) (0x02160 + ((_i) * 4))  
+#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4))  
+#define SXE_RTTDT2S(_i) (0x04930 + ((_i) * 4))  
+#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4))  
+#define SXE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4))  
+#define SXE_RTTDQSEL    0x04904
+#define SXE_RTTDT1C     0x04908
+#define SXE_RTTDT1S     0x0490C
+
+
+#define SXE_RTTQCNCR                0x08B00
+#define SXE_RTTQCNTG                0x04A90
+#define SXE_RTTBCNRD                0x0498C
+#define SXE_RTTQCNRR                0x0498C
+#define SXE_RTTDTECC                0x04990
+#define SXE_RTTDTECC_NO_BCN         0x00000100
+#define SXE_RTTBCNRC                0x04984
+#define SXE_RTTBCNRC_RS_ENA         0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK    0x00003FFF
+#define SXE_RTTBCNRC_RF_INT_SHIFT   14
+#define SXE_RTTBCNRC_RF_INT_MASK    (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+#define SXE_RTTBCNRM                0x04980
+#define SXE_RTTQCNRM                0x04980
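+/*
+ * RTTBCNRC holds the per-queue rate factor as 14.14 fixed point:
+ * RF_DEC_MASK covers the 14 fractional bits and RF_INT_MASK the
+ * integer bits above RF_INT_SHIFT. In comparable 10G MACs the factor
+ * is link_speed / target_rate (an assumption here, not confirmed by
+ * this header).
+ */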
+
+
+#define SXE_MACCFG      0x0CE04
+
+
+#define SXE_GCR_EXT           0x11050
+#define SXE_GSCL_5_82599      0x11030
+#define SXE_GSCL_6_82599      0x11034
+#define SXE_GSCL_7_82599      0x11038
+#define SXE_GSCL_8_82599      0x1103C
+#define SXE_PHYADR_82599      0x11040
+#define SXE_PHYDAT_82599      0x11044
+#define SXE_PHYCTL_82599      0x11048
+#define SXE_PBACLR_82599      0x11068
+
+#define SXE_CIAA_8259X	0x11088
+
+
+#define SXE_CIAD_8259X	0x1108C
+
+
+#define SXE_PICAUSE           0x110B0
+#define SXE_PIENA             0x110B8
+#define SXE_CDQ_MBR_82599     0x110B4
+#define SXE_PCIESPARE         0x110BC
+#define SXE_MISC_REG_82599    0x110F0
+#define SXE_ECC_CTRL_0_82599  0x11100
+#define SXE_ECC_CTRL_1_82599  0x11104
+#define SXE_ECC_STATUS_82599  0x110E0
+#define SXE_BAR_CTRL_82599    0x110F4
+
+
+#define SXE_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms       0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define SXE_GCR_CAP_VER2              0x00040000
+
+#define SXE_GCR_EXT_MSIX_EN           0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR     0x40000000
+#define SXE_GCR_EXT_VT_MODE_16        0x00000001
+#define SXE_GCR_EXT_VT_MODE_32        0x00000002
+#define SXE_GCR_EXT_VT_MODE_64        0x00000003
+#define SXE_GCR_EXT_SRIOV             (SXE_GCR_EXT_MSIX_EN | \
+					 SXE_GCR_EXT_VT_MODE_64)
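+/*
+ * VT_MODE selects the virtualization pool granularity (presumably
+ * 16/32/64 pools, per the macro names); SR-IOV operation combines
+ * MSI-X enable with 64-pool mode.
+ */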
+
+
+#define SXE_PCS1GCFIG 0x04200
+#define SXE_PCS1GLCTL 0x04208
+#define SXE_PCS1GLSTA 0x0420C
+#define SXE_PCS1GDBG0 0x04210
+#define SXE_PCS1GDBG1 0x04214
+#define SXE_PCS1GANA  0x04218
+#define SXE_PCS1GANLP 0x0421C
+#define SXE_PCS1GANNP 0x04220
+#define SXE_PCS1GANLPNP 0x04224
+#define SXE_HLREG0    0x04240
+#define SXE_HLREG1    0x04244
+#define SXE_PAP       0x04248
+#define SXE_MACA      0x0424C
+#define SXE_APAE      0x04250
+#define SXE_ARD       0x04254
+#define SXE_AIS       0x04258
+#define SXE_MSCA      0x0425C
+#define SXE_MSRWD     0x04260
+#define SXE_MLADD     0x04264
+#define SXE_MHADD     0x04268
+#define SXE_MAXFRS    0x04268
+#define SXE_TREG      0x0426C
+#define SXE_PCSS1     0x04288
+#define SXE_PCSS2     0x0428C
+#define SXE_XPCSS     0x04290
+#define SXE_MFLCN     0x04294
+#define SXE_SERDESC   0x04298
+#define SXE_MAC_SGMII_BUSY 0x04298
+#define SXE_MACS      0x0429C
+#define SXE_AUTOC     0x042A0
+#define SXE_LINKS     0x042A4
+#define SXE_LINKS2    0x04324
+#define SXE_AUTOC2    0x042A8
+#define SXE_AUTOC3    0x042AC
+#define SXE_ANLP1     0x042B0
+#define SXE_ANLP2     0x042B4
+#define SXE_MACC      0x04330
+#define SXE_ATLASCTL  0x04800
+#define SXE_MMNGC     0x042D0
+#define SXE_ANLPNP1   0x042D4
+#define SXE_ANLPNP2   0x042D8
+#define SXE_KRPCSFC   0x042E0
+#define SXE_KRPCSS    0x042E4
+#define SXE_FECS1     0x042E8
+#define SXE_FECS2     0x042EC
+#define SXE_SMADARCTL 0x14F10
+#define SXE_MPVC      0x04318
+#define SXE_SGMIIC    0x04314
+
+
+#define SXE_COMCTRL             0x14400
+#define SXE_PCCTRL              0x14404
+#define SXE_LPBKCTRL            0x1440C
+#define SXE_MAXFS               0x14410
+#define SXE_SACONH              0x14420
+#define SXE_VLANCTRL            0x14430
+#define SXE_VLANID              0x14434
+#define SXE_FPGA_SDS_CON        0x14700
+
+
+#define SXE_COMCTRL_TXEN        0x0001
+#define SXE_COMCTRL_RXEN        0x0002
+#define SXE_COMCTRL_EDSEL       0x0004
+#define SXE_COMCTRL_SPEED_1G    0x0200
+#define SXE_COMCTRL_SPEED_10G   0x0300
+
+
+#define SXE_PCCTRL_TXCE         0x0001
+#define SXE_PCCTRL_RXCE         0x0002
+#define SXE_PCCTRL_PEN          0x0100
+#define SXE_PCCTRL_PCSC_ALL     0x30000
+
+
+#define SXE_MAXFS_TFSEL         0x0001
+#define SXE_MAXFS_RFSEL         0x0002
+#define SXE_MAXFS_MFS_MASK      0xFFFF0000
+#define SXE_MAXFS_MFS           0x40000000
+#define SXE_MAXFS_MFS_SHIFT     16
+
+
+#define SXE_FPGA_SDS_CON_FULL_DUPLEX_MODE    0x00200000
+#define SXE_FPGA_SDS_CON_ANRESTART           0x00008000
+#define SXE_FPGA_SDS_CON_AN_ENABLE           0x00001000
+
+
+#define SXE_RSCDBU_RSCSMALDIS_MASK    0x0000007F
+#define SXE_RSCDBU_RSCACKDIS          0x00000080
+
+
+#define SXE_RDRXCTL_RDMTS_1_2     0x00000000  
+#define SXE_RDRXCTL_CRCSTRIP      0x00000002  
+#define SXE_RDRXCTL_PSP           0x00000004  
+#define SXE_RDRXCTL_MVMEN         0x00000020
+#define SXE_RDRXCTL_DMAIDONE      0x00000008  
+#define SXE_RDRXCTL_AGGDIS        0x00010000  
+#define SXE_RDRXCTL_RSCFRSTSIZE   0x003E0000  
+#define SXE_RDRXCTL_RSCLLIDIS     0x00800000  
+#define SXE_RDRXCTL_RSCACKC       0x02000000  
+#define SXE_RDRXCTL_FCOE_WRFIX    0x04000000  
+#define SXE_RDRXCTL_MBINTEN       0x10000000
+#define SXE_RDRXCTL_MDP_EN        0x20000000
+
+
+#define SXE_CTRL_GIO_DIS      0x00000004
+#define SXE_CTRL_LNK_RST      0x00000008
+#define SXE_CTRL_RST          0x04000000
+#define SXE_CTRL_RST_MASK     (SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+
+
+#define SXE_MHADD_MFS_MASK    0xFFFF0000
+#define SXE_MHADD_MFS_SHIFT   16
+
+
+#define SXE_CTRL_EXT_PFRSTD   0x00004000
+#define SXE_CTRL_EXT_NS_DIS   0x00010000
+#define SXE_CTRL_EXT_RO_DIS   0x00020000
+#define SXE_CTRL_EXT_DRV_LOAD 0x10000000
+
+
+#define SXE_TXPBSIZE_20KB     0x00005000  
+#define SXE_TXPBSIZE_40KB     0x0000A000  
+#define SXE_RXPBSIZE_48KB     0x0000C000  
+#define SXE_RXPBSIZE_64KB     0x00010000  
+#define SXE_RXPBSIZE_80KB     0x00014000  
+#define SXE_RXPBSIZE_128KB    0x00020000  
+#define SXE_RXPBSIZE_MAX      0x00080000  
+#define SXE_TXPBSIZE_MAX      0x00028000  
+
+#define SXE_TXPKT_SIZE_MAX    0xA         
+#define SXE_MAX_PB		8
+
+
+#define SXE_HLREG0_TXCRCEN      0x00000001  
+#define SXE_HLREG0_RXCRCSTRP    0x00000002  
+#define SXE_HLREG0_JUMBOEN      0x00000004  
+#define SXE_HLREG0_TXPADEN      0x00000400  
+#define SXE_HLREG0_TXPAUSEEN    0x00001000  
+#define SXE_HLREG0_RXPAUSEEN    0x00004000  
+#define SXE_HLREG0_LPBK         0x00008000  
+#define SXE_HLREG0_MDCSPD       0x00010000  
+#define SXE_HLREG0_CONTMDC      0x00020000  
+#define SXE_HLREG0_CTRLFLTR     0x00040000  
+#define SXE_HLREG0_PREPEND      0x00F00000  
+#define SXE_HLREG0_PRIPAUSEEN   0x01000000  
+#define SXE_HLREG0_RXPAUSERECDA 0x06000000  
+#define SXE_HLREG0_RXLNGTHERREN 0x08000000  
+#define SXE_HLREG0_RXPADSTRIPEN 0x10000000  
+
+
+#define SXE_VMOLR_UPE		  0x00400000
+#define SXE_VMOLR_VPE		  0x00800000
+#define SXE_VMOLR_AUPE        0x01000000
+#define SXE_VMOLR_ROMPE       0x02000000
+#define SXE_VMOLR_ROPE        0x04000000
+#define SXE_VMOLR_BAM         0x08000000
+#define SXE_VMOLR_MPE         0x10000000
+
+
+#define SXE_RXCSUM_IPPCSE     0x00001000  
+#define SXE_RXCSUM_PCSD       0x00002000  
+
+
+#define SXE_VMD_CTL_VMDQ_EN     0x00000001
+#define SXE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+
+#define	SXE_MACCFG_PAD_EN       0x00000001
+
+
+#define SXE_IRQ_CLEAR_MASK    0xFFFFFFFF
+
+
+#define SXE_STATUS_LAN_ID         0x0000000C
+#define SXE_STATUS_LAN_ID_SHIFT   2         
+#define SXE_STATUS_GIO            0x00080000
+
+
+#define SXE_LINKS_KX_AN_COMP  0x80000000
+#define SXE_LINKS_UP          0x40000000
+#define SXE_LINKS_SPEED       0x20000000
+#define SXE_LINKS_MODE        0x18000000
+#define SXE_LINKS_RX_MODE     0x06000000
+#define SXE_LINKS_TX_MODE     0x01800000
+#define SXE_LINKS_XGXS_EN     0x00400000
+#define SXE_LINKS_SGMII_EN    0x02000000
+#define SXE_LINKS_PCS_1G_EN   0x00200000
+#define SXE_LINKS_1G_AN_EN    0x00100000
+#define SXE_LINKS_KX_AN_IDLE  0x00080000
+#define SXE_LINKS_1G_SYNC     0x00040000
+#define SXE_LINKS_10G_ALIGN   0x00020000
+#define SXE_LINKS_10G_LANE_SYNC 0x00017000
+#define SXE_LINKS_TL_FAULT    0x00001000
+#define SXE_LINKS_SIGNAL      0x00000F00
+
+
+#define SXE_PCI_DEVICE_STATUS   0x7A 
+#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
+#define SXE_PCI_LINK_STATUS     0x82 
+#define SXE_PCI_DEVICE_CONTROL2 0x98 
+#define SXE_PCI_LINK_WIDTH      0x3F0
+#define SXE_PCI_LINK_WIDTH_1    0x10
+#define SXE_PCI_LINK_WIDTH_2    0x20
+#define SXE_PCI_LINK_WIDTH_4    0x40
+#define SXE_PCI_LINK_WIDTH_8    0x80
+#define SXE_PCI_LINK_SPEED      0xF
+#define SXE_PCI_LINK_SPEED_2500 0x1
+#define SXE_PCI_LINK_SPEED_5000 0x2
+#define SXE_PCI_LINK_SPEED_8000 0x3
+#define SXE_PCI_HEADER_TYPE_REGISTER  0x0E
+#define SXE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define SXE_PCI_DEVICE_CONTROL2_16ms  0x0005
+
+#define SXE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define SXE_PCIDEVCTRL2_16_32ms_def	0x0
+#define SXE_PCIDEVCTRL2_50_100us	0x1
+#define SXE_PCIDEVCTRL2_1_2ms		0x2
+#define SXE_PCIDEVCTRL2_16_32ms	0x5
+#define SXE_PCIDEVCTRL2_65_130ms	0x6
+#define SXE_PCIDEVCTRL2_260_520ms	0x9
+#define SXE_PCIDEVCTRL2_1_2s		0xa
+#define SXE_PCIDEVCTRL2_4_8s		0xd
+#define SXE_PCIDEVCTRL2_17_34s	0xe
+
+
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	800
+
+
+#define SXE_RAH_VIND_MASK     0x003C0000
+#define SXE_RAH_VIND_SHIFT    18
+#define SXE_RAH_AV            0x80000000
+#define SXE_CLEAR_VMDQ_ALL    0xFFFFFFFF
+
+
+#define SXE_RFCTL_ISCSI_DIS       0x00000001
+#define SXE_RFCTL_ISCSI_DWC_MASK  0x0000003E
+#define SXE_RFCTL_ISCSI_DWC_SHIFT 1
+#define SXE_RFCTL_RSC_DIS		0x00000020
+#define SXE_RFCTL_NFSW_DIS        0x00000040
+#define SXE_RFCTL_NFSR_DIS        0x00000080
+#define SXE_RFCTL_NFS_VER_MASK    0x00000300
+#define SXE_RFCTL_NFS_VER_SHIFT   8
+#define SXE_RFCTL_NFS_VER_2       0
+#define SXE_RFCTL_NFS_VER_3       1
+#define SXE_RFCTL_NFS_VER_4       2
+#define SXE_RFCTL_IPV6_DIS        0x00000400
+#define SXE_RFCTL_IPV6_XSUM_DIS   0x00000800
+#define SXE_RFCTL_IPFRSP_DIS      0x00004000
+#define SXE_RFCTL_IPV6_EX_DIS     0x00010000
+#define SXE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+
+#define SXE_TXDCTL_ENABLE     0x02000000   
+#define SXE_TXDCTL_SWFLSH     0x04000000   
+#define SXE_TXDCTL_WTHRESH_SHIFT      16   
+
+
+#define SXE_RXCTRL_RXEN       0x00000001 
+#define SXE_RXCTRL_DMBYPS     0x00000002 
+#define SXE_RXDCTL_ENABLE     0x02000000 
+#define SXE_RXDCTL_SWFLSH     0x04000000 
+
+
+#define SXE_RXDCTL_DESC_FIFO_AFUL_TH_MASK 0x0000001F
+#define SXE_RXDCTL_AFUL_CFG_ERR		  0x00000020
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_MASK   0x00001F00
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT  8
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_MASK  0x001F0000
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT 16
+
+
+#define SXE_FCTRL_SBP 0x00000002  
+#define SXE_FCTRL_MPE 0x00000100  
+#define SXE_FCTRL_UPE 0x00000200  
+#define SXE_FCTRL_BAM 0x00000400  
+#define SXE_FCTRL_PMCF 0x00001000 
+#define SXE_FCTRL_DPF 0x00002000  
+
+
+#define SXE_QDE_ENABLE	0x00000001
+#define SXE_QDE_HIDE_VLAN	0x00000002
+#define SXE_QDE_IDX_MASK	0x00007F00
+#define SXE_QDE_IDX_SHIFT	8
+#define SXE_QDE_WRITE		0x00010000
+
+#define SXE_TXD_POPTS_IXSM 0x01      
+#define SXE_TXD_POPTS_TXSM 0x02      
+#define SXE_TXD_CMD_EOP    0x01000000
+#define SXE_TXD_CMD_IFCS   0x02000000
+#define SXE_TXD_CMD_IC     0x04000000
+#define SXE_TXD_CMD_RS     0x08000000
+#define SXE_TXD_CMD_DEXT   0x20000000
+#define SXE_TXD_CMD_VLE    0x40000000
+#define SXE_TXD_STAT_DD    0x00000001
+
+
+#define SXE_SRRCTL_BSIZEPKT_SHIFT     10          
+#define SXE_SRRCTL_RDMTS_SHIFT        22
+#define SXE_SRRCTL_RDMTS_MASK         0x01C00000
+#define SXE_SRRCTL_DROP_EN            0x10000000
+#define SXE_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define SXE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define SXE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define SXE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define SXE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define SXE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define SXE_SRRCTL_DESCTYPE_MASK      0x0E000000
+
+#define SXE_RXDPS_HDRSTAT_HDRSP       0x00008000
+#define SXE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define SXE_RXDADV_RSSTYPE_MASK       0x0000000F
+#define SXE_RXDADV_PKTTYPE_MASK       0x0000FFF0
+#define SXE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
+#define SXE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
+#define SXE_RXDADV_RSCCNT_MASK        0x001E0000
+#define SXE_RXDADV_RSCCNT_SHIFT       17
+#define SXE_RXDADV_HDRBUFLEN_SHIFT    5
+#define SXE_RXDADV_SPLITHEADER_EN     0x00001000
+#define SXE_RXDADV_SPH                0x8000
+
+
+#define SXE_ADVTXD_DTYP_DATA  0x00300000        
+#define SXE_ADVTXD_DCMD_IFCS  SXE_TXD_CMD_IFCS  
+#define SXE_ADVTXD_DCMD_DEXT  SXE_TXD_CMD_DEXT  
+#define SXE_ADVTXD_PAYLEN_SHIFT    14           
+
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+
+
+#define SXE_ERR_EEPROM                        -1
+#define SXE_ERR_EEPROM_CHECKSUM               -2
+#define SXE_ERR_PHY                           -3
+#define SXE_ERR_CONFIG                        -4
+#define SXE_ERR_PARAM                         -5
+#define SXE_ERR_MAC_TYPE                      -6
+#define SXE_ERR_UNKNOWN_PHY                   -7
+#define SXE_ERR_LINK_SETUP                    -8
+#define SXE_ERR_ADAPTER_STOPPED               -9
+#define SXE_ERR_INVALID_MAC_ADDR              -10
+#define SXE_ERR_DEVICE_NOT_SUPPORTED          -11
+#define SXE_ERR_MASTER_REQUESTS_PENDING       -12
+#define SXE_ERR_INVALID_LINK_SETTINGS         -13
+#define SXE_ERR_AUTONEG_NOT_COMPLETE          -14
+#define SXE_ERR_RESET_FAILED                  -15
+#define SXE_ERR_SWFW_SYNC                     -16
+#define SXE_ERR_PHY_ADDR_INVALID              -17
+#define SXE_ERR_I2C                           -18
+#define SXE_ERR_SFP_NOT_SUPPORTED             -19
+#define SXE_ERR_SFP_NOT_PRESENT               -20
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
+#define SXE_ERR_NO_SAN_ADDR_PTR               -22
+#define SXE_ERR_FDIR_REINIT_FAILED            -23
+#define SXE_ERR_EEPROM_VERSION                -24
+#define SXE_ERR_NO_SPACE                      -25
+#define SXE_ERR_OVERTEMP                      -26
+#define SXE_ERR_FC_NOT_NEGOTIATED             -27
+#define SXE_ERR_FC_NOT_SUPPORTED              -28
+#define SXE_ERR_SFP_SETUP_NOT_COMPLETE        -30
+#define SXE_ERR_PBA_SECTION                   -31
+#define SXE_ERR_INVALID_ARGUMENT              -32
+#define SXE_ERR_HOST_INTERFACE_COMMAND        -33
+#define SXE_ERR_FDIR_CMD_INCOMPLETE		-38
+#define SXE_ERR_FW_RESP_INVALID		-39
+#define SXE_ERR_TOKEN_RETRY			-40
+#define SXE_NOT_IMPLEMENTED                   0x7FFFFFFF
+
+#define SXE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
+#define SXE_FUSES0_300MHZ		BIT(5)
+#define SXE_FUSES0_REV_MASK		(3u << 6)
+
+#define SXE_KRM_PORT_CAR_GEN_CTRL(P)	((P) ? 0x8010 : 0x4010)
+#define SXE_KRM_LINK_S1(P)		((P) ? 0x8200 : 0x4200)
+#define SXE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+#define SXE_KRM_AN_CNTL_1(P)		((P) ? 0x822C : 0x422C)
+#define SXE_KRM_AN_CNTL_8(P)		((P) ? 0x8248 : 0x4248)
+#define SXE_KRM_SGMII_CTRL(P)		((P) ? 0x82A0 : 0x42A0)
+#define SXE_KRM_LP_BASE_PAGE_HIGH(P)	((P) ? 0x836C : 0x436C)
+#define SXE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
+#define SXE_KRM_DSP_TXFFE_STATE_5(P)	((P) ? 0x8638 : 0x4638)
+#define SXE_KRM_RX_TRN_LINKUP_CTRL(P)	((P) ? 0x8B00 : 0x4B00)
+#define SXE_KRM_PMD_DFX_BURNIN(P)	((P) ? 0x8E00 : 0x4E00)
+#define SXE_KRM_PMD_FLX_MASK_ST20(P)	((P) ? 0x9054 : 0x5054)
+#define SXE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
+#define SXE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
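+/*
+ * Each KR PHY register block exists once per port: the accessors
+ * above select the port-1 copy at base 0x8000 or the port-0 copy at
+ * base 0x4000, e.g. SXE_KRM_LINK_CTRL_1(0) == 0x420C.
+ */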
+
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA		~(0x3 << 20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR		BIT(20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR		(0x2 << 20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SGMII_EN		BIT(25)
+#define SXE_KRM_PMD_FLX_MASK_ST20_AN37_EN		BIT(26)
+#define SXE_KRM_PMD_FLX_MASK_ST20_AN_EN		BIT(27)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_10M		~(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_100M		BIT(28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_1G		(0x2 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_10G		(0x3 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_AN		(0x4 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G		(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK		(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART	BIT(31)
+
+#define SXE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		BIT(9)
+#define SXE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		BIT(11)
+
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK	(7u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G	(2u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G	(4u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN		BIT(12)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN	BIT(13)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ		BIT(14)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC		BIT(15)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX		BIT(16)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR		BIT(18)
+#define SXE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX		BIT(24)
+#define SXE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR		BIT(26)
+#define SXE_KRM_LINK_S1_MAC_AN_COMPLETE		BIT(28)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_ENABLE		BIT(29)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_RESTART		BIT(31)
+
+#define SXE_KRM_AN_CNTL_1_SYM_PAUSE			BIT(28)
+#define SXE_KRM_AN_CNTL_1_ASM_PAUSE			BIT(29)
+
+#define SXE_KRM_AN_CNTL_8_LINEAR			BIT(0)
+#define SXE_KRM_AN_CNTL_8_LIMITING			BIT(1)
+
+#define SXE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE		BIT(10)
+#define SXE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE		BIT(11)
+#define SXE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D	BIT(12)
+#define SXE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D		BIT(19)
+
+#define SXE_KRM_DSP_TXFFE_STATE_C0_EN			BIT(6)
+#define SXE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN		BIT(15)
+#define SXE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN		BIT(16)
+
+#define SXE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL	BIT(4)
+#define SXE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS	BIT(2)
+
+#define SXE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK	(3u << 16)
+
+#define SXE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN	BIT(1)
+#define SXE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN	BIT(2)
+#define SXE_KRM_TX_COEFF_CTRL_1_CZERO_EN		BIT(3)
+#define SXE_KRM_TX_COEFF_CTRL_1_OVRRD_EN		BIT(31)
+
+#define SXE_SB_IOSF_INDIRECT_CTRL		0x00011144
+#define SXE_SB_IOSF_INDIRECT_DATA		0x00011148
+
+#define SXE_SB_IOSF_CTRL_ADDR_SHIFT		0
+#define SXE_SB_IOSF_CTRL_ADDR_MASK		0xFF
+#define SXE_SB_IOSF_CTRL_RESP_STAT_SHIFT	18
+#define SXE_SB_IOSF_CTRL_RESP_STAT_MASK \
+				(0x3 << SXE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define SXE_SB_IOSF_CTRL_CMPL_ERR_SHIFT	20
+#define SXE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+				(0xFF << SXE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define SXE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT	28
+#define SXE_SB_IOSF_CTRL_TARGET_SELECT_MASK	0x7
+#define SXE_SB_IOSF_CTRL_BUSY_SHIFT		31
+#define SXE_SB_IOSF_CTRL_BUSY		BIT(SXE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define SXE_SB_IOSF_TARGET_KR_PHY	0
+
+#define SXE_NW_MNG_IF_SEL		0x00011178
+#define SXE_NW_MNG_IF_SEL_MDIO_ACT		BIT(1)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_10M	BIT(17)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_100M	BIT(18)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_1G	BIT(19)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_2_5G	BIT(20)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_10G	BIT(21)
+#define SXE_NW_MNG_IF_SEL_SGMII_ENABLE	BIT(25)
+#define SXE_NW_MNG_IF_SEL_INT_PHY_MODE	BIT(24)
+#define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT	3
+#define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD	\
+				(0x1F << SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
+
+#endif 
+
diff --git a/drivers/net/sxe/include/sxe_version.h b/drivers/net/sxe/include/sxe_version.h
new file mode 100644
index 0000000000..50afd69a63
--- /dev/null
+++ b/drivers/net/sxe/include/sxe_version.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_VER_H__
+#define __SXE_VER_H__
+
+#define SXE_VERSION                "0.0.0.0"
+#define SXE_COMMIT_ID              "13cf402"
+#define SXE_BRANCH                 "feature/sagitta-1.3.0-P3-dpdk_patch_rwy"
+#define SXE_BUILD_TIME             "2024-08-24 11:02:12"
+
+
+#define SXE_DRV_NAME                   "sxe"
+#define SXEVF_DRV_NAME                 "sxevf"
+#define SXE_DRV_LICENSE                "GPL v2"
+#define SXE_DRV_COPYRIGHT              "Copyright (C), 2022, Linkdata Technology Co., Ltd."
+#define SXE_DRV_AUTHOR                 "Linkdata Technology Corporation"
+#define SXE_DRV_DESCRIPTION            "LD 1160-2X 2-port 10G SFP+ NIC"
+#define SXEVF_DRV_DESCRIPTION          "LD 1160-2X Virtual Function"
+#define SXE_DRV_CONNECTION             "Linkdata Technology 10G Network Connection"
+
+
+#define SXE_FW_NAME                     "soc"
+#define SXE_FW_ARCH                     "arm32"
+
+#ifndef PS3_CFG_RELEASE
+#define PS3_SXE_FW_BUILD_MODE             "debug"
+#else
+#define PS3_SXE_FW_BUILD_MODE             "release"
+#endif
+
+#endif
diff --git a/drivers/net/sxe/meson.build b/drivers/net/sxe/meson.build
new file mode 100644
index 0000000000..5e7b49dcf6
--- /dev/null
+++ b/drivers/net/sxe/meson.build
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C), 2020, Wuxi Stars Micro System Technologies Co., Ltd.
+
+cflags += ['-DSXE_DPDK']
+cflags += ['-DSXE_HOST_DRIVER']
+cflags += ['-DSXE_DPDK_L4_FEATURES']
+cflags += ['-DSXE_DPDK_SRIOV']
+
+deps += ['hash']
+sources = files(
+	'pf/sxe_main.c',
+	'pf/sxe_filter.c',
+	'pf/sxe_flow_ctrl.c',
+	'pf/sxe_irq.c',
+	'pf/sxe_ethdev.c',
+	'pf/sxe_offload.c',
+	'pf/sxe_queue.c',
+	'pf/sxe_rx.c',
+	'pf/sxe_tx.c',
+	'pf/sxe_stats.c',
+	'pf/sxe_pmd_hdc.c',
+	'pf/sxe_phy.c',
+	'pf/sxe_ptp.c',
+	'pf/sxe_vf.c',
+	'pf/sxe_dcb.c',
+	'vf/sxevf_main.c',
+	'vf/sxevf_filter.c',
+	'vf/sxevf_irq.c',
+	'vf/sxevf_msg.c',
+	'vf/sxevf_ethdev.c',
+	'vf/sxevf_stats.c',
+	'vf/sxevf_rx.c',
+	'vf/sxevf_tx.c',
+	'vf/sxevf_queue.c',
+	'vf/sxevf_offload.c',
+	'base/sxe_queue_common.c',
+	'base/sxe_rx_common.c',
+	'base/sxe_tx_common.c',
+	'base/sxe_offload_common.c',
+	'base/sxe_common.c',
+	'base/sxe_hw.c',
+	'base/sxevf_hw.c',
+)
+
+testpmd_sources = files('sxe_testpmd.c')
+
+includes += include_directories('base')
+includes += include_directories('pf')
+includes += include_directories('vf')
+includes += include_directories('include/sxe/')
+includes += include_directories('include/')
+
diff --git a/drivers/net/sxe/pf/rte_pmd_sxe.h b/drivers/net/sxe/pf/rte_pmd_sxe.h
new file mode 100644
index 0000000000..70d342d433
--- /dev/null
+++ b/drivers/net/sxe/pf/rte_pmd_sxe.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __PMD_SXE_H__
+#define __PMD_SXE_H__
+
+typedef uint8_t		u8;
+typedef uint16_t	u16;
+typedef uint32_t	u32;
+typedef int32_t		s32;
+
+s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on);
+
+s32 rte_pmd_sxe_tc_bw_set(u8 port,
+				u8 tc_num, u8 *bw_weight);
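+/*
+ * Illustrative call (the values are only an example): a weight must
+ * be given for every enabled TC and the weights must sum to 100,
+ * e.g. with 4 TCs on port 0:
+ *
+ *     u8 bw[4] = {40, 30, 20, 10};
+ *     rte_pmd_sxe_tc_bw_set(0, 4, bw);
+ */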
+
+s32 rte_pmd_sxe_macsec_enable(u16 port, u8 en, u8 rp_en);
+
+s32 rte_pmd_sxe_macsec_disable(u16 port);
+
+s32 rte_pmd_sxe_macsec_txsc_configure(u16 port, u8 *mac);
+
+s32 rte_pmd_sxe_macsec_rxsc_configure(u16 port, u8 *mac, u16 pi);
+
+s32 rte_pmd_sxe_macsec_txsa_configure(u16 port, u8 sa_idx, u8 an,
+				 u32 pn, u8 *keys);
+
+s32 rte_pmd_sxe_macsec_rxsa_configure(u16 port, u8 sa_idx, u8 an,
+				 u32 pn, u8 *keys);
+
+#endif
+
diff --git a/drivers/net/sxe/pf/sxe.h b/drivers/net/sxe/pf/sxe.h
new file mode 100644
index 0000000000..139480e90d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_H__
+#define __SXE_H__
+
+#include <rte_pci.h>
+#include <rte_time.h>
+
+#include "sxe_types.h"
+#include "sxe_filter.h"
+#include "sxe_irq.h"
+#include "sxe_stats.h"
+#include "sxe_phy.h"
+#include "sxe_vf.h"
+#include "sxe_dcb.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+#include "sxe_macsec.h"
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include "sxe_filter_ctrl.h"
+#include "sxe_fnav.h"
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include "sxe_tm.h"
+#endif
+
+struct sxe_hw;
+struct sxe_vlan_context;
+
+#define SXE_LPBK_DISABLED   0x0 
+#define SXE_LPBK_ENABLED    0x1 
+
+#define PCI_VENDOR_ID_STARS      0x1FF2
+#define SXE_DEV_ID_ASIC          0x10a1
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+		((u8 *)(x))[2], ((u8 *)(x))[3], \
+		((u8 *)(x))[4], ((u8 *)(x))[5]
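+/*
+ * Example: PMD_LOG_INFO(DRV, "mac addr: " MAC_FMT, MAC_ADDR(addr))
+ * prints a u8[6] address in colon-separated hex form.
+ */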
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p)  rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p)  do {} while (0)
+#endif
+
+#define RTE_PMD_USE_PREFETCH
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_sxe_prefetch(p)   rte_prefetch0(p)
+#else
+#define rte_sxe_prefetch(p)   do {} while (0)
+#endif
+
+struct sxe_ptp_context {
+	struct rte_timecounter      systime_tc;
+	struct rte_timecounter      rx_tstamp_tc;
+	struct rte_timecounter      tx_tstamp_tc;
+	u32 tx_hwtstamp_sec;
+	u32 tx_hwtstamp_nsec;
+};
+
+struct sxe_adapter {
+	struct sxe_hw hw;
+
+	struct sxe_irq_context irq_ctxt;
+
+	struct sxe_vlan_context vlan_ctxt;
+	struct sxe_mac_filter_context mac_filter_ctxt;
+#ifdef RTE_ADAPTER_HAVE_FNAV_CONF
+	struct rte_eth_fdir_conf fnav_conf;
+#endif
+	struct sxe_ptp_context ptp_ctxt;
+	struct sxe_phy_context phy_ctxt;
+	struct sxe_virtual_context vt_ctxt; 
+
+	struct sxe_stats_info stats_info;
+	struct sxe_dcb_context dcb_ctxt;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	struct sxe_macsec_context macsec_ctxt;
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	struct sxe_tm_context tm_ctxt;
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	struct sxe_filter_context filter_ctxt;
+
+	struct sxe_fnav_context fnav_ctxt;
+#endif
+
+	bool rx_batch_alloc_allowed;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	bool rx_vec_allowed;
+#endif
+	s8 name[PCI_PRI_STR_SIZE+1]; 
+
+	u32 mtu;
+
+	bool rss_reta_updated;
+
+	rte_atomic32_t link_thread_running;
+	pthread_t link_thread_tid;
+	bool is_stopped;
+};
+
+s32 sxe_hw_reset(struct sxe_hw *hw);
+
+void sxe_hw_start(struct sxe_hw *hw);
+
+bool is_sxe_supported(struct rte_eth_dev *dev);
+
+#endif 
diff --git a/drivers/net/sxe/pf/sxe_dcb.c b/drivers/net/sxe/pf/sxe_dcb.c
new file mode 100644
index 0000000000..5217cc655f
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_dcb.c
@@ -0,0 +1,1014 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_phy.h"
+#include "sxe_errno.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+#include "sxe_compat_version.h"
+#include "rte_pmd_sxe.h"
+
+#define DCB_RX_CONFIG  1
+#define DCB_TX_CONFIG  1
+
+#define DCB_CREDIT_QUANTUM	64   
+#define MAX_CREDIT_REFILL       511  
+#define MAX_CREDIT              4095 
+
+void sxe_dcb_init(struct rte_eth_dev *dev)
+{
+	u8 i;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_dcb_config *cfg = &adapter->dcb_ctxt.config;
+	struct sxe_tc_config *tc;
+	u8 dcb_max_tc = SXE_DCB_MAX_TRAFFIC_CLASS;
+
+	memset(cfg, 0, sizeof(struct sxe_dcb_config));
+
+	cfg->num_tcs.pg_tcs = dcb_max_tc;
+	cfg->num_tcs.pfc_tcs = dcb_max_tc;
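+	/*
+	 * Split 100% of the link evenly across the TCs; the (i & 1)
+	 * term turns the eight integer shares into 12,13,12,13,...,
+	 * which sums back to exactly 100.
+	 */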
+	for (i = 0; i < dcb_max_tc; i++) {
+		tc = &cfg->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_id = i;
+		tc->channel[DCB_PATH_TX].bwg_percent =
+				 (u8)(100/dcb_max_tc + (i & 1));
+		tc->channel[DCB_PATH_RX].bwg_id = i;
+		tc->channel[DCB_PATH_RX].bwg_percent =
+				 (u8)(100/dcb_max_tc + (i & 1));
+		tc->pfc_type = pfc_disabled;
+	}
+
+	tc = &cfg->tc_config[0];
+	tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0xFF;
+	tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0xFF;
+	for (i = 0; i < MAX_BW_GROUP; i++) {
+		cfg->bwg_link_percent[DCB_PATH_TX][i] = 100;
+		cfg->bwg_link_percent[DCB_PATH_RX][i] = 100;
+	}
+	cfg->rx_pba_config = SXE_DCB_PBA_EQUAL;
+	cfg->pfc_mode_enable = false;
+	cfg->vmdq_active = true;
+	cfg->round_robin_enable = false;
+
+	return;
+}
+
+static u8 sxe_dcb_get_tc_from_up(struct sxe_dcb_config *cfg,
+					u8 direction, u8 up)
+{
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+	u8 prio_mask = BIT(up);
+	u8 tc = cfg->num_tcs.pg_tcs;
+
+	if (!tc) {
+		goto l_ret;
+	}
+
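+	/* Scan from the highest TC down; TC 0 is the fallback when no
+	 * up_to_tc_bitmap claims this user priority.
+	 */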
+	for (tc--; tc; tc--) {
+		if (prio_mask & tc_config[tc].channel[direction].up_to_tc_bitmap) {
+			break;
+		}
+	}
+
+l_ret:
+	LOG_DEBUG("up[%u] to tc[%u]\n", up, tc);
+	return tc;
+}
+
+static void sxe_dcb_up2tc_map_parse(struct sxe_dcb_config *cfg,
+						u8 direction, u8 *map)
+{
+	u8 up;
+
+	for (up = 0; up < MAX_USER_PRIORITY; up++) {
+		map[up] = sxe_dcb_get_tc_from_up(cfg, direction, up);
+		LOG_DEBUG("up[%u] --- up2tc_map[%u]\n", up, map[up]);
+	}
+
+	return;
+}
+
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+					struct rte_eth_pfc_conf *pfc_conf)
+{
+	s32 ret;
+	u32 rx_buf_size;
+	u32 max_high_water;
+	u8 tc_idx;
+	u8  up2tc_map[MAX_USER_PRIORITY] = { 0 };
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	struct sxe_dcb_config *dcb_config = &adapter->dcb_ctxt.config;
+
+	static const enum sxe_fc_mode fcmode[] = {
+		SXE_FC_NONE,
+		SXE_FC_RX_PAUSE,
+		SXE_FC_TX_PAUSE,
+		SXE_FC_FULL,
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_dcb_up2tc_map_parse(dcb_config, DCB_PATH_RX, up2tc_map);
+	tc_idx = up2tc_map[pfc_conf->priority];
+	rx_buf_size = sxe_hw_rx_pkt_buf_size_get(hw, tc_idx);
+	PMD_LOG_DEBUG(INIT, "Rx packet buffer size = 0x%x", rx_buf_size);
+
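+	/*
+	 * Water marks are in KB: reserve one max-size frame of headroom,
+	 * then shift from bytes to KB (assuming SXE_RX_PKT_BUF_SIZE_SHIFT
+	 * is 10, which the KB-based checks below rely on).
+	 */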
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> SXE_RX_PKT_BUF_SIZE_SHIFT;
+	if ((pfc_conf->fc.high_water > max_high_water) ||
+	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+		PMD_LOG_ERR(INIT, "Invalid high/low water setup value in KB, "
+			    "high water=0x%x, low water=0x%x",
+			    pfc_conf->fc.high_water, pfc_conf->fc.low_water);
+		PMD_LOG_ERR(INIT, "high_water must be <= 0x%x", max_high_water);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sxe_hw_fc_requested_mode_set(hw, fcmode[pfc_conf->fc.mode]);
+	sxe_hw_fc_pause_time_set(hw, pfc_conf->fc.pause_time);
+	sxe_hw_fc_send_xon_set(hw, pfc_conf->fc.send_xon);
+	sxe_hw_fc_tc_low_water_mark_set(hw, tc_idx, pfc_conf->fc.low_water);
+	sxe_hw_fc_tc_high_water_mark_set(hw, tc_idx, pfc_conf->fc.high_water);
+
+	ret = sxe_pfc_enable(adapter, tc_idx);
+
+	if ((ret == 0) || (ret == SXE_ERR_FC_NOT_NEGOTIATED)) {
+		PMD_LOG_DEBUG(INIT, "pfc set end ret = %d", ret);
+		ret = 0;
+		goto l_end;
+	}
+
+	PMD_LOG_ERR(INIT, "sxe_dcb_pfc_enable = 0x%x", ret);
+	ret = -EIO;
+l_end:
+	return ret;
+}
+
+s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_dcb_config *dcb_config = &adapter->dcb_ctxt.config;
+
+	struct sxe_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	u8 tcs_num;
+	u8 i, j;
+
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
+		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+	} else {
+		dcb_info->nb_tcs = 1;
+	}
+
+	tc_queue = &dcb_info->tc_queue;
+	tcs_num = dcb_info->nb_tcs;
+
+	if (dcb_config->vmdq_active) { 
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+		}
+
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < tcs_num; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < tcs_num; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * tcs_num + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * tcs_num + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
+			}
+		}
+	} else { 
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+		}
+
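+		/*
+		 * Pure DCB mode uses a fixed hardware queue layout:
+		 * with 4 TCs the TX queues split 64/32/16/16, with
+		 * 8 TCs 32/32/16/16/8/8/8/8 (128 queues either way).
+		 */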
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 64;
+			dcb_info->tc_queue.tc_txq[0][2].base = 96;
+			dcb_info->tc_queue.tc_txq[0][3].base = 112;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 32;
+			dcb_info->tc_queue.tc_txq[0][2].base = 64;
+			dcb_info->tc_queue.tc_txq[0][3].base = 80;
+			dcb_info->tc_queue.tc_txq[0][4].base = 96;
+			dcb_info->tc_queue.tc_txq[0][5].base = 104;
+			dcb_info->tc_queue.tc_txq[0][6].base = 112;
+			dcb_info->tc_queue.tc_txq[0][7].base = 120;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+		}
+	}
+
+	for (i = 0; i < dcb_info->nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		dcb_info->tc_bws[i] = tc->channel[DCB_PATH_TX].bwg_percent;
+	}
+
+	return 0;
+}
+
+static void sxe_dcb_vmdq_rx_param_get(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
+	}
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = vmdq_rx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+	return;
+}
+
+void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_dcb_conf *cfg;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	enum rte_eth_nb_pools pools_num;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	pools_num = cfg->nb_queue_pools;
+
+	if (pools_num != RTE_ETH_16_POOLS && pools_num != RTE_ETH_32_POOLS) {
+		sxe_rss_disable(dev);
+		return;
+	}
+
+	sxe_hw_dcb_vmdq_mq_configure(hw, pools_num);
+
+	sxe_hw_dcb_vmdq_default_pool_configure(hw,
+						cfg->enable_default_pool,
+						cfg->default_pool);
+
+	sxe_hw_dcb_vmdq_up_2_tc_configure(hw, cfg->dcb_tc);
+
+	sxe_hw_dcb_vmdq_vlan_configure(hw, pools_num);
+
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		sxe_hw_dcb_vmdq_pool_configure(hw,
+					i, cfg->pool_map[i].vlan_id,
+					cfg->pool_map[i].pools);
+	}
+
+	return;
+}
+
+static void sxe_dcb_rx_param_get(struct rte_eth_dev *dev,
+		struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_rx_conf *rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	dcb_config->num_tcs.pg_tcs = (u8)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (u8)rx_conf->nb_tcs;
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = rx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+	return;
+}
+
+static void sxe_dcb_rx_hw_configure(struct rte_eth_dev *dev,
+		       struct sxe_dcb_config *dcb_config)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+	sxe_hw_dcb_rx_configure(hw, dcb_config->vmdq_active,
+				RTE_ETH_DEV_SRIOV(dev).active,
+				dcb_config->num_tcs.pg_tcs);
+	return;
+}
+
+static void sxe_dcb_vmdq_tx_param_get(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
+	}
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = vmdq_tx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+	return;
+}
+
+static void sxe_dcb_vmdq_tx_hw_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_pool_xmit_enable(hw, 0, (u8)vmdq_tx_conf->nb_queue_pools);
+
+	sxe_hw_dcb_tx_configure(hw, dcb_config->vmdq_active,
+				dcb_config->num_tcs.pg_tcs);
+	return;
+}
+
+static void sxe_dcb_tx_param_get(struct rte_eth_dev *dev,
+		struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_tx_conf *tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	dcb_config->num_tcs.pg_tcs = (u8)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (u8)tx_conf->nb_tcs;
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = tx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+	return;
+}
+
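+/*
+ * Minimum credit is half of the max frame in 64-byte quanta, rounded
+ * up: e.g. max_frame = 1518 gives ((759 + 63) / 64) = 12 credits.
+ */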
+static u32 sxe_dcb_min_credit_get(u32 max_frame)
+{
+	return ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+				DCB_CREDIT_QUANTUM;
+}
+
+static u16 sxe_dcb_cee_tc_link_percent_get(
+			struct sxe_dcb_config *cee_config,
+			u8 direction, u8 tc_index)
+{
+	u8  bw_percent;
+	u16 link_percentage;
+	struct sxe_tc_bw_alloc *tc_info;
+
+	tc_info = &cee_config->tc_config[tc_index].channel[direction];
+	link_percentage =
+		cee_config->bwg_link_percent[direction][tc_info->bwg_id];
+	bw_percent = tc_info->bwg_percent;
+
+	link_percentage = (link_percentage * bw_percent) / 100;
+
+	return link_percentage;
+}
+
+static u32 sxe_dcb_cee_min_link_percent_get(
+			struct sxe_dcb_config *cee_config, u8 direction)
+{
+	u8  tc_index;
+	u16 link_percentage;
+	u32 min_link_percent = 100;
+
+	for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
+		link_percentage = sxe_dcb_cee_tc_link_percent_get(
+					cee_config, direction, tc_index);
+
+		if (link_percentage && link_percentage < min_link_percent) {
+			min_link_percent = link_percentage;
+		}
+	}
+
+	return min_link_percent;
+}
+
+static s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
+				   struct sxe_dcb_config *cee_config,
+				   u32 max_frame, u8 direction)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+	struct sxe_tc_bw_alloc *tc_info;
+	u32 min_credit;
+	u32 total_credit;
+	u32 min_link_percent;
+	u32 credit_refill;
+	u32 credit_max;
+	u16 link_percentage;
+	u8  tc_index;
+
+	LOG_DEBUG_BDF("cee_config[%p] input max_frame[%u] direction[%s]\n",
+			cee_config, max_frame, direction ? "RX" : "TX");
+
+	min_credit = sxe_dcb_min_credit_get(max_frame);
+	LOG_DEBUG_BDF("cee_config[%p] max_frame[%u] got min_credit[%u]\n",
+			cee_config, max_frame, min_credit);
+
+	min_link_percent = sxe_dcb_cee_min_link_percent_get(cee_config, direction);
+	LOG_DEBUG_BDF("cee_config[%p] direction[%s] got min_link_percent[%u]\n",
+			cee_config, direction ? "RX" : "TX", min_link_percent);
+
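+	/*
+	 * Size the credit pool so that even the TC with the smallest
+	 * link share still refills at least min_credit; the +1 absorbs
+	 * the integer-division loss.
+	 */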
+	total_credit = (min_credit / min_link_percent) + 1;
+	LOG_DEBUG_BDF("cee_config[%p] total_credit=%u\n", cee_config, total_credit);
+
+	for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
+		tc_info = &cee_config->tc_config[tc_index].channel[direction];
+
+		link_percentage = sxe_dcb_cee_tc_link_percent_get(
+					cee_config, direction, tc_index);
+		LOG_DEBUG_BDF("tc[%u] bwg_percent=%u, link_percentage=%u\n",
+			tc_index, tc_info->bwg_percent, link_percentage);
+
+		if (tc_info->bwg_percent > 0 && link_percentage == 0) {
+			link_percentage = 1;
+		}
+
+		tc_info->link_percent = (u8)link_percentage;
+
+		credit_refill = min(link_percentage * total_credit,
+				    (u32)MAX_CREDIT_REFILL);
+
+		if (credit_refill < min_credit) {
+			credit_refill = min_credit;
+		}
+
+		tc_info->data_credits_refill = (u16)credit_refill;
+		LOG_DEBUG_BDF("tc[%u] credit_refill=%u\n",
+					tc_index, credit_refill);
+
+		credit_max = (link_percentage * MAX_CREDIT) / 100;
+
+		if (credit_max < min_credit) {
+			credit_max = min_credit;
+		}
+		LOG_DEBUG_BDF("tc[%u] credit_max=%u\n",
+					tc_index, credit_max);
+
+		if (direction == DCB_PATH_TX) {
+			cee_config->tc_config[tc_index].desc_credits_max =
+				(u16)credit_max;
+		}
+
+		tc_info->data_credits_max = (u16)credit_max;
+	}
+
+	return ret;
+}
+
+static void sxe_dcb_cee_refill_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u16 *refill)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		refill[tc] =
+			tc_config[tc].channel[direction].data_credits_refill;
+		LOG_DEBUG("tc[%u] --- refill[%u]\n", tc, refill[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_config *cfg,
+						u16 *max_credits)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		max_credits[tc] = tc_config[tc].desc_credits_max;
+		LOG_DEBUG("tc[%u] --- max_credits[%u]\n", tc, max_credits[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *bwgid)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		bwgid[tc] = tc_config[tc].channel[direction].bwg_id;
+		LOG_DEBUG("tc[%u] --- bwgid[%u]\n", tc, bwgid[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_prio_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *ptype)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		ptype[tc] = tc_config[tc].channel[direction].prio_type;
+		LOG_DEBUG("tc[%u] --- ptype[%u]\n", tc, ptype[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_pfc_parse(struct sxe_dcb_config *cfg,
+						u8 *map, u8 *pfc_en)
+{
+	u32 up;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (*pfc_en = 0, up = 0; up < MAX_TRAFFIC_CLASS; up++) {
+		if (tc_config[map[up]].pfc_type != pfc_disabled) {
+			*pfc_en |= BIT(up);
+		}
+	}
+	LOG_DEBUG("cfg[%p] pfc_en[0x%x]\n", cfg, *pfc_en);
+
+	return;
+}
+
+static s32 sxe_dcb_tc_stats_configure(struct sxe_hw *hw,
+					struct sxe_dcb_config *dcb_config)
+{
+	s32 ret = 0;
+	u8 tc_count = 8;
+	bool vmdq_active = false;
+
+	if (dcb_config != NULL) {
+		tc_count = dcb_config->num_tcs.pg_tcs;
+		vmdq_active = dcb_config->vmdq_active;
+	}
+
+	if (!((tc_count == 8 && !vmdq_active) || tc_count == 4)) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(INIT, "dcb tc stats configure failed, "
+				"tc_num = %u, vmdq_active = %s",
+				tc_count, vmdq_active ? "on" : "off");
+		goto l_end;
+	}
+
+	sxe_hw_dcb_tc_stats_configure(hw, tc_count, vmdq_active);
+
+l_end:
+	return ret;
+}
+
+static void sxe_dcb_rx_mq_mode_configure(struct rte_eth_dev *dev,
+					struct sxe_dcb_config *dcb_config,
+					u8 *rx_configed)
+{
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		dcb_config->vmdq_active = true;
+		*rx_configed = DCB_RX_CONFIG;
+
+		sxe_dcb_vmdq_rx_param_get(dev, dcb_config);
+		sxe_dcb_vmdq_rx_hw_configure(dev);
+		break;
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
+		dcb_config->vmdq_active = false;
+		*rx_configed = DCB_RX_CONFIG;
+
+		sxe_dcb_rx_param_get(dev, dcb_config);
+		sxe_dcb_rx_hw_configure(dev, dcb_config);
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "Incorrect DCB RX mode configuration");
+		break;
+	}
+
+	return;
+}
+
+static void sxe_dcb_tx_mq_mode_configure(struct rte_eth_dev *dev,
+					struct sxe_dcb_config *dcb_config,
+					u8 *tx_configed)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		dcb_config->vmdq_active = true;
+		*tx_configed = DCB_TX_CONFIG;
+
+		sxe_dcb_vmdq_tx_param_get(dev, dcb_config);
+		sxe_dcb_vmdq_tx_hw_configure(dev, dcb_config);
+		break;
+
+	case RTE_ETH_MQ_TX_DCB:
+		dcb_config->vmdq_active = false;
+		*tx_configed = DCB_TX_CONFIG;
+
+		sxe_dcb_tx_param_get(dev, dcb_config);
+		sxe_hw_dcb_tx_configure(hw, dcb_config->vmdq_active,
+				dcb_config->num_tcs.pg_tcs);
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "Incorrect DCB TX mode configuration");
+		break;
+	}
+
+	return;
+}
+
+static void sxe_dcb_bwg_percentage_alloc(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	u8 i;
+	struct sxe_tc_config *tc;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_bw_config *bw_conf = &adapter->dcb_ctxt.bw_config;
+
+	u8 nb_tcs = dcb_config->num_tcs.pfc_tcs;
+
+	if (nb_tcs == RTE_ETH_4_TCS) {
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			if (map[i] >= nb_tcs) {
+				PMD_LOG_INFO(DRV, "up %u maps to nonexistent "
+						"tc %u, fall back to tc 0",
+						i, map[i]);
+				map[i] = 0;
+			}
+		}
+
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs) {
+				tc->channel[DCB_PATH_TX].bwg_percent =
+					(u8)(100 / nb_tcs);
+			}
+
+			tc->channel[DCB_PATH_RX].bwg_percent =
+						(u8)(100 / nb_tcs);
+		}
+		for (; i < MAX_TRAFFIC_CLASS; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->channel[DCB_PATH_TX].bwg_percent = 0;
+			tc->channel[DCB_PATH_RX].bwg_percent = 0;
+		}
+	} else {
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs) {
+				tc->channel[DCB_PATH_TX].bwg_percent =
+					(u8)(100 / nb_tcs + (i & 1));
+			}
+
+			tc->channel[DCB_PATH_RX].bwg_percent =
+				(u8)(100 / nb_tcs + (i & 1));
+		}
+	}
+
+	return;
+}
+
+static void sxe_dcb_rx_pkt_buf_configure(struct sxe_hw *hw,
+						u16 rx_buffer_size, u8 tcs_num)
+{
+	u8 i;
+	u16 pbsize;
+
+	pbsize = (u16)(rx_buffer_size / tcs_num);
+
+	for (i = 0; i < tcs_num; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
+	}
+
+	for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
+	}
+
+	return;
+}
+
+static void sxe_dcb_tx_pkt_buf_configure(struct sxe_hw *hw, u8 tcs_num)
+{
+	sxe_hw_tx_pkt_buf_switch(hw, false);
+
+	sxe_hw_tx_pkt_buf_size_configure(hw, tcs_num);
+	sxe_hw_tx_pkt_buf_thresh_configure(hw, tcs_num, true);
+
+	sxe_hw_tx_pkt_buf_switch(hw, true);
+	return;
+}
+
+static void sxe_dcb_rx_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
+	u16 refill[MAX_TRAFFIC_CLASS] = {0};
+	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+
+	sxe_dcb_rx_pkt_buf_configure(hw, SXE_RX_PKT_BUF_SIZE, dcb_config->num_tcs.pg_tcs);
+
+	sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_RX, refill);
+	sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_RX, bwgid);
+	sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_RX, tsa);
+	sxe_dcb_cee_max_credits_parse(dcb_config, max);
+
+	sxe_hw_dcb_rx_bw_alloc_configure(hw, refill, max,
+				bwgid, tsa, map, MAX_USER_PRIORITY);
+	return;
+}
+
+static void sxe_dcb_tx_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
+	u16 refill[MAX_TRAFFIC_CLASS] = {0};
+	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+
+	sxe_dcb_tx_pkt_buf_configure(hw, dcb_config->num_tcs.pg_tcs);
+
+	sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_TX, refill);
+	sxe_dcb_cee_max_credits_parse(dcb_config, max);
+	sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_TX, bwgid);
+	sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_TX, tsa);
+
+	sxe_hw_dcb_tx_desc_bw_alloc_configure(hw, refill, max, bwgid, tsa);
+	sxe_hw_dcb_tx_data_bw_alloc_configure(hw, refill, max,
+				bwgid, tsa, map, MAX_USER_PRIORITY);
+
+	return;
+}
+
+static void sxe_dcb_pfc_configure(struct sxe_hw *hw,
+					struct sxe_dcb_config *dcb_config,
+					u8 *map)
+{
+	u8 nb_tcs = dcb_config->num_tcs.pg_tcs;
+	u16 pbsize;
+	u8 i, pfc_en;
+	struct sxe_tc_config *tc;
+
+	pbsize = (u16)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
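+	/* XOFF when a TC buffer fills to 3/4, XON once it drains to 1/4 */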
+	for (i = 0; i < nb_tcs; i++) {
+		sxe_hw_fc_tc_high_water_mark_set(hw, i, (pbsize * 3) / 4);
+		sxe_hw_fc_tc_low_water_mark_set(hw, i, pbsize / 4);
+
+		tc = &dcb_config->tc_config[i];
+		tc->pfc_type = pfc_enabled_full;
+	}
+
+	sxe_dcb_cee_pfc_parse(dcb_config, map, &pfc_en);
+	if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS) {
+		pfc_en &= 0x0F;
+	}
+
+	sxe_hw_dcb_pfc_configure(hw, pfc_en, map, MAX_USER_PRIORITY);
+
+	return;
+}
+
+static void sxe_dcb_hw_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	u8 rx_configed = 0;
+	u8 tx_configed = 0;
+	u8 map[MAX_TRAFFIC_CLASS] = {0};
+	u32 max_frame = dev->data->mtu + SXE_ETH_DEAD_LOAD;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_dcb_rx_mq_mode_configure(dev, dcb_config, &rx_configed);
+	sxe_dcb_tx_mq_mode_configure(dev, dcb_config, &tx_configed);
+
+	sxe_dcb_up2tc_map_parse(dcb_config, DCB_PATH_RX, map);
+
+	sxe_dcb_bwg_percentage_alloc(dev, dcb_config, map);
+
+	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_TX);
+	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_RX);
+
+	if (rx_configed) {
+		sxe_dcb_rx_configure(dev, dcb_config, map);
+	}
+
+	if (tx_configed) {
+		sxe_dcb_tx_configure(dev, dcb_config, map);
+	}
+
+	sxe_dcb_tc_stats_configure(hw, dcb_config);
+
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
+		sxe_dcb_pfc_configure(hw, dcb_config, map);
+	}
+
+	return;
+}
+
+void sxe_dcb_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	struct sxe_dcb_config *dcb_cfg = &adapter->dcb_ctxt.config;
+	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if ((dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB) &&
+		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB) &&
+		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)) {
+		PMD_LOG_INFO(INIT, "dcb config skipped, mq_mode=0x%x is not a DCB mode",
+				(u8)dev_conf->rxmode.mq_mode);
+		goto l_end;
+	}
+
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES) {
+		PMD_LOG_INFO(INIT, "dcb config failed, nb_rx_queues=%u exceeds %u",
+			dev->data->nb_rx_queues, RTE_ETH_DCB_NUM_QUEUES);
+		goto l_end;
+	}
+
+	sxe_dcb_hw_configure(dev, dcb_cfg);
+
+l_end:
+	return;
+}
+
+s32 rte_pmd_sxe_tc_bw_set(u8 port,
+				u8 tc_num, u8 *bw_weight)
+{
+	struct sxe_adapter *adapter;
+	struct rte_eth_dev *dev;
+	struct sxe_dcb_config *dcb_config;
+	struct sxe_tc_config *tc;
+	struct rte_eth_conf *eth_conf;
+	struct sxe_bw_config *bw_conf;
+	u8 i;
+	u8 nb_tcs;
+	u16 sum;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		goto l_end;
+	}
+
+	if (tc_num > MAX_TRAFFIC_CLASS) {
+		PMD_LOG_ERR(DRV, "TCs should be no more than %d.",
+				MAX_TRAFFIC_CLASS);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	adapter = dev->data->dev_private;
+	dcb_config = &adapter->dcb_ctxt.config;
+	bw_conf = &adapter->dcb_ctxt.bw_config;
+	eth_conf = &dev->data->dev_conf;
+
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
+		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_32_POOLS) {
+			nb_tcs = RTE_ETH_4_TCS;
+		} else {
+			nb_tcs = RTE_ETH_8_TCS;
+		}
+	} else {
+		nb_tcs = 1;
+	}
+
+	if (nb_tcs != tc_num) {
+		PMD_LOG_ERR(DRV,
+			    "Weight should be set for all %d enabled TCs.",
+			    nb_tcs);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sum = 0;
+	for (i = 0; i < nb_tcs; i++) {
+		sum += bw_weight[i];
+	}
+
+	if (sum != 100) {
+		PMD_LOG_ERR(DRV,
+			    "The summary of the TC weight should be 100.");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	for (i = 0; i < nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_percent = bw_weight[i];
+	}
+
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_percent = 0;
+	}
+
+	bw_conf->tc_num = nb_tcs;
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_dcb.h b/drivers/net/sxe/pf/sxe_dcb.h
new file mode 100644
index 0000000000..accfc930af
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_dcb.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DCB_H__
+#define __SXE_DCB_H__
+#include <stdbool.h>
+
+#define PBA_STRATEGY_EQUAL       (0)    
+#define PBA_STRATEGY_WEIGHTED    (1)	
+#define MAX_BW_GROUP             8
+#define MAX_USER_PRIORITY        8
+#define SXE_DCB_MAX_TRAFFIC_CLASS        8
+
+enum sxe_dcb_strict_prio_type {
+	DCB_PRIO_NONE = 0, 
+	DCB_PRIO_GROUP,    
+	DCB_PRIO_LINK      
+};
+enum {
+	DCB_PATH_TX   =  0,
+	DCB_PATH_RX   =  1,
+	DCB_PATH_NUM  =  DCB_PATH_RX + 1,
+};
+
+enum sxe_dcb_tsa {
+	sxe_dcb_tsa_ets = 0,
+	sxe_dcb_tsa_group_strict_cee,
+	sxe_dcb_tsa_strict
+};
+
+enum sxe_dcb_pba_config {
+	SXE_DCB_PBA_EQUAL = PBA_STRATEGY_EQUAL,
+	SXE_DCB_PBA_80_48 = PBA_STRATEGY_WEIGHTED
+};
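+/*
+ * SXE_DCB_PBA_EQUAL splits the Rx packet buffer evenly per TC;
+ * SXE_DCB_PBA_80_48 is the weighted split (presumably 80 KB/48 KB
+ * pools, going by the name).
+ */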
+
+struct sxe_dcb_num_tcs {
+	u8 pg_tcs;	
+	u8 pfc_tcs;
+};
+
+struct sxe_tc_bw_alloc {
+	u8 bwg_id;		  
+	u8 bwg_percent;		  
+	u8 link_percent;	  
+	u8 up_to_tc_bitmap;	  
+	u16 data_credits_refill;  
+	u16 data_credits_max;	  
+	enum sxe_dcb_strict_prio_type prio_type; 
+};
+
+enum sxe_dcb_pfc_type {
+	pfc_disabled = 0,
+	pfc_enabled_full,
+	pfc_enabled_tx,
+	pfc_enabled_rx
+};
+
+struct sxe_tc_config {
+	struct sxe_tc_bw_alloc channel[DCB_PATH_NUM]; 
+	enum sxe_dcb_pfc_type  pfc_type; 
+
+	u16 desc_credits_max; 
+	u8 tc; 
+};
+
+struct sxe_dcb_config {
+	struct sxe_tc_config tc_config[SXE_DCB_MAX_TRAFFIC_CLASS];
+	struct sxe_dcb_num_tcs num_tcs;
+	u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP]; 
+	bool pfc_mode_enable;
+	bool round_robin_enable;
+
+	enum sxe_dcb_pba_config rx_pba_config;
+	bool vmdq_active;
+};
+
+struct sxe_bw_config {
+	u8 tc_num; 
+};
+
+struct sxe_dcb_context {
+	struct sxe_dcb_config config;
+	struct sxe_bw_config bw_config;
+};
+
+void sxe_dcb_init(struct rte_eth_dev *dev);
+
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 
+					struct rte_eth_pfc_conf *pfc_conf);
+
+s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info);
+
+void sxe_dcb_configure(struct rte_eth_dev *dev);
+
+void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_ethdev.c b/drivers/net/sxe/pf/sxe_ethdev.c
new file mode 100644
index 0000000000..00c6674f75
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ethdev.c
@@ -0,0 +1,1109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#include <ethdev_pci.h>
+#else
+#include <bus_pci_driver.h>
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#include <ethdev_pci.h>
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_pmd_sxe.h>
+#include <rte_alarm.h>
+
+#include "sxe_types.h"
+#include "sxe_logs.h"
+#include "sxe_compat_platform.h"
+#include "sxe_errno.h"
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_ethdev.h"
+#include "sxe_filter.h"
+#include "sxe_rx.h"
+#include "sxe_tx.h"
+#include "sxe_offload.h"
+#include "sxe_queue.h"
+#include "sxe_irq.h"
+#include "sxe_stats.h"
+#include "sxe_phy.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_flow_ctrl.h"
+#include "sxe_ptp.h"
+#include "sxe_cli.h"
+#include "drv_msg.h"
+#include "sxe_vf.h"
+#include "sxe_dcb.h"
+#include "sxe_version.h"
+#include "sxe_compat_version.h"
+#include <rte_string_fns.h>
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include "sxe_tm.h"
+#endif
+
+#define SXE_DEFAULT_MTU             1500
+#define SXE_ETH_HLEN                14
+#define SXE_ETH_FCS_LEN             4
+#define SXE_ETH_FRAME_LEN           1514
+
+#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD)
+
+STATIC const struct rte_eth_desc_lim sxe_rx_desc_lim = {
+	.nb_max = SXE_MAX_RING_DESC,
+	.nb_min = SXE_MIN_RING_DESC,
+	.nb_align = SXE_RX_DESC_RING_ALIGN,
+};
+
+STATIC const struct rte_eth_desc_lim sxe_tx_desc_lim = {
+	.nb_max = SXE_MAX_RING_DESC,
+	.nb_min = SXE_MIN_RING_DESC,
+	.nb_align = SXE_TX_DESC_RING_ALIGN,
+	.nb_seg_max = SXE_TX_MAX_SEG,
+	.nb_mtu_seg_max = SXE_TX_MAX_SEG,
+};
+
+s32 sxe_dev_reset(struct rte_eth_dev *eth_dev);
+
+STATIC s32 sxe_dev_configure(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Rx mode check */
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		PMD_LOG_DEBUG(INIT, "rx offload rss hash");
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
+
+	/* Multi queue mode check */
+	ret = sxe_mq_mode_check(dev);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "sxe mq mode check fails with %d.",
+			    ret);
+		goto l_end;
+	}
+
+	irq->action |= SXE_IRQ_LINK_UPDATE;
+
+	/* Use bulk allocation by default */
+	adapter->rx_batch_alloc_allowed = true;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	adapter->rx_vec_allowed = true;
+#endif
+
+l_end:
+	return ret;
+}
+
+static void sxe_txrx_start(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+
+	sxe_hw_rx_cap_switch_on(hw);
+
+	sxe_hw_mac_txrx_enable(hw);
+
+	return;
+}
+
+static s32 sxe_link_configure(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	bool link_up = false;
+	u32 conf_speeds;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	/* Disable loopback */
+	sxe_hw_loopback_switch(hw, false);
+
+	sxe_sfp_tx_laser_enable(adapter);
+
+	dev->data->dev_link.link_status = link_up;
+
+	/* Get the user-configured link speeds */
+	ret = sxe_conf_speed_get(dev, &conf_speeds);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "invalid link setting");
+		goto l_end;
+	}
+
+	if (adapter->phy_ctxt.sfp_info.multispeed_fiber) {
+		ret = sxe_multispeed_sfp_link_configure(dev, conf_speeds, false);
+	} else {
+		ret = sxe_sfp_link_configure(dev);
+	}
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link config failed, speed=%x",
+						conf_speeds);
+		ret = -EIO;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_loopback_pcs_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame)
+{
+	s32 ret;
+	sxe_pcs_cfg_s pcs_cfg;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	pcs_cfg.mode = mode;
+	pcs_cfg.mtu  = max_frame;
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_PCS_SDS_INIT,
+				(void *)&pcs_cfg, sizeof(pcs_cfg),
+				NULL, 0);
+	irq->to_pcs_init = false;
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret);
+		goto l_end;
+	}
+
+	/* Set flow control mac address */
+	sxe_fc_mac_addr_set(adapter);
+
+	LOG_INFO_BDF("mode:%u max_frame:0x%x loopback pcs init done.\n",
+		     mode, max_frame);
+l_end:
+	return ret;
+}
+
+static s32 sxe_loopback_configure(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	u32 max_frame = SXE_DEFAULT_MTU + SXE_ETH_DEAD_LOAD;
+
+	(void)sxe_sfp_tx_laser_disable(adapter);
+
+	/* Initialize sds and pcs modules */
+	ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO, max_frame);
+	if (ret) {
+		LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n",
+					SXE_PCS_MODE_10GBASE_KR_WO, ret);
+		goto l_out;
+	}
+
+	ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_LPBK_PHY_TX2RX, max_frame);
+	if (ret) {
+		LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n",
+					SXE_PCS_MODE_LPBK_PHY_TX2RX, ret);
+		goto l_out;
+	}
+
+	usleep_range(10000, 20000);
+
+	LOG_DEBUG_BDF("loopback configure success max_frame:0x%x.", max_frame);
+
+l_out:
+	return ret;
+}
+
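+/* Start sequence: sync fw time, reset and start the hw, restore filter,
+ * queue and irq state, then bring up the link (or loopback), flow control
+ * and dcb.
+ */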
+static s32 sxe_dev_start(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	struct sxe_macsec_context *macsec_ctxt = &adapter->macsec_ctxt;
+#endif
+
+	ret = sxe_fw_time_sync(hw);
+
+	sxe_wait_setup_link_complete(dev, 0);
+
+	rte_intr_disable(handle);
+
+	adapter->is_stopped = false;
+
+	ret = sxe_phy_init(adapter);
+	if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) {
+		PMD_LOG_ERR(INIT, "sfp is not sfp+, not supported, ret=%d\n", ret);
+		ret = -EPERM;
+		goto l_end;
+	} else if (ret) {
+		PMD_LOG_ERR(INIT, "phy init failed, ret=%d", ret);
+	}
+
+	ret = sxe_hw_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	sxe_hw_start(hw);
+
+	sxe_mac_addr_set(dev, &dev->data->mac_addrs[0]);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+
+	/* Configure virtualization */
+	sxe_vt_configure(dev);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	if (SXE_DEV_FNAV_CONF(dev)->mode != RTE_FDIR_MODE_NONE) {
+		ret = sxe_fnav_filter_configure(dev);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "fnav config fail.");
+			goto l_end;
+		}
+	}
+#endif
+
+	sxe_tx_configure(dev);
+
+	ret = sxe_rx_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "unable to initialize RX hardware");
+		goto l_error;
+	}
+
+	ret = sxe_irq_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "irq config fail.");
+		goto l_error;
+	}
+
+	sxe_vlan_filter_configure(dev);
+
+	sxe_queue_stats_map_restore(dev);
+
+	sxe_txrx_start(dev);
+
+	irq->to_pcs_init = true;
+
+	if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_DISABLED) {
+		sxe_link_configure(dev);
+	} else if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_ENABLED) {
+		sxe_loopback_configure(adapter);
+	} else {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "unsupported loopback mode:%u.",
+			    dev->data->dev_conf.lpbk_mode);
+		goto l_end;
+	}
+
+	sxe_link_update(dev, false);
+
+	ret = sxe_flow_ctrl_enable(dev);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "enable flow ctrl err");
+		goto l_error;
+	}
+
+	sxe_dcb_configure(dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	if (macsec_ctxt->offload_en) {
+		sxe_macsec_enable(dev, macsec_ctxt);
+	}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	sxe_filter_restore(dev);
+#endif
+
+l_end:
+	return ret;
+
+l_error:
+	PMD_LOG_ERR(INIT, "dev start err, ret=%d", ret);
+	sxe_irq_vec_free(handle);
+	sxe_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+	ret = -EIO;
+	goto l_end;
+}
+
+#ifdef DPDK_19_11_6
+static void sxe_dev_stop(struct rte_eth_dev *dev)
+#else
+static s32 sxe_dev_stop(struct rte_eth_dev *dev)
+#endif
+{
+	s32 ret = 0;
+	s32 num;
+	struct rte_eth_link link;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (adapter->is_stopped) {
+		LOG_ERROR("adapter[%p] is stopped", adapter);
+		goto l_end;
+	}
+
+	sxe_hw_all_irq_disable(hw);
+
+	sxe_sfp_tx_laser_disable(adapter);
+
+	sxe_wait_setup_link_complete(dev, 0);
+
+	ret = sxe_hw_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw reset failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	sxe_mac_addr_set(dev, &dev->data->mac_addrs[0]);
+
+	sxe_irq_disable(dev);
+
+	sxe_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+
+	dev->data->scattered_rx = 0;
+	dev->data->lro = 0;
+
+	memset(&link, 0, sizeof(link));
+	rte_eth_linkstatus_set(dev, &link);
+
+	adapter->rss_reta_updated = false;
+
+	dev->data->dev_started = 0;
+	adapter->is_stopped = true;
+
+	num = rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, dev);
+	if (num > 0) {
+		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	}
+
+	LOG_DEBUG_BDF("dev stop success.");
+
+l_end:
+#ifdef DPDK_19_11_6
+	return;
+#else
+	return ret;
+#endif
+}
+
+#ifdef DPDK_19_11_6
+static void sxe_dev_close(struct rte_eth_dev *dev)
+#else
+static s32 sxe_dev_close(struct rte_eth_dev *dev)
+#endif
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_INFO(INIT, "not primary, do nothing");
+		goto l_end;
+	}
+
+	sxe_hw_hdc_drv_status_set(hw, (u32)false);
+
+	ret = sxe_hw_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw reset failed, ret=%d", ret);
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+#endif
+
+#ifdef DPDK_19_11_6
+	sxe_dev_stop(dev);
+#else
+	ret = sxe_dev_stop(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "dev stop fail.(err:%d)", ret);
+	}
+#endif
+
+	sxe_queues_free(dev);
+
+	sxe_mac_addr_set(dev, &adapter->mac_filter_ctxt.def_mac_addr);
+	sxe_irq_uninit(dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_vt_uninit(dev);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	sxe_fnav_filter_uninit(dev);
+	sxe_fivetuple_filter_uninit(dev);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	sxe_tm_ctxt_uninit(dev);
+#endif
+
+l_end:
+#ifdef DPDK_19_11_6
+	return;
+#else
+	return ret;
+#endif
+}
+
+static s32 sxe_dev_infos_get(struct rte_eth_dev *dev,
+					struct rte_eth_dev_info *dev_info)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	dev_info->max_rx_queues = SXE_HW_TXRX_RING_NUM_MAX;
+	dev_info->max_tx_queues = SXE_HW_TXRX_RING_NUM_MAX;
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE) {
+			dev_info->max_tx_queues = SXE_HW_TX_NONE_MODE_Q_NUM;
+		}
+	}
+
+	dev_info->min_rx_bufsize = 1024;
+	dev_info->max_rx_pktlen = 15872;
+	dev_info->max_mac_addrs = SXE_UC_ENTRY_NUM_MAX;
+	dev_info->max_hash_mac_addrs = SXE_HASH_UC_NUM_MAX;
+	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->max_mtu = dev_info->max_rx_pktlen - SXE_ETH_OVERHEAD;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+
+	dev_info->rx_queue_offload_capa = sxe_rx_queue_offload_capa_get(dev);
+	dev_info->rx_offload_capa = (sxe_rx_port_offload_capa_get(dev) |
+				     dev_info->rx_queue_offload_capa);
+	dev_info->tx_queue_offload_capa = sxe_tx_queue_offload_capa_get(dev);
+	dev_info->tx_offload_capa = sxe_tx_port_offload_capa_get(dev);
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = SXE_DEFAULT_RX_PTHRESH,
+			.hthresh = SXE_DEFAULT_RX_HTHRESH,
+			.wthresh = SXE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = SXE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = SXE_DEFAULT_TX_PTHRESH,
+			.hthresh = SXE_DEFAULT_TX_HTHRESH,
+			.wthresh = SXE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = SXE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = SXE_DEFAULT_TX_RSBIT_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = sxe_rx_desc_lim;
+	dev_info->tx_desc_lim = sxe_tx_desc_lim;
+
+	dev_info->hash_key_size = SXE_HKEY_MAX_INDEX * sizeof(u32);
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
+	dev_info->flow_type_rss_offloads = SXE_RSS_OFFLOAD_ALL;
+
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+
+	dev_info->default_rxportconf.burst_size = 32;
+	dev_info->default_txportconf.burst_size = 32;
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_rxportconf.ring_size = 256;
+	dev_info->default_txportconf.ring_size = 256;
+
+	return 0;
+}
+
+static s32 sxe_mtu_set(struct rte_eth_dev *dev, u16 mtu)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	u32 frame_size = mtu + SXE_ETH_OVERHEAD;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	s32 ret;
+
+	ret = sxe_dev_infos_get(dev, &dev_info);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "get dev info fails with ret=%d", ret);
+		goto l_end;
+	}
+
+	if (mtu < RTE_ETHER_MTU || frame_size > dev_info.max_rx_pktlen) {
+		PMD_LOG_ERR(INIT, "mtu=%u < %u or frame_size=%u > max_rx_pktlen=%u",
+			mtu, RTE_ETHER_MTU, frame_size, dev_info.max_rx_pktlen);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
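+	/* Without scattered RX, the new frame size must still fit one mbuf data room. */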
+	if (dev_data->dev_started && !dev_data->scattered_rx &&
+		(frame_size + 2 * SXE_VLAN_TAG_SIZE >
+		dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+		PMD_LOG_ERR(INIT, "stop port first.");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	if (frame_size > SXE_ETH_MAX_LEN) {
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
+	} else {
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+	}
+
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+#endif
+	adapter->mtu = mtu;
+	PMD_LOG_NOTICE(DRV, "mtu set success, takes effect after port restart.");
+
+l_end:
+	return ret;
+}
+
+static int sxe_get_regs(struct rte_eth_dev *dev,
+	      struct rte_dev_reg_info *regs)
+{
+	s32 ret = 0;
+	u32 *data = regs->data;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 length = sxe_hw_all_regs_group_num_get();
+
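+	/* A first call with data == NULL only reports register count and width. */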
+	if (data == NULL) {
+		regs->length = length;
+		regs->width = sizeof(uint32_t);
+		goto l_end;
+	}
+
+	if ((regs->length == 0) || (regs->length == length)) {
+		sxe_hw_all_regs_group_read(hw, data);
+
+		goto l_end;
+	}
+
+	ret = -ENOTSUP;
+	LOG_ERROR("get regs: invalid param: regs_len=%u, regs->data=%p, "
+			"regs_offset=%u, regs_width=%u, regs_version=%u",
+			regs->length, regs->data,
+			regs->offset, regs->width,
+			regs->version);
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_led_reset(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	s32 resp;
+	struct sxe_led_ctrl ctrl;
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+
+	ctrl.mode = SXE_IDENTIFY_LED_RESET;
+	ctrl.duration = 0;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
+				(void *)&ctrl, sizeof(ctrl),
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led reset", ret);
+		ret = -EIO;
+	} else {
+		LOG_DEBUG_BDF("led reset success");
+	}
+
+	return ret;
+}
+
+static s32 sxe_led_ctrl(struct sxe_adapter *adapter, bool is_on)
+{
+	s32 ret;
+	s32 resp;
+	struct sxe_led_ctrl ctrl;
+	struct sxe_hw *hw = &adapter->hw;
+
+	ctrl.mode = is_on ? SXE_IDENTIFY_LED_ON : SXE_IDENTIFY_LED_OFF;
+	ctrl.duration = 0;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
+				(void *)&ctrl, sizeof(ctrl),
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led ctrl\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int sxe_led_on(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_led_ctrl(adapter, true);
+
+	return ret;
+}
+
+static int sxe_led_off(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_led_ctrl(adapter, false);
+
+	return ret;
+}
+
+static int sxe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+						size_t fw_size)
+{
+	int ret;
+	sxe_version_resp_s resp;
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_FW_VER_GET,
+				NULL, 0,
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("get version failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_end;
+	}
+
+	ret = snprintf(fw_version, fw_size, "%s", resp.fw_version);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
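+	/* snprintf() excludes the trailing '\0'; count it toward the needed size. */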
+	ret += 1;
+
+	if (fw_size >= (size_t)ret) {
+		ret = 0;
+	}
+
+l_end:
+	return ret;
+}
+
+static const struct eth_dev_ops sxe_eth_dev_ops = {
+	.dev_configure		= sxe_dev_configure,
+	.dev_start		= sxe_dev_start,
+	.dev_stop		= sxe_dev_stop,
+	.dev_close		= sxe_dev_close,
+	.dev_reset		= sxe_dev_reset,
+
+	.rx_queue_start		= sxe_rx_queue_start,
+	.rx_queue_stop		= sxe_rx_queue_stop,
+	.rx_queue_setup		= sxe_rx_queue_setup,
+	.rx_queue_release	= sxe_rx_queue_release,
+	.rxq_info_get		= sxe_rx_queue_info_get,
+	.dev_infos_get		= sxe_dev_infos_get,
+
+	.tx_queue_start		= sxe_tx_queue_start,
+	.tx_queue_stop		= sxe_tx_queue_stop,
+	.tx_queue_setup		= sxe_tx_queue_setup,
+	.tx_queue_release	= sxe_tx_queue_release,
+	.tx_done_cleanup	= sxe_tx_done_cleanup,
+	.txq_info_get		= sxe_tx_queue_info_get,
+
+	.promiscuous_enable	= sxe_promiscuous_enable,
+	.promiscuous_disable	= sxe_promiscuous_disable,
+	.allmulticast_enable	= sxe_allmulticast_enable,
+	.allmulticast_disable	= sxe_allmulticast_disable,
+
+	.rx_queue_intr_enable	= sxe_rx_queue_intr_enable,
+	.rx_queue_intr_disable	= sxe_rx_queue_intr_disable,
+
+	.mtu_set		= sxe_mtu_set,
+	.reta_update		= sxe_rss_reta_update,
+	.reta_query		= sxe_rss_reta_query,
+	.rss_hash_update	= sxe_rss_hash_update,
+	.rss_hash_conf_get	= sxe_rss_hash_conf_get,
+
+	.mac_addr_add		= sxe_mac_addr_add,
+	.mac_addr_remove	= sxe_mac_addr_remove,
+	.mac_addr_set		= sxe_mac_addr_set,
+
+	.uc_hash_table_set	= sxe_uc_hash_table_set,
+	.uc_all_hash_table_set	= sxe_uc_all_hash_table_set,
+
+	.set_mc_addr_list	= sxe_set_mc_addr_list,
+
+	.stats_get		= sxe_eth_stats_get,
+	.stats_reset		= sxe_stats_reset,
+
+	.xstats_get		= sxe_xstats_get,
+	.xstats_reset		= sxe_xstats_reset,
+	.xstats_get_by_id	= sxe_xstats_get_by_id,
+	.xstats_get_names	= sxe_xstats_names_get,
+	.xstats_get_names_by_id	= sxe_xstats_names_get_by_id,
+	.queue_stats_mapping_set = sxe_queue_stats_mapping_set,
+
+	.get_module_info	= sxe_get_module_info,
+	.get_module_eeprom	= sxe_get_module_eeprom,
+
+	.flow_ctrl_get		= sxe_flow_ctrl_get,
+	.flow_ctrl_set		= sxe_flow_ctrl_set,
+	.priority_flow_ctrl_set = sxe_priority_flow_ctrl_set,
+
+	.timesync_enable	= sxe_timesync_enable,
+	.timesync_disable	= sxe_timesync_disable,
+	.timesync_read_rx_timestamp = sxe_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = sxe_timesync_read_tx_timestamp,
+	.timesync_adjust_time	= sxe_timesync_adjust_time,
+	.timesync_read_time	= sxe_timesync_read_time,
+	.timesync_write_time	= sxe_timesync_write_time,
+
+	.vlan_filter_set      = sxe_vlan_filter_set,
+	.vlan_tpid_set        = sxe_vlan_tpid_set,
+	.vlan_offload_set     = sxe_vlan_offload_set,
+	.vlan_strip_queue_set = sxe_vlan_strip_queue_set,
+
+	.get_reg		= sxe_get_regs,
+
+	.dev_set_link_up	= sxe_dev_set_link_up,
+	.dev_set_link_down	= sxe_dev_set_link_down,
+	.dev_led_on		= sxe_led_on,
+	.dev_led_off		= sxe_led_off,
+	.link_update		= sxe_link_update,
+
+	.dev_supported_ptypes_get = sxe_dev_supported_ptypes_get,
+
+	.get_dcb_info		= sxe_get_dcb_info,
+
+	.set_queue_rate_limit	= sxe_queue_rate_limit_set,
+	.fw_version_get		= sxe_fw_version_get,
+
+#ifdef ETH_DEV_MIRROR_RULE
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	.mirror_rule_set        = sxe_mirror_rule_set,
+	.mirror_rule_reset      = sxe_mirror_rule_reset,
+#endif
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#ifdef ETH_DEV_OPS_FILTER_CTRL
+	.filter_ctrl		= sxe_filter_ctrl,
+#else
+	.flow_ops_get		= sxe_flow_ops_get,
+#endif
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	.tm_ops_get		= sxe_tm_ops_get,
+#endif
+
+#ifdef ETH_DEV_OPS_MONITOR
+	.get_monitor_addr	= sxe_monitor_addr_get,
+#endif
+#ifdef ETH_DEV_OPS_HAS_DESC_RELATE
+	.rx_queue_count	   = sxe_rx_queue_count,
+	.rx_descriptor_status = sxe_rx_descriptor_status,
+	.tx_descriptor_status = sxe_tx_descriptor_status,
+#ifdef ETH_DEV_RX_DESC_DONE
+	.rx_descriptor_done   = sxe_rx_descriptor_done,
+#endif
+#endif
+};
+
+static s32 sxe_hw_base_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+
+	hw->reg_base_addr = (void *)pci_dev->mem_resource[0].addr;
+	PMD_LOG_INFO(INIT, "eth_dev[%u] got reg_base_addr=%p",
+			eth_dev->data->port_id, hw->reg_base_addr);
+	hw->adapter = adapter;
+
+	strlcpy(adapter->name, pci_dev->device.name, sizeof(adapter->name) - 1);
+
+	sxe_hw_hdc_drv_status_set(hw, (u32)true);
+
+	ret = sxe_phy_init(adapter);
+	if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) {
+		PMD_LOG_ERR(INIT, "sfp is not sfp+, not supported, ret=%d\n", ret);
+		ret = -EPERM;
+		goto l_out;
+	} else if (ret) {
+		PMD_LOG_ERR(INIT, "phy init failed, ret=%d\n", ret);
+	}
+
+	ret = sxe_hw_reset(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_out;
+	} else {
+		sxe_hw_start(hw);
+	}
+
+	ret = sxe_mac_addr_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mac addr init fail, ret=%d", ret);
+		goto l_out;
+	}
+
+	sxe_hw_fc_base_init(hw);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+#endif
+
+l_out:
+	if (ret) {
+		sxe_hw_hdc_drv_status_set(hw, (u32)false);
+	}
+	return ret;
+}
+
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	__sxe_secondary_proc_init(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
+
+	return;
+}
+
+STATIC void sxe_ethdev_mac_mem_free(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+
+	if (eth_dev->data->mac_addrs) {
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+	}
+
+	if (eth_dev->data->hash_mac_addrs) {
+		rte_free(eth_dev->data->hash_mac_addrs);
+		eth_dev->data->hash_mac_addrs = NULL;
+	}
+
+	if (adapter->mac_filter_ctxt.uc_addr_table) {
+		rte_free(adapter->mac_filter_ctxt.uc_addr_table);
+		adapter->mac_filter_ctxt.uc_addr_table = NULL;
+	}
+
+	return;
+}
+
+#ifdef DPDK_19_11_6
+static void sxe_pf_init(struct sxe_adapter *adapter)
+{
+	memset(&adapter->vlan_ctxt, 0, sizeof(adapter->vlan_ctxt));
+	memset(&adapter->mac_filter_ctxt.uta_hash_table, 0, \
+		sizeof(adapter->mac_filter_ctxt.uta_hash_table));
+	memset(&adapter->dcb_ctxt.config, 0, sizeof(adapter->dcb_ctxt.config));
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	memset(&adapter->filter_ctxt, 0, sizeof(adapter->filter_ctxt));
+#endif
+
+	return;
+}
+#endif
+
+s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	struct sxe_filter_context *filter_info = &adapter->filter_ctxt;
+#endif
+
+	eth_dev->dev_ops = &sxe_eth_dev_ops;
+
+#ifndef ETH_DEV_OPS_HAS_DESC_RELATE
+	eth_dev->rx_queue_count       = sxe_rx_queue_count;
+	eth_dev->rx_descriptor_status = sxe_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = sxe_tx_descriptor_status;
+#ifdef ETH_DEV_RX_DESC_DONE
+	eth_dev->rx_descriptor_done   = sxe_rx_descriptor_done;
+#endif
+#endif
+
+	eth_dev->rx_pkt_burst = &sxe_pkts_recv;
+	eth_dev->tx_pkt_burst = &sxe_pkts_xmit_with_offload;
+	eth_dev->tx_pkt_prepare = &sxe_prep_pkts;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+#else
+		bool rx_vec_allowed = 0;
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &rx_vec_allowed);
+#endif
+		goto l_out;
+	}
+
+	rte_atomic32_clear(&adapter->link_thread_running);
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+#ifdef DPDK_19_11_6
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+	sxe_pf_init(adapter);
+#endif
+	ret = sxe_hw_base_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hw base init fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	sxe_led_reset(eth_dev);
+
+	sxe_dcb_init(eth_dev);
+
+	/* Reset stats info */
+	sxe_stats_reset(eth_dev);
+
+	sxe_queue_stats_map_reset(eth_dev);
+
+#ifdef SET_AUTOFILL_QUEUE_XSTATS
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_vt_init(eth_dev);
+#endif
+	adapter->mtu = RTE_ETHER_MTU;
+
+	sxe_irq_init(eth_dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	memset(filter_info, 0, sizeof(struct sxe_filter_context));
+	TAILQ_INIT(&filter_info->fivetuple_list);
+	ret = sxe_fnav_filter_init(eth_dev);
+	if (ret) {
+		sxe_ethdev_mac_mem_free(eth_dev);
+		sxe_irq_uninit(eth_dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+		sxe_vt_uninit(eth_dev);
+#endif
+		goto l_out;
+	}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	sxe_tm_ctxt_init(eth_dev);
+#endif
+
+	PMD_LOG_INFO(INIT, "sxe eth dev init done.");
+
+l_out:
+	return ret;
+}
+
+s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_INFO(INIT, "not primary process, do nothing");
+		goto l_end;
+	}
+
+	sxe_dev_close(eth_dev);
+
+	sxe_ethdev_mac_mem_free(eth_dev);
+
+l_end:
+	return 0;
+}
+
+s32 sxe_dev_reset(struct rte_eth_dev *eth_dev)
+{
+	s32 ret;
+
+	if (eth_dev->data->sriov.active) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "sriov active, reset of pf port[%u] is not supported",
+			eth_dev->data->port_id);
+		goto l_end;
+	}
+
+	ret = sxe_ethdev_uninit(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "port[%u] dev uninit failed",
+			eth_dev->data->port_id);
+		goto l_end;
+	}
+
+	ret = sxe_ethdev_init(eth_dev, NULL);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "port[%u] dev init failed",
+			eth_dev->data->port_id);
+	}
+
+l_end:
+	return ret;
+}
+
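+/* Example usage from an application (hypothetical port id 0):
+ *   if (rte_pmd_sxe_tx_loopback_set(0, 1) == 0)
+ *       printf("tx loopback enabled on port 0\n");
+ */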
+s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "port:%u not support tx loopback set.", port);
+		goto l_out;
+	}
+
+	if (on > 1) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "port:%u invalid user configure value:%u.",
+			    port, on);
+		goto l_out;
+	}
+
+	adapter = dev->data->dev_private;
+
+	sxe_hw_vt_pool_loopback_switch(&adapter->hw, on);
+
+	PMD_LOG_INFO(DRV, "port:%u set tx loopback:%u success.", port, on);
+
+l_out:
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_ethdev.h b/drivers/net/sxe/pf/sxe_ethdev.h
new file mode 100644
index 0000000000..f1165e0413
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ethdev.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_ETHDEV_H__
+#define __SXE_ETHDEV_H__
+
+#include "sxe.h"
+
+#define SXE_MMW_SIZE_DEFAULT        0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600
+
+#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD)
+
+#define SXE_HKEY_MAX_INDEX 10
+#define SXE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+#define SXE_ETH_DEAD_LOAD (SXE_ETH_OVERHEAD + 2 * SXE_VLAN_TAG_SIZE)
+
+struct sxe_adapter;
+s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused);
+
+s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev);
+
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_filter.c b/drivers/net/sxe/pf/sxe_filter.c
new file mode 100644
index 0000000000..e323af94f8
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_filter.c
@@ -0,0 +1,826 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+
+#include "sxe_filter.h"
+#include "sxe_logs.h"
+#include "sxe.h"
+#include "sxe_queue.h"
+#include "drv_msg.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_cli.h"
+#include "sxe_compat_version.h"
+
+#define PF_POOL_INDEX(p)        (p)
+
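+/* One bit per RX queue tracks VLAN strip state, 32 queues per u32 word. */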
+#define SXE_STRIP_BITMAP_SET(h, q) \
+	do { \
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] |= 1 << bit;\
+	} while (0)
+
+#define SXE_STRIP_BITMAP_CLEAR(h, q) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] &= ~(1 << bit);\
+	} while (0)
+
+#define SXE_STRIP_BITMAP_GET(h, q, r) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(r) = (h)->strip_bitmap[idx] >> bit & 1;\
+	} while (0)
+
+static s32 sxe_get_mac_addr_from_fw(struct sxe_adapter *adapter,
+						u8 *mac_addr)
+{
+	s32 ret;
+	struct sxe_default_mac_addr_resp mac;
+	struct sxe_hw *hw = &adapter->hw;
+
+	/* Get default mac address from firmware */
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_R0_MAC_GET, NULL, 0,
+				(void *)&mac, sizeof(mac));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:mac addr get\n", ret);
+		ret = -EIO;
+	} else {
+		memcpy(mac_addr, mac.addr, SXE_MAC_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+static void sxe_default_mac_addr_get(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	struct rte_ether_addr mac_addr = { {0} };
+
+	ret = sxe_get_mac_addr_from_fw(adapter, mac_addr.addr_bytes);
+	if (ret || !rte_is_valid_assigned_ether_addr(&mac_addr)) {
+		LOG_DEBUG("invalid default mac addr:"MAC_FMT" result:%d\n",
+			      MAC_ADDR(mac_addr.addr_bytes), ret);
+		goto l_out;
+	}
+
+	LOG_DEBUG("default mac addr = "MAC_FMT"\n", MAC_ADDR(mac_addr.addr_bytes));
+	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.def_mac_addr);
+	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
+
+l_out:
+	return;
+}
+
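+/* Software shadow of the hw RAR table: claim the first free slot and
+ * record the caller's index so a later delete can find it.
+ */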
+static u8 sxe_sw_uc_entry_add(struct sxe_adapter *adapter, u8 index,
+				u8 *mac_addr)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used) {
+			uc_table[i].used = true;
+			uc_table[i].rar_idx = i;
+			uc_table[i].original_index = index;
+			uc_table[i].type = SXE_PF;
+			rte_memcpy(uc_table[i].addr, mac_addr, SXE_MAC_ADDR_LEN);
+			break;
+		}
+	}
+
+	return i;
+}
+
+static u8 sxe_sw_uc_entry_del(struct sxe_adapter *adapter, u8 index)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used || (uc_table[i].type != SXE_PF)) {
+			continue;
+		}
+
+		if (uc_table[i].original_index == index) {
+			uc_table[i].used = false;
+			break;
+		}
+	}
+
+	return i;
+}
+
+u8 sxe_sw_uc_entry_vf_add(struct sxe_adapter *adapter,
+				u8 vf_idx, u8 *mac_addr, bool macvlan)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used) {
+			uc_table[i].used = true;
+			uc_table[i].rar_idx = i;
+			uc_table[i].vf_idx = vf_idx;
+			uc_table[i].type = macvlan ? SXE_VF_MACVLAN : SXE_VF;
+			rte_memcpy(uc_table[i].addr, mac_addr, SXE_MAC_ADDR_LEN);
+			break;
+		}
+	}
+
+	return i;
+}
+
+s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
+					bool macvlan)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used || (uc_table[i].type == SXE_PF)) {
+			continue;
+		}
+
+		if (uc_table[i].vf_idx == vf_idx) {
+			uc_table[i].used = false;
+			sxe_hw_uc_addr_del(&adapter->hw, i);
+			if (!macvlan) {
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+s32 sxe_mac_addr_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u8 rar_idx;
+
+	eth_dev->data->mac_addrs = rte_zmalloc("sxe",
+				RTE_ETHER_ADDR_LEN * SXE_UC_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		LOG_ERROR("mac addr allocate %u B fail.",
+			RTE_ETHER_ADDR_LEN * SXE_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	eth_dev->data->hash_mac_addrs = rte_zmalloc("sxe",
+				RTE_ETHER_ADDR_LEN * SXE_UTA_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->hash_mac_addrs == NULL) {
+		LOG_ERROR("uta table allocate %u B fail.",
+			RTE_ETHER_ADDR_LEN * SXE_UTA_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_free_mac_addr;
+	}
+
+	adapter->mac_filter_ctxt.uc_addr_table = rte_zmalloc("sxe",
+		sizeof(struct sxe_uc_addr_table) * SXE_UC_ENTRY_NUM_MAX, 0);
+	if (adapter->mac_filter_ctxt.uc_addr_table == NULL) {
+		LOG_ERROR("uc table allocate %lu B fail.",
+			sizeof(struct sxe_uc_addr_table) * SXE_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_free_hash_mac;
+	}
+
+	sxe_default_mac_addr_get(adapter);
+
+	rte_ether_addr_copy(&adapter->mac_filter_ctxt.def_mac_addr,
+				eth_dev->data->mac_addrs);
+
+	rte_ether_addr_copy(&adapter->mac_filter_ctxt.def_mac_addr,
+				&adapter->mac_filter_ctxt.cur_mac_addr);
+
+	rar_idx = sxe_sw_uc_entry_add(adapter, 0, adapter->mac_filter_ctxt.def_mac_addr.addr_bytes);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_uc_addr_add(hw, rar_idx,
+			adapter->mac_filter_ctxt.def_mac_addr.addr_bytes,
+			sxe_vf_num_get(eth_dev));
+#else
+	sxe_hw_uc_addr_add(hw, rar_idx,
+		adapter->mac_filter_ctxt.def_mac_addr.addr_bytes,
+		0);
+#endif
+
+l_out:
+	return ret;
+
+l_free_hash_mac:
+	rte_free(eth_dev->data->hash_mac_addrs);
+	eth_dev->data->hash_mac_addrs = NULL;
+
+l_free_mac_addr:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	goto l_out;
+}
+
+s32 sxe_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl &= (~SXE_FCTRL_UPE);
+	if (dev->data->all_multicast == 1) {
+		flt_ctrl |= SXE_FCTRL_MPE;
+	} else {
+		flt_ctrl &= (~SXE_FCTRL_MPE);
+	}
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl |= SXE_FCTRL_MPE;
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	if (dev->data->promiscuous == 1) {
+		PMD_LOG_DEBUG(DRV, "promiscuous is enabled, allmulticast must stay enabled.\n");
+		goto l_out;
+	}
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl &= (~SXE_FCTRL_MPE);
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+l_out:
+	return 0;
+}
+
+s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     u32 index, u32 pool)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u8 rar_idx = sxe_sw_uc_entry_add(adapter, index, mac_addr->addr_bytes);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	ret = sxe_hw_uc_addr_add(hw, rar_idx,
+					mac_addr->addr_bytes, pool);
+#else
+	ret = sxe_hw_uc_addr_add(hw, rar_idx,
+				mac_addr->addr_bytes, sxe_vf_num_get(dev));
+#endif
+	if (ret) {
+		LOG_ERROR("rar_idx:%u pool:%u mac_addr:"MAC_FMT
+				" add fail.(err:%d)",
+				rar_idx, pool,
+				MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "rar_idx:%u pool:%u mac_addr:"MAC_FMT" add done",
+			rar_idx, pool,
+			MAC_ADDR(mac_addr->addr_bytes));
+
+l_out:
+	return ret;
+}
+
+void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u8 rar_idx = sxe_sw_uc_entry_del(adapter, index);
+
+	ret = sxe_hw_uc_addr_del(hw, rar_idx);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "rar_idx:%u remove fail.(err:%d)",
+				rar_idx, ret);
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "rar_idx:%u mac_addr:"MAC_FMT" remove done",
+			rar_idx,
+			MAC_ADDR(&dev->data->mac_addrs[rar_idx]));
+
+l_out:
+	return;
+}
+
+void sxe_fc_mac_addr_set(struct sxe_adapter *adapter)
+{
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_hw_fc_mac_addr_set(hw,
+			adapter->mac_filter_ctxt.fc_mac_addr.addr_bytes);
+
+	return;
+}
+
+s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr)
+{
+	u8 pool_idx;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	sxe_mac_addr_remove(dev, 0);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	pool_idx = pci_dev->max_vfs;
+#else
+	pool_idx = 0;
+#endif
+
+	sxe_mac_addr_add(dev, mac_addr, 0, pool_idx);
+	rte_ether_addr_copy(mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
+
+	sxe_fc_mac_addr_set(adapter);
+
+	PMD_LOG_INFO(DRV, "pool:%u mac_addr:"MAC_FMT" set to be cur mac addr done",
+			pool_idx,
+			MAC_ADDR(mac_addr));
+
+	return 0;
+}
+
+static void sxe_hash_mac_addr_parse(u8 *mac_addr, u16 *reg_idx,
+						u16 *bit_idx)
+{
+	u16 extracted;
+
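+	/* Hash on 12 bits of the MAC address, then split the result into
+	 * a filter register index and a bit index within that register.
+	 */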
+	extracted = ((mac_addr[4] >> 4) |
+			(((u16)mac_addr[5]) << 4));
+
+	extracted &= SXE_MAC_ADDR_EXTRACT_MASK;
+
+	*reg_idx = (extracted >> SXE_MAC_ADDR_SHIFT) & SXE_MAC_ADDR_REG_MASK;
+
+	*bit_idx = extracted & SXE_MAC_ADDR_BIT_MASK;
+
+	PMD_LOG_DEBUG(DRV, "mac_addr:"MAC_FMT" hash reg_idx:%u bit_idx:%u",
+			 MAC_ADDR(mac_addr), *reg_idx, *bit_idx);
+
+	return;
+}
+
+s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr, u8 on)
+{
+	u16 bit_idx;
+	u16 reg_idx;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 value;
+	s32 ret = 0;
+
+	sxe_hash_mac_addr_parse(mac_addr->addr_bytes, &reg_idx, &bit_idx);
+
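+	/* Nothing to do when the requested state matches the cached bitmap. */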
+	value = (mac_filter->uta_hash_table[reg_idx] >> bit_idx) & 0x1;
+	if (value == on) {
+		goto l_out;
+	}
+
+	value = sxe_hw_uta_hash_table_get(hw, reg_idx);
+	if (on) {
+		mac_filter->uta_used_count++;
+		value |= (0x1 << bit_idx);
+		mac_filter->uta_hash_table[reg_idx] |= (0x1 << bit_idx);
+	} else {
+		mac_filter->uta_used_count--;
+		value &= ~(0x1 << bit_idx);
+		mac_filter->uta_hash_table[reg_idx] &= ~(0x1 << bit_idx);
+	}
+
+	sxe_hw_uta_hash_table_set(hw, reg_idx, value);
+
+	PMD_LOG_INFO(DRV, "mac_addr:"MAC_FMT" uta reg_idx:%u bit_idx:%u"
+			  " %s done, uta_used_count:%u",
+			 MAC_ADDR(mac_addr->addr_bytes),
+			 reg_idx, bit_idx,
+			 on ? "set" : "clear",
+			 mac_filter->uta_used_count);
+
+l_out:
+	return ret;
+}
+
+s32 sxe_uc_all_hash_table_set(struct rte_eth_dev *dev, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 value;
+	u8 i;
+
+	value = on ? (~0) : 0;
+
+	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) {
+		mac_filter->uta_hash_table[i] = value;
+		sxe_hw_uta_hash_table_set(hw, i, value);
+	}
+
+	PMD_LOG_INFO(DRV, "uta table all entry %s done.",
+			  on ? "set" : "clear");
+
+	return 0;
+}
+
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 i;
+	u16 bit_idx;
+	u16 reg_idx;
+
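+	/* Rebuild the MTA hash table from scratch, then flush it to hardware. */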
+	memset(&mac_filter->mta_hash_table, 0, sizeof(mac_filter->mta_hash_table));
+	for (i = 0; i < nb_mc_addr; i++) {
+		sxe_hash_mac_addr_parse(mc_addr_list->addr_bytes, &reg_idx, &bit_idx);
+		mc_addr_list++;
+		mac_filter->mta_hash_table[reg_idx] |= (0x1 << bit_idx);
+	}
+
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
+		sxe_hw_mta_hash_table_set(hw, i, mac_filter->mta_hash_table[i]);
+	}
+
+	if (nb_mc_addr) {
+		sxe_hw_mc_filter_enable(hw);
+	}
+
+	PMD_LOG_INFO(DRV, "mc addr list cnt:%u set to mta done.", nb_mc_addr);
+
+	return 0;
+}
+
+s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u8 reg_idx;
+	u8 bit_idx;
+	u32 value;
+
+	reg_idx = (vlan_id >> SXE_VLAN_ID_SHIFT) & SXE_VLAN_ID_REG_MASK;
+	bit_idx = (vlan_id & SXE_VLAN_ID_BIT_MASK);
+
+	value = sxe_hw_vlan_filter_array_read(hw, reg_idx);
+	if (on) {
+		value |= (1 << bit_idx);
+	} else {
+		value &= ~(1 << bit_idx);
+	}
+
+	sxe_hw_vlan_filter_array_write(hw, reg_idx, value);
+
+	vlan_ctxt->vlan_hash_table[reg_idx] = value;
+
+	PMD_LOG_INFO(DRV, "vlan_id:0x%x on:%d set done", vlan_id, on);
+
+	return 0;
+}
+
+static void sxe_vlan_tpid_write(struct sxe_hw *hw, u16 tpid)
+{
+	u32 value;
+
+	value = sxe_hw_vlan_type_get(hw);
+	value = (value & (~SXE_VLNCTRL_VET)) | tpid;
+	sxe_hw_vlan_type_set(hw, value);
+
+	value = sxe_hw_txctl_vlan_type_get(hw);
+	value = (value & (~SXE_DMATXCTL_VT_MASK)) |
+		(tpid << SXE_DMATXCTL_VT_SHIFT);
+	sxe_hw_txctl_vlan_type_set(hw, value);
+
+	return;
+}
+
+s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+		    enum rte_vlan_type vlan_type, u16 tpid)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u32 txctl;
+	bool double_vlan;
+
+	txctl = sxe_hw_txctl_vlan_type_get(hw);
+	double_vlan = txctl & SXE_DMATXCTL_GDV;
+
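+	/* The inner TPID is only configurable with global double vlan (QinQ) enabled. */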
+	switch (vlan_type) {
+	case RTE_ETH_VLAN_TYPE_INNER:
+		if (double_vlan) {
+			sxe_vlan_tpid_write(hw, tpid);
+		} else {
+			ret = -ENOTSUP;
+			PMD_LOG_ERR(DRV, "inner vlan is not supported without "
+				     "global double vlan.");
+		}
+		break;
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		if (double_vlan) {
+			sxe_hw_vlan_ext_type_set(hw,
+				(tpid << SXE_EXVET_VET_EXT_SHIFT));
+		} else {
+			sxe_vlan_tpid_write(hw, tpid);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "Unsupported VLAN type %d", vlan_type);
+		break;
+	}
+
+	PMD_LOG_INFO(DRV, "double_vlan:%d vlan_type:%d tpid:0x%x set done ret:%d",
+			   double_vlan, vlan_type, tpid, ret);
+	return ret;
+}
+
+static void sxe_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bool on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+
+	sxe_rx_queue_s *rxq;
+
+	if ((queue_idx >= SXE_HW_TXRX_RING_NUM_MAX) ||
+	    (queue_idx >= dev->data->nb_rx_queues)) {
+		PMD_LOG_ERR(DRV, "invalid queue idx:%u, exceeds max"
+			   " queue number:%u or nb_rx_queues:%u.",
+			   queue_idx, SXE_HW_TXRX_RING_NUM_MAX,
+			   dev->data->nb_rx_queues);
+		goto l_out;
+	}
+
+	if (on) {
+		SXE_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
+	} else {
+		SXE_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
+	}
+
+	rxq = dev->data->rx_queues[queue_idx];
+
+	if (on) {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	} else {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+
+	PMD_LOG_INFO(DRV, "queue idx:%u vlan strip on:%d set bitmap and offload done.",
+		     queue_idx, on);
+
+l_out:
+	return;
+}
+
+void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	sxe_rx_queue_s *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			on = true;
+		} else {
+			on = false;
+		}
+		sxe_hw_vlan_tag_strip_switch(hw, i, on);
+
+		sxe_vlan_strip_bitmap_set(dev, i, on);
+	}
+
+	return;
+}
+
+static void sxe_vlan_filter_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_vlan_filter_switch(hw, 0);
+
+	return;
+}
+
+static void sxe_vlan_filter_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u32 vlan_ctl;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	vlan_ctl = sxe_hw_vlan_type_get(hw);
+	vlan_ctl &= ~SXE_VLNCTRL_CFI;
+	vlan_ctl |= SXE_VLNCTRL_VFE;
+	sxe_hw_vlan_type_set(hw, vlan_ctl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		sxe_hw_vlan_filter_array_write(hw, i, vlan_ctxt->vlan_hash_table[i]);
+	}
+
+	return;
+}
+
+static void sxe_vlan_extend_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ctrl = sxe_hw_txctl_vlan_type_get(hw);
+	ctrl &= ~SXE_DMATXCTL_GDV;
+	sxe_hw_txctl_vlan_type_set(hw, ctrl);
+
+	ctrl = sxe_hw_ext_vlan_get(hw);
+	ctrl &= ~SXE_EXTENDED_VLAN;
+	sxe_hw_ext_vlan_set(hw, ctrl);
+
+	return;
+}
+
+static void sxe_vlan_extend_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ctrl = sxe_hw_txctl_vlan_type_get(hw);
+	ctrl |= SXE_DMATXCTL_GDV;
+	sxe_hw_txctl_vlan_type_set(hw, ctrl);
+
+	ctrl = sxe_hw_ext_vlan_get(hw);
+	ctrl |= SXE_EXTENDED_VLAN;
+	sxe_hw_ext_vlan_set(hw, ctrl);
+
+	return;
+}
+
+static s32 sxe_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		sxe_vlan_strip_switch_set(dev);
+	}
+
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			sxe_vlan_filter_enable(dev);
+		} else {
+			sxe_vlan_filter_disable(dev);
+		}
+	}
+
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
+			sxe_vlan_extend_enable(dev);
+		} else {
+			sxe_vlan_extend_disable(dev);
+		}
+	}
+
+	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
+		     " vlan offload set done", mask, rxmode->offloads);
+
+	return 0;
+}
+
+s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask)
+{
+	s32 mask;
+	s32 ret = 0;
+
+	if (vlan_mask & RTE_ETH_VLAN_STRIP_MASK) {
+		PMD_LOG_WARN(DRV, "vlan strip is always on and cannot be changed.");
+		ret = -1;
+		goto l_out;
+	}
+	mask = vlan_mask & ~RTE_ETH_VLAN_STRIP_MASK;
+
+	sxe_vlan_offload_configure(dev, mask);
+
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%x set done.", vlan_mask);
+
+l_out:
+	return ret;
+}
+
+void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
+{
+	UNUSED(dev);
+	UNUSED(on);
+	PMD_LOG_WARN(DRV, "queue:%u vlan strip is always on and cannot be changed.", queue);
+
+	return;
+}
+
+void sxe_vlan_filter_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 vlan_mask;
+	u32 vlan_ctl;
+
+	vlan_mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		    RTE_ETH_VLAN_EXTEND_MASK;
+	sxe_vlan_offload_configure(dev, vlan_mask);
+
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+		vlan_ctl = sxe_hw_vlan_type_get(hw);
+		vlan_ctl |= SXE_VLNCTRL_VFE;
+		sxe_hw_vlan_type_set(hw, vlan_ctl);
+		LOG_DEBUG_BDF("vmdq mode enable vlan filter done.");
+	}
+
+	return;
+}
diff --git a/drivers/net/sxe/pf/sxe_filter.h b/drivers/net/sxe/pf/sxe_filter.h
new file mode 100644
index 0000000000..a541dce586
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_filter.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_FILTER_H__
+#define __SXE_FILTER_H__
+
+#include <rte_ether.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_hw.h"
+
+struct sxe_adapter;
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+			   ((u8 *)(x))[2], ((u8 *)(x))[3], \
+			   ((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#define BYTE_BIT_NUM   8
+
+#define SXE_VLAN_STRIP_BITMAP_SIZE    \
+	RTE_ALIGN((SXE_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM)), \
+		sizeof(u32))
+
+struct sxe_vlan_context {
+	u32 vlan_hash_table[SXE_VFT_TBL_SIZE];
+	u32 strip_bitmap[SXE_VLAN_STRIP_BITMAP_SIZE];
+	u32  vlan_table_size;
+};
+
+enum sxe_uc_addr_src_type {
+	SXE_PF = 0,
+	SXE_VF,
+	SXE_VF_MACVLAN
+};
+
+struct sxe_uc_addr_table {
+	u8 rar_idx;
+	u8 vf_idx;
+	u8 type;
+	u8 original_index;
+	bool used;
+	u8 addr[SXE_MAC_ADDR_LEN];
+};
+
+struct sxe_mac_filter_context {
+	struct rte_ether_addr def_mac_addr;
+	struct rte_ether_addr cur_mac_addr;
+
+	struct rte_ether_addr fc_mac_addr;
+
+	u32 uta_used_count;
+	u32 uta_hash_table[SXE_UTA_ENTRY_NUM_MAX];
+
+	u32 mta_hash_table[SXE_MTA_ENTRY_NUM_MAX];
+	struct sxe_uc_addr_table *uc_addr_table;
+};
+
+s32 sxe_mac_addr_init(struct rte_eth_dev *eth_dev);
+
+s32 sxe_promiscuous_enable(struct rte_eth_dev *dev);
+
+s32 sxe_promiscuous_disable(struct rte_eth_dev *dev);
+
+s32 sxe_allmulticast_enable(struct rte_eth_dev *dev);
+
+s32 sxe_allmulticast_disable(struct rte_eth_dev *dev);
+
+s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     u32 rar_idx, u32 pool);
+
+void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 rar_idx);
+
+s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr);
+
+s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr, u8 on);
+
+s32 sxe_uc_all_hash_table_set(struct rte_eth_dev *dev, u8 on);
+
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr);
+
+s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on);
+
+s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+		    enum rte_vlan_type vlan_type, u16 tpid);
+
+s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask);
+
+void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on);
+
+void sxe_vlan_filter_configure(struct rte_eth_dev *dev);
+
+void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev);
+
+void sxe_fc_mac_addr_set(struct sxe_adapter *adapter);
+
+u8 sxe_sw_uc_entry_vf_add(struct sxe_adapter *adapter,
+				u8 vf_idx, u8 *mac_addr, bool macvlan);
+
+s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
+					bool macvlan);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_flow_ctrl.c b/drivers/net/sxe/pf/sxe_flow_ctrl.c
new file mode 100644
index 0000000000..33c4ffeb9d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_flow_ctrl.c
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_flow_ctrl.h"
+#include "sxe_phy.h"
+#include "sxe_compat_version.h"
+
+s32 sxe_flow_ctrl_enable(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_fc_enable(adapter);
+	PMD_LOG_DEBUG(INIT, "fc enable");
+
+	return ret;
+}
+
+s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev,
+					struct rte_eth_fc_conf *fc_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	bool rx_pause_on;
+	bool tx_pause_on;
+
+	fc_conf->pause_time = sxe_hw_fc_pause_time_get(hw);
+	fc_conf->high_water = sxe_hw_fc_tc_high_water_mark_get(hw, 0);
+	fc_conf->low_water = sxe_hw_fc_tc_low_water_mark_get(hw, 0);
+	fc_conf->send_xon = sxe_hw_fc_send_xon_get(hw);
+	fc_conf->autoneg = !sxe_hw_is_fc_autoneg_disabled(hw);
+
+	fc_conf->mac_ctrl_frame_fwd = 1;
+
+	sxe_hw_fc_status_get(hw, &rx_pause_on, &tx_pause_on);
+
+	if (rx_pause_on && tx_pause_on) {
+		fc_conf->mode = RTE_ETH_FC_FULL;
+	} else if (rx_pause_on) {
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
+	} else if (tx_pause_on) {
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
+	} else {
+		fc_conf->mode = RTE_ETH_FC_NONE;
+	}
+
+	return 0;
+}
+
+s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev,
+					struct rte_eth_fc_conf *fc_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u32 rx_buf_size;
+	u32 max_high_water;
+	enum sxe_fc_mode rte_2_sxe_fcmode[] = {
+		SXE_FC_NONE,
+		SXE_FC_RX_PAUSE,
+		SXE_FC_TX_PAUSE,
+		SXE_FC_FULL,
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	rx_buf_size = sxe_hw_rx_pkt_buf_size_get(hw, 0);
+	PMD_LOG_DEBUG(INIT, "Rx packet buffer size = 0x%x", rx_buf_size);
+
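+	/* Upper bound for the high watermark: RX packet buffer minus one max frame, in KB. */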
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> SXE_RX_PKT_BUF_SIZE_SHIFT;
+	if ((fc_conf->high_water > max_high_water) ||
+		(fc_conf->high_water < fc_conf->low_water)) {
+		PMD_LOG_ERR(INIT, "Invalid high/low water setup value in KB");
+		PMD_LOG_ERR(INIT, "high_water must be <= 0x%x", max_high_water);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sxe_hw_fc_requested_mode_set(hw, rte_2_sxe_fcmode[fc_conf->mode]);
+	sxe_hw_fc_pause_time_set(hw, fc_conf->pause_time);
+	sxe_hw_fc_tc_high_water_mark_set(hw, 0, fc_conf->high_water);
+	sxe_hw_fc_tc_low_water_mark_set(hw, 0, fc_conf->low_water);
+	sxe_hw_fc_send_xon_set(hw, fc_conf->send_xon);
+	sxe_hw_fc_autoneg_disable_set(hw, !fc_conf->autoneg);
+
+	ret = sxe_flow_ctrl_enable(dev);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "sxe_flow_ctrl_enable = 0x%x", ret);
+		ret = -EIO;
+	}
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_flow_ctrl.h b/drivers/net/sxe/pf/sxe_flow_ctrl.h
new file mode 100644
index 0000000000..0be5d1aaaf
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_flow_ctrl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_FLOW_CTRL_H__
+#define __SXE_FLOW_CTRL_H__
+
+s32 sxe_flow_ctrl_enable(struct rte_eth_dev *dev);
+
+s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev,
+					struct rte_eth_fc_conf *fc_conf);
+
+s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev,
+					struct rte_eth_fc_conf *fc_conf);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_irq.c b/drivers/net/sxe/pf/sxe_irq.c
new file mode 100644
index 0000000000..90c1e168f8
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_irq.c
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_alarm.h>
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_interrupts.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <eal_interrupts.h>
+#else
+#include <rte_pci.h>
+#include <bus_pci_driver.h>
+#include <eal_interrupts.h>
+#endif
+
+#include <rte_malloc.h>
+
+#include "sxe_irq.h"
+#include "sxe_logs.h"
+#include "sxe_regs.h"
+#include "sxe_hw.h"
+#include "sxe.h"
+#include "sxe_phy.h"
+#include "sxe_queue.h"
+#include "sxe_errno.h"
+#include "sxe_compat_version.h"
+#include "sxe_vf.h"
+
+#define SXE_LINK_DOWN_TIMEOUT 4000
+#define SXE_LINK_UP_TIMEOUT   1000
+
+#define SXE_IRQ_MAILBOX          (u32)(1 << 1)
+#define SXE_IRQ_MACSEC           (u32)(1 << 2)
+
+#define SXE_LINK_UP_TIME         90
+
+#define SXE_MISC_VEC_ID          RTE_INTR_VEC_ZERO_OFFSET
+
+#define SXE_RX_VEC_BASE          RTE_INTR_VEC_RXTX_OFFSET
+
+static void sxe_link_info_output(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_eth_link link;
+
+	rte_eth_linkstatus_get(dev, &link);
+
+	PMD_LOG_DEBUG(DRV, "port:%d link status:%s speed %u Mbps %s",
+				(u16)(dev->data->port_id),
+				link.link_status ? "up" : "down",
+				link.link_speed,
+				(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
+				"full-duplex" : "half-duplex");
+
+	PMD_LOG_DEBUG(DRV, "pci dev: " PCI_PRI_FMT,
+				pci_dev->addr.domain,
+				pci_dev->addr.bus,
+				pci_dev->addr.devid,
+				pci_dev->addr.function);
+
+	return;
+}
+
+void sxe_event_irq_delayed_handler(void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 eicr;
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+
+	sxe_hw_all_irq_disable(hw);
+
+	eicr = sxe_hw_irq_cause_get(hw);
+	PMD_LOG_DEBUG(DRV, "delay handler eicr:0x%x action:0x%x",
+			   eicr, irq->action);
+
+	eicr &= 0xFFFF0000;
+	if (rte_atomic32_read(&adapter->link_thread_running) && (eicr & SXE_EICR_LSC)) {
+		eicr &= ~SXE_EICR_LSC;
+		PMD_LOG_DEBUG(DRV, "delay handler keep lsc irq");
+	}
+	sxe_hw_pending_irq_write_clear(hw, eicr);
+
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	if (eicr & SXE_EICR_MAILBOX) {
+		sxe_mbx_irq_handler(eth_dev);
+	}
+#endif
+
+	if (irq->action & SXE_IRQ_LINK_UPDATE) {
+		sxe_link_update(eth_dev, 0);
+		sxe_link_info_output(eth_dev);
+		sxe_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+		irq->action &= ~SXE_IRQ_LINK_UPDATE;
+	}
+
+	irq->enable_mask |= SXE_EIMS_LSC;
+	PMD_LOG_DEBUG(DRV, "irq enable mask:0x%x", irq->enable_mask);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	rte_intr_ack(intr_handle);
+
+	return;
+}
+
+static void sxe_lsc_irq_handler(struct rte_eth_dev *eth_dev)
+{
+	struct rte_eth_link link;
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u64 timeout;
+	bool link_up;
+
+	rte_eth_linkstatus_get(eth_dev, &link);
+
+	link_up = sxe_hw_is_link_state_up(hw);
+
+	if (!link.link_status && !link_up) {
+		PMD_LOG_DEBUG(DRV, "link change irq, down->down, do nothing.");
+		goto l_out;
+	}
+
+	if (irq->to_pcs_init) {
+		PMD_LOG_DEBUG(DRV, "to set pcs init, do nothing.");
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "link change irq handler start");
+	sxe_link_update(eth_dev, 0);
+	sxe_link_info_output(eth_dev);
+
+	timeout = link.link_status ? SXE_LINK_DOWN_TIMEOUT :
+					SXE_LINK_UP_TIMEOUT;
+
+	if (rte_eal_alarm_set(timeout * 1000,
+			      sxe_event_irq_delayed_handler,
+			      (void *)eth_dev) < 0) {
+		PMD_LOG_ERR(DRV, "submit event irq delay handle fail.");
+	} else {
+		irq->enable_mask &= ~SXE_EIMS_LSC;
+	}
+
+	PMD_LOG_INFO(DRV, "link change irq handler end");
+
+l_out:
+	return;
+}
+
+static s32 sxe_event_irq_action(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	PMD_LOG_DEBUG(DRV, "event irq action type %d", irq->action);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	/* mailbox irq handler */
+	if (irq->action & SXE_IRQ_MAILBOX) {
+		sxe_mbx_irq_handler(eth_dev);
+		irq->action &= ~SXE_IRQ_MAILBOX;
+	}
+#endif
+
+	/* lsc irq handler */
+	if (irq->action & SXE_IRQ_LINK_UPDATE) {
+		sxe_lsc_irq_handler(eth_dev);
+		PMD_LOG_INFO(DRV, "link change irq");
+	}
+
+	return 0;
+}
+
+STATIC void sxe_event_irq_handler(void *data)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)data;
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 eicr;
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+
+	sxe_hw_all_irq_disable(hw);
+
+	eicr = sxe_hw_irq_cause_get(hw);
+	PMD_LOG_DEBUG(DRV, "event irq triggered eicr:0x%x", eicr);
+
+	eicr &= 0xFFFF0000;
+
+	sxe_hw_pending_irq_write_clear(hw, eicr);
+
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	if (eicr & SXE_EICR_LSC) {
+		irq->action |= SXE_IRQ_LINK_UPDATE;
+	}
+
+	if (eicr & SXE_EICR_MAILBOX) {
+		irq->action |= SXE_IRQ_MAILBOX;
+	}
+
+	if (eicr & SXE_EICR_LINKSEC) {
+		irq->action |= SXE_IRQ_MACSEC;
+	}
+
+	sxe_event_irq_action(eth_dev);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	return;
+}
+
+void sxe_irq_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *irq_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+
+
+	rte_intr_callback_register(irq_handle,
+				   sxe_event_irq_handler, eth_dev);
+
+	rte_spinlock_init(&adapter->irq_ctxt.event_irq_lock);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 gpie = 0;
+
+	if ((irq_handle->type == RTE_INTR_HANDLE_UIO) ||
+	    (irq_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)) {
+		gpie = sxe_hw_irq_general_reg_get(hw);
+
+		gpie |= SXE_GPIE_MSIX_MODE | SXE_GPIE_OCD;
+		sxe_hw_irq_general_reg_set(hw, gpie);
+	}
+	rte_intr_enable(irq_handle);
+
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+#endif
+	return;
+}
+
+static s32 sxe_irq_general_config(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 gpie;
+	s32 ret = 0;
+
+	gpie = sxe_hw_irq_general_reg_get(hw);
+	if (!rte_intr_dp_is_en(handle) &&
+	    !(gpie & (SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT))) {
+		ret = -SXE_ERR_CONFIG;
+		gpie |= SXE_GPIE_MSIX_MODE;
+		PMD_LOG_INFO(DRV, "rx queue irq num:%d gpie:0x%x.",
+				  handle->nb_efd, gpie);
+	} else {
+		gpie |= SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT |
+			SXE_GPIE_OCD | SXE_GPIE_EIAME |
+			SXE_GPIE_SPP1_EN | SXE_GPIE_SPP2_EN;
+	}
+
+	sxe_hw_irq_general_reg_set(hw, gpie);
+
+	return ret;
+}
+
+static void sxe_msix_configure(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_rx_queue *rx_queue;
+	s32 ret;
+	u16 queue_id;
+	u16 vector = SXE_MISC_VEC_ID;
+	u16 base = SXE_MISC_VEC_ID;
+	u32 irq_interval;
+	u32 value;
+
+	ret = sxe_irq_general_config(dev);
+	if (ret) {
+		PMD_LOG_INFO(DRV, "unsupport msi-x, no need config irq");
+		goto l_out;
+	}
+
+	if (rte_intr_allow_others(handle)) {
+		vector = base = SXE_RX_VEC_BASE;
+	}
+
+	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT);
+
+	if (rte_intr_dp_is_en(handle)) {
+		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+			queue_id++) {
+			rx_queue = dev->data->rx_queues[queue_id];
+			if (dev->data->lro == 1) {
+				sxe_hw_ring_irq_interval_set(hw, vector,
+										irq_interval);
+			}
+
+			sxe_hw_ring_irq_map(hw, false,
+						rx_queue->reg_idx,
+						vector);
+			handle->intr_vec[queue_id] = vector;
+			PMD_LOG_INFO(DRV,
+					"queue id:%u reg_idx:%u vector:%u ",
+					queue_id,
+					rx_queue->reg_idx,
+					vector);
+			if (vector < base + handle->nb_efd - 1) {
+				vector++;
+			}
+		}
+		sxe_hw_event_irq_map(hw, 1, SXE_MISC_VEC_ID);
+	}
+
+	sxe_hw_ring_irq_interval_set(hw, 0, irq_interval);
+
+	sxe_hw_ring_irq_auto_disable(hw, true);
+
+	value = SXE_EIMS_ENABLE_MASK;
+	value &= ~(SXE_EIMS_OTHER | SXE_EIMS_MAILBOX | SXE_EIMS_LSC);
+	sxe_hw_event_irq_auto_clear_set(hw, value);
+
+l_out:
+	return;
+}
+
+s32 sxe_irq_configure(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	u16 irq_num;
+	s32 ret = 0;
+
+	if ((rte_intr_cap_multiple(handle) ||
+	     !RTE_ETH_DEV_SRIOV(eth_dev).active) &&
+	    eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+		irq_num = eth_dev->data->nb_rx_queues;
+		if (irq_num > SXE_QUEUE_IRQ_NUM_MAX) {
+			PMD_LOG_ERR(DRV, "irq_num:%u exceed limit:%u ",
+				      irq_num, SXE_QUEUE_IRQ_NUM_MAX);
+			ret = -ENOTSUP;
+			goto l_out;
+		}
+
+		if (rte_intr_efd_enable(handle, irq_num)) {
+			ret = -SXE_ERR_CONFIG;
+			PMD_LOG_ERR(DRV,
+				      "intr_handle type:%d irq num:%d invalid",
+				      handle->type, irq_num);
+			goto l_out;
+		}
+	}
+
+	if (rte_intr_dp_is_en(handle) && !handle->intr_vec) {
+		handle->intr_vec = rte_zmalloc("intr_vec",
+				    eth_dev->data->nb_rx_queues * sizeof(u32), 0);
+		if (handle->intr_vec == NULL) {
+			PMD_LOG_ERR(DRV, "rx queue irq vector "
+					 "allocate %zuB memory fail.",
+					 eth_dev->data->nb_rx_queues * sizeof(u32));
+			ret = -ENOMEM;
+			goto l_out;
+		}
+	}
+
+	sxe_msix_configure(eth_dev);
+
+	sxe_irq_enable(eth_dev);
+
+	PMD_LOG_INFO(DRV,
+		      "intr_conf rxq:%u intr_handle type:%d rx queue num:%d "
+		      "queue irq num:%u total irq num:%u "
+		      "config done",
+		      eth_dev->data->dev_conf.intr_conf.rxq,
+		      handle->type,
+		      eth_dev->data->nb_rx_queues,
+		      handle->nb_efd,
+		      handle->max_intr);
+
+l_out:
+	return ret;
+}
+
+void sxe_irq_enable(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (rte_intr_allow_others(handle)) {
+		sxe_link_info_output(eth_dev);
+
+		if (eth_dev->data->dev_conf.intr_conf.lsc != 0) {
+			irq->enable_mask |= SXE_EIMS_LSC;
+		} else {
+			irq->enable_mask &= ~SXE_EIMS_LSC;
+		}
+	} else {
+		rte_intr_callback_unregister(handle,
+					     sxe_event_irq_handler, eth_dev);
+		if (eth_dev->data->dev_conf.intr_conf.lsc != 0) {
+			PMD_LOG_ERR(DRV, "event irq not support.");
+		}
+	}
+
+	/* check if rxq interrupt is enabled */
+	if (eth_dev->data->dev_conf.intr_conf.rxq != 0 &&
+	    rte_intr_dp_is_en(handle)) {
+		irq->enable_mask |= SXE_EIMS_RTX_QUEUE;
+	}
+
+	rte_intr_enable(handle);
+
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+
+	PMD_LOG_INFO(DRV,
+		      "intr_handle type:%d enable irq mask:0x%x",
+		      handle->type,
+		      irq->enable_mask);
+
+	return;
+}
+
+void sxe_irq_vec_free(struct rte_intr_handle *handle)
+{
+	if (handle->intr_vec != NULL) {
+		rte_free(handle->intr_vec);
+		handle->intr_vec = NULL;
+	}
+
+	return;
+}
+
+void sxe_irq_disable(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	if (!rte_intr_allow_others(handle)) {
+		rte_intr_callback_register(handle,
+					   sxe_event_irq_handler,
+					   (void *)eth_dev);
+	}
+
+	rte_intr_efd_disable(handle);
+	sxe_irq_vec_free(handle);
+
+	return;
+}
+
+void sxe_irq_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	u8 retry = 0;
+	s32 ret;
+
+	rte_intr_disable(handle);
+
+	do {
+		ret = rte_intr_callback_unregister(handle,
+				sxe_event_irq_handler, eth_dev);
+		if (ret >= 0 || ret == -ENOENT) {
+			break;
+		} else if (ret != -EAGAIN) {
+			PMD_LOG_ERR(DRV,
+				    "irq handler unregister failed, will retry");
+		}
+		rte_delay_ms(100);
+	} while (retry++ < (10 + SXE_LINK_UP_TIME));
+
+	rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, eth_dev);
+
+	return;
+}
+
+s32 sxe_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, u16 queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 mask;
+
+	if (queue_id < 16) {
+		sxe_hw_all_irq_disable(hw);
+		irq->enable_mask |= (1 << queue_id);
+		sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	} else if (queue_id < 32) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 0);
+		mask &= (1 << queue_id);
+		sxe_hw_ring_irq_switch_set(hw, 0, mask);
+	} else if (queue_id < 64) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 1);
+		mask &= (1 << (queue_id - 32));
+		sxe_hw_ring_irq_switch_set(hw, 1, mask);
+	}
+
+	rte_intr_ack(intr_handle);
+
+	PMD_LOG_INFO(DRV, "queue_id:%u irq enabled enable_mask:0x%x.",
+		    queue_id, irq->enable_mask);
+
+	return 0;
+}
+
+s32 sxe_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, u16 queue_id)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 mask;
+
+	if (queue_id < 16) {
+		sxe_hw_all_irq_disable(hw);
+		irq->enable_mask &= ~(1 << queue_id);
+		sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	} else if (queue_id < 32) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 0);
+		mask &= ~(1 << queue_id);
+		sxe_hw_ring_irq_switch_set(hw, 0, mask);
+	} else if (queue_id < 64) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 1);
+		mask &= ~(1 << (queue_id - 32));
+		sxe_hw_ring_irq_switch_set(hw, 1, mask);
+	}
+
+	PMD_LOG_INFO(DRV, "queue_id:%u irq disabled enable_mask:0x%x.",
+		    queue_id, irq->enable_mask);
+
+	return 0;
+}
+
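
The queue-vector plumbing above supports the standard interrupt-driven Rx
pattern (cf. examples/l3fwd-power). A minimal sketch, assuming intr_conf.rxq
was set before rte_eth_dev_configure(), the port is started, and a DPDK
release where rte_epoll_wait() is exported via <rte_epoll.h>:

	#include <rte_ethdev.h>
	#include <rte_epoll.h>

	static void example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_epoll_event event;

		/* register the queue vector with this lcore's epoll fd */
		rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
					  RTE_EPOLL_PER_THREAD,
					  RTE_INTR_EVENT_ADD, NULL);

		/* unmask: lands in sxe_rx_queue_intr_enable() */
		rte_eth_dev_rx_intr_enable(port_id, queue_id);

		/* sleep until the vector fires or 10 ms elapse */
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10);

		/* mask again before polling: sxe_rx_queue_intr_disable() */
		rte_eth_dev_rx_intr_disable(port_id, queue_id);
	}
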
diff --git a/drivers/net/sxe/pf/sxe_irq.h b/drivers/net/sxe/pf/sxe_irq.h
new file mode 100644
index 0000000000..322d7023c9
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_irq.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_IRQ_H__
+#define __SXE_IRQ_H__
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+
+#define SXE_QUEUE_IRQ_NUM_MAX    15
+
+#define SXE_QUEUE_ITR_INTERVAL_DEFAULT   500
+
+#define SXE_EITR_INTERVAL_UNIT_NS	2048
+#define SXE_EITR_ITR_INT_SHIFT          3
+#define SXE_IRQ_ITR_MASK                (0x00000FF8)
+#define SXE_EITR_INTERVAL_US(us) \
+	(((us) * 1000 / SXE_EITR_INTERVAL_UNIT_NS << SXE_EITR_ITR_INT_SHIFT) & \
+		SXE_IRQ_ITR_MASK)
+
+struct sxe_irq_context {
+	u32 action;
+	u32 enable_mask;
+	u32 enable_mask_original;
+	rte_spinlock_t event_irq_lock;
+	bool to_pcs_init;
+};
+
+void sxe_event_irq_delayed_handler(void *param);
+
+void sxe_irq_init(struct rte_eth_dev *eth_dev);
+
+s32 sxe_irq_configure(struct rte_eth_dev *dev);
+
+void sxe_irq_enable(struct rte_eth_dev *eth_dev);
+
+void sxe_irq_disable(struct rte_eth_dev *eth_dev);
+
+void sxe_irq_uninit(struct rte_eth_dev *eth_dev);
+
+s32 sxe_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, u16 queue_id);
+
+s32 sxe_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, u16 queue_id);
+
+void sxe_irq_vec_free(struct rte_intr_handle *handle);
+
+#endif
+
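
The EITR encoding above can be checked arithmetically: the hardware counts in
2048 ns units and the interval field occupies the bits selected by
SXE_IRQ_ITR_MASK starting at bit 3, so the 500 us default becomes
500 * 1000 / 2048 = 244 units, and 244 << 3 = 0x7a0, which the 0xff8 mask
leaves unchanged. A compile-time sanity check, assuming a C11 toolchain and
this header included:

	#include "sxe_irq.h"

	_Static_assert(SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT) == 0x7a0,
		       "EITR encoding of the 500 us default interval");
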
diff --git a/drivers/net/sxe/pf/sxe_main.c b/drivers/net/sxe/pf/sxe_main.c
new file mode 100644
index 0000000000..3f30f26508
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_main.c
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include <string.h>
+#include <sys/time.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+
+#include "sxe_version.h"
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_dev.h>
+#else
+#include <bus_pci_driver.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <dev_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe_types.h"
+#include "sxe_ethdev.h"
+#include "sxe.h"
+#include "drv_msg.h"
+#include "sxe_cli.h"
+#include "sxe_queue.h"
+#include "sxe_errno.h"
+#include "sxe_compat_platform.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_vf.h"
+#include "sxe_queue_common.h"
+
+static const struct rte_pci_id sxe_pci_tbl[] = {
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_STARS, SXE_DEV_ID_ASIC) },
+	{.vendor_id = 0,}
+};
+
+s8 g_log_filename[LOG_FILE_NAME_LEN] = {0};
+
+bool is_log_created = false;
+
+#ifdef SXE_DPDK_DEBUG
+void sxe_log_stream_init(void)
+{
+	FILE *fp;
+	struct timeval	tv;
+	struct tm *td;
+	u8 len;
+	s8 time[40];
+
+	if (is_log_created) {
+		goto l_out;
+	}
+
+	memset(g_log_filename, 0, LOG_FILE_NAME_LEN);
+
+	len = snprintf(g_log_filename, LOG_FILE_NAME_LEN, "%s%s.",
+		      LOG_FILE_PATH, LOG_FILE_PREFIX);
+
+	gettimeofday(&tv, NULL);
+	td = localtime(&tv.tv_sec);
+	strftime(time, sizeof(time), "%Y-%m-%d-%H:%M:%S", td);
+
+	snprintf(g_log_filename + len, LOG_FILE_NAME_LEN - len,
+		"%s", time);
+
+	fp = fopen(g_log_filename, "w+");
+	if (fp == NULL) {
+		PMD_LOG_ERR(INIT, "open log file:%s fail, errno:%d %s.",
+			    g_log_filename, errno, strerror(errno));
+		goto l_out;
+	}
+
+	PMD_LOG_NOTICE(INIT, "log stream file:%s.", g_log_filename);
+
+	rte_openlog_stream(fp);
+
+	is_log_created = true;
+
+l_out:
+	return;
+}
+#endif
+
+static s32 sxe_probe(struct rte_pci_driver *pci_drv __rte_unused,
+					struct rte_pci_device *pci_dev)
+{
+	s32 ret;
+
+	printf("sxe_version[%s], sxe_commit_id[%s], sxe_branch[%s], sxe_build_time[%s]\n",
+		SXE_VERSION, SXE_COMMIT_ID, SXE_BRANCH, SXE_BUILD_TIME);
+
+#ifdef SXE_DPDK_DEBUG
+	sxe_log_stream_init();
+#endif
+
+	/* HDC */
+	sxe_hdc_channel_init();
+
+	ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+				sizeof(struct sxe_adapter),
+				eth_dev_pci_specific_init,
+				pci_dev,
+				sxe_ethdev_init, NULL);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sxe pmd eth dev create fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(INIT, "%s sxe pmd probe done.", pci_dev->device.name);
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_remove(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	s32 ret;
+
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (!eth_dev) {
+		ret = 0;
+		PMD_LOG_ERR(INIT, "sxe pmd dev has removed.");
+		goto l_out;
+	}
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev,
+					sxe_ethdev_uninit);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sxe eth dev remove fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	sxe_hdc_channel_uninit();
+
+	PMD_LOG_DEBUG(INIT, "sxe pmd remove done.");
+
+l_out:
+	return ret;
+}
+
+STATIC struct rte_pci_driver rte_sxe_pmd = {
+	.id_table  = sxe_pci_tbl,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe     = sxe_probe,
+	.remove    = sxe_remove,
+};
+
+STATIC s32 sxe_mng_reset(struct sxe_hw *hw, bool enable)
+{
+	s32 ret;
+	sxe_mng_rst_s mng_rst;
+
+	mng_rst.enable = enable;
+	PMD_LOG_INFO(INIT, "mng reset, enable=%x\n", enable);
+
+	/* Send reset command */
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_MNG_RST,
+				(void *)&mng_rst, sizeof(mng_rst),
+				NULL, 0);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mng reset failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "mng reset success, enable=%x\n", enable);
+
+l_end:
+	return ret;
+}
+
+s32 sxe_hw_reset(struct sxe_hw *hw)
+{
+	s32 ret;
+
+	/* Rx DBU off */
+	sxe_hw_rx_cap_switch_off(hw);
+
+	sxe_hw_all_irq_disable(hw);
+
+	sxe_hw_pending_irq_read_clear(hw);
+
+	sxe_hw_all_ring_disable(hw, SXE_HW_TXRX_RING_NUM_MAX);
+
+	ret = sxe_mng_reset(hw, false);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mng reset disable failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	ret = sxe_hw_nic_reset(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "nic reset failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	msleep(50);
+
+	ret = sxe_mng_reset(hw, true);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mng reset enable failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	sxe_hw_uc_addr_clear(hw);
+
+	sxe_hw_vt_disable(hw);
+
+l_end:
+	return ret;
+}
+
+void sxe_hw_start(struct sxe_hw *hw)
+{
+	sxe_hw_vlan_filter_array_clear(hw);
+
+	sxe_hw_stats_regs_clean(hw);
+
+	sxe_hw_no_snoop_disable(hw);
+
+	sxe_hw_dcb_rate_limiter_clear(hw, SXE_TXRX_RING_NUM_MAX);
+
+	sxe_fc_autoneg_localcap_set(hw);
+
+	hw->mac.auto_restart = true;
+	PMD_LOG_INFO(INIT, "auto_restart:%u.\n", hw->mac.auto_restart);
+
+	return;
+}
+
+static bool is_device_supported(struct rte_eth_dev *dev,
+					struct rte_pci_driver *drv)
+{
+	bool ret = true;
+
+	if (strcmp(dev->device->driver->name, drv->driver.name)) {
+		ret = false;
+	}
+
+	return ret;
+}
+
+bool is_sxe_supported(struct rte_eth_dev *dev)
+{
+	return is_device_supported(dev, &rte_sxe_pmd);
+}
+
+RTE_PMD_REGISTER_PCI(net_sxe, rte_sxe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sxe, sxe_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_sxe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+#ifdef SXE_DPDK_DEBUG
+#ifdef DPDK_19_11_6
+s32 sxe_log_init;
+s32 sxe_log_drv;
+s32 sxe_log_rx;
+s32 sxe_log_tx;
+s32 sxe_log_hw;
+RTE_INIT(sxe_init_log)
+{
+	sxe_log_init = rte_log_register("pmd.net.sxe.init");
+	if (sxe_log_init >= 0)
+		rte_log_set_level(sxe_log_init, RTE_LOG_DEBUG);
+
+	sxe_log_drv = rte_log_register("pmd.net.sxe.drv");
+	if (sxe_log_drv >= 0)
+		rte_log_set_level(sxe_log_drv, RTE_LOG_DEBUG);
+
+	sxe_log_rx = rte_log_register("pmd.net.sxe.rx");
+	if (sxe_log_rx >= 0)
+		rte_log_set_level(sxe_log_rx, RTE_LOG_DEBUG);
+
+	sxe_log_tx = rte_log_register("pmd.net.sxe.tx");
+	if (sxe_log_tx >= 0)
+		rte_log_set_level(sxe_log_tx, RTE_LOG_DEBUG);
+
+	sxe_log_hw = rte_log_register("pmd.net.sxe.tx_hw");
+	if (sxe_log_hw >= 0)
+		rte_log_set_level(sxe_log_hw, RTE_LOG_DEBUG);
+}
+#else
+RTE_LOG_REGISTER(sxe_log_init, pmd.net.sxe.init, DEBUG);
+RTE_LOG_REGISTER(sxe_log_drv, pmd.net.sxe.drv, DEBUG);
+RTE_LOG_REGISTER(sxe_log_rx, pmd.net.sxe.rx, DEBUG);
+RTE_LOG_REGISTER(sxe_log_tx, pmd.net.sxe.tx, DEBUG);
+RTE_LOG_REGISTER(sxe_log_hw, pmd.net.sxe.tx_hw, DEBUG);
+#endif
+#else
+#ifdef DPDK_19_11_6
+s32 sxe_log_init;
+s32 sxe_log_drv;
+RTE_INIT(sxe_init_log)
+{
+	sxe_log_init = rte_log_register("pmd.net.sxe.init");
+	if (sxe_log_init >= 0)
+		rte_log_set_level(sxe_log_init, RTE_LOG_NOTICE);
+
+	sxe_log_drv = rte_log_register("pmd.net.sxe.drv");
+	if (sxe_log_drv >= 0)
+		rte_log_set_level(sxe_log_drv, RTE_LOG_NOTICE);
+}
+#else
+RTE_LOG_REGISTER(sxe_log_init, pmd.net.sxe.init, NOTICE);
+RTE_LOG_REGISTER(sxe_log_drv, pmd.net.sxe.drv, NOTICE);
+#endif
+#endif
+
+int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
+	enum rte_eth_event_type event, void *ret_param)
+{
+#ifdef DPDK_19_11_6
+	return _rte_eth_dev_callback_process(dev, event, ret_param);
+#else
+	return rte_eth_dev_callback_process(dev, event, ret_param);
+#endif
+}
+
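
With the PCI id table, kmod dependency and log types registered above, the
PMD participates in the normal EAL bus scan. A minimal sketch of the
consuming side, assuming a device already bound to one of the listed kernel
modules (error handling trimmed):

	#include <string.h>
	#include <stdio.h>
	#include <rte_eal.h>
	#include <rte_ethdev.h>

	int main(int argc, char **argv)
	{
		struct rte_eth_dev_info info;
		uint16_t pid;

		if (rte_eal_init(argc, argv) < 0)
			return -1;

		/* every port probed by sxe_probe() reports "net_sxe" */
		RTE_ETH_FOREACH_DEV(pid) {
			if (rte_eth_dev_info_get(pid, &info) == 0 &&
			    strcmp(info.driver_name, "net_sxe") == 0)
				printf("port %u driven by net_sxe\n", pid);
		}

		return rte_eal_cleanup();
	}
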
diff --git a/drivers/net/sxe/pf/sxe_offload.c b/drivers/net/sxe/pf/sxe_offload.c
new file mode 100644
index 0000000000..deea11451a
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_offload.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe.h"
+#include "sxe_offload.h"
+#include "sxe_logs.h"
+#include "sxe_compat_version.h"
+#include "sxe_queue_common.h"
+#include "sxe_offload_common.h"
+
+STATIC u8 rss_sxe_key[40] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+#define SXE_4_BIT_WIDTH  (CHAR_BIT / 2)
+#define SXE_4_BIT_MASK   RTE_LEN2MASK(SXE_4_BIT_WIDTH, u8)
+#define SXE_8_BIT_WIDTH  CHAR_BIT
+#define SXE_8_BIT_MASK   UINT8_MAX
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+u8 *sxe_rss_hash_key_get(void)
+{
+	return rss_sxe_key;
+}
+#endif
+
+u64 sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_queue_offload_capa_get(dev);
+}
+
+u64 sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_port_offload_capa_get(dev);
+}
+
+u64 sxe_tx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+u64 sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	return __sxe_tx_port_offload_capa_get(dev);
+}
+
+void sxe_rss_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_rss_cap_switch(hw, false);
+	return;
+}
+
+void sxe_rss_hash_set(struct sxe_hw *hw,
+				struct rte_eth_rss_conf *rss_conf)
+{
+	u8  *hash_key;
+	u32 rss_key[SXE_MAX_RSS_KEY_ENTRIES];
+	u16 i;
+	u64 rss_hf;
+	u32 rss_field = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) {
+			rss_key[i]  = hash_key[(i * 4)];
+			rss_key[i] |= hash_key[(i * 4) + 1] << 8;
+			rss_key[i] |= hash_key[(i * 4) + 2] << 16;
+			rss_key[i] |= hash_key[(i * 4) + 3] << 24;
+		}
+		sxe_hw_rss_key_set_all(hw, rss_key);
+	}
+
+	rss_hf = rss_conf->rss_hf;
+	if (rss_hf & RTE_ETH_RSS_IPV4) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4;
+	}
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_TCP;
+	}
+
+	if (rss_hf & RTE_ETH_RSS_IPV6) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6;
+	}
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_TCP;
+	}
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+	}
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+	}
+	sxe_hw_rss_field_set(hw, rss_field);
+
+	sxe_hw_rss_cap_switch(hw, true);
+
+	return;
+}
+
+void sxe_rss_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	u16 j;
+	u8  rss_indir_tbl[SXE_MAX_RETA_ENTRIES];
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (adapter->rss_reta_updated == false) {
+		for (i = 0, j = 0; i < SXE_MAX_RETA_ENTRIES; i++, j++) {
+			if (j == dev->data->nb_rx_queues) {
+				j = 0;
+			}
+
+			rss_indir_tbl[i] = j;
+		}
+
+		sxe_hw_rss_redir_tbl_set_all(hw, rss_indir_tbl);
+	}
+
+	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+	if ((rss_conf->rss_hf & SXE_RSS_OFFLOAD_ALL) == 0) {
+		PMD_LOG_INFO(INIT, "user rss config match hw supports is 0");
+		sxe_rss_disable(dev);
+		goto l_end;
+	}
+
+	if (rss_conf->rss_key == NULL) {
+		rss_conf->rss_key = rss_sxe_key; 
+	}
+
+	sxe_rss_hash_set(hw, rss_conf);
+
+l_end:
+	return;
+}
+
+s32 sxe_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size)
+{
+	u16 i;
+	u8 j, mask;
+	u32 reta, r;
+	u16 idx, shift;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (!dev_data->dev_started) {
+		PMD_LOG_ERR(DRV,
+			"port %d must be started before rss reta update",
+			 dev_data->port_id);
+		ret = -EIO;
+		goto l_end;
+	}
+
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+		PMD_LOG_ERR(DRV, "The size of hash lookup table configured "
+			"(%d) doesn't match the number hardware can supported "
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	for (i = 0; i < reta_size; i += SXE_4_BIT_WIDTH) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		mask = (u8)((reta_conf[idx].mask >> shift) &
+						SXE_4_BIT_MASK);
+		if (!mask) {
+			continue;
+		}
+
+		if (mask == SXE_4_BIT_MASK) {
+			r = 0;
+		} else {
+			r = sxe_hw_rss_redir_tbl_get_by_idx(hw, i);
+		}
+
+		for (j = 0, reta = 0; j < SXE_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j)) {
+				reta |= reta_conf[idx].reta[shift + j] <<
+						(CHAR_BIT * j);
+			} else {
+				reta |= r & (SXE_8_BIT_MASK <<
+					(CHAR_BIT * j));
+			}
+		}
+
+		sxe_hw_rss_redir_tbl_set_by_idx(hw, i, reta);
+	}
+	adapter->rss_reta_updated = true;
+
+l_end:
+	return ret;
+}
+
+s32 sxe_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size)
+{
+	u16 i;
+	u8 j, mask;
+	u32 reta;
+	u16 idx, shift;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+		PMD_LOG_ERR(DRV, "the size of hash lookup table configured "
+			"(%d) doesn't match the number hardware can supported "
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	for (i = 0; i < reta_size; i += SXE_4_BIT_WIDTH) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		mask = (u8)((reta_conf[idx].mask >> shift) &
+						SXE_4_BIT_MASK);
+		if (!mask) {
+			continue;
+		}
+
+		reta = sxe_hw_rss_redir_tbl_get_by_idx(hw, i);
+		for (j = 0; j < SXE_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j)) {
+				reta_conf[idx].reta[shift + j] =
+					((reta >> (CHAR_BIT * j)) &
+						SXE_8_BIT_MASK);
+			}
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 rss_hf;
+	s32 ret = 0;
+
+	rss_hf = (rss_conf->rss_hf & SXE_RSS_OFFLOAD_ALL);
+
+	if (!sxe_hw_is_rss_enabled(hw)) {
+		if (rss_hf != 0) {
+			PMD_LOG_ERR(DRV, "rss not enabled but hash update requested");
+			ret = -EINVAL;
+			goto l_end;
+		}
+
+		goto l_end;
+	}
+
+	if (rss_hf == 0) {
+		PMD_LOG_ERR(DRV, "rss enabled but update would disable all hash types");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sxe_rss_hash_set(hw, rss_conf);
+
+l_end:
+	return ret;
+}
+
+s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 *hash_key;
+	u32 rss_field;
+	u32 rss_key;
+	u64 rss_hf;
+	u16 i;
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) {
+			rss_key = sxe_hw_rss_key_get_by_idx(hw, i);
+			hash_key[(i * 4)] = rss_key & 0x000000FF;
+			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+		}
+	}
+
+
+	if (!sxe_hw_is_rss_enabled(hw)) {
+		rss_conf->rss_hf = 0;
+		PMD_LOG_INFO(DRV, "rss not enabled,return 0");
+		goto l_end;
+	}
+
+	rss_hf = 0;
+	rss_field = sxe_hw_rss_field_get(hw);
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4) {
+		rss_hf |= RTE_ETH_RSS_IPV4;
+	}
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_TCP) {
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
+	}
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_UDP) {
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
+	}
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6) {
+		rss_hf |= RTE_ETH_RSS_IPV6;
+	}
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_TCP) {
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
+	}
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_UDP) {
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
+	}
+
+	PMD_LOG_DEBUG(DRV, "got rss hash func=0x%"SXE_PRIX64, rss_hf);
+	rss_conf->rss_hf = rss_hf;
+
+l_end:
+	return 0;
+}
+
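
The RETA handlers above are driven through the generic
rte_eth_dev_rss_reta_update()/_query() calls. A minimal caller sketch that
round-robins the 128 entries across the first two Rx queues, assuming the
port is already started (sxe_rss_reta_update() requires a started port and a
reta_size of exactly 128):

	#include <string.h>
	#include <rte_ethdev.h>

	static int example_reta_spread(uint16_t port_id)
	{
		struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
							  RTE_ETH_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
			uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
			uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

			reta_conf[idx].mask |= 1ULL << shift;
			reta_conf[idx].reta[shift] = i % 2;	/* queue 0 or 1 */
		}

		return rte_eth_dev_rss_reta_update(port_id, reta_conf,
						   RTE_ETH_RSS_RETA_SIZE_128);
	}
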
diff --git a/drivers/net/sxe/pf/sxe_offload.h b/drivers/net/sxe/pf/sxe_offload.h
new file mode 100644
index 0000000000..d1f651feb6
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_offload.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_OFFLOAD_H__
+#define __SXE_OFFLOAD_H__
+
+#include "sxe_hw.h"
+
+#define SXE_RSS_OFFLOAD_ALL ( \
+		RTE_ETH_RSS_IPV4 | \
+		RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+		RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+		RTE_ETH_RSS_IPV6 | \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+u8 *sxe_rss_hash_key_get(void);
+#endif
+
+void sxe_rss_hash_set(struct sxe_hw *hw,
+				struct rte_eth_rss_conf *rss_conf);
+
+u64 sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 sxe_tx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+void sxe_rss_disable(struct rte_eth_dev *dev);
+
+void sxe_rss_configure(struct rte_eth_dev *dev);
+
+s32 sxe_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size);
+
+s32 sxe_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size);
+
+s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf);
+
+s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf);
+
+#endif
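
For reference, sxe_rss_hash_update() declared above is reached via
rte_eth_dev_rss_hash_update(). A minimal sketch restricting the hash to
IPv4/TCP on an already-configured port; a NULL rss_key keeps the 40-byte key
programmed earlier, and an rss_hf of 0 is rejected once RSS is enabled:

	#include <rte_ethdev.h>

	static int example_rss_tcp_only(uint16_t port_id)
	{
		struct rte_eth_rss_conf conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_IPV4 |
				  RTE_ETH_RSS_NONFRAG_IPV4_TCP,
		};

		return rte_eth_dev_rss_hash_update(port_id, &conf);
	}
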
diff --git a/drivers/net/sxe/pf/sxe_phy.c b/drivers/net/sxe/pf/sxe_phy.c
new file mode 100644
index 0000000000..595bbcbc25
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_phy.c
@@ -0,0 +1,993 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#endif
+
+#include <rte_cycles.h>
+#include <rte_net.h>
+
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_phy.h"
+#include "drv_msg.h"
+#include "sxe_phy.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxe_ethdev.h"
+#include "sxe_filter.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_filter.h"
+#include "sxe_compat_version.h"
+
+#define SXE_WAIT_LINK_UP_FAILED	1
+#define SXE_WARNING_TIMEOUT	9000
+#define SXE_CHG_SFP_RATE_MS     40
+#define SXE_1G_WAIT_PCS_MS      100
+#define SXE_10G_WAIT_PCS_MS     100
+#define SXE_HZ_TRANSTO_MS       1000
+#define SXE_AN_COMPLETE_TIME    5
+#define SXE_10G_WAIT_13_TIME    13
+#define SXE_10G_WAIT_5_TIME     5
+
+STATIC void *sxe_setup_link_thread_handler(void *param)
+{
+	s32 ret;
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 allowed_speeds = 0;
+	u32 conf_speeds = 0;
+	u32 speed = 0;
+	bool autoneg = false;
+
+	pthread_detach(pthread_self());
+
+	sxe_sfp_link_capabilities_get(adapter, &allowed_speeds, &autoneg);
+
+	sxe_conf_speed_get(dev, &conf_speeds);
+
+	speed = (conf_speeds & allowed_speeds) ? (conf_speeds & allowed_speeds) :
+		allowed_speeds;
+
+	if (adapter->phy_ctxt.sfp_info.multispeed_fiber) {
+		ret = sxe_multispeed_sfp_link_configure(dev, speed, true);
+	} else {
+		ret = sxe_sfp_link_configure(dev);
+	}
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link setup failed, ret=%d", ret);
+	}
+
+	irq->action &= ~SXE_IRQ_LINK_CONFIG;
+	rte_atomic32_clear(&adapter->link_thread_running);
+	return NULL;
+}
+
+void sxe_wait_setup_link_complete(struct rte_eth_dev *dev,
+						uint32_t timeout_ms)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	uint32_t timeout = timeout_ms ? timeout_ms : SXE_WARNING_TIMEOUT;
+
+	while (rte_atomic32_read(&adapter->link_thread_running)) {
+		rte_delay_us_sleep(1000);
+		timeout--;
+
+		if (timeout_ms) {
+			if (!timeout) {
+				goto l_end;
+			}
+		} else if (!timeout) {
+			timeout = SXE_WARNING_TIMEOUT;
+			PMD_LOG_ERR(INIT, "link thread not complete too long time!");
+		}
+	}
+
+l_end:
+	return;
+}
+
+static s32 sxe_an_cap_get(struct sxe_adapter *adapter, sxe_an_cap_s *an_cap)
+{
+	s32 ret;
+	struct sxe_hw *hw = &adapter->hw;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_AN_CAP_GET,
+				NULL, 0,
+				(void *)an_cap, sizeof(*an_cap));
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hdc trans failed ret=%d, cmd:negotiaton cap get", ret);
+	}
+
+	return ret;
+}
+
+s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	u32 i;
+	bool link_up, orig_link_up;
+	struct rte_eth_link link;
+	sxe_an_cap_s an_cap;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	sxe_link_speed link_speed = SXE_LINK_SPEED_UNKNOWN;
+
+	PMD_LOG_INFO(INIT, "link update start...");
+
+	memset(&link, 0, sizeof(link));
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed  = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+						RTE_ETH_LINK_SPEED_FIXED);
+
+	if (irq->action & SXE_IRQ_LINK_CONFIG) {
+		PMD_LOG_INFO(INIT, "other link config thread exsit");
+		goto l_end;
+	}
+
+	if (dev->data->dev_conf.intr_conf.lsc) {
+		wait_to_complete = 0;
+	}
+
+	sxe_link_info_get(adapter, &link_speed, &orig_link_up);
+	sxe_link_info_get(adapter, &link_speed, &link_up);
+
+	if (orig_link_up != link_up) {
+		PMD_LOG_INFO(INIT, "link status %s to %s",
+			(orig_link_up ? "up" : "down"),
+			(link_up ? "up" : "down"));
+	}
+
+	if (wait_to_complete) {
+		for (i = 0; i < SXE_LINK_UP_TIME; i++) {
+			if (link_up == true) {
+				break;
+			}
+
+			rte_delay_us_sleep(100000);
+
+			sxe_link_info_get(adapter, &link_speed, &link_up);
+		}
+	}
+
+	if (link_up == false) {
+		sxe_wait_setup_link_complete(dev, 0);
+		if (rte_atomic32_test_and_set(&adapter->link_thread_running)) {
+			if (adapter->phy_ctxt.sfp_tx_laser_disabled) {
+				PMD_LOG_INFO(INIT, "tx laser is disabled");
+				rte_atomic32_clear(&adapter->link_thread_running);
+			} else {
+				irq->action |= SXE_IRQ_LINK_CONFIG;
+				irq->to_pcs_init = true;
+				if (rte_ctrl_thread_create(&adapter->link_thread_tid,
+					"sxe-link-handler",
+					NULL,
+					sxe_setup_link_thread_handler,
+					dev) < 0) {
+					PMD_LOG_ERR(INIT,
+						"Create link thread failed!");
+					rte_atomic32_clear(&adapter->link_thread_running);
+				}
+			}
+		} else {
+			PMD_LOG_ERR(INIT, "other link thread is running now!");
+		}
+
+		goto l_end;
+	}
+
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	switch (link_speed) {
+	case SXE_LINK_SPEED_1GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
+		if (adapter->phy_ctxt.sfp_tx_laser_disabled) {
+			PMD_LOG_INFO(INIT, "tx laser disabled, link state is down.\n");
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		} else {
+			for (i = 0; i < SXE_AN_COMPLETE_TIME; i++) {
+				sxe_an_cap_get(adapter, &an_cap);
+				if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN) {
+					break;
+				}
+				rte_delay_us_sleep(100000);
+			}
+		}
+		break;
+
+	case SXE_LINK_SPEED_10GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
+		break;
+	default:
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+	}
+
+l_end:
+	PMD_LOG_INFO(INIT, "link update end, up=%x, speed=%x",
+						link.link_status, link_speed);
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+s32 sxe_link_status_update(struct rte_eth_dev *dev)
+{
+	u32 i;
+	bool link_up;
+	struct rte_eth_link link;
+	sxe_an_cap_s an_cap;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	sxe_link_speed link_speed = SXE_LINK_SPEED_UNKNOWN;
+
+	PMD_LOG_INFO(INIT, "link status update start...");
+
+	memset(&link, 0, sizeof(link));
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed  = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+						RTE_ETH_LINK_SPEED_FIXED);
+
+	sxe_link_info_get(adapter, &link_speed, &link_up);
+	if (link_up == false) {
+		PMD_LOG_INFO(INIT, "link status is down.");
+		goto l_end;
+	}
+
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	switch (link_speed) {
+	case SXE_LINK_SPEED_1GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
+		for (i = 0; i < SXE_AN_COMPLETE_TIME; i++) {
+			sxe_an_cap_get(adapter, &an_cap);
+			if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN) {
+				break;
+			}
+			rte_delay_us_sleep(100000);
+		}
+		break;
+
+	case SXE_LINK_SPEED_10GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
+		break;
+	default:
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+	}
+
+l_end:
+	PMD_LOG_INFO(INIT, "link status update end, up=%x, speed=%x",
+						link.link_status, link_speed);
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+int sxe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = (struct sxe_adapter *)dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_sfp_tx_laser_enable(adapter);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_enable(hw, SXE_EIMS_LSC);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	sxe_link_update(dev, 0);
+
+	return 0;
+}
+
+int sxe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = (struct sxe_adapter *)dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_sfp_tx_laser_disable(adapter);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_disable(hw, SXE_EIMS_LSC);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	sxe_link_update(dev, 0);
+
+	return 0;
+}
+
+
+STATIC s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset,
+					u16 len, u8 *data)
+{
+	s32 ret;
+	struct sxe_sfp_rw_req req;
+	struct sxe_sfp_read_resp *resp;
+	u16 resp_len = sizeof(struct sxe_sfp_read_resp) + len;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (!data) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(INIT, "sfp read buff == NULL");
+		goto l_end;
+	}
+
+	if (len > SXE_SFP_EEPROM_SIZE_MAX) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(INIT, "sfp read size[%u] > eeprom max size[%d], ret=%d",
+					len, SXE_SFP_EEPROM_SIZE_MAX, ret);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sfp read, offset=%u, len=%u", offset, len);
+
+	req.len = len;
+	req.offset = offset;
+
+	resp = malloc(resp_len);
+	if (!resp) {
+		ret = -ENOMEM;
+		PMD_LOG_ERR(INIT, "sfp read, alloc resp mem failed");
+		goto l_end;
+	}
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_SFP_READ,
+				(void *)&req, sizeof(struct sxe_sfp_rw_req),
+				(void *)resp, resp_len);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sfp read, hdc failed, offset=%u, len=%u, ret=%d",
+					offset, len, ret);
+		ret = -EIO;
+		goto l_free;
+	}
+
+	if (resp->len != len) {
+		ret = -EIO;
+		PMD_LOG_ERR(INIT, "sfp read failed, offset=%u, len=%u", offset, len);
+		goto l_free;
+	}
+
+	memcpy(data, resp->resp, len);
+
+l_free:
+	free(resp);
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_sfp_tx_laser_ctrl(struct sxe_adapter *adapter, bool is_disable)
+{
+	s32 ret;
+	sxe_spp_tx_able_s laser_disable;
+	struct sxe_hw *hw = &adapter->hw;
+
+	laser_disable.isDisable = is_disable;
+	adapter->phy_ctxt.sfp_tx_laser_disabled = is_disable;
+	PMD_LOG_INFO(INIT, "sfp tx laser ctrl start, is_disable=%x", is_disable);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_TX_DIS_CTRL,
+				&laser_disable, sizeof(laser_disable),
+				NULL, 0);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sfp tx laser ctrl failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sfp tx laser ctrl success, is_disable=%x", is_disable);
+
+l_end:
+	return ret;
+}
+
+void sxe_sfp_tx_laser_enable(struct sxe_adapter *adapter)
+{
+	sxe_sfp_tx_laser_ctrl(adapter, false);
+
+	return;
+}
+
+void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter)
+{
+	sxe_sfp_tx_laser_ctrl(adapter, true);
+
+	return;
+}
+
+s32 sxe_sfp_reset(struct sxe_adapter *adapter)
+{
+	PMD_LOG_INFO(INIT, "auto_restart:%u.\n", adapter->hw.mac.auto_restart);
+
+	if(adapter->hw.mac.auto_restart) {
+		sxe_sfp_tx_laser_disable(adapter);
+		sxe_sfp_tx_laser_enable(adapter);
+		adapter->hw.mac.auto_restart = false;
+	}
+
+	return 0;
+}
+
+void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
+							bool *autoneg)
+{
+	struct sxe_sfp_info *sfp = &adapter->phy_ctxt.sfp_info;
+
+	*speed = 0;
+
+	if (sfp->type == SXE_SFP_TYPE_1G_CU ||
+	    sfp->type == SXE_SFP_TYPE_1G_SXLX ) {
+		*speed = SXE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		goto l_end;
+	}
+
+	*speed = SXE_LINK_SPEED_10GB_FULL;
+	*autoneg = false;
+
+	if (sfp->multispeed_fiber) {
+		*speed |= SXE_LINK_SPEED_10GB_FULL | SXE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+	}
+
+l_end:
+	PMD_LOG_INFO(INIT, "sfp link speed cap=%d", *speed);
+	return;
+}
+
+s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate)
+{
+	s32 ret;
+	sxe_sfp_rate_able_s rate_able;
+	struct sxe_hw *hw = &adapter->hw;
+
+	rate_able.rate = rate;
+	PMD_LOG_INFO(INIT, "sfp tx rate select start, rate=%d", rate);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_RATE_SELECT,
+				&rate_able, sizeof(rate_able),
+				NULL, 0);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sfp rate select failed, ret=%d", ret);
+	}
+
+	PMD_LOG_INFO(INIT, "sfp tx rate select end, rate=%d", rate);
+
+	return ret;
+}
+
+s32 sxe_pcs_sds_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame)
+{
+	s32 ret;
+	sxe_pcs_cfg_s pcs_cfg;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	pcs_cfg.mode = mode;
+	pcs_cfg.mtu  = max_frame;
+	sxe_sfp_tx_laser_disable(adapter);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_PCS_SDS_INIT,
+				(void *)&pcs_cfg, sizeof(pcs_cfg),
+				NULL, 0);
+	irq->to_pcs_init = false;
+	sxe_sfp_tx_laser_enable(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret);
+		goto l_end;
+	}
+
+	sxe_fc_mac_addr_set(adapter);
+
+	LOG_INFO_BDF("mode:%u max_frame:0x%x pcs sds init done.\n",
+		     mode, max_frame);
+l_end:
+	return ret;
+}
+
+s32 sxe_conf_speed_get(struct rte_eth_dev *dev, u32 *conf_speeds)
+{
+	s32 ret = 0;
+	u32 *link_speeds;
+	u32 allowed_speeds;
+
+	link_speeds = &dev->data->dev_conf.link_speeds;
+	allowed_speeds = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
+
+	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
+		PMD_LOG_ERR(INIT, "invalid link setting, link_speed=%x",
+						*link_speeds);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	*conf_speeds = SXE_LINK_SPEED_UNKNOWN;
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		*conf_speeds = SXE_LINK_SPEED_1GB_FULL | \
+				 SXE_LINK_SPEED_10GB_FULL;
+	} else {
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G) {
+			*conf_speeds |= SXE_LINK_SPEED_10GB_FULL;
+		}
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G) {
+			*conf_speeds |= SXE_LINK_SPEED_1GB_FULL;
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool is_in_thread)
+{
+	s32 ret;
+	bool autoneg, link_up;
+	u32 i, speed_cap, link_speed, speedcnt = 0;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 highest_link_speed = SXE_LINK_SPEED_UNKNOWN;
+	u32 frame_size = adapter->mtu + SXE_ETH_DEAD_LOAD;
+	u8 wait_time = is_in_thread ? SXE_10G_WAIT_13_TIME : SXE_10G_WAIT_5_TIME;
+
+	sxe_sfp_link_capabilities_get(adapter, &speed_cap, &autoneg);
+
+	speed &= speed_cap;
+
+	if (speed & SXE_LINK_SPEED_10GB_FULL) {
+		PMD_LOG_DEBUG(INIT, "10G link cfg start\n");
+		irq->to_pcs_init = true;
+
+		speedcnt++;
+		highest_link_speed = SXE_LINK_SPEED_10GB_FULL;
+
+		ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_10G);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "set sfp rate failed, ret=%d", ret);
+			goto l_end;
+		}
+
+		rte_delay_us_sleep((SXE_CHG_SFP_RATE_MS * SXE_HZ_TRANSTO_MS));
+
+		ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO,
+						frame_size);
+		if (ret) {
+			goto l_end;
+		}
+
+
+		for (i = 0; i < wait_time; i++) {
+			rte_delay_us_sleep((SXE_10G_WAIT_PCS_MS * SXE_HZ_TRANSTO_MS));
+
+			sxe_link_info_get(adapter, &link_speed, &link_up);
+			if (link_up) {
+				PMD_LOG_INFO(INIT, "link cfg end, link up, speed is 10G");
+				goto l_out;
+			}
+		}
+
+		PMD_LOG_WARN(INIT, "10G link cfg failed, retry...");
+	}
+
+	if (speed & SXE_LINK_SPEED_1GB_FULL) {
+		PMD_LOG_DEBUG(INIT, "1G link cfg start\n");
+		irq->to_pcs_init = true;
+
+		speedcnt++;
+		if (highest_link_speed == SXE_LINK_SPEED_UNKNOWN) {
+			highest_link_speed = SXE_LINK_SPEED_1GB_FULL;
+		}
+
+		ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_1G);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "set sfp rate failed, ret=%d", ret);
+			goto l_end;
+		}
+
+		rte_delay_us_sleep((SXE_CHG_SFP_RATE_MS * SXE_HZ_TRANSTO_MS));
+
+		ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_1000BASE_KX_W,
+						frame_size);
+		if (ret) {
+			goto l_end;
+		}
+
+
+		rte_delay_us_sleep(SXE_1G_WAIT_PCS_MS * SXE_HZ_TRANSTO_MS);
+
+		sxe_link_status_update(dev);
+
+		link_up = sxe_hw_is_link_state_up(hw);
+		if (link_up) {
+			PMD_LOG_INFO(INIT, "link cfg end, link up, speed is 1G");
+			goto l_out;
+		}
+
+		PMD_LOG_WARN(INIT, "1G link cfg failed, retry...");
+	}
+
+	if (speedcnt > 1) {
+		ret = sxe_multispeed_sfp_link_configure(dev, highest_link_speed, is_in_thread);
+	}
+l_out:
+
+	adapter->phy_ctxt.autoneg_advertised = 0;
+
+	if (speed & SXE_LINK_SPEED_10GB_FULL) {
+		adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_10GB_FULL;
+	}
+
+	if (speed & SXE_LINK_SPEED_1GB_FULL) {
+		adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_1GB_FULL;
+	}
+
+l_end:
+	return ret;
+}
+
+void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_up)
+{
+	struct sxe_hw *hw = &adapter->hw;
+
+	*link_up = sxe_hw_is_link_state_up(hw);
+	if (*link_up == false) {
+		PMD_LOG_INFO(INIT, "link state =%d, (1=link_up, 0=link_down)\n",
+								*link_up);
+		*link_speed = SXE_LINK_SPEED_UNKNOWN;
+	} else {
+		*link_speed = sxe_hw_link_speed_get(hw);
+	}
+
+	return;
+}
+
+static s32 sxe_sfp_fc_autoneg(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	sxe_an_cap_s an_cap;
+	struct sxe_hw *hw = &adapter->hw;
+
+	ret = sxe_an_cap_get(adapter, &an_cap);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "get auto negotiate capacity failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	if ((an_cap.local.pause_cap & SXE_PAUSE_CAP_SYMMETRIC_PAUSE) &&
+		(an_cap.peer.pause_cap & SXE_PAUSE_CAP_SYMMETRIC_PAUSE)) {
+		if (hw->fc.requested_mode == SXE_FC_FULL) {
+			hw->fc.current_mode = SXE_FC_FULL;
+			PMD_LOG_DEBUG(INIT, "Flow Control = FULL.");
+		} else {
+			hw->fc.current_mode = SXE_FC_RX_PAUSE;
+			PMD_LOG_DEBUG(INIT, "Flow Control=RX PAUSE frames only");
+		}
+	} else if ((an_cap.local.pause_cap == SXE_PAUSE_CAP_ASYMMETRIC_PAUSE) &&
+		(an_cap.peer.pause_cap == SXE_PAUSE_CAP_BOTH_PAUSE)) {
+		hw->fc.current_mode = SXE_FC_TX_PAUSE;
+		PMD_LOG_DEBUG(INIT, "Flow Control = TX PAUSE frames only.");
+	} else if ((an_cap.local.pause_cap == SXE_PAUSE_CAP_BOTH_PAUSE) &&
+		(an_cap.peer.pause_cap == SXE_PAUSE_CAP_ASYMMETRIC_PAUSE)) {
+		hw->fc.current_mode = SXE_FC_RX_PAUSE;
+		PMD_LOG_DEBUG(INIT, "Flow Control = RX PAUSE frames only.");
+	} else {
+		hw->fc.current_mode = SXE_FC_NONE;
+		PMD_LOG_DEBUG(INIT, "Flow Control = NONE.");
+	}
+
+l_end:
+	return ret;
+}
+
+static void sxe_fc_autoneg(struct sxe_adapter *adapter)
+{
+	struct sxe_hw *hw = &adapter->hw;
+
+	s32 ret = -SXE_ERR_FC_NOT_NEGOTIATED;
+	bool link_up;
+	u32 link_speed;
+	if (hw->fc.disable_fc_autoneg) {
+		PMD_LOG_INFO(INIT, "disable fc autoneg");
+		goto l_end;
+	}
+
+	sxe_link_info_get(adapter, &link_speed, &link_up);
+	if (!link_up) {
+		PMD_LOG_INFO(INIT, "link down, dont fc autoneg");
+		goto l_end;
+	}
+
+	if (link_speed != SXE_LINK_SPEED_1GB_FULL) {
+		PMD_LOG_INFO(INIT, "link speed=%x (0x80=10G, 0x20=1G), don't fc autoneg", link_speed);
+		goto l_end;
+	}
+
+	ret = sxe_sfp_fc_autoneg(adapter);
+l_end:
+	if (ret) {
+		hw->fc.current_mode = hw->fc.requested_mode;
+	}
+
+	return;
+}
+
+s32 sxe_fc_enable(struct sxe_adapter *adapter)
+{
+	s32 ret = 0;
+	u32 i;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (!hw->fc.pause_time) {
+		PMD_LOG_ERR(INIT, "link fc disabled since pause time is 0");
+		ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+		goto l_end;
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				PMD_LOG_DEBUG(INIT, "invalid water mark configuration, "
+					"tc[%u] low_water=%u, high_water=%u",
+					i, hw->fc.low_water[i],
+					hw->fc.high_water[i]);
+				ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+				goto l_end;
+			}
+		}
+	}
+
+	/* auto negotiation flow control local capability configuration */
+	sxe_fc_autoneg_localcap_set(hw);
+
+	sxe_fc_autoneg(adapter);
+
+	ret = sxe_hw_fc_enable(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link fc enable failed, ret=%d", ret);
+	}
+
+l_end:
+	return ret;
+}
+
+s32 sxe_pfc_enable(struct sxe_adapter *adapter, u8 tc_idx)
+{
+	s32 ret;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (!hw->fc.pause_time) {
+		LOG_ERROR_BDF("link fc disabled since pause time is 0");
+		ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+		goto l_ret;
+	}
+
+	if (hw->fc.current_mode & SXE_FC_TX_PAUSE) {
+		if ((!hw->fc.high_water[tc_idx]) || (!hw->fc.low_water[tc_idx])) {
+			LOG_ERROR_BDF("Invalid water mark configuration");
+			ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+			goto l_ret;
+		}
+
+		if (hw->fc.low_water[tc_idx] >= hw->fc.high_water[tc_idx]) {
+			LOG_ERROR_BDF("Invalid water mark configuration");
+			ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+			goto l_ret;
+		}
+	}
+
+	sxe_fc_autoneg(adapter);
+
+	ret = sxe_hw_pfc_enable(hw, tc_idx);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link fc enable failed, ret=%d", ret);
+	}
+
+l_ret:
+	return ret;
+}
+
+s32 sxe_sfp_identify(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	enum sxe_sfp_type sfp_type;
+	u8 sfp_comp_code[SXE_SFP_COMP_CODE_SIZE];
+	struct sxe_sfp_info *sfp = &adapter->phy_ctxt.sfp_info;
+
+	PMD_LOG_INFO(INIT, "sfp identify start");
+
+	ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_BASE_ADDR,
+				SXE_SFP_COMP_CODE_SIZE, sfp_comp_code);
+	if (ret) {
+		sfp_type = SXE_SFP_TYPE_UNKNOWN;
+		PMD_LOG_ERR(INIT, "get sfp identifier failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sfp identifier=%x, cable_technology=%x, "
+			"10GB_code=%x, 1GB_code=%x",
+		sfp_comp_code[SXE_SFF_IDENTIFIER],
+		sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY],
+		sfp_comp_code[SXE_SFF_10GBE_COMP_CODES],
+		sfp_comp_code[SXE_SFF_1GBE_COMP_CODES]);
+
+	if (sfp_comp_code[SXE_SFF_IDENTIFIER] != SXE_SFF_IDENTIFIER_SFP) {
+		LOG_WARN("sfp type get failed, offset=%d, type=%x",
+			SXE_SFF_IDENTIFIER, sfp_comp_code[SXE_SFF_IDENTIFIER]);
+		sfp_type = SXE_SFP_TYPE_UNKNOWN;
+		ret = -SXE_ERR_SFF_NOT_SUPPORTED;
+		goto l_end;
+	}
+
+	if (sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY] & SXE_SFF_DA_PASSIVE_CABLE) {
+		sfp_type = SXE_SFP_TYPE_DA_CU;
+	} else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+		(SXE_SFF_10GBASESR_CAPABLE | SXE_SFF_10GBASELR_CAPABLE)) {
+		sfp_type = SXE_SFP_TYPE_SRLR;
+	} else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		SXE_SFF_1GBASET_CAPABLE) {
+		sfp_type = SXE_SFP_TYPE_1G_CU;
+	} else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		SXE_SFF_1GBASESX_CAPABLE) || \
+		(sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		SXE_SFF_1GBASELX_CAPABLE)) {
+		sfp_type = SXE_SFP_TYPE_1G_SXLX;
+	} else {
+		sfp_type = SXE_SFP_TYPE_UNKNOWN;
+	}
+
+	sfp->multispeed_fiber = false;
+	if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+			SXE_SFF_1GBASESX_CAPABLE) &&
+		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+			SXE_SFF_10GBASESR_CAPABLE)) ||
+		((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+			SXE_SFF_1GBASELX_CAPABLE) &&
+		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+			SXE_SFF_10GBASELR_CAPABLE))) {
+		sfp->multispeed_fiber = true;
+	}
+
+	PMD_LOG_INFO(INIT, "identify sfp, sfp_type=%d, is_multispeed=%x",
+			sfp_type, sfp->multispeed_fiber);
+
+l_end:
+	adapter->phy_ctxt.sfp_info.type = sfp_type;
+	return ret;
+}
+
+s32 sxe_sfp_link_configure(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	bool an;
+	u32 pcs_mode = SXE_PCS_MODE_BUTT;
+	u32 speed;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	u32 frame_size = adapter->mtu + SXE_ETH_DEAD_LOAD;
+
+	sxe_sfp_link_capabilities_get(adapter, &speed, &an);
+
+	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+		pcs_mode = SXE_PCS_MODE_1000BASE_KX_W;
+		adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_1GB_FULL;
+	} else if (SXE_LINK_SPEED_10GB_FULL == speed) {
+		pcs_mode = SXE_PCS_MODE_10GBASE_KR_WO;
+		adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_10GB_FULL;
+	}
+
+	ret = sxe_pcs_sds_init(adapter, pcs_mode, frame_size);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "pcs sds init failed, ret=%d", ret);
+	}
+
+	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+		sxe_link_status_update(dev);
+	}
+
+	PMD_LOG_INFO(INIT, "link :cfg speed=%x, pcs_mode=%x, atuoreg=%d",
+					speed, pcs_mode, an);
+
+	return ret;
+}
+
+int sxe_get_module_info(struct rte_eth_dev *dev,
+			struct rte_eth_dev_module_info *info)
+{
+	s32 ret;
+	bool page_swap = false;
+	u8 sff8472_rev, addr_mode;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_8472_COMPLIANCE,
+					sizeof(sff8472_rev), &sff8472_rev);
+	if (ret) {
+		ret = -EIO;
+		goto l_end;
+	}
+
+	ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_8472_DIAG_MONITOR_TYPE,
+					sizeof(addr_mode), &addr_mode);
+	if (ret) {
+		ret = -EIO;
+		goto l_end;
+	}
+
+	if (addr_mode & SXE_SFF_ADDRESSING_MODE) {
+		PMD_LOG_ERR(DRV, "address change required to access page 0xA2, "
+			"but not supported. Please report the module "
+			"type to the driver maintainers.");
+		page_swap = true;
+	}
+
+	if ((sff8472_rev == SXE_SFF_8472_UNSUP) || page_swap || \
+			!(addr_mode & SXE_SFF_DDM_IMPLEMENTED)) {
+		info->type = RTE_ETH_MODULE_SFF_8079;
+		info->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+	} else {
+		info->type = RTE_ETH_MODULE_SFF_8472;
+		info->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+	}
+
+	LOG_INFO("sfp support management is %x, eeprom addr mode=%x "
+			"eeprom type=%x, eeprom len=%d",
+		sff8472_rev, addr_mode, info->type, info->eeprom_len);
+
+l_end:
+	return ret;
+}
+
+int sxe_get_module_eeprom(struct rte_eth_dev *dev,
+				struct rte_dev_eeprom_info *info)
+{
+	s32 ret;
+	u8 *data = info->data;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	if (info->length == 0) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe_sfp_eeprom_read(adapter, info->offset, info->length, data);
+	if (ret) {
+		LOG_ERROR("read sfp failed");
+	}
+
+l_end:
+	return ret;
+}
+
+
+static enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter)
+{
+	enum sxe_media_type type;
+
+	type = SXE_MEDIA_TYPE_FIBER;
+	adapter->phy_ctxt.is_sfp = true;
+
+	return type;
+}
+
+s32 sxe_phy_init(struct sxe_adapter *adapter)
+{
+	s32 ret = 0;
+	enum sxe_media_type media_type = sxe_media_type_get(adapter);
+
+	if (SXE_MEDIA_TYPE_FIBER == media_type) {
+		ret = sxe_sfp_identify(adapter);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "phy identify failed, ret=%d", ret);
+		}
+	} else {
+		PMD_LOG_ERR(INIT, "phy init failed, only support SFP.");
+	}
+
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_phy.h b/drivers/net/sxe/pf/sxe_phy.h
new file mode 100644
index 0000000000..b0ec2388b9
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_phy.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef  __SXE_PHY_H__
+#define  __SXE_PHY_H__
+
+#include <rte_ethdev.h>
+#include "drv_msg.h"
+#include "sxe_cli.h"
+#include "sxe_msg.h"
+
+#define SXE_SFF_BASE_ADDR			0x0
+#define SXE_SFF_IDENTIFIER			0x0
+#define SXE_SFF_10GBE_COMP_CODES		0x3
+#define SXE_SFF_1GBE_COMP_CODES			0x6
+#define SXE_SFF_CABLE_TECHNOLOGY		0x8
+#define SXE_SFF_8472_DIAG_MONITOR_TYPE		0x5C
+#define SXE_SFF_8472_COMPLIANCE			0x5E
+
+#define SXE_SFF_IDENTIFIER_SFP			0x3
+#define SXE_SFF_ADDRESSING_MODE			0x4
+#define SXE_SFF_8472_UNSUP			0x0
+#define SXE_SFF_DDM_IMPLEMENTED			0x40
+#define SXE_SFF_DA_PASSIVE_CABLE		0x4
+#define SXE_SFF_DA_ACTIVE_CABLE			0x8
+#define SXE_SFF_DA_SPEC_ACTIVE_LIMITING		0x4
+#define SXE_SFF_1GBASESX_CAPABLE		0x1
+#define SXE_SFF_1GBASELX_CAPABLE		0x2
+#define SXE_SFF_1GBASET_CAPABLE			0x8
+#define SXE_SFF_10GBASESR_CAPABLE		0x10
+#define SXE_SFF_10GBASELR_CAPABLE		0x20
+
+#define SXE_SFP_COMP_CODE_SIZE			10
+#define SXE_SFP_EEPROM_SIZE_MAX			512
+
+#define SXE_IRQ_LINK_UPDATE      (u32)(1 << 0)
+#define SXE_IRQ_LINK_CONFIG      (u32)(1 << 3)
+
+struct sxe_adapter;
+
+enum sxe_media_type {
+	SXE_MEDIA_TYPE_UNKWON = 0,
+	SXE_MEDIA_TYPE_FIBER  = 1,
+};
+
+enum sxe_phy_idx {
+	SXE_SFP_IDX = 0,
+	SXE_PHY_MAX,
+};
+
+enum sxe_sfp_type {
+	SXE_SFP_TYPE_DA_CU       = 0,
+	SXE_SFP_TYPE_SRLR        = 1,
+	SXE_SFP_TYPE_1G_CU       = 2,
+	SXE_SFP_TYPE_1G_SXLX     = 4,
+	SXE_SFP_TYPE_UNKNOWN     = 0xFFFF,
+};
+
+struct sxe_sfp_info {
+	enum sxe_sfp_type	type;
+	bool			multispeed_fiber;
+};
+
+struct sxe_phy_context {
+	bool is_sfp;
+	bool sfp_tx_laser_disabled;
+	u32  speed;
+	u32  autoneg_advertised;
+	struct sxe_sfp_info sfp_info;
+};
+
+s32 sxe_phy_init(struct sxe_adapter *adapter);
+
+s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+s32 sxe_link_status_update(struct rte_eth_dev *dev);
+
+void sxe_sfp_tx_laser_enable(struct sxe_adapter *adapter);
+
+void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter);
+
+int sxe_dev_set_link_up(struct rte_eth_dev *dev);
+
+int sxe_dev_set_link_down(struct rte_eth_dev *dev);
+
+void sxe_wait_setup_link_complete(struct rte_eth_dev *dev,
+						uint32_t timeout_ms);
+
+int sxe_get_module_info(struct rte_eth_dev *dev,
+			struct rte_eth_dev_module_info *info);
+
+int sxe_get_module_eeprom(struct rte_eth_dev *dev,
+				struct rte_dev_eeprom_info *info);
+s32 sxe_sfp_identify(struct sxe_adapter *adapter);
+s32 sxe_sfp_reset(struct sxe_adapter *adapter);
+
+s32 sxe_pcs_sds_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame);
+
+s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate);
+
+s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool is_in_thread);
+
+s32 sxe_conf_speed_get(struct rte_eth_dev *dev, u32 *conf_speeds);
+
+s32 sxe_fc_enable(struct sxe_adapter *adapter);
+
+void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_up);
+
+s32 sxe_pfc_enable(struct sxe_adapter *adapter, u8 tc_idx);
+
+void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
+							bool *autoneg);
+
+s32 sxe_sfp_link_configure(struct rte_eth_dev *dev);
+
+void sxe_mac_configure(struct sxe_adapter *adapter);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_pmd_hdc.c b/drivers/net/sxe/pf/sxe_pmd_hdc.c
new file mode 100644
index 0000000000..9137776a01
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_pmd_hdc.c
@@ -0,0 +1,717 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include "sxe_compat_version.h"
+#include <semaphore.h>
+#include <pthread.h>
+#include <signal.h>
+#include "sxe_pmd_hdc.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe.h"
+#include "sxe_msg.h"
+#include "drv_msg.h"
+#include "sxe_errno.h"
+#include "sxe_common.h"
+
+static sem_t g_hdc_sem;
+
+#define SXE_SUCCESS			(0)
+
+#define SXE_HDC_TRYLOCK_MAX		200
+
+#define SXE_HDC_RELEASELOCK_MAX		20
+#define SXE_HDC_WAIT_TIME		1000
+#define SXE_HDC_BIT_1			0x1
+#define ONE_DWORD_LEN			(4)
+
+static sem_t *sxe_hdc_sema_get(void)
+{
+	return &g_hdc_sem;
+}
+
+void sxe_hdc_channel_init(void)
+{
+	s32 ret;
+	ret = sem_init(sxe_hdc_sema_get(), 0, 1);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hdc sem init failed,ret=%d",ret);
+	}
+
+	sxe_trace_id_gen();
+
+	return;
+}
+
+void sxe_hdc_channel_uninit(void)
+{
+	sem_destroy(sxe_hdc_sema_get());
+	sxe_trace_id_clean();
+
+	return;
+}
+
+static s32 sxe_fw_time_sync_process(struct sxe_hw *hw)
+{
+	s32 ret;
+	u64 timestamp = sxe_time_get_real_ms();
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("sync time= %"SXE_PRIU64"ms\n", timestamp);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_TINE_SYNC,
+				(void *)&timestamp, sizeof(timestamp),
+				NULL, 0);
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:time sync\n",ret);
+	}
+
+	return ret;
+}
+
+s32 sxe_fw_time_sync(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	s32 ret_v;
+	u32 status;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	status = sxe_hw_hdc_fw_status_get(hw);
+	if (status != SXE_FW_START_STATE_FINISHED) {
+		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good",hw, status);
+		ret = -SXE_FW_STATUS_ERR;
+		goto l_ret;
+	}
+
+	ret_v = sxe_fw_time_sync_process(hw);
+	if (ret_v) {
+		LOG_WARN_BDF("fw time sync failed, ret_v=%d\n",ret_v);
+		goto l_ret;
+	}
+
+l_ret:
+	return ret;
+}
+
+static inline s32 sxe_hdc_lock_get(struct sxe_hw *hw)
+{
+	return sxe_hw_hdc_lock_get(hw, SXE_HDC_TRYLOCK_MAX);
+}
+
+static inline void sxe_hdc_lock_release(struct sxe_hw *hw)
+{
+	sxe_hw_hdc_lock_release(hw, SXE_HDC_RELEASELOCK_MAX);
+	return;
+}
+
+static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout)
+{
+	s32 ret = 0;
+	u32 i;
+	bool fw_ov = false;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	for (i = 0; i < timeout; i++) {
+		fw_ov = sxe_hw_hdc_is_fw_over_set(hw);
+		if (fw_ov) {
+			break;
+		}
+
+		msleep(10);
+	}
+
+	if (i >= timeout) {
+		LOG_ERROR_BDF("poll fw_ov timeout...\n");
+		ret = -SXE_ERR_HDC_FW_OV_TIMEOUT;
+		goto l_ret;
+	}
+
+	sxe_hw_hdc_fw_ov_clear(hw);
+l_ret:
+	return ret;
+}
+
+static inline void hdc_channel_clear(struct sxe_hw *hw)
+{
+	sxe_hw_hdc_fw_ov_clear(hw);
+	return;
+}
+
+static s32 hdc_packet_ack_get(struct sxe_hw *hw, u64 trace_id,
+				HdcHeader_u *pkt_header)
+{
+	s32 ret     = 0;
+	u32 timeout = SXE_HDC_WAIT_TIME;
+	struct sxe_adapter *adapter = hw->adapter;
+	UNUSED(trace_id);
+
+	pkt_header->dw0 = 0;
+	pkt_header->head.errCode = PKG_ERR_OTHER;
+
+	LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" hdc cmd ack get start\n", trace_id);
+	ret = sxe_poll_fw_ack(hw, timeout);
+	if (ret) {
+		LOG_ERROR_BDF("get fw ack failed, ret=%d\n", ret);
+		goto l_out;
+	}
+
+	pkt_header->dw0 = sxe_hw_hdc_fw_ack_header_get(hw);
+	if (pkt_header->head.errCode == PKG_ERR_PKG_SKIP) {
+		ret = -SXE_HDC_PKG_SKIP_ERR;
+		goto l_out;
+	} else if (pkt_header->head.errCode != PKG_OK) {
+		ret = -SXE_HDC_PKG_OTHER_ERR;
+		goto l_out;
+	}
+
+l_out:
+	LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" hdc cmd ack get end ret=%d\n", trace_id, ret);
+	return ret;
+}
+
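+/* Build one HDC packet header: pid is the packet index (read acks are
+ * offset by one), totalLen and len are carried in dwords, and the
+ * start/end flags mark the message boundaries.
+ */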
+static void hdc_packet_header_fill(HdcHeader_u *pkt_header,
+			u8 pkt_index, u16 total_len,
+			u16 pkt_num, u8 is_read)
+{
+	u16 pkt_len = 0;
+
+	pkt_header->dw0 = 0;
+
+	pkt_header->head.pid = (is_read == 0) ? pkt_index : (pkt_index - 1);
+
+	pkt_header->head.totalLen = SXE_HDC_LEN_TO_REG(total_len);
+
+	if (pkt_index == 0 && is_read == 0) {
+		pkt_header->head.startPkg = SXE_HDC_BIT_1;
+	}
+
+	if (pkt_index == (pkt_num - 1)) {
+		pkt_header->head.endPkg = SXE_HDC_BIT_1;
+		pkt_len = total_len - (DWORD_NUM * (pkt_num - 1));
+	} else {
+		pkt_len = DWORD_NUM;
+	}
+
+	pkt_header->head.len  = SXE_HDC_LEN_TO_REG(pkt_len);
+	pkt_header->head.isRd = is_read;
+	pkt_header->head.msi = 0;
+
+	return;
+}
+
+static inline void hdc_packet_send_done(struct sxe_hw *hw)
+{
+	sxe_hw_hdc_packet_send_done(hw);
+	return;
+}
+
+static inline void hdc_packet_header_send(struct sxe_hw *hw,
+							u32 header)
+{
+	sxe_hw_hdc_packet_header_send(hw, header);
+	return;
+}
+
+static inline void hdc_packet_data_dword_send(struct sxe_hw *hw,
+						u16 dword_index, u32 value)
+{
+	sxe_hw_hdc_packet_data_dword_send(hw, dword_index, value);
+	return;
+}
+
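+/* Send one packet: write the header register first, then the payload
+ * dword by dword; the trailing partial dword of the last packet is
+ * copied byte-wise so no memory past data_len is read.
+ */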
+static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id,
+			HdcHeader_u *pkt_header, u8 *data,
+			u16 data_len)
+{
+	u16          dw_idx   = 0;
+	u16          pkt_len       = 0;
+	u16          offset        = 0;
+	u32          pkg_data      = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+	UNUSED(trace_id);
+
+	LOG_DEBUG_BDF("hw_addr[%p] trace_id=0x%"SXE_PRIX64" send pkt pkg_header[0x%x], "
+		"data_addr[%p], data_len[%u]\n",
+		hw, trace_id, pkt_header->dw0, data, data_len);
+
+	hdc_packet_header_send(hw, pkt_header->dw0);
+
+	if (data == NULL || data_len == 0) {
+		goto l_send_done;
+	}
+
+	pkt_len = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
+	for (dw_idx = 0; dw_idx < pkt_len; dw_idx++) {
+		pkg_data = 0;
+
+		offset = dw_idx * 4;
+
+		if ((pkt_header->head.endPkg == SXE_HDC_BIT_1)
+			&& (dw_idx == (pkt_len - 1))
+			&& (data_len % 4 != 0)) {
+			memcpy((u8 *)&pkg_data, data + offset,
+					data_len % ONE_DWORD_LEN);
+		} else {
+			pkg_data = *(u32 *)(data + offset);
+		}
+
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" send data to reg[%u] dword[0x%x]\n",
+				trace_id, dw_idx, pkg_data);
+		hdc_packet_data_dword_send(hw, dw_idx, pkg_data);
+	}
+
+l_send_done:
+	hdc_channel_clear(hw);
+
+	hdc_packet_send_done(hw);
+
+	return;
+}
+
+static inline u32 hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+						u16 dword_index)
+{
+	return sxe_hw_hdc_packet_data_dword_rcv(hw, dword_index);
+}
+
+static void hdc_resp_data_rcv(struct sxe_hw *hw, u64 trace_id,
+				HdcHeader_u *pkt_header, u8 *out_data,
+				u16 out_len)
+{
+	u16          dw_idx      = 0;
+	u16          dw_num      = 0;
+	u16          offset = 0;
+	u32          pkt_data;
+	struct sxe_adapter *adapter = hw->adapter;
+	UNUSED(trace_id);
+
+	dw_num = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
+	for (dw_idx = 0; dw_idx < dw_num; dw_idx++) {
+		pkt_data= hdc_packet_data_dword_rcv(hw, dw_idx);
+		offset = dw_idx * ONE_DWORD_LEN;
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" get data from reg[%u] dword=0x%x\n",
+				trace_id, dw_idx, pkt_data);
+
+		if ((pkt_header->head.endPkg == SXE_HDC_BIT_1)
+			&& (dw_idx == (dw_num - 1)) && (out_len % 4 != 0)) {
+			memcpy(out_data + offset, (u8 *)&pkt_data,
+					out_len % ONE_DWORD_LEN);
+		} else {
+			*(u32 *)(out_data + offset) = pkt_data;
+		}
+	}
+
+	return;
+}
+
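+/* Send the request as a sequence of packets of at most DWORD_NUM
+ * dwords; every packet except the last waits for a firmware ack, and
+ * a PKG_SKIP ack allows exactly one resend of the same packet.
+ */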
+STATIC s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id,
+			u8 *in_data, u16 in_len)
+{
+	s32 ret         = 0;
+	u32 total_len   = 0;
+	u16 pkt_num     = 0;
+	u16 index       = 0;
+	u16 offset      = 0;
+	HdcHeader_u     pkt_header;
+	bool is_retry   = false;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	total_len  = (in_len + ONE_DWORD_LEN - 1) / ONE_DWORD_LEN;
+
+	pkt_num = (in_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX;
+	LOG_DEBUG_BDF("hw[%p] trace_id=0x%"SXE_PRIX64" req in_data[%p] in_len=%u, "
+			"total_len=%uDWORD, pkt_num = %u\n",
+			hw, trace_id, in_data, in_len, total_len,
+			pkt_num);
+
+	for (index = 0; index < pkt_num; index++) {
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" fill pkg header[%p], pkg_index[%u], "
+			"total_Len[%u], pkg_num[%u], is_read[no]\n",
+			trace_id, &pkt_header, index, total_len, pkt_num);
+		hdc_packet_header_fill(&pkt_header, index, total_len,
+						pkt_num, 0);
+
+		offset = index * DWORD_NUM * 4;
+		hdc_packet_send(hw, trace_id, &pkt_header,
+				in_data + offset, in_len);
+
+		if (index == pkt_num - 1) {
+			break;
+		}
+
+		ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
+		if (ret == -EINTR) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
+			goto l_out;
+		} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack"
+					"failed, retry\n", trace_id);
+			if (is_retry) {
+				ret = -SXE_HDC_RETRY_ERR;
+				goto l_out;
+			}
+
+			index--;
+			is_retry = true;
+			continue;
+		} else if (ret != SXE_HDC_SUCCESS) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack"
+					"failed, ret=%d\n", trace_id, ret);
+			ret = -SXE_HDC_RETRY_ERR;
+			goto l_out;
+		}
+
+		LOG_DEBUG_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" get req packet_index[%u]"
+			" ack succeed header[0x%x]\n",
+			trace_id, index, pkt_header.dw0);
+		is_retry = false;
+	}
+
+l_out:
+	return ret;
+}
+
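+/* Receive the response: the ack of the last request packet carries the
+ * first data packet, remaining packets are pulled with read headers,
+ * and the response length is clamped to the caller's buffer.
+ */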
+static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id,
+			u8 *out_data, u16 out_len)
+{
+	s32          ret;
+	u32          req_dwords;
+	u32          resp_len;
+	u32          resp_dwords;
+	u16          pkt_num;
+	u16          index;
+	u16          offset;
+	HdcHeader_u  pkt_header;
+	bool     retry          = false;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get\n",trace_id);
+	ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
+	if (ret == -EINTR) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
+		goto l_out;
+	} else if (ret) {
+		LOG_ERROR_BDF("hdc trace_id=0x%"SXE_PRIX64" ack get failed, ret=%d\n",
+				trace_id, ret);
+		ret = -SXE_HDC_RETRY_ERR;
+		goto l_out;
+	}
+
+	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get"
+		"succeed header[0x%x]\n",trace_id, pkt_header.dw0);
+
+	if (!pkt_header.head.startPkg) {
+		ret = -SXE_HDC_RETRY_ERR;
+		LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" ack header has error:"
+				"not set start bit\n",trace_id);
+		goto l_out;
+	}
+
+	req_dwords = (out_len + ONE_DWORD_LEN - 1) / ONE_DWORD_LEN;
+	resp_dwords  = SXE_HDC_LEN_FROM_REG(pkt_header.head.totalLen);
+	if (resp_dwords > req_dwords) {
+		ret = -SXE_HDC_RETRY_ERR;
+		LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" rsv len check failed:"
+				"resp_dwords=%u, req_dwords=%u\n",trace_id,
+				resp_dwords, req_dwords);
+		goto l_out;
+	}
+
+	resp_len = resp_dwords << 2;
+	LOG_DEBUG_BDF("outlen = %u bytes, resp_len = %u bytes\n", out_len, resp_len);
+	if (resp_len > out_len) {
+		resp_len = out_len;
+	}
+
+	hdc_resp_data_rcv(hw, trace_id, &pkt_header, out_data, resp_len);
+
+	pkt_num = (resp_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX;
+	for (index = 1; index < pkt_num; index++) {
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" fill pkg header[%p], pkg_index[%u], "
+			"total_Len[%u], pkg_num[%u], is_read[yes]\n",
+			trace_id, &pkt_header, index, resp_dwords,
+			pkt_num);
+		hdc_packet_header_fill(&pkt_header, index, resp_dwords,
+					pkt_num, 1);
+
+		hdc_packet_send(hw, trace_id, &pkt_header, NULL, 0);
+
+		ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
+		if (ret == -EINTR) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
+			goto l_out;
+		} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
+			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling"
+					"failed, ret=%d\n", trace_id, ret);
+			if (retry) {
+				ret = -SXE_HDC_RETRY_ERR;
+				goto l_out;
+			}
+
+			index--;
+			retry = true;
+			continue;
+		} else if (ret != SXE_HDC_SUCCESS) {
+			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling"
+					"failed, ret=%d\n",trace_id, ret);
+			ret = -SXE_HDC_RETRY_ERR;
+			goto l_out;
+		}
+
+		LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" resp pkt[%u] get "
+			"succeed header[0x%x]\n",
+			trace_id, index, pkt_header.dw0);
+
+		retry = false;
+
+		offset = index * DWORD_NUM * 4;
+		hdc_resp_data_rcv(hw, trace_id, &pkt_header,
+					out_data + offset, resp_len);
+	}
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id,
+					struct sxe_hdc_trans_info *trans_info)
+{
+	s32 ret = SXE_SUCCESS;
+	u32 status;
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 channel_state;
+
+	status = sxe_hw_hdc_fw_status_get(hw);
+	if (status != SXE_FW_START_STATE_FINISHED) {
+		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good\n",hw, status);
+		ret = -SXE_FW_STATUS_ERR;
+		goto l_ret;
+	}
+
+	channel_state = sxe_hw_hdc_channel_state_get(hw);
+	if (channel_state != SXE_FW_HDC_TRANSACTION_IDLE) {
+		LOG_ERROR_BDF("hdc channel state is busy\n");
+		ret = -SXE_HDC_RETRY_ERR;
+		goto l_ret;
+	}
+
+	ret = sxe_hdc_lock_get(hw);
+	if (ret) {
+		LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%"SXE_PRIX64" get hdc lock fail, ret=%d\n",
+				hw, trace_id, ret);
+		ret = -SXE_HDC_RETRY_ERR;
+		goto l_ret;
+	}
+
+	ret = hdc_req_process(hw, trace_id, trans_info->in.data,
+				trans_info->in.len);
+	if (ret) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req process"
+				"failed, ret=%d\n",trace_id, ret);
+		goto l_hdc_lock_release;
+	}
+
+	ret = hdc_resp_process(hw, trace_id, trans_info->out.data,
+				trans_info->out.len);
+	if (ret) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" resp process"
+				"failed, ret=%d\n",trace_id, ret);
+	}
+
+l_hdc_lock_release:
+	sxe_hdc_lock_release(hw);
+l_ret:
+	return ret;
+}
+
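+/* Execute one HDC command: SIGINT/SIGTERM are blocked for the duration,
+ * a semaphore serializes access to the channel, and transfers failing
+ * with -SXE_HDC_RETRY_ERR are retried up to 250 times with a 10 ms
+ * delay between attempts.
+ */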
+STATIC s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
+				struct sxe_hdc_trans_info *trans_info)
+{
+	s32 ret;
+	u8 retry_idx;
+	struct sxe_adapter *adapter = hw->adapter;
+	sigset_t old_mask, new_mask;
+	sigemptyset(&new_mask);
+	sigaddset(&new_mask, SIGINT);
+	sigaddset(&new_mask, SIGTERM);
+	ret = pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask);
+	if (ret) {
+		LOG_ERROR_BDF("hdc set signal mask failed, ret=%d\n", ret);
+		goto l_ret;
+	}
+
+	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64" \n",hw, trace_id);
+	
+	ret = sem_wait(sxe_hdc_sema_get());
+	if (ret) {
+		LOG_WARN_BDF("hw[%p] hdc concurrency full\n", hw);
+		goto l_ret;
+	}
+
+	for (retry_idx = 0; retry_idx < 250; retry_idx++) {
+		ret = sxe_hdc_packet_trans(hw, trace_id, trans_info);
+		if (ret == SXE_SUCCESS) {
+			goto l_up;
+		} else if (ret == -SXE_HDC_RETRY_ERR) {
+			rte_delay_ms(10);
+			continue;
+		} else {
+			LOG_ERROR_BDF("sxe hdc packet trace_id=0x%"SXE_PRIX64
+					" trans error, ret=%d\n", trace_id, ret);
+			ret = -EFAULT;
+			goto l_up;
+		}
+	}
+
+l_up:
+	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64"\n",hw, trace_id);
+	sem_post(sxe_hdc_sema_get());
+l_ret:
+	if (pthread_sigmask(SIG_SETMASK, &old_mask, NULL)) {
+		LOG_ERROR_BDF("hdc restore old signal mask failed\n");
+	}
+
+	if (ret == -SXE_HDC_RETRY_ERR) {
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+static void sxe_cmd_hdr_init(struct sxe_hdc_cmd_hdr *cmd_hdr,
+					u8 cmd_type)
+{
+	cmd_hdr->cmd_type = cmd_type;
+	cmd_hdr->cmd_sub_type = 0;
+	return;
+}
+
+static void sxe_driver_cmd_msg_init(struct sxe_hdc_drv_cmd_msg *msg,
+						u16 opcode, u64 trace_id,
+						void *req_data, u16 req_len)
+{
+	LOG_DEBUG("cmd[opcode=0x%x], trace=0x%"SXE_PRIX64", req_data_len=%u start init\n",
+			opcode, trace_id, req_len);
+	msg->opcode = opcode;
+	msg->length.req_len = SXE_HDC_MSG_HDR_SIZE + req_len;
+	msg->traceid = trace_id;
+
+	if (req_data && req_len != 0) {
+		memcpy(msg->body, (u8 *)req_data, req_len);
+	}
+
+	return;
+}
+
+static void sxe_hdc_trans_info_init(
+					struct sxe_hdc_trans_info *trans_info,
+					u8 *in_data_buf, u16 in_len,
+					u8 *out_data_buf, u16 out_len)
+{
+	trans_info->in.data  = in_data_buf;
+	trans_info->in.len   = in_len;
+	trans_info->out.data = out_data_buf;
+	trans_info->out.len  = out_len;
+	return;
+}
+
+s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
+					void *req_data, u16 req_len,
+					void *resp_data, u16 resp_len)
+{
+	s32 ret = SXE_SUCCESS;
+	struct sxe_hdc_cmd_hdr *cmd_hdr;
+	struct sxe_hdc_drv_cmd_msg *msg;
+	struct sxe_hdc_drv_cmd_msg *ack;
+	struct sxe_hdc_trans_info trans_info;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u8 *in_data_buf;
+	u8 *out_data_buf;
+	u16 in_len;
+	u16 out_len;
+	u64 trace_id = 0;
+	u16 ack_data_len;
+
+	in_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + req_len;
+	out_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + resp_len;
+
+	trace_id = sxe_trace_id_get();
+
+	in_data_buf = rte_zmalloc("pmd hdc in buffer", in_len, RTE_CACHE_LINE_SIZE);
+	if (in_data_buf == NULL) {
+		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc indata"
+				"mem len[%u] failed\n",trace_id, in_len);
+		ret = -ENOMEM;
+		goto l_ret;
+	}
+
+	out_data_buf = rte_zmalloc("pmd hdc out buffer", out_len, RTE_CACHE_LINE_SIZE);
+	if (out_data_buf == NULL) {
+		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc out_data"
+				"mem len[%u] failed\n",trace_id, out_len);
+		ret = -ENOMEM;
+		goto l_in_buf_free;
+	}
+
+	cmd_hdr = (struct sxe_hdc_cmd_hdr *)in_data_buf;
+	sxe_cmd_hdr_init(cmd_hdr, SXE_CMD_TYPE_DRV);
+
+	msg = (struct sxe_hdc_drv_cmd_msg *)((u8 *)in_data_buf + SXE_HDC_CMD_HDR_SIZE);
+	sxe_driver_cmd_msg_init(msg, opcode, trace_id, req_data, req_len);
+
+	LOG_DEBUG_BDF("trans drv cmd:trace_id=0x%"SXE_PRIX64", opcode[0x%x], "
+			"inlen=%u, out_len=%u\n",
+			trace_id, opcode, in_len, out_len);
+
+	sxe_hdc_trans_info_init(&trans_info,
+				in_data_buf, in_len,
+				out_data_buf, out_len);
+
+	ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info);
+	if (ret) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" hdc cmd process"
+				" failed, ret=%d\n",trace_id, ret);
+		goto l_out_buf_free;
+	}
+
+	ack = (struct sxe_hdc_drv_cmd_msg *)((u8 *)out_data_buf + SXE_HDC_CMD_HDR_SIZE);
+
+	if (ack->errcode) {
+		LOG_ERROR_BDF("driver get hdc ack failed trace_id=0x%"SXE_PRIX64", err=%d\n",
+				trace_id, ack->errcode);
+		ret = -EFAULT;
+		goto l_out_buf_free;
+	}
+
+	ack_data_len = ack->length.ack_len - SXE_HDC_MSG_HDR_SIZE;
+	if (resp_len != ack_data_len) {
+		LOG_ERROR("ack trace_id=0x%"SXE_PRIX64" data len[%u]"
+			" and resp_len[%u] dont match\n",
+			trace_id, ack_data_len, resp_len);
+		ret = -EFAULT;
+		goto l_out_buf_free;
+	}
+
+	if (resp_len != 0) {
+		memcpy(resp_data, ack->body, resp_len);
+	}
+
+	LOG_DEBUG_BDF("driver get hdc ack trace_id=0x%"SXE_PRIX64","
+			" ack_len=%u, ack_data_len=%u\n",
+			trace_id, ack->length.ack_len, ack_data_len);
+
+l_out_buf_free:
+	rte_free(out_data_buf);
+l_in_buf_free:
+	rte_free(in_data_buf);
+l_ret:
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_pmd_hdc.h b/drivers/net/sxe/pf/sxe_pmd_hdc.h
new file mode 100644
index 0000000000..13671f3a83
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_pmd_hdc.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_HOST_HDC_H__
+#define __SXE_HOST_HDC_H__
+
+#include "sxe_hdc.h"
+#include "sxe_hw.h"
+#include "sxe_errno.h"
+
+#define SXE_HDC_SUCCESS              0
+#define SXE_HDC_FALSE                SXE_ERR_HDC(1)
+#define SXE_HDC_INVAL_PARAM          SXE_ERR_HDC(2)
+#define SXE_HDC_BUSY                 SXE_ERR_HDC(3)
+#define SXE_HDC_FW_OPS_FAILED        SXE_ERR_HDC(4)
+#define SXE_HDC_FW_OV_TIMEOUT        SXE_ERR_HDC(5)
+#define SXE_HDC_REQ_ACK_HEAD_ERR     SXE_ERR_HDC(6)
+#define SXE_HDC_REQ_ACK_TLEN_ERR     SXE_ERR_HDC(7)
+#define SXE_HDC_PKG_SKIP_ERR         SXE_ERR_HDC(8)
+#define SXE_HDC_PKG_OTHER_ERR        SXE_ERR_HDC(9)
+#define SXE_HDC_RETRY_ERR            SXE_ERR_HDC(10)
+#define SXE_FW_STATUS_ERR            SXE_ERR_HDC(11)
+
+struct sxe_hdc_data_info {
+	u8 *data;
+	u16 len;
+};
+
+struct sxe_hdc_trans_info {
+	struct sxe_hdc_data_info in;
+	struct sxe_hdc_data_info out;
+};
+
+s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
+					void *req_data, u16 req_len,
+					void *resp_data, u16 resp_len);
+
+void sxe_hdc_channel_init(void);
+
+void sxe_hdc_channel_uninit(void);
+
+s32 sxe_fw_time_sync(struct sxe_hw *hw);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_ptp.c b/drivers/net/sxe/pf/sxe_ptp.c
new file mode 100644
index 0000000000..166665ad11
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ptp.c
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_ptp.h"
+
+#define SXE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
+
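+/* Reset the systime/rx/tx timecounters with a full 64-bit counter mask
+ * and zero shift, and clear the cached tx hardware timestamp.
+ */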
+static void sxe_timecounters_start(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	u32 shift = 0;
+
+	memset(&adapter->ptp_ctxt.systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&adapter->ptp_ctxt.rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&adapter->ptp_ctxt.tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	adapter->ptp_ctxt.systime_tc.cc_mask = SXE_CYCLECOUNTER_MASK;
+	adapter->ptp_ctxt.systime_tc.cc_shift = shift;
+	adapter->ptp_ctxt.systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->ptp_ctxt.rx_tstamp_tc.cc_mask = SXE_CYCLECOUNTER_MASK;
+	adapter->ptp_ctxt.rx_tstamp_tc.cc_shift = shift;
+	adapter->ptp_ctxt.rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->ptp_ctxt.tx_tstamp_tc.cc_mask = SXE_CYCLECOUNTER_MASK;
+	adapter->ptp_ctxt.tx_tstamp_tc.cc_shift = shift;
+	adapter->ptp_ctxt.tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->ptp_ctxt.tx_hwtstamp_nsec = 0;
+	adapter->ptp_ctxt.tx_hwtstamp_sec = 0;
+
+	return;
+}
+
+s32 sxe_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 tses = SXE_TSES_TXES_V2_ALL | SXE_TSES_RXES_V2_ALL;
+
+	sxe_hw_ptp_init(hw);
+
+	sxe_hw_ptp_timestamp_mode_set(hw, true, 0, tses);
+
+	sxe_hw_ptp_timestamp_enable(hw);
+
+	sxe_hw_ptp_rx_timestamp_clear(hw);
+
+	sxe_hw_ptp_systime_init(hw);
+
+	sxe_timecounters_start(dev);
+
+	return 0;
+}
+
+s32 sxe_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_hw_ptp_timestamp_disable(hw);
+
+	sxe_hw_ptp_timestamp_mode_set(hw, false, 0, 0);
+
+	sxe_hw_ptp_time_inc_stop(hw);
+
+	return 0;
+}
+
+s32 sxe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp,
+				 u32 flags __rte_unused)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 ns;
+	s32 ret = 0;
+	bool is_valid;
+	u64 rx_tstamp_cycles;
+
+	is_valid = sxe_hw_ptp_is_rx_timestamp_valid(hw);
+	if (!is_valid) {
+		PMD_LOG_ERR(DRV, "no valid ptp timestamp in rx register");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	rx_tstamp_cycles = sxe_hw_ptp_rx_timestamp_get(hw);
+	ns = rte_timecounter_update(&adapter->ptp_ctxt.rx_tstamp_tc, rx_tstamp_cycles);
+	PMD_LOG_DEBUG(DRV, "got rx_tstamp_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64, 
+			rx_tstamp_cycles, ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+l_end:
+	return ret;
+}
+
+static u64 sxe_timesync_tx_tstamp_cycles_get(
+						struct sxe_adapter *adapter)
+{
+	return SXE_TIME_TO_NS(adapter->ptp_ctxt.tx_hwtstamp_nsec,
+				adapter->ptp_ctxt.tx_hwtstamp_sec);
+}
+
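+/* A Tx timestamp is reported only when repeated register reads return
+ * a stable value that differs from the previously returned timestamp;
+ * otherwise the hardware has not captured a new one.
+ */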
+s32 sxe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 ns;
+	s32 ret = 0;
+	u64 tx_tstamp_cycles;
+	u32 ts_sec;
+	u32 ts_ns;
+	u32 last_sec;
+	u32 last_ns;
+	bool tx_tstamp_valid = true;
+	u8 i;
+
+	sxe_hw_ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
+	if (ts_ns  != adapter->ptp_ctxt.tx_hwtstamp_nsec ||
+		ts_sec != adapter->ptp_ctxt.tx_hwtstamp_sec) {
+		for (i = 0; i < SXE_TXTS_POLL_CHECK; i++) {
+			sxe_hw_ptp_tx_timestamp_get(hw, &last_sec, &last_ns);
+		}
+
+		for (; i < SXE_TXTS_POLL; i++) {
+			sxe_hw_ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
+			if ((last_ns != ts_ns) || (last_sec != ts_sec)) {
+				tx_tstamp_valid = false;
+				break;
+			}
+		}
+	}
+
+	if (!tx_tstamp_valid || ((ts_ns == adapter->ptp_ctxt.tx_hwtstamp_nsec)
+			&& (ts_sec == adapter->ptp_ctxt.tx_hwtstamp_sec))) {
+		PMD_LOG_DEBUG(DRV, "no valid ptp timestamp in tx register");
+		ret = -EINVAL;
+		goto l_end;
+	} else {
+		adapter->ptp_ctxt.tx_hwtstamp_nsec = ts_ns;
+		adapter->ptp_ctxt.tx_hwtstamp_sec  = ts_sec;
+		tx_tstamp_cycles = sxe_timesync_tx_tstamp_cycles_get(adapter);
+		ns = rte_timecounter_update(&adapter->ptp_ctxt.tx_tstamp_tc,
+						tx_tstamp_cycles);
+		PMD_LOG_DEBUG(DRV, "got tx_tstamp_cycles = %"
+			SXE_PRIU64"ns=%"SXE_PRIU64, tx_tstamp_cycles, ns);
+		*timestamp = rte_ns_to_timespec(ns);
+	}
+
+l_end:
+	return ret;
+}
+
+s32 sxe_timesync_adjust_time(struct rte_eth_dev *dev, s64 delta)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	PMD_LOG_DEBUG(DRV, "got delta = %"SXE_PRID64, delta);
+
+	adapter->ptp_ctxt.systime_tc.nsec += delta;
+	adapter->ptp_ctxt.rx_tstamp_tc.nsec += delta;
+	adapter->ptp_ctxt.tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+s32 sxe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 ns, systime_cycles;
+
+	systime_cycles = sxe_hw_ptp_systime_get(hw);
+	ns = rte_timecounter_update(&adapter->ptp_ctxt.systime_tc, systime_cycles);
+	PMD_LOG_DEBUG(DRV, "got systime_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64, 
+			systime_cycles, ns);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+s32 sxe_timesync_write_time(struct rte_eth_dev *dev,
+					const struct timespec *ts)
+{
+	u64 ns;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ns = rte_timespec_to_ns(ts);
+	PMD_LOG_DEBUG(DRV, "set systime ns = %"SXE_PRIU64, ns);
+	adapter->ptp_ctxt.systime_tc.nsec = ns;
+	adapter->ptp_ctxt.rx_tstamp_tc.nsec = ns;
+	adapter->ptp_ctxt.tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
diff --git a/drivers/net/sxe/pf/sxe_ptp.h b/drivers/net/sxe/pf/sxe_ptp.h
new file mode 100644
index 0000000000..367c1a34a0
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ptp.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_PTP_H__
+#define __SXE_PTP_H__
+
+s32 sxe_timesync_enable(struct rte_eth_dev *dev);
+
+s32 sxe_timesync_disable(struct rte_eth_dev *dev);
+
+s32 sxe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp,
+				 u32 flags __rte_unused);
+
+s32 sxe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp);
+
+s32 sxe_timesync_adjust_time(struct rte_eth_dev *dev, s64 delta);
+
+s32 sxe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);
+
+s32 sxe_timesync_write_time(struct rte_eth_dev *dev,
+					const struct timespec *ts);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_queue.c b/drivers/net/sxe/pf/sxe_queue.c
new file mode 100644
index 0000000000..8a0042022b
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_queue.c
@@ -0,0 +1,856 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#include "sxe_ethdev.h"
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#include "sxe_ethdev.h"
+#endif
+
+#include "rte_malloc.h"
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue.h"
+#include "sxe_offload.h"
+#include "sxe_queue_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#endif
+#include "sxe_compat_version.h"
+
+#define SXE_RXQ_SCAN_INTERVAL		4
+
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH   32
+#endif
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+#define RTE_SXE_WAIT_100_US	100
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
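+/* Map the per-VF Rx queue count to an SR-IOV pool layout: 1 or 2 rings
+ * per pool select 64 pools, 4 rings per pool select 32 pools; any
+ * other value is rejected.
+ */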
+static s32 sxe_vf_rss_rxq_num_validate(struct rte_eth_dev *dev, u16 rxq_num)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	switch (rxq_num) {
+	case SXE_1_RING_PER_POOL:
+	case SXE_2_RING_PER_POOL:
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
+		break;
+	case SXE_4_RING_PER_POOL:
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
+		break;
+	default:
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		SXE_HW_TXRX_RING_NUM_MAX / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	PMD_LOG_INFO(INIT, "enable sriov, vfs num:%u, %u pool mode, %u queue pre pool"
+				"vm total queue num are %u",
+				pci_dev->max_vfs,
+				RTE_ETH_DEV_SRIOV(dev).active,
+				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool,
+				RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx);
+l_end:
+	return ret;
+}
+
+s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	u16 rx_q_num = dev->data->nb_rx_queues;
+	u16 tx_q_num = dev->data->nb_tx_queues;
+
+	switch (dev_conf->rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		PMD_LOG_INFO(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in sriov");
+		break;
+
+	case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		PMD_LOG_ERR(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB_RSS mode unsupported in sriov");
+		ret = -EINVAL;
+		goto l_end;
+
+	case RTE_ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_VMDQ_RSS:
+		dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		if ((rx_q_num <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) &&
+				sxe_vf_rss_rxq_num_validate(dev, rx_q_num)) {
+			PMD_LOG_ERR(INIT, "sriov is active, invalid queue number[%d], "
+					"for vmdq rss, allowed values are 1, 2 or 4",
+					rx_q_num);
+			ret = -EINVAL;
+			goto l_end;
+		}
+		break;
+
+	case RTE_ETH_MQ_RX_VMDQ_ONLY:
+	case RTE_ETH_MQ_RX_NONE:
+		dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
+		break;
+
+	default:
+		PMD_LOG_ERR(INIT, "sriov is active, wrong mq_mode rx %d",
+				dev_conf->rxmode.mq_mode);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	switch (dev_conf->txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		PMD_LOG_INFO(INIT, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in sriov");
+		break;
+
+	case RTE_ETH_MQ_TX_DCB:
+		PMD_LOG_ERR(INIT, "RTE_ETH_MQ_TX_DCB mode unsupported in sriov");
+		ret = -EINVAL;
+		goto l_end;
+
+	default:
+		dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
+		break;
+	}
+
+	if ((rx_q_num > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+		(tx_q_num > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+		PMD_LOG_ERR(INIT, "SRIOV is active,"
+				" rx_q_num=%d tx_q_num=%d queue number"
+				" must be less than or equal to %d.",
+				rx_q_num, tx_q_num,
+				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sriov enable, rx_mq_mode=%d, tx_mq_mode=%d, "
+			"rx_q_mun=%d, tx_q_num=%d, q_pre_pool=%d",
+			dev_conf->rxmode.mq_mode, dev_conf->txmode.mq_mode,
+			rx_q_num, tx_q_num, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+
+l_end:
+	return ret;
+}
+
+#endif
+
+static inline s32 sxe_non_sriov_mq_mode_check(struct rte_eth_dev *dev)
+{
+	s32 ret = -EINVAL;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	u16 rx_q_num = dev->data->nb_rx_queues;
+	u16 tx_q_num = dev->data->nb_tx_queues;
+
+	switch (dev_conf->rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		PMD_LOG_ERR(INIT, "VMDQ+DCB+RSS mq_mode is not supported");
+		goto l_end;
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		if (rx_q_num != SXE_HW_TXRX_RING_NUM_MAX) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected, nb_rx_q != %d",
+					SXE_HW_TXRX_RING_NUM_MAX);
+			goto l_end;
+		}
+
+		if (!((dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools == \
+			RTE_ETH_16_POOLS ) || (
+			dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools == \
+			RTE_ETH_32_POOLS))) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected,"
+					" nb_queue_pools must be %d or %d",
+					RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
+			goto l_end;
+		}
+		break;
+	case RTE_ETH_MQ_RX_DCB:
+		if (!(dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs == RTE_ETH_4_TCS ||
+			dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs == RTE_ETH_8_TCS)) {
+			PMD_LOG_ERR(INIT, "DCB selected, nb_tcs != %d"
+					" and nb_tcs != %d",
+					RTE_ETH_4_TCS, RTE_ETH_8_TCS);
+			goto l_end;
+		}
+		break;
+	default:
+		PMD_LOG_INFO(INIT, "%d rx mq_mode supported",
+					dev_conf->rxmode.mq_mode);
+		break;
+	}
+
+	switch (dev_conf->txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_NONE:
+		if (tx_q_num > SXE_HW_TX_NONE_MODE_Q_NUM) {
+			PMD_LOG_ERR(INIT, "Neither VT nor DCB are enabled, "
+					"nb_tx_q > %d.",
+					SXE_HW_TX_NONE_MODE_Q_NUM);
+			goto l_end;
+		}
+		break;
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		if (tx_q_num != SXE_HW_TXRX_RING_NUM_MAX) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected, nb_tx_q != %d",
+					SXE_HW_TXRX_RING_NUM_MAX);
+			goto l_end;
+		}
+
+		if (!((dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == \
+			RTE_ETH_16_POOLS ) || (
+			dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == \
+			RTE_ETH_32_POOLS))) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected,"
+					" nb_queue_pools must be %d or %d",
+					RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
+			goto l_end;
+		}
+		break;
+	case RTE_ETH_MQ_TX_DCB:
+		if (!(dev_conf->tx_adv_conf.dcb_tx_conf.nb_tcs == RTE_ETH_4_TCS ||
+			dev_conf->tx_adv_conf.dcb_tx_conf.nb_tcs == RTE_ETH_8_TCS)) {
+			PMD_LOG_ERR(INIT, "DCB selected, nb_tcs != %d"
+					" and nb_tcs != %d",
+					RTE_ETH_4_TCS, RTE_ETH_8_TCS);
+			goto l_end;
+		}
+		break;
+	default:
+		PMD_LOG_INFO(INIT, "%d tx mq_mode supported",
+					dev_conf->txmode.mq_mode);
+		break;
+	}
+
+	ret = 0;
+
+	PMD_LOG_INFO(INIT, "sriov disable, rx_mq_mode=%d, tx_mq_mode=%d, "
+		"rx_q_mun=%d, tx_q_num=%d",
+		dev_conf->rxmode.mq_mode, dev_conf->txmode.mq_mode,
+		rx_q_num, tx_q_num);
+
+l_end:
+	return ret;
+}
+
+s32 sxe_mq_mode_check(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	if (RTE_ETH_DEV_SRIOV(dev).active) {
+		ret = sxe_sriov_mq_mode_check(dev);
+#else
+	if (RTE_ETH_DEV_SRIOV(dev).active) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "sriov not supported");
+#endif
+	} else {
+		ret = sxe_non_sriov_mq_mode_check(dev);
+	}
+
+	return ret;
+}
+
+void sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info)
+{
+	__sxe_tx_queue_info_get(dev, queue_id, q_info);
+
+	return;
+}
+
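+/* Validate the Tx ring depth and derive the rs/free thresholds: both
+ * default to 32, their sum must fit in the ring, rs_thresh must divide
+ * the ring depth, and WTHRESH must be 0 when rs_thresh exceeds 1.
+ */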
+s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
+				u16 *rs_thresh, u16 *free_thresh,
+				const struct rte_eth_txconf *tx_conf)
+{
+	s32 ret = -EINVAL;
+
+	if (ring_depth % SXE_TX_DESC_RING_ALIGN != 0 ||
+		(ring_depth > SXE_MAX_RING_DESC) ||
+		(ring_depth < SXE_MIN_RING_DESC)) {
+		goto l_end;
+	}
+
+	*free_thresh = (u16)((tx_conf->tx_free_thresh) ?
+			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	*rs_thresh = (DEFAULT_TX_RS_THRESH + *free_thresh > ring_depth) ?
+			ring_depth - *free_thresh : DEFAULT_TX_RS_THRESH;
+
+	if (tx_conf->tx_rs_thresh > 0) {
+		*rs_thresh = tx_conf->tx_rs_thresh;
+	}
+
+	if (*rs_thresh + *free_thresh > ring_depth) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh + tx_free_thresh must not "
+			     "exceed nb_desc. (tx_rs_thresh=%u "
+			     "tx_free_thresh=%u nb_desc=%u port = %d)",
+			     *rs_thresh, *free_thresh,
+			     ring_depth, dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*rs_thresh >= (ring_depth - 2)) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than the number "
+				"of TX descriptors minus 2. (tx_rs_thresh=%u "
+				"port=%d)",
+				*rs_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*rs_thresh > DEFAULT_TX_RS_THRESH) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less or equal than %u. "
+			"(tx_rs_thresh=%u port=%d)",
+			DEFAULT_TX_RS_THRESH, *rs_thresh,
+			dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*free_thresh >= (ring_depth - 3)) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than the "
+			     "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d)",
+			     *free_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*rs_thresh > *free_thresh) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d)",
+			     *free_thresh, *rs_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	if ((ring_depth % *rs_thresh) != 0) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u "
+			     "port=%d, ring_depth=%d)",
+			     *rs_thresh, dev->data->port_id, ring_depth);
+		goto l_end;
+	}
+
+	if ((*rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+		PMD_LOG_ERR(INIT, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. "
+			     "(tx_rs_thresh=%u port=%d)",
+			     *rs_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	ret = 0;
+
+l_end:
+	return ret;
+}
+
+static void __rte_cold sxe_tx_buffer_ring_free(sxe_tx_queue_s *txq)
+{
+	if (txq != NULL && txq->buffer_ring != NULL) {
+		rte_free(txq->buffer_ring);
+	}
+
+	return;
+}
+
+static void __rte_cold sxe_tx_queue_mbufs_release(sxe_tx_queue_s *txq)
+{
+	u32 i;
+
+	if (txq->buffer_ring != NULL) {
+		for (i = 0; i < txq->ring_depth; i++) {
+			if (txq->buffer_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->buffer_ring[i].mbuf);
+				txq->buffer_ring[i].mbuf = NULL;
+			}
+		}
+	}
+
+	return;
+}
+
+void __rte_cold sxe_tx_queue_free(sxe_tx_queue_s *txq)
+{
+	__sxe_tx_queue_free(txq);
+
+	return;
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxe_tx_queue_release(void *txq)
+{
+	sxe_tx_queue_free(txq);
+	return;
+}
+#else
+void __rte_cold sxe_tx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx)
+{
+	sxe_tx_queue_free(dev->data->tx_queues[queue_idx]);
+	return;
+}
+#endif
+
+static void __rte_cold sxe_tx_queue_init(sxe_tx_queue_s *txq)
+{
+	u16 prev, i;
+	volatile sxe_tx_data_desc_u *txd;
+	static const sxe_tx_data_desc_u zeroed_desc = {{0}};
+	struct sxe_tx_buffer *tx_buffer = txq->buffer_ring;
+
+	for (i = 0; i < txq->ring_depth; i++) {
+		txq->desc_ring[i] = zeroed_desc;
+	}
+
+	prev = txq->ring_depth - 1;
+	for (i = 0; i < txq->ring_depth; i++) {
+		txd = &txq->desc_ring[i];
+		txd->wb.status = rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD);
+		tx_buffer[i].mbuf       = NULL;
+		tx_buffer[i].last_id    = i;
+		tx_buffer[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->ctx_curr      = 0;
+	txq->desc_used_num = 0;
+	txq->desc_free_num = txq->ring_depth - 1;
+	txq->next_to_use   = 0;
+	txq->next_to_clean = txq->ring_depth - 1;
+	txq->next_dd       = txq->rs_thresh  - 1;
+	txq->next_rs       = txq->rs_thresh  - 1;
+	memset((void *)&txq->ctx_cache, 0,
+			SXE_CTXT_DESC_NUM * sizeof(struct sxe_ctxt_info));
+
+	return;
+}
+
+sxe_tx_queue_s * __rte_cold sxe_tx_queue_alloc(
+					struct rte_eth_dev *dev,
+					u16 queue_idx,
+					u16 ring_depth,
+					u32 socket_id)
+{
+	sxe_tx_queue_s *txq;
+	const struct rte_memzone *tz;
+
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		sxe_tx_queue_free(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	txq = rte_zmalloc_socket("tx queue", sizeof(sxe_tx_queue_s),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] alloc failed", queue_idx);
+		goto l_end;
+	}
+
+	tz = rte_eth_dma_zone_reserve(dev, "tx_desc_ring", queue_idx,
+			sizeof(sxe_tx_data_desc_u) * SXE_MAX_RING_DESC,
+			SXE_ALIGN, socket_id);
+	if (tz == NULL) {
+		PMD_LOG_ERR(INIT, "tx desc ring alloc failed, queue_id=%d", queue_idx);
+		rte_free(txq);
+		txq = NULL;
+		goto l_end;
+	}
+
+	txq->buffer_ring = rte_zmalloc_socket("tx_buffer_ring",
+				sizeof(struct sxe_tx_buffer) * ring_depth,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->buffer_ring == NULL) {
+		PMD_LOG_ERR(INIT, "tx buffer alloc failed, queue_id=%d", queue_idx);
+		rte_memzone_free(tz);
+		rte_free(txq);
+		txq = NULL;
+		goto l_end;
+	}
+
+	txq->mz = tz;
+	txq->base_addr = tz->iova;
+	txq->desc_ring = (sxe_tx_data_desc_u *)tz->addr;
+
+l_end:
+	return txq;
+}
+
+s32 __rte_cold sxe_tx_queue_start(struct rte_eth_dev *dev, u16 queue_id)
+{
+	sxe_tx_queue_s *txq = dev->data->tx_queues[queue_id];
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_tx_ring_head_init(hw, txq->reg_idx);
+	sxe_hw_tx_ring_tail_init(hw, txq->reg_idx);
+	sxe_hw_tx_ring_switch(hw, txq->reg_idx, true);
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+s32 __rte_cold sxe_tx_queue_stop(struct rte_eth_dev *dev, u16 queue_id)
+{
+	s32 poll_ms = RTE_SXE_REGISTER_POLL_WAIT_10_MS;
+	u32 head, tail;
+	sxe_tx_queue_s *txq = dev->data->tx_queues[queue_id];
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	do {
+		rte_delay_us(RTE_SXE_WAIT_100_US);
+		sxe_hw_tx_ring_info_get(hw, txq->reg_idx, &head, &tail);
+
+	} while (--poll_ms && (head != tail));
+
+	if (!poll_ms) {
+		PMD_LOG_ERR(INIT, "Tx Queue %d is not empty when stopping.",
+				queue_id);
+	}
+
+	sxe_hw_tx_ring_switch(hw, txq->reg_idx, false);
+
+	if (txq->ops != NULL) {
+		txq->ops->mbufs_release(txq);
+		txq->ops->init(txq);
+	}
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+void sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	__sxe_rx_queue_info_get(dev, queue_id, qinfo);
+
+	return;
+}
+
+s32 __rte_cold sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
+{
+	return __sxe_rx_queue_mbufs_alloc(rxq);
+}
+
+s32 __rte_cold sxe_rx_queue_start(struct rte_eth_dev *dev,
+						u16 queue_id)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw      *hw = &adapter->hw;
+	struct sxe_rx_queue *rxq;
+	u16 reg_idx;
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[queue_id];
+	reg_idx = rxq->reg_idx;
+
+	ret = sxe_rx_queue_mbufs_alloc(rxq);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "could not alloc mbuf for queue:%d",
+			     queue_id);
+		goto l_end;
+	}
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, true);
+
+	sxe_hw_rx_queue_desc_reg_configure(hw, reg_idx, 0, rxq->ring_depth - 1);
+	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+l_end:
+	return ret;
+}
+
+STATIC void __rte_cold sxe_rx_queue_sc_mbufs_free(struct rte_mbuf *mbuf)
+{
+	u16 i;
+	u16 num_segs = mbuf->nb_segs;
+	struct rte_mbuf *next_seg;
+
+	for (i = 0; i < num_segs; i++) {
+		next_seg = mbuf->next;
+		rte_pktmbuf_free_seg(mbuf);
+		mbuf = next_seg;
+	}
+
+	return;
+}
+
+void __rte_cold sxe_rx_queue_mbufs_free(struct sxe_rx_queue *rxq)
+{
+	u16 i;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	if (rxq->is_using_sse) {
+		sxe_rx_queue_vec_mbufs_release(rxq);
+		goto l_out;
+	}
+#endif
+
+	if (rxq->buffer_ring != NULL) {
+		for (i = 0; i < rxq->ring_depth; i++) {
+			if (rxq->buffer_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(rxq->buffer_ring[i].mbuf);
+				rxq->buffer_ring[i].mbuf = NULL;
+			}
+		}
+		if (rxq->completed_pkts_num) {
+			for (i = 0; i < rxq->completed_pkts_num; ++i) {
+				struct rte_mbuf *mbuf;
+
+				mbuf = rxq->completed_ring[rxq->next_ret_pkg + i];
+				rte_pktmbuf_free_seg(mbuf);
+			}
+			rxq->completed_pkts_num = 0;
+		}
+	}
+
+	if (rxq->sc_buffer_ring) {
+		for (i = 0; i < rxq->ring_depth; i++) {
+			if (rxq->sc_buffer_ring[i].mbuf) {
+				sxe_rx_queue_sc_mbufs_free(rxq->sc_buffer_ring[i].mbuf);
+				rxq->sc_buffer_ring[i].mbuf = NULL;
+			}
+		}
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+l_out:
+#endif
+
+	return;
+}
+
+void __rte_cold sxe_rx_queue_init(bool rx_batch_alloc_allowed,
+						struct sxe_rx_queue *rxq)
+{
+	static const sxe_rx_data_desc_u zeroed_desc = {{0}};
+	u16 i;
+	u16 len = rxq->ring_depth;
+
+	if (rx_batch_alloc_allowed) {
+		len += RTE_PMD_SXE_MAX_RX_BURST;
+	}
+
+	for (i = 0; i < len; i++) {
+		rxq->desc_ring[i] = zeroed_desc;
+	}
+
+	memset(&rxq->fake_mbuf, 0, sizeof(rxq->fake_mbuf));
+	for (i = rxq->ring_depth; i < len; ++i) {
+		rxq->buffer_ring[i].mbuf = &rxq->fake_mbuf;
+	}
+
+	rxq->completed_pkts_num = 0;
+	rxq->next_ret_pkg = 0;
+	rxq->batch_alloc_trigger = rxq->batch_alloc_size - 1;
+	rxq->processing_idx = 0;
+	rxq->hold_num = 0;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	if (rxq->pkt_first_seg != NULL) {
+		rte_pktmbuf_free(rxq->pkt_first_seg);
+	}
+
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+
+#if defined(RTE_ARCH_X86)
+	rxq->realloc_start = 0;
+	rxq->realloc_num = 0;
+#endif
+#endif
+
+	return;
+}
+
+void __rte_cold sxe_rx_queue_free(struct sxe_rx_queue *rxq)
+{
+	__sxe_rx_queue_free(rxq);
+	return;
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxe_rx_queue_release(void *rxq)
+{
+	sxe_rx_queue_free(rxq);
+	return;
+}
+#else
+void __rte_cold sxe_rx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx)
+{
+	sxe_rx_queue_free(dev->data->rx_queues[queue_idx]);
+	return;
+}
+#endif
+
+s32 __rte_cold sxe_rx_queue_stop(struct rte_eth_dev *dev, u16 queue_id)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_rx_queue *rxq;
+	u16 reg_idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[queue_id];
+	reg_idx = rxq->reg_idx;
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, false);
+
+	rte_delay_us(RTE_SXE_WAIT_100_US);
+
+	sxe_rx_queue_mbufs_free(rxq);
+	sxe_rx_queue_init(adapter->rx_batch_alloc_allowed, rxq);
+	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
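+/* Estimate the number of completed Rx descriptors by scanning DD bits
+ * in steps of SXE_RXQ_SCAN_INTERVAL, wrapping around the ring end.
+ */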
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+u32 sxe_rx_queue_count(struct rte_eth_dev *dev, u16 queue_id)
+#else
+u32 sxe_rx_queue_count(void *rx_queue)
+#endif
+{
+	volatile sxe_rx_data_desc_u *desc;
+	struct sxe_rx_queue *rxq;
+	u32 count = 0;
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	rxq = dev->data->rx_queues[queue_id];
+#else
+	rxq = rx_queue;
+#endif
+
+	desc = &(rxq->desc_ring[rxq->processing_idx]);
+
+	while ((count < rxq->ring_depth) &&
+		(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+		count += SXE_RXQ_SCAN_INTERVAL;
+		desc  += SXE_RXQ_SCAN_INTERVAL;
+		if (rxq->processing_idx + count >= rxq->ring_depth) {
+			desc = &(rxq->desc_ring[rxq->processing_idx +
+				count - rxq->ring_depth]);
+		}
+	}
+
+	return count;
+}
+
+void __rte_cold sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	__sxe_txrx_queues_clear(dev, rx_batch_alloc_allowed);
+
+	return;
+}
+
+void sxe_queues_free(struct rte_eth_dev *dev)
+{
+	__sxe_queues_free(dev);
+	return;
+}
+
+const struct sxe_txq_ops def_txq_ops = {
+	.init             = sxe_tx_queue_init,
+	.mbufs_release    = sxe_tx_queue_mbufs_release,
+	.buffer_ring_free = sxe_tx_buffer_ring_free,
+};
+
+const struct sxe_txq_ops *sxe_tx_default_ops_get(void)
+{
+	return &def_txq_ops;
+}
+
+void sxe_multi_queue_tx_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+	u16 pools_num = RTE_ETH_DEV_SRIOV(dev).active;
+	bool sriov_active = !!pools_num;
+	bool vmdq_active = (dev->data->dev_conf.txmode.mq_mode ==
+				RTE_ETH_MQ_TX_VMDQ_ONLY);
+
+	sxe_hw_tx_multi_queue_configure(hw, vmdq_active, sriov_active, pools_num);
+
+	return;
+}
+
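+/* Program the per-queue Tx rate limiter: the rate factor link_speed /
+ * tx_rate is written as a fixed-point integer/fraction pair, and the
+ * DCB max memory window is enlarged when jumbo frames are possible.
+ */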
+#if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
+					u16 queue_idx, u16 tx_rate)
+#else
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
+					u16 queue_idx, u32 tx_rate)
+#endif
+{
+	int ret = 0;
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	struct rte_eth_rxmode *rxmode;
+#endif
+	u32 rf_dec, rf_int, bcnrc_val;
+	u16 link_speed = dev->data->dev_link.link_speed;
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (queue_idx >= SXE_HW_TXRX_RING_NUM_MAX) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (tx_rate != 0) {
+		rf_int = (u32)link_speed / (u32)tx_rate;
+		rf_dec = (u32)link_speed % (u32)tx_rate;
+		rf_dec = (rf_dec << SXE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+		bcnrc_val = SXE_RTTBCNRC_RS_ENA;
+		bcnrc_val |= ((rf_int << SXE_RTTBCNRC_RF_INT_SHIFT) &
+				SXE_RTTBCNRC_RF_INT_MASK);
+		bcnrc_val |= (rf_dec & SXE_RTTBCNRC_RF_DEC_MASK);
+	} else {
+		bcnrc_val = 0;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	rxmode = &dev->data->dev_conf.rxmode;
+
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= SXE_MAX_JUMBO_FRAME_SIZE)) {
+#else
+	if (dev->data->mtu + SXE_ETH_OVERHEAD >= SXE_MAX_JUMBO_FRAME_SIZE) {
+#endif
+		sxe_hw_dcb_max_mem_window_set(hw,
+						SXE_MMW_SIZE_JUMBO_FRAME);
+	} else {
+		sxe_hw_dcb_max_mem_window_set(hw, SXE_MMW_SIZE_DEFAULT);
+	}
+
+	sxe_hw_dcb_tx_ring_rate_factor_set(hw, queue_idx, bcnrc_val);
+
+l_end:
+	return ret;
+}
+
diff --git a/drivers/net/sxe/pf/sxe_queue.h b/drivers/net/sxe/pf/sxe_queue.h
new file mode 100644
index 0000000000..ef3036a07d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_queue.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_QUEUE_H__
+#define __SXE_QUEUE_H__
+
+#include "sxe_dpdk_version.h"
+#include "sxe_queue_common.h"
+
+#define SXE_TXRX_RING_NUM_MAX     64
+
+#define SXE_TX_MAX_SEG          40
+
+#define	SXE_MIN_RING_DESC	32
+#define	SXE_MAX_RING_DESC	4096
+
+#define SXE_MMW_SIZE_DEFAULT        0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600
+
+#define SXE_DEFAULT_RX_FREE_THRESH  32
+#define SXE_DEFAULT_RX_PTHRESH      8
+#define SXE_DEFAULT_RX_HTHRESH      8
+#define SXE_DEFAULT_RX_WTHRESH      0
+
+#define SXE_DEFAULT_TX_FREE_THRESH  32
+#define SXE_DEFAULT_TX_PTHRESH      32
+#define SXE_DEFAULT_TX_HTHRESH      0
+#define SXE_DEFAULT_TX_WTHRESH      0
+#define SXE_DEFAULT_TX_RSBIT_THRESH 32
+
+#define SXE_ALIGN               128
+#define SXE_RX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxe_rx_data_desc_u))
+#define SXE_TX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxe_tx_data_desc_u))
+
+#define RTE_SXE_REGISTER_POLL_WAIT_10_MS  10
+
+typedef union sxe_tx_data_desc sxe_tx_data_desc_u;
+typedef struct sxe_rx_buffer   sxe_rx_buffer_s;
+typedef union sxe_rx_data_desc sxe_rx_data_desc_u;
+typedef struct sxe_tx_queue    sxe_tx_queue_s;
+typedef struct sxe_rx_queue    sxe_rx_queue_s;
+
+struct sxe_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
+				u16 *rs_thresh, u16 *free_thresh,
+				const struct rte_eth_txconf *tx_conf);
+
+sxe_tx_queue_s * __rte_cold sxe_tx_queue_alloc(
+					struct rte_eth_dev *dev,
+					u16 queue_idx,
+					u16 ring_depth,
+					u32 socket_id);
+
+s32 __rte_cold sxe_tx_queue_start(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 __rte_cold sxe_tx_queue_stop(struct rte_eth_dev *dev, u16 queue_id);
+
+void sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void sxe_rx_queue_release(void *rxq);
+
+#else
+void __rte_cold sxe_rx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx);
+#endif
+
+s32 sxe_rx_queue_start(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 sxe_rx_queue_stop(struct rte_eth_dev *dev, u16 queue_id);
+
+void sxe_rx_queue_init(bool rx_batch_alloc_allowed,
+				sxe_rx_queue_s *rxq);
+
+void sxe_rx_queue_free(sxe_rx_queue_s *rxq);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+u32 sxe_rx_queue_count(struct rte_eth_dev *dev, u16 queue_id);
+
+#else
+u32 sxe_rx_queue_count(void *rx_queue);
+#endif
+
+s32 sxe_mq_mode_check(struct rte_eth_dev *dev);
+
+void sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void sxe_queues_free(struct rte_eth_dev *dev);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxe_tx_queue_release(void *txq);
+
+#else
+void __rte_cold sxe_tx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx);
+#endif
+
+void sxe_multi_queue_tx_configure(struct rte_eth_dev *dev);
+
+void sxe_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_txq_info *q_info);
+
+u16 sxe_pkts_simple_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+u16 sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   u16 pkts_num);
+#endif
+
+u16 sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+u16 sxe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+int sxe_tx_descriptor_status(void *tx_queue, u16 offset);
+
+#if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
+					u16 queue_idx, u16 tx_rate);
+
+#else
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev, 
+					u16 queue_idx, u32 tx_rate);
+#endif
+
+const struct sxe_txq_ops *sxe_tx_default_ops_get(void);
+
+s32 __rte_cold sxe_rx_queue_mbufs_alloc(sxe_rx_queue_s *rxq);
+
+void __rte_cold sxe_tx_queue_free(sxe_tx_queue_s *txq);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev);
+
+#endif
+
+void __rte_cold sxe_rx_queue_mbufs_free(sxe_rx_queue_s *rxq);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_rx.c b/drivers/net/sxe/pf/sxe_rx.c
new file mode 100644
index 0000000000..febd9fc634
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_rx.c
@@ -0,0 +1,1567 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#include "sxe_ethdev.h"
+#endif
+
+#include "sxe.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_queue.h"
+#include "sxe_offload.h"
+#include "sxe_dcb.h"
+#include "sxe_queue_common.h"
+#include "sxe_vf.h"
+#include "sxe_errno.h"
+#include "sxe_irq.h"
+#include "sxe_ethdev.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#endif
+#include "sxe_rx_common.h"
+
+#define SXE_LRO_HDR_SIZE				128
+
+#define SXE_PACKET_TYPE_ETHER				0x00
+#define SXE_PACKET_TYPE_IPV4				0x01
+#define SXE_PACKET_TYPE_IPV4_TCP			0x11
+#define SXE_PACKET_TYPE_IPV4_UDP			0x21
+#define SXE_PACKET_TYPE_IPV4_SCTP			0x41
+#define SXE_PACKET_TYPE_IPV4_EXT			0x03
+#define SXE_PACKET_TYPE_IPV4_EXT_TCP			0x13
+#define SXE_PACKET_TYPE_IPV4_EXT_UDP			0x23
+#define SXE_PACKET_TYPE_IPV4_EXT_SCTP			0x43
+#define SXE_PACKET_TYPE_IPV6				0x04
+#define SXE_PACKET_TYPE_IPV6_TCP			0x14
+#define SXE_PACKET_TYPE_IPV6_UDP			0x24
+#define SXE_PACKET_TYPE_IPV6_SCTP			0x44
+#define SXE_PACKET_TYPE_IPV6_EXT			0x0C
+#define SXE_PACKET_TYPE_IPV6_EXT_TCP			0x1C
+#define SXE_PACKET_TYPE_IPV6_EXT_UDP			0x2C
+#define SXE_PACKET_TYPE_IPV6_EXT_SCTP			0x4C
+#define SXE_PACKET_TYPE_IPV4_IPV6			0x05
+#define SXE_PACKET_TYPE_IPV4_IPV6_TCP			0x15
+#define SXE_PACKET_TYPE_IPV4_IPV6_UDP			0x25
+#define SXE_PACKET_TYPE_IPV4_IPV6_SCTP			0x45
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6			0x07
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_TCP		0x17
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_UDP		0x27
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP		0x47
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT			0x0D
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT_TCP		0x1D
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT_UDP		0x2D
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP		0x4D
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT		0x0F
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP		0x1F
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP		0x2F
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP		0x4F
+
+#define SXE_PACKET_TYPE_NVGRE                   0x00
+#define SXE_PACKET_TYPE_NVGRE_IPV4              0x01
+#define SXE_PACKET_TYPE_NVGRE_IPV4_TCP          0x11
+#define SXE_PACKET_TYPE_NVGRE_IPV4_UDP          0x21
+#define SXE_PACKET_TYPE_NVGRE_IPV4_SCTP         0x41
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT          0x03
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP      0x13
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP      0x23
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP     0x43
+#define SXE_PACKET_TYPE_NVGRE_IPV6              0x04
+#define SXE_PACKET_TYPE_NVGRE_IPV6_TCP          0x14
+#define SXE_PACKET_TYPE_NVGRE_IPV6_UDP          0x24
+#define SXE_PACKET_TYPE_NVGRE_IPV6_SCTP         0x44
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT          0x0C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP      0x1C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP      0x2C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP     0x4C
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6         0x05
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP     0x15
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP     0x25
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT     0x0D
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0x1D
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0x2D
+
+#define SXE_PACKET_TYPE_VXLAN                   0x80
+#define SXE_PACKET_TYPE_VXLAN_IPV4              0x81
+#define SXE_PACKET_TYPE_VXLAN_IPV4_TCP          0x91
+#define SXE_PACKET_TYPE_VXLAN_IPV4_UDP          0xA1
+#define SXE_PACKET_TYPE_VXLAN_IPV4_SCTP         0xC1
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT          0x83
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP      0x93
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP      0xA3
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP     0xC3
+#define SXE_PACKET_TYPE_VXLAN_IPV6              0x84
+#define SXE_PACKET_TYPE_VXLAN_IPV6_TCP          0x94
+#define SXE_PACKET_TYPE_VXLAN_IPV6_UDP          0xA4
+#define SXE_PACKET_TYPE_VXLAN_IPV6_SCTP         0xC4
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT          0x8C
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP      0x9C
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP      0xAC
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP     0xCC
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6         0x85
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP     0x95
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP     0xA5
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT     0x8D
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0x9D
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0xAD
+
+/* packet types currently supported by SXE */
+const u32 sxe_ptype_table[SXE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+	[SXE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
+	[SXE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4,
+	[SXE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT,
+	[SXE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6,
+	[SXE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+	RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+		RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+const u32 sxe_ptype_table_tn[SXE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
+	[SXE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER,
+	[SXE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+	[SXE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+		RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+		RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+		RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+		RTE_PTYPE_INNER_L4_UDP,
+
+	[SXE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+	[SXE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT,
+	[SXE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
+
+void sxe_rx_mbuf_common_header_fill(
+					sxe_rx_queue_s *rxq,
+					struct rte_mbuf *mbuf,
+					volatile union sxe_rx_data_desc desc,
+					u32 pkt_info, u32 staterr)
+{
+	u64 pkt_flags;
+	u64 vlan_flags = rxq->vlan_flags;
+
+	LOG_DEBUG("port_id=%u, rxq=%u, desc.lower=0x%"SXE_PRIX64", upper=0x%"SXE_PRIX64","
+			"pkt_info=0x%x, staterr=0x%x",
+			rxq->port_id, rxq->queue_id,
+			rte_le_to_cpu_64(desc.read.pkt_addr),
+			rte_le_to_cpu_64(desc.read.hdr_addr),
+			pkt_info, staterr);
+
+	mbuf->port = rxq->port_id;
+
+	mbuf->vlan_tci = rte_le_to_cpu_16(desc.wb.upper.vlan);
+
+	pkt_flags = sxe_rx_desc_status_to_pkt_flags(staterr, vlan_flags);
+	pkt_flags |= sxe_rx_desc_error_to_pkt_flags(staterr);
+	pkt_flags |= sxe_rx_desc_pkt_info_to_pkt_flags((u16)pkt_info);
+
+	if (pkt_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)) {
+		rxq->rx_stats.csum_err++;
+		LOG_WARN("pkt_flags:0x%"SXE_PRIX64" rx checksum error",
+				pkt_flags);
+	}
+
+	mbuf->ol_flags = pkt_flags;
+	mbuf->packet_type =
+		sxe_rxd_pkt_info_to_pkt_type(pkt_info,
+						rxq->pkt_type_mask);
+
+	if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) {
+		mbuf->hash.rss = rte_le_to_cpu_32(
+				desc.wb.lower.hi_dword.rss);
+	} else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
+		mbuf->hash.fdir.hash = rte_le_to_cpu_16(
+				desc.wb.lower.hi_dword.csum_ip.csum) &
+				SXE_SAMPLE_HASH_MASK;
+		mbuf->hash.fdir.id = rte_le_to_cpu_16(
+				desc.wb.lower.hi_dword.csum_ip.ip_id);
+	}
+}
+
+static inline void sxe_rx_resource_prefetch(u16 next_idx,
+				sxe_rx_buffer_s *buf_ring,
+				volatile union sxe_rx_data_desc *desc_ring)
+{
+	/* prefetch next mbuf */
+	rte_sxe_prefetch(buf_ring[next_idx].mbuf);
+
+	if ((next_idx & 0x3) == 0) {
+		rte_sxe_prefetch(&desc_ring[next_idx]);
+		rte_sxe_prefetch(&buf_ring[next_idx]);
+	}
+}
+
+u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num)
+{
+	return __sxe_pkts_recv(rx_queue, rx_pkts, pkts_num);
+}
+
+static inline u16 sxe_ret_pkts_to_user(sxe_rx_queue_s *rxq,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	struct rte_mbuf **completed_mbuf = &rxq->completed_ring[rxq->next_ret_pkg];
+	u16 i;
+
+	pkts_num = (u16)RTE_MIN(pkts_num, rxq->completed_pkts_num);
+
+	for (i = 0; i < pkts_num; ++i) {
+		rx_pkts[i] = completed_mbuf[i];
+	}
+
+	/* Update completed packets num and next available position */
+	rxq->completed_pkts_num = (u16)(rxq->completed_pkts_num - pkts_num);
+	rxq->next_ret_pkg = (u16)(rxq->next_ret_pkg + pkts_num);
+
+	return pkts_num;
+}
+
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD SXE: LOOK_AHEAD must be 8\n"
+#endif
+
+static inline u16 sxe_rx_hw_ring_scan(sxe_rx_queue_s *rxq)
+{
+	volatile union sxe_rx_data_desc *rx_desc;
+	sxe_rx_buffer_s *rx_buf;
+	struct rte_mbuf *cur_mb;
+	u16 num_dd_set;
+	u32 status_arr[LOOK_AHEAD];
+	u32 pkt_info[LOOK_AHEAD];
+	u16 i, j;
+	u32 status;
+	u16 done_num = 0;
+	u16 pkt_len;
+
+	/* Obtain the desc and rx buff to be processed  */
+	rx_desc = &rxq->desc_ring[rxq->processing_idx];
+	rx_buf = &rxq->buffer_ring[rxq->processing_idx];
+
+	status = rx_desc->wb.upper.status_error;
+
+	if (!(status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+		goto l_end;
+	}
+
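+	/* Scan up to RTE_PMD_SXE_MAX_RX_BURST descriptors in LOOK_AHEAD
+	 * groups: snapshot the status words, count the leading descriptors
+	 * with DD set, fill their mbufs and park them in the completed ring.
+	 */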
+	for (i = 0; i < RTE_PMD_SXE_MAX_RX_BURST;
+		i += LOOK_AHEAD, rx_desc += LOOK_AHEAD, rx_buf += LOOK_AHEAD) {
+		for (j = 0; j < LOOK_AHEAD; j++) {
+			status_arr[j] = rte_le_to_cpu_32(
+				rx_desc[j].wb.upper.status_error);
+		}
+
+		rte_smp_rmb();
+
+		for (num_dd_set = 0; num_dd_set < LOOK_AHEAD &&
+			(status_arr[num_dd_set] & SXE_RXDADV_STAT_DD);
+			num_dd_set++) {
+			;
+		}
+
+		for (j = 0; j < num_dd_set; j++) {
+			pkt_info[j] = rte_le_to_cpu_32(
+				rx_desc[j].wb.lower.lo_dword.data);
+		}
+
+		done_num += num_dd_set;
+
+		for (j = 0; j < num_dd_set; ++j) {
+			cur_mb = rx_buf[j].mbuf;
+
+			pkt_len = (u16)(rte_le_to_cpu_16(rx_desc[j].wb.upper.length) -
+							rxq->crc_len);
+			cur_mb->pkt_len = pkt_len;
+			cur_mb->data_len = pkt_len;
+			sxe_rx_mbuf_common_header_fill(rxq, cur_mb, rx_desc[j],
+						pkt_info[j], status_arr[j]);
+		}
+
+		for (j = 0; j < LOOK_AHEAD; ++j) {
+			rxq->completed_ring[i + j] = rx_buf[j].mbuf;
+		}
+
+		if (num_dd_set != LOOK_AHEAD) {
+			break;
+		}
+	}
+
+	for (i = 0; i < done_num; ++i) {
+		rxq->buffer_ring[rxq->processing_idx + i].mbuf = NULL;
+	}
+
+l_end:
+	return done_num;
+}
+
+STATIC inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
+							bool reset_mbuf)
+{
+	volatile union sxe_rx_data_desc *desc_ring;
+	sxe_rx_buffer_s *buf_ring;
+	struct rte_mbuf *mbuf;
+	u16 alloc_idx;
+	__le64 dma_addr;
+	s32 diag, i;
+	s32 ret = 0;
+
+	alloc_idx = rxq->batch_alloc_trigger - (rxq->batch_alloc_size - 1);
+	buf_ring = &rxq->buffer_ring[alloc_idx];
+
+	LOG_DEBUG("port_id=%u, rxq=%u, alloc_idx=%u, "
+			"batch_alloc_trigger=%u, batch_alloc_size=%u\n",
+			rxq->port_id, rxq->queue_id, alloc_idx,
+			rxq->batch_alloc_trigger, rxq->batch_alloc_size);
+
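+	/* Pull batch_alloc_size mbufs from the mempool in one bulk get and
+	 * re-arm the descriptors in the alloc_idx window with the new mbuf
+	 * DMA addresses; the trigger wraps together with the ring.
+	 */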
+	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)buf_ring,
+				    rxq->batch_alloc_size);
+	if (unlikely(diag != 0)) {
+		LOG_DEBUG("port_id=%u, rxq=%u buffer alloc failed\n",
+				rxq->port_id, rxq->queue_id);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	desc_ring = &rxq->desc_ring[alloc_idx];
+	for (i = 0; i < rxq->batch_alloc_size; ++i) {
+		mbuf = buf_ring[i].mbuf;
+		if (reset_mbuf) {
+			mbuf->port = rxq->port_id;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		desc_ring[i].read.hdr_addr = 0;
+		desc_ring[i].read.pkt_addr = dma_addr;
+	}
+
+	rxq->batch_alloc_trigger = rxq->batch_alloc_trigger + rxq->batch_alloc_size;
+	if (rxq->batch_alloc_trigger >= rxq->ring_depth) {
+		rxq->batch_alloc_trigger = rxq->batch_alloc_size - 1;
+	}
+
+l_end:
+	return ret;
+}
+
+static inline u16 sxe_burst_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	sxe_rx_queue_s *rxq = (sxe_rx_queue_s *)rx_queue;
+	u16 done_num;
+
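+	/* Return previously completed mbufs first; only when the completed
+	 * ring is drained scan the hardware ring again and refill buffers
+	 * once the batch-alloc trigger is crossed.
+	 */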
+	if (rxq->completed_pkts_num) {
+		done_num = sxe_ret_pkts_to_user(rxq, rx_pkts, pkts_num);
+		LOG_DEBUG("there are %u mbuf in completed ring "
+				"of queue[%u] return to user, done_num=%u",
+				rxq->completed_pkts_num,
+				rxq->queue_id, done_num);
+		goto l_end;
+	}
+
+	done_num = (u16)sxe_rx_hw_ring_scan(rxq);
+
+	rxq->next_ret_pkg = 0;
+	rxq->completed_pkts_num = done_num;
+	rxq->processing_idx = (u16)(rxq->processing_idx + done_num);
+
+	if (rxq->processing_idx > rxq->batch_alloc_trigger) {
+		u16 alloced_idx = rxq->batch_alloc_trigger;
+
+		if (sxe_rx_bufs_batch_alloc(rxq, true) != 0) {
+			u32 i, j;
+
+			LOG_ERROR("rx mbuf alloc failed port_id=%u "
+					"queue_id=%u", (unsigned) rxq->port_id,
+					(u16)rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+				rxq->batch_alloc_size;
+
+			rxq->completed_pkts_num = 0;
+			rxq->processing_idx = (u16)(rxq->processing_idx - done_num);
+			for (i = 0, j = rxq->processing_idx; i < done_num; ++i, ++j) {
+				rxq->buffer_ring[j].mbuf = rxq->completed_ring[i];
+			}
+
+			done_num = 0;
+			goto l_end;
+		}
+
+		rte_wmb();
+		SXE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, alloced_idx);
+	}
+
+	if (rxq->processing_idx >= rxq->ring_depth) {
+		rxq->processing_idx = 0;
+	}
+
+	if (rxq->completed_pkts_num) {
+		done_num = sxe_ret_pkts_to_user(rxq, rx_pkts, pkts_num);
+		LOG_DEBUG("there are %u mbuf in completed ring "
+				"of queue[%u] return to user, done_num=%u",
+				rxq->completed_pkts_num,
+				rxq->queue_id, done_num);
+	}
+
+l_end:
+	return done_num;
+}
+
+u16 sxe_batch_alloc_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	u16 done_num;
+
+	if (unlikely(pkts_num == 0)) {
+		LOG_DEBUG("user need pkts = 0");
+		done_num = 0;
+		goto l_end;
+	}
+
+	if (likely(pkts_num <= RTE_PMD_SXE_MAX_RX_BURST)) {
+		done_num = sxe_burst_pkts_recv(rx_queue, rx_pkts, pkts_num);
+		goto l_end;
+	}
+
+	done_num = 0;
+	while (pkts_num) {
+		u16 ret, n;
+
+		n = (u16)RTE_MIN(pkts_num, RTE_PMD_SXE_MAX_RX_BURST);
+		ret = sxe_burst_pkts_recv(rx_queue, &rx_pkts[done_num], n);
+		done_num = (u16)(done_num + ret);
+		pkts_num = (u16)(pkts_num - ret);
+		if (ret < n) {
+			break;
+		}
+	}
+
+l_end:
+	return done_num;
+}
+
+static inline s32 sxe_lro_new_mbufs_alloc(sxe_rx_queue_s *rxq,
+					struct rte_mbuf **new_mbuf,
+					u16 *hold_num, bool batch_alloc)
+{
+	s32 ret = 0;
+
+	LOG_DEBUG("rxq[%u] %s alloc mem, current num_hold=%u",
+			rxq->queue_id, batch_alloc ? "batch" : "single", *hold_num);
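+	/* Single mode replaces the consumed mbuf immediately; batch mode
+	 * defers and refills a whole batch once enough descriptors are held.
+	 */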
+	if (!batch_alloc) {
+		*new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+		if (*new_mbuf == NULL) {
+			LOG_DEBUG("RX mbuf alloc failed "
+				"port_id=%u queue_id=%u",
+				rxq->port_id, rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->
+						rx_mbuf_alloc_failed++;
+			ret = -ENOMEM;
+			goto l_end;
+		}
+
+		(*new_mbuf)->data_off = RTE_PKTMBUF_HEADROOM;
+	} else if (*hold_num > rxq->batch_alloc_size) {
+		u16 next_rdt = rxq->batch_alloc_trigger;
+
+		if (!sxe_rx_bufs_batch_alloc(rxq, false)) {
+			rte_wmb();
+			SXE_PCI_REG_WC_WRITE_RELAXED(
+						rxq->rdt_reg_addr,
+						next_rdt);
+
+			*hold_num -= rxq->batch_alloc_size;
+		} else {
+			LOG_DEBUG("RX bulk alloc failed "
+					"port_id=%u queue_id=%u",
+					rxq->port_id, rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->
+						rx_mbuf_alloc_failed++;
+			ret = -ENOMEM;
+			goto l_end;
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+static inline void sxe_rx_resource_update(sxe_rx_buffer_s *rx_buf,
+				volatile union sxe_rx_data_desc *cur_desc,
+				struct rte_mbuf *new_mbuf, bool batch_alloc)
+{
+	LOG_DEBUG("%s update resource, new_mbuf=%p",
+				batch_alloc ? "batch" : "single", cur_desc);
+
+	if (!batch_alloc) {
+		__le64 dma =
+		  rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mbuf));
+		rx_buf->mbuf = new_mbuf;
+		cur_desc->read.hdr_addr = 0;
+		cur_desc->read.pkt_addr = dma;
+	} else {
+		rx_buf->mbuf = NULL;
+	}
+}
+
+static inline u16 sxe_rx_next_idx_get(union sxe_rx_data_desc *desc,
+						u16 next_idx)
+{
+	u16 nextp_id;
+	u32 staterr = rte_le_to_cpu_32(desc->wb.upper.status_error);
+
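+	/* For LRO descriptors the hardware writes the ring index of the
+	 * frame's next descriptor into the NEXTP field; plain scattered
+	 * frames simply use the next consecutive descriptor.
+	 */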
+	if (sxe_lro_count(desc)) {
+		nextp_id =
+			(staterr & SXE_RXDADV_NEXTP_MASK) >>
+			SXE_RXDADV_NEXTP_SHIFT;
+	} else {
+		nextp_id = next_idx;
+	}
+	LOG_DEBUG("next idx = %u", nextp_id);
+	return nextp_id;
+}
+
+static inline void sxe_lro_first_seg_update(struct rte_mbuf **first_seg,
+						struct rte_mbuf *cur_mbuf,
+						u16 data_len)
+{
+	if (*first_seg == NULL) {
+		(*first_seg) = cur_mbuf;
+		(*first_seg)->pkt_len = data_len;
+		(*first_seg)->nb_segs = 1;
+	} else {
+		(*first_seg)->pkt_len += data_len;
+		(*first_seg)->nb_segs++;
+	}
+}
+
+static inline void sxe_mbuf_fields_process(struct rte_mbuf *first_seg,
+					sxe_rx_queue_s *rxq,
+					union sxe_rx_data_desc desc,
+					struct rte_mbuf *cur_mbuf,
+					u32 staterr)
+{
+	u32 pkt_info;
+
+	pkt_info = rte_le_to_cpu_32(desc.wb.lower.lo_dword.data);
+	sxe_rx_mbuf_common_header_fill(rxq, first_seg, desc,
+					pkt_info, staterr);
+
+	first_seg->pkt_len -= rxq->crc_len;
+	if (unlikely(cur_mbuf->data_len <= rxq->crc_len)) {
+		struct rte_mbuf *lp;
+
+		for (lp = first_seg; lp->next != cur_mbuf; lp = lp->next) {
+			;
+		}
+
+		first_seg->nb_segs--;
+		lp->data_len -= rxq->crc_len - cur_mbuf->data_len;
+		lp->next = NULL;
+		rte_pktmbuf_free_seg(cur_mbuf);
+	} else {
+		cur_mbuf->data_len -= rxq->crc_len;
+	}
+
+	rte_packet_prefetch((u8 *)first_seg->buf_addr + first_seg->data_off);
+}
+
+static inline u16 sxe_lro_pkts_recv(void *rx_queue,
+			struct rte_mbuf **rx_pkts, u16 pkts_num,
+			bool batch_alloc)
+{
+	sxe_rx_queue_s *rxq = rx_queue;
+	volatile union sxe_rx_data_desc *desc_ring = rxq->desc_ring;
+	sxe_rx_buffer_s *buf_ring = rxq->buffer_ring;
+	sxe_rx_buffer_s *sc_buf_ring = rxq->sc_buffer_ring;
+	u16 cur_idx = rxq->processing_idx;
+	u16 done_num = 0;
+	u16 hold_num = rxq->hold_num;
+	u16 prev_idx = rxq->processing_idx;
+	s32 err;
+
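+	/* Build scattered/LRO frames: chain each descriptor's mbuf onto
+	 * first_seg, following the hardware NEXTP index for LRO descriptors,
+	 * until an EOP descriptor completes the frame.
+	 */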
+	while (done_num < pkts_num) {
+		bool is_eop;
+		sxe_rx_buffer_s *rx_buf;
+		sxe_rx_buffer_s *sc_rx_buf;
+		sxe_rx_buffer_s *next_sc_rx_buf = NULL;
+		sxe_rx_buffer_s *next_rx_buf = NULL;
+		struct rte_mbuf *first_seg;
+		struct rte_mbuf *cur_mbuf;
+		struct rte_mbuf *new_mbuf = NULL;
+		union sxe_rx_data_desc desc_copy;
+		u16 data_len;
+		u16 next_idx;
+		volatile union sxe_rx_data_desc *cur_desc;
+		u32 staterr;
+
+next_desc:
+		cur_desc = &desc_ring[cur_idx];
+		staterr = rte_le_to_cpu_32(cur_desc->wb.upper.status_error);
+
+		if (!(staterr & SXE_RXDADV_STAT_DD)) {
+			break;
+		}
+
+		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+		desc_copy = *cur_desc;
+
+		LOG_DEBUG("port_id=%u queue_id=%u cur_idx=%u "
+				"staterr=0x%x data_len=%u",
+				rxq->port_id, rxq->queue_id, cur_idx, staterr,
+				rte_le_to_cpu_16(desc_copy.wb.upper.length));
+
+		err = sxe_lro_new_mbufs_alloc(rxq, &new_mbuf, &hold_num, batch_alloc);
+		if (err) {
+			LOG_ERROR("mbuf %s alloc failed",
+					batch_alloc ? "batch" : "single");
+			break;
+		}
+
+		hold_num++;
+		rx_buf = &buf_ring[cur_idx];
+		is_eop = !!(staterr & SXE_RXDADV_STAT_EOP);
+
+		next_idx = cur_idx + 1;
+		if (next_idx == rxq->ring_depth) {
+			next_idx = 0;
+		}
+
+		sxe_rx_resource_prefetch(next_idx, buf_ring, desc_ring);
+
+		cur_mbuf = rx_buf->mbuf;
+
+		sxe_rx_resource_update(rx_buf, cur_desc, new_mbuf, batch_alloc);
+
+		data_len = rte_le_to_cpu_16(desc_copy.wb.upper.length);
+		cur_mbuf->data_len = data_len;
+
+		if (!is_eop) {
+			u16 nextp_id = sxe_rx_next_idx_get(&desc_copy, next_idx);
+
+			next_sc_rx_buf = &sc_buf_ring[nextp_id];
+			next_rx_buf = &buf_ring[nextp_id];
+			rte_sxe_prefetch(next_rx_buf);
+		}
+
+		sc_rx_buf = &sc_buf_ring[cur_idx];
+		first_seg = sc_rx_buf->mbuf;
+		sc_rx_buf->mbuf = NULL;
+
+		sxe_lro_first_seg_update(&first_seg, cur_mbuf, data_len);
+
+		prev_idx = cur_idx;
+		cur_idx = next_idx;
+
+		if (!is_eop && next_rx_buf) {
+			cur_mbuf->next = next_rx_buf->mbuf;
+			next_sc_rx_buf->mbuf = first_seg;
+			goto next_desc;
+		}
+
+		sxe_mbuf_fields_process(first_seg, rxq, desc_copy, cur_mbuf, staterr);
+
+		rx_pkts[done_num++] = first_seg;
+	}
+
+	rxq->processing_idx = cur_idx;
+
+	if (!batch_alloc && hold_num > rxq->batch_alloc_size) {
+		LOG_DEBUG("port_id=%u queue_id=%u rx_tail=%u "
+			   "num_hold=%u done_num=%u",
+			   rxq->port_id, rxq->queue_id,
+			   cur_idx, hold_num, done_num);
+
+		rte_wmb();
+		SXE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_idx);
+		hold_num = 0;
+	}
+
+	rxq->hold_num = hold_num;
+	return done_num;
+}
+
+u16 sxe_batch_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	return sxe_lro_pkts_recv(rx_queue, rx_pkts, pkts_num, true);
+}
+
+u16 sxe_single_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	return sxe_lro_pkts_recv(rx_queue, rx_pkts, pkts_num, false);
+}
+
+void __rte_cold sxe_rx_function_set(struct rte_eth_dev *dev,
+			bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	__sxe_rx_function_set(dev, rx_batch_alloc_allowed, rx_vec_allowed);
+}
+
+#ifdef ETH_DEV_RX_DESC_DONE
+s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	volatile union sxe_rx_data_desc *desc;
+	sxe_rx_queue_s *rxq = rx_queue;
+	u32 index;
+	s32 is_done = 0;
+
+	LOG_DEBUG("check rx queue[%u], offset desc[%u]\n",
+			rxq->queue_id, offset);
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("offset=%u >= ring depth=%u\n",
+				offset, rxq->ring_depth);
+		goto l_end;
+	}
+
+	index = rxq->processing_idx + offset;
+	if (index >= rxq->ring_depth) {
+		index -= rxq->ring_depth;
+	}
+
+	desc = &rxq->desc_ring[index];
+	is_done = !!(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD));
+
+l_end:
+	return is_done;
+}
+#endif
+
+s32 sxe_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	int ret = RTE_ETH_RX_DESC_AVAIL;
+	sxe_rx_queue_s *rxq = rx_queue;
+	volatile u32 *status;
+	u32 hold_num, desc;
+
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("rx queue[%u] get desc status err,"
+			"offset=%u >= ring_depth=%u\n",
+			rxq->queue_id, offset, rxq->ring_depth);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
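+	/* Descriptors still held by the driver and not yet re-armed with
+	 * new mbufs can never be reported as available, so shrink the
+	 * usable window by the current hold count.
+	 */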
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	if (rxq->is_using_sse)
+		hold_num = rxq->realloc_num;
+	else
+#endif
+#endif
+		hold_num = rxq->hold_num;
+
+	if (offset >= rxq->ring_depth - hold_num) {
+		ret = RTE_ETH_RX_DESC_UNAVAIL;
+		goto l_end;
+	}
+
+	desc = rxq->processing_idx + offset;
+	if (desc >= rxq->ring_depth) {
+		desc -= rxq->ring_depth;
+	}
+
+	status = &rxq->desc_ring[desc].wb.upper.status_error;
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)) {
+		ret = RTE_ETH_RX_DESC_DONE;
+	}
+
+l_end:
+	LOG_DEBUG("rx queue[%u] get desc status=%d\n",rxq->queue_id, ret);
+	return ret;
+}
+
+s32 __rte_cold sxe_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 desc_num,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct rx_setup rx_setup = { 0 };
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rx_setup.desc_num = desc_num;
+	rx_setup.queue_idx = queue_idx;
+	rx_setup.socket_id = socket_id;
+	rx_setup.mp = mp;
+	rx_setup.dev = dev;
+	rx_setup.reg_base_addr = hw->reg_base_addr;
+	rx_setup.rx_conf = rx_conf;
+	rx_setup.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed;
+
+	ret = __sxe_rx_queue_setup(&rx_setup, false);
+	if (ret) {
+		LOG_ERROR_BDF("rx queue setup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+static void sxe_rx_mode_configure(struct sxe_hw *hw)
+{
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	LOG_DEBUG("read flt_ctrl=%u", flt_ctrl);
+	flt_ctrl |= SXE_FCTRL_BAM;
+	flt_ctrl |= SXE_FCTRL_DPF;
+	flt_ctrl |= SXE_FCTRL_PMCF;
+	LOG_DEBUG("write flt_ctrl=0x%x", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+}
+
+static inline void
+	sxe_rx_queue_offload_configure(struct rte_eth_dev *dev)
+{
+	u16 i;
+	sxe_rx_queue_s *rxq;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+			rxq->crc_len = RTE_ETHER_CRC_LEN;
+		} else {
+			rxq->crc_len = 0;
+		}
+
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+}
+
+static inline void
+	sxe_rx_offload_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+	bool crc_strp_on;
+	bool ip_csum_offload;
+
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		crc_strp_on = false;
+	} else {
+		crc_strp_on = true;
+	}
+	sxe_hw_rx_dma_ctrl_init(hw, crc_strp_on);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		adapter->mtu = rx_conf->max_rx_pkt_len - SXE_ETH_OVERHEAD;
+	}
+#else
+	if (dev->data->mtu > RTE_ETHER_MTU) {
+		adapter->mtu = dev->data->mtu;
+	}
+#endif
+
+	rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+		dev->data->scattered_rx = 1;
+	}
+
+	sxe_hw_rx_udp_frag_checksum_disable(hw);
+
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+		ip_csum_offload = true;
+	} else {
+		ip_csum_offload = false;
+	}
+
+	sxe_hw_rx_ip_checksum_offload_switch(hw, ip_csum_offload);
+
+	sxe_rx_queue_offload_configure(dev);
+}
+
+static inline void sxe_rx_queue_attr_configure(
+					struct rte_eth_dev *dev,
+					sxe_rx_queue_s *queue)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	u32 srrctl_size;
+	u64 desc_dma_addr;
+	u32 desc_mem_len;
+	u8 reg_idx;
+	u16 buf_size;
+	u32 frame_size = SXE_GET_FRAME_SIZE(dev);
+
+	reg_idx = queue->reg_idx;
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, false);
+
+	desc_mem_len = queue->ring_depth * sizeof(union sxe_rx_data_desc);
+	desc_dma_addr = queue->base_addr;
+	sxe_hw_rx_ring_desc_configure(hw, desc_mem_len,
+						desc_dma_addr, reg_idx);
+
+	buf_size = (u16)(rte_pktmbuf_data_room_size(queue->mb_pool) -
+		RTE_PKTMBUF_HEADROOM);
+
+	sxe_hw_rx_rcv_ctl_configure(hw, reg_idx,
+			SXE_LRO_HDR_SIZE, buf_size);
+
+	if (queue->drop_en) {
+		sxe_hw_rx_drop_switch(hw, reg_idx, true);
+	}
+
+	sxe_hw_rx_desc_thresh_set(hw, reg_idx);
+
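+	/* Express the buffer size in the granularity of the SRRCTL BSIZEPKT
+	 * field; if a max frame plus two VLAN tags no longer fits in the
+	 * rounded-down size, fall back to scattered receive.
+	 */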
+	srrctl_size = ((buf_size >> SXE_SRRCTL_BSIZEPKT_SHIFT) &
+				SXE_SRRCTL_BSIZEPKT_MASK);
+
+	buf_size = (u16) ((srrctl_size & SXE_SRRCTL_BSIZEPKT_MASK) <<
+				SXE_SRRCTL_BSIZEPKT_SHIFT);
+
+	if (frame_size + 2 * SXE_VLAN_TAG_SIZE > buf_size) {
+		dev->data->scattered_rx = 1;
+	}
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, true);
+}
+
+static inline void sxe_rx_queue_configure(struct rte_eth_dev *dev)
+{
+	u16 i;
+	sxe_rx_queue_s **queue = (sxe_rx_queue_s **)dev->data->rx_queues;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		sxe_rx_queue_attr_configure(dev, queue[i]);
+	}
+}
+
+static u32 sxe_lro_max_desc_get(struct rte_mempool *pool)
+{
+	u8 desc_num;
+	struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
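+	/* Derive the LRO descriptor limit from how many buffers of this
+	 * mempool a maximum-size IPv4 packet would span.
+	 */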
+	u16 maxdesc = RTE_IPV4_MAX_PKT_LEN /
+			(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+
+	if (maxdesc >= 16) {
+		desc_num = SXE_LROCTL_MAXDESC_16;
+	} else if (maxdesc >= 8) {
+		desc_num = SXE_LROCTL_MAXDESC_8;
+	} else if (maxdesc >= 4) {
+		desc_num = SXE_LROCTL_MAXDESC_4;
+	} else {
+		desc_num = SXE_LROCTL_MAXDESC_1;
+	}
+
+	return desc_num;
+}
+
+static s32 sxe_lro_sanity_check(struct rte_eth_dev *dev, bool *lro_capable)
+{
+	s32 ret = 0;
+	struct rte_eth_dev_info dev_info = { 0 };
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+		(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		PMD_LOG_CRIT(INIT, "lro can't be enabled when HW CRC "
+				"is disabled");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
+		*lro_capable = true;
+	}
+
+	if (!(*lro_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		PMD_LOG_CRIT(INIT, "lro is requested on HW that doesn't "
+				   "support it");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+static void sxe_lro_hw_configure(struct sxe_hw *hw, bool lro_capable,
+					struct rte_eth_rxmode *rx_conf)
+{
+	bool is_enable;
+
+	sxe_hw_rx_lro_ack_switch(hw, false);
+
+	sxe_hw_rx_dma_lro_ctrl_set(hw);
+
+	if ((lro_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		is_enable = true;
+	} else {
+		is_enable = false;
+	}
+
+	if (is_enable) {
+		sxe_hw_rx_nfs_filter_disable(hw);
+	}
+
+	sxe_hw_rx_lro_enable(hw, is_enable);
+}
+
+static void sxe_lro_irq_configure(struct sxe_hw *hw, u16 reg_idx,
+						u16 irq_idx)
+{
+	u32 irq_interval;
+
+	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT);
+	sxe_hw_ring_irq_interval_set(hw, reg_idx, irq_interval);
+
+	sxe_hw_ring_irq_map(hw, false, reg_idx, irq_idx);
+}
+
+static void sxe_lro_hw_queue_configure(struct rte_eth_dev *dev,
+						struct sxe_hw *hw)
+{
+	u16 i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		sxe_rx_queue_s *rxq = dev->data->rx_queues[i];
+		u16 reg_idx = rxq->reg_idx;
+		u32 max_desc_num;
+
+		max_desc_num = sxe_lro_max_desc_get(rxq->mb_pool);
+		sxe_hw_rx_lro_ctl_configure(hw, reg_idx, max_desc_num);
+
+		sxe_lro_irq_configure(hw, reg_idx, i);
+	}
+}
+
+static s32 sxe_lro_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+	bool lro_capable = false;
+
+	s32 ret;
+
+	ret = sxe_lro_sanity_check(dev, &lro_capable);
+	if (ret) {
+		PMD_LOG_CRIT(INIT, "lro sanity check failed, err=%d", ret);
+		goto l_end;
+	}
+
+	sxe_lro_hw_configure(hw, lro_capable, rx_conf);
+
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		PMD_LOG_DEBUG(INIT, "user app do not turn lro on");
+		goto l_end;
+	}
+
+	sxe_lro_hw_queue_configure(dev, hw);
+
+	dev->data->lro = 1;
+
+	PMD_LOG_DEBUG(INIT, "enabling lro mode");
+
+l_end:
+	return ret;
+}
+
+static s32 __rte_cold sxe_rx_start(struct rte_eth_dev *dev)
+{
+	sxe_rx_queue_s *rxq;
+	u16 i;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq->deferred_start) {
+			ret = sxe_rx_queue_start(dev, i);
+			if (ret < 0) {
+				PMD_LOG_ERR(INIT, "rx queue[%u] start failed", i);
+				goto l_end;
+			}
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+s32 __rte_cold sxe_rx_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_rx_cap_switch_off(hw);
+
+	sxe_hw_rx_pkt_buf_size_set(hw, 0, SXE_RX_PKT_BUF_SIZE);
+
+	sxe_rx_mode_configure(hw);
+
+	sxe_rx_offload_configure(dev);
+
+	sxe_rx_queue_configure(dev);
+
+	sxe_rx_features_configure(dev);
+
+	ret = sxe_lro_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "lro config failed, err = %d", ret);
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+#else
+	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, NULL);
+#endif
+
+	ret = sxe_rx_start(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "rx start failed, err = %d", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+static void sxe_vmdq_rx_mode_get(u32 rx_mask, u32 *orig_val)
+{
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
+		*orig_val |= SXE_VMOLR_AUPE;
+	}
+
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) {
+		*orig_val |= SXE_VMOLR_ROMPE;
+	}
+
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) {
+		*orig_val |= SXE_VMOLR_ROPE;
+	}
+
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) {
+		*orig_val |= SXE_VMOLR_BAM;
+	}
+
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) {
+		*orig_val |= SXE_VMOLR_MPE;
+	}
+}
+
+static void sxe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	enum rte_eth_nb_pools pools_num;
+	u32 rx_mode = 0;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+	pools_num = cfg->nb_queue_pools;
+
+	sxe_rss_disable(dev);
+
+	sxe_hw_vmdq_mq_configure(hw);
+
+	sxe_hw_vmdq_default_pool_configure(hw,
+						cfg->enable_default_pool,
+						cfg->default_pool);
+
+	sxe_vmdq_rx_mode_get(cfg->rx_mode, &rx_mode);
+	sxe_hw_vmdq_vlan_configure(hw, pools_num, rx_mode);
+
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		sxe_hw_vmdq_pool_configure(hw, i,
+						cfg->pool_map[i].vlan_id,
+						cfg->pool_map[i].pools);
+	}
+
+	if (cfg->enable_loop_back) {
+		sxe_hw_vmdq_loopback_configure(hw);
+	}
+}
+
+s32 sxe_rx_features_configure(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			sxe_rss_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			sxe_dcb_vmdq_rx_hw_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+			sxe_vmdq_rx_hw_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_NONE:
+		default:
+			sxe_rss_disable(dev);
+			break;
+		}
+	} else {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			sxe_vf_rss_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
+			sxe_dcb_vmdq_rx_hw_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+			ret = -SXE_ERR_CONFIG;
+			PMD_LOG_ERR(DRV,
+				"DCB and RSS with vmdq or sriov not "
+				"supported.(err:%d)", ret);
+			break;
+		default:
+			sxe_vf_default_mode_configure(dev);
+			break;
+		}
+#else
+		PMD_LOG_ERR(INIT, "unsupport sriov");
+		ret = -EINVAL;
+#endif
+	}
+
+	LOG_INFO("pool num:%u rx mq_mode:0x%x configure result:%d.",
+		     RTE_ETH_DEV_SRIOV(dev).active,
+		     dev->data->dev_conf.rxmode.mq_mode, ret);
+
+	return ret;
+}
+
+const u32 *sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	return __sxe_dev_supported_ptypes_get(dev);
+}
+
+#ifdef ETH_DEV_OPS_MONITOR
+static s32
+sxe_monitor_callback(const u64 value,
+		const u64 arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+	const u64 dd_state = rte_cpu_to_le_32(SXE_RXDADV_STAT_DD);
+	return (value & dd_state) == dd_state ? -1 : 0;
+}
+
+s32
+sxe_monitor_addr_get(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+	volatile union sxe_rx_data_desc *rxdp;
+	struct sxe_rx_queue *rxq = rx_queue;
+
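+	/* Monitor the status word of the next descriptor to be processed;
+	 * the callback reports the condition as met once the DD bit is set,
+	 * so the core will not sleep on an already-completed descriptor.
+	 */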
+	rxdp = &rxq->desc_ring[rxq->processing_idx];
+
+	pmc->addr = &rxdp->wb.upper.status_error;
+	pmc->fn = sxe_monitor_callback;
+	pmc->size = sizeof(u32);
+
+	return 0;
+}
+#endif
diff --git a/drivers/net/sxe/pf/sxe_rx.h b/drivers/net/sxe/pf/sxe_rx.h
new file mode 100644
index 0000000000..7322a54a2c
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_rx.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DPDK_RX_H__
+#define __SXE_DPDK_RX_H__
+
+#include "sxe_types.h"
+#include "sxe_queue.h"
+#include "sxe_hw.h"
+#include "sxe_compat_version.h"
+#include "sxe_logs.h"
+
+#define SXE_RXDADV_ERR_CKSUM_BIT  30
+#define SXE_RXDADV_ERR_CKSUM_MSK  3
+
+#define SXE_PACKET_TYPE_MAX               0X80
+#define SXE_PACKET_TYPE_TN_MAX            0X100
+#define SXE_PACKET_TYPE_MASK              0X7F
+#define SXE_RXD_STAT_TMST                 0x10000
+
+#define SXE_DESCS_PER_LOOP 4
+
+#define SXE_PCI_REG_WC_WRITE(reg, value)			\
+	rte_write32_wc((rte_cpu_to_le_32(value)), reg)
+#define SXE_PCI_REG_WC_WRITE_RELAXED(reg, value)		\
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define SXE_RX_RING_SIZE ((SXE_MAX_RING_DESC + RTE_PMD_SXE_MAX_RX_BURST) * \
+			sizeof(sxe_rx_data_desc_u))
+
+extern const u32 sxe_ptype_table[SXE_PACKET_TYPE_MAX];
+extern const u32 sxe_ptype_table_tn[SXE_PACKET_TYPE_TN_MAX];
+
+static inline u64 sxe_rx_desc_status_to_pkt_flags(u32 rx_status,
+							u64 vlan_flags)
+{
+	u64 pkt_flags;
+
+	pkt_flags = (rx_status & SXE_RXD_STAT_VP) ?  vlan_flags : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+	if (rx_status & SXE_RXD_STAT_TMST) {
+		pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
+	}
+#endif
+	return pkt_flags;
+}
+
+static inline u64 sxe_rx_desc_error_to_pkt_flags(u32 rx_status)
+{
+	u64 pkt_flags;
+
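+	/* The two checksum error bits of the status word index this map:
+	 * bit 0 selects an L4 checksum error, bit 1 an IP checksum error.
+	 */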
+	static u64 error_to_pkt_flags_map[4] = {
+		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
+	};
+
+	pkt_flags = error_to_pkt_flags_map[(rx_status >>
+		SXE_RXDADV_ERR_CKSUM_BIT) & SXE_RXDADV_ERR_CKSUM_MSK];
+
+	if ((rx_status & SXE_RXD_STAT_OUTERIPCS) &&
+	    (rx_status & SXE_RXDADV_ERR_OUTERIPER)) {
+		pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+	}
+
+	return pkt_flags;
+}
+
+static inline u64 sxe_rx_desc_pkt_info_to_pkt_flags(u16 pkt_info)
+{
+	u64 flags = 0;
+	static u64 ip_rss_types_map[16] __rte_cache_aligned = {
+		0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+		0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+		RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+		0, 0, 0, RTE_MBUF_F_RX_FDIR,
+	};
+
+#ifdef RTE_LIBRTE_IEEE1588
+		static u64 ip_pkt_etqf_map[8] = {
+			0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
+			0, 0, 0, 0,
+		};
+
+		if (likely(pkt_info & SXE_RXDADV_PKTTYPE_ETQF)) {
+			flags = ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
+				ip_rss_types_map[pkt_info & 0XF];
+		} else {
+			flags = ip_rss_types_map[pkt_info & 0XF];
+		}
+#else
+		flags = ip_rss_types_map[pkt_info & 0XF];
+#endif
+	return flags;
+}
+
+static inline u32 sxe_rxd_pkt_info_to_pkt_type(u32 pkt_info,
+							u16 ptype_mask)
+{
+
+	if (unlikely(pkt_info & SXE_RXDADV_PKTTYPE_ETQF)) {
+		return RTE_PTYPE_UNKNOWN;
+	}
+
+	pkt_info = (pkt_info >> SXE_RXDADV_PKTTYPE_ETQF_SHIFT) & ptype_mask;
+
+	pkt_info &= SXE_PACKET_TYPE_MASK;
+
+	return sxe_ptype_table[pkt_info];
+}
+
+static inline u32 sxe_lro_count(sxe_rx_data_desc_u *rx)
+{
+	return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
+		SXE_RXDADV_LROCNT_MASK) >> SXE_RXDADV_LROCNT_SHIFT;
+}
+
+static inline bool __rte_cold
+	sxe_check_is_rx_batch_alloc_support(
+						sxe_rx_queue_s *rxq)
+{
+	bool support = true;
+
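+	/* Batch allocation requires the burst size to fit into one batch,
+	 * the batch to be smaller than the ring, and the ring depth to be
+	 * a whole multiple of the batch size.
+	 */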
+	if (rxq->batch_alloc_size < RTE_PMD_SXE_MAX_RX_BURST) {
+		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
+			     "rxq->batch_alloc_size=%d, "
+			     "RTE_PMD_SXE_MAX_RX_BURST=%d",
+			     rxq->batch_alloc_size, RTE_PMD_SXE_MAX_RX_BURST);
+		support = false;
+	} else if (rxq->batch_alloc_size >= rxq->ring_depth) {
+		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
+			     "rxq->batch_alloc_size=%d, "
+			     "rxq->ring_depth=%d",
+			     rxq->batch_alloc_size, rxq->ring_depth);
+		support = false;
+	} else if ((rxq->ring_depth % rxq->batch_alloc_size) != 0) {
+		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
+			     "rxq->ring_depth=%d, "
+			     "rxq->batch_alloc_size=%d",
+			     rxq->ring_depth, rxq->batch_alloc_size);
+		support = false;
+	}
+
+	return support;
+}
+
+s32 sxe_rx_configure(struct rte_eth_dev *dev);
+
+void sxe_rx_function_set(struct rte_eth_dev *dev,
+			bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#ifdef ETH_DEV_RX_DESC_DONE
+s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 sxe_rx_descriptor_status(void *rx_queue, u16 offset);
+
+u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts);
+
+s32 sxe_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 num_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp);
+
+s32 sxe_rx_features_configure(struct rte_eth_dev *dev);
+
+const u32 *sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+#ifdef ETH_DEV_OPS_MONITOR
+s32
+sxe_monitor_addr_get(void *rx_queue, struct rte_power_monitor_cond *pmc);
+#endif
+
+void sxe_rx_mbuf_common_header_fill(sxe_rx_queue_s *rxq,
+					struct rte_mbuf *mbuf,
+					volatile sxe_rx_data_desc_u desc,
+					u32 pkt_info, u32 staterr);
+
+u16 sxe_batch_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num);
+
+u16 sxe_single_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num);
+
+u16 sxe_batch_alloc_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_stats.c b/drivers/net/sxe/pf/sxe_stats.c
new file mode 100644
index 0000000000..5d9de2991c
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_stats.c
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#include "sxe_stats.h"
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxe_queue.h"
+#include "sxe_compat_platform.h"
+#include <rte_string_fns.h>
+
+#define SXE_STAT_MAP_WIDTH 8
+#define SXE_STAT_MAP_CNT 4
+#define SXE_STAT_MAP_MASK 0x0F
+
+#define SXE_QUEUE_STAT_COUNT \
+	(sizeof(stats_info->hw_stats.qprc) / sizeof(stats_info->hw_stats.qprc[0]))
+
+static const struct sxe_stats_field sxe_xstats_sw_field[] = {
+	{"rx_l3_l4_xsum_error", offsetof(struct sxe_sw_stats,
+		hw_csum_rx_error)},
+};
+
+static const struct sxe_stats_field sxe_xstats_mac_field[] = {
+	{"rx_crc_errors", offsetof(struct sxe_mac_stats, crcerrs)},
+	{"rx_error_bytes", offsetof(struct sxe_mac_stats, errbc)},
+	{"rx_length_errors", offsetof(struct sxe_mac_stats, rlec)},
+	{"rx_size_64_packets", offsetof(struct sxe_mac_stats, prc64)},
+	{"rx_size_65_to_127_packets", offsetof(struct sxe_mac_stats, prc127)},
+	{"rx_size_128_to_255_packets", offsetof(struct sxe_mac_stats, prc255)},
+	{"rx_size_256_to_511_packets", offsetof(struct sxe_mac_stats, prc511)},
+	{"rx_size_512_to_1023_packets", offsetof(struct sxe_mac_stats,
+		prc1023)},
+	{"rx_size_1024_to_max_packets", offsetof(struct sxe_mac_stats,
+		prc1522)},
+	{"rx_broadcast_packets", offsetof(struct sxe_mac_stats, bprc)},
+	{"rx_multicast_packets", offsetof(struct sxe_mac_stats, mprc)},
+	{"rx_fragment_errors", offsetof(struct sxe_mac_stats, rfc)},
+	{"rx_undersize_errors", offsetof(struct sxe_mac_stats, ruc)},
+	{"rx_oversize_errors", offsetof(struct sxe_mac_stats, roc)},
+	{"rx_jabber_errors", offsetof(struct sxe_mac_stats, rjc)},
+	{"rx_size_packets", offsetof(struct sxe_mac_stats, tpr)},
+	{"rx_size_bytes", offsetof(struct sxe_mac_stats, tor)},
+	{"tx_size_packets", offsetof(struct sxe_mac_stats, tpt)},
+	{"tx_size_64_packets", offsetof(struct sxe_mac_stats, ptc64)},
+	{"tx_size_65_to_127_packets", offsetof(struct sxe_mac_stats, ptc127)},
+	{"tx_size_128_to_255_packets", offsetof(struct sxe_mac_stats, ptc255)},
+	{"tx_size_256_to_511_packets", offsetof(struct sxe_mac_stats, ptc511)},
+	{"tx_size_512_to_1023_packets", offsetof(struct sxe_mac_stats,
+		ptc1023)},
+	{"tx_size_1024_to_max_packets", offsetof(struct sxe_mac_stats,
+		ptc1522)},
+	{"tx_multicast_packets", offsetof(struct sxe_mac_stats, mptc)},
+	{"tx_broadcast_packets", offsetof(struct sxe_mac_stats, bptc)},
+
+	{"flow_navigator_add_filters", offsetof(struct sxe_mac_stats,
+		fnavadd)},
+	{"flow_navigator_remove_filters", offsetof(struct sxe_mac_stats,
+		fnavrmv)},
+	{"flow_navigator_filters_add_errs", offsetof(struct sxe_mac_stats,
+		fnavadderr)},
+	{"flow_navigator_filters_remove_errs", offsetof(struct sxe_mac_stats,
+		fnavrmverr)},
+	{"flow_navigator_matched_filters", offsetof(struct sxe_mac_stats,
+		fnavmatch)},
+	{"flow_navigator_missed_filters", offsetof(struct sxe_mac_stats,
+		fnavmiss)},
+};
+
+static const struct sxe_stats_field sxe_xstats_fc_field[] = {
+	{"dropped", offsetof(struct sxe_mac_stats, mpc)},
+	{"rx_xon_xoff_packets", offsetof(struct sxe_mac_stats, prcpf)},
+	{"tx_xon_xoff_packets", offsetof(struct sxe_mac_stats, pfct)},
+};
+
+#define SXE_XSTAT_SW_CNT  (sizeof(sxe_xstats_sw_field) / \
+		      sizeof(sxe_xstats_sw_field[0]))
+
+#define SXE_XSTAT_MAC_CNT (sizeof(sxe_xstats_mac_field) / \
+		      sizeof(sxe_xstats_mac_field[0]))
+
+#define SXE_XSTAT_FC_CNT (sizeof(sxe_xstats_fc_field) / \
+			   sizeof(sxe_xstats_fc_field[0]))
+
+#define SXE_FC_PRIO_VALUES 8
+
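+/*
+ * Total number of extended statistics: all MAC counters, all software
+ * counters, plus one flow control counter per priority.
+ */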
+#define SXE_XSTAT_CNT  (SXE_XSTAT_MAC_CNT + SXE_XSTAT_SW_CNT + \
+			SXE_XSTAT_FC_CNT * SXE_FC_PRIO_VALUES)
+
+#ifdef SXE_TEST
+u32 sxe_xstats_cnt_get(void)
+{
+	return SXE_XSTAT_CNT;
+}
+#endif
+
+s32 sxe_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 i;
+	u64 rx_packets = 0;
+	u64 rx_bytes = 0;
+	s32 ret = 0;
+
+	sxe_hw_stats_get(hw, &stats_info->hw_stats);
+
+	if (stats == NULL) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "input param stats is null.");
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_QUEUE_STAT_COUNT; i++) {
+		rx_packets += stats_info->hw_stats.qprc[i];
+		rx_bytes += stats_info->hw_stats.qbrc[i];
+
+		stats->q_ipackets[i] = stats_info->hw_stats.qprc[i];
+		stats->q_opackets[i] = stats_info->hw_stats.qptc[i];
+		stats->q_ibytes[i] = stats_info->hw_stats.qbrc[i];
+		stats->q_obytes[i] = stats_info->hw_stats.qbtc[i];
+		stats->q_errors[i] = stats_info->hw_stats.qprdc[i];
+	}
+
+	stats->ipackets = rx_packets;
+	stats->ibytes = rx_bytes;
+	stats->opackets = stats_info->hw_stats.gptc;
+	stats->obytes = stats_info->hw_stats.gotc;
+
+	stats->imissed  = 0;
+	stats->ierrors  = stats_info->hw_stats.crcerrs +
+			  stats_info->hw_stats.rlec +
+			  stats_info->hw_stats.ruc +
+			  stats_info->hw_stats.roc +
+			  stats_info->hw_stats.rfc;
+
+	stats->oerrors  = 0;
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_hw_xstat_offset_get(u32 id, u32 *offset)
+{
+	s32 ret = 0;
+	u32 size = SXE_XSTAT_MAC_CNT;
+
+	if (id < size) {
+		*offset = sxe_xstats_mac_field[id].offset;
+	} else {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+	}
+
+	return ret;
+}
+
+static s32 sxe_sw_xstat_offset_get(u32 id, u32 *offset)
+{
+	s32 ret = 0;
+	u32 size = SXE_XSTAT_SW_CNT;
+
+	if (id < size) {
+		*offset = sxe_xstats_sw_field[id].offset;
+	} else {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+	}
+
+	return ret;
+}
+
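+/*
+ * Per-priority flow control counters are stored as u64 arrays inside
+ * struct sxe_mac_stats, so the field offset is advanced by
+ * sizeof(u64) * priority to select the entry of a given priority.
+ */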
+static s32 sxe_fc_xstat_field_offset_get(u32 id, u8 priority, u32 *offset)
+{
+	s32 ret = 0;
+	u32 size = SXE_XSTAT_FC_CNT;
+
+	if (id < size) {
+		*offset = sxe_xstats_fc_field[id].offset + (sizeof(u64) * priority);
+	} else {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+	}
+
+	return ret;
+}
+
+static void sxe_sw_stats_get(struct rte_eth_dev *eth_dev,
+				struct sxe_sw_stats *stats)
+{
+	u32 i;
+	u64 hw_csum_rx_error = 0;
+	sxe_rx_queue_s *rxq;
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		hw_csum_rx_error += rxq->rx_stats.csum_err;
+	}
+	stats->hw_csum_rx_error = hw_csum_rx_error;
+
+	return;
+}
+
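+/*
+ * Follows the rte_eth xstats contract: when usr_cnt is smaller than
+ * the number of available statistics the required count is returned
+ * instead, and a NULL xstats array only refreshes the counters.
+ */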
+s32 sxe_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 usr_cnt)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 i;
+	u32 cnt;
+	s32 ret;
+	u32 offset;
+	u8 prio;
+
+	cnt = SXE_XSTAT_CNT;
+	PMD_LOG_INFO(DRV, "xstat size:%u. hw xstat field cnt:%lu "
+		    "fc xstat field cnt:%lu ", cnt,
+		    SXE_XSTAT_MAC_CNT,
+		    SXE_XSTAT_FC_CNT);
+
+	if (usr_cnt < cnt) {
+		ret = cnt;
+		PMD_LOG_ERR(DRV, "user usr_cnt:%u less than stats cnt:%u.",
+			    usr_cnt, cnt);
+		goto l_out;
+	}
+
+	sxe_hw_stats_get(hw, &stats_info->hw_stats);
+	sxe_sw_stats_get(eth_dev, &stats_info->sw_stats);
+
+	if (xstats == NULL) {
+		ret = 0;
+		PMD_LOG_ERR(DRV, "usr_cnt:%u, input param xstats is null.", usr_cnt);
+		goto l_out;
+	}
+
+	cnt = 0;
+	for (i = 0; i < SXE_XSTAT_MAC_CNT; i++) {
+		sxe_hw_xstat_offset_get(i, &offset);
+		xstats[cnt].value = *(u64 *)(((s8 *)(&stats_info->hw_stats)) + offset);
+		xstats[cnt].id = cnt;
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_SW_CNT; i++) {
+		sxe_sw_xstat_offset_get(i, &offset);
+		xstats[cnt].value = *(u64 *)(((s8 *)(&stats_info->sw_stats)) + offset);
+		xstats[cnt].id = cnt;
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_FC_CNT; i++) {
+		for (prio = 0; prio < SXE_FC_PRIO_VALUES; prio++) {
+			sxe_fc_xstat_field_offset_get(i, prio, &offset);
+			xstats[cnt].value = *(u64 *)(((s8 *)(&stats_info->hw_stats))
+					  + offset);
+			xstats[cnt].id = cnt;
+			cnt++;
+		}
+	}
+
+	ret = cnt;
+	PMD_LOG_INFO(DRV, "usr_cnt:%u stats cnt:%u stats done.", usr_cnt, cnt);
+
+l_out:
+	return ret;
+}
+
+s32 sxe_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	sxe_rx_queue_s *rxq;
+	u32 i;
+
+	sxe_eth_stats_get(eth_dev, NULL);
+	sxe_hw_stats_seq_clean(hw, &stats_info->hw_stats);
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		memset(&rxq->rx_stats, 0, sizeof(rxq->rx_stats));
+	}
+
+	memset(&stats_info->hw_stats, 0, sizeof(stats_info->hw_stats));
+	memset(&stats_info->sw_stats, 0, sizeof(stats_info->sw_stats));
+
+	return 0;
+}
+
+s32 sxe_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	sxe_rx_queue_s *rxq;
+	u32 size = SXE_XSTAT_CNT;
+	u32 i;
+
+	sxe_xstats_get(eth_dev, NULL, size);
+	sxe_hw_stats_seq_clean(hw, &stats_info->hw_stats);
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		memset(&rxq->rx_stats, 0, sizeof(rxq->rx_stats));
+	}
+
+	memset(&stats_info->hw_stats, 0, sizeof(stats_info->hw_stats));
+	memset(&stats_info->sw_stats, 0, sizeof(stats_info->sw_stats));
+
+	return 0;
+}
+
+s32 sxe_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int usr_cnt)
+{
+	u32 i = 0;
+	u32 cnt = 0;
+	s32 ret;
+	u8 prio;
+
+	if (xstats_names == NULL) {
+		ret = SXE_XSTAT_CNT;
+		PMD_LOG_INFO(DRV, "xstats field size:%u.", ret);
+		goto l_out;
+	}
+
+	if (usr_cnt < SXE_XSTAT_CNT) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "max:%lu usr_cnt:%u invalid.(err:%d)",
+			    SXE_XSTAT_CNT, usr_cnt, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_XSTAT_MAC_CNT; i++) {
+		strlcpy(xstats_names[cnt].name,
+			sxe_xstats_mac_field[i].name,
+			sizeof(xstats_names[cnt].name));
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_SW_CNT; i++) {
+		strlcpy(xstats_names[cnt].name,
+			sxe_xstats_sw_field[i].name,
+			sizeof(xstats_names[cnt].name));
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_FC_CNT; i++) {
+		for (prio = 0; prio < SXE_FC_PRIO_VALUES; prio++) {
+			snprintf(xstats_names[cnt].name,
+				sizeof(xstats_names[cnt].name),
+				"priority%u_%s", prio,
+				sxe_xstats_fc_field[i].name);
+			cnt++;
+		}
+	}
+
+	ret = cnt;
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_all_xstats_value_get(struct rte_eth_dev *eth_dev,
+						u64 *values, u32 usr_cnt)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 size = SXE_XSTAT_CNT;
+	s32 ret;
+	u32 offset;
+	u32 cnt = 0;
+	u32 i;
+	u8 prio;
+
+	if (usr_cnt < size) {
+		PMD_LOG_WARN(DRV, "ids null usr_cnt:%u less than xstats"
+			     " cnt:%u, return xstat cnt.",
+			      usr_cnt, size);
+		ret = size;
+		goto l_out;
+	}
+
+	sxe_hw_stats_get(hw, &stats_info->hw_stats);
+	sxe_sw_stats_get(eth_dev, &stats_info->sw_stats);
+
+	if (values == NULL) {
+		PMD_LOG_WARN(DRV, "ids and values null, "
+			     "read clean stats regs");
+		ret = 0;
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_XSTAT_MAC_CNT; i++) {
+		sxe_hw_xstat_offset_get(i, &offset);
+		values[cnt] = *(u64 *)(((s8 *)(&stats_info->hw_stats)) + offset);
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_SW_CNT; i++) {
+		sxe_sw_xstat_offset_get(i, &offset);
+		values[cnt] = *(u64 *)(((s8 *)(&stats_info->sw_stats)) + offset);
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_FC_CNT; i++) {
+		for (prio = 0; prio < SXE_FC_PRIO_VALUES; prio++) {
+			sxe_fc_xstat_field_offset_get(i, prio, &offset);
+			values[cnt] = *(u64 *)(((s8 *)(&stats_info->hw_stats))
+					  + offset);
+			cnt++;
+		}
+	}
+
+	ret = cnt;
+
+l_out:
+	return ret;
+}
+
+s32 sxe_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+					const u64 *ids,
+					u64 *values, u32 usr_cnt)
+{
+	s32 ret;
+	u32 size = SXE_XSTAT_CNT;
+	u32 i;
+	u64 value_all[size];
+
+	if (ids == NULL) {
+		ret = sxe_all_xstats_value_get(eth_dev, values, usr_cnt);
+		goto l_out;
+	}
+
+	if (values == NULL) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "invalid param values.");
+		goto l_out;
+	}
+
+	sxe_all_xstats_value_get(eth_dev, value_all, size);
+
+	for (i = 0; i < usr_cnt; i++) {
+		if (ids[i] >= size) {
+			PMD_LOG_ERR(DRV, "index:%u invalid ids:%lu.", i, ids[i]);
+			ret = -EINVAL;
+			goto l_out;
+		}
+		values[i] = value_all[ids[i]];
+	}
+
+	ret = usr_cnt;
+
+l_out:
+	return ret;
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const u64 *ids,
+	u32 usr_cnt)
+#else
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	const u64 *ids,
+	struct rte_eth_xstat_name *xstats_names,
+	u32 usr_cnt)
+#endif
+{
+	s32 ret;
+	u32 i;
+	u32 size = SXE_XSTAT_CNT;
+	struct rte_eth_xstat_name xstat_names_all[size];
+
+	if (ids == NULL) {
+		ret = sxe_xstats_names_get(eth_dev, xstats_names, usr_cnt);
+		goto l_out;
+	}
+
+	sxe_xstats_names_get(eth_dev, xstat_names_all, size);
+	for (i = 0; i < usr_cnt; i++) {
+		if (ids[i] >= size) {
+			PMD_LOG_ERR(DRV, "index:%u invalid ids:%lu.", i, ids[i]);
+			ret = -EINVAL;
+			goto l_out;
+		}
+		strlcpy(xstats_names[ids[i]].name, xstat_names_all[ids[i]].name,
+			sizeof(xstats_names[ids[i]].name));
+	}
+
+	ret = usr_cnt;
+
+l_out:
+	return ret;
+}
+
+s32 sxe_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  u16 queue_id,
+				  u8 stat_reg_idx,
+				  u8 is_rx)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_stats_map *stats_map = &(adapter->stats_info.stats_map);
+	u32 qsmr_mask = 0;
+	u32 map_mask = SXE_STAT_MAP_MASK;
+	u8 reg_idx;
+	u8 map_idx;
+	s32 ret = 0;
+
+	reg_idx = queue_id / SXE_STAT_MAP_CNT;
+	if (reg_idx >= SXE_QUEUE_STATS_MAP_REG_NUM) {
+		ret = -EIO;
+		PMD_LOG_ERR(DRV, "invalid queue_id:%u reg_idx exceeded "
+			    "max map cnt:%u.(err:%d)",
+			    queue_id, SXE_QUEUE_STATS_MAP_REG_NUM, ret);
+		goto l_out;
+	}
+
+	map_idx = (u8)(queue_id % SXE_STAT_MAP_CNT);
+	map_mask <<= (SXE_STAT_MAP_WIDTH * map_idx);
+
+	if (!is_rx) {
+		stats_map->txq_stats_map[reg_idx] &= ~map_mask;
+	} else {
+		stats_map->rxq_stats_map[reg_idx] &= ~map_mask;
+	}
+
+	qsmr_mask = (stat_reg_idx & SXE_STAT_MAP_MASK) << (SXE_STAT_MAP_WIDTH * map_idx);
+	if (!is_rx) {
+		stats_map->txq_stats_map[reg_idx] |= qsmr_mask;
+		sxe_hw_txq_stat_map_set(hw, reg_idx, stats_map->txq_stats_map[reg_idx]);
+	} else {
+		stats_map->rxq_stats_map[reg_idx] |= qsmr_mask;
+		sxe_hw_rxq_stat_map_set(hw, reg_idx, stats_map->rxq_stats_map[reg_idx]);
+	}
+
+	PMD_LOG_INFO(DRV, "port %u %s queue_id %d stat map to stat reg[%u] "
+		     "%s[%u] 0x%08x ",
+		     (u16)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_reg_idx,
+		     is_rx ? "RQSMR" : "TQSM", reg_idx,
+		     is_rx ? stats_map->rxq_stats_map[reg_idx] :
+		     stats_map->txq_stats_map[reg_idx]);
+
+l_out:
+	return ret;
+}
+
+void sxe_queue_stats_map_restore(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_stats_map *stats_map = &(adapter->stats_info.stats_map);
+	u8 reg_idx;
+
+	for (reg_idx = 0; reg_idx < SXE_QUEUE_STATS_MAP_REG_NUM; reg_idx++) {
+		sxe_hw_txq_stat_map_set(hw, reg_idx, stats_map->txq_stats_map[reg_idx]);
+		sxe_hw_rxq_stat_map_set(hw, reg_idx, stats_map->rxq_stats_map[reg_idx]);
+	}
+
+	return;
+}
+
+void sxe_queue_stats_map_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 reg_idx;
+
+#ifdef SET_AUTOFILL_QUEUE_XSTATS
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+	for (reg_idx = 0; reg_idx < SXE_QUEUE_STATS_MAP_REG_NUM; reg_idx++) {
+		sxe_hw_txq_stat_map_set(hw, reg_idx, 0);
+		sxe_hw_rxq_stat_map_set(hw, reg_idx, 0);
+	}
+
+	return;
+}
diff --git a/drivers/net/sxe/pf/sxe_stats.h b/drivers/net/sxe/pf/sxe_stats.h
new file mode 100644
index 0000000000..792a160753
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_stats.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_STATS_H__
+#define __SXE_STATS_H__
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_core.h>
+
+#include "sxe_dpdk_version.h"
+#include "sxe_hw.h"
+
+#define SXE_STATS_FIELD_NAME_SIZE  50
+
+struct sxe_sw_stats {
+	u64 hw_csum_rx_error;
+};
+
+struct sxe_stats_map {
+	u32 txq_stats_map[SXE_QUEUE_STATS_MAP_REG_NUM];
+	u32 rxq_stats_map[SXE_QUEUE_STATS_MAP_REG_NUM];
+};
+
+struct sxe_stats_info {
+	struct sxe_sw_stats  sw_stats;
+	struct sxe_mac_stats hw_stats;
+	struct sxe_stats_map stats_map;
+};
+
+struct sxe_stats_field {
+	s8  name[SXE_STATS_FIELD_NAME_SIZE];
+	u32 offset;
+};
+
+s32 sxe_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats);
+
+s32 sxe_stats_reset(struct rte_eth_dev *eth_dev);
+
+s32 sxe_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 cnt);
+
+s32 sxe_xstats_reset(struct rte_eth_dev *eth_dev);
+
+s32 sxe_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int size);
+
+s32 sxe_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+					const u64 *ids,
+					u64 *values, u32 usr_cnt);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const u64 *ids,
+	u32 usr_cnt);
+#else
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	const u64 *ids,
+	struct rte_eth_xstat_name *xstats_names,
+	u32 usr_cnt);
+#endif
+
+s32 sxe_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  u16 queue_id,
+				  u8 stat_reg_idx,
+				  u8 is_rx);
+
+void sxe_queue_stats_map_restore(struct rte_eth_dev *eth_dev);
+
+void sxe_queue_stats_map_reset(struct rte_eth_dev *eth_dev);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_tx.c b/drivers/net/sxe/pf/sxe_tx.c
new file mode 100644
index 0000000000..6b92e6faed
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_tx.c
@@ -0,0 +1,1069 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#endif
+
+#include <rte_net.h>
+
+#include "sxe.h"
+#include "sxe_tx.h"
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue_common.h"
+#include "sxe_tx_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include <rte_vect.h>
+#endif
+#include "sxe_compat_version.h"
+
+#define SXE_TX_DESC_NO_WB 1
+
+#ifdef RTE_LIBRTE_IEEE1588
+#define SXE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
+#else
+#define SXE_TX_IEEE1588_TMST 0
+#endif
+
+#define SXE_TX_OFFLOAD_MASK (			 \
+		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
+		RTE_MBUF_F_TX_OUTER_IPV4 |		 \
+		RTE_MBUF_F_TX_IPV6 |			 \
+		RTE_MBUF_F_TX_IPV4 |			 \
+		RTE_MBUF_F_TX_VLAN |		 \
+		RTE_MBUF_F_TX_IP_CKSUM |		 \
+		RTE_MBUF_F_TX_L4_MASK |		 \
+		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_MACSEC  |      \
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |		 \
+		SXE_TX_IEEE1588_TMST)
+
+#define SXE_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ SXE_TX_OFFLOAD_MASK)
+#define RTE_SXE_MAX_TX_FREE_BUF_SZ 64
+#define SXE_TXD_IDX_SHIFT	4
+#define SXE_TX_MIN_PKT_LEN	14
+
+extern const struct sxe_txq_ops def_txq_ops;
+
+void __rte_cold sxe_tx_function_set(struct rte_eth_dev *dev,
+					sxe_tx_queue_s *txq)
+{
+	/* No offloads and rs_thresh >= burst size: use the simple tx code path */
+	if ((txq->offloads == 0) &&
+	    (txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST)) {
+		dev->tx_pkt_prepare = NULL;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
+#ifndef DPDK_19_11_6
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+#endif
+		    (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+		    sxe_txq_vec_setup(txq) == 0)) {
+			dev->tx_pkt_burst   = sxe_pkts_vector_xmit;
+			PMD_LOG_INFO(INIT, "using vector tx code path");
+		} else {
+			dev->tx_pkt_burst   = sxe_pkts_simple_xmit;
+			PMD_LOG_INFO(INIT, "using simple tx code path");
+		}
+#else
+		dev->tx_pkt_burst	= sxe_pkts_simple_xmit;
+		PMD_LOG_INFO(INIT, "using simple tx code path");
+#endif
+
+	} else {
+		dev->tx_pkt_burst   = sxe_pkts_xmit_with_offload;
+		dev->tx_pkt_prepare = sxe_prep_pkts;
+
+		PMD_LOG_INFO(INIT, "using full-featured tx code path");
+		PMD_LOG_INFO(INIT, " - offloads = 0x%" PRIx64,
+					(long unsigned int)txq->offloads);
+		PMD_LOG_INFO(INIT, " - tx_rs_thresh = %d "
+				   "[RTE_PMD_SXE_MAX_TX_BURST=%d]",
+				txq->rs_thresh,
+				RTE_PMD_SXE_MAX_TX_BURST);
+	}
+
+	return;
+}
+
+int __rte_cold sxe_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf)
+{
+	s32 ret;
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+	struct tx_setup tx_setup;
+
+	tx_setup.dev = dev;
+	tx_setup.desc_num = ring_depth;
+	tx_setup.queue_idx = tx_queue_id;
+	tx_setup.socket_id = socket_id;
+	tx_setup.reg_base_addr = hw->reg_base_addr;
+	tx_setup.tx_conf = tx_conf;
+
+	ret = __sxe_tx_queue_setup(&tx_setup, false);
+
+	return ret;
+}
+
+static void __rte_cold sxe_tx_start(struct rte_eth_dev *dev)
+{
+	u32 i;
+	sxe_tx_queue_s *txq;
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_tx_enable(hw);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		sxe_hw_tx_desc_thresh_set(hw, txq->reg_idx,
+				txq->wthresh, txq->hthresh, txq->pthresh);
+		if (!txq->tx_deferred_start) {
+			sxe_tx_queue_start(dev, i);
+		}
+	}
+
+	return;
+}
+
+static void sxe_tx_buf_configure(struct sxe_hw *hw)
+{
+	sxe_hw_tx_pkt_buf_switch(hw, false);
+
+	sxe_hw_tx_pkt_buf_size_configure(hw, 0);
+
+	sxe_hw_tx_pkt_buf_thresh_configure(hw, 0, false);
+
+	sxe_hw_tx_pkt_buf_switch(hw, true);
+
+	sxe_hw_mac_pad_enable(hw);
+
+	return;
+}
+
+void __rte_cold sxe_tx_configure(struct rte_eth_dev *dev)
+{
+	u16 i;
+	u64 queue_dma_addr;
+	u32 ring_size;
+	sxe_tx_queue_s *txq;
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_multi_queue_tx_configure(dev);
+
+	sxe_tx_buf_configure(hw);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		queue_dma_addr = txq->base_addr;
+		ring_size = txq->ring_depth * sizeof(sxe_tx_data_desc_u);
+
+		sxe_hw_tx_ring_desc_configure(hw, ring_size, queue_dma_addr,
+						txq->reg_idx);
+	}
+
+	sxe_tx_start(dev);
+
+	return;
+}
+
+static inline void sxe_single_desc_fill(volatile sxe_tx_data_desc_u *desc,
+				struct rte_mbuf **pkts)
+{
+	u32 pkt_len;
+	u64 buf_dma_addr;
+
+	buf_dma_addr = rte_mbuf_data_iova(*pkts);
+	pkt_len = (*pkts)->data_len;
+
+	desc->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+	desc->read.cmd_type_len =
+			rte_cpu_to_le_32((u32)SXE_TX_DESC_FLAGS | pkt_len);
+	desc->read.olinfo_status =
+			rte_cpu_to_le_32(pkt_len << SXE_TX_DESC_PAYLEN_SHIFT);
+	rte_sxe_prefetch(&(*pkts)->pool);
+
+	return;
+}
+
+#define TX4_PER_LOOP 4
+#define TX4_PER_LOOP_MASK (TX4_PER_LOOP - 1)
+
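+/*
+ * Fill four contiguous data descriptors per iteration; the ring fill
+ * helper below splits a burst into a 4-aligned main part and a
+ * remainder that is filled one descriptor at a time.
+ */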
+static inline void sxe_four_desc_fill(volatile sxe_tx_data_desc_u *desc,
+			struct rte_mbuf **pkts)
+{
+	s32 i;
+	u64 buf_dma_addr;
+	u32 pkt_len;
+
+	for (i = 0; i < TX4_PER_LOOP; ++i, ++desc, ++pkts) {
+		buf_dma_addr = rte_mbuf_data_iova(*pkts);
+		pkt_len = (*pkts)->data_len;
+
+		desc->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+		desc->read.cmd_type_len =
+			rte_cpu_to_le_32((u32)SXE_TX_DESC_FLAGS | pkt_len);
+
+		desc->read.olinfo_status =
+			rte_cpu_to_le_32(pkt_len << SXE_TX_DESC_PAYLEN_SHIFT);
+
+		rte_sxe_prefetch(&(*pkts)->pool);
+	}
+
+	return;
+}
+
+static inline void sxe_tx_ring_fill(sxe_tx_queue_s *txq,
+				struct rte_mbuf **pkts, u16 pkts_num)
+{
+	u32 i, j, mainpart, leftover;
+	volatile sxe_tx_data_desc_u *desc =
+					&(txq->desc_ring[txq->next_to_use]);
+	struct sxe_tx_buffer *buffer = &(txq->buffer_ring[txq->next_to_use]);
+
+	mainpart = (pkts_num & ((u32) ~TX4_PER_LOOP_MASK));
+	leftover = (pkts_num & ((u32)  TX4_PER_LOOP_MASK));
+
+	for (i = 0; i < mainpart; i += TX4_PER_LOOP) {
+		for (j = 0; j < TX4_PER_LOOP; ++j) {
+			(buffer + i + j)->mbuf = *(pkts + i + j);
+		}
+		sxe_four_desc_fill(desc + i, pkts + i);
+	}
+
+	if (unlikely(leftover > 0)) {
+		for (i = 0; i < leftover; ++i) {
+			(buffer + mainpart + i)->mbuf = *(pkts + mainpart + i);
+			sxe_single_desc_fill(desc + mainpart + i,
+						pkts + mainpart + i);
+		}
+	}
+
+	return;
+}
+
+s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq)
+{
+	s32 ret = 0;
+	u32 status;
+	s32 i, mbuf_free_num = 0;
+	struct sxe_tx_buffer *buffer;
+	struct rte_mbuf *mbuf, *free_mbuf[RTE_SXE_MAX_TX_FREE_BUF_SZ];
+
+	status = txq->desc_ring[txq->next_dd].wb.status;
+	if (!(status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD))) {
+		ret = 0;
+		goto l_end;
+	}
+
+	buffer = &(txq->buffer_ring[txq->next_dd - (txq->rs_thresh - 1)]);
+
+	for (i = 0; i < txq->rs_thresh; ++i, ++buffer) {
+		mbuf = rte_pktmbuf_prefree_seg(buffer->mbuf);
+		buffer->mbuf = NULL;
+
+		if (unlikely(mbuf == NULL)) {
+			continue;
+		}
+
+		if (mbuf_free_num >= RTE_SXE_MAX_TX_FREE_BUF_SZ ||
+		    (mbuf_free_num > 0 && mbuf->pool != free_mbuf[0]->pool)) {
+			rte_mempool_put_bulk(free_mbuf[0]->pool,
+					     (void **)free_mbuf, mbuf_free_num);
+			mbuf_free_num = 0;
+		}
+
+		free_mbuf[mbuf_free_num++] = mbuf;
+	}
+
+	if (mbuf_free_num > 0) {
+		rte_mempool_put_bulk(free_mbuf[0]->pool,
+					(void **)free_mbuf, mbuf_free_num);
+	}
+
+	txq->next_dd       += txq->rs_thresh;
+	txq->desc_free_num += txq->rs_thresh;
+	if (txq->next_dd >= txq->ring_depth) {
+		txq->next_dd = txq->rs_thresh - 1;
+	}
+
+	ret = txq->rs_thresh;
+
+l_end:
+	return ret;
+}
+
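+/*
+ * Simple transmit path: reclaim descriptors once the free count drops
+ * below the threshold, fill the ring in at most two runs to handle
+ * wraparound, request write-back (RS bit) every rs_thresh descriptors
+ * and finally bump the tail register.
+ */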
+static inline u16 sxe_pkts_xmit(void *tx_queue,
+			struct rte_mbuf **tx_pkts, u16 xmit_pkts_num)
+{
+	u16 n = 0;
+	sxe_tx_queue_s *txq = (sxe_tx_queue_s *)tx_queue;
+	volatile sxe_tx_data_desc_u *desc_ring = txq->desc_ring;
+
+	if (txq->desc_free_num < txq->free_thresh) {
+		sxe_tx_bufs_free(txq);
+	}
+
+	xmit_pkts_num = (u16)RTE_MIN(txq->desc_free_num, xmit_pkts_num);
+	if (unlikely(xmit_pkts_num == 0)) {
+		LOG_DEBUG("simple xmit: not enough free desc, "
+			"free_desc=%u, need_xmit_pkts=%u",
+			txq->desc_free_num, xmit_pkts_num);
+		goto l_end;
+	}
+
+	txq->desc_free_num -= xmit_pkts_num;
+
+	if ((txq->next_to_use + xmit_pkts_num) > txq->ring_depth) {
+		n = txq->ring_depth - txq->next_to_use;
+
+		sxe_tx_ring_fill(txq, tx_pkts, n);
+
+		desc_ring[txq->next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		txq->next_rs = (u16)(txq->rs_thresh - 1);
+
+		txq->next_to_use = 0;
+	}
+
+	sxe_tx_ring_fill(txq, tx_pkts + n, (u16)(xmit_pkts_num - n));
+	txq->next_to_use = (u16)(txq->next_to_use + (xmit_pkts_num - n));
+
+	if (txq->next_to_use > txq->next_rs) {
+		desc_ring[txq->next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		txq->next_rs = (u16)(txq->next_rs + txq->rs_thresh);
+		if (txq->next_rs >= txq->ring_depth) {
+			txq->next_rs = (u16)(txq->rs_thresh - 1);
+		}
+	}
+
+	if (txq->next_to_use >= txq->ring_depth) {
+		txq->next_to_use = 0;
+	}
+
+	rte_wmb();
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(txq->next_to_use)),
+							txq->tdt_reg_addr);
+
+l_end:
+	return xmit_pkts_num;
+}
+
+u16 sxe_pkts_simple_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	sxe_tx_queue_s *queue = tx_queue;
+	u16 ret, xmit_pkts_num, need_xmit_pkts;
+	UNUSED(queue);
+
+	if (likely(pkts_num <= RTE_PMD_SXE_MAX_TX_BURST)) {
+		xmit_pkts_num = sxe_pkts_xmit(tx_queue, tx_pkts, pkts_num);
+		goto l_end;
+	}
+
+	/* When pkts_num > 32, the packets are sent in a loop */
+	xmit_pkts_num = 0;
+	while (pkts_num) {
+		need_xmit_pkts = (u16)RTE_MIN(pkts_num, RTE_PMD_SXE_MAX_TX_BURST);
+
+		/* Single transmit */
+		ret = sxe_pkts_xmit(tx_queue, &(tx_pkts[xmit_pkts_num]),
+							need_xmit_pkts);
+
+		pkts_num      -= ret;
+		xmit_pkts_num += ret;
+
+		/* Not enough free descriptors left */
+		if (ret < need_xmit_pkts) {
+			break;
+		}
+	}
+
+	LOG_DEBUG("simple xmit:port_id=%u, queue_id=%u, "
+		"remain_pkts_num=%d, xmit_pkts_num=%d",
+		queue->port_id, queue->port_id,
+		pkts_num, xmit_pkts_num);
+
+l_end:
+	return xmit_pkts_num;
+}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+u16 sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   u16 pkts_num)
+{
+	u16 xmit_pkts_num = 0;
+	sxe_tx_queue_s *queue = (sxe_tx_queue_s *)tx_queue;
+
+	while (pkts_num) {
+		u16 ret, need_xmit_pkts;
+
+		need_xmit_pkts = (u16)RTE_MIN(pkts_num, queue->rs_thresh);
+		ret = __sxe_pkts_vector_xmit(tx_queue, &tx_pkts[xmit_pkts_num],
+				need_xmit_pkts);
+
+		xmit_pkts_num += ret;
+		pkts_num -= ret;
+		if (ret < need_xmit_pkts) {
+			break;
+		}
+	}
+
+	return xmit_pkts_num;
+}
+#endif
+
+u16 sxe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	s32 i, ret;
+	u64 ol_flags;
+	struct rte_mbuf *mbuf;
+	sxe_tx_queue_s *txq = (sxe_tx_queue_s *)tx_queue;
+
+	/* Check if the pkts is legal */
+	for (i = 0; i < pkts_num; i++) {
+		mbuf = tx_pkts[i];
+		ol_flags = mbuf->ol_flags;
+
+		if (mbuf->nb_segs > SXE_TX_MAX_SEG - txq->wthresh) {
+			rte_errno = EINVAL;
+			goto l_end;
+		}
+
+		/* Check offload */
+		if (ol_flags & SXE_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			goto l_end;
+		}
+
+		if (mbuf->pkt_len < SXE_TX_MIN_PKT_LEN) {
+			rte_errno = EINVAL;
+			goto l_end;
+		}
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+		ret = rte_validate_tx_offload(mbuf);
+		if (ret != 0) {
+			rte_errno = -ret;
+			goto l_end;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(mbuf);
+		if (ret != 0) {
+			rte_errno = -ret;
+			goto l_end;
+		}
+	}
+
+l_end:
+	return i;
+}
+
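+/*
+ * The queue keeps a two-entry cache of hardware context descriptors.
+ * Return false when the requested offloads match a cached context (no
+ * new context descriptor is needed); otherwise switch to the other
+ * slot and report that a new context descriptor must be built.
+ */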
+static inline bool sxe_cache_ctxt_desc_match(
+				sxe_tx_queue_s *txq,
+				struct rte_mbuf *pkt,
+				u64 flags,
+				union sxe_tx_offload *ol_info)
+{
+	bool ret;
+
+	ol_info->l2_len       = pkt->l2_len;
+	ol_info->l3_len       = pkt->l3_len;
+	ol_info->l4_len       = pkt->l4_len;
+	ol_info->vlan_tci     = pkt->vlan_tci;
+	ol_info->tso_segsz    = pkt->tso_segsz;
+	ol_info->outer_l2_len = pkt->outer_l2_len;
+	ol_info->outer_l3_len = pkt->outer_l3_len;
+
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & ol_info->data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & ol_info->data[1])))) {
+
+		ret = false;
+		goto l_end;
+	}
+
+	txq->ctx_curr ^= 1;
+
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & ol_info->data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & ol_info->data[1])))) {
+
+		ret = false;
+		goto l_end;
+	}
+
+	ret = true;
+
+l_end:
+	return ret;
+}
+
+static inline void sxe_ctxt_desc_fill(sxe_tx_queue_s *txq,
+			volatile struct sxe_tx_context_desc *ctx_txd,
+			u64 ol_flags,
+			union sxe_tx_offload tx_offload,
+			__rte_unused u64 *mdata)
+{
+	u32 type_tucmd_mlhl;
+	u32 mss_l4len_idx = 0;
+	u32 ctx_idx;
+	u32 vlan_macip_lens;
+	union sxe_tx_offload tx_offload_mask;
+	u32 seqnum_seed = 0;
+
+	ctx_idx = txq->ctx_curr;
+	tx_offload_mask.data[0] = 0;
+	tx_offload_mask.data[1] = 0;
+	type_tucmd_mlhl = 0;
+
+	mss_l4len_idx |= (ctx_idx << SXE_TXD_IDX_SHIFT);
+
+	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+		tx_offload_mask.vlan_tci |= ~0;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+			type_tucmd_mlhl = SXE_TX_CTXTD_TUCMD_IPV4 |
+				SXE_TX_CTXTD_TUCMD_L4T_TCP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+		} else {
+			type_tucmd_mlhl = SXE_TX_CTXTD_TUCMD_IPV6 |
+				SXE_TX_CTXTD_TUCMD_L4T_TCP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+		}
+		mss_l4len_idx |= tx_offload.tso_segsz << SXE_TX_CTXTD_MSS_SHIFT;
+		mss_l4len_idx |= tx_offload.l4_len << SXE_TX_CTXTD_L4LEN_SHIFT;
+
+		tx_offload_mask.l2_len |= ~0;
+		tx_offload_mask.l3_len |= ~0;
+		tx_offload_mask.l4_len |= ~0;
+		tx_offload_mask.tso_segsz |= ~0;
+
+	} else {
+		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+			type_tucmd_mlhl = SXE_TX_CTXTD_TUCMD_IPV4;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+		}
+
+		switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+		case RTE_MBUF_F_TX_UDP_CKSUM:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_UDP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+				<< SXE_TX_CTXTD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case RTE_MBUF_F_TX_TCP_CKSUM:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_TCP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+				<< SXE_TX_CTXTD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case RTE_MBUF_F_TX_SCTP_CKSUM:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_SCTP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
+				<< SXE_TX_CTXTD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		default:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_RSV |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			break;
+		}
+	}
+
+	vlan_macip_lens = tx_offload.l3_len;
+	vlan_macip_lens |= ((u32)tx_offload.vlan_tci << SXE_TX_CTXTD_VLAN_SHIFT);
+
+	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
+		tx_offload_mask.outer_l2_len |= ~0;
+		tx_offload_mask.outer_l3_len |= ~0;
+		tx_offload_mask.l2_len |= ~0;
+		seqnum_seed |= tx_offload.outer_l3_len
+			       << SXE_TX_CTXTD_OUTER_IPLEN_SHIFT;
+		seqnum_seed |= tx_offload.l2_len
+			       << SXE_TX_CTXTD_TUNNEL_LEN_SHIFT;
+		vlan_macip_lens |= (tx_offload.outer_l2_len <<
+				    SXE_TX_CTXTD_MACLEN_SHIFT);
+	} else {
+		vlan_macip_lens |= (tx_offload.l2_len <<
+				    SXE_TX_CTXTD_MACLEN_SHIFT);
+	}
+
+	txq->ctx_cache[ctx_idx].flags = ol_flags;
+	txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
+		tx_offload_mask.data[0] & tx_offload.data[0];
+	txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
+		tx_offload_mask.data[1] & tx_offload.data[1];
+	txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
+
+	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+	ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
+	ctx_txd->seqnum_seed     = seqnum_seed;
+
+	return;
+}
+
+static inline u32 sxe_tx_desc_csum_info_setup(u64 ol_flags)
+{
+	u32 desc_csum = 0;
+
+	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
+		desc_csum |= SXE_TXD_POPTS_TXSM;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+		desc_csum |= SXE_TXD_POPTS_IXSM;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		desc_csum |= SXE_TXD_POPTS_TXSM;
+	}
+
+	return desc_csum;
+}
+
+static inline u32 sxe_tx_desc_cmdtype_setup(u64 ol_flags)
+{
+	u32 cmdtype = 0;
+
+	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+		cmdtype |= SXE_TX_DESC_VLE;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		cmdtype |= SXE_TXD_DCMD_TSE;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
+		cmdtype |= (1 << SXE_TX_OUTERIPCS_SHIFT);
+	}
+
+#ifdef SXE_DPDK_MACSEC
+	if (ol_flags & RTE_MBUF_F_TX_MACSEC) {
+		cmdtype |= SXE_TXD_MAC_LINKSEC;
+	}
+#endif
+
+	return cmdtype;
+}
+
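+/*
+ * Reclaim a batch of rs_thresh descriptors starting at next_to_clean;
+ * the batch is considered complete only when the hardware has set the
+ * DD bit in the write-back status of its last descriptor.
+ */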
+static inline s32 sxe_xmit_cleanup(sxe_tx_queue_s *txq)
+{
+	s32 ret = 0;
+	u32 wb_status;
+	u16 ntc = txq->next_to_clean;
+	u16 ring_depth = txq->ring_depth;
+	u16 desc_to_clean_to, nb_tx_to_clean;
+	struct sxe_tx_buffer *buffer_ring = txq->buffer_ring;
+	volatile sxe_tx_data_desc_u *desc_ring = txq->desc_ring;
+
+	PMD_INIT_FUNC_TRACE();
+
+	desc_to_clean_to = (u16)(ntc + txq->rs_thresh);
+
+	if (desc_to_clean_to >= ring_depth) {
+		desc_to_clean_to = (u16)(desc_to_clean_to - ring_depth);
+	}
+
+	desc_to_clean_to = buffer_ring[desc_to_clean_to].last_id;
+
+	wb_status = desc_ring[desc_to_clean_to].wb.status;
+	if (!(wb_status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD))) {
+		LOG_DEBUG("TX descriptor %4u is not done"
+				"(port=%d queue=%d)",
+				desc_to_clean_to,
+				txq->port_id, txq->queue_idx);
+
+		ret = -SXE_TX_DESC_NO_WB;
+		goto l_end;
+	}
+
+	if (ntc > desc_to_clean_to) {
+		nb_tx_to_clean = (u16)((ring_depth - ntc) +
+						desc_to_clean_to);
+	} else {
+		nb_tx_to_clean = (u16)(desc_to_clean_to - ntc);
+	}
+
+	LOG_DEBUG("Cleaning %4u TX descriptors: %4u to %4u "
+			"(port=%d queue=%d)",
+			nb_tx_to_clean, ntc, desc_to_clean_to,
+			txq->port_id, txq->queue_idx);
+
+	desc_ring[desc_to_clean_to].wb.status = 0;
+
+	txq->next_to_clean = desc_to_clean_to;
+
+	txq->desc_free_num = (u16)(txq->desc_free_num + nb_tx_to_clean);
+
+l_end:
+	return ret;
+}
+
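+/*
+ * Free enough descriptors for one packet; when a packet needs more
+ * than rs_thresh descriptors the cleanup is repeated until the
+ * request can be satisfied.
+ */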
+static inline s32 sxe_tx_pkt_desc_clean(
+			sxe_tx_queue_s *txq,
+			u32 need_desc_num)
+{
+	s32 ret = 0;
+
+	LOG_DEBUG("Not enough free TX descriptors "
+			"need_desc_num=%4u nb_free=%4u "
+			"(port=%d queue=%d)",
+			need_desc_num, txq->desc_free_num,
+			txq->port_id, txq->queue_idx);
+
+	ret = sxe_xmit_cleanup(txq);
+	if (ret) {
+		goto l_end;
+	}
+
+	if (unlikely(need_desc_num > txq->rs_thresh)) {
+		LOG_DEBUG(
+			"The number of descriptors needed to "
+			"transmit the packet exceeds the "
+			"RS bit threshold. This will impact "
+			"performance."
+			"need_desc_num=%4u nb_free=%4u "
+			"rs_thresh=%4u. "
+			"(port=%d queue=%d)",
+			need_desc_num, txq->desc_free_num,
+			txq->rs_thresh,
+			txq->port_id, txq->queue_idx);
+
+		/* Clean up enough descriptors */
+		while (need_desc_num > txq->desc_free_num) {
+			ret = sxe_xmit_cleanup(txq);
+			if (ret) {
+				goto l_end;
+			}
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	s32 ret;
+	u64 ol_req;
+	bool new_ctx;
+	u64 buf_dma_addr;
+	struct rte_mbuf *pkt;
+	struct rte_mbuf *m_seg;
+	union sxe_tx_offload ol_info;
+	sxe_tx_queue_s  *txq = tx_queue;
+	u32 pkt_len, cmd_type_len, olinfo_status;
+	u16 need_desc_num, last_desc_idx, xmit_num, ntu, seg_len;
+	volatile sxe_tx_data_desc_u *tail_desc = NULL;
+	volatile sxe_tx_data_desc_u *desc_ring, *desc;
+	struct sxe_tx_buffer *buffer_ring, *buffer, *next_buffer;
+
+	ol_info.data[SXE_CTXT_DESC_0] = 0;
+	ol_info.data[SXE_CTXT_DESC_1] = 0;
+	ntu         = txq->next_to_use;
+	desc_ring   = txq->desc_ring;
+	buffer_ring = txq->buffer_ring;
+	buffer      = &buffer_ring[ntu];
+
+	if (txq->desc_free_num < txq->free_thresh) {
+		sxe_xmit_cleanup(txq);
+	}
+
+	/* Refresh cache, pre fetch data to cache */
+	rte_sxe_prefetch(&buffer->mbuf->pool);
+
+	for (xmit_num = 0; xmit_num < pkts_num; xmit_num++) {
+		new_ctx = false;
+		pkt = *tx_pkts++;
+		pkt_len = pkt->pkt_len;
+
+		ol_req = pkt->ol_flags & SXE_TX_OFFLOAD_MASK;
+		if (ol_req) {
+			new_ctx = sxe_cache_ctxt_desc_match(txq, pkt, ol_req, &ol_info);
+		}
+
+		need_desc_num = (u16)(pkt->nb_segs + new_ctx);
+
+		if (tail_desc != NULL &&
+		    need_desc_num + txq->desc_used_num >= txq->rs_thresh) {
+			tail_desc->read.cmd_type_len |=
+				rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		}
+
+		last_desc_idx = (u16) (ntu + need_desc_num - 1);
+
+		if (last_desc_idx >= txq->ring_depth) {
+			last_desc_idx = (u16) (last_desc_idx - txq->ring_depth);
+		}
+
+		LOG_DEBUG("port_id=%u queue_id=%u pktlen=%u"
+			   " next_to_ues=%u last_desc_idx=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_idx,
+			   (unsigned) pkt_len,
+			   (unsigned) ntu,
+			   (unsigned) last_desc_idx);
+
+		if (need_desc_num > txq->desc_free_num) {
+			ret = sxe_tx_pkt_desc_clean(txq, need_desc_num);
+			if (ret) {
+				if (xmit_num == 0) {
+					goto l_end;
+				}
+
+				goto l_end_of_tx;
+			}
+		}
+
+		cmd_type_len = SXE_TX_DESC_TYPE_DATA | SXE_TX_DESC_IFCS;
+#ifdef RTE_LIBRTE_IEEE1588
+		if (pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+			cmd_type_len |= SXE_TXD_MAC_1588;
+		}
+#endif
+
+		olinfo_status = 0;
+		if (ol_req) {
+			if (pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+				pkt_len -= (ol_info.l2_len +
+					ol_info.l3_len + ol_info.l4_len);
+			}
+
+			if (new_ctx) {
+				volatile struct sxe_tx_context_desc *ctx_desc;
+
+				ctx_desc = (volatile struct
+					sxe_tx_context_desc *) &desc_ring[ntu];
+
+				next_buffer = &buffer_ring[buffer->next_id];
+				rte_prefetch0(&next_buffer->mbuf->pool);
+
+				if (buffer->mbuf != NULL) {
+					rte_pktmbuf_free_seg(buffer->mbuf);
+					buffer->mbuf = NULL;
+				}
+
+				sxe_ctxt_desc_fill(txq, ctx_desc, ol_req,
+						ol_info, NULL);
+
+				buffer->last_id = last_desc_idx;
+				ntu = buffer->next_id;
+				buffer = next_buffer;
+			}
+
+			LOG_DEBUG("tx need offload, port_id=%u "
+			"queue_id=%u pktlen=%u, ctxt_id=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_idx,
+			   (unsigned) pkt_len,
+			   (unsigned) txq->ctx_curr);
+
+			cmd_type_len  |= sxe_tx_desc_cmdtype_setup(pkt->ol_flags);
+			olinfo_status |= sxe_tx_desc_csum_info_setup(pkt->ol_flags);
+			olinfo_status |= txq->ctx_curr << SXE_TXD_IDX_SHIFT;
+		}
+		olinfo_status |= (pkt_len << SXE_TX_DESC_PAYLEN_SHIFT);
+
+		m_seg = pkt;
+		do {
+			desc = &desc_ring[ntu];
+			next_buffer = &buffer_ring[buffer->next_id];
+
+			rte_prefetch0(&next_buffer->mbuf->pool);
+			if (buffer->mbuf != NULL) {
+				rte_pktmbuf_free_seg(buffer->mbuf);
+			}
+
+			buffer->mbuf = m_seg;
+
+			seg_len = m_seg->data_len;
+
+			buf_dma_addr = rte_mbuf_data_iova(m_seg);
+			desc->read.buffer_addr =
+				rte_cpu_to_le_64(buf_dma_addr);
+			desc->read.cmd_type_len =
+				rte_cpu_to_le_32(cmd_type_len | seg_len);
+			desc->read.olinfo_status =
+				rte_cpu_to_le_32(olinfo_status);
+			buffer->last_id = last_desc_idx;
+			ntu = buffer->next_id;
+			buffer = next_buffer;
+			m_seg = m_seg->next;
+		} while (m_seg != NULL);
+
+		cmd_type_len |= SXE_TX_DESC_EOP_MASK;
+		txq->desc_used_num += need_desc_num;
+		txq->desc_free_num -= need_desc_num;
+
+		if (txq->desc_used_num >= txq->rs_thresh) {
+			LOG_DEBUG("Setting RS bit on TXD id="
+					"%4u (port=%d queue=%d)",
+					last_desc_idx, txq->port_id, txq->queue_idx);
+
+			cmd_type_len |= SXE_TX_DESC_RS_MASK;
+
+			txq->desc_used_num = 0;
+			tail_desc = NULL;
+		} else {
+			tail_desc = desc;
+		}
+
+		desc->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+	}
+
+l_end_of_tx:
+	if (tail_desc != NULL) {
+		tail_desc->read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+	}
+
+	rte_wmb();
+
+	LOG_DEBUG("port_id=%u queue_idx=%u next_to_use=%u xmit_num=%u",
+		   (unsigned) txq->port_id, (unsigned) txq->queue_idx,
+		   (unsigned) ntu, (unsigned) xmit_num);
+
+	rte_write32_wc_relaxed(ntu, txq->tdt_reg_addr);
+
+	txq->next_to_use = ntu;
+
+l_end:
+	return xmit_num;
+}
+
+u16 sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	return __sxe_pkts_xmit_with_offload(tx_queue, tx_pkts, pkts_num);
+}
+
+u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt)
+{
+	u32 pkt_cnt;
+	u16 i, ntu, tx_id;
+	u16 nb_tx_free_last;
+	u16 nb_tx_to_clean;
+	struct sxe_tx_buffer *buffer_ring = txq->buffer_ring;
+
+	ntu    = txq->next_to_use;
+	tx_id  = buffer_ring[ntu].next_id;
+
+	if (txq->desc_free_num == 0 && sxe_xmit_cleanup(txq)) {
+		pkt_cnt = 0;
+		goto l_end;
+	}
+
+	nb_tx_to_clean  = txq->desc_free_num;
+	nb_tx_free_last = txq->desc_free_num;
+
+	if (!free_cnt) {
+		free_cnt = txq->ring_depth;
+	}
+
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean && pkt_cnt < free_cnt &&
+			tx_id != ntu; i++) {
+			if (buffer_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(buffer_ring[tx_id].mbuf);
+				buffer_ring[tx_id].mbuf = NULL;
+
+				pkt_cnt += (buffer_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = buffer_ring[tx_id].next_id;
+		}
+
+		if (txq->rs_thresh > txq->ring_depth - txq->desc_free_num ||
+				tx_id == ntu) {
+			break;
+		}
+
+		if (pkt_cnt < free_cnt) {
+			if (sxe_xmit_cleanup(txq)) {
+				break;
+			}
+
+			nb_tx_to_clean = txq->desc_free_num - nb_tx_free_last;
+			nb_tx_free_last = txq->desc_free_num;
+		}
+	}
+
+l_end:
+	return pkt_cnt;
+}
+
+int sxe_tx_done_cleanup_simple(sxe_tx_queue_s *txq, u32 free_cnt)
+{
+	int i, n, cnt;
+
+	if (free_cnt == 0 || free_cnt > txq->ring_depth) {
+		free_cnt = txq->ring_depth;
+	}
+
+	cnt = free_cnt - free_cnt % txq->rs_thresh;
+
+	for (i = 0; i < cnt; i += n) {
+		if (txq->ring_depth - txq->desc_free_num < txq->rs_thresh) {
+			break;
+		}
+
+		n = sxe_tx_bufs_free(txq);
+		if (n == 0) {
+			break;
+		}
+	}
+
+	return i;
+}
+
+int sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	s32 ret;
+
+	ret = __sxe_tx_done_cleanup(tx_queue, free_cnt);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "tx cleanup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+int sxe_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	return __sxe_tx_descriptor_status(tx_queue, offset);
+}
diff --git a/drivers/net/sxe/pf/sxe_tx.h b/drivers/net/sxe/pf/sxe_tx.h
new file mode 100644
index 0000000000..78249c3340
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_tx.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_TX_H__
+#define __SXE_TX_H__
+
+#include <rte_mbuf_core.h>
+
+#include "sxe_queue.h"
+
+#define RTE_PMD_SXE_MAX_TX_BURST 32
+
+void __rte_cold sxe_tx_configure(struct rte_eth_dev *dev);
+
+int __rte_cold sxe_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf);
+int sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+void __rte_cold sxe_tx_function_set(struct rte_eth_dev *dev,
+					sxe_tx_queue_s *txq);
+
+int sxe_tx_done_cleanup_simple(sxe_tx_queue_s *txq, u32 free_cnt);
+
+u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt);
+
+s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vf.c b/drivers/net/sxe/pf/sxe_vf.c
new file mode 100644
index 0000000000..74a0bbb370
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vf.c
@@ -0,0 +1,1275 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_memcpy.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe_vf.h"
+#include "sxe_hw.h"
+#include "sxe.h"
+#include "sxe_errno.h"
+#include "sxe_filter.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+
+#define SXE_MR_VLAN_MASK  0xFFFFFFFF
+#define SXE_MR_VLAN_MSB_BIT_OFFSET 32
+
+#define SXE_MR_VIRTUAL_POOL_MASK         0xFFFFFFFF
+#define SXE_MR_VIRTUAL_POOL_MSB_BIT_MASK 32
+
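+/* Assign a random unicast MAC address to every VF at VT init time. */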
+static inline s32 sxe_vf_mac_addr_generate(struct rte_eth_dev *eth_dev, u16 vf_num)
+{
+	u8 vf_mac_addr[RTE_ETHER_ADDR_LEN];
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	u16 idx;
+
+	for (idx = 0; idx < vf_num; idx++) {
+		rte_eth_random_addr(vf_mac_addr);
+		memcpy(vf_info[idx].mac_addr, vf_mac_addr, RTE_ETHER_ADDR_LEN);
+	}
+
+	return 0;
+}
+
+static void sxe_vt_mode_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 gpie;
+	u32 pcie_ext;
+
+	pcie_ext = sxe_hw_pcie_vt_mode_get(hw);
+	pcie_ext &= ~SXE_GCR_EXT_VT_MODE_MASK;
+
+	gpie = sxe_hw_irq_general_reg_get(hw);
+	gpie &= ~SXE_GPIE_VTMODE_MASK;
+	gpie |= SXE_GPIE_MSIX_MODE;
+
+	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+	case RTE_ETH_64_POOLS:
+		pcie_ext |= SXE_GCR_EXT_VT_MODE_64;
+		gpie |= SXE_GPIE_VTMODE_64;
+		break;
+	case RTE_ETH_32_POOLS:
+		pcie_ext |= SXE_GCR_EXT_VT_MODE_32;
+		gpie |= SXE_GPIE_VTMODE_32;
+		break;
+	case RTE_ETH_16_POOLS:
+		pcie_ext |= SXE_GCR_EXT_VT_MODE_16;
+		gpie |= SXE_GPIE_VTMODE_16;
+		break;
+	}
+
+	sxe_hw_pcie_vt_mode_set(hw, pcie_ext);
+	sxe_hw_irq_general_reg_set(hw, gpie);
+
+	return;
+}
+
+s32 sxe_vt_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_vf_info **vf_info = &adapter->vt_ctxt.vf_info;
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	struct sxe_mirror_info *mirror_info = &adapter->vt_ctxt.mr_info;
+#endif
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u16 vf_num;
+	s32 ret = 0;
+	u8 nb_queue;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+	/* get vf num from max_vfs or sriov_numvfs */
+	vf_num = sxe_vf_num_get(eth_dev);
+	if (vf_num == 0) {
+		LOG_WARN_BDF("no vf, no need init vt");
+		goto l_out;
+	}
+
+	*vf_info = rte_zmalloc("vf_info", sizeof(struct sxe_vf_info) * vf_num, 0);
+	if (*vf_info == NULL) {
+		LOG_WARN_BDF("vf_info allocate memory fail.");
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	ret = rte_eth_switch_domain_alloc(&(*vf_info)->domain_id);
+	if (ret) {
+		LOG_ERROR_BDF("failed to allocate switch domain for device %d", ret);
+		goto l_free_vf_info;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	memset(mirror_info, 0, sizeof(struct sxe_mirror_info));
+#endif
+
+	if (vf_num >= RTE_ETH_32_POOLS) {
+		nb_queue = 2;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
+		nb_queue = 4;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
+	} else {
+		nb_queue = 8;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
+	}
+
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (u16)(vf_num * nb_queue);
+
+	sxe_vf_mac_addr_generate(eth_dev, vf_num);
+
+	sxe_hw_mbx_init(hw);
+
+	irq->enable_mask |= SXE_EIMS_MAILBOX;
+
+	sxe_vt_mode_configure(eth_dev);
+
+	LOG_INFO_BDF("vf_num:%d domain id:%u init done.",
+		      vf_num, (*vf_info)->domain_id);
+
+l_out:
+	return ret;
+
+l_free_vf_info:
+	rte_free(*vf_info);
+	*vf_info = NULL;
+	return ret;
+}
+
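+/*
+ * Enable RX and TX for the PF pool and every pool above the VF range:
+ * bit 5 of the pool index selects one of the two 32-bit bitmap
+ * registers and the low five bits give the bit position within it.
+ */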
+static void sxe_pf_pool_enable(struct rte_eth_dev *eth_dev, u16 vf_num)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 enable_mask = ~0;
+	u8 vf_reg_idx = ((vf_num >> 5) > 0) ? 1 : 0;
+	u8 vf_bit_index = vf_num & ((1 << 5) - 1);
+
+	sxe_hw_rx_pool_bitmap_set(hw, vf_reg_idx, enable_mask << vf_bit_index);
+	sxe_hw_rx_pool_bitmap_set(hw, (vf_reg_idx ^ 1), (vf_reg_idx - 1));
+
+	sxe_hw_tx_pool_bitmap_set(hw, vf_reg_idx, enable_mask << vf_bit_index);
+	sxe_hw_tx_pool_bitmap_set(hw, (vf_reg_idx ^ 1), (vf_reg_idx - 1));
+
+	return;
+}
+
+static void sxe_vf_vlan_filter_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 enable_mask = ~0;
+	u32 vlan_ctl;
+	u8 i;
+
+	vlan_ctl = sxe_hw_vlan_type_get(hw);
+	vlan_ctl |= SXE_VLNCTRL_VFE;
+	sxe_hw_vlan_type_set(hw, vlan_ctl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		sxe_hw_vlan_filter_array_write(hw, i, enable_mask);
+	}
+
+	return;
+}
+
+void sxe_vt_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 vf_num;
+	u16 pf_pool_idx = RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx;
+
+	vf_num = sxe_vf_num_get(eth_dev);
+	if (vf_num == 0) {
+		LOG_WARN_BDF("no vf, no need configure vt");
+		goto l_out;
+	}
+
+	sxe_hw_vt_ctrl_cfg(hw, pf_pool_idx);
+
+	sxe_pf_pool_enable(eth_dev, vf_num);
+
+	sxe_hw_vt_pool_loopback_switch(hw, true);
+
+	sxe_hw_mac_pool_clear(hw, 0);
+	sxe_hw_mac_pool_clear(hw, SXE_UC_ENTRY_NUM_MAX - 1);
+
+	sxe_hw_uc_addr_pool_enable(hw, 0, pf_pool_idx);
+
+	sxe_vt_mode_configure(eth_dev);
+
+	sxe_vf_vlan_filter_enable(eth_dev);
+
+	sxe_hw_pool_mac_anti_spoof_set(hw, vf_num, 0);
+
+	sxe_rx_fc_threshold_set(hw);
+
+l_out:
+	return;
+}
+
+void sxe_vt_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_vf_info **vf_info = &adapter->vt_ctxt.vf_info;
+	u16 vf_num;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+
+	vf_num = sxe_vf_num_get(eth_dev);
+	if ((vf_num == 0) || (*vf_info) == NULL) {
+		LOG_INFO_BDF("vf_num:%u vf_info:%p, no need free vf_info.",
+			     vf_num, *vf_info);
+		goto l_out;
+	}
+
+	ret = rte_eth_switch_domain_free((*vf_info)->domain_id);
+	if (ret) {
+		LOG_ERROR_BDF("failed to free switch domain: %d", ret);
+	}
+
+	rte_free(*vf_info);
+	*vf_info = NULL;
+
+l_out:
+	return;
+}
+
+s32 sxe_vf_rss_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	bool is_4q_per_pool;
+	s32 ret = 0;
+
+	sxe_rss_configure(dev);
+
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case RTE_ETH_64_POOLS:
+		is_4q_per_pool = false;
+		break;
+
+	case RTE_ETH_32_POOLS:
+		is_4q_per_pool = true;
+		break;
+
+	default:
+		ret = -EINVAL;
+		LOG_ERROR_BDF("invalid pool number:%u in iov mode with rss.(err:%d)",
+			      RTE_ETH_DEV_SRIOV(dev).active, ret);
+		goto l_out;
+	}
+
+	sxe_hw_rx_multi_ring_configure(hw, 0, is_4q_per_pool, true);
+
+	LOG_INFO_BDF("pool num:%u is_4q_per_pool:%u configure done.",
+		    RTE_ETH_DEV_SRIOV(dev).active, is_4q_per_pool);
+
+l_out:
+	return ret;
+}
+
+s32 sxe_vf_default_mode_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u8 tcs = 0;
+	bool is_4q_per_pool = false;
+
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case RTE_ETH_64_POOLS:
+		is_4q_per_pool = false;
+		break;
+
+	case RTE_ETH_32_POOLS:
+		is_4q_per_pool = true;
+		break;
+
+	case RTE_ETH_16_POOLS:
+		tcs = 8;
+		break;
+	default:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("invalid pool number:%u (err:%d)",
+			      RTE_ETH_DEV_SRIOV(dev).active, ret);
+		goto l_out;
+	}
+
+	sxe_hw_rx_multi_ring_configure(hw, tcs, is_4q_per_pool, true);
+
+l_out:
+	return ret;
+}
+
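+/* Rebuild the global rx filter and the PF pool's l2 filter from the
+ * current promiscuous/allmulticast state, then re-apply vlan stripping.
+ */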
+static void sxe_filter_mode_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 vf_num = sxe_vf_num_get(dev);
+	u32 filter_ctrl = sxe_hw_rx_mode_get(hw);
+	u32 vm_l2_ctrl = SXE_VMOLR_AUPE | SXE_VMOLR_BAM;
+
+	filter_ctrl &= ~(SXE_FCTRL_SBP | SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+
+	filter_ctrl |= SXE_FCTRL_BAM;
+
+	if (dev->data->promiscuous) {
+		filter_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+		vm_l2_ctrl |= (SXE_VMOLR_ROPE | SXE_VMOLR_MPE);
+	} else {
+		if (dev->data->all_multicast) {
+			filter_ctrl |= SXE_FCTRL_MPE;
+			vm_l2_ctrl |= SXE_VMOLR_MPE;
+		} else {
+			vm_l2_ctrl |= SXE_VMOLR_ROMPE;
+		}
+	}
+
+	vm_l2_ctrl |= sxe_hw_pool_rx_mode_get(hw, vf_num) &
+			~(SXE_VMOLR_MPE | SXE_VMOLR_ROMPE | SXE_VMOLR_ROPE);
+
+	sxe_hw_pool_rx_mode_set(hw, vm_l2_ctrl, vf_num);
+
+	sxe_hw_rx_mode_set(hw, filter_ctrl);
+
+	sxe_vlan_strip_switch_set(dev);
+}
+
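+/* Cleanup after a VF function level reset: drop the VF's software uc
+ * entries, restore the default per-pool rx filter, clear tx vlan tag
+ * insertion and the cached mc hash, then rebuild the filter mode.
+ */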
+static inline void sxe_vf_flr_handle(struct rte_eth_dev *dev, u16 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	u32 vm_l2_ctrl = sxe_hw_pool_rx_mode_get(hw, vf);
+
+	sxe_sw_uc_entry_vf_del(adapter, vf, false);
+
+	vm_l2_ctrl |= (SXE_VMOLR_AUPE | SXE_VMOLR_ROPE | SXE_VMOLR_BAM);
+
+	sxe_hw_pool_rx_mode_set(hw, vm_l2_ctrl, vf);
+
+	sxe_hw_tx_vlan_tag_clear(hw, vf);
+
+	vf_info[vf].mc_hash_used = 0;
+
+	sxe_filter_mode_configure(dev);
+
+	return;
+}
+
+static s32 sxe_vf_dev_mac_addr_set_handler(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_uc_addr_msg mac_msg = *(struct sxe_uc_addr_msg *)msgbuf;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	u32 rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf, mac_msg.uc_addr, false);
+	s32 ret = -SXE_ERR_PARAM;
+
+	if (rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)mac_msg.uc_addr)) {
+		rte_memcpy(vf_info[vf].mac_addr, mac_msg.uc_addr, RTE_ETHER_ADDR_LEN);
+		ret = sxe_hw_uc_addr_add(&adapter->hw, rar_idx, mac_msg.uc_addr, vf);
+		if (ret) {
+			LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" set fail.(err:%d)",
+				      vf, MAC_ADDR(mac_msg.uc_addr), ret);
+		}
+	}
+
+	return ret;
+}
+
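+/* Mailbox API negotiation: record the version requested by the VF when
+ * it is one of the supported versions (1.0 - 1.3), otherwise reject it.
+ */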
+STATIC s32 sxe_mbx_api_set_handler(struct rte_eth_dev *dev,
+						    u32 *msg, u32 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_mbx_api_msg *api_msg = (struct sxe_mbx_api_msg *)msg;
+	struct sxe_vf_info *vf_info = &(adapter->vt_ctxt.vf_info[vf_idx]);
+	s32 ret = 0;
+
+	switch (api_msg->api_version) {
+	case SXE_MBX_API_10:
+	case SXE_MBX_API_11:
+	case SXE_MBX_API_12:
+	case SXE_MBX_API_13:
+		vf_info->mbx_version = api_msg->api_version;
+		break;
+	default:
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("invalid mailbox api version:%u.\n",
+			  api_msg->api_version);
+		break;
+	}
+
+	LOG_INFO_BDF("mailbox api version:0x%x.(err:%d)",
+	             vf_info->mbx_version,
+	             ret);
+
+	return ret;
+}
+
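+/* Answer a VF ring-info request: queue counts come from the SRIOV
+ * queues-per-pool setting; the tc number is derived from the tx
+ * multi-queue mode (pool count for VMDq+DCB, vlan insert state for
+ * plain VMDq).
+ */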
+static s32 sxe_pf_ring_info_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &(adapter->vt_ctxt.vf_info[vf]);
+	struct sxe_ring_info_msg *ring_msg = (struct sxe_ring_info_msg *)msgbuf;
+	u32 default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+	u8 num_tcs;
+	u32 vmvir;
+	u32 vlan_action;
+	u32 vlan_id;
+	u32 user_priority;
+	s32 ret = 0;
+
+	switch (vf_info->mbx_version) {
+	case SXE_MBX_API_11:
+	case SXE_MBX_API_12:
+	case SXE_MBX_API_13:
+		break;
+	default:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("mailbox version:0x%x does not support get"
+			      " ring info.(err:%d)",
+			      vf_info->mbx_version, ret);
+		goto l_out;
+	}
+
+	ring_msg->max_rx_num = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	ring_msg->max_tx_num  = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	ring_msg->default_tc = default_q;
+
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("vf_idx:%u sriov enabled, unsupported tx queue mode:0x%x.",
+			vf,
+			dev->data->dev_conf.txmode.mq_mode);
+		goto l_out;
+
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		vmdq_dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
+			break;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
+			break;
+		default:
+			ret = -SXE_ERR_CONFIG;
+			LOG_ERROR_BDF("vf:%u sriov enable, tx queue mode:0x%x "
+				      "invalid pool num:%u.(err:%d)",
+					vf,
+					dev->data->dev_conf.txmode.mq_mode,
+					vmdq_dcb_tx_conf->nb_queue_pools,
+					ret);
+			goto l_out;
+		}
+		break;
+
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
+		vmvir = sxe_hw_tx_vlan_insert_get(hw, vf);
+		vlan_action = vmvir & SXE_VMVIR_VLANA_MASK;
+		vlan_id = vmvir & SXE_VMVIR_VLAN_VID_MASK;
+		user_priority = (vmvir & SXE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
+		if ((vlan_action == SXE_VMVIR_VLANA_DEFAULT) &&
+			((vlan_id != 0) || (user_priority != 0))) {
+			num_tcs = 1;
+		} else {
+			num_tcs = 0;
+		}
+		break;
+
+	default:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("vf_idx:%u sriov enabled, invalid tx queue mode:0x%x.",
+			vf,
+			dev->data->dev_conf.txmode.mq_mode);
+		goto l_out;
+	}
+
+	ring_msg->tc_num = num_tcs;
+
+	LOG_INFO_BDF("max_rx_num:%u max_tx_num:%u default queue:%u tc_num:%u.",
+		    ring_msg->max_rx_num, ring_msg->max_tx_num,
+		    ring_msg->default_tc, ring_msg->tc_num);
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_vf_rss_hash_conf_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_rss_conf rss_conf;
+	struct sxe_rss_hash_msg *rss_msg = (struct sxe_rss_hash_msg *)msgbuf;
+
+	rss_conf.rss_key = malloc(SXE_RSS_KEY_SIZE);
+	if (rss_conf.rss_key == NULL) {
+		LOG_ERROR_BDF("vf[%u] rss key allocate fail.", vf);
+		return -ENOMEM;
+	}
+
+	sxe_rss_hash_conf_get(dev, &rss_conf);
+
+	memcpy(rss_msg->hash_key, rss_conf.rss_key, SXE_RSS_KEY_SIZE);
+	rss_msg->rss_hf = rss_conf.rss_hf;
+
+	free(rss_conf.rss_key);
+
+	LOG_INFO_BDF("vf[%u] rss hash conf get, rss_hf:0x%"SXE_PRIX64"\n",
+			vf, rss_msg->rss_hf);
+
+	return 0;
+}
+
+static s32 sxe_vf_vlan_id_set_handler(struct rte_eth_dev *dev,
+						u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	struct sxe_vlan_msg *vlan_msg = (struct sxe_vlan_msg *)msgbuf;
+	u32 vlan_id = (vlan_msg->vlan_id & SXE_VLVF_VLANID_MASK);
+	s32 ret;
+
+	ret = sxe_hw_vlan_filter_configure(hw, vlan_id, vf, vlan_msg->add, false);
+	if (ret == 0) {
+		if (vlan_msg->add) {
+			vf_info[vf].vlan_cnt++;
+		} else if (vf_info[vf].vlan_cnt) {
+			vf_info[vf].vlan_cnt--;
+		}
+	}
+
+	LOG_INFO_BDF("vf[%u] %s vid[%u] done vlan_cnt:%u ret = %d",
+			vf, vlan_msg->add ? "add" : "delete",
+			vlan_id,
+			vf_info[vf].vlan_cnt, ret);
+
+	return ret;
+}
+
+static s32 sxe_vf_max_frame_set_handler(struct rte_eth_dev *dev,
+						u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &(adapter->vt_ctxt.vf_info[vf]);
+	struct sxe_max_frame_msg *msg = (struct sxe_max_frame_msg *)msgbuf;
+	u32 vf_max_frame = msg->max_frame + SXE_ETH_OVERHEAD;
+	s32 ret = 0;
+	u32 cur_max_frs;
+	u32 frame_size = SXE_GET_FRAME_SIZE(dev);
+
+	switch (vf_info->mbx_version) {
+	case SXE_MBX_API_11:
+	case SXE_MBX_API_12:
+	case SXE_MBX_API_13:
+		if (frame_size > SXE_ETH_MAX_LEN) {
+			LOG_WARN_BDF("pf jumbo frame enabled.");
+			break;
+		}
+		/* fall through */
+	default:
+		if ((vf_max_frame > SXE_ETH_MAX_LEN) ||
+		    (frame_size > SXE_ETH_MAX_LEN)) {
+			ret = -SXE_ERR_PARAM;
+			LOG_ERROR_BDF("mbx version:0x%x pf max pkt len:0x%x vf:%u"
+				      " max_frames:0x%x max_len:0x%x.(err:%d)",
+				      vf_info->mbx_version,
+				      frame_size,
+				      vf, vf_max_frame,
+				      SXE_ETH_MAX_LEN, ret);
+			goto l_out;
+		}
+		break;
+	}
+
+	if ((vf_max_frame < RTE_ETHER_MIN_LEN) ||
+		(vf_max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("mbx version:0x%x vf:%u invalid max_frame:%u (err:%d)",
+			      vf_info->mbx_version,
+			      vf,
+			      vf_max_frame,
+			      ret);
+		goto l_out;
+	}
+
+	cur_max_frs = sxe_hw_mac_max_frame_get(hw);
+	if (vf_max_frame > cur_max_frs) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("mbx version:0x%x vf:%u invalid max_frame:%u > cur_max_frs:%u",
+			      vf_info->mbx_version,
+			      vf,
+			      vf_max_frame,
+			      cur_max_frs);
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+static void sxe_vf_mc_promisc_disable(struct rte_eth_dev *dev, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 vm_l2_ctrl = sxe_hw_pool_rx_mode_get(hw, vf);
+
+	vm_l2_ctrl &= ~SXE_VMOLR_MPE;
+
+	sxe_hw_pool_rx_mode_set(hw, vm_l2_ctrl, vf);
+
+	return;
+}
+
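+/* Sync a VF's multicast list: cache the address hash extracts, then set
+ * one MTA bit per entry (the high bits select the MTA register, the low
+ * bits the bit position). An empty list disables the mta filter for the
+ * VF's pool.
+ */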
+static s32 sxe_vf_mc_addr_sync(struct rte_eth_dev *dev,
+					u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf];
+	struct sxe_mc_sync_msg *mc_msg = (struct sxe_mc_sync_msg *)msgbuf;
+	u8 mc_cnt = min(mc_msg->mc_cnt, SXE_VF_MC_ENTRY_NUM_MAX);
+	u32 mta_idx;
+	u32 mta_shift;
+	u32 vm_l2_filter = sxe_hw_pool_rx_mode_get(hw, vf);
+	int i;
+
+	sxe_vf_mc_promisc_disable(dev, vf);
+
+	vf_info->mc_hash_used = mc_cnt;
+	for (i = 0; i < mc_cnt; i++) {
+		vf_info->mc_hash[i] = mc_msg->mc_addr_extract[i];
+		LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x\n",
+			     vf, mc_cnt, i, vf_info->mc_hash[i]);
+	}
+
+	if (mc_cnt == 0) {
+		vm_l2_filter &= ~SXE_VMOLR_ROMPE;
+		sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
+		LOG_WARN_BDF("vf:%u request disable mta filter.", vf);
+	} else {
+		for (i = 0; i < mc_cnt; i++) {
+			mta_idx = (vf_info->mc_hash[i] >> SXE_MC_ADDR_SHIFT) &
+				  SXE_MC_ADDR_REG_MASK;
+			mta_shift = vf_info->mc_hash[i] & SXE_MC_ADDR_BIT_MASK;
+			sxe_hw_mta_hash_table_update(hw, mta_idx, mta_shift);
+
+			LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x"
+				" reg_idx=%u, bit_idx=%u.\n",
+				vf, mc_cnt, i, vf_info->mc_hash[i],
+				mta_idx, mta_shift);
+		}
+
+		vm_l2_filter |= SXE_VMOLR_ROMPE;
+		sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
+	}
+
+	return 0;
+}
+
+static s32 sxe_vf_cast_mode_handler(struct rte_eth_dev *dev,
+					u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf];
+	struct sxe_cast_mode_msg *cast_msg = (struct sxe_cast_mode_msg *)msgbuf;
+	u32 enable;
+	u32 disable;
+	u32 vm_l2_filter;
+	s32 ret = 0;
+
+	switch (vf_info->mbx_version) {
+	case SXE_MBX_API_12:
+		if (cast_msg->cast_mode == SXE_CAST_MODE_PROMISC) {
+			ret = -EOPNOTSUPP;
+			LOG_ERROR_BDF("mbx api:12 vf:%u cast_mode:0x%x "
+				     "unsupported.(err:%d)",
+				      vf, cast_msg->cast_mode, ret);
+			goto l_out;
+		}
+		break;
+	case SXE_MBX_API_13:
+		break;
+	default:
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("vf:%u invalid mbx api version:0x%x.\n",
+			     vf, vf_info->mbx_version);
+		goto l_out;
+	}
+
+	if (vf_info->cast_mode == cast_msg->cast_mode) {
+		LOG_INFO_BDF("vf:%d current mode equals set mode:0x%x, skip set.",
+			     vf, cast_msg->cast_mode);
+		goto l_out;
+	}
+
+	switch (cast_msg->cast_mode) {
+	case SXE_CAST_MODE_NONE:
+		disable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE | SXE_VMOLR_MPE;
+		enable = 0;
+		break;
+
+	case SXE_CAST_MODE_MULTI:
+		disable = SXE_VMOLR_MPE;
+		enable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE;
+		break;
+
+	case SXE_CAST_MODE_ALLMULTI:
+		disable = 0;
+		enable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE |
+			SXE_VMOLR_MPE;
+		break;
+
+	case SXE_CAST_MODE_PROMISC:
+		ret = -EOPNOTSUPP;
+		LOG_ERROR_BDF("vf:%d promisc mode not supported.(ret:%d)\n",
+		              vf, ret);
+		goto l_out;
+
+	default:
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("vf:%u invalid cast mode:0x%x.\n",
+			     vf, cast_msg->cast_mode);
+		goto l_out;
+	}
+
+	vm_l2_filter = sxe_hw_pool_rx_mode_get(hw, vf);
+	vm_l2_filter &= ~disable;
+	vm_l2_filter |= enable;
+	sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
+
+	LOG_INFO_BDF("vf:%d filter reg:0x%x mode:%d.\n",
+		     vf, vm_l2_filter, cast_msg->cast_mode);
+
+	vf_info->cast_mode = cast_msg->cast_mode;
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_vf_uc_addr_sync_handler(struct rte_eth_dev *dev,
+					u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf];
+	struct sxe_uc_sync_msg *uc_msg = (struct sxe_uc_sync_msg *)msgbuf;
+	s32 ret = 0;
+	u8 rar_idx;
+
+	if (uc_msg->index) {
+		if (!rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)uc_msg->addr)) {
+			ret = -SXE_ERR_PARAM;
+			LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" invalid.(err:%d).",
+				      vf, MAC_ADDR(uc_msg->addr), ret);
+			goto l_out;
+		}
+
+		vf_info->uc_mac_cnt++;
+		rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf, (u8 *)uc_msg->addr, true);
+		sxe_hw_uc_addr_add(hw, rar_idx, (u8 *)uc_msg->addr, vf);
+	} else {
+		if (vf_info->uc_mac_cnt) {
+			sxe_sw_uc_entry_vf_del(adapter, vf, true);
+			vf_info->uc_mac_cnt = 0;
+		}
+	}
+
+l_out:
+	return ret;
+}
+
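+/* Dispatch table for VF mailbox requests, indexed by the command id in
+ * the low byte of the first message word (SXE_VFREQ_MASK).
+ */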
+STATIC struct sxe_msg_table msg_table[] = {
+	[SXE_VFREQ_MAC_ADDR_SET] = {SXE_VFREQ_MAC_ADDR_SET, sxe_vf_dev_mac_addr_set_handler},
+	[SXE_VFREQ_MC_ADDR_SYNC] = {SXE_VFREQ_MC_ADDR_SYNC, sxe_vf_mc_addr_sync},
+	[SXE_VFREQ_VLAN_SET] = {SXE_VFREQ_VLAN_SET, sxe_vf_vlan_id_set_handler},
+	[SXE_VFREQ_LPE_SET] = {SXE_VFREQ_LPE_SET, sxe_vf_max_frame_set_handler},
+	[SXE_VFREQ_UC_ADDR_SYNC] = {SXE_VFREQ_UC_ADDR_SYNC, sxe_vf_uc_addr_sync_handler},
+	[SXE_VFREQ_API_NEGOTIATE] = {SXE_VFREQ_API_NEGOTIATE, sxe_mbx_api_set_handler},
+	[SXE_VFREQ_RING_INFO_GET] = {SXE_VFREQ_RING_INFO_GET, sxe_pf_ring_info_get},
+	[SXE_VFREQ_CAST_MODE_SET] = {SXE_VFREQ_CAST_MODE_SET, sxe_vf_cast_mode_handler},
+	[SXE_VFREQ_RSS_CONF_GET] = {SXE_VFREQ_RSS_CONF_GET, sxe_vf_rss_hash_conf_get},
+};
+
+static void sxe_vf_pool_enable(struct rte_eth_dev *dev, u8 vf_idx)
+{
+	u32 enable_pool;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	u8 reg_idx = vf_idx / 32;
+	u8 bit_idx = vf_idx % 32;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx];
+
+	enable_pool = sxe_hw_tx_pool_bitmap_get(hw, reg_idx);
+	enable_pool |= BIT(bit_idx);
+	sxe_hw_tx_pool_bitmap_set(hw, reg_idx, enable_pool);
+
+	sxe_hw_vf_queue_drop_enable(hw, vf_idx,
+				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+
+	enable_pool = sxe_hw_rx_pool_bitmap_get(hw, reg_idx);
+	enable_pool |= BIT(bit_idx);
+	sxe_hw_rx_pool_bitmap_set(hw, reg_idx, enable_pool);
+
+	vf_info->is_ready = true;
+
+	sxe_hw_spoof_count_enable(hw, reg_idx, bit_idx);
+
+	return;
+}
+
+static void sxe_vf_reset_msg_handle(struct rte_eth_dev *dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_rst_reply reply = {};
+	u8 *mac_addr = adapter->vt_ctxt.vf_info[vf_idx].mac_addr;
+	u8 *addr_bytes = (u8 *)(((struct rte_ether_addr *)mac_addr)->addr_bytes);
+	u32 rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf_idx, addr_bytes, false);
+
+	LOG_INFO_BDF("receive vf_idx:%d reset msg.\n", vf_idx);
+
+	sxe_vf_pool_enable(dev, vf_idx);
+
+	sxe_vf_flr_handle(dev, vf_idx);
+
+	sxe_hw_uc_addr_add(&adapter->hw, rar_idx, addr_bytes, vf_idx);
+
+	sxe_vf_mc_promisc_disable(dev, vf_idx);
+
+	reply.msg_type = SXE_VFREQ_RESET | SXE_MSGTYPE_ACK;
+	reply.mc_filter_type = SXE_MC_FILTER_TYPE0;
+	rte_memcpy(reply.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
+
+	sxe_hw_send_msg_to_vf(hw, (u32 *)&reply,
+				SXE_MSG_NUM(sizeof(reply)), vf_idx);
+
+	adapter->vt_ctxt.vf_info[vf_idx].is_ready = true;
+
+	LOG_INFO_BDF("vf_idx:%d reset msg:0x%x handle done.send mac addr:"MAC_FMT
+		    " mc type:%d to vf.",
+		    vf_idx, reply.msg_type,
+		    MAC_ADDR(mac_addr), SXE_MC_FILTER_TYPE0);
+
+	return;
+}
+
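+/* Handle one VF request: a reset is served unconditionally; every other
+ * command is first exposed to the application through the VF_MBOX event
+ * callback and executed only when the callback returns PROCEED (api
+ * negotiation and ring-info get are always allowed). The result is then
+ * acked or nacked back to the VF.
+ */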
+STATIC s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
+					u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u16 cmd_id = msg[0] & SXE_VFREQ_MASK;
+	struct rte_pmd_sxe_mb_event_param user_param;
+
+	if ((cmd_id > SXE_VFREQ_CAST_MODE_SET &&
+		cmd_id <= SXE_VFREQ_IPSEC_DEL) ||
+		cmd_id > SXE_VFREQ_RSS_CONF_GET) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("vf_idx:%u msg:0x%x invalid cmd_id:0x%x.\n",
+			   vf_idx, msg[0], cmd_id);
+		goto l_out;
+	}
+
+	user_param.ret = RTE_PMD_SXE_MB_EVENT_PROCEED;
+	user_param.vf_idx = vf_idx;
+	user_param.msg_type = msg[0] & 0xFFFF;
+	user_param.msg = (void *)msg;
+
+	if (cmd_id == SXE_VFREQ_RESET) {
+		ret = 0;
+		sxe_vf_reset_msg_handle(dev, vf_idx);
+
+		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+					      &user_param);
+		goto l_out;
+	}
+
+	sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+					      &user_param);
+
+	LOG_INFO_BDF("vf_idx:%u cmd_id:0x%x user configure:0x%x.",
+			vf_idx, cmd_id, user_param.ret);
+
+	if (!adapter->vt_ctxt.vf_info[vf_idx].is_ready) {
+		msg[0] |= SXE_MSGTYPE_NACK;
+		ret = sxe_hw_send_msg_to_vf(hw, msg,
+					SXE_MSG_NUM(sizeof(msg[0])), vf_idx);
+		LOG_WARN_BDF("vf_idx:%d not ready now, send nack to vf.ret:%d.\n",
+			  vf_idx, ret);
+		goto l_out;
+	}
+
+	if (msg_table[cmd_id].msg_func) {
+		if ((user_param.ret == RTE_PMD_SXE_MB_EVENT_PROCEED) ||
+		    (cmd_id == SXE_VFREQ_API_NEGOTIATE) ||
+		    (cmd_id == SXE_VFREQ_RING_INFO_GET)) {
+			ret = msg_table[cmd_id].msg_func(dev, msg, vf_idx);
+		}
+		LOG_INFO_BDF("msg:0x%x cmd_id:0x%x handle done.ret:%d\n",
+			 msg[0], cmd_id, ret);
+	} else {
+		ret = -SXE_ERR_PARAM;
+	}
+
+	if (!ret) {
+		msg[0] |= SXE_MSGTYPE_ACK;
+	} else {
+		msg[0] |= SXE_MSGTYPE_NACK;
+		LOG_ERROR_BDF("vf_idx:%u msg_type:0x%x cmd_id:0x%x invalid.(err:%d)\n",
+			      vf_idx, msg[0], cmd_id, ret);
+	}
+
+	ret = sxe_hw_send_msg_to_vf(hw, msg, SXE_MBX_MSG_NUM, vf_idx);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%d msg:0x%x reply fail.(err:%d).\n",
+			   vf_idx, msg[0], ret);
+	}
+
+	LOG_INFO_BDF("pf reply vf:%d msg:0x%x done.ret:%d\n", vf_idx, msg[0], ret);
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_vf_req_msg_handle(struct rte_eth_dev *dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 msg[SXE_MBX_MSG_NUM] = {0};
+	s32 ret;
+
+	ret = sxe_hw_rcv_msg_from_vf(hw, msg, SXE_MBX_MSG_NUM, vf_idx);
+	if (ret) {
+		LOG_ERROR_BDF("rcv vf:0x%x req msg:0x%x fail.(err:%d)\n",
+			   vf_idx, msg[0], ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("rcv vf_idx:%d req msg:0x%x.\n", vf_idx, msg[0]);
+
+	if (msg[0] & (SXE_MSGTYPE_ACK | SXE_MSGTYPE_NACK)) {
+		LOG_WARN_BDF("msg:0x%x already handled, skip duplicate handle.\n",
+			  msg[0]);
+		goto l_out;
+	}
+
+	ret = sxe_req_msg_handle(dev, msg, vf_idx);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%d request msg handle fail.(err:%d)\n",
+			  vf_idx, ret);
+	}
+
+l_out:
+	return ret;
+}
+
+static void sxe_vf_ack_msg_handle(struct rte_eth_dev *eth_dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	u32 msg = SXE_MSGTYPE_NACK;
+
+	if (!adapter->vt_ctxt.vf_info[vf_idx].is_ready) {
+		sxe_hw_send_msg_to_vf(&adapter->hw, &msg,
+					SXE_MSG_NUM(sizeof(msg)), vf_idx);
+	}
+
+	return;
+}
+
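+/* Mailbox interrupt handler: walk every VF and service any pending FLR,
+ * request or ack indication.
+ */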
+void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 vf_num = sxe_vf_num_get(eth_dev);
+	u8 vf_idx;
+
+	LOG_DEBUG_BDF("mailbox irq triggered vf_num:%u.\n", vf_num);
+
+	for (vf_idx = 0; vf_idx < vf_num; vf_idx++) {
+		if (sxe_hw_vf_rst_check(hw, vf_idx)) {
+			LOG_WARN_BDF("vf_idx:%d flr triggered.\n", vf_idx);
+			sxe_vf_flr_handle(eth_dev, vf_idx);
+		}
+
+		if (sxe_hw_vf_req_check(hw, vf_idx)) {
+			sxe_vf_req_msg_handle(eth_dev, vf_idx);
+		}
+
+		if (sxe_hw_vf_ack_check(hw, vf_idx)) {
+			sxe_vf_ack_msg_handle(eth_dev, vf_idx);
+		}
+	}
+
+	return;
+}
+
+#ifdef ETH_DEV_MIRROR_RULE
+static s32 sxe_mirror_conf_check(struct sxe_hw *hw, u8 rule_id,
+					  u8 rule_type)
+{
+	s32 ret = 0;
+
+	if (sxe_hw_vt_status(hw) == 0) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "virtualization disabled, mirror rule not supported.(err:%d)",
+		              ret);
+		goto l_out;
+	}
+
+	if (rule_id >= SXE_MIRROR_RULES_MAX) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "invalid rule_id:%u rule id max:%u.(err:%d)",
+		              rule_id, SXE_MIRROR_RULES_MAX, ret);
+		goto l_out;
+	}
+
+	if (SXE_MIRROR_TYPE_INVALID(rule_type)) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "unsupported mirror type 0x%x.(err:%d)",
+			      rule_type, ret);
+	}
+
+l_out:
+	return ret;
+}
+
+static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	u32 mv_msb = 0;
+	u32 mv_lsb = 0;
+	u64 vlan_mask = 0;
+	u32 vlvf;
+	u8 i;
+	u8 reg_idx;
+	s32 ret = 0;
+
+	for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+		if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+			ret = sxe_hw_vlvf_slot_find(
+					hw,
+					mirror_conf->vlan.vlan_id[i],
+					false);
+			if (ret < 0) {
+				ret = -EINVAL;
+				LOG_ERROR_BDF("vlan_id[%u]:0x%x no matched vlvf."
+					      "(err:%d)",
+					      i,
+					      mirror_conf->vlan.vlan_id[i],
+					      ret);
+				goto l_out;
+			}
+
+			reg_idx = ret;
+			vlvf = sxe_hw_vlan_pool_filter_read(hw, reg_idx);
+			if ((vlvf & SXE_VLVF_VIEN) &&
+			    ((vlvf & SXE_VLVF_VLANID_MASK) ==
+			      mirror_conf->vlan.vlan_id[i])) {
+				vlan_mask |= (1ULL << reg_idx);
+			} else {
+				ret = -EINVAL;
+				LOG_ERROR_BDF("i:%u vlan_id:0x%x "
+					      "vlvf[%u]:0x%x not meet request."
+					      "(err:%d)",
+					      i,
+					      mirror_conf->vlan.vlan_id[i],
+					      reg_idx,
+					      vlvf,
+					      ret);
+				goto l_out;
+			}
+		}
+	}
+
+	if (on) {
+		mv_lsb = vlan_mask & SXE_MR_VLAN_MASK;
+		mv_msb = vlan_mask >> SXE_MR_VLAN_MSB_BIT_OFFSET;
+
+		mirror_info->mr_conf[rule_id].vlan.vlan_mask =
+					mirror_conf->vlan.vlan_mask;
+
+		for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+				mirror_info->mr_conf[rule_id].vlan.vlan_id[i] =
+					mirror_conf->vlan.vlan_id[i];
+				LOG_INFO_BDF("rule_id:%u vlan id:0x%x add mirror"
+					     " to dst_pool:%u",
+					     rule_id,
+					     mirror_conf->vlan.vlan_id[i],
+					     mirror_conf->dst_pool);
+			}
+		}
+	} else {
+		mv_lsb = 0;
+		mv_msb = 0;
+		mirror_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+
+		for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+			mirror_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+			LOG_INFO_BDF("rule_id:%u vlan id:0x%x del mirror"
+				     " from dst_pool:%u",
+				     rule_id,
+				     mirror_conf->vlan.vlan_id[i],
+				     mirror_conf->dst_pool);
+		}
+	}
+
+	sxe_hw_mirror_vlan_set(hw, rule_id, mv_lsb, mv_msb);
+
+l_out:
+	return ret;
+}
+
+static void sxe_virtual_pool_mirror_configure(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	u32 lsb = 0;
+	u32 msb = 0;
+
+	if (on) {
+		lsb = mirror_conf->pool_mask & SXE_MR_VIRTUAL_POOL_MASK;
+		msb = mirror_conf->pool_mask >> SXE_MR_VIRTUAL_POOL_MSB_BIT_MASK;
+		mirror_info->mr_conf[rule_id].pool_mask = mirror_conf->pool_mask;
+	} else {
+		lsb = 0;
+		msb = 0;
+		mirror_info->mr_conf[rule_id].pool_mask = 0;
+	}
+
+	sxe_hw_mirror_virtual_pool_set(hw, rule_id, lsb, msb);
+
+	return;
+}
+
+s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	u8 mirror_type = 0;
+	s32 ret;
+
+	ret = sxe_mirror_conf_check(hw, rule_id, mirror_conf->rule_type);
+	if (ret) {
+		LOG_ERROR_BDF("rule_id:%u mirror config param invalid.(err:%d)",
+			      rule_id, ret);
+		goto l_out;
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+		mirror_type |= SXE_MRCTL_VLME;
+		ret = sxe_vlan_mirror_configure(dev, mirror_conf, rule_id, on);
+		if (ret) {
+			LOG_ERROR_BDF("vlan mirror configure fail.(err:%d)", ret);
+			goto l_out;
+		}
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+		mirror_type |= SXE_MRCTL_VPME;
+		sxe_virtual_pool_mirror_configure(dev, mirror_conf, rule_id, on);
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) {
+		mirror_type |= SXE_MRCTL_UPME;
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) {
+		mirror_type |= SXE_MRCTL_DPME;
+	}
+
+	sxe_hw_mirror_ctl_set(hw, rule_id, mirror_type, mirror_conf->dst_pool, on);
+
+	mirror_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+	mirror_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+	LOG_INFO_BDF("rule_id:%u mirror type:0x%x %s success. "
+		     "vlan id mask:0x%"SXE_PRIX64" virtual pool mask:0x%"SXE_PRIX64
+		     " dst_pool:%u.",
+		     rule_id,
+		     mirror_conf->rule_type,
+		     on ? "add" : "delete",
+		     mirror_conf->vlan.vlan_mask,
+		     mirror_conf->pool_mask,
+		     mirror_conf->dst_pool);
+
+l_out:
+	return ret;
+}
+
+s32 sxe_mirror_rule_reset(struct rte_eth_dev *dev, u8 rule_id)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	s32 ret;
+
+	ret = sxe_mirror_conf_check(hw, rule_id, SXE_ETH_MIRROR_TYPE_MASK);
+	if (ret) {
+		LOG_ERROR_BDF("rule_id:%u mirror config param invalid.(err:%d)",
+			      rule_id, ret);
+		goto l_out;
+	}
+
+	memset(&mirror_info->mr_conf[rule_id], 0,
+	       sizeof(struct rte_eth_mirror_conf));
+
+	sxe_hw_mirror_rule_clear(hw, rule_id);
+
+	LOG_INFO_BDF("rule_id:%u reset success.", rule_id);
+
+l_out:
+	return ret;
+}
+
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vf.h b/drivers/net/sxe/pf/sxe_vf.h
new file mode 100644
index 0000000000..8690b9e7fd
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vf.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_VF_H__
+#define __SXE_VF_H__
+
+#include "sxe_dpdk_version.h"
+#include <rte_ethdev.h>
+#if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#else
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_hw.h"
+
+#define SXE_MIRROR_RULES_MAX   4
+
+#define SXE_MSG_NUM(size)         DIV_ROUND_UP(size, 4)
+
+#define SXE_MSGTYPE_ACK    0x80000000
+#define SXE_MSGTYPE_NACK   0x40000000
+
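+/* Request command ids carried in the low byte of the first mailbox
+ * message word (masked with SXE_VFREQ_MASK).
+ */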
+#define SXE_VFREQ_RESET               0x01
+#define SXE_VFREQ_MAC_ADDR_SET        0x02
+#define SXE_VFREQ_MC_ADDR_SYNC        0x03
+#define SXE_VFREQ_VLAN_SET            0x04
+#define SXE_VFREQ_LPE_SET             0x05
+
+#define SXE_VFREQ_UC_ADDR_SYNC        0x06
+
+#define SXE_VFREQ_API_NEGOTIATE       0x08
+
+#define SXE_VFREQ_RING_INFO_GET       0x09
+#define SXE_VFREQ_REDIR_TBL_GET       0x0a
+#define SXE_VFREQ_RSS_KEY_GET         0x0b
+#define SXE_VFREQ_CAST_MODE_SET       0x0c
+#define SXE_VFREQ_LINK_ENABLE_GET     0x0d
+#define SXE_VFREQ_IPSEC_ADD           0x0e
+#define SXE_VFREQ_IPSEC_DEL           0x0f
+#define SXE_VFREQ_RSS_CONF_GET        0x10
+
+#define SXE_VFREQ_MASK                0xFF
+
+#define SXE_MIRROR_TYPE_INVALID(mirror_type) \
+	((mirror_type) & ~(u8)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+#define SXE_ETH_MIRROR_TYPE_MASK \
+			(ETH_MIRROR_VIRTUAL_POOL_UP | ETH_MIRROR_UPLINK_PORT \
+			| ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)
+
+static inline u16 sxe_vf_num_get(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+	return pci_dev->max_vfs;
+}
+
+enum sxe_mbx_api_version {
+	SXE_MBX_API_10 = 0,
+	SXE_MBX_API_11,
+	SXE_MBX_API_12,
+	SXE_MBX_API_13,
+	SXE_MBX_API_14,
+
+	SXE_MBX_API_NR,
+};
+
+enum sxe_cast_mode {
+	SXE_CAST_MODE_NONE = 0,
+	SXE_CAST_MODE_MULTI,
+	SXE_CAST_MODE_ALLMULTI,
+	SXE_CAST_MODE_PROMISC,
+};
+
+struct sxe_vf_info {
+	u8 mac_addr[RTE_ETHER_ADDR_LEN];
+	u16 mc_hash[SXE_VF_MC_ENTRY_NUM_MAX];
+	u8  mc_hash_used;
+	u8 cast_mode;
+	u8  trusted :1;
+	u8  is_ready :1;
+	u8  spoof_chk_enabled :1;
+	u8  rss_query_enabled :1;
+	u8  mac_from_pf :1;
+	u8  reserved :3;
+	u16 domain_id;
+	u16 tx_rate;
+	u32 mbx_version;
+	u32 vlan_cnt;
+	u32 uc_mac_cnt;
+};
+
+#ifdef ETH_DEV_MIRROR_RULE
+struct sxe_mirror_info {
+	struct rte_eth_mirror_conf mr_conf[SXE_MIRROR_RULES_MAX];
+};
+#endif
+
+struct sxe_virtual_context {
+	u8 pflink_fullchk;
+	u32 mbx_version;
+	struct sxe_vf_info *vf_info;
+#ifdef ETH_DEV_MIRROR_RULE
+	struct sxe_mirror_info mr_info;
+#endif
+};
+
+struct sxe_msg_table {
+	u32 msg_type;
+	s32 (*msg_func)(struct rte_eth_dev *dev, u32 *msg, u32 vf_idx);
+};
+
+enum RTE_PMD_SXE_MB_event_rsp {
+	RTE_PMD_SXE_MB_EVENT_NOOP_ACK,
+	RTE_PMD_SXE_MB_EVENT_NOOP_NACK,
+	RTE_PMD_SXE_MB_EVENT_PROCEED,
+	RTE_PMD_SXE_MB_EVENT_MAX
+};
+
+struct rte_pmd_sxe_mb_event_param {
+	u16 vf_idx;
+	u16 msg_type;
+	u16 ret;
+	void *msg;
+};
+
+struct sxe_mbx_api_msg {
+	u32 msg_type;
+	u32 api_version;
+};
+
+struct sxe_uc_addr_msg {
+	u32 msg_type;
+	u8 uc_addr[RTE_ETHER_ADDR_LEN];
+	u16 pad;
+};
+
+struct sxe_rst_rcv {
+	u32 msg_type;
+};
+
+struct sxe_rst_reply {
+	u32 msg_type;
+	u32 mac_addr[2];
+	u32 mc_filter_type;
+};
+
+struct sxe_rst_msg {
+	union {
+		struct sxe_rst_rcv rcv;
+		struct sxe_rst_reply reply;
+	};
+};
+
+struct sxe_ring_info_msg {
+	u32 msg_type;
+	u8  max_rx_num;
+	u8  max_tx_num;
+	u8  tc_num;
+	u8  default_tc;
+};
+
+struct sxe_rss_hash_msg {
+	u32 msg_type;
+	u8  hash_key[SXE_RSS_KEY_SIZE];
+	u64 rss_hf;
+};
+
+struct sxe_vlan_msg {
+	u16 msg_type;
+	u16 add;
+	u32 vlan_id;
+};
+
+struct sxe_mc_sync_msg {
+	u16 msg_type;
+	u16 mc_cnt;
+	u16 mc_addr_extract[SXE_VF_MC_ENTRY_NUM_MAX];
+};
+
+struct sxe_cast_mode_msg {
+	u32 msg_type;
+	u32 cast_mode;
+};
+
+struct sxe_uc_sync_msg {
+	u16 msg_type;
+	u16 index;
+	u32 addr[2];
+};
+
+struct sxe_max_frame_msg {
+	u32 msg_type;
+	u32 max_frame;
+};
+
+s32 sxe_vt_init(struct rte_eth_dev *eth_dev);
+
+void sxe_vt_configure(struct rte_eth_dev *eth_dev);
+
+void sxe_vt_uninit(struct rte_eth_dev *eth_dev);
+
+s32 sxe_vf_rss_configure(struct rte_eth_dev *dev);
+
+s32 sxe_vf_default_mode_configure(struct rte_eth_dev *dev);
+
+void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev);
+
+#ifdef ETH_DEV_MIRROR_RULE
+s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on);
+
+s32 sxe_mirror_rule_reset(struct rte_eth_dev *dev, u8 rule_id);
+
+#endif
+#endif
diff --git a/drivers/net/sxe/rte_pmd_sxe_version.map b/drivers/net/sxe/rte_pmd_sxe_version.map
new file mode 100644
index 0000000000..e85eb752b4
--- /dev/null
+++ b/drivers/net/sxe/rte_pmd_sxe_version.map
@@ -0,0 +1,10 @@
+DPDK_20.0 {
+	global:
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+#EXPERIMENTAL {
+#	global: *;
+#};
diff --git a/drivers/net/sxe/sxe_drv_type.h b/drivers/net/sxe/sxe_drv_type.h
new file mode 100644
index 0000000000..c7bda4f558
--- /dev/null
+++ b/drivers/net/sxe/sxe_drv_type.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DRV_TYPEDEF_H__
+#define __SXE_DRV_TYPEDEF_H__
+
+#ifdef SXE_DPDK
+#include "sxe_types.h"
+#ifndef bool
+typedef _Bool bool;
+#endif
+#else
+#include <linux/types.h>
+#endif
+
+typedef u8 U8;
+typedef u16 U16;
+typedef u32 U32;
+typedef u64 U64;
+typedef bool BOOL;
+
+#endif
diff --git a/drivers/net/sxe/version.map b/drivers/net/sxe/version.map
new file mode 100644
index 0000000000..2064d17939
--- /dev/null
+++ b/drivers/net/sxe/version.map
@@ -0,0 +1,24 @@
+DPDK_21 {
+	global:
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+DPDK_22 {
+	global:
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+DPDK_23 {
+	global:
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+#EXPERIMENTAL {
+#	global: *;
+#};
diff --git a/drivers/net/sxe/vf/sxevf.h b/drivers/net/sxe/vf/sxevf.h
new file mode 100644
index 0000000000..52d294d869
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXEVF_H__
+#define __SXEVF_H__
+
+#include <rte_pci.h>
+
+#include "sxevf_irq.h"
+#include "sxevf_hw.h"
+#include "sxevf_filter.h"
+#include "sxevf_stats.h"
+
+#define SXEVF_DEVARG_LINK_CHECK           "link_check"
+
+struct sxevf_adapter {
+	s8 name[PCI_PRI_STR_SIZE + 1];
+	u8 max_rx_queue;
+	u8 max_tx_queue;
+
+	struct sxevf_hw hw;
+	struct sxevf_irq_context irq_ctxt;
+	struct sxevf_vlan_context vlan_ctxt;
+	struct sxevf_mac_filter_context mac_filter_ctxt;
+	struct sxevf_stats_info stats_info;
+
+	rte_atomic32_t link_thread_running;
+	pthread_t link_thread_tid;
+	u8 link_check;
+	bool stop;
+	bool rx_batch_alloc_allowed;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	bool rx_vec_allowed;
+#endif
+	u8 rss_reta_updated;
+};
+
+struct sxevf_thread_param {
+	struct rte_eth_dev *dev;
+	pthread_barrier_t barrier;
+};
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_ethdev.c b/drivers/net/sxe/vf/sxevf_ethdev.c
new file mode 100644
index 0000000000..d656dc83fc
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_ethdev.c
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+
+#include "sxevf.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxevf_msg.h"
+#include "sxe_errno.h"
+#include "sxevf_tx.h"
+#include "sxevf_rx.h"
+#include "sxevf_ethdev.h"
+#include "sxevf_queue.h"
+#include "sxevf_offload.h"
+#include "sxe_compat_version.h"
+
+#define SXEVF_ETH_OVERHEAD     (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+#define SXEVF_HKEY_MAX_INDEX   (10)
+#define SXEVF_RSS_OFFLOAD_ALL ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+
+#define SXEVF_DEFAULT_RX_FREE_THRESH  32
+#define SXEVF_DEFAULT_RX_PTHRESH      8
+#define SXEVF_DEFAULT_RX_HTHRESH      8
+#define SXEVF_DEFAULT_RX_WTHRESH      0
+
+#define SXEVF_DEFAULT_TX_FREE_THRESH  32
+#define SXEVF_DEFAULT_TX_PTHRESH      32
+#define SXEVF_DEFAULT_TX_HTHRESH      0
+#define SXEVF_DEFAULT_TX_WTHRESH      0
+#define SXEVF_DEFAULT_TX_RSBIT_THRESH 32
+
+#define	SXEVF_MIN_RING_DESC     32
+#define	SXEVF_MAX_RING_DESC     4096
+
+#define	SXEVF_ALIGN             128
+#define SXEVF_RXD_ALIGN        (SXEVF_ALIGN / sizeof(sxevf_rx_data_desc_u))
+#define SXEVF_TXD_ALIGN        (SXEVF_ALIGN / sizeof(sxevf_tx_data_desc_u))
+
+#define SXEVF_TX_MAX_SEG            40
+#define SXEVF_DEFAULT_TX_QUEUE_NUM  1
+#define SXEVF_DEFAULT_RX_QUEUE_NUM  1
+#define SXEVF_RX_BUF_MIN      1024
+#define SXEVF_RX_BUF_LEN_MAX  9728
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = SXEVF_MAX_RING_DESC,
+	.nb_min = SXEVF_MIN_RING_DESC,
+	.nb_align = SXEVF_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = SXEVF_MAX_RING_DESC,
+	.nb_min = SXEVF_MIN_RING_DESC,
+	.nb_align = SXEVF_TXD_ALIGN,
+	.nb_seg_max = SXEVF_TX_MAX_SEG,
+	.nb_mtu_seg_max = SXEVF_TX_MAX_SEG,
+};
+
+static const char * const sxevf_valid_arguments[] = {
+	SXEVF_DEVARG_LINK_CHECK,
+	NULL
+};
+
+STATIC s32 sxevf_devargs_handle(__rte_unused const char *key, const char *value,
+		  void *extra_args)
+{
+	u16 *n = extra_args;
+	s32 ret;
+
+	if (value == NULL || extra_args == NULL) {
+		ret = -EINVAL;
+		LOG_ERROR("invalid args.(err:%d)", ret);
+		goto l_out;
+	}
+
+	*n = (u16)strtoul(value, NULL, 0);
+	if (*n == USHRT_MAX && errno == ERANGE) {
+		ret = -ERANGE;
+		LOG_ERROR("invalid args.(err:%d)", ret);
+		goto l_out;
+	}
+
+	ret = 0;
+
+l_out:
+	return ret;
+}
+
+STATIC void sxevf_devargs_parse(struct sxevf_adapter *adapter,
+		      struct rte_devargs *devargs)
+{
+	struct rte_kvargs *kvlist;
+	u16 check;
+
+	if (devargs == NULL) {
+		LOG_INFO_BDF("no dev args.");
+		goto l_out;
+	}
+
+	kvlist = rte_kvargs_parse(devargs->args, sxevf_valid_arguments);
+	if (kvlist == NULL) {
+		goto l_out;
+	}
+
+	if (rte_kvargs_count(kvlist, SXEVF_DEVARG_LINK_CHECK) == 1 &&
+	    rte_kvargs_process(kvlist, SXEVF_DEVARG_LINK_CHECK,
+			       sxevf_devargs_handle, &check) == 0 &&
+	    check == 1) {
+		adapter->link_check = 1;
+	}
+
+	LOG_INFO_BDF("dev args link_check:%u", adapter->link_check);
+
+	rte_kvargs_free(kvlist);
+l_out:
+	return;
+}
+
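+/* VF side reset handshake: stop the hw, re-init the mailbox, trigger the
+ * reset and poll until the PF reports reset done, then send SXEVF_RESET;
+ * the PF answers with the default mac address and the mc filter type.
+ */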
+static s32 sxevf_hw_dev_reset(struct sxevf_hw *hw)
+{
+	u32 retry = SXEVF_RST_CHECK_NUM;
+	s32 ret;
+	struct sxevf_rst_msg msg = {};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	adapter->stop = true;
+
+	sxevf_hw_stop(hw);
+
+	/* Mail box init */
+	sxevf_mbx_init(hw);
+
+	sxevf_hw_reset(hw);
+
+	while (!sxevf_pf_rst_check(hw) && retry) {
+		retry--;
+		udelay(5);
+	}
+
+	if (!retry) {
+		ret = -SXEVF_ERR_RESET_FAILED;
+		LOG_ERROR_BDF("retry:%u used up, pf reset not done.(err:%d)\n",
+		               SXEVF_RST_CHECK_NUM, ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("pf reset done.");
+
+	hw->mbx.retry = SXEVF_MBX_RETRY_COUNT;
+
+	sxevf_rxtx_reg_init(hw);
+
+	/* Send reset message to pf */
+	msg.msg_type = SXEVF_RESET;
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+				    SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret) {
+		LOG_ERROR_BDF("vf reset msg:%d len:%zu mailbox fail.(err:%d)\n",
+			  msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)), ret);
+		goto l_out;
+	}
+
+	if (msg.msg_type == (SXEVF_RESET | SXEVF_MSGTYPE_ACK)) {
+		memcpy(&adapter->mac_filter_ctxt.def_mac_addr,
+			(u8 *)(msg.mac_addr), SXEVF_MAC_ADDR_LEN);
+	}
+
+	adapter->mac_filter_ctxt.mc_filter_type = msg.mc_fiter_type;
+
+	LOG_INFO_BDF("vf get mc filter type:%d default mac addr:"MAC_FMT" from pf.\n",
+		  adapter->mac_filter_ctxt.mc_filter_type,
+		  MAC_ADDR(&adapter->mac_filter_ctxt.def_mac_addr));
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_hw_base_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret;
+
+	hw->reg_base_addr = (void *)pci_dev->mem_resource[0].addr;
+	PMD_LOG_INFO(INIT, "eth_dev[%u] got reg_base_addr=%p",
+			eth_dev->data->port_id, hw->reg_base_addr);
+	hw->adapter = adapter;
+
+	strlcpy(adapter->name, pci_dev->device.name, sizeof(adapter->name) - 1);
+	adapter->stop = true;
+
+	adapter->max_rx_queue = SXEVF_DEFAULT_RX_QUEUE_NUM;
+	adapter->max_tx_queue = SXEVF_DEFAULT_TX_QUEUE_NUM;
+
+	ret = sxevf_hw_dev_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw dev reset failed, ret=%d", ret);
+		goto l_out;
+	} else {
+		adapter->stop = false;
+	}
+
+	ret = sxevf_mac_addr_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mac addr init fail, ret=%d", ret);
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+static void sxevf_txrx_start(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_tx_queue_s *txq;
+	sxevf_rx_queue_s *rxq;
+	u16 i;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		sxevf_tx_ring_switch(hw, txq->reg_idx, true);
+	}
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		sxevf_rx_ring_switch(hw, rxq->reg_idx, true);
+
+		rte_wmb();
+
+		sxevf_rx_desc_tail_set(hw, rxq->reg_idx, rxq->ring_depth - 1);
+	}
+
+	return;
+}
+
+static s32 sxevf_dev_start(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = sxevf_hw_dev_reset(hw);
+	if (ret) {
+		LOG_ERROR_BDF("dev reset fail.");
+		goto l_out;
+	}
+
+	sxevf_mbx_api_version_init(adapter);
+
+	sxevf_tx_configure(dev);
+
+	ret = sxevf_rx_configure(dev);
+	if (ret) {
+		LOG_ERROR_BDF("rx configure fail.(err:%d)", ret);
+		goto l_clear_queue;
+	}
+
+	sxevf_vlan_filter_configure(dev);
+
+	sxevf_txrx_start(dev);
+
+	sxevf_irq_configure(dev);
+
+	sxevf_stats_init_value_get(hw, &stats_info->hw_stats);
+
+	adapter->stop = false;
+
+l_out:
+	return ret;
+
+l_clear_queue:
+	sxevf_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+	return ret;
+}
+
+#ifdef DPDK_19_11_6
+static void sxevf_dev_stop(struct rte_eth_dev *dev)
+#else
+static s32 sxevf_dev_stop(struct rte_eth_dev *dev)
+#endif
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (adapter->stop) {
+		LOG_INFO_BDF("eth dev has been stopped.");
+		goto l_out;
+	}
+
+	adapter->stop = true;
+	dev->data->dev_started = false;
+	dev->data->scattered_rx = false;
+
+	sxevf_hw_stop(hw);
+
+	sxevf_vfta_sync(dev, false);
+
+	sxevf_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+
+	sxevf_irq_free(dev);
+
+l_out:
+#ifdef DPDK_19_11_6
+	return;
+#else
+	return 0;
+#endif
+}
+
+#ifdef DPDK_19_11_6
+static void sxevf_dev_close(struct rte_eth_dev *dev)
+#else
+static s32 sxevf_dev_close(struct rte_eth_dev *dev)
+#endif
+{
+	s32 ret = 0;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		LOG_INFO_BDF("secondary process can't close dev.");
+		goto l_out;
+	}
+
+	ret = sxevf_hw_dev_reset(hw);
+	if (ret) {
+		LOG_ERROR_BDF("dev reset fail.");
+	}
+
+	sxevf_dev_stop(dev);
+
+	sxevf_stats_init_value_get(hw, &stats_info->hw_stats);
+
+	sxevf_queues_free(dev);
+
+	sxevf_irq_unregister(dev);
+
+l_out:
+#ifdef DPDK_19_11_6
+	return;
+#else
+	return ret;
+#endif
+}
+
+STATIC s32 sxevf_dev_reset(struct rte_eth_dev *dev)
+{
+	s32 ret;
+
+	ret = sxevf_ethdev_uninit(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "dev uninit fail.");
+		goto l_out;
+	}
+
+	ret = sxevf_ethdev_init(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "dev init fail.");
+	}
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_dev_info_get(struct rte_eth_dev *dev,
+		     struct rte_eth_dev_info *dev_info)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	dev_info->max_rx_queues = adapter->max_rx_queue;
+	dev_info->max_tx_queues = adapter->max_tx_queue;
+	dev_info->min_rx_bufsize = SXEVF_RX_BUF_MIN;
+	dev_info->max_rx_pktlen = SXEVF_RX_BUF_LEN_MAX;
+	dev_info->max_mtu = dev_info->max_rx_pktlen - SXEVF_ETH_OVERHEAD;
+	dev_info->max_mac_addrs = adapter->mac_filter_ctxt.uc_table_size;
+	dev_info->max_hash_mac_addrs = SXEVF_UTA_HASH_BIT_MAX;
+	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+
+	dev_info->rx_queue_offload_capa = sxevf_rx_queue_offloads_get(dev);
+	dev_info->rx_offload_capa = (sxevf_rx_port_offloads_get(dev) |
+				     dev_info->rx_queue_offload_capa);
+	dev_info->tx_queue_offload_capa = sxevf_tx_queue_offloads_get(dev);
+	dev_info->tx_offload_capa = sxevf_tx_port_offloads_get(dev);
+
+	dev_info->hash_key_size = SXEVF_HKEY_MAX_INDEX * sizeof(u32);
+	dev_info->reta_size = 0;
+	dev_info->flow_type_rss_offloads = SXEVF_RSS_OFFLOAD_ALL;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = SXEVF_DEFAULT_RX_PTHRESH,
+			.hthresh = SXEVF_DEFAULT_RX_HTHRESH,
+			.wthresh = SXEVF_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = SXEVF_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = SXEVF_DEFAULT_TX_PTHRESH,
+			.hthresh = SXEVF_DEFAULT_TX_HTHRESH,
+			.wthresh = SXEVF_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = SXEVF_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = SXEVF_DEFAULT_TX_RSBIT_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
+
+#ifdef DPDK_22_11_3
+	dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE;
+#endif
+
+	return 0;
+}
+
+static s32 sxevf_mtu_set(struct rte_eth_dev *dev, u16 mtu)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 max_frame = mtu + SXEVF_ETH_OVERHEAD;
+	s32 ret;
+
+	if (mtu < RTE_ETHER_MIN_MTU ||
+			max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("invalid mtu:%u.", mtu);
+		goto l_out;
+	}
+
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
+	    ((max_frame + 2 * SXEVF_VLAN_TAG_SIZE) >
+	     (dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("max_frame:%u stop port first.(err:%d)",
+			      max_frame, ret);
+		goto l_out;
+	}
+
+	ret = sxevf_rx_max_frame_set(hw, mtu);
+	if (ret) {
+		LOG_ERROR_BDF("max_frame:%u set fail.(err:%d)", max_frame, ret);
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+#endif
+
+	LOG_INFO_BDF("change max frame size to %u success.", max_frame);
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_dev_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *conf = &dev->data->dev_conf;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+	LOG_INFO_BDF("Configured Virtual Function port id: %d",
+		     dev->data->port_id);
+
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
+
+#ifndef RTE_LIBRTE_SXEVF_PF_DISABLE_STRIP_CRC
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		LOG_INFO_BDF("VF can't disable HW CRC Strip");
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+	}
+#else
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
+		LOG_INFO_BDF("VF can't enable HW CRC Strip");
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+	}
+#endif
+
+	adapter->rx_batch_alloc_allowed = true;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	adapter->rx_vec_allowed = true;
+#endif
+
+	return 0;
+}
+
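+/* Register groups dumped through the .get_reg callback; each entry is
+ * presumably {base, count, stride, name} and every group ends with an
+ * all-zero sentinel.
+ */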
+static const struct sxevf_reg_info sxevf_regs_general[] = {
+	{SXE_VFCTRL, 1, 1, "SXE_VFCTRL"},
+	{SXE_VFSTATUS, 1, 1, "SXE_VFSTATUS"},
+	{SXE_VFLINKS, 1, 1, "SXE_VFLINKS"},
+	{SXE_VFFRTIMER, 1, 1, "SXE_VFFRTIMER"},
+	{SXE_VFMAILBOX, 1, 1, "SXE_VFMAILBOX"},
+	{SXE_VFMBMEM, 16, 4, "SXE_VFMBMEM"},
+	{SXE_VFRXMEMWRAP, 1, 1, "SXE_VFRXMEMWRAP"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info sxevf_regs_interrupt[] = {
+	{SXE_VFEICR, 1, 1, "SXE_VFEICR"},
+	{SXE_VFEICS, 1, 1, "SXE_VFEICS"},
+	{SXE_VFEIMS, 1, 1, "SXE_VFEIMS"},
+	{SXE_VFEIMC, 1, 1, "SXE_VFEIMC"},
+	{SXE_VFEIAM, 1, 1, "SXE_VFEIAM"},
+	{SXE_VFEITR(0), 2, 4, "SXE_VFEITR"},
+	{SXE_VFIVAR(0), 4, 4, "SXE_VFIVAR"},
+	{SXE_VFIVAR_MISC, 1, 1, "SXE_VFIVAR_MISC"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info sxevf_regs_rxdma[] = {
+	{SXE_VFRDBAL(0), 8, 0x40, "SXE_VFRDBAL"},
+	{SXE_VFRDBAH(0), 8, 0x40, "SXE_VFRDBAH"},
+	{SXE_VFRDLEN(0), 8, 0x40, "SXE_VFRDLEN"},
+	{SXE_VFRDH(0), 8, 0x40, "SXE_VFRDH"},
+	{SXE_VFRDT(0), 8, 0x40, "SXE_VFRDT"},
+	{SXE_VFRXDCTL(0), 8, 0x40, "SXE_VFRXDCTL"},
+	{SXE_VFSRRCTL(0), 8, 0x40, "SXE_VFSRRCTL"},
+	{SXE_VFPSRTYPE, 1, 1,	"SXE_VFPSRTYPE"},
+	{SXE_VFLROCTL(0), 8, 0x40, "SXE_VFRSCCTL"},
+	{SXE_VFDCA_RXCTRL(0), 8, 0x40, "SXE_VFDCA_RXCTRL"},
+	{SXE_VFDCA_TXCTRL(0), 8, 0x40, "SXE_VFDCA_TXCTRL"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info sxevf_regs_tx[] = {
+	{SXE_VFTDBAL(0), 4, 0x40, "SXE_VFTDBAL"},
+	{SXE_VFTDBAH(0), 4, 0x40, "SXE_VFTDBAH"},
+	{SXE_VFTDLEN(0), 4, 0x40, "SXE_VFTDLEN"},
+	{SXE_VFTDH(0), 4, 0x40, "SXE_VFTDH"},
+	{SXE_VFTDT(0), 4, 0x40, "SXE_VFTDT"},
+	{SXE_VFTXDCTL(0), 4, 0x40, "SXE_VFTXDCTL"},
+	{SXE_VFTDWBAL(0), 4, 0x40, "SXE_VFTDWBAL"},
+	{SXE_VFTDWBAH(0), 4, 0x40, "SXE_VFTDWBAH"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info *sxevf_regs_group[] = {
+				sxevf_regs_general,
+				sxevf_regs_interrupt,
+				sxevf_regs_rxdma,
+				sxevf_regs_tx,
+				NULL};
+
+static u32 sxevf_regs_group_count(const struct sxevf_reg_info *regs)
+{
+	int i = 0;
+	int count = 0;
+
+	while (regs[i].count) {
+		count += regs[i++].count;
+	}
+
+	return count;
+}
+
+u32 sxevf_regs_group_num_get(void)
+{
+	u32 i = 0;
+	u32 count = 0;
+	const struct sxevf_reg_info *reg_group;
+	const struct sxevf_reg_info **reg_set = sxevf_regs_group;
+
+	while ((reg_group = reg_set[i++])) {
+		count += sxevf_regs_group_count(reg_group);
+	}
+
+	PMD_LOG_INFO(INIT, "read regs cnt=%u\n", count);
+
+	return count;
+}
+
+void sxevf_regs_group_read(struct sxevf_hw *hw, u32 *data)
+{
+	u32 cnt = 0, i = 0;
+	const struct sxevf_reg_info *reg_group;
+	const struct sxevf_reg_info **reg_set = sxevf_regs_group;
+
+	while ((reg_group = reg_set[i++])) {
+		cnt += sxevf_hw_regs_group_read(hw, reg_group, &data[cnt]);
+	}
+
+	PMD_LOG_INFO(INIT, "read regs cnt=%u, regs num=%u\n",
+	             cnt, sxevf_regs_group_num_get());
+
+	return;
+}
+
+static int sxevf_get_regs(struct rte_eth_dev *dev,
+	      struct rte_dev_reg_info *regs)
+{
+	s32 ret = 0;
+	u32 *data = regs->data;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 length = sxevf_regs_group_num_get();
+
+	if (data == NULL) {
+		regs->length = length;
+		regs->width = sizeof(u32);
+		goto l_end;
+	}
+
+	if ((regs->length == 0) || (regs->length == length)) {
+		sxevf_regs_group_read(hw, data);
+
+		goto l_end;
+	}
+
+	ret = -ENOTSUP;
+	PMD_LOG_ERR(INIT, "get regs: invalid param: regs_len=%u, regs->data=%p, "
+			  "regs_offset=%u, regs_width=%u, regs_version=%u",
+			  regs->length, regs->data,
+			  regs->offset, regs->width,
+			  regs->version);
+
+l_end:
+	return ret;
+}
+
+static const struct eth_dev_ops sxevf_eth_dev_ops = {
+	.dev_configure        = sxevf_dev_configure,
+	.dev_start            = sxevf_dev_start,
+	.dev_stop             = sxevf_dev_stop,
+	.link_update          = sxevf_link_update,
+	.stats_get            = sxevf_eth_stats_get,
+	.xstats_get           = sxevf_xstats_get,
+	.stats_reset          = sxevf_dev_stats_reset,
+	.xstats_reset         = sxevf_dev_stats_reset,
+	.xstats_get_names     = sxevf_xstats_names_get,
+	.dev_close            = sxevf_dev_close,
+	.dev_reset	      = sxevf_dev_reset,
+	.promiscuous_enable   = sxevf_promiscuous_enable,
+	.promiscuous_disable  = sxevf_promiscuous_disable,
+	.allmulticast_enable  = sxevf_allmulticast_enable,
+	.allmulticast_disable = sxevf_allmulticast_disable,
+	.dev_infos_get        = sxevf_dev_info_get,
+	.dev_supported_ptypes_get = sxevf_dev_supported_ptypes_get,
+	.mtu_set              = sxevf_mtu_set,
+	.vlan_filter_set      = sxevf_vlan_filter_set,
+	.vlan_strip_queue_set = sxevf_vlan_strip_queue_set,
+	.vlan_offload_set     = sxevf_vlan_offload_set,
+	.rx_queue_setup       = sxevf_rx_queue_setup,
+	.rx_queue_release     = sxevf_rx_queue_release,
+	.tx_queue_setup       = sxevf_tx_queue_setup,
+	.tx_queue_release     = sxevf_tx_queue_release,
+	.rx_queue_intr_enable = sxevf_rx_queue_intr_enable,
+	.rx_queue_intr_disable = sxevf_rx_queue_intr_disable,
+	.mac_addr_add         = sxevf_mac_addr_add,
+	.mac_addr_remove      = sxevf_mac_addr_remove,
+	.set_mc_addr_list     = sxevf_set_mc_addr_list,
+	.rxq_info_get         = sxevf_rx_queue_info_get,
+	.txq_info_get         = sxevf_tx_queue_info_get,
+	.mac_addr_set         = sxevf_default_mac_addr_set,
+	.get_reg              = sxevf_get_regs,
+	.reta_update          = sxevf_rss_reta_update,
+	.reta_query           = sxevf_rss_reta_query,
+	.rss_hash_update      = sxevf_rss_hash_update,
+	.rss_hash_conf_get    = sxevf_rss_hash_conf_get,
+	.tx_done_cleanup      = sxevf_tx_done_cleanup,
+#ifdef ETH_DEV_OPS_MONITOR
+	.get_monitor_addr     = sxe_monitor_addr_get,
+#endif
+#ifdef ETH_DEV_OPS_HAS_DESC_RELATE
+	.rx_descriptor_status = sxevf_rx_descriptor_status,
+	.tx_descriptor_status = sxevf_tx_descriptor_status,
+#ifdef ETH_DEV_RX_DESC_DONE
+	.rx_descriptor_done   = sxevf_rx_descriptor_done,
+#endif
+#endif
+};
+
+s32 sxevf_ethdev_init(struct rte_eth_dev *eth_dev)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+	u8 default_tc;
+	u8 tc_num;
+
+	PMD_INIT_FUNC_TRACE();
+
+	eth_dev->dev_ops = &sxevf_eth_dev_ops;
+
+#ifndef ETH_DEV_OPS_HAS_DESC_RELATE
+	eth_dev->rx_descriptor_status = sxevf_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = sxevf_tx_descriptor_status;
+#ifdef ETH_DEV_RX_DESC_DONE
+	eth_dev->rx_descriptor_done   = sxevf_rx_descriptor_done;
+#endif
+#endif
+
+	eth_dev->rx_pkt_burst         = &sxevf_pkts_recv;
+	eth_dev->tx_pkt_burst = &sxevf_pkts_xmit_with_offload;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		sxevf_secondary_proc_init(eth_dev);
+		goto l_out;
+	}
+
+	sxevf_devargs_parse(eth_dev->data->dev_private,
+			      pci_dev->device.devargs);
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+#ifdef DPDK_19_11_6
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+#else
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+	ret = sxevf_hw_base_init(eth_dev);
+	if (ret) {
+		ret = -EIO;
+		LOG_ERROR_BDF("hw base init fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	sxevf_dev_stats_reset(eth_dev);
+
+	sxevf_stats_init_value_get(hw, &stats_info->hw_stats);
+
+	sxevf_mbx_api_version_init(adapter);
+
+	sxevf_ring_info_get(adapter, &default_tc, &tc_num);
+
+	sxevf_irq_init(eth_dev);
+
+	LOG_INFO_BDF("sxevf eth dev init done.");
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_ethdev_uninit(struct rte_eth_dev *eth_dev)
+{
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_WARN(INIT, "secondary process can't uninit.");
+		goto l_out;
+	}
+
+	sxevf_dev_close(eth_dev);
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_ethdev.h b/drivers/net/sxe/vf/sxevf_ethdev.h
new file mode 100644
index 0000000000..4eb33321a3
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_ethdev.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_ETHDEV_H__
+#define __SXEVF_ETHDEV_H__
+
+s32 sxevf_ethdev_init(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_ethdev_uninit(struct rte_eth_dev *eth_dev);
+
+u32 sxevf_regs_group_num_get(void);
+
+void sxevf_regs_group_read(struct sxevf_hw *hw, u32 *data);
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_filter.c b/drivers/net/sxe/vf/sxevf_filter.c
new file mode 100644
index 0000000000..4f788ee4a1
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_filter.c
@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <string.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "sxevf.h"
+#include "sxe_logs.h"
+#include "sxevf_msg.h"
+#include "sxe_errno.h"
+#include "sxevf_filter.h"
+#include "sxevf_rx.h"
+#include "sxevf_queue.h"
+#include "sxe_compat_version.h"
+
+#define  SXEVF_MAC_ADDR_EXTRACT_MASK  (0xFFF)
+#define  SXEVF_MAC_ADDR_SHIFT         (5)
+#define  SXEVF_MAC_ADDR_REG_MASK      (0x7F)
+#define  SXEVF_MAC_ADDR_BIT_MASK      (0x1F)
+
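+/* Per-queue vlan strip state lives in a u32 bitmap: queue q maps to bit
+ * (q % 32) of word (q / 32), so the SET/CLEAR/GET helpers below are O(1).
+ */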
+#define SXEVF_STRIP_BITMAP_SET(h, q) \
+	do { \
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] |= 1 << bit;\
+	} while (0)
+
+#define SXEVF_STRIP_BITMAP_CLEAR(h, q) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] &= ~(1 << bit);\
+	} while (0)
+
+#define SXEVF_STRIP_BITMAP_GET(h, q, r) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(r) = (h)->strip_bitmap[idx] >> bit & 1;\
+	} while (0)
+
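+/* Build a locally administered MAC: fixed e4:b6:33 prefix with the
+ * local-admin bit set, plus three random bytes from rte_rand().
+ */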
+static void sxevf_random_mac_addr_generate(struct rte_ether_addr *mac_addr)
+{
+	u64 random;
+
+	mac_addr->addr_bytes[0] = 0xe4;
+	mac_addr->addr_bytes[1] = 0xb6;
+	mac_addr->addr_bytes[2] = 0x33;
+
+	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+	random = rte_rand();
+	memcpy(&mac_addr->addr_bytes[3], &random, 3);
+
+	return;
+}
+
+s32 sxevf_mac_addr_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	s32 ret = 0;
+
+	eth_dev->data->mac_addrs = rte_zmalloc("sxe",
+				RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		LOG_ERROR_BDF("mac addr allocate %u B fail.",
+			     RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	if (rte_is_zero_ether_addr(&mac_filter->def_mac_addr)) {
+		sxevf_random_mac_addr_generate(&mac_filter->def_mac_addr);
+		ret = sxevf_mac_addr_set(hw, mac_filter->def_mac_addr.addr_bytes);
+		if (ret) {
+			LOG_ERROR_BDF("vf uc mac addr set fail.(err:%d)", ret);
+			goto l_free;
+		}
+		LOG_INFO_BDF("generate random mac_addr:"MAC_FMT,
+			MAC_ADDR(mac_filter->def_mac_addr.addr_bytes));
+	}
+
+	rte_ether_addr_copy(&mac_filter->def_mac_addr, &eth_dev->data->mac_addrs[0]);
+
+	mac_filter->uc_table_size = SXEVF_HW_UC_ENTRY_NUM_MAX;
+
+l_out:
+	return ret;
+
+l_free:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	return ret;
+}
+
+void sxevf_vfta_sync(struct rte_eth_dev *eth_dev, bool on)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u32 reg_idx;
+	u32 bit_idx;
+	u32 vfta;
+	u32 mask;
+	u32 vlan_id;
+
+	for (reg_idx = 0; reg_idx < SXEVF_VFT_TBL_SIZE; reg_idx++) {
+		vfta = vlan_ctxt->vlan_table[reg_idx];
+		if (vfta) {
+			mask = 1;
+			for (bit_idx = 0; bit_idx < 32; bit_idx++) {
+				vlan_id = (reg_idx << 5) + bit_idx;
+				if (vfta & mask) {
+					sxevf_vlan_id_set(hw, vlan_id, on);
+				}
+				mask <<= 1;
+			}
+		}
+	}
+
+	return;
+}
+
+static void sxevf_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bool on)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	sxevf_rx_queue_s *rxq;
+
+	if (queue_idx >= adapter->max_rx_queue) {
+		LOG_ERROR_BDF("invalid queue idx:%u exceed max"
+			   " queue  number:%u.",
+			   queue_idx, adapter->max_rx_queue);
+		goto l_out;
+	}
+
+	if (on) {
+		SXEVF_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
+	} else {
+		SXEVF_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
+	}
+
+	if (queue_idx >= dev->data->nb_rx_queues) {
+		LOG_ERROR_BDF("invalid queue_idx id:%u exceed rx "
+			   " queue number:%u.",
+			   queue_idx, dev->data->nb_rx_queues);
+		goto l_out;
+	}
+
+	rxq = dev->data->rx_queues[queue_idx];
+
+	if (on) {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	} else {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+
+	LOG_INFO_BDF("queue idx:%u vlan strip on:%d set bitmap and offload done.",
+		     queue_idx, on);
+
+l_out:
+	return;
+}
+
+static void sxevf_vlan_strip_switch_set(struct rte_eth_dev *dev)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u16 i;
+	sxevf_rx_queue_s *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			on = true;
+		} else {
+			on = false;
+		}
+		sxevf_hw_vlan_tag_strip_switch(hw, i, on);
+
+		sxevf_vlan_strip_bitmap_set(dev, i, on);
+	}
+
+	return;
+}
+
+static void sxevf_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		sxevf_vlan_strip_switch_set(dev);
+	}
+
+	return;
+}
+
+void sxevf_vlan_filter_configure(struct rte_eth_dev *eth_dev)
+{
+	u32 vlan_mask;
+
+	sxevf_vfta_sync(eth_dev, true);
+
+	vlan_mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		    RTE_ETH_VLAN_EXTEND_MASK;
+
+	sxevf_vlan_offload_configure(eth_dev, vlan_mask);
+
+	return;
+}
+
+s32 sxevf_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret;
+
+	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_PROMISC);
+	if (ret) {
+		LOG_ERROR_BDF("cast mode:0x%x set fail.(err:%d)",
+			      SXEVF_CAST_MODE_PROMISC, ret);
+	}
+
+	return ret;
+}
+
+s32 sxevf_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 mode = SXEVF_CAST_MODE_NONE;
+	s32 ret;
+
+	if (eth_dev->data->all_multicast) {
+		mode = SXEVF_CAST_MODE_ALLMULTI;
+	}
+	ret = sxevf_cast_mode_set(hw, mode);
+	if (ret) {
+		LOG_ERROR_BDF("disable mc promiscuous fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+s32 sxevf_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	if (eth_dev->data->promiscuous) {
+		goto l_out;
+	}
+
+	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_ALLMULTI);
+	if (ret) {
+		LOG_ERROR_BDF("cast mode:0x%x set fail.(err:%d)",
+			      SXEVF_CAST_MODE_ALLMULTI, ret);
+	}
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	if (eth_dev->data->promiscuous) {
+		goto l_out;
+	}
+
+	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_MULTI);
+	if (ret) {
+		LOG_ERROR_BDF("disable mc promiscuous fail.(err:%d)", ret);
+	}
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	s32 ret;
+	u8 reg_idx;
+	u8 bit_idx;
+
+	ret = sxevf_vlan_id_set(hw, vlan_id, on);
+	if (ret) {
+		LOG_ERROR_BDF("vlan_id:0x%x status:%u set fail.(err:%d)",
+			      vlan_id, on, ret);
+		goto l_out;
+	}
+
+	reg_idx = (vlan_id >> SXEVF_VLAN_ID_SHIFT) & SXEVF_VLAN_ID_REG_MASK;
+	bit_idx = (vlan_id & SXEVF_VLAN_ID_BIT_MASK);
+
+	if (on) {
+		vlan_ctxt->vlan_table[reg_idx] |= (1 << bit_idx);
+	} else {
+		vlan_ctxt->vlan_table[reg_idx] &= ~(1 << bit_idx);
+	}
+
+	LOG_INFO_BDF("vlan_id:0x%x status:%u set success.", vlan_id, on);
+
+l_out:
+	return ret;
+}
+
+void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	if (queue >= adapter->max_rx_queue) {
+		LOG_ERROR_BDF("queue id:%u invalid exceed max rx queue num:%u",
+			      queue, adapter->max_rx_queue);
+		goto l_out;
+	}
+
+	sxevf_hw_vlan_tag_strip_switch(hw, queue, on);
+
+	sxevf_vlan_strip_bitmap_set(dev, queue, on);
+
+	LOG_INFO_BDF("queue:%u vlan tag strip on:%u done", queue, on);
+
+l_out:
+	return;
+}
+
+static void sxevf_vlan_strip_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	u16 i;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	sxevf_rx_queue_s *rxq;
+
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+			}
+		} else {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+			}
+		}
+	}
+
+	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
+		     " all queue vlan strip offload flag configure done",
+		     mask, rxmode->offloads);
+
+	return;
+}
+
+s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask)
+{
+	sxevf_vlan_strip_offload_configure(dev, mask);
+
+	sxevf_vlan_offload_configure(dev, mask);
+
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%d set done.", mask);
+
+	return 0;
+}
+
+s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	ret = sxevf_mac_addr_set(hw, mac_addr->addr_bytes);
+	if (ret) {
+		LOG_ERROR_BDF("modify default mac addr to "MAC_FMT" fail.(err:%d)",
+			      MAC_ADDR(mac_addr->addr_bytes), ret);
+	} else {
+		LOG_INFO_BDF("modify default mac addr to "MAC_FMT" success.",
+			      MAC_ADDR(mac_addr->addr_bytes));
+	}
+
+	return ret;
+}
+
+s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     __rte_unused u32 rar_idx, __rte_unused u32 pool)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_mac_filter_context *mac_ctxt = &adapter->mac_filter_ctxt;
+
+	if (memcmp(mac_ctxt->def_mac_addr.addr_bytes, mac_addr->addr_bytes,
+		    sizeof(*mac_addr)) == 0) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("mac_addr:"MAC_FMT" eaqual to defalut mac addr"
+			     " skip mac addr add.(err:%d)",
+			     MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	ret = sxevf_uc_addr_add(hw, 2, mac_addr->addr_bytes);
+	if (ret) {
+		LOG_ERROR_BDF("mac_addr:"MAC_FMT" add fail.(err:%d)",
+			      MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("mac_addr:"MAC_FMT" add success.",
+	     	      MAC_ADDR(mac_addr->addr_bytes));
+
+l_out:
+	return ret;
+}
+
+void sxevf_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_mac_filter_context *mac_ctxt = &adapter->mac_filter_ctxt;
+	struct rte_ether_addr *mac_addr;
+	u8 i;
+
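+	/* index 0 with a NULL address asks the pf to flush the vf uc list;
+	 * every entry except the removed one is then re-synced below.
+	 */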
+	sxevf_uc_addr_add(hw, 0, NULL);
+
+	for (i = 0, mac_addr = dev->data->mac_addrs; i < mac_ctxt->uc_table_size;
+	     i++, mac_addr++) {
+		if ((i == index) || rte_is_zero_ether_addr(mac_addr) ||
+		(memcmp(mac_ctxt->def_mac_addr.addr_bytes, mac_addr->addr_bytes,
+		        sizeof(*mac_addr)) == 0)) {
+			continue;
+		}
+		sxevf_uc_addr_add(hw, 2, mac_addr->addr_bytes);
+	}
+
+	LOG_INFO_BDF("index:%u mac addr"MAC_FMT" remove success.",
+		      index, MAC_ADDR(dev->data->mac_addrs[index].addr_bytes));
+	return;
+}
+
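+/* Derive the 12-bit multicast filter index from the high nibble of MAC
+ * byte 4 and all of byte 5, mirroring the pf hash table layout.
+ */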
+static u16 sxevf_hash_mac_addr_parse(u8 *mac_addr)
+{
+	u16 extracted = ((mac_addr[4] >> 4) |
+			(((u16)mac_addr[5]) << 4));
+
+	extracted &= SXEVF_MAC_ADDR_EXTRACT_MASK;
+
+	PMD_LOG_DEBUG(DRV, "mac_addr:"MAC_FMT" parse result:0x%x",
+			 MAC_ADDR(mac_addr), extracted);
+
+	return extracted;
+}
+
+s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr)
+{
+	s32 ret;
+	u32 result;
+	struct sxevf_mc_sync_msg msg;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 i;
+
+	msg.msg_type = SXEVF_MC_ADDR_SYNC;
+	msg.mc_cnt = min(nb_mc_addr, (u32)SXEVF_MC_ENTRY_NUM_MAX);
+
+	for (i = 0; i < msg.mc_cnt; i++) {
+		msg.mc_addr_extract[i] = sxevf_hash_mac_addr_parse(mc_addr_list->addr_bytes);
+		mc_addr_list++;
+	}
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
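+	/* msg_type and mc_cnt are the u16 halves of the first mailbox dword;
+	 * rebuild that dword to check the pf reply below.
+	 */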
+	result = (msg.mc_cnt << 16) | msg.msg_type;
+
+	if (ret || ((result & SXEVF_MC_ADDR_SYNC) &&
+		    (result & SXEVF_MSGTYPE_NACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "msg_type:0x%x len:%zu mc_cnt:%d msg "
+		  "result:0x%x.(ret:%d)\n",
+		  msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)),
+		  msg.mc_cnt, result, ret);
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_filter.h b/drivers/net/sxe/vf/sxevf_filter.h
new file mode 100644
index 0000000000..9e74718b95
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_filter.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_FILTER_H__
+#define __SXEVF_FILTER_H__
+
+#include <rte_ether.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#define SXEVF_MTA_ENTRY_NUM_MAX        128
+#define SXEVF_UTA_HASH_BIT_MAX         4096
+#define VLAN_N_VID     4096
+#define BYTE_BIT_NUM   8
+
+#define  SXEVF_VLAN_ID_SHIFT         (5)
+#define  SXEVF_VLAN_ID_REG_MASK      (0x7F)
+#define  SXEVF_VLAN_ID_BIT_MASK      (0x1F)
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+			   ((u8 *)(x))[2], ((u8 *)(x))[3], \
+			   ((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#define SXEVF_VLAN_STRIP_BITMAP_SIZE    \
+	(SXEVF_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM))
+
+struct sxevf_vlan_context {
+	u32 vlan_table[SXEVF_VFT_TBL_SIZE];
+	u32 strip_bitmap[SXEVF_VLAN_STRIP_BITMAP_SIZE];
+	u32 vlan_table_size;
+};
+
+struct sxevf_mac_filter_context {
+	struct rte_ether_addr def_mac_addr;
+	u8  mc_filter_type;
+	u32 uc_table_size;
+};
+
+void sxevf_vlan_filter_init(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_mac_addr_init(struct rte_eth_dev *eth_dev);
+
+void sxevf_vlan_filter_configure(struct rte_eth_dev *eth_dev);
+
+void sxevf_vfta_sync(struct rte_eth_dev *eth_dev, bool on);
+
+s32 sxevf_promiscuous_disable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_promiscuous_enable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_allmulticast_disable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_allmulticast_enable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on);
+
+void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on);
+
+s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask);
+
+s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr);
+
+void sxevf_mac_addr_remove(struct rte_eth_dev *dev, u32 index);
+
+s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     __rte_unused u32 rar_idx, __rte_unused u32 pool);
+
+s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr);
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_irq.c b/drivers/net/sxe/vf/sxevf_irq.c
new file mode 100644
index 0000000000..646a10d6dc
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_irq.c
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_alarm.h>
+#include <rte_interrupts.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_eal_interrupts.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <eal_interrupts.h>
+#else
+#include <bus_pci_driver.h>
+#include <eal_interrupts.h>
+#endif
+
+#include "sxevf.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxevf_rx.h"
+#include "sxevf_irq.h"
+#include "sxevf_msg.h"
+#include "sxevf_queue.h"
+#include "sxe_compat_version.h"
+
+#define SXEVF_IRQ_LINK_CONFIG      (u32)(1 << 3)
+
+#define SXEVF_RX_OTHER_IRQ_MASK     (3)
+
+#define SXEVF_MISC_VEC_ID        RTE_INTR_VEC_ZERO_OFFSET
+
+#define SXEVF_RX_VEC_BASE          RTE_INTR_VEC_RXTX_OFFSET
+
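+/* The EITR interval field counts 2048 ns units shifted into bits 3..11;
+ * SXEVF_EITR_INTERVAL_US() converts microseconds into that encoding.
+ */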
+#define SXEVF_EITR_INTERVAL_UNIT_NS	2048
+#define SXEVF_EITR_ITR_INT_SHIFT        3
+#define SXEVF_IRQ_ITR_MASK              (0x00000FF8)
+#define SXEVF_EITR_INTERVAL_US(us) \
+	(((us) * 1000 / SXEVF_EITR_INTERVAL_UNIT_NS << SXEVF_EITR_ITR_INT_SHIFT) & \
+		SXEVF_IRQ_ITR_MASK)
+
+#define SXEVF_QUEUE_ITR_INTERVAL_DEFAULT   500
+
+void sxevf_intr_disable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxevf_irq_disable(hw);
+
+	irq_ctxt->enable_mask = 0;
+
+	return;
+}
+
+void sxevf_intr_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxevf_irq_enable(hw, SXEVF_RX_OTHER_IRQ_MASK);
+
+	irq_ctxt->enable_mask = SXEVF_RX_OTHER_IRQ_MASK;
+
+	return;
+}
+
+static s32 sxevf_ctrl_msg_check(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 ctrl_msg;
+	s32 ret;
+
+	ret = sxevf_ctrl_msg_rcv_and_clear(hw, (u32 *)&ctrl_msg,
+				SXEVF_MSG_NUM(sizeof(ctrl_msg)));
+	if (ret) {
+		PMD_LOG_INFO(DRV, "ctrl msg rcv fail due to lock fail.(err:%d)\n", ret);
+		goto l_end;
+	}
+
+	if (ctrl_msg & SXEVF_PF_CTRL_MSG_REINIT) {
+		sxe_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_RESET,
+					     NULL);
+		PMD_LOG_INFO(DRV, "rcv reinit msg.\n");
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxevf_link_msg_check(struct rte_eth_dev *eth_dev, bool *link_up)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 ctrl_msg;
+	s32 ret;
+
+	ret = sxevf_ctrl_msg_rcv_and_clear(hw, (u32 *)&ctrl_msg,
+				SXEVF_MSG_NUM(sizeof(ctrl_msg)));
+	if (ret) {
+		PMD_LOG_INFO(DRV, "ctrl msg rcv fail due to lock fail.(err:%d)\n", ret);
+		goto l_end;
+	}
+
+	if (ctrl_msg & SXEVF_PF_CTRL_MSG_NETDEV_DOWN) {
+		*link_up = false;
+		PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x need link down.\n", ctrl_msg);
+	} else if (ctrl_msg & SXEVF_PF_CTRL_MSG_LINK_UPDATE) {
+		*link_up = true;
+		PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x physical link up.\n", ctrl_msg);
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC void sxevf_mbx_irq_handler(void *data)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)data;
+
+	sxevf_intr_disable(eth_dev);
+
+	sxevf_ctrl_msg_check(eth_dev);
+
+	sxevf_intr_enable(eth_dev);
+
+	return;
+}
+
+void sxevf_irq_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *irq_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	sxevf_intr_disable(eth_dev);
+
+	rte_intr_callback_register(irq_handle,
+				   sxevf_mbx_irq_handler, eth_dev);
+
+	rte_intr_enable(irq_handle);
+	sxevf_intr_enable(eth_dev);
+
+	return;
+}
+
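+/* Map the mailbox event to vector 0, spread rx queues over the remaining
+ * efd vectors and program a default ITR interval for queue irqs.
+ */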
+static s32 sxevf_msix_configure(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_rx_queue_s *rx_queue;
+	u16 queue_id;
+	u16 vector = SXEVF_MISC_VEC_ID;
+	u16 base = SXEVF_MISC_VEC_ID;
+	u32 irq_interval;
+	s32 ret = 0;
+
+	sxevf_event_irq_map(hw, vector);
+
+	if (!rte_intr_dp_is_en(handle)) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "intr type:%u nb_efd:%u irq unsupported.(err:%d)\n",
+				  handle->type, handle->nb_efd, ret);
+		goto l_out;
+	}
+
+	if (rte_intr_allow_others(handle)) {
+		vector = base = SXEVF_RX_VEC_BASE;
+	}
+
+	for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+		queue_id++) {
+		rx_queue = dev->data->rx_queues[queue_id];
+		sxevf_hw_ring_irq_map(hw, false,
+					rx_queue->reg_idx,
+					vector);
+		handle->intr_vec[queue_id] = vector;
+		PMD_LOG_INFO(DRV,
+				"queue id:%u reg_idx:%u vector:%u ",
+				queue_id,
+				rx_queue->reg_idx,
+				vector);
+		if (vector < base + handle->nb_efd - 1) {
+			vector++;
+		}
+	}
+
+	irq_interval = SXEVF_EITR_INTERVAL_US(SXEVF_QUEUE_ITR_INTERVAL_DEFAULT);
+	sxevf_ring_irq_interval_set(hw, 0, irq_interval);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_irq_configure(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	u16 irq_num;
+	s32 ret = 0;
+
+	if (rte_intr_cap_multiple(handle) &&
+	     eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+		irq_num = 1;
+		if (rte_intr_efd_enable(handle, irq_num)) {
+			ret = -SXE_ERR_CONFIG;
+			PMD_LOG_ERR(DRV,
+				      "intr_handle type:%d irq num:%d invalid",
+				      handle->type, irq_num);
+			goto l_out;
+		}
+	}
+
+	if (rte_intr_dp_is_en(handle) && !handle->intr_vec) {
+		handle->intr_vec = rte_zmalloc("intr_vec",
+				    eth_dev->data->nb_rx_queues * sizeof(u32), 0);
+		if (handle->intr_vec == NULL) {
+			PMD_LOG_ERR(DRV, "rx queue irq vector "
+					 "allocate %zuB memory fail.",
+					 eth_dev->data->nb_rx_queues * sizeof(u32));
+			ret = -ENOMEM;
+			goto l_out;
+		}
+	}
+
+	ret = sxevf_msix_configure(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "intr type:%u nb_efd:%u irq unsupported.(err:%d)\n",
+				  handle->type, handle->nb_efd, ret);
+		goto l_out;
+	}
+
+	rte_intr_disable(handle);
+
+	rte_intr_enable(handle);
+
+	sxevf_intr_enable(eth_dev);
+
+	PMD_LOG_INFO(DRV,
+		      "intr_handle type:%d rx queue num:%d "
+		      "queue irq num:%u total irq num:%u "
+		      "config done",
+		      handle->type,
+		      eth_dev->data->nb_rx_queues,
+		      handle->nb_efd,
+		      handle->max_intr);
+
+l_out:
+	return ret;
+}
+
+void sxevf_irq_free(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	rte_intr_disable(handle);
+
+	if (handle->intr_vec) {
+		rte_free(handle->intr_vec);
+		handle->intr_vec = NULL;
+	}
+
+	return;
+}
+
+void sxevf_irq_unregister(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	rte_intr_callback_unregister(handle, sxevf_mbx_irq_handler, eth_dev);
+
+	return;
+}
+
+s32 sxevf_rx_queue_intr_enable(struct rte_eth_dev *dev, u16 queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+	u32 vector = SXEVF_MISC_VEC_ID;
+
+	RTE_SET_USED(queue_id);
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vector = SXEVF_RX_VEC_BASE;
+	}
+
+	irq_ctxt->enable_mask |= (1 << vector);
+
+	sxevf_specific_irq_enable(hw, irq_ctxt->enable_mask);
+
+	rte_intr_ack(intr_handle);
+
+	return 0;
+}
+
+s32 sxevf_rx_queue_intr_disable(struct rte_eth_dev *dev, u16 queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+	u32 vector = SXEVF_MISC_VEC_ID;
+
+	RTE_SET_USED(queue_id);
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vector = SXEVF_RX_VEC_BASE;
+	}
+
+	irq_ctxt->enable_mask &= ~(1 << vector);
+
+	sxevf_specific_irq_enable(hw, irq_ctxt->enable_mask);
+
+	return 0;
+}
+
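+/* Poll the VFLINKS register: the link-up bit must stay set across five
+ * 100 us reads before the reported speed is trusted.
+ */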
+static void sxevf_physical_link_check(struct rte_eth_dev *dev, u32 *link_speed, bool *link_up)
+{
+	u32 link_reg, i;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	link_reg = sxevf_link_state_get(hw);
+	if (!(link_reg & SXE_VFLINKS_UP)) {
+		*link_up = false;
+		goto l_end;
+	}
+
+	for (i = 0; i < 5; i++) {
+		udelay(100);
+		link_reg = sxevf_link_state_get(hw);
+		if (!(link_reg & SXE_VFLINKS_UP)) {
+			*link_up = false;
+			goto l_end;
+		}
+	}
+
+	switch (link_reg & SXE_VFLINKS_SPEED) {
+	case SXE_VFLINKS_SPEED_10G:
+		*link_speed = SXEVF_LINK_SPEED_10GB_FULL;
+		break;
+	case SXE_VFLINKS_SPEED_1G:
+		*link_speed = SXEVF_LINK_SPEED_1GB_FULL;
+		break;
+	case SXE_VFLINKS_SPEED_100:
+		*link_speed = SXEVF_LINK_SPEED_100_FULL;
+		break;
+	default:
+		*link_speed = SXEVF_LINK_SPEED_UNKNOWN;
+	}
+
+	*link_up = true;
+
+l_end:
+	PMD_LOG_INFO(DRV, "link up status:%d.\n", *link_up);
+	return;
+}
+
+static void sxevf_link_info_get(struct rte_eth_dev *dev, int wait_to_complete,
+				   u32 *link_speed, bool *link_up)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+	sxevf_physical_link_check(dev, link_speed, link_up);
+
+	if ((wait_to_complete == 0) && (adapter->link_check == 0)) {
+		if (*link_speed == SXEVF_LINK_SPEED_UNKNOWN) {
+			*link_up = false;
+		} else {
+			*link_up = true;
+		}
+		goto l_end;
+	}
+
+	if (*link_up) {
+		ret = sxevf_link_msg_check(dev, link_up);
+		if (ret) {
+			PMD_LOG_ERR(DRV, "ctrl msg rcv fail, try to next workqueue.\n");
+			goto l_end;
+		}
+	}
+
+l_end:
+	return;
+}
+
+s32 sxevf_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	bool link_up;
+	struct rte_eth_link link;
+	u32 link_speed = SXEVF_LINK_SPEED_UNKNOWN;
+
+	PMD_LOG_INFO(INIT, "link update start...");
+
+	memset(&link, 0, sizeof(link));
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed  = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+						RTE_ETH_LINK_SPEED_FIXED);
+
+	if (dev->data->dev_conf.intr_conf.lsc) {
+		wait_to_complete = 0;
+	}
+
+	sxevf_link_info_get(dev, wait_to_complete, &link_speed, &link_up);
+
+	if (link_up == false) {
+		PMD_LOG_ERR(DRV, "other link thread is running now!");
+
+		goto l_end;
+	}
+
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	switch (link_speed) {
+	case SXEVF_LINK_SPEED_1GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
+		break;
+
+	case SXEVF_LINK_SPEED_10GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
+		break;
+	default:
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+	}
+
+l_end:
+	PMD_LOG_INFO(DRV, "link update end, up=%x, speed=%x",
+			  link_up, link_speed);
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_irq.h b/drivers/net/sxe/vf/sxevf_irq.h
new file mode 100644
index 0000000000..169eb1f0fd
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_irq.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_IRQ_H__
+#define __SXEVF_IRQ_H__
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include "sxe_compat_platform.h"
+
+struct sxevf_irq_context {
+	u32 enable_mask;
+	u32 enable_mask_original;
+};
+
+void sxevf_intr_disable(struct rte_eth_dev *eth_dev);
+
+void sxevf_intr_enable(struct rte_eth_dev *eth_dev);
+
+void sxevf_irq_init(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_irq_configure(struct rte_eth_dev *eth_dev);
+
+void sxevf_irq_free(struct rte_eth_dev *eth_dev);
+
+void sxevf_irq_unregister(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_rx_queue_intr_disable(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 sxevf_rx_queue_intr_enable(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 sxevf_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_main.c b/drivers/net/sxe/vf/sxevf_main.c
new file mode 100644
index 0000000000..72d600c0b1
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_main.c
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <string.h>
+#include <sys/time.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+
+#include "sxe_version.h"
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxevf.h"
+#include "sxe_logs.h"
+#include "sxevf_ethdev.h"
+#include "sxe_queue_common.h"
+
+#define PCI_VENDOR_ID_STARS      0x1FF2
+#define SXEVF_DEV_ID_ASIC        0x10A2
+
+static s32 sxevf_probe(struct rte_pci_driver *pci_drv __rte_unused,
+					struct rte_pci_device *pci_dev)
+{
+	s32 ret;
+
+	printf("sxe_version[%s], sxe_commit_id[%s], sxe_branch[%s], sxe_build_time[%s]\n", 
+		SXE_VERSION, SXE_COMMIT_ID, SXE_BRANCH, SXE_BUILD_TIME);
+
+#ifdef SXE_DPDK_DEBUG
+	sxe_log_stream_init();
+#endif
+
+	ret = rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct sxevf_adapter), sxevf_ethdev_init);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sxe pmd eth dev create fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(INIT, "%s sxevf pmd probe done.", pci_dev->device.name);
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_remove(struct rte_pci_device *pci_dev)
+{
+	s32 ret;
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev,
+			sxevf_ethdev_uninit);
+	if (ret) {
+		LOG_ERROR("vf remove fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+static const struct rte_pci_id sxevf_pci_tbl[] = {
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_STARS, SXEVF_DEV_ID_ASIC) },
+	{.vendor_id = 0,}
+};
+
+STATIC struct rte_pci_driver rte_sxevf_pmd = {
+	.id_table  = sxevf_pci_tbl,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe     = sxevf_probe,
+	.remove    = sxevf_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_sxevf, rte_sxevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sxevf, sxevf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_sxevf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_sxevf,
+			      SXEVF_DEVARG_LINK_CHECK "=<0|1>");
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_msg.c b/drivers/net/sxe/vf/sxevf_msg.c
new file mode 100644
index 0000000000..6cd64fc1b3
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_msg.c
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include "sxevf.h"
+#include "sxevf_msg.h"
+#include "sxevf_hw.h"
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+
+#define SXEVF_PFMSG_MASK    0xFF00
+#define SXEVF_DEFAULT_TC_NUM        1
+
+void sxevf_mbx_init(struct sxevf_hw *hw)
+{
+	hw->mbx.msg_len = SXEVF_MBX_MSG_NUM;
+
+	hw->mbx.stats.rcv_msgs = 0;
+	hw->mbx.stats.send_msgs = 0;
+	hw->mbx.stats.acks = 0;
+	hw->mbx.stats.reqs = 0;
+	hw->mbx.stats.rsts = 0;
+
+	hw->mbx.retry = 0;
+	hw->mbx.interval = SXEVF_MBX_RETRY_INTERVAL;
+
+	hw->mbx.api_version = SXEVF_MBX_API_10;
+
+	return;
+}
+
+static u32 sxevf_mbx_reg_read(struct sxevf_hw *hw)
+{
+	u32 value = sxevf_mailbox_read(hw);
+
+	value |= hw->mbx.reg_value;
+
+	hw->mbx.reg_value |= value & SXE_VFMAILBOX_RC_BIT;
+
+	return value;
+}
+
+static bool sxevf_mbx_bit_check(struct sxevf_hw *hw, u32 mask)
+{
+	bool ret = false;
+	u32 value = sxevf_mbx_reg_read(hw);
+
+	if (value & mask) {
+		ret = true;
+	}
+
+	hw->mbx.reg_value &= ~mask;
+
+	return ret;
+}
+
+STATIC bool sxevf_pf_msg_check(struct sxevf_hw *hw)
+{
+	bool ret = false;
+
+	if (sxevf_mbx_bit_check(hw, SXE_VFMAILBOX_PFSTS)) {
+		hw->mbx.stats.reqs++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+STATIC bool sxevf_pf_ack_check(struct sxevf_hw *hw)
+{
+	bool ret = false;
+
+	if (sxevf_mbx_bit_check(hw, SXE_VFMAILBOX_PFACK)) {
+		hw->mbx.stats.acks++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+bool sxevf_pf_rst_check(struct sxevf_hw *hw)
+{
+	bool ret = false;
+
+	if (!sxevf_mbx_bit_check(hw, (SXE_VFMAILBOX_RSTI |
+				      SXE_VFMAILBOX_RSTD))) {
+		hw->mbx.stats.rsts++;
+		ret = true;
+	}
+
+	return ret;
+}
+
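+/* Claim the vf side of the mailbox by setting VFU and reading it back,
+ * retrying up to SXEVF_MBX_RETRY_COUNT times with udelay() backoff.
+ */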
+STATIC s32 sxevf_mailbox_lock(struct sxevf_hw *hw)
+{
+	u32 mailbox;
+	u32 retry = SXEVF_MBX_RETRY_COUNT;
+	s32 ret = -SXEVF_ERR_MBX_LOCK_FAIL;
+
+	while (retry--) {
+		mailbox = sxevf_mbx_reg_read(hw);
+		mailbox |= SXE_VFMAILBOX_VFU;
+		sxevf_mailbox_write(hw, mailbox);
+
+		if (sxevf_mbx_reg_read(hw) & SXE_VFMAILBOX_VFU) {
+			ret = 0;
+			break;
+		}
+
+		udelay(hw->mbx.interval);
+	}
+
+	return ret;
+}
+
+static void sxevf_mailbox_unlock(struct sxevf_hw *hw)
+{
+	u32 mailbox;
+
+	mailbox = sxevf_mbx_reg_read(hw);
+	mailbox &= ~SXE_VFMAILBOX_VFU;
+	sxevf_mailbox_write(hw, mailbox);
+
+	return;
+}
+
+STATIC bool sxevf_msg_poll(struct sxevf_hw *hw)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	u32 retry = mbx->retry;
+	bool ret = true;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (!sxevf_pf_msg_check(hw) && retry) {
+		retry--;
+		udelay(mbx->interval);
+	}
+
+	if (!retry) {
+		LOG_ERROR_BDF("retry:%d send msg to pf done, but don't check pf reply.\n",
+			  mbx->retry);
+		mbx->retry = 0;
+		ret = false;
+	}
+
+	return ret;
+}
+
+STATIC bool sxevf_ack_poll(struct sxevf_hw *hw)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	u32 retry = mbx->retry;
+	bool ret = true;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (!sxevf_pf_ack_check(hw) && retry) {
+		retry--;
+		udelay(mbx->interval);
+	}
+
+	if (!retry) {
+		LOG_ERROR_BDF("send msg to pf, retry:%d but don't check pf ack, "
+			  "init mbx retry to 0.\n",
+			  mbx->retry);
+		mbx->retry = 0;
+		ret = false;
+	}
+
+	return ret;
+}
+
+STATIC void sxevf_pf_msg_and_ack_clear(struct sxevf_hw *hw)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	LOG_INFO_BDF("clear pending pf msg and ack.\n");
+
+	sxevf_pf_msg_check(hw);
+	sxevf_pf_ack_check(hw);
+
+	return;
+}
+
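+/* Write the message into the mailbox registers, trigger the pf request
+ * irq and poll for the pf ack before returning.
+ */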
+static s32 sxevf_send_msg_to_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	s32 ret = 0;
+	u16 i;
+	u32 old;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (!mbx->retry) {
+		ret = -SXEVF_ERR_NOT_READY;
+		LOG_ERROR_BDF("msg:0x%x len:%d send fail due to timeout.(err:%d)\n",
+			  msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	if (msg_len > mbx->msg_len) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("vf msg:0x%x len:%d exceed limit:%d "
+			  "send fail.(err:%d)\n",
+			  msg[0], msg_len, mbx->msg_len, ret);
+		goto l_out;
+	}
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x len:%d send lock mailbox fail.(err:%d)\n",
+			   msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	sxevf_pf_msg_and_ack_clear(hw);
+
+	old = sxevf_msg_read(hw, 0);
+	msg[0] |= (old & SXEVF_PFMSG_MASK);
+
+	for (i = 0; i < msg_len; i++) {
+		sxevf_msg_write(hw, i, msg[i]);
+	}
+
+	sxevf_pf_req_irq_trigger(hw);
+
+	hw->mbx.stats.send_msgs++;
+
+	if (!sxevf_ack_poll(hw)) {
+		ret = -SXEVF_ERR_POLL_ACK_FAIL;
+		LOG_ERROR_BDF("msg:0x%x len:%d send done, but don't poll ack.\n",
+			   msg[0], msg_len);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("vf send msg:0x%x len:%d to pf and polled pf ack done."
+		 "stats send_msg:%d ack:%d.\n",
+		 msg[0], msg_len,
+		 mbx->stats.send_msgs, mbx->stats.acks);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	u32 i;
+	u16 msg_entry;
+	s32 ret = 0;
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n",
+			   msg_entry, ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = sxevf_msg_read(hw, i);
+	}
+
+	msg[0] &= ~SXEVF_PFMSG_MASK;
+
+	sxevf_pf_ack_irq_trigger(hw);
+
+	mbx->stats.rcv_msgs++;
+l_end:
+	return ret;
+}
+
+s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	u16 i;
+	u16 msg_entry;
+	s32 ret = 0;
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n",
+			   msg_entry, ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = sxevf_msg_read(hw, i);
+	}
+
+	sxevf_mailbox_unlock(hw);
+
+	LOG_INFO_BDF("rcv pf mailbox msg:0x%x.\n", *msg);
+
+	mbx->stats.rcv_msgs++;
+l_end:
+	return ret;
+}
+
+s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	u16 i;
+	u16 msg_entry;
+	s32 ret = 0;
+	u32 clear;
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n",
+			   msg_entry, ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = sxevf_msg_read(hw, i);
+	}
+
+	clear = msg[0] & (~SXEVF_PFMSG_MASK);
+	sxevf_msg_write(hw, 0, clear);
+
+	sxevf_mailbox_unlock(hw);
+
+	LOG_INFO_BDF("rcv pf mailbox msg:0x%x.\n", *msg);
+
+	mbx->stats.rcv_msgs++;
+l_end:
+	return ret;
+}
+
+static s32 sxevf_rcv_msg_from_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (!sxevf_msg_poll(hw)) {
+		ret = -SXEVF_ERR_POLL_MSG_FAIL;
+		LOG_ERROR_BDF("retry:%d don't poll pf msg.\n", hw->mbx.retry);
+		goto l_out;
+	}
+
+	ret = sxevf_mbx_msg_rcv(hw, msg, msg_len);
+	if (ret < 0) {
+		LOG_ERROR_BDF("retry:%d read msg fail.\n", hw->mbx.retry);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("vf polled pf msg:0x%x and rcv pf msg done. "
+		"stats req:%d rcv_msg:%d\n",
+		 msg[0], hw->mbx.stats.reqs, hw->mbx.stats.rcv_msgs);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len)
+{
+	s32 ret;
+	u16 msg_type = msg[0] & 0xFF;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	ret = sxevf_send_msg_to_pf(hw, msg, msg_len);
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x len:%u msg send fail.(err:%d).\n",
+			   msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	if (msg_type == SXEVF_RESET) {
+		mdelay(10);
+	}
+
+	ret = sxevf_rcv_msg_from_pf(hw, msg, msg_len);
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x len:%u rcv fail.(err:%d).\n",
+			   msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("send and rcv msg:0x%x len:%u success.\n", msg[0], msg_len);
+
+l_out:
+	return ret;
+}
+
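+/* Negotiate the mailbox api: probe from the newest supported version down
+ * until the pf acks one, otherwise keep the api 1.0 default.
+ */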
+void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter)
+{
+	s32 ret;
+	struct sxevf_hw *hw = &adapter->hw;
+	static const int api[] = {
+		SXEVF_MBX_API_13,
+		SXEVF_MBX_API_12,
+		SXEVF_MBX_API_11,
+		SXEVF_MBX_API_10,
+		SXEVF_MBX_API_NR
+	};
+	u32 idx = 0;
+	struct sxevf_mbx_api_msg msg;
+
+	while (api[idx] != SXEVF_MBX_API_NR) {
+		msg.msg_type = SXEVF_API_NEGOTIATE;
+		msg.api_version = api[idx];
+
+		ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+		if (!ret && (msg.msg_type == (SXEVF_API_NEGOTIATE | SXEVF_MSGTYPE_ACK))) {
+			hw->mbx.api_version = api[idx];
+			break;
+		} else {
+			idx++;
+		}
+	}
+
+	LOG_INFO_BDF("mailbox api version:%u", hw->mbx.api_version);
+
+	return;
+}
+
+s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
+			u8 *tc_num, u8 *default_tc)
+{
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_ring_info_msg req = {};
+	s32 ret;
+
+	req.msg_type = SXEVF_RING_INFO_GET;
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&req,
+				     SXEVF_MSG_NUM(sizeof(req)));
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n",
+			   req.msg_type, ret);
+		goto l_out;
+	}
+
+	if (req.msg_type != (SXEVF_MSGTYPE_ACK | SXEVF_RING_INFO_GET)) {
+		ret = -SXEVF_ERR_REPLY_INVALID;
+		LOG_WARN_BDF("msg:0x%x not expected.(err:%d)\n", req.msg_type, ret);
+		goto l_out;
+	}
+
+	LOG_DEBUG_BDF("original ring info from pf, max_tx_num:%u max_rx_num:%u "
+		 "tc_num:%u default_tc:%u.\n",
+		 req.max_tx_num, req.max_rx_num, req.tc_num, req.default_tc);
+
+	if ((req.max_tx_num == 0) ||
+	    (req.max_tx_num > SXEVF_TXRX_RING_NUM_MAX)) {
+		req.max_tx_num = SXEVF_TXRX_RING_NUM_MAX;
+	}
+
+	if ((req.max_rx_num == 0) ||
+	    (req.max_rx_num > SXEVF_TXRX_RING_NUM_MAX)) {
+		req.max_rx_num = SXEVF_TXRX_RING_NUM_MAX;
+	}
+
+	if (req.tc_num > req.max_rx_num) {
+		req.tc_num = SXEVF_DEFAULT_TC_NUM;
+	}
+	*tc_num = req.tc_num;
+
+	if (req.default_tc > req.max_tx_num) {
+		req.default_tc = 0;
+	}
+
+	*default_tc = req.default_tc;
+
+	adapter->max_rx_queue = req.max_rx_num;
+	adapter->max_tx_queue = req.max_tx_num;
+
+	LOG_INFO_BDF("ring info max_tx_num:%u max_rx_num:%u "
+		 "tc_num:%u default_tc:%u.\n",
+		 req.max_tx_num, req.max_rx_num, req.tc_num, req.default_tc);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_rss_hash_config_get(struct sxevf_adapter *adapter,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_rss_hash_msg msg = {};
+	s32 ret;
+
+	msg.msg_type = SXEVF_RSS_CONF_GET;
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+				     SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n",
+			   msg.msg_type, ret);
+		goto l_out;
+	}
+
+	if (msg.msg_type != (SXEVF_MSGTYPE_ACK | SXEVF_RSS_CONF_GET)) {
+		ret = -SXEVF_ERR_REPLY_INVALID;
+		LOG_WARN_BDF("msg:0x%x not expected.(err:%d)\n", msg.msg_type, ret);
+		goto l_out;
+	}
+
+	if (rss_conf->rss_key != NULL) {
+		memcpy(rss_conf->rss_key, msg.hash_key, SXEVF_RSS_HASH_KEY_SIZE);
+	}
+	rss_conf->rss_hf = msg.rss_hf;
+
+	LOG_INFO_BDF("rss hash conf get success, msg:0x%x rss_hf:0x%"SXE_PRIX64".\n",
+		 msg.msg_type, msg.rss_hf);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_mac_addr_set(struct sxevf_hw *hw, u8 *uc_addr)
+{
+	s32 ret;
+	struct sxevf_uc_addr_msg msg = {};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_DEV_MAC_ADDR_SET;
+	memcpy(msg.uc_addr, uc_addr, SXEVF_MAC_ADDR_LEN);
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	if (!ret && (msg.msg_type ==
+		   (SXEVF_DEV_MAC_ADDR_SET | SXEVF_MSGTYPE_NACK))) {
+		ret = -EPERM;
+		LOG_ERROR_BDF("msg:0x%x uc addr:%pM replyed nack.\n",
+			   msg.msg_type, uc_addr);
+		goto l_out;
+	}
+
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x uc addr:%pM set fail.(err:%d)\n",
+			   msg.msg_type, uc_addr, ret);
+		ret = -EPERM;
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("msg:0x%x uc addr:%pM set success.\n", msg.msg_type, uc_addr);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 mtu)
+{
+	struct sxevf_max_frame_msg msg = {};
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_LPE_SET;
+	msg.max_frame = mtu;
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+					     SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret || ((msg.msg_type & SXEVF_LPE_SET) &&
+		    (msg.msg_type & SXEVF_MSGTYPE_NACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	LOG_INFO_BDF("msg_type:0x%x max_frame:0x%x (ret:%d)\n",
+		   msg.msg_type, msg.max_frame, ret);
+
+	return ret;
+}
+
+s32 sxevf_vlan_id_set(struct sxevf_hw *hw, u32 vlan_id,
+						 bool vlan_on)
+{
+	struct sxevf_vlan_filter_msg msg = {};
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_VLAN_SET;
+	msg.vlan_id = vlan_id;
+	msg.msg_type |= vlan_on << SXEVF_MSGINFO_SHIFT;
+
+	LOG_INFO_BDF("update vlan[%u], vlan on = %s\n", vlan_id, vlan_on ? "yes" : "no");
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+					     SXEVF_MSG_NUM(sizeof(msg)));
+	LOG_INFO_BDF("update vlan[%u] ret = %d\n",vlan_id, ret);
+
+	msg.msg_type &= ~(0xFF << SXEVF_MSGINFO_SHIFT);
+
+	if (ret || (msg.msg_type != (SXEVF_VLAN_SET | SXEVF_MSGTYPE_ACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	return ret;
+}
+
+s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode)
+{
+	struct sxevf_cast_mode_msg msg = {};
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_CAST_MODE_SET;
+	msg.cast_mode = mode;
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret || (msg.msg_type != (SXEVF_CAST_MODE_SET | SXEVF_MSGTYPE_ACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	LOG_INFO_BDF("msg_type:0x%x mode:0x%x msg result:0x%x.(ret:%d)\n",
+		   msg.msg_type, mode, msg.msg_type, ret);
+
+	return ret;
+}
+
+s32 sxevf_uc_addr_add(struct sxevf_hw *hw, u32 index, u8 *mac_addr)
+{
+	s32 ret = 0;
+	struct sxevf_adapter *adapter = hw->adapter;
+	struct sxevf_uc_sync_msg msg = {};
+	u32 check;
+	u32 result;
+
+	msg.msg_type = SXEVF_UC_ADDR_SYNC;
+	msg.index = index;
+	check = *(u32 *)&msg;
+
+	if (mac_addr) {
+		memcpy((u8 *)&msg.addr, mac_addr, SXEVF_MAC_ADDR_LEN);
+	}
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	result = *(u32 *)&msg;
+
+	if (ret || (result != (check | SXEVF_MSGTYPE_ACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	LOG_INFO_BDF("msg_type:0x%x index:%d addr:%pM sync done "
+		 " result:0x%x msg.(ret:%d)\n",
+		 msg.msg_type, index, mac_addr, result, ret);
+
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_msg.h b/drivers/net/sxe/vf/sxevf_msg.h
new file mode 100644
index 0000000000..c3e22d7785
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_msg.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXEVF_MSG_H__
+#define __SXEVF_MSG_H__
+
+struct sxevf_adapter;
+
+#define SXEVF_MAC_ADDR_LEN 6
+
+#define SXEVF_UC_ENTRY_NUM_MAX 10
+#define SXEVF_MC_ENTRY_NUM_MAX 30
+
+#define SXEVF_MBX_MSG_NUM        16
+#define SXEVF_MBX_RETRY_INTERVAL 500
+#define SXEVF_MBX_RETRY_COUNT    2000
+
+#define SXEVF_RST_CHECK_NUM          200
+
+#define SXEVF_DEFAULT_ADDR_LEN       4
+#define SXEVF_MC_FILTER_TYPE_WORD    3
+
+#define SXEVF_RESET               0x01
+#define SXEVF_DEV_MAC_ADDR_SET    0x02
+#define SXEVF_MC_ADDR_SYNC        0x03
+#define SXEVF_VLAN_SET            0x04
+#define SXEVF_LPE_SET             0x05
+
+#define SXEVF_UC_ADDR_SYNC        0x06
+
+#define SXEVF_API_NEGOTIATE       0x08
+
+#define SXEVF_RING_INFO_GET       0x09
+
+#define SXEVF_REDIR_TBL_GET       0x0a
+#define SXEVF_RSS_KEY_GET         0x0b
+#define SXEVF_CAST_MODE_SET       0x0c
+#define SXEVF_LINK_ENABLE_GET     0x0d
+#define SXEVF_IPSEC_ADD           0x0e
+#define SXEVF_IPSEC_DEL           0x0f
+#define SXEVF_RSS_CONF_GET        0x10
+
+#define SXEVF_PF_CTRL_MSG_LINK_UPDATE   0x100
+#define SXEVF_PF_CTRL_MSG_NETDEV_DOWN   0x200
+
+#define SXEVF_PF_CTRL_MSG_REINIT        0x400
+
+#define SXEVF_PF_CTRL_MSG_MASK          0x700
+#define SXEVF_PFREQ_MASK                0xFF00
+
+#define SXEVF_RSS_HASH_KEY_SIZE   (40)
+#define SXEVF_MAX_RETA_ENTRIES    (128)
+#define SXEVF_RETA_ENTRIES_DWORDS (SXEVF_MAX_RETA_ENTRIES / 16)
+
+#define SXEVF_TX_QUEUES      1
+#define SXEVF_RX_QUEUES      2
+#define SXEVF_TRANS_VLAN     3
+#define SXEVF_DEF_QUEUE      4
+
+#define SXEVF_MSGTYPE_ACK    0x80000000
+#define SXEVF_MSGTYPE_NACK   0x40000000
+
+#define SXEVF_MSGINFO_SHIFT  16
+#define SXEVF_MSGINFO_MASK   (0xFF << SXEVF_MSGINFO_SHIFT)
+
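+/* Mailbox messages move as 32-bit words; SXEVF_MSG_NUM() turns a struct
+ * size in bytes into the number of words to transfer.
+ */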
+#define SXEVF_MSG_NUM(size)   DIV_ROUND_UP(size, 4)
+
+enum sxevf_mbx_api_version {
+	SXEVF_MBX_API_10 = 0,
+	SXEVF_MBX_API_11,
+	SXEVF_MBX_API_12,
+	SXEVF_MBX_API_13,
+	SXEVF_MBX_API_14,
+
+	SXEVF_MBX_API_NR,
+};
+
+enum sxevf_cast_mode {
+	SXEVF_CAST_MODE_NONE = 0,
+	SXEVF_CAST_MODE_MULTI,
+	SXEVF_CAST_MODE_ALLMULTI,
+	SXEVF_CAST_MODE_PROMISC,
+};
+
+struct sxevf_rst_msg {
+	u32 msg_type;
+	u32 mac_addr[2];
+	u32 mc_fiter_type;
+};
+
+struct sxevf_mbx_api_msg {
+	u32 msg_type;
+	u32 api_version;
+};
+
+struct sxevf_ring_info_msg {
+	u32 msg_type;
+	u8  max_rx_num;
+	u8  max_tx_num;
+	u8  tc_num;
+	u8  default_tc;
+};
+
+struct sxevf_uc_addr_msg {
+	u32 msg_type;
+	u8 uc_addr[SXEVF_MAC_ADDR_LEN];
+	u16 pad;
+};
+
+struct sxevf_cast_mode_msg {
+	u32 msg_type;
+	u32 cast_mode;
+};
+
+struct sxevf_mc_sync_msg {
+	u16 msg_type;
+	u16 mc_cnt;
+	u16 mc_addr_extract[SXEVF_MC_ENTRY_NUM_MAX];
+};
+
+struct sxevf_uc_sync_msg {
+	u16 msg_type;
+	u16 index;
+	u32 addr[2];
+};
+
+struct sxevf_max_frame_msg {
+	u32 msg_type;
+	u32 max_frame;
+};
+
+struct sxevf_vlan_filter_msg {
+	u32 msg_type;
+	u32 vlan_id;
+};
+
+struct sxevf_redir_tbl_msg {
+	u32 type;
+	u32 entries[SXEVF_RETA_ENTRIES_DWORDS];
+};
+
+struct sxevf_rss_hsah_key_msg {
+	u32 type;
+	u8  hash_key[SXEVF_RSS_HASH_KEY_SIZE];
+};
+
+struct sxevf_rss_hash_msg {
+	u32 msg_type;
+	u8  hash_key[SXEVF_RSS_HASH_KEY_SIZE];
+	u64 rss_hf;
+};
+
+struct sxevf_ipsec_add_msg {
+	u32 msg_type;
+	u32 pf_sa_idx;
+	__be32 spi;
+	u8 flags;
+	u8 proto;
+	u16 family;
+	__be32 addr[4];
+	u32 key[5];
+};
+
+struct sxevf_ipsec_del_msg {
+	u32 msg_type;
+	u32 sa_idx;
+};
+
+void sxevf_mbx_init(struct sxevf_hw *hw);
+
+void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter);
+
+bool sxevf_pf_rst_check(struct sxevf_hw *hw);
+
+s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
+
+s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len);
+
+s32 sxevf_mac_addr_set(struct sxevf_hw *hw, u8 *uc_addr);
+
+s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
+			u8 *tc_num, u8 *default_tc);
+
+s32 sxevf_rss_hash_config_get(struct sxevf_adapter *adapter,
+			struct rte_eth_rss_conf *rss_conf);
+
+s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
+
+s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 mtu);
+
+s32 sxevf_vlan_id_set(struct sxevf_hw *hw, u32 vlan_id,
+						 bool vlan_on);
+s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode);
+
+s32 sxevf_uc_addr_add(struct sxevf_hw *hw, u32 index, u8 *mac_addr);
+
+s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_offload.c b/drivers/net/sxe/vf/sxevf_offload.c
new file mode 100644
index 0000000000..91f8d6d2e6
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_offload.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_offload_common.h"
+#include "sxevf_offload.h"
+
+u64 sxevf_rx_queue_offloads_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_queue_offload_capa_get(dev);
+}
+
+u64 sxevf_rx_port_offloads_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_port_offload_capa_get(dev);
+}
+
+u64 sxevf_tx_queue_offloads_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+u64 sxevf_tx_port_offloads_get(struct rte_eth_dev *dev)
+{
+	return __sxe_tx_port_offload_capa_get(dev);
+}
+
diff --git a/drivers/net/sxe/vf/sxevf_offload.h b/drivers/net/sxe/vf/sxevf_offload.h
new file mode 100644
index 0000000000..9c5ab4cb8d
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_offload.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_OFFLOAD_H__
+#define __SXEVF_OFFLOAD_H__
+
+u64 sxevf_rx_queue_offloads_get(struct rte_eth_dev *dev);
+
+u64 sxevf_rx_port_offloads_get(struct rte_eth_dev *dev);
+
+u64 sxevf_tx_queue_offloads_get(struct rte_eth_dev *dev);
+
+u64 sxevf_tx_port_offloads_get(struct rte_eth_dev *dev);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_queue.c b/drivers/net/sxe/vf/sxevf_queue.c
new file mode 100644
index 0000000000..5e7d9ec17d
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_queue.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_core.h>
+#include <rte_ethdev.h>
+
+#include "sxe_dpdk_version.h"
+#include "sxevf_rx.h"
+#include "sxevf_tx.h"
+#include "sxe_logs.h"
+#include "sxevf.h"
+#include "sxe_queue_common.h"
+#include "sxevf_hw.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+#include "sxevf_queue.h"
+#include "sxevf_msg.h"
+
+s32 __rte_cold sxevf_rx_queue_mbufs_alloc(sxevf_rx_queue_s *rxq)
+{
+	return __sxe_rx_queue_mbufs_alloc(rxq);
+}
+
+s32 __rte_cold sxevf_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 desc_num,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw     *hw = &adapter->hw;
+	struct rx_setup rx_setup = {};
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rx_setup.desc_num = desc_num;
+	rx_setup.queue_idx = queue_idx;
+	rx_setup.socket_id = socket_id;
+	rx_setup.mp = mp;
+	rx_setup.dev = dev;
+	rx_setup.reg_base_addr = hw->reg_base_addr;
+	rx_setup.rx_conf = rx_conf;
+	rx_setup.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed;
+
+	ret = __sxe_rx_queue_setup(&rx_setup, true);
+	if (ret) {
+		LOG_ERROR_BDF("rx queue setup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+s32 __rte_cold sxevf_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct tx_setup tx_setup;
+
+	tx_setup.dev = dev;
+	tx_setup.desc_num = ring_depth;
+	tx_setup.queue_idx = tx_queue_id;
+	tx_setup.socket_id = socket_id;
+	tx_setup.reg_base_addr = hw->reg_base_addr;
+	tx_setup.tx_conf = tx_conf;
+
+	ret = __sxe_tx_queue_setup(&tx_setup, true);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "rx queue setup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxevf_rx_queue_release(void *rxq)
+{
+	__sxe_rx_queue_free(rxq);
+}
+
+void __rte_cold sxevf_tx_queue_release(void *txq)
+{
+	__sxe_tx_queue_free(txq);
+	return;
+}
+
+#else
+void __rte_cold
+sxevf_rx_queue_release(struct rte_eth_dev *dev, u16 queue_id)
+{
+	__sxe_rx_queue_free(dev->data->rx_queues[queue_id]);
+}
+
+void __rte_cold
+sxevf_tx_queue_release(struct rte_eth_dev *dev, u16 queue_id)
+{
+	__sxe_tx_queue_free(dev->data->tx_queues[queue_id]);
+	return;
+}
+#endif
+
+void sxevf_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	__sxe_rx_queue_info_get(dev, queue_id, qinfo);
+
+	return;
+}
+
+void sxevf_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info)
+{
+	__sxe_tx_queue_info_get(dev, queue_id, q_info);
+
+	return;
+}
+
+s32 sxevf_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	s32 ret;
+
+	/* Tx queue cleanup */
+	ret = __sxe_tx_done_cleanup(tx_queue, free_cnt);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "tx cleanup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+s32 sxevf_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size)
+{
+	s32 ret = -ENOTSUP;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(reta_conf);
+	RTE_SET_USED(reta_size);
+
+	if (!dev->data->dev_started) {
+		PMD_LOG_ERR(DRV,
+			"port %d must be started before rss reta update",
+			 dev->data->port_id);
+		ret = -EIO;
+		goto l_out;
+	}
+
+	PMD_LOG_ERR(DRV, "rss reta update is not supported on vf.(err:%d)", ret);
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size)
+{
+	s32 ret = 0;
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(reta_conf);
+
+	if (reta_size != 0) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "vf rss reta size:0, not support query.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+s32 sxevf_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf)
+{
+	s32 ret = 0;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+	ret = sxevf_rss_hash_config_get(adapter, rss_conf);
+	if (ret)
+		LOG_ERROR_BDF("rss hash config get failed.(err:%d)", ret);
+
+	return ret;
+}
+
+s32 sxevf_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	s32 ret = -ENOTSUP;
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(rss_conf);
+
+	PMD_LOG_ERR(DRV, "rss hash update is not supported on vf.(err:%d)", ret);
+
+	return ret;
+}
+
+void sxevf_secondary_proc_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	bool rx_vec_allowed = false;
+
+	__sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &rx_vec_allowed);
+}
+
+void __rte_cold sxevf_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	__sxe_txrx_queues_clear(dev, rx_batch_alloc_allowed);
+}
+
+void sxevf_queues_free(struct rte_eth_dev *dev)
+{
+	__sxe_queues_free(dev);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_queue.h b/drivers/net/sxe/vf/sxevf_queue.h
new file mode 100644
index 0000000000..1a061231a5
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_queue.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_QUEUE_H__
+#define __SXEVF_QUEUE_H__
+
+#include "sxe_dpdk_version.h"
+#include "sxe_types.h"
+#include "sxe_queue_common.h"
+
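+/* The VF reuses the PF ring layouts; alias the shared queue types under VF names. */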
+typedef union sxe_tx_data_desc sxevf_tx_data_desc_u;
+typedef struct sxe_rx_buffer   sxevf_rx_buffer_s;
+typedef union sxe_rx_data_desc sxevf_rx_data_desc_u;
+typedef struct sxe_tx_queue    sxevf_tx_queue_s;
+typedef struct sxe_rx_queue    sxevf_rx_queue_s;
+
+s32 __rte_cold sxevf_rx_queue_mbufs_alloc(sxevf_rx_queue_s *rxq);
+
+s32 __rte_cold sxevf_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 desc_num,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp);
+
+s32 __rte_cold sxevf_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf);
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxevf_tx_queue_release(void *txq);
+
+void __rte_cold sxevf_rx_queue_release(void *rxq);
+
+#else
+void __rte_cold sxevf_tx_queue_release(struct rte_eth_dev *dev, u16 queue_id);
+
+void __rte_cold sxevf_rx_queue_release(struct rte_eth_dev *dev, u16 queue_id);
+#endif
+
+void sxevf_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void sxevf_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info);
+
+s32 sxevf_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+s32 sxevf_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size);
+
+s32 sxevf_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size);
+
+s32 sxevf_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf);
+
+s32 sxevf_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf);
+
+void sxevf_secondary_proc_init(struct rte_eth_dev *eth_dev);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxevf_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 sxevf_rx_descriptor_status(void *rx_queue, u16 offset);
+
+u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts);
+
+u16 sxevf_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+s32 sxevf_tx_descriptor_status(void *tx_queue, u16 offset);
+
+void __rte_cold sxevf_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void sxevf_queues_free(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_rx.c b/drivers/net/sxe/vf/sxevf_rx.c
new file mode 100644
index 0000000000..53b9168345
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_rx.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_common.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxevf.h"
+#include "sxevf_msg.h"
+#include "sxevf_rx.h"
+#include "sxe_rx_common.h"
+#include "sxevf_queue.h"
+#include "sxevf_rx.h"
+#include "sxe_ethdev.h"
+
+#define SXEVF_RX_HDR_SIZE  256
+
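+/* Program PSRTYPE.RQPL with the Rx queue-pair count (queues / 2) used for RSS. */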
+static void sxevf_rss_bit_num_configure(struct sxevf_hw *hw, u16 rx_queues_num)
+{
+	u32 psrtype;
+
+	psrtype = (rx_queues_num >> 1) << SXEVF_PSRTYPE_RQPL_SHIFT;
+
+	sxevf_rss_bit_num_set(hw, psrtype);
+}
+
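+/*
+ * Enable Rx scatter when the application requests it or a full frame plus
+ * two VLAN tags cannot fit into one mbuf data buffer; promote per-queue
+ * VLAN strip to a port-level offload.
+ */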
+static void sxevf_rxmode_offload_configure(struct rte_eth_dev *eth_dev,
+						u64 queue_offload, u32 buf_size)
+{
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	u32 frame_size = SXE_GET_FRAME_SIZE(eth_dev);
+
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
+	    ((frame_size + 2 * SXEVF_VLAN_TAG_SIZE) > buf_size)) {
+		if (!eth_dev->data->scattered_rx) {
+			PMD_LOG_WARN(DRV, "rxmode offload:0x%"SXE_PRIX64" max_rx_pkt_len:%u "
+				    "buf_size:%u enable rx scatter",
+				    rxmode->offloads,
+				    frame_size,
+				    buf_size);
+		}
+		eth_dev->data->scattered_rx = 1;
+	}
+
+	if (queue_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+}
+
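+/*
+ * Per-queue Rx init: allocate ring mbufs, program the descriptor ring and
+ * receive control registers, then apply offload-driven rxmode adjustments
+ * and select the Rx burst function.
+ */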
+static s32 sxevf_rx_queue_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_rx_queue_s *rxq;
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	s32 ret = 0;
+	u16 i;
+	u32 len;
+	u32 buf_size;
+
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		len = rxq->ring_depth * sizeof(sxevf_rx_data_desc_u);
+
+		ret = sxevf_rx_queue_mbufs_alloc(rxq);
+		if (ret) {
+			LOG_ERROR_BDF("rx queue num:%u queue id:%u alloc "
+				      "rx buffer fail.(err:%d)",
+				      eth_dev->data->nb_rx_queues, i, ret);
+			goto l_out;
+		}
+
+		buf_size = (u16)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+
+		sxevf_rx_ring_desc_configure(hw, len, rxq->base_addr, rxq->reg_idx);
+
+		sxevf_rx_rcv_ctl_configure(hw, rxq->reg_idx, SXEVF_RX_HDR_SIZE,
+					   buf_size, rxq->drop_en);
+
+		sxevf_rxmode_offload_configure(eth_dev, rxq->offloads, buf_size);
+	}
+
+	sxevf_rss_bit_num_configure(hw, eth_dev->data->nb_rx_queues);
+
+	sxevf_rx_function_set(eth_dev);
+
+l_out:
+	return ret;
+}
+
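+/*
+ * Rx path bring-up: the queue count must be a power of two and within the
+ * PF-granted maximum; the max frame size is applied through
+ * sxevf_rx_max_frame_set() before the queues are configured.
+ */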
+s32 sxevf_rx_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 frame_size = SXE_GET_FRAME_SIZE(eth_dev);
+	u32 mtu = frame_size - SXE_ETH_OVERHEAD;
+	s32 ret;
+
+	if (!rte_is_power_of_2(eth_dev->data->nb_rx_queues)) {
+		ret = -SXEVF_ERR_PARAM;
+		LOG_ERROR_BDF("invalid rx queue num:%u.",
+			 eth_dev->data->nb_rx_queues);
+		goto l_out;
+	}
+
+	if (eth_dev->data->nb_rx_queues > adapter->max_rx_queue) {
+		ret = -SXEVF_ERR_PARAM;
+		LOG_ERROR_BDF("invalid rx queue num:%u exceed max rx queue:%u ",
+			eth_dev->data->nb_rx_queues,
+			adapter->max_rx_queue);
+		goto l_out;
+	}
+
+	ret = sxevf_rx_max_frame_set(hw, mtu);
+	if (ret) {
+		LOG_ERROR_BDF("max frame size:%u set fail.(err:%d)",
+			      frame_size, ret);
+		goto l_out;
+	}
+
+	ret = sxevf_rx_queue_configure(eth_dev);
+	if (ret) {
+		LOG_ERROR_BDF("rx queue num:%u configure fail.(err:%u)",
+			      eth_dev->data->nb_rx_queues, ret);
+	}
+
+l_out:
+	return ret;
+}
+
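+/* Select the Rx burst function; the vectorized path is only considered when SIMD support is compiled in. */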
+void __rte_cold sxevf_rx_function_set(struct rte_eth_dev *dev)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	__sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+#else
+	__sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, NULL);
+#endif
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxevf_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	return __sxe_rx_descriptor_done(rx_queue, offset);
+}
+#endif
+
+s32 sxevf_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	return __sxe_rx_descriptor_status(rx_queue, offset);
+}
+
+u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts)
+{
+	return __sxe_pkts_recv(rx_queue, rx_pkts, num_pkts);
+}
+
+const u32 *sxevf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	return __sxe_dev_supported_ptypes_get(dev);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_rx.h b/drivers/net/sxe/vf/sxevf_rx.h
new file mode 100644
index 0000000000..8e862b7e01
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_RX_H__
+#define __SXEVF_RX_H__
+
+#include "sxe_queue_common.h"
+
+#define SXEVF_RX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxevf_rx_data_desc_u))
+
+s32 sxevf_rx_configure(struct rte_eth_dev *eth_dev);
+
+const u32 *sxevf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+void __rte_cold sxevf_rx_function_set(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_stats.c b/drivers/net/sxe/vf/sxevf_stats.c
new file mode 100644
index 0000000000..f82ccf1fd7
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_stats.c
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+
+#include "sxevf_stats.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxevf.h"
+
+#if defined DPDK_19_11_6
+#include <rte_string_fns.h>
+#endif
+
+#define SXE_HW_XSTATS_CNT (sizeof(sxevf_xstats_field) / \
+		      sizeof(sxevf_xstats_field[0]))
+
+static const struct sxevf_stats_field sxevf_xstats_field[] = {
+	{"rx_multicast_packets", offsetof(struct sxevf_hw_stats, vfmprc)},
+};
+
+#ifdef SXE_TEST
+STATIC u32 sxevf_xstats_cnt_get(void)
+{
+	return SXE_HW_XSTATS_CNT;
+}
+#endif
+
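+/*
+ * Refresh the hardware counters and translate them into rte_eth_stats;
+ * obytes subtracts the Ethernet CRC bytes per transmitted packet.
+ */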
+s32 sxevf_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	sxevf_packet_stats_get(hw, &stats_info->hw_stats);
+
+	if (stats == NULL) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "input param stats is null.");
+		goto l_out;
+	}
+
+	stats->ipackets = stats_info->hw_stats.vfgprc;
+	stats->ibytes   = stats_info->hw_stats.vfgorc;
+	stats->opackets = stats_info->hw_stats.vfgptc;
+	stats->obytes   = stats_info->hw_stats.vfgotc - stats->opackets * RTE_ETHER_CRC_LEN;
+
+l_out:
+	return ret;
+}
+
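+/*
+ * Refresh the accumulated counters once more (the NULL stats pointer skips
+ * the copy-out), then zero the software copies so subsequent reads start
+ * from this baseline.
+ */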
+s32 sxevf_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+
+	sxevf_eth_stats_get(eth_dev, NULL);
+
+	stats_info->hw_stats.vfgprc = 0;
+	stats_info->hw_stats.vfgorc = 0;
+	stats_info->hw_stats.vfgptc = 0;
+	stats_info->hw_stats.vfgotc = 0;
+	stats_info->hw_stats.vfmprc = 0;
+
+	return 0;
+}
+
+static s32 sxevf_hw_xstat_offset_get(u32 id, u32 *offset)
+{
+	s32 ret = 0;
+	u32 size = SXE_HW_XSTATS_CNT;
+
+	if (id < size) {
+		*offset = sxevf_xstats_field[id].offset;
+	} else {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+	}
+
+	return ret;
+}
+
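+/*
+ * rte_eth xstats contract: return the required count when usr_cnt is too
+ * small, 0 when xstats is NULL, otherwise fill the array and return the
+ * number of entries written.
+ */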
+s32 sxevf_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 usr_cnt)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 i;
+	u32 cnt;
+	s32 ret;
+	u32 offset;
+
+	cnt = SXE_HW_XSTATS_CNT;
+	PMD_LOG_INFO(DRV, "xstat size:%u. hw xstat field cnt:%lu ",
+		    cnt,
+		    SXE_HW_XSTATS_CNT);
+
+	if (usr_cnt < cnt) {
+		ret = cnt;
+		PMD_LOG_ERR(DRV, "user usr_cnt:%u less than stats cnt:%u.",
+			    usr_cnt, cnt);
+		goto l_out;
+	}
+
+	sxevf_packet_stats_get(hw, &stats_info->hw_stats);
+
+	if (xstats == NULL) {
+		ret = 0;
+		PMD_LOG_ERR(DRV, "usr_cnt:%u, input param xstats is null.",
+		            usr_cnt);
+		goto l_out;
+	}
+
+	cnt = 0;
+	for (i = 0; i < SXE_HW_XSTATS_CNT; i++) {
+		sxevf_hw_xstat_offset_get(i, &offset);
+		xstats[cnt].value = *(ulong *)(((s8 *)(&stats_info->hw_stats)) + offset);
+		xstats[cnt].id = cnt;
+		cnt++;
+	}
+
+	ret = SXE_HW_XSTATS_CNT;
+
+l_out:
+	return ret;
+}
+
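+/* A NULL xstats_names array is a query for the required array size. */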
+s32 sxevf_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int usr_cnt)
+{
+	u32 i = 0;
+	u32 cnt = 0;
+	s32 ret;
+
+	if (xstats_names == NULL) {
+		ret = SXE_HW_XSTATS_CNT;
+		PMD_LOG_INFO(DRV, "xstats field size:%u.", ret);
+		goto l_out;
+	} else if (usr_cnt < SXE_HW_XSTATS_CNT) {
+		ret = -ENOMEM;
+		PMD_LOG_ERR(DRV, "usr_cnt:%u invalid.(err:%d).", usr_cnt, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_HW_XSTATS_CNT; i++) {
+		strlcpy(xstats_names[cnt].name,
+			sxevf_xstats_field[i].name,
+			sizeof(xstats_names[cnt].name));
+		cnt++;
+	}
+
+	ret = cnt;
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_stats.h b/drivers/net/sxe/vf/sxevf_stats.h
new file mode 100644
index 0000000000..bdfd5178fd
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_stats.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_STATS_H__
+#define __SXEVF_STATS_H__
+
+#include "sxevf_hw.h"
+
+struct sxevf_stats_field {
+	s8  name[RTE_ETH_XSTATS_NAME_SIZE];
+	u32 offset;
+};
+
+struct sxevf_stats_info {
+	struct sxevf_hw_stats hw_stats;
+};
+
+s32 sxevf_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats);
+
+s32 sxevf_dev_stats_reset(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 usr_cnt);
+
+s32 sxevf_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int usr_cnt);
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_tx.c b/drivers/net/sxe/vf/sxevf_tx.c
new file mode 100644
index 0000000000..667a165c64
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_tx.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+
+#include "sxe_logs.h"
+#include "sxevf.h"
+#include "sxevf_tx.h"
+#include "sxevf_queue.h"
+#include "sxe_tx_common.h"
+
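+/*
+ * Program every Tx queue's descriptor ring base/length and its
+ * prefetch/host/write-back thresholds.
+ */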
+void sxevf_tx_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_tx_queue_s *txq;
+	u16 i;
+	u32 len;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		len = txq->ring_depth * sizeof(sxevf_tx_data_desc_u);
+		sxevf_tx_desc_configure(hw, len, txq->base_addr, txq->reg_idx);
+
+		sxevf_tx_queue_thresh_set(hw, txq->reg_idx,
+			txq->pthresh, txq->hthresh, txq->wthresh);
+	}
+
+	LOG_DEBUG_BDF("tx queue num:%u tx configure done.",
+			eth_dev->data->nb_tx_queues);
+}
+
+s32 sxevf_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	return __sxe_tx_descriptor_status(tx_queue, offset);
+}
+
+u16 sxevf_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	return __sxe_pkts_xmit_with_offload(tx_queue, tx_pkts, pkts_num);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_tx.h b/drivers/net/sxe/vf/sxevf_tx.h
new file mode 100644
index 0000000000..858341db97
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_tx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_TX_H__
+#define __SXEVF_TX_H__
+
+#include "sxe_queue_common.h"
+
+#define SXEVF_TX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxevf_tx_data_desc_u))
+
+void sxevf_tx_configure(struct rte_eth_dev *eth_dev);
+
+#endif
-- 
2.45.2.windows.1



end of thread, other threads:[~2024-09-09 11:35 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20240826081101.2313>
2024-09-06 23:37 ` [PATCH v2] net/sxe: add net driver sxe Jie Liu
2024-09-06 23:38   ` Jie Liu
2024-08-26  8:10 [PATCH] " Jie Liu
2024-09-06 23:39 ` [PATCH v2] " Jie Liu
2024-09-09 11:35   ` Thomas Monjalon
