DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH] net/sxe: add net driver sxe
@ 2024-08-26  8:10 Jie Liu
  2024-09-06 23:39 ` [PATCH v2] " Jie Liu
  0 siblings, 1 reply; 3+ messages in thread
From: Jie Liu @ 2024-08-26  8:10 UTC (permalink / raw)
  To: anatoly.burakov; +Cc: dev, Jie Liu

Add the complete PMD library and documentation build infrastructure,
and claim maintainership of the sxe PMD.

Signed-off-by: Jie Liu <liujie5@linkdatatechnology.com>
---
 MAINTAINERS                                |    6 +
 app/test-pmd/meson.build                   |    3 +
 doc/guides/nics/features/sxe.ini           |   81 +
 doc/guides/nics/features/sxe_vf.ini        |   39 +
 doc/guides/nics/index.rst                  |    1 +
 doc/guides/nics/sxe.rst                    |   71 +
 drivers/net/meson.build                    |    1 +
 drivers/net/sxe/Makefile                   |  105 +
 drivers/net/sxe/base/docker_version        |    4 +
 drivers/net/sxe/base/sxe_common.c          |   66 +
 drivers/net/sxe/base/sxe_common.h          |   15 +
 drivers/net/sxe/base/sxe_compat_platform.h |  143 +
 drivers/net/sxe/base/sxe_compat_version.h  |  304 +
 drivers/net/sxe/base/sxe_dpdk_version.h    |   20 +
 drivers/net/sxe/base/sxe_errno.h           |   61 +
 drivers/net/sxe/base/sxe_hw.c              | 6647 ++++++++++++++++++++
 drivers/net/sxe/base/sxe_hw.h              | 1505 +++++
 drivers/net/sxe/base/sxe_logs.h            |  299 +
 drivers/net/sxe/base/sxe_offload_common.c  |   66 +
 drivers/net/sxe/base/sxe_offload_common.h  |   15 +
 drivers/net/sxe/base/sxe_queue_common.c    |  450 ++
 drivers/net/sxe/base/sxe_queue_common.h    |  236 +
 drivers/net/sxe/base/sxe_rx_common.c       |  349 +
 drivers/net/sxe/base/sxe_rx_common.h       |   24 +
 drivers/net/sxe/base/sxe_tx_common.c       |   49 +
 drivers/net/sxe/base/sxe_tx_common.h       |   12 +
 drivers/net/sxe/base/sxe_types.h           |   40 +
 drivers/net/sxe/base/sxevf_hw.c            | 1057 ++++
 drivers/net/sxe/base/sxevf_hw.h            |  351 ++
 drivers/net/sxe/base/sxevf_regs.h          |  119 +
 drivers/net/sxe/include/drv_msg.h          |   22 +
 drivers/net/sxe/include/readme.txt         |    0
 drivers/net/sxe/include/sxe/mgl/sxe_port.h |   40 +
 drivers/net/sxe/include/sxe/sxe_cli.h      |  213 +
 drivers/net/sxe/include/sxe/sxe_hdc.h      |   43 +
 drivers/net/sxe/include/sxe/sxe_ioctl.h    |   21 +
 drivers/net/sxe/include/sxe/sxe_msg.h      |  139 +
 drivers/net/sxe/include/sxe/sxe_regs.h     | 1276 ++++
 drivers/net/sxe/include/sxe_type.h         |  794 +++
 drivers/net/sxe/include/sxe_version.h      |   32 +
 drivers/net/sxe/meson.build                |   55 +
 drivers/net/sxe/pf/rte_pmd_sxe.h           |   33 +
 drivers/net/sxe/pf/sxe.h                   |  117 +
 drivers/net/sxe/pf/sxe_dcb.c               | 1014 +++
 drivers/net/sxe/pf/sxe_dcb.h               |   99 +
 drivers/net/sxe/pf/sxe_ethdev.c            | 1109 ++++
 drivers/net/sxe/pf/sxe_ethdev.h            |   27 +
 drivers/net/sxe/pf/sxe_filter.c            |  826 +++
 drivers/net/sxe/pf/sxe_filter.h            |  119 +
 drivers/net/sxe/pf/sxe_flow_ctrl.c         |  100 +
 drivers/net/sxe/pf/sxe_flow_ctrl.h         |   16 +
 drivers/net/sxe/pf/sxe_irq.c               |  562 ++
 drivers/net/sxe/pf/sxe_irq.h               |   56 +
 drivers/net/sxe/pf/sxe_main.c              |  326 +
 drivers/net/sxe/pf/sxe_offload.c           |  365 ++
 drivers/net/sxe/pf/sxe_offload.h           |   51 +
 drivers/net/sxe/pf/sxe_phy.c               |  993 +++
 drivers/net/sxe/pf/sxe_phy.h               |  121 +
 drivers/net/sxe/pf/sxe_pmd_hdc.c           |  717 +++
 drivers/net/sxe/pf/sxe_pmd_hdc.h           |   44 +
 drivers/net/sxe/pf/sxe_ptp.c               |  204 +
 drivers/net/sxe/pf/sxe_ptp.h               |   26 +
 drivers/net/sxe/pf/sxe_queue.c             |  856 +++
 drivers/net/sxe/pf/sxe_queue.h             |  147 +
 drivers/net/sxe/pf/sxe_rx.c                | 1567 +++++
 drivers/net/sxe/pf/sxe_rx.h                |  195 +
 drivers/net/sxe/pf/sxe_stats.c             |  593 ++
 drivers/net/sxe/pf/sxe_stats.h             |   79 +
 drivers/net/sxe/pf/sxe_tx.c                | 1069 ++++
 drivers/net/sxe/pf/sxe_tx.h                |   31 +
 drivers/net/sxe/pf/sxe_vf.c                | 1275 ++++
 drivers/net/sxe/pf/sxe_vf.h                |  221 +
 drivers/net/sxe/rte_pmd_sxe_version.map    |   10 +
 drivers/net/sxe/sxe_drv_type.h             |   23 +
 drivers/net/sxe/version.map                |   24 +
 drivers/net/sxe/vf/sxevf.h                 |   44 +
 drivers/net/sxe/vf/sxevf_ethdev.c          |  811 +++
 drivers/net/sxe/vf/sxevf_ethdev.h          |   17 +
 drivers/net/sxe/vf/sxevf_filter.c          |  511 ++
 drivers/net/sxe/vf/sxevf_filter.h          |   79 +
 drivers/net/sxe/vf/sxevf_irq.c             |  455 ++
 drivers/net/sxe/vf/sxevf_irq.h             |   40 +
 drivers/net/sxe/vf/sxevf_main.c            |   94 +
 drivers/net/sxe/vf/sxevf_msg.c             |  646 ++
 drivers/net/sxe/vf/sxevf_msg.h             |  201 +
 drivers/net/sxe/vf/sxevf_offload.c         |   36 +
 drivers/net/sxe/vf/sxevf_offload.h         |   17 +
 drivers/net/sxe/vf/sxevf_queue.c           |  236 +
 drivers/net/sxe/vf/sxevf_queue.h           |   82 +
 drivers/net/sxe/vf/sxevf_rx.c              |  182 +
 drivers/net/sxe/vf/sxevf_rx.h              |   19 +
 drivers/net/sxe/vf/sxevf_stats.c           |  166 +
 drivers/net/sxe/vf/sxevf_stats.h           |   32 +
 drivers/net/sxe/vf/sxevf_tx.c              |   48 +
 drivers/net/sxe/vf/sxevf_tx.h              |   15 +
 95 files changed, 31520 insertions(+)
 create mode 100644 doc/guides/nics/features/sxe.ini
 create mode 100644 doc/guides/nics/features/sxe_vf.ini
 create mode 100644 doc/guides/nics/sxe.rst
 create mode 100644 drivers/net/sxe/Makefile
 create mode 100644 drivers/net/sxe/base/docker_version
 create mode 100644 drivers/net/sxe/base/sxe_common.c
 create mode 100644 drivers/net/sxe/base/sxe_common.h
 create mode 100644 drivers/net/sxe/base/sxe_compat_platform.h
 create mode 100644 drivers/net/sxe/base/sxe_compat_version.h
 create mode 100644 drivers/net/sxe/base/sxe_dpdk_version.h
 create mode 100644 drivers/net/sxe/base/sxe_errno.h
 create mode 100644 drivers/net/sxe/base/sxe_hw.c
 create mode 100644 drivers/net/sxe/base/sxe_hw.h
 create mode 100644 drivers/net/sxe/base/sxe_logs.h
 create mode 100644 drivers/net/sxe/base/sxe_offload_common.c
 create mode 100644 drivers/net/sxe/base/sxe_offload_common.h
 create mode 100644 drivers/net/sxe/base/sxe_queue_common.c
 create mode 100644 drivers/net/sxe/base/sxe_queue_common.h
 create mode 100644 drivers/net/sxe/base/sxe_rx_common.c
 create mode 100644 drivers/net/sxe/base/sxe_rx_common.h
 create mode 100644 drivers/net/sxe/base/sxe_tx_common.c
 create mode 100644 drivers/net/sxe/base/sxe_tx_common.h
 create mode 100644 drivers/net/sxe/base/sxe_types.h
 create mode 100644 drivers/net/sxe/base/sxevf_hw.c
 create mode 100644 drivers/net/sxe/base/sxevf_hw.h
 create mode 100644 drivers/net/sxe/base/sxevf_regs.h
 create mode 100644 drivers/net/sxe/include/drv_msg.h
 create mode 100644 drivers/net/sxe/include/readme.txt
 create mode 100644 drivers/net/sxe/include/sxe/mgl/sxe_port.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_cli.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_hdc.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_ioctl.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_msg.h
 create mode 100644 drivers/net/sxe/include/sxe/sxe_regs.h
 create mode 100644 drivers/net/sxe/include/sxe_type.h
 create mode 100644 drivers/net/sxe/include/sxe_version.h
 create mode 100644 drivers/net/sxe/meson.build
 create mode 100644 drivers/net/sxe/pf/rte_pmd_sxe.h
 create mode 100644 drivers/net/sxe/pf/sxe.h
 create mode 100644 drivers/net/sxe/pf/sxe_dcb.c
 create mode 100644 drivers/net/sxe/pf/sxe_dcb.h
 create mode 100644 drivers/net/sxe/pf/sxe_ethdev.c
 create mode 100644 drivers/net/sxe/pf/sxe_ethdev.h
 create mode 100644 drivers/net/sxe/pf/sxe_filter.c
 create mode 100644 drivers/net/sxe/pf/sxe_filter.h
 create mode 100644 drivers/net/sxe/pf/sxe_flow_ctrl.c
 create mode 100644 drivers/net/sxe/pf/sxe_flow_ctrl.h
 create mode 100644 drivers/net/sxe/pf/sxe_irq.c
 create mode 100644 drivers/net/sxe/pf/sxe_irq.h
 create mode 100644 drivers/net/sxe/pf/sxe_main.c
 create mode 100644 drivers/net/sxe/pf/sxe_offload.c
 create mode 100644 drivers/net/sxe/pf/sxe_offload.h
 create mode 100644 drivers/net/sxe/pf/sxe_phy.c
 create mode 100644 drivers/net/sxe/pf/sxe_phy.h
 create mode 100644 drivers/net/sxe/pf/sxe_pmd_hdc.c
 create mode 100644 drivers/net/sxe/pf/sxe_pmd_hdc.h
 create mode 100644 drivers/net/sxe/pf/sxe_ptp.c
 create mode 100644 drivers/net/sxe/pf/sxe_ptp.h
 create mode 100644 drivers/net/sxe/pf/sxe_queue.c
 create mode 100644 drivers/net/sxe/pf/sxe_queue.h
 create mode 100644 drivers/net/sxe/pf/sxe_rx.c
 create mode 100644 drivers/net/sxe/pf/sxe_rx.h
 create mode 100644 drivers/net/sxe/pf/sxe_stats.c
 create mode 100644 drivers/net/sxe/pf/sxe_stats.h
 create mode 100644 drivers/net/sxe/pf/sxe_tx.c
 create mode 100644 drivers/net/sxe/pf/sxe_tx.h
 create mode 100644 drivers/net/sxe/pf/sxe_vf.c
 create mode 100644 drivers/net/sxe/pf/sxe_vf.h
 create mode 100644 drivers/net/sxe/rte_pmd_sxe_version.map
 create mode 100644 drivers/net/sxe/sxe_drv_type.h
 create mode 100644 drivers/net/sxe/version.map
 create mode 100644 drivers/net/sxe/vf/sxevf.h
 create mode 100644 drivers/net/sxe/vf/sxevf_ethdev.c
 create mode 100644 drivers/net/sxe/vf/sxevf_ethdev.h
 create mode 100644 drivers/net/sxe/vf/sxevf_filter.c
 create mode 100644 drivers/net/sxe/vf/sxevf_filter.h
 create mode 100644 drivers/net/sxe/vf/sxevf_irq.c
 create mode 100644 drivers/net/sxe/vf/sxevf_irq.h
 create mode 100644 drivers/net/sxe/vf/sxevf_main.c
 create mode 100644 drivers/net/sxe/vf/sxevf_msg.c
 create mode 100644 drivers/net/sxe/vf/sxevf_msg.h
 create mode 100644 drivers/net/sxe/vf/sxevf_offload.c
 create mode 100644 drivers/net/sxe/vf/sxevf_offload.h
 create mode 100644 drivers/net/sxe/vf/sxevf_queue.c
 create mode 100644 drivers/net/sxe/vf/sxevf_queue.h
 create mode 100644 drivers/net/sxe/vf/sxevf_rx.c
 create mode 100644 drivers/net/sxe/vf/sxevf_rx.h
 create mode 100644 drivers/net/sxe/vf/sxevf_stats.c
 create mode 100644 drivers/net/sxe/vf/sxevf_stats.h
 create mode 100644 drivers/net/sxe/vf/sxevf_tx.c
 create mode 100644 drivers/net/sxe/vf/sxevf_tx.h

diff --git a/MAINTAINERS b/MAINTAINERS
index c5a703b5c0..03adb4036f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -981,6 +981,12 @@ F: drivers/net/sfc/
 F: doc/guides/nics/sfc_efx.rst
 F: doc/guides/nics/features/sfc.ini
 
+Linkdata sxe
+M: Jie Liu <liujie5@linkdatatechnology.com>
+F: drivers/net/sxe/
+F: doc/guides/nics/sxe.rst
+F: doc/guides/nics/features/sxe*.ini
+
 Wangxun ngbe
 M: Jiawen Wu <jiawenwu@trustnetic.com>
 F: drivers/net/ngbe/
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 719f875be0..34ca42bd55 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -72,6 +72,9 @@ endif
 if dpdk_conf.has('RTE_NET_DPAA')
     deps += ['bus_dpaa', 'mempool_dpaa', 'net_dpaa']
 endif
+if dpdk_conf.has('RTE_NET_SXE')
+    deps += 'net_sxe'
+endif
 
 # Driver-specific commands are located in driver directories.
 includes = include_directories('.')
diff --git a/doc/guides/nics/features/sxe.ini b/doc/guides/nics/features/sxe.ini
new file mode 100644
index 0000000000..5a18808ccf
--- /dev/null
+++ b/doc/guides/nics/features/sxe.ini
@@ -0,0 +1,81 @@
+;
+; Supported features of the 'sxe' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities   = Y
+Link speed configuration = Y
+Link status          = Y
+Link status event    = Y
+Rx interrupt         = Y
+Queue start/stop     = Y
+Power mgmt address monitor = Y
+MTU update           = Y
+Scattered Rx         = Y
+LRO                  = Y
+TSO                  = Y
+Promiscuous mode     = Y
+Allmulticast mode    = Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
+VMDq                 = Y
+SR-IOV               = Y
+DCB                  = Y
+VLAN filter          = Y
+Flow control         = Y
+Rate limitation      = Y
+Traffic manager      = Y
+Inline crypto        = Y
+CRC offload          = P
+VLAN offload         = P
+QinQ offload         = P
+L3 checksum offload  = P
+L4 checksum offload  = P
+Inner L3 checksum    = P
+Inner L4 checksum    = P
+Packet type parsing  = Y
+Timesync             = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats          = Y
+Extended stats       = Y
+Stats per queue      = Y
+FW version           = Y
+EEPROM dump          = Y
+Module EEPROM dump   = Y
+Registers dump       = Y
+Multiprocess aware   = Y
+FreeBSD              = Y
+Linux                = Y
+ARMv8                = Y
+LoongArch64          = Y
+rv64                 = Y
+x86-32               = Y
+x86-64               = Y
+
+[rte_flow items]
+eth                  = P
+e_tag                = Y
+fuzzy                = Y
+ipv4                 = Y
+ipv6                 = Y
+nvgre                = Y
+raw                  = Y
+sctp                 = Y
+tcp                  = Y
+udp                  = Y
+vlan                 = P
+vxlan                = Y
+
+[rte_flow actions]
+drop                 = Y
+mark                 = Y
+pf                   = Y
+queue                = Y
+rss                  = Y
+security             = Y
+vf                   = Y
diff --git a/doc/guides/nics/features/sxe_vf.ini b/doc/guides/nics/features/sxe_vf.ini
new file mode 100644
index 0000000000..49eaeaaaae
--- /dev/null
+++ b/doc/guides/nics/features/sxe_vf.ini
@@ -0,0 +1,39 @@
+;
+; Supported features of the 'sxe_vf' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status          = Y
+Rx interrupt         = Y
+Power mgmt address monitor = Y
+MTU update           = Y
+Scattered Rx         = Y
+LRO                  = Y
+TSO                  = Y
+Promiscuous mode     = Y
+Allmulticast mode    = Y
+Unicast MAC filter   = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
+VLAN filter          = Y
+Inline crypto        = Y
+CRC offload          = P
+VLAN offload         = P
+QinQ offload         = P
+L3 checksum offload  = P
+L4 checksum offload  = P
+Inner L3 checksum    = P
+Inner L4 checksum    = P
+Packet type parsing  = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats          = Y
+Extended stats       = Y
+Registers dump       = Y
+FreeBSD              = Y
+Linux                = Y
+ARMv8                = Y
+x86-32               = Y
+x86-64               = Y
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index c14bc7988a..ac06a1c72d 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -69,3 +69,4 @@ Network Interface Controller Drivers
     vhost
     virtio
     vmxnet3
+    sxe
diff --git a/doc/guides/nics/sxe.rst b/doc/guides/nics/sxe.rst
new file mode 100644
index 0000000000..93969118be
--- /dev/null
+++ b/doc/guides/nics/sxe.rst
@@ -0,0 +1,71 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright (C), 2022, Linkdata Technology Co., Ltd.
+
+SXE Poll Mode Driver
+======================
+
+The SXE PMD (librte_pmd_sxe) provides poll mode driver support
+for Linkdata 1160-2X 10GE Ethernet Adapter.
+
+Features
+--------
+- PXE boot
+- PTP(Precision Time Protocol)
+- VMDq(Virtual Machine Device Queues)
+- SR-IOV,max 2PF,63VF per PF
+- 128 L2 Ethernet MAC Address Filters (unicast and multicast) 
+- 64 L2 VLAN filters
+- pldm over mctp over smbus
+- 802.1q VLAN
+- Low Latency Interrupts
+- LRO
+- Promiscuous mode
+- Multicast mode
+- Multiple queues for TX and RX
+- Receiver Side Scaling (RSS)
+- MAC/VLAN filtering
+- Packet type information
+- Checksum offload
+- VLAN/QinQ stripping and inserting
+- TSO offload
+- Port hardware statistics
+- Link state information
+- Link flow control
+- Interrupt mode for RX
+- Scattered and gather for TX and RX
+- DCB
+- IEEE 1588
+- FW version
+- Generic flow API
+
+Configuration
+-------------
+
+Dynamic Logging Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One may leverage EAL option "--log-level" to change default levels
+for the log types supported by the driver. The option is used with
+an argument typically consisting of two parts separated by a colon.
+
+SXE PMD provides the following log types available for control:
+
+- ``pmd.net.sxe.drv`` (default level is **DEBUG**)
+
+  Affects driver-wide messages unrelated to any particular devices.
+
+- ``pmd.net.sxe.init`` (default level is **DEBUG**)
+
+  Extra logging of the messages during PMD initialization.
+
+- ``pmd.net.sxe.rx`` (default level is **DEBUG**)
+
+  Affects rx-wide messages.
+
+- ``pmd.net.sxe.tx`` (default level is **DEBUG**)
+
+  Affects tx-wide messages.
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index fb6d34b782..4d716d76cd 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -62,6 +62,7 @@ drivers = [
         'vhost',
         'virtio',
         'vmxnet3',
+        'sxe',
 ]
 std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
 std_deps += ['bus_pci']         # very many PMDs depend on PCI, so make std
diff --git a/drivers/net/sxe/Makefile b/drivers/net/sxe/Makefile
new file mode 100644
index 0000000000..5e2870fdc4
--- /dev/null
+++ b/drivers/net/sxe/Makefile
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sxe.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -DSXE_DPDK
+CFLAGS += -DSXE_HOST_DRIVER
+CFLAGS += -DSXE_DPDK_L4_FEATURES
+CFLAGS += -DSXE_DPDK_SRIOV
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_sxe_version.map
+
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER  = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wmissing-prototypes
+
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lpthread
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+
+$(shell cp $(SRCDIR)/pf/* $(SRCDIR))
+$(shell cp $(SRCDIR)/vf/* $(SRCDIR))
+$(shell cp $(SRCDIR)/base/* $(SRCDIR))
+$(shell cp $(SRCDIR)/include/*.h $(SRCDIR))
+$(shell cp $(SRCDIR)/include/sxe/*.h $(SRCDIR))
+$(shell cp $(SRCDIR)/include/sxe/mgl/*.h $(SRCDIR))
+$(warning "file copy done")
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_testpmd.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_offload_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_queue_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_rx_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_hw.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_flow_ctrl.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_offload.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_pmd_hdc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_ptp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_queue.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vf.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_msg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_queue.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_offload.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_SXE_PMD)-include := rte_pmd_sxe.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_SXE_PMD)-include += sxe_dcb.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
diff --git a/drivers/net/sxe/base/docker_version b/drivers/net/sxe/base/docker_version
new file mode 100644
index 0000000000..33ecb22479
--- /dev/null
+++ b/drivers/net/sxe/base/docker_version
@@ -0,0 +1,4 @@
+dpdk_images_v0.1:
+contains only the dpdk source code; source directory: /usr/src/dpdk
+dpdk_images_v0.2:
+contains the dpdk source code and the native build artifacts under build/
diff --git a/drivers/net/sxe/base/sxe_common.c b/drivers/net/sxe/base/sxe_common.c
new file mode 100644
index 0000000000..62f76ccf3f
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_common.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "sxe_types.h"
+#include "sxe_common.h"
+
+/* Trace-id bit layout: [63:56] flag, [55:24] timestamp (ms), [23:8] tid, [7:0] count. */
+#define SXE_TRACE_ID_COUNT_MASK  0x00000000000000FFLLU
+#define SXE_TRACE_ID_TID_MASK    0x0000000000FFFF00LLU
+#define SXE_TRACE_ID_TIME_MASK   0x00FFFFFFFF000000LLU
+#define SXE_TRACE_ID_FLAG        0xFF00000000000000LLU
+
+#define SXE_TRACE_ID_COUNT_SHIFT 0
+#define SXE_TRACE_ID_TID_SHIFT   8
+#define SXE_TRACE_ID_TIME_SHIFT  24
+
+/* Parenthesize the argument so SXE_SEC_TO_MS(a + b) expands correctly. */
+#define SXE_SEC_TO_MS(sec) ((sec) * 1000ULL)
+#define SXE_SEC_TO_NS(sec) ((sec) * 1000000000ULL)
+
+#define SXE_USEC_PER_MS          1000
+
+/* Cached trace id; statics are zero-initialized, no explicit "= 0" needed. */
+static u64 sxe_trace_id;
+
+/*
+ * Get the current wall-clock time in milliseconds.
+ *
+ * Returns 0 when gettimeofday() fails, so callers cannot distinguish
+ * failure from the (theoretical) epoch instant.
+ */
+u64 sxe_time_get_real_ms(void)
+{
+	u64 ms = 0;
+	struct timeval tv = { 0 };
+
+	if (gettimeofday(&tv, NULL) < 0)
+		goto l_end;
+
+	ms = SXE_SEC_TO_MS(tv.tv_sec) + tv.tv_usec / SXE_USEC_PER_MS;
+
+l_end:
+	return ms;
+}
+
+/*
+ * Generate and cache a 64-bit trace id composed of a fixed flag byte,
+ * the current time in ms, a thread-derived id and a zeroed count field
+ * (see the SXE_TRACE_ID_*_MASK/SHIFT definitions above).
+ *
+ * NOTE(review): shifting pthread_self() assumes pthread_t has an integral
+ * representation (true on Linux/glibc) -- confirm for other platforms.
+ */
+u64 sxe_trace_id_gen(void)
+{
+	u64 tid       = getpid() + (pthread_self() << 20);
+	u64 index     = 0;
+	u64 timestamp = sxe_time_get_real_ms();
+
+	sxe_trace_id = (SXE_TRACE_ID_FLAG)
+		| ((timestamp << SXE_TRACE_ID_TIME_SHIFT) & SXE_TRACE_ID_TIME_MASK)
+		| ((tid << SXE_TRACE_ID_TID_SHIFT) & SXE_TRACE_ID_TID_MASK)
+		| ((index << SXE_TRACE_ID_COUNT_SHIFT) & SXE_TRACE_ID_COUNT_MASK);
+	return sxe_trace_id;
+}
+
+/* Reset the cached trace id to zero. */
+void sxe_trace_id_clean(void)
+{
+	sxe_trace_id = 0;
+}
+
+/*
+ * Return the cached trace id and post-increment it (bumps the count field).
+ * NOTE(review): the increment is a plain non-atomic read-modify-write;
+ * concurrent callers may observe duplicate ids.
+ */
+u64 sxe_trace_id_get(void)
+{
+	return sxe_trace_id++;
+}
diff --git a/drivers/net/sxe/base/sxe_common.h b/drivers/net/sxe/base/sxe_common.h
new file mode 100644
index 0000000000..43c062b937
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_common.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_DPDK_COMMON_H__
+#define __SXE_DPDK_COMMON_H__
+
+u64 sxe_trace_id_gen(void);
+
+void sxe_trace_id_clean(void);
+
+u64 sxe_trace_id_get(void);
+
+u64 sxe_time_get_real_ms(void);
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_compat_platform.h b/drivers/net/sxe/base/sxe_compat_platform.h
new file mode 100644
index 0000000000..8509f3cf0c
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_compat_platform.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_COMPAT_PLATFORM_H__
+#define __SXE_COMPAT_PLATFORM_H__
+
+#include <rte_cycles.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_common.h>
+
+#include "sxe_types.h"
+
+#define  false 0
+#define  true  1
+
+#ifdef SXE_TEST
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
+#endif
+
+#define __iomem
+#define __force
+
+#define min(a,b)	RTE_MIN(a,b)
+
+#ifdef __has_attribute
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0)  
+#endif 
+#else
+# define fallthrough do {} while (0)  
+#endif 
+
+#define __swab32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+#define __swab16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+#ifndef ntohs
+#define ntohs(o) be16_to_cpu(o)
+#endif
+
+#ifndef ntohl
+#define ntohl(o) be32_to_cpu(o)
+#endif
+
+#ifndef htons
+#define htons(o) cpu_to_be16(o)
+#endif
+
+#ifndef htonl
+#define htonl(o) cpu_to_be32(o)
+#endif
+#define mdelay rte_delay_ms
+#define udelay rte_delay_us
+/* usleep_range() is emulated with a delay at the minimum bound. */
+#define usleep_range(min, max) rte_delay_us(min)
+/* Parenthesize the argument: msleep(a + b) must not expand to a + b*1000. */
+#define msleep(x)             rte_delay_us((x) * 1000)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define BIT(x)	(1UL << (x))
+#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+#define NSEC_PER_SEC	1000000000L
+
+#define ETH_P_1588	0x88F7		
+
+#define VLAN_PRIO_SHIFT		13
+
+/* Set bit @nr in the bitmap at @addr (non-atomic, 32-bit word granularity). */
+static inline void
+set_bit(unsigned long nr, void *addr)
+{
+	int *m = ((int *)addr) + (nr >> 5);
+	*m |= 1 << (nr & 31);
+}
+
+/* Return nonzero when bit @nr is set in the bitmap at @addr (non-atomic read). */
+static inline int
+test_bit(int nr, const void *addr)
+{
+	return (1UL & (((const int *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
+}
+
+/* Clear bit @nr in the bitmap at @addr (non-atomic, 32-bit word granularity). */
+static inline void
+clear_bit(unsigned long nr, void *addr)
+{
+	int *m = ((int *)addr) + (nr >> 5);
+	*m &= ~(1 << (nr & 31));
+}
+
+/*
+ * Clear bit @nr and return whether it was previously set.
+ * NOTE(review): unlike the Linux kernel namesake this is NOT atomic;
+ * the read-modify-write is a plain load/store sequence.
+ */
+static inline int
+test_and_clear_bit(unsigned long nr, void *addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *)addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old & ~mask;
+	return (old & mask) != 0;
+}
+
+/* Read a 64-bit little-endian MMIO register and convert to CPU byte order. */
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+	return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+/* Write a 64-bit value to an MMIO register in little-endian byte order. */
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+	rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+/* Read a 32-bit little-endian MMIO register and convert to CPU byte order. */
+static inline u32 sxe_read_addr(const volatile void *addr)
+{
+	return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+/* Write a 32-bit value to an MMIO register in little-endian byte order. */
+static inline void sxe_write_addr(u32 value, volatile void *addr)
+{
+	rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_compat_version.h b/drivers/net/sxe/base/sxe_compat_version.h
new file mode 100644
index 0000000000..32d1a0862a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_compat_version.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_COMPAT_VERSION_H__
+#define __SXE_COMPAT_VERSION_H__
+
+#include <stdbool.h>
+#include "sxe_dpdk_version.h"
+
+struct rte_eth_dev;
+enum rte_eth_event_type;
+
+int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
+	enum rte_eth_event_type event, void *ret_param);
+
+#ifdef DPDK_19_11_6
+#define ETH_DEV_OPS_HAS_DESC_RELATE
+
+#define __rte_cold __attribute__((cold))
+
+#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX 
+#ifdef RTE_ARCH_ARM64
+#define RTE_ARCH_ARM
+#endif
+
+#else
+
+#define SET_AUTOFILL_QUEUE_XSTATS
+#define PCI_REG_WC_WRITE
+
+#endif
+
+#ifndef PCI_REG_WC_WRITE
+#define rte_write32_wc rte_write32
+#define rte_write32_wc_relaxed rte_write32_relaxed
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+
+#define    RTE_ETH_RSS_IPV4                ETH_RSS_IPV4
+#define    RTE_ETH_RSS_NONFRAG_IPV4_TCP    ETH_RSS_NONFRAG_IPV4_TCP
+#define    RTE_ETH_RSS_NONFRAG_IPV4_UDP    ETH_RSS_NONFRAG_IPV4_UDP
+#define    RTE_ETH_RSS_IPV6                ETH_RSS_IPV6
+#define    RTE_ETH_RSS_NONFRAG_IPV6_TCP    ETH_RSS_NONFRAG_IPV6_TCP
+#define    RTE_ETH_RSS_NONFRAG_IPV6_UDP    ETH_RSS_NONFRAG_IPV6_UDP
+#define    RTE_ETH_RSS_IPV6_EX             ETH_RSS_IPV6_EX
+#define    RTE_ETH_RSS_IPV6_TCP_EX         ETH_RSS_IPV6_TCP_EX
+#define    RTE_ETH_RSS_IPV6_UDP_EX         ETH_RSS_IPV6_UDP_EX
+
+
+#define    RTE_ETH_VLAN_TYPE_UNKNOWN       ETH_VLAN_TYPE_UNKNOWN
+#define    RTE_ETH_VLAN_TYPE_INNER         ETH_VLAN_TYPE_INNER
+#define    RTE_ETH_VLAN_TYPE_OUTER         ETH_VLAN_TYPE_OUTER
+#define    RTE_ETH_VLAN_TYPE_MAX           ETH_VLAN_TYPE_MAX
+
+
+#define    RTE_ETH_8_POOLS        ETH_8_POOLS
+#define    RTE_ETH_16_POOLS       ETH_16_POOLS
+#define    RTE_ETH_32_POOLS       ETH_32_POOLS
+#define    RTE_ETH_64_POOLS       ETH_64_POOLS
+
+
+#define RTE_ETH_4_TCS       ETH_4_TCS
+#define RTE_ETH_8_TCS       ETH_8_TCS
+
+
+#define RTE_ETH_MQ_RX_NONE          ETH_MQ_RX_NONE
+#define RTE_ETH_MQ_RX_RSS           ETH_MQ_RX_RSS
+#define RTE_ETH_MQ_RX_DCB           ETH_MQ_RX_DCB
+#define RTE_ETH_MQ_RX_DCB_RSS       ETH_MQ_RX_DCB_RSS
+#define RTE_ETH_MQ_RX_VMDQ_ONLY     ETH_MQ_RX_VMDQ_ONLY
+#define RTE_ETH_MQ_RX_VMDQ_RSS      ETH_MQ_RX_VMDQ_RSS
+#define RTE_ETH_MQ_RX_VMDQ_DCB      ETH_MQ_RX_VMDQ_DCB
+#define RTE_ETH_MQ_RX_VMDQ_DCB_RSS  ETH_MQ_RX_VMDQ_DCB_RSS
+
+
+#define RTE_ETH_MQ_TX_NONE          ETH_MQ_TX_NONE
+#define RTE_ETH_MQ_TX_DCB           ETH_MQ_TX_DCB
+#define RTE_ETH_MQ_TX_VMDQ_DCB      ETH_MQ_TX_VMDQ_DCB
+#define RTE_ETH_MQ_TX_VMDQ_ONLY     ETH_MQ_TX_VMDQ_ONLY
+
+
+#define RTE_ETH_FC_NONE         RTE_FC_NONE
+#define RTE_ETH_FC_RX_PAUSE     RTE_FC_RX_PAUSE
+#define RTE_ETH_FC_TX_PAUSE     RTE_FC_TX_PAUSE
+#define RTE_ETH_FC_FULL         RTE_FC_FULL
+
+
+#define RTE_ETH_MQ_RX_RSS_FLAG      ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG      ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG     ETH_MQ_RX_VMDQ_FLAG
+
+
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       DEV_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       DEV_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        DEV_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        DEV_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO          DEV_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       DEV_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     DEV_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      DEV_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      DEV_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER          DEV_RX_OFFLOAD_SCATTER
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP        DEV_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY         DEV_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC         DEV_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       DEV_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH         DEV_RX_OFFLOAD_RSS_HASH
+
+
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      DEV_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       DEV_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        DEV_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        DEV_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       DEV_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO          DEV_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO          DEV_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      DEV_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    DEV_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      DEV_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     DEV_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   DEV_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    DEV_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      DEV_TX_OFFLOAD_MT_LOCKFREE
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       DEV_TX_OFFLOAD_MULTI_SEGS
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   DEV_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY         DEV_TX_OFFLOAD_SECURITY
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      DEV_TX_OFFLOAD_UDP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       DEV_TX_OFFLOAD_IP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP
+
+
+#define RTE_ETH_LINK_SPEED_AUTONEG      ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED        ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_1G           ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_10G          ETH_LINK_SPEED_10G
+
+#define RTE_ETH_SPEED_NUM_NONE          ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_1G            ETH_SPEED_NUM_1G  
+#define RTE_ETH_SPEED_NUM_10G           ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_UNKNOWN       ETH_SPEED_NUM_UNKNOWN
+
+
+#define RTE_ETH_LINK_HALF_DUPLEX        ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX        ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN               ETH_LINK_DOWN       
+#define RTE_ETH_LINK_UP                 ETH_LINK_UP 
+
+
+#define RTE_ETH_RSS_RETA_SIZE_128       ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RETA_GROUP_SIZE         RTE_RETA_GROUP_SIZE
+
+
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES          ETH_DCB_NUM_QUEUES
+
+
+#define RTE_ETH_DCB_PFC_SUPPORT     ETH_DCB_PFC_SUPPORT
+
+
+#define RTE_ETH_VLAN_STRIP_OFFLOAD   ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD  ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD  ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD   ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK      ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK     ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK     ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK      ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX          ETH_VLAN_ID_MAX
+
+
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   ETH_NUM_RECEIVE_MAC_ADDR
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY ETH_VMDQ_NUM_UC_HASH_ARRAY
+
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG      ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC    ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC    ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST  ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST  ETH_VMDQ_ACCEPT_MULTICAST
+
+#define RTE_VLAN_HLEN       4  
+
+
+#define RTE_MBUF_F_RX_VLAN                  PKT_RX_VLAN
+#define RTE_MBUF_F_RX_RSS_HASH              PKT_RX_RSS_HASH
+#define RTE_MBUF_F_RX_FDIR                  PKT_RX_FDIR
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD    PKT_RX_EIP_CKSUM_BAD
+#define RTE_MBUF_F_RX_VLAN_STRIPPED         PKT_RX_VLAN_STRIPPED
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK         PKT_RX_IP_CKSUM_MASK
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN      PKT_RX_IP_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD          PKT_RX_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD         PKT_RX_IP_CKSUM_GOOD
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE         PKT_RX_IP_CKSUM_NONE
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK         PKT_RX_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN      PKT_RX_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD          PKT_RX_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD         PKT_RX_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE         PKT_RX_L4_CKSUM_NONE
+#define RTE_MBUF_F_RX_IEEE1588_PTP          PKT_RX_IEEE1588_PTP
+#define RTE_MBUF_F_RX_IEEE1588_TMST         PKT_RX_IEEE1588_TMST
+#define RTE_MBUF_F_RX_FDIR_ID               PKT_RX_FDIR_ID
+#define RTE_MBUF_F_RX_FDIR_FLX              PKT_RX_FDIR_FLX
+#define RTE_MBUF_F_RX_QINQ_STRIPPED         PKT_RX_QINQ_STRIPPED
+#define RTE_MBUF_F_RX_LRO                   PKT_RX_LRO
+#define RTE_MBUF_F_RX_SEC_OFFLOAD	        PKT_RX_SEC_OFFLOAD
+#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED	PKT_RX_SEC_OFFLOAD_FAILED
+#define RTE_MBUF_F_RX_QINQ                  PKT_RX_QINQ
+
+#define RTE_MBUF_F_TX_SEC_OFFLOAD	        PKT_TX_SEC_OFFLOAD
+#define RTE_MBUF_F_TX_MACSEC                PKT_TX_MACSEC
+#define RTE_MBUF_F_TX_QINQ                  PKT_TX_QINQ
+#define RTE_MBUF_F_TX_TCP_SEG               PKT_TX_TCP_SEG
+#define RTE_MBUF_F_TX_IEEE1588_TMST         PKT_TX_IEEE1588_TMST
+#define RTE_MBUF_F_TX_L4_NO_CKSUM           PKT_TX_L4_NO_CKSUM
+#define RTE_MBUF_F_TX_TCP_CKSUM             PKT_TX_TCP_CKSUM
+#define RTE_MBUF_F_TX_SCTP_CKSUM            PKT_TX_SCTP_CKSUM
+#define RTE_MBUF_F_TX_UDP_CKSUM             PKT_TX_UDP_CKSUM
+#define RTE_MBUF_F_TX_L4_MASK               PKT_TX_L4_MASK
+#define RTE_MBUF_F_TX_IP_CKSUM              PKT_TX_IP_CKSUM
+#define RTE_MBUF_F_TX_IPV4                  PKT_TX_IPV4
+#define RTE_MBUF_F_TX_IPV6                  PKT_TX_IPV6
+#define RTE_MBUF_F_TX_VLAN                  PKT_TX_VLAN
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM        PKT_TX_OUTER_IP_CKSUM
+#define RTE_MBUF_F_TX_OUTER_IPV4            PKT_TX_OUTER_IPV4
+#define RTE_MBUF_F_TX_OUTER_IPV6            PKT_TX_OUTER_IPV6
+
+#define RTE_MBUF_F_TX_OFFLOAD_MASK          PKT_TX_OFFLOAD_MASK
+
+#define RTE_ETH_8_POOLS                     ETH_8_POOLS
+#define RTE_ETH_16_POOLS                    ETH_16_POOLS
+#define RTE_ETH_32_POOLS                    ETH_32_POOLS
+#define RTE_ETH_64_POOLS                    ETH_64_POOLS
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_ETHDEV_DEBUG_RX
+#define RTE_ETHDEV_DEBUG_TX
+#endif
+
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#define rte_eth_fdir_pballoc_type   rte_fdir_pballoc_type
+#define rte_eth_fdir_conf 			rte_fdir_conf
+
+#define RTE_ETH_FDIR_PBALLOC_64K   RTE_FDIR_PBALLOC_64K
+#define RTE_ETH_FDIR_PBALLOC_128K  RTE_FDIR_PBALLOC_128K
+#define RTE_ETH_FDIR_PBALLOC_256K  RTE_FDIR_PBALLOC_256K
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+	(&((pci_dev)->intr_handle))
+
+#define SXE_DEV_FNAV_CONF(dev) \
+	(&((dev)->data->dev_conf.fdir_conf)) 
+#define SXE_GET_FRAME_SIZE(dev) \
+	(dev->data->dev_conf.rxmode.max_rx_pkt_len)
+	
+#elif defined DPDK_21_11_5
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+	((pci_dev)->intr_handle)
+#define SXE_DEV_FNAV_CONF(dev) \
+	(&((dev)->data->dev_conf.fdir_conf)) 
+#define SXE_GET_FRAME_SIZE(dev) \
+	(dev->data->mtu + SXE_ETH_OVERHEAD)
+
+#else
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+	((pci_dev)->intr_handle)
+#define SXE_DEV_FNAV_CONF(dev) \
+	(&((struct sxe_adapter *)(dev)->data->dev_private)->fnav_conf) 
+#define RTE_ADAPTER_HAVE_FNAV_CONF
+#define SXE_GET_FRAME_SIZE(dev) \
+	(dev->data->mtu + SXE_ETH_OVERHEAD)
+
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#define ETH_DEV_OPS_FILTER_CTRL
+#define DEV_RX_JUMBO_FRAME
+#define ETH_DEV_MIRROR_RULE
+#define ETH_DEV_RX_DESC_DONE
+#else
+#define ETH_DEV_OPS_MONITOR
+#endif
+
+#ifdef DPDK_22_11_3
+#define DEV_RX_OFFLOAD_CHECKSUM RTE_ETH_RX_OFFLOAD_CHECKSUM
+#endif
+
+#ifdef DPDK_22_11_3
+#define ETH_DCB_NUM_USER_PRIORITIES RTE_ETH_DCB_NUM_USER_PRIORITIES
+#endif
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_dpdk_version.h b/drivers/net/sxe/base/sxe_dpdk_version.h
new file mode 100644
index 0000000000..902812566a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_dpdk_version.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DPDK_VERSION_H__
+#define __SXE_DPDK_VERSION_H__
+
+#include <rte_version.h>
+
+#if (RTE_VERSION >= RTE_VERSION_NUM(19, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(19, 12, 0, 0))
+	#define DPDK_19_11_6
+#elif (RTE_VERSION >= RTE_VERSION_NUM(20, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(20, 12, 0, 0))
+	#define DPDK_20_11_5
+#elif (RTE_VERSION >= RTE_VERSION_NUM(21, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(21, 12, 0, 0))
+	#define DPDK_21_11_5
+#elif (RTE_VERSION >= RTE_VERSION_NUM(22, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(22, 12, 0, 0))
+	#define DPDK_22_11_3
+#endif
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_errno.h b/drivers/net/sxe/base/sxe_errno.h
new file mode 100644
index 0000000000..e4de8bef29
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_errno.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_ERRNO_H__
+#define __SXE_ERRNO_H__
+
+#define SXE_ERR_MODULE_STANDARD			0
+#define SXE_ERR_MODULE_PF				1
+#define SXE_ERR_MODULE_VF				2
+#define SXE_ERR_MODULE_HDC				3
+
#define SXE_ERR_MODULE_OFFSET	16
/* Compose a module-scoped error code: module id in the high half-word,
 * errno-style code in the low. Arguments are parenthesized so expressions
 * (e.g. conditional operators) expand correctly.
 */
#define SXE_ERR_MODULE(module, errcode)		\
	(((module) << SXE_ERR_MODULE_OFFSET) | (errcode))
#define SXE_ERR_PF(errcode)		SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode)
#define SXE_ERR_VF(errcode)		SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode)
#define SXE_ERR_HDC(errcode)	SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode)
+
+#define SXE_ERR_CONFIG                        EINVAL
+#define SXE_ERR_PARAM                         EINVAL
+#define SXE_ERR_RESET_FAILED                  EPERM
+#define SXE_ERR_NO_SPACE                      ENOSPC
+#define SXE_ERR_FNAV_CMD_INCOMPLETE           EBUSY
+#define SXE_ERR_MBX_LOCK_FAIL                 EBUSY
+#define SXE_ERR_OPRATION_NOT_PERM             EPERM
+#define SXE_ERR_LINK_STATUS_INVALID           EINVAL
+#define SXE_ERR_LINK_SPEED_INVALID            EINVAL
+#define SXE_ERR_DEVICE_NOT_SUPPORTED          EOPNOTSUPP
+#define SXE_ERR_HDC_LOCK_BUSY                 EBUSY
+#define SXE_ERR_HDC_FW_OV_TIMEOUT             ETIMEDOUT
+#define SXE_ERR_MDIO_CMD_TIMEOUT              ETIMEDOUT
+#define SXE_ERR_INVALID_LINK_SETTINGS         EINVAL
+#define SXE_ERR_FNAV_REINIT_FAILED            EIO
+#define SXE_ERR_CLI_FAILED                    EIO
+#define SXE_ERR_MASTER_REQUESTS_PENDING       SXE_ERR_PF(1)
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT       SXE_ERR_PF(2)
+#define SXE_ERR_ENABLE_SRIOV_FAIL             SXE_ERR_PF(3)
+#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT      SXE_ERR_PF(4)
+#define SXE_ERR_SFP_NOT_PERSENT               SXE_ERR_PF(5)
+#define SXE_ERR_PHY_NOT_PERSENT               SXE_ERR_PF(6)
+#define SXE_ERR_PHY_RESET_FAIL                SXE_ERR_PF(7)
+#define SXE_ERR_FC_NOT_NEGOTIATED             SXE_ERR_PF(8)
+#define SXE_ERR_SFF_NOT_SUPPORTED             SXE_ERR_PF(9)
+
+#define SXEVF_ERR_MAC_ADDR_INVALID              EINVAL
+#define SXEVF_ERR_RESET_FAILED                  EIO
+#define SXEVF_ERR_ARGUMENT_INVALID              EINVAL
+#define SXEVF_ERR_NOT_READY                     EBUSY
+#define SXEVF_ERR_POLL_ACK_FAIL                 EIO
+#define SXEVF_ERR_POLL_MSG_FAIL                 EIO
+#define SXEVF_ERR_MBX_LOCK_FAIL                 EBUSY
+#define SXEVF_ERR_REPLY_INVALID                 EINVAL
+#define SXEVF_ERR_IRQ_NUM_INVALID               EINVAL
+#define SXEVF_ERR_PARAM                         EINVAL
+#define SXEVF_ERR_MAILBOX_FAIL                  SXE_ERR_VF(1)
+#define SXEVF_ERR_MSG_HANDLE_ERR                SXE_ERR_VF(2)
+#define SXEVF_ERR_DEVICE_NOT_SUPPORTED          SXE_ERR_VF(3)
+#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT      SXE_ERR_VF(4)
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_hw.c b/drivers/net/sxe/base/sxe_hw.c
new file mode 100644
index 0000000000..14d1d67456
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_hw.c
@@ -0,0 +1,6647 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifdef SXE_PHY_CONFIGURE
+#include <linux/mdio.h>
+#endif
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#include "sxe_pci.h"
+#include "sxe_log.h"
+#include "sxe_debug.h"
+#include "sxe_host_hdc.h"
+#include "sxe_sriov.h"
+#include "sxe_compat.h"
+#else
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+#include "sxe.h"
+
+#include "sxe_hw.h"
+#endif
+
+
+#define SXE_PFMSG_MASK  (0xFF00)
+
+#define SXE_MSGID_MASK  (0xFFFFFFFF)
+
+#define SXE_CTRL_MSG_MASK          (0x700)
+
+#define SXE_RING_WAIT_LOOP        10
+#define SXE_REG_NAME_LEN          16
+#define SXE_DUMP_REG_STRING_LEN   73
+#define SXE_DUMP_REGS_NUM         64
+#define SXE_MAX_RX_DESC_POLL      10
+#define SXE_LPBK_EN               0x00000001
+#define SXE_MACADDR_LOW_4_BYTE    4
+#define SXE_MACADDR_HIGH_2_BYTE   2
+#define SXE_RSS_FIELD_MASK        0xffff0000
+#define SXE_MRQE_MASK             0x0000000f
+
+#define SXE_HDC_DATA_LEN_MAX         256
+
+#define SXE_8_TC_MSB				(0x11111111)
+
+STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg);
+STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value);
+static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value);
+
+#define SXE_WRITE_REG_ARRAY_32(a, reg, offset, value) \
+	sxe_write_reg(a, reg + (offset << 2), value)
+#define SXE_READ_REG_ARRAY_32(a, reg, offset) \
+	sxe_read_reg(a, reg + (offset << 2))
+
+#define SXE_REG_READ(hw, addr)        sxe_read_reg(hw, addr)
+#define SXE_REG_WRITE(hw, reg, value) sxe_write_reg(hw, reg, value)
+#define SXE_WRITE_FLUSH(a) sxe_read_reg(a, SXE_STATUS)
+#define SXE_REG_WRITE_ARRAY(hw, reg, offset, value) \
+	sxe_write_reg(hw, (reg) + ((offset) << 2), (value))
+
+#define SXE_SWAP_32(_value) __swab32((_value))
+
+#define SXE_REG_WRITE_BE32(a, reg, value) \
+	SXE_REG_WRITE((a), (reg), SXE_SWAP_32(ntohl(value)))
+
+#define SXE_SWAP_16(_value) __swab16((_value))
+
+#define SXE_REG64_WRITE(a, reg, value) sxe_write_reg64((a), (reg), (value))
+
+enum sxe_ipsec_table {
+	SXE_IPSEC_IP_TABLE = 0,
+	SXE_IPSEC_SPI_TABLE,
+	SXE_IPSEC_KEY_TABLE,
+};
+
+u32 mac_regs[] = {
+	SXE_COMCTRL,
+	SXE_PCCTRL,
+	SXE_LPBKCTRL,
+	SXE_MAXFS,
+	SXE_VLANCTRL,
+	SXE_VLANID,
+	SXE_LINKS,
+	SXE_HLREG0,
+	SXE_MFLCN,
+	SXE_MACC,
+};
+
+u16 sxe_mac_reg_num_get(void)
+{
+	return ARRAY_SIZE(mac_regs);
+}
+
+
+#ifndef SXE_DPDK 
+
/*
 * Mark the adapter as faulted (once) and invoke the registered fault
 * callback, if any. Re-entry while SXE_HW_FAULT is already set is a no-op.
 */
void sxe_hw_fault_handle(struct sxe_hw *hw)
{
	struct sxe_adapter *adapter = hw->adapter;

	if (test_bit(SXE_HW_FAULT, &hw->state)) {
		goto l_ret;
	}

	set_bit(SXE_HW_FAULT, &hw->state);

	LOG_DEV_ERR("sxe nic hw fault\n");

	/* Notify the owner only when both callback and its context are set. */
	if ((hw->fault_handle != NULL) && (hw->priv != NULL) ) {
		hw->fault_handle(hw->priv);
	}

l_ret:
	return;
}
+
/*
 * Re-check a suspicious register read. Polls the STATUS register up to
 * SXE_REG_READ_RETRY times (3 ms apart); if STATUS never reads back a
 * valid value the device is declared faulted and SXE_REG_READ_FAIL is
 * returned. Otherwise the target register 'reg' is re-read and returned.
 */
static u32 sxe_hw_fault_check(struct sxe_hw *hw, u32 reg)
{
	u32 i, value;
	u8  __iomem *base_addr = hw->reg_base_addr;
	struct sxe_adapter *adapter = hw->adapter;

	if (sxe_is_hw_fault(hw)) {
		goto l_out;
	}

	for (i = 0; i < SXE_REG_READ_RETRY; i++) {
		value = hw->reg_read(base_addr + SXE_STATUS);
		if (value != SXE_REG_READ_FAIL) {
			break;
		}

		mdelay(3);
	}

	if (SXE_REG_READ_FAIL == value) {
		LOG_ERROR_BDF("read registers multiple times failed, ret=%#x\n", value);
		sxe_hw_fault_handle(hw);
	} else {
		/* Bus looks healthy: re-read the register the caller wanted. */
		value = hw->reg_read(base_addr + reg);
	}

	return value;
l_out:
	return SXE_REG_READ_FAIL;
}
+
+STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+{
+	u32 value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		value = SXE_REG_READ_FAIL;
+		goto l_ret;
+	}
+
+	value = hw->reg_read(base_addr + reg);
+	if (unlikely(SXE_REG_READ_FAIL == value)) {
+		LOG_ERROR_BDF("reg[0x%x] read failed, ret=%#x\n", reg, value);
+		value = sxe_hw_fault_check(hw, reg);
+	}
+
+l_ret:
+	return value;
+}
+
+STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_ret;
+	}
+
+	hw->reg_write(value, base_addr + reg);
+
+l_ret:
+	return;
+}
+
+#else 
+
/*
 * DPDK-side fault-tolerant 32-bit register read. A read of all-ones
 * (SXE_REG_READ_FAIL) may be a real value or a dead PCIe link; it is
 * confirmed against the STATUS register and retried up to
 * SXE_REG_READ_RETRY times, 3 ms apart. Returns the register value, or
 * SXE_REG_READ_FAIL if the device stays unreadable.
 */
STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
{
	u32 i, value;
	u8  __iomem *base_addr = hw->reg_base_addr;

	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
	if (unlikely(SXE_REG_READ_FAIL == value)) {
		/* All-ones may be legitimate; probe STATUS to distinguish. */
		value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
		if (unlikely(SXE_REG_READ_FAIL != value)) {
			/* Bus is healthy: re-read the target register. */
			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
		} else {
			LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
							reg, SXE_STATUS, value);
			for (i = 0; i < SXE_REG_READ_RETRY; i++) {
				/* Wait for the bus to recover, then retry. */
				value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
				if (unlikely(SXE_REG_READ_FAIL != value)) {
					value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
					LOG_INFO("reg[0x%x] read ok, value=%#x\n",
									reg, value);
					break;
				} else {
					LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
							reg, SXE_STATUS, value);
				}

				mdelay(3);
			}
		}
	}

	return value;
}
+
+STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
+
+	return;
+}
+#endif
+
+static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value)
+{
+	u8 __iomem *reg_addr = hw->reg_base_addr;
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_ret;
+	}
+
+	writeq(value, reg_addr + reg);
+
+l_ret:
+	return;
+}
+
+
+void sxe_hw_no_snoop_disable(struct sxe_hw *hw)
+{
+	u32 ctrl_ext;
+
+	ctrl_ext = SXE_REG_READ(hw, SXE_CTRL_EXT);
+	ctrl_ext |= SXE_CTRL_EXT_NS_DIS;
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, ctrl_ext);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
/*
 * Associate a receive address register (RAR) entry with a pool by setting
 * the pool's bit in the MPSAR low/high bitmap for that RAR index.
 * Returns 0 on success, -SXE_ERR_PARAM for an out-of-range rar_idx.
 */
s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
						u8 rar_idx, u8 pool_idx)
{
	s32 ret = 0;
	u32 value;
	struct sxe_adapter *adapter = hw->adapter;

	/* NOTE(review): bound check uses '>'; if SXE_UC_ENTRY_NUM_MAX is the
	 * entry count (not the last valid index) this should be '>=' - confirm.
	 */
	if (rar_idx > SXE_UC_ENTRY_NUM_MAX) {
		ret = -SXE_ERR_PARAM;
		LOG_DEV_ERR("pool_idx:%d rar_idx:%d invalid.\n",
			  pool_idx, rar_idx);
		goto l_end;
	}

	/* Pools 0-31 live in MPSAR_LOW, pools 32-63 in MPSAR_HIGH. */
	if (pool_idx < 32) {
		value = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
		value |= BIT(pool_idx);
		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), value);
	} else {
		value = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
		value |= BIT(pool_idx - 32);
		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), value);
	}

l_end:
	return ret;
}
+
+static s32 sxe_hw_uc_addr_pool_disable(struct sxe_hw *hw, u8 rar_idx)
+{
+	u32 hi;
+	u32 low;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	hi = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
+	low = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
+
+	if (sxe_is_hw_fault(hw)) {
+		goto l_end;
+	}
+
+	if (!hi & !low) {
+		LOG_DEBUG_BDF("no need clear rar-pool relation register.\n");
+		goto l_end;
+	}
+
+	if (low) {
+		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
+	}
+	if (hi) {
+		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
+	}
+
+
+l_end:
+	return 0;
+}
+
/*
 * Issue a global device reset (SXE_CTRL_RST), wait ~1 ms, then poll the
 * CTRL register up to 10 times (1 us apart) for the reset bits to
 * self-clear. Returns 0 on success or -SXE_ERR_RESET_FAILED if the device
 * is still in reset after polling.
 */
s32 sxe_hw_nic_reset(struct sxe_hw *hw)
{
	s32 ret = 0;
	u32 ctrl, i;
	struct sxe_adapter *adapter = hw->adapter;

	ctrl = SXE_CTRL_RST;
	ctrl |= SXE_REG_READ(hw, SXE_CTRL);
	/* Keep PCIe master access enabled across the reset. */
	ctrl &= ~SXE_CTRL_GIO_DIS;
	SXE_REG_WRITE(hw, SXE_CTRL, ctrl);

	SXE_WRITE_FLUSH(hw);
	usleep_range(1000, 1200);

	for (i = 0; i < 10; i++) {
		ctrl = SXE_REG_READ(hw, SXE_CTRL);
		if (!(ctrl & SXE_CTRL_RST_MASK)) {
			break;
		}
		udelay(1);
	}

	if (ctrl & SXE_CTRL_RST_MASK) {
		ret = -SXE_ERR_RESET_FAILED;
		LOG_DEV_ERR("reset polling failed to complete\n");
	}

	return ret;
}
+
+void sxe_hw_pf_rst_done_set(struct sxe_hw *hw)
+{
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_CTRL_EXT);
+	value |= SXE_CTRL_EXT_PFRSTD;
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
+
+	return;
+}
+
+static void sxe_hw_regs_flush(struct sxe_hw *hw)
+{
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+static const struct sxe_reg_info sxe_reg_info_tbl[] = {
+
+	{SXE_CTRL, 1, 1, "CTRL"},
+	{SXE_STATUS, 1, 1, "STATUS"},
+	{SXE_CTRL_EXT, 1, 1, "CTRL_EXT"},
+
+	{SXE_EICR, 1, 1, "EICR"},
+
+	{SXE_SRRCTL(0), 16, 0x4, "SRRCTL"},
+	{SXE_RDH(0), 64, 0x40, "RDH"},
+	{SXE_RDT(0), 64, 0x40, "RDT"},
+	{SXE_RXDCTL(0), 64, 0x40, "RXDCTL"},
+	{SXE_RDBAL(0), 64, 0x40, "RDBAL"},
+	{SXE_RDBAH(0), 64, 0x40, "RDBAH"},
+
+	{SXE_TDBAL(0), 32, 0x40, "TDBAL"},
+	{SXE_TDBAH(0), 32, 0x40, "TDBAH"},
+	{SXE_TDLEN(0), 32, 0x40, "TDLEN"},
+	{SXE_TDH(0), 32, 0x40, "TDH"},
+	{SXE_TDT(0), 32, 0x40, "TDT"},
+	{SXE_TXDCTL(0), 32, 0x40, "TXDCTL"},
+
+	{ .name = NULL }
+};
+
/*
 * Dump one entry of sxe_reg_info_tbl. Array-type registers are read for
 * all SXE_DUMP_REGS_NUM (64) instances and printed eight per line; any
 * register not listed in the switch is printed as a single scalar in the
 * default branch.
 */
static void sxe_hw_reg_print(struct sxe_hw *hw,
				const struct sxe_reg_info *reginfo)
{
	u32 i, j;
	s8 *value;
	u32 first_reg_idx = 0;
	u32 regs[SXE_DUMP_REGS_NUM];
	s8 reg_name[SXE_REG_NAME_LEN];
	s8 buf[SXE_DUMP_REG_STRING_LEN];
	struct sxe_adapter *adapter = hw->adapter;

	switch (reginfo->addr) {
	case SXE_SRRCTL(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_SRRCTL(i));
		}
		break;
	case SXE_RDLEN(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_RDLEN(i));
		}
		break;
	case SXE_RDH(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_RDH(i));
		}
		break;
	case SXE_RDT(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_RDT(i));
		}
		break;
	case SXE_RXDCTL(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_RXDCTL(i));
		}
		break;
	case SXE_RDBAL(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_RDBAL(i));
		}
		break;
	case SXE_RDBAH(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_RDBAH(i));
		}
		break;
	case SXE_TDBAL(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_TDBAL(i));
		}
		break;
	case SXE_TDBAH(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_TDBAH(i));
		}
		break;
	case SXE_TDLEN(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_TDLEN(i));
		}
		break;
	case SXE_TDH(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_TDH(i));
		}
		break;
	case SXE_TDT(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_TDT(i));
		}
		break;
	case SXE_TXDCTL(0):
		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
			regs[i] = SXE_REG_READ(hw, SXE_TXDCTL(i));
		}
		break;
	default:
		LOG_DEV_INFO("%-15s %08x\n",
			reginfo->name, SXE_REG_READ(hw, reginfo->addr));
		goto l_end;
	}

	while (first_reg_idx < SXE_DUMP_REGS_NUM) {
		value = buf;
		snprintf(reg_name, SXE_REG_NAME_LEN,
			"%s[%d-%d]", reginfo->name,
			first_reg_idx, (first_reg_idx + 7));

		/* 8 values x 9 chars (" %08x") = 72 chars + NUL exactly fills
		 * buf (SXE_DUMP_REG_STRING_LEN = 73); keep widths in sync if
		 * the format string ever changes.
		 */
		for (j = 0; j < 8; j++) {
			value += sprintf(value, " %08x", regs[first_reg_idx++]);
		}

		/* NOTE(review): dump lines go out at ERR level while the scalar
		 * default branch uses INFO - confirm this asymmetry is intended.
		 */
		LOG_DEV_ERR("%-15s%s\n", reg_name, buf);
	}

l_end:
	return;
}
+
+static void sxe_hw_reg_dump(struct sxe_hw *hw)
+{
+	const struct sxe_reg_info *reginfo;
+
+	for (reginfo = (const struct sxe_reg_info *)sxe_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		sxe_hw_reg_print(hw, reginfo);
+	}
+
+	return;
+}
+
+static s32 sxe_hw_status_reg_test(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u32 value, before, after;
+	u32 toggle = 0x7FFFF30F;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	before = SXE_REG_READ(hw, SXE_STATUS);
+	value = (SXE_REG_READ(hw, SXE_STATUS) & toggle);
+	SXE_REG_WRITE(hw, SXE_STATUS, toggle);
+	after = SXE_REG_READ(hw, SXE_STATUS) & toggle;
+	if (value != after) {
+		LOG_MSG_ERR(drv, "failed status register test got: "
+				"0x%08X expected: 0x%08X\n",
+					after, value);
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, SXE_STATUS, before);
+
+l_end:
+	return ret;
+}
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+struct sxe_self_test_reg {
+	u32 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+static const struct sxe_self_test_reg self_test_reg[] = {
+	{ SXE_FCRTL(0),  1,   PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
+	{ SXE_FCRTH(0),  1,   PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
+	{ SXE_PFCTOP,    1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_FCTTV(0),  1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VLNCTRL,   1,   PATTERN_TEST, 0x00000000, 0x00000000 },
+	{ SXE_RDBAL(0),  4,   PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ SXE_RDBAH(0),  4,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RDLEN(0),  4,   PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
+	{ SXE_RXDCTL(0), 4,   WRITE_NO_TEST, 0, SXE_RXDCTL_ENABLE },
+	{ SXE_RDT(0),    4,   PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_RXDCTL(0), 4,   WRITE_NO_TEST, 0, 0 },
+	{ SXE_TDBAL(0),  4,   PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ SXE_TDBAH(0),  4,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_TDLEN(0),  4,   PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ SXE_RXCTRL,    1,   SET_READ_TEST, 0x00000001, 0x00000001 },
+	{ SXE_RAL(0),    16,  TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RAL(0),    16,  TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	{ SXE_MTA(0),    128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ .reg = 0 }
+};
+
+static s32 sxe_hw_reg_pattern_test(struct sxe_hw *hw, u32 reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 pat, val, before;
+	struct sxe_adapter *adapter = hw->adapter;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE};
+
+	if (sxe_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = SXE_REG_READ(hw, reg);
+
+		SXE_REG_WRITE(hw, reg, test_pattern[pat] & write);
+		val = SXE_REG_READ(hw, reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			LOG_MSG_ERR(drv, "pattern test reg %04X failed: "
+					"got 0x%08X expected 0x%08X\n",
+				reg, val, (test_pattern[pat] & write & mask));
+			SXE_REG_WRITE(hw, reg, before);
+			ret = -SXE_DIAG_REG_PATTERN_TEST_ERR;
+			goto l_end;
+		}
+
+		SXE_REG_WRITE(hw, reg, before);
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_hw_reg_set_and_check(struct sxe_hw *hw, int reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 val, before;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	before = SXE_REG_READ(hw, reg);
+	SXE_REG_WRITE(hw, reg, write & mask);
+	val = SXE_REG_READ(hw, reg);
+	if ((write & mask) != (val & mask)) {
+		LOG_MSG_ERR(drv, "set/check reg %04X test failed: "
+				"got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
+		SXE_REG_WRITE(hw, reg, before);
+		ret = -SXE_DIAG_CHECK_REG_TEST_ERR;
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, reg, before);
+
+l_end:
+	return ret;
+}
+
+/*
+ * Walk the self_test_reg table and run the per-entry diagnostic on
+ * each element of a register array.
+ *
+ * The stride applied per iteration depends on the test type:
+ * 0x40 for queue-style register arrays, 4 for 32-bit tables and
+ * 8 for 64-bit tables (TABLE64_TEST_HI starts at reg + 4).
+ *
+ * Return: 0 on success, first non-zero diagnostic code otherwise.
+ */
+STATIC s32 sxe_hw_regs_test(struct sxe_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	const struct sxe_self_test_reg *test = self_test_reg;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* Status register is checked separately: it is read-only. */
+	ret = sxe_hw_status_reg_test(hw);
+	if (ret) {
+		LOG_MSG_ERR(drv, "status register test failed\n");
+		goto l_end;
+	}
+
+	/* Table is terminated by an entry with .reg == 0. */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case TABLE32_TEST:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 4),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 8),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				ret = sxe_hw_reg_pattern_test(hw,
+					(test->reg + 4) + (i * 8),
+					test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				ret = sxe_hw_reg_set_and_check(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				/* Write-only register: no readback check. */
+				SXE_REG_WRITE(hw, test->reg + (i * 0x40),
+						test->write);
+				break;
+			default:
+				LOG_ERROR_BDF("reg test mod err, type=%d\n",
+						test->test_type);
+				break;
+			}
+
+			if (ret) {
+				goto l_end;
+			}
+
+		}
+		test++;
+	}
+
+l_end:
+	return ret;
+}
+
+/* Dispatch table for basic device setup/reset/diagnostic operations. */
+static const struct sxe_setup_operations sxe_setup_ops = {
+	.regs_dump		= sxe_hw_reg_dump,
+	.reg_read		= sxe_read_reg,
+	.reg_write		= sxe_write_reg,
+	.regs_test		= sxe_hw_regs_test,
+	.reset			= sxe_hw_nic_reset,
+	.regs_flush		= sxe_hw_regs_flush,
+	.pf_rst_done_set	= sxe_hw_pf_rst_done_set,
+	.no_snoop_disable	= sxe_hw_no_snoop_disable,
+};
+
+
+/*
+ * Unmask the ring interrupts selected by @qmask.
+ *
+ * @qmask: 64-bit queue mask; low word maps to EIMS_EX(0),
+ *         high word to EIMS_EX(1). Zero halves are not written.
+ */
+static void sxe_hw_ring_irq_enable(struct sxe_hw *hw, u64 qmask)
+{
+	u32 lo = (u32)(qmask & 0xFFFFFFFF);
+	u32 hi = (u32)(qmask >> 32);
+
+	if (lo) {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(0), lo);
+	}
+
+	if (hi) {
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), hi);
+	}
+
+	return;
+}
+
+/*
+ * Read pending interrupt causes from EICR.
+ * NOTE(review): EICR is typically read-to-clear on this class of
+ * hardware — confirm against the register manual before relying on
+ * a second read returning the same causes.
+ */
+u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_EICR);
+}
+
+/* Clear the interrupt causes selected by @value via a write to EICR. */
+void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EICR, value);
+	return;
+}
+
+/* Return the current interrupt-cause-set register (EICS). */
+u32 sxe_hw_irq_cause_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_EICS);
+}
+
+/* Software-trigger the TCP-timer and "other" event interrupts. */
+static void sxe_hw_event_irq_trigger(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_EICS, (SXE_EICS_TCP_TIMER | SXE_EICS_OTHER));
+
+	return;
+}
+
+/*
+ * Software-trigger the ring interrupts in @eics.
+ * Both EICS_EX halves are always written (even if zero), unlike
+ * sxe_hw_ring_irq_enable().
+ */
+static void sxe_hw_ring_irq_trigger(struct sxe_hw *hw, u64 eics)
+{
+	u32 mask;
+
+	mask = (eics & 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_EICS_EX(0), mask);
+	mask = (eics >> 32);
+	SXE_REG_WRITE(hw, SXE_EICS_EX(1), mask);
+	return;
+}
+
+/*
+ * Configure interrupt auto-mask (EIAM).
+ *
+ * @is_msix: true  - auto-mask every vector via the extended EIAM pair;
+ *           false - auto-mask only the rx/tx queue causes.
+ */
+void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
+					bool is_msix)
+{
+	if (!is_msix) {
+		SXE_REG_WRITE(hw, SXE_EIAM, SXE_EICS_RTX_QUEUE);
+		return;
+	}
+
+	SXE_REG_WRITE(hw, SXE_EIAM_EX(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_EIAM_EX(1), 0xFFFFFFFF);
+
+	return;
+}
+
+/* Write the general purpose interrupt enable register (GPIE). */
+void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_GPIE, value);
+
+	return;
+}
+
+/* Read back the general purpose interrupt enable register (GPIE). */
+u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_GPIE);
+}
+
+/* Program the EITR vector-select register (EITRSEL). */
+static void sxe_hw_set_eitrsel(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EITRSEL, value);
+
+	return;
+}
+
+/*
+ * Map a misc (non-ring) event source to an MSI-X vector.
+ *
+ * @offset:  event source index; bit 0 selects which byte lane of
+ *           IVAR_MISC is programmed (position 0 or 8)
+ * @irq_idx: vector number; SXE_IVAR_ALLOC_VALID marks the entry live
+ *
+ * Read-modify-write of IVAR_MISC, touching only the 8-bit lane for
+ * this source.
+ */
+void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx)
+{
+	u8  allocation;
+	u32 ivar, position;
+
+	allocation = irq_idx | SXE_IVAR_ALLOC_VALID;
+
+	position = (offset & 1) * 8;
+
+	ivar = SXE_REG_READ(hw, SXE_IVAR_MISC);
+	/* Unsigned constant keeps the shift well-defined for any lane,
+	 * matching sxe_hw_ring_irq_map(). */
+	ivar &= ~(0xFFU << position);
+	ivar |= ((u32)allocation << position);
+
+	SXE_REG_WRITE(hw, SXE_IVAR_MISC, ivar);
+
+	return;
+}
+
+/*
+ * Map an rx or tx ring to an MSI-X vector via the IVAR table.
+ *
+ * @is_tx:   selects the tx byte lane (adds 8 to the bit position)
+ * @reg_idx: ring register index; two rings share one IVAR word
+ * @irq_idx: vector number; SXE_IVAR_ALLOC_VALID marks the entry live
+ *
+ * Read-modify-write of IVAR(reg_idx >> 1), touching only the 8-bit
+ * lane for this ring.
+ */
+void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
+						u16 reg_idx, u16 irq_idx)
+{
+	u8  allocation;
+	u32 ivar, position;
+
+	allocation = irq_idx | SXE_IVAR_ALLOC_VALID;
+
+	position = ((reg_idx & 1) * 16) + (8 * is_tx);
+
+	ivar = SXE_REG_READ(hw, SXE_IVAR(reg_idx >> 1));
+	/* position can reach 24; 0xFF << 24 as a signed int overflows
+	 * INT_MAX (undefined behavior), so shift an unsigned constant. */
+	ivar &= ~(0xFFU << position);
+	ivar |= ((u32)allocation << position);
+
+	SXE_REG_WRITE(hw, SXE_IVAR(reg_idx >> 1), ivar);
+
+	return;
+}
+
+/*
+ * Program the interrupt throttle rate for a ring vector.
+ * CNT_WDIS stops the write from also resetting the internal counter.
+ */
+void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
+						u16 irq_idx, u32 interval)
+{
+	u32 eitr = interval & SXE_EITR_ITR_MASK;
+
+	eitr |= SXE_EITR_CNT_WDIS;
+
+	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), eitr);
+
+	return;
+}
+
+/* Program the throttle register for an event vector (raw value). */
+static void sxe_hw_event_irq_interval_set(struct sxe_hw *hw,
+						u16 irq_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), value);
+
+	return;
+}
+
+/* Select which event causes are auto-cleared on read (EIAC). */
+void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIAC, value);
+
+	return;
+}
+
+/* Mask (disable) the interrupt causes selected by @value. */
+void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIMC, value);
+
+	return;
+}
+
+/* Unmask (enable) the interrupt causes selected by @value. */
+void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIMS, value);
+
+	return;
+}
+
+/* Mask every interrupt source (base + both extended words) and flush. */
+void sxe_hw_all_irq_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_EIMC, 0xFFFF0000);
+
+	SXE_REG_WRITE(hw, SXE_EIMC_EX(0), ~0);
+	SXE_REG_WRITE(hw, SXE_EIMC_EX(1), ~0);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Update only the delay-us field of the SPP processing register. */
+static void sxe_hw_spp_configure(struct sxe_hw *hw, u32 hw_spp_proc_delay_us)
+{
+	SXE_REG_WRITE(hw, SXE_SPP_PROC,
+			(SXE_REG_READ(hw, SXE_SPP_PROC) &
+			~SXE_SPP_PROC_DELAY_US_MASK) |
+			hw_spp_proc_delay_us);
+
+	return;
+}
+
+/*
+ * Interrupt self-test: for each of the first 10 vectors verify that
+ *  1) a masked vector does NOT report in *icr (skipped when @shared),
+ *  2) an unmasked, software-triggered vector DOES report,
+ *  3) other vectors stay silent when this one is masked again
+ *     (skipped when @shared).
+ *
+ * @icr: caller-visible cause accumulator, presumably updated by the
+ *       ISR between trigger and check — TODO confirm against caller.
+ *
+ * All interrupts are left disabled on return.
+ * Return: 0 on success, negative diagnostic code on first failure.
+ */
+static s32 sxe_hw_irq_test(struct sxe_hw *hw, u32 *icr, bool shared)
+{
+	s32 ret = 0;
+	u32 i, mask;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF);
+	sxe_hw_regs_flush(hw);
+	usleep_range(10000, 20000);
+
+	for (i = 0; i < 10; i++) {
+		mask = BIT(i);
+		if (!shared) {
+			LOG_INFO_BDF("test irq: irq test start\n");
+			*icr = 0;
+			/* Mask this vector, trigger all the others. */
+			SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF);
+			SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF);
+			sxe_hw_regs_flush(hw);
+			usleep_range(10000, 20000);
+
+			if (*icr & mask) {
+				LOG_ERROR_BDF("test irq: failed, eicr = %x\n", *icr);
+				ret = -SXE_DIAG_DISABLE_IRQ_TEST_ERR;
+				break;
+			}
+			LOG_INFO_BDF("test irq: irq test end\n");
+		}
+
+		LOG_INFO_BDF("test irq: mask irq test start\n");
+		*icr = 0;
+		/* Unmask and trigger this vector: it must fire. */
+		SXE_REG_WRITE(hw, SXE_EIMS, mask);
+		SXE_REG_WRITE(hw, SXE_EICS, mask);
+		sxe_hw_regs_flush(hw);
+		usleep_range(10000, 20000);
+
+		if (!(*icr & mask)) {
+			LOG_ERROR_BDF("test irq: mask failed, eicr = %x\n", *icr);
+			ret = -SXE_DIAG_ENABLE_IRQ_TEST_ERR;
+			break;
+		}
+		LOG_INFO_BDF("test irq: mask irq test end\n");
+
+		sxe_hw_specific_irq_disable(hw, mask);
+		sxe_hw_regs_flush(hw);
+		usleep_range(10000, 20000);
+
+		if (!shared) {
+			LOG_INFO_BDF("test irq: other irq test start\n");
+			*icr = 0;
+			/* With this vector masked, nothing may fire at all. */
+			SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF);
+			SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF);
+			sxe_hw_regs_flush(hw);
+			usleep_range(10000, 20000);
+
+			if (*icr) {
+				LOG_ERROR_BDF("test irq: other irq failed, eicr = %x\n", *icr);
+				ret = -SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR;
+				break;
+			}
+			LOG_INFO_BDF("test irq: other irq test end\n");
+		}
+	}
+
+	sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF);
+	sxe_hw_regs_flush(hw);
+	usleep_range(10000, 20000);
+
+	return ret;
+}
+
+/* Dispatch table for interrupt configuration and diagnostics. */
+static const struct sxe_irq_operations sxe_irq_ops = {
+	.event_irq_auto_clear_set	= sxe_hw_event_irq_auto_clear_set,
+	.ring_irq_interval_set		= sxe_hw_ring_irq_interval_set,
+	.event_irq_interval_set		= sxe_hw_event_irq_interval_set,
+	.set_eitrsel			= sxe_hw_set_eitrsel,
+	.ring_irq_map			= sxe_hw_ring_irq_map,
+	.event_irq_map			= sxe_hw_event_irq_map,
+	.irq_general_reg_set		= sxe_hw_irq_general_reg_set,
+	.irq_general_reg_get		= sxe_hw_irq_general_reg_get,
+	.ring_irq_auto_disable		= sxe_hw_ring_irq_auto_disable,
+	.pending_irq_read_clear		= sxe_hw_pending_irq_read_clear,
+	.pending_irq_write_clear	= sxe_hw_pending_irq_write_clear,
+	.ring_irq_enable		= sxe_hw_ring_irq_enable,
+	.irq_cause_get			= sxe_hw_irq_cause_get,
+	.event_irq_trigger		= sxe_hw_event_irq_trigger,
+	.ring_irq_trigger		= sxe_hw_ring_irq_trigger,
+	.specific_irq_disable		= sxe_hw_specific_irq_disable,
+	.specific_irq_enable		= sxe_hw_specific_irq_enable,
+	.all_irq_disable		= sxe_hw_all_irq_disable,
+	.spp_configure			= sxe_hw_spp_configure,
+	.irq_test			= sxe_hw_irq_test,
+};
+
+
+/*
+ * Decode the current MAC link speed from COMCTRL.
+ *
+ * Return: SXE_LINK_SPEED_10GB_FULL, SXE_LINK_SPEED_1GB_FULL, or
+ *         SXE_LINK_SPEED_UNKNOWN when neither speed bit set is present.
+ */
+u32 sxe_hw_link_speed_get(struct sxe_hw *hw)
+{
+	u32 speed, value;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	value = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	if ((value & SXE_COMCTRL_SPEED_10G) == SXE_COMCTRL_SPEED_10G) {
+		speed = SXE_LINK_SPEED_10GB_FULL;
+	} else if ((value & SXE_COMCTRL_SPEED_1G) == SXE_COMCTRL_SPEED_1G) {
+		speed = SXE_LINK_SPEED_1GB_FULL;
+	} else {
+		speed = SXE_LINK_SPEED_UNKNOWN;
+	}
+
+	/* Fix: the newline was previously embedded mid-string, splitting
+	 * the log message across two lines. */
+	LOG_DEBUG_BDF("hw link speed=%x, (0x80=10G, 0x20=1G), reg=%x\n",
+			speed, value);
+
+	return speed;
+}
+
+/*
+ * Request a MAC link speed by setting the matching COMCTRL speed bit.
+ * Unrecognized speeds leave the register unchanged apart from the
+ * read-modify-write.
+ */
+void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	if (speed == SXE_LINK_SPEED_1GB_FULL) {
+		ctrl |= SXE_COMCTRL_SPEED_1G;
+	} else if (speed == SXE_LINK_SPEED_10GB_FULL) {
+		ctrl |= SXE_COMCTRL_SPEED_10G;
+	}
+
+	SXE_REG_WRITE(hw, SXE_COMCTRL, ctrl);
+
+	return;
+}
+
+/* True when the LINKS register reports link-up (1G check helper). */
+STATIC bool sxe_hw_1g_link_up_check(struct sxe_hw *hw)
+{
+	return (SXE_REG_READ(hw, SXE_LINKS) & SXE_LINKS_UP) ? true : false;
+}
+
+/*
+ * Report whether the link is up.
+ * A 10G link additionally requires the 10G-links-down bit to be clear;
+ * otherwise the raw LINKS_UP bit alone decides.
+ */
+bool sxe_hw_is_link_state_up(struct sxe_hw *hw)
+{
+	bool ret = false;
+	u32 links_reg, link_speed;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	links_reg  = SXE_REG_READ(hw, SXE_LINKS);
+
+	LOG_DEBUG_BDF("nic link reg: 0x%x\n", links_reg);
+
+	if (links_reg & SXE_LINKS_UP) {
+		ret = true;
+
+		link_speed = sxe_hw_link_speed_get(hw);
+		if ((link_speed == SXE_LINK_SPEED_10GB_FULL) &&
+		    (links_reg & SXE_10G_LINKS_DOWN)) {
+			ret = false;
+		}
+	}
+
+	return ret;
+}
+
+/* Enable automatic padding of short frames on transmit (MACCFG). */
+void sxe_hw_mac_pad_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_MACCFG);
+	ctl |= SXE_MACCFG_PAD_EN;
+	SXE_REG_WRITE(hw, SXE_MACCFG, ctl);
+
+	return;
+}
+
+/*
+ * Program link-level (non-priority) flow control from hw->fc.
+ *
+ * Sets the rx/tx pause-enable bits in FLCTRL according to
+ * fc.current_mode, programs per-TC low/high watermarks (FCRTL/FCRTH),
+ * the pause-frame opcode (PFCTOP), pause timers (FCTTV) and the
+ * refresh threshold (FCRTV).
+ *
+ * Return: 0 on success, -SXE_ERR_CONFIG for an invalid fc mode.
+ */
+s32 sxe_hw_fc_enable(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u8  i;
+	u32 reg;
+	u32 flctrl_val;
+	u32 fcrtl, fcrth;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
+	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
+		       SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	switch (hw->fc.current_mode) {
+	case SXE_FC_NONE:
+		break;
+	case SXE_FC_RX_PAUSE:
+		flctrl_val |= SXE_FCTRL_RFCE_LFC_EN;
+		break;
+	case SXE_FC_TX_PAUSE:
+		flctrl_val |= SXE_FCTRL_TFCE_LFC_EN;
+		break;
+	case SXE_FC_FULL:
+		flctrl_val |= SXE_FCTRL_RFCE_LFC_EN;
+		flctrl_val |= SXE_FCTRL_TFCE_LFC_EN;
+		break;
+	default:
+		LOG_DEV_DEBUG("flow control param set incorrectly\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_ret;
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+		    hw->fc.high_water[i]) {
+			/* Watermarks are in units of 512 bytes (<< 9). */
+			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
+			fcrth = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+			/* Fallback threshold: half of (buffer - 24KB). */
+			fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1;
+		}
+
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), fcrth);
+	}
+
+	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) {
+		flctrl_val |= (SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
+
+	reg = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg &= ~SXE_PFCTOP_FCOP_MASK;
+	reg |= SXE_PFCTOP_FCT;
+	reg |= SXE_PFCTOP_FCOP_LFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
+
+	/* Each FCTTV register holds two 16-bit pause-time fields. */
+	reg = hw->fc.pause_time * 0x00010001U;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+l_ret:
+	return ret;
+}
+
+/*
+ * Advertise the local flow-control capability in the 1G PCS
+ * auto-negotiation register (SYM/ASM pause bits) based on
+ * fc.requested_mode. SXE_FC_DEFAULT is promoted to SXE_FC_FULL.
+ */
+void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw)
+{
+	u32 reg = 0;
+
+	if (hw->fc.requested_mode == SXE_FC_DEFAULT) {
+		hw->fc.requested_mode = SXE_FC_FULL;
+	}
+
+	reg = SXE_REG_READ(hw, SXE_PCS1GANA);
+
+	switch (hw->fc.requested_mode) {
+	case SXE_FC_NONE:
+		reg &= ~(SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE);
+		break;
+	case SXE_FC_TX_PAUSE:
+		reg |= SXE_PCS1GANA_ASM_PAUSE;
+		reg &= ~SXE_PCS1GANA_SYM_PAUSE;
+		break;
+	case SXE_FC_RX_PAUSE:
+	case SXE_FC_FULL:
+		/* RX_PAUSE also advertises SYM per 802.3 Annex 28B. */
+		reg |= SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE;
+		break;
+	default:
+		LOG_ERROR("Flow control param set incorrectly.");
+		break;
+	}
+
+	SXE_REG_WRITE(hw, SXE_PCS1GANA, reg);
+	return;
+}
+
+/*
+ * Program priority flow control (PFC) for one traffic class.
+ *
+ * @tc_idx: traffic class whose watermarks are (re)programmed.
+ *
+ * The TFCE_PFC_EN bit is kept set while more than one TC still has
+ * rx flow control enabled (FCRTH.FCEN), even when this call disables
+ * it for @tc_idx.
+ *
+ * Return: 0 on success, -SXE_ERR_CONFIG for an invalid fc mode.
+ */
+s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
+{
+	s32 ret = 0;
+	u8  i;
+	u32 reg;
+	u32 flctrl_val;
+	u32 fcrtl, fcrth;
+	struct sxe_adapter *adapter = hw->adapter;
+	u8 rx_en_num;
+
+	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
+	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
+		       SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	switch (hw->fc.current_mode) {
+	case SXE_FC_NONE:
+		/* Count TCs that still have rx fc enabled in hardware. */
+		rx_en_num = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
+			if (reg & SXE_FCRTH_FCEN) {
+				rx_en_num++;
+			}
+		}
+		if (rx_en_num > 1) {
+			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		}
+
+		break;
+
+	case SXE_FC_RX_PAUSE:
+		flctrl_val |= SXE_FCTRL_RFCE_PFC_EN;
+
+		/* Same TC count as the NONE case above. */
+		rx_en_num = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
+			if (reg & SXE_FCRTH_FCEN) {
+				rx_en_num++;
+			}
+		}
+
+		if (rx_en_num > 1) {
+			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		}
+
+		break;
+	case SXE_FC_TX_PAUSE:
+		flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		break;
+	case SXE_FC_FULL:
+		flctrl_val |= SXE_FCTRL_RFCE_PFC_EN;
+		flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		break;
+	default:
+		LOG_DEV_DEBUG("flow control param set incorrectly\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_ret;
+	}
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+	    hw->fc.high_water[tc_idx]) {
+		/* Watermarks are in units of 512 bytes (<< 9). */
+		fcrtl = (hw->fc.low_water[tc_idx] << 9) | SXE_FCRTL_XONE;
+		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), fcrtl);
+		fcrth = (hw->fc.high_water[tc_idx] << 9) | SXE_FCRTH_FCEN;
+	} else {
+		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), 0);
+		/* Fallback threshold: half of (buffer - 24KB). */
+		fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(tc_idx)) - 24576) >> 1;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTH(tc_idx), fcrth);
+
+	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) {
+		/* Per-TC enable/XON bits live at offsets 16 and 24. */
+		flctrl_val |= (BIT(tc_idx) << 16) & SXE_FCTRL_TFCE_FCEN_MASK;
+		flctrl_val |= (BIT(tc_idx) << 24) & SXE_FCTRL_TFCE_XONE_MASK;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
+
+	reg = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg &= ~SXE_PFCTOP_FCOP_MASK;
+	reg |= SXE_PFCTOP_FCT;
+	reg |= SXE_PFCTOP_FCOP_PFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
+
+	/* Each FCTTV register holds two 16-bit pause-time fields. */
+	reg = hw->fc.pause_time * 0x00010001U;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+l_ret:
+	return ret;
+}
+
+/* Enable CRC generation/checking on both tx and rx paths (PCCTRL). */
+void sxe_hw_crc_configure(struct sxe_hw *hw)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_PCCTRL);
+
+	ctrl |=  SXE_PCCTRL_TXCE | SXE_PCCTRL_RXCE | SXE_PCCTRL_PCSC_ALL;
+	SXE_REG_WRITE(hw, SXE_PCCTRL, ctrl);
+
+	return;
+}
+
+/* Switch MAC loopback mode on or off (absolute write to LPBKCTRL). */
+void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 value;
+
+	value = (true == is_enable) ? SXE_LPBK_EN : 0;
+
+	SXE_REG_WRITE(hw, SXE_LPBKCTRL, value);
+
+	return;
+}
+
+/* Enable the MAC tx and rx data paths (COMCTRL). */
+void sxe_hw_mac_txrx_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_COMCTRL);
+	ctl |= SXE_COMCTRL_TXEN | SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
+	SXE_REG_WRITE(hw, SXE_COMCTRL, ctl);
+
+	return;
+}
+
+/*
+ * Program the maximum frame size (MAXFS.MFS). The MFS field is only
+ * rewritten when it differs from the current value; the rx/tx
+ * frame-size-select bits are always set.
+ */
+void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame)
+{
+	u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS);
+
+	if (max_frame != (maxfs >> SXE_MAXFS_MFS_SHIFT)) {
+		maxfs &= ~SXE_MAXFS_MFS_MASK;
+		maxfs |= max_frame << SXE_MAXFS_MFS_SHIFT;
+	}
+
+	maxfs |=  SXE_MAXFS_RFSEL | SXE_MAXFS_TFSEL;
+	SXE_REG_WRITE(hw, SXE_MAXFS, maxfs);
+
+	return;
+}
+
+/* Read back the configured maximum frame size from MAXFS.MFS. */
+u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw)
+{
+	u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS);
+
+	maxfs &= SXE_MAXFS_MFS_MASK;
+	maxfs >>= SXE_MAXFS_MFS_SHIFT;
+
+	return maxfs;
+}
+
+/*
+ * Flow-control autonegotiation is only supported at 1G.
+ * With the link down the capability cannot be ruled out, so report
+ * true. Both hardware reads are performed unconditionally, matching
+ * the established access pattern.
+ */
+bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw)
+{
+	bool link_up = sxe_hw_is_link_state_up(hw);
+	u32  speed = sxe_hw_link_speed_get(hw);
+
+	if (!link_up) {
+		return true;
+	}
+
+	return speed == SXE_LINK_SPEED_1GB_FULL;
+}
+
+/* Initialize software flow-control state to its defaults. */
+STATIC void sxe_hw_fc_param_init(struct sxe_hw *hw)
+{
+	/* Default to symmetric (rx + tx) link flow control. */
+	hw->fc.requested_mode = SXE_FC_FULL;
+	hw->fc.current_mode = SXE_FC_FULL;
+
+	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
+	hw->fc.disable_fc_autoneg = true;
+
+	return;
+}
+
+/* Record the rx high watermark for one traffic class (software only). */
+void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark)
+{
+	hw->fc.high_water[tc_idx] = mark;
+
+	return;
+}
+
+/* Record the rx low watermark for one traffic class (software only). */
+void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark)
+{
+	hw->fc.low_water[tc_idx] = mark;
+
+	return;
+}
+
+/* True when flow-control autonegotiation has been disabled. */
+bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw)
+{
+	return hw->fc.disable_fc_autoneg;
+}
+
+/* Enable/disable flow-control autonegotiation (software flag only). */
+void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
+							bool is_disabled)
+{
+	hw->fc.disable_fc_autoneg = is_disabled;
+	return;
+}
+
+/* Return the flow-control mode currently in effect. */
+static enum sxe_fc_mode sxe_hw_fc_current_mode_get(struct sxe_hw *hw)
+{
+	return hw->fc.current_mode;
+}
+
+/* Return the flow-control mode requested by the user. */
+static enum sxe_fc_mode sxe_hw_fc_requested_mode_get(struct sxe_hw *hw)
+{
+	return hw->fc.requested_mode;
+}
+
+/* Record the user-requested flow-control mode (software only). */
+void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
+						enum sxe_fc_mode mode)
+{
+	hw->fc.requested_mode = mode;
+	return;
+}
+
+/* Dispatch table for MAC link, frame-size and flow-control operations. */
+static const struct sxe_mac_operations sxe_mac_ops = {
+	.link_up_1g_check			= sxe_hw_1g_link_up_check,
+	.link_state_is_up		= sxe_hw_is_link_state_up,
+	.link_speed_get			= sxe_hw_link_speed_get,
+	.link_speed_set			= sxe_hw_link_speed_set,
+	.pad_enable			= sxe_hw_mac_pad_enable,
+	.crc_configure			= sxe_hw_crc_configure,
+	.loopback_switch		= sxe_hw_loopback_switch,
+	.txrx_enable			= sxe_hw_mac_txrx_enable,
+	.max_frame_set			= sxe_hw_mac_max_frame_set,
+	.max_frame_get			= sxe_hw_mac_max_frame_get,
+	.fc_enable			= sxe_hw_fc_enable,
+	.fc_autoneg_localcap_set	= sxe_fc_autoneg_localcap_set,
+	.fc_tc_high_water_mark_set	= sxe_hw_fc_tc_high_water_mark_set,
+	.fc_tc_low_water_mark_set	= sxe_hw_fc_tc_low_water_mark_set,
+	.fc_param_init			= sxe_hw_fc_param_init,
+	.fc_current_mode_get		= sxe_hw_fc_current_mode_get,
+	.fc_requested_mode_get		= sxe_hw_fc_requested_mode_get,
+	.fc_requested_mode_set		= sxe_hw_fc_requested_mode_set,
+	.is_fc_autoneg_disabled		= sxe_hw_is_fc_autoneg_disabled,
+	.fc_autoneg_disable_set		= sxe_hw_fc_autoneg_disable_set,
+};
+
+/* Read the global rx filter control register (FCTRL). */
+u32 sxe_hw_rx_mode_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_FCTRL);
+}
+
+/* Read the per-pool rx offload/filter register (VMOLR). */
+u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx)
+{
+	return SXE_REG_READ(hw, SXE_VMOLR(pool_idx));
+}
+
+/* Write the global rx filter control register (FCTRL). */
+void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl)
+{
+	SXE_REG_WRITE(hw, SXE_FCTRL, filter_ctrl);
+	return;
+}
+
+/* Write the per-pool rx offload/filter register (VMOLR). */
+void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
+						u32 vmolr, u16 pool_idx)
+{
+	SXE_REG_WRITE(hw, SXE_VMOLR(pool_idx), vmolr);
+	return;
+}
+
+/* Enable/disable hardware LRO by clearing/setting RFCTL.LRO_DIS. */
+void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable)
+{
+	u32 rfctl = SXE_REG_READ(hw, SXE_RFCTL);
+	rfctl &= ~SXE_RFCTL_LRO_DIS;
+
+	if (!is_enable) {
+		rfctl |= SXE_RFCTL_LRO_DIS;
+	}
+
+	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
+	return;
+}
+
+/*
+ * Disable NFS read/write packet recognition.
+ * NOTE(review): this writes RFCTL absolutely (no read-modify-write),
+ * so it clears any previously-set RFCTL bits such as LRO_DIS —
+ * confirm this is intended relative to sxe_hw_rx_lro_enable().
+ */
+void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw)
+{
+	u32 rfctl = 0;
+
+	rfctl |= (SXE_RFCTL_NFSW_DIS | SXE_RFCTL_NFSR_DIS);
+	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
+	return;
+}
+
+/* Set RXCSUM.PCSD, disabling checksum offload for fragmented UDP. */
+void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw)
+{
+	u32 rxcsum;
+
+	rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
+	rxcsum |= SXE_RXCSUM_PCSD;
+	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
+	return;
+}
+
+/*
+ * Program the station address used as the source MAC of generated
+ * pause frames. SACONL takes the low four octets, SACONH the high two.
+ */
+void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr)
+{
+	u32 high, low;
+
+	low  = (u32)mac_addr[5];
+	low |= (u32)mac_addr[4] << 8;
+	low |= (u32)mac_addr[3] << 16;
+	low |= (u32)mac_addr[2] << 24;
+
+	high  = (u32)mac_addr[1] << 16;
+	high |= (u32)mac_addr[0] << 24;
+
+	SXE_REG_WRITE(hw, SXE_SACONH, high);
+	SXE_REG_WRITE(hw, SXE_SACONL, low);
+
+	return;
+}
+
+/*
+ * Install a unicast MAC address in receive-address register @rar_idx
+ * and associate it with @pool_idx.
+ *
+ * RAL is written and flushed before RAH so the entry never becomes
+ * valid (RAH_AV) with a stale low half.
+ *
+ * Return: 0 on success, -SXE_ERR_PARAM if @rar_idx is out of range.
+ */
+s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
+					u8 *addr, u32 pool_idx)
+{
+	s32 ret = 0;
+	u32 rar_low, rar_high;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		LOG_DEV_DEBUG("RAR rar_idx %d is out of range:%u.\n",
+			rar_idx, SXE_UC_ENTRY_NUM_MAX);
+		ret = -SXE_ERR_PARAM;
+		goto l_end;
+	}
+
+	sxe_hw_uc_addr_pool_enable(hw, rar_idx, pool_idx);
+
+	rar_low = ((u32)addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+
+	/* Keep reserved RAH bits; replace address bytes and AV. */
+	rar_high = SXE_REG_READ(hw, SXE_RAH(rar_idx));
+	rar_high &= ~(0x0000FFFF | SXE_RAH_AV);
+	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	rar_high |= SXE_RAH_AV;
+
+	SXE_REG_WRITE(hw, SXE_RAL(rar_idx), rar_low);
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_RAH(rar_idx), rar_high);
+
+	LOG_DEBUG_BDF("rar_idx:%d pool_idx:%u addr:%pM add to rar done\n",
+		rar_idx, pool_idx, addr);
+
+l_end:
+	return ret;
+}
+
+/*
+ * Remove the unicast entry at @index: invalidate RAH first (clearing
+ * AV), flush, then zero RAL, and detach all pools.
+ *
+ * Return: 0 on success, -SXE_ERR_PARAM if @index is out of range.
+ */
+s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index)
+{
+	s32 ret = 0;
+	u32 rar_high;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (index >= SXE_UC_ENTRY_NUM_MAX) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("uc_entry_num:%d index:%u invalid.(err:%d)\n",
+			  SXE_UC_ENTRY_NUM_MAX, index, ret);
+		goto l_end;
+	}
+
+	rar_high = SXE_REG_READ(hw, SXE_RAH(index));
+	rar_high &= ~(0x0000FFFF | SXE_RAH_AV);
+
+	SXE_REG_WRITE(hw, SXE_RAH(index), rar_high);
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_RAL(index), 0);
+
+	sxe_hw_uc_addr_pool_disable(hw, index);
+
+l_end:
+	return ret;
+}
+
+/* Write one 32-bit word of the multicast table array (MTA). */
+void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
+						u8 index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_MTA(index), value);
+	return;
+}
+
+/* Set a single bit in the multicast table array (read-modify-write). */
+void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_idx)
+{
+	u32 value = SXE_REG_READ(hw, SXE_MTA(reg_idx));
+
+	value |= BIT(bit_idx);
+
+	LOG_INFO("mta update value:0x%x.\n", value);
+	SXE_REG_WRITE(hw, SXE_MTA(reg_idx), value);
+
+	return;
+}
+
+/* Enable multicast filtering with hash filter type 0 (MCSTCTRL). */
+void sxe_hw_mc_filter_enable(struct sxe_hw *hw)
+{
+	u32 value = SXE_MC_FILTER_TYPE0 | SXE_MCSTCTRL_MFE;
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, value);
+
+	return;
+}
+
+/* Disable multicast filtering, preserving the filter-type bits. */
+static void sxe_hw_mc_filter_disable(struct sxe_hw *hw)
+{
+	u32 value = SXE_REG_READ(hw, SXE_MCSTCTRL);
+
+	value &= ~SXE_MCSTCTRL_MFE;
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, value);
+
+	return;
+}
+
+/*
+ * Clear all L2 address filters: every RAR entry, the unicast table
+ * array (UTA) and the multicast table array (MTA). MCSTCTRL is reset
+ * to filter type 0 with multicast filtering off.
+ */
+void sxe_hw_uc_addr_clear(struct sxe_hw *hw)
+{
+	u32 i;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_uc_addr_pool_disable(hw, 0);
+
+	LOG_DEV_DEBUG("clear uc filter addr register:0-%d\n",
+		   SXE_UC_ENTRY_NUM_MAX - 1);
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_RAL(i), 0);
+		SXE_REG_WRITE(hw, SXE_RAH(i), 0);
+	}
+
+	LOG_DEV_DEBUG("clear %u uta filter addr register\n",
+			SXE_UTA_ENTRY_NUM_MAX);
+	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_UTA(i), 0);
+	}
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, SXE_MC_FILTER_TYPE0);
+
+	LOG_DEV_DEBUG("clear %u mta filter addr register\n",
+			SXE_MTA_ENTRY_NUM_MAX);
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_MTA(i), 0);
+	}
+
+	return;
+}
+
+/* Program one EtherType filter register (ETQF). */
+static void sxe_hw_ethertype_filter_set(struct sxe_hw *hw,
+						u8 filter_type, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), value);
+	return;
+}
+
+/*
+ * Enable virtualization (VT) with @default_pool as the default pool
+ * for packets matching no pool filter; replication is enabled.
+ */
+void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 default_pool)
+{
+	u32 ctrl;
+
+	ctrl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	ctrl |= SXE_VT_CTL_VT_ENABLE; /* turn on virtualization */
+	ctrl &= ~SXE_VT_CTL_POOL_MASK;
+	ctrl |= default_pool << SXE_VT_CTL_POOL_SHIFT;
+	ctrl |= SXE_VT_CTL_REPLEN; /* enable replication */
+
+	SXE_REG_WRITE(hw, SXE_VT_CTL, ctrl);
+
+	return;
+}
+
+/* Clear the pool-enable bit in VT_CTL, disabling VMDq pooling. */
+void sxe_hw_vt_disable(struct sxe_hw *hw)
+{
+	u32 vmdctl;
+
+	vmdctl = SXE_REG_READ(hw, SXE_VT_CTL);
+	vmdctl &= ~SXE_VMD_CTL_POOL_EN;
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vmdctl);
+
+	return;
+}
+
+#ifdef SXE_WOL_CONFIGURE
+
+/* Clear all latched wake-up status bits (WUS is write-1-to-clear). */
+static void sxe_hw_wol_status_set(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_WUS, ~0);
+
+	return;
+}
+
+/*
+ * Arm wake-on-LAN: enable PME, accept broadcast (and multicast when
+ * requested by @wol_status), program the wake-up filters and clear
+ * any stale wake status.
+ */
+static void sxe_hw_wol_mode_set(struct sxe_hw *hw, u32 wol_status)
+{
+	u32 fctrl;
+
+	SXE_REG_WRITE(hw, SXE_WUC, SXE_WUC_PME_EN);
+
+	fctrl = SXE_REG_READ(hw, SXE_FCTRL);
+	fctrl |= SXE_FCTRL_BAM;
+	if (wol_status & SXE_WUFC_MC) {
+		fctrl |= SXE_FCTRL_MPE;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCTRL, fctrl);
+
+	SXE_REG_WRITE(hw, SXE_WUFC, wol_status);
+	sxe_hw_wol_status_set(hw);
+
+	return;
+}
+
+/* Disarm wake-on-LAN: clear wake-up control and filter registers. */
+static void sxe_hw_wol_mode_clean(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_WUC, 0);
+	SXE_REG_WRITE(hw, SXE_WUFC, 0);
+
+	return;
+}
+#endif
+
+/* Dispatch table for L2 MAC address filtering, VT and WOL operations. */
+static const struct sxe_filter_mac_operations sxe_filter_mac_ops = {
+	.rx_mode_get			= sxe_hw_rx_mode_get,
+	.rx_mode_set			= sxe_hw_rx_mode_set,
+	.pool_rx_mode_get		= sxe_hw_pool_rx_mode_get,
+	.pool_rx_mode_set		= sxe_hw_pool_rx_mode_set,
+	.rx_lro_enable			= sxe_hw_rx_lro_enable,
+	.uc_addr_add			= sxe_hw_uc_addr_add,
+	.uc_addr_del			= sxe_hw_uc_addr_del,
+	.uc_addr_clear			= sxe_hw_uc_addr_clear,
+	.fc_mac_addr_set		= sxe_hw_fc_mac_addr_set,
+	.mta_hash_table_set		= sxe_hw_mta_hash_table_set,
+	.mta_hash_table_update		= sxe_hw_mta_hash_table_update,
+
+	.mc_filter_enable		= sxe_hw_mc_filter_enable,
+	.mc_filter_disable		= sxe_hw_mc_filter_disable,
+	.rx_nfs_filter_disable		= sxe_hw_rx_nfs_filter_disable,
+	.ethertype_filter_set		= sxe_hw_ethertype_filter_set,
+	.vt_ctrl_configure		= sxe_hw_vt_ctrl_cfg,
+	.uc_addr_pool_enable		= sxe_hw_uc_addr_pool_enable,
+	.rx_udp_frag_checksum_disable	= sxe_hw_rx_udp_frag_checksum_disable,
+
+#ifdef SXE_WOL_CONFIGURE
+	.wol_mode_set			= sxe_hw_wol_mode_set,
+	.wol_mode_clean			= sxe_hw_wol_mode_clean,
+	.wol_status_set			= sxe_hw_wol_status_set,
+#endif
+
+	.vt_disable                     = sxe_hw_vt_disable,
+};
+
+/* Read one VLAN pool filter entry (VLVF). */
+u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index)
+{
+	return SXE_REG_READ(hw, SXE_VLVF(reg_index));
+}
+
+/* Write one VLAN pool filter entry (VLVF). */
+static void sxe_hw_vlan_pool_filter_write(struct sxe_hw *hw,
+						u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(reg_index), value);
+	return;
+}
+
+/* Read one half of a VLVF entry's pool membership bitmap (VLVFB). */
+static u32 sxe_hw_vlan_pool_filter_bitmap_read(struct sxe_hw *hw,
+							u16 reg_index)
+{
+	return SXE_REG_READ(hw, SXE_VLVFB(reg_index));
+}
+
+/* Write one half of a VLVF entry's pool membership bitmap (VLVFB). */
+static void sxe_hw_vlan_pool_filter_bitmap_write(struct sxe_hw *hw,
+						u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLVFB(reg_index), value);
+	return;
+}
+
+/* Write one 32-bit word of the VLAN filter table array (VFTA). */
+void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
+					u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VFTA(reg_index), value);
+	return;
+}
+
+/* Read one 32-bit word of the VLAN filter table array (VFTA). */
+u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index)
+{
+	return SXE_REG_READ(hw, SXE_VFTA(reg_index));
+}
+
+/* Globally enable or disable VLAN filtering (VLNCTRL.VFE). */
+void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 vlnctrl;
+
+	vlnctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	if (is_enable) {
+		vlnctrl |= SXE_VLNCTRL_VFE;
+	} else {
+		vlnctrl &= ~SXE_VLNCTRL_VFE;
+	}
+
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlnctrl);
+	return;
+}
+
+/*
+ * Control whether pool @vf accepts untagged packets (VMOLR.AUPE).
+ * Broadcast accept (BAM) is always kept enabled.
+ */
+static void sxe_hw_vlan_untagged_pkts_rcv_switch(struct sxe_hw *hw,
+							u32 vf, bool accept)
+{
+	u32 vmolr = SXE_REG_READ(hw, SXE_VMOLR(vf));
+	vmolr |= SXE_VMOLR_BAM;
+	if (accept) {
+		vmolr |= SXE_VMOLR_AUPE;
+	} else {
+		vmolr &= ~SXE_VMOLR_AUPE;
+	}
+
+	LOG_WARN("vf:%u value:0x%x.\n", vf, vmolr);
+	SXE_REG_WRITE(hw, SXE_VMOLR(vf), vmolr);
+	return;
+}
+
+/*
+ * Locate the VLVF slot holding @vlan, or the first empty slot.
+ *
+ * @vlvf_bypass: when true, an empty slot is not an acceptable answer
+ *               and -SXE_ERR_NO_SPACE is returned instead.
+ *
+ * Entries are scanned from the top down; slot 0 is reserved (it is
+ * returned directly for vlan 0 and never offered as an empty slot).
+ *
+ * Return: matching/empty slot index, or -SXE_ERR_NO_SPACE.
+ */
+s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+	s32 ret, regindex, first_empty_slot;
+	u32 bits;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (vlan == 0) {
+		ret = 0;
+		goto l_end;
+	}
+
+	/* Seed with the error code so bypass mode never reports empty. */
+	first_empty_slot = vlvf_bypass ? -SXE_ERR_NO_SPACE : 0;
+
+	/* Compare against entries with the valid bit set. */
+	vlan |= SXE_VLVF_VIEN;
+
+	for (regindex = SXE_VLVF_ENTRIES; --regindex;) {
+		bits = SXE_REG_READ(hw, SXE_VLVF(regindex));
+		if (bits == vlan) {
+			ret = regindex;
+			goto l_end;
+		}
+
+		if (!first_empty_slot && !bits) {
+			first_empty_slot = regindex;
+		}
+	}
+
+	if (!first_empty_slot) {
+		LOG_DEV_WARN("no space in VLVF.\n");
+	}
+
+	/* GNU ?: keeps first_empty_slot (index or error) when non-zero. */
+	ret = first_empty_slot ? : -SXE_ERR_NO_SPACE;
+l_end:
+	return ret;
+}
+
+/*
+ * Add or remove (@vlan_on) VLAN @vid for @pool in the VFTA bitmap and,
+ * when virtualization is enabled, in the shared VLVF/VLVFB tables.
+ *
+ * vfta_delta carries the single bit to flip in VFTA; it is zeroed when
+ * other pools still use the VLAN so the shared VFTA bit must survive.
+ * A VLVF entry whose pool bitmap becomes empty is torn down entirely.
+ *
+ * Return: 0 on success, -SXE_ERR_PARAM for out-of-range @vid/@pool,
+ *         or the negative result of the VLVF slot search.
+ */
+s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
+					u32 vid, u32 pool,
+					bool vlan_on, bool vlvf_bypass)
+{
+	s32 ret = 0;
+	u32 regidx, vfta_delta, vfta, bits;
+	s32 vlvf_index;
+
+	LOG_DEBUG("vid: %u, pool: %u, vlan_on: %d, vlvf_bypass: %d",
+		vid, pool, vlan_on, vlvf_bypass);
+
+	if ((vid > 4095) || (pool > 63)) {
+		ret = -SXE_ERR_PARAM;
+		goto l_end;
+	}
+
+
+	regidx = vid / 32;
+	vfta_delta = BIT(vid % 32);
+	vfta = SXE_REG_READ(hw, SXE_VFTA(regidx));
+
+	/* delta survives only if the bit actually needs to change. */
+	vfta_delta &= vlan_on ? ~vfta : vfta;
+	vfta ^= vfta_delta;
+
+	if (!(SXE_REG_READ(hw, SXE_VT_CTL) & SXE_VT_CTL_VT_ENABLE)) {
+		goto vfta_update;
+	}
+
+	vlvf_index = sxe_hw_vlvf_slot_find(hw, vid, vlvf_bypass);
+	if (vlvf_index < 0) {
+		if (vlvf_bypass) {
+			goto vfta_update;
+		}
+
+		ret = vlvf_index;
+		goto l_end;
+	}
+
+	/* Each VLVF entry owns two VLVFB words; pick the pool's half. */
+	bits = SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32));
+
+	bits |= BIT(pool % 32);
+	if (vlan_on) {
+		goto vlvf_update;
+	}
+
+	/* Removing: undo the set above to test the remaining members. */
+	bits ^= BIT(pool % 32);
+
+	if (!bits &&
+	    !SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + 1 - pool / 32))) {
+		/* Last member gone: flip VFTA and free the VLVF slot. */
+		if (vfta_delta) {
+			SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
+		}
+
+		SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), 0);
+
+		goto l_end;
+	}
+
+	/* Other pools still use this VLAN: keep the shared VFTA bit. */
+	vfta_delta = 0;
+
+vlvf_update:
+	SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), bits);
+	SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), SXE_VLVF_VIEN | vid);
+
+vfta_update:
+	if (vfta_delta) {
+		SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
+	}
+
+l_end:
+	return ret;
+}
+
+/*
+ * Clear the whole VLAN filter table array (VFTA) and every VLAN pool
+ * filter entry together with both halves of its pool bitmap (VLVFB).
+ */
+void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw)
+{
+	u32 i;
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0);
+	}
+
+	for (i = 0; i < SXE_VLVF_ENTRIES; i++) {
+		SXE_REG_WRITE(hw, SXE_VLVF(i), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(i * 2), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(i * 2 + 1), 0);
+	}
+
+	return;
+}
+
+/* Dispatch table for VLAN filtering operations. */
+static const struct sxe_filter_vlan_operations sxe_filter_vlan_ops = {
+	.pool_filter_read		= sxe_hw_vlan_pool_filter_read,
+	.pool_filter_write		= sxe_hw_vlan_pool_filter_write,
+	.pool_filter_bitmap_read	= sxe_hw_vlan_pool_filter_bitmap_read,
+	.pool_filter_bitmap_write	= sxe_hw_vlan_pool_filter_bitmap_write,
+	.filter_array_write		= sxe_hw_vlan_filter_array_write,
+	.filter_array_read		= sxe_hw_vlan_filter_array_read,
+	.filter_array_clear		= sxe_hw_vlan_filter_array_clear,
+	.filter_switch			= sxe_hw_vlan_filter_switch,
+	.untagged_pkts_rcv_switch	= sxe_hw_vlan_untagged_pkts_rcv_switch,
+	.filter_configure		= sxe_hw_vlan_filter_configure,
+};
+
+
+/* Start or stop the rx packet buffer (DRXCFG.DBURX_START). */
+static void sxe_hw_rx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 cfg = SXE_REG_READ(hw, SXE_DRXCFG);
+
+	if (is_on) {
+		cfg |= SXE_DRXCFG_DBURX_START;
+	} else {
+		cfg &= ~SXE_DRXCFG_DBURX_START;
+	}
+
+	SXE_REG_WRITE(hw, SXE_DRXCFG, cfg);
+
+	return;
+}
+
+/*
+ * Partition the rx packet buffer among @num_pb traffic classes.
+ *
+ * @headroom: bytes reserved off the top before partitioning
+ * @strategy: PBA_STRATEGY_EQUAL    - equal share per TC;
+ *            PBA_STRATEGY_WEIGHTED - first half of the TCs get a
+ *            5/8-weighted share each, remainder split equally
+ *            (falls through into the EQUAL arm for the rest)
+ *
+ * Unused RXPBSIZE entries are zeroed.
+ */
+static void sxe_hw_rx_pkt_buf_size_configure(struct sxe_hw *hw,
+			     u8 num_pb,
+			     u32 headroom,
+			     u16 strategy)
+{
+	u16 total_buf_size = (SXE_RX_PKT_BUF_SIZE - headroom);
+	u32 rx_buf_size;
+	u16 i = 0;
+
+	if (!num_pb) {
+		num_pb = 1;
+	}
+
+	switch (strategy) {
+	case (PBA_STRATEGY_WEIGHTED):
+		/* 5/8 weighting: (total * 5 * 2) / (num_pb * 8). */
+		rx_buf_size = ((total_buf_size * 5 * 2) / (num_pb * 8));
+		total_buf_size -= rx_buf_size * (num_pb / 2);
+		rx_buf_size <<= SXE_RX_PKT_BUF_SIZE_SHIFT;
+		for (i = 0; i < (num_pb / 2); i++) {
+			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
+		}
+		fallthrough;
+	case (PBA_STRATEGY_EQUAL):
+		rx_buf_size = (total_buf_size / (num_pb - i))
+				<< SXE_RX_PKT_BUF_SIZE_SHIFT;
+		for (; i < num_pb; i++) {
+			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
+	}
+
+	return;
+}
+
+/* Read back the rx packet buffer size configured for TC @pb. */
+u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb)
+{
+	return SXE_REG_READ(hw, SXE_RXPBSIZE(pb));
+}
+
+/*
+ * Select the rx multi-queue mode (MRQC) from the TC count, the
+ * rings-per-pool layout and whether SR-IOV is active.
+ */
+void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
+						u8 tcs, bool is_4q_per_pool,
+						bool sriov_enable)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc &= ~SXE_MRQE_MASK;
+
+	if (sriov_enable) {
+		if (tcs > 4) {
+			mrqc |= SXE_MRQC_VMDQRT8TCEN; /* VMDq, 8 TCs */
+		} else if (tcs > 1) {
+			mrqc |= SXE_MRQC_VMDQRT4TCEN; /* VMDq, 4 TCs */
+		} else if (is_4q_per_pool == true) {
+			mrqc |= SXE_MRQC_VMDQRSS32EN;
+		} else {
+			mrqc |= SXE_MRQC_VMDQRSS64EN;
+		}
+	} else {
+		if (tcs > 4) {
+			mrqc |= SXE_MRQC_RTRSS8TCEN;
+		} else if (tcs > 1) {
+			mrqc |= SXE_MRQC_RTRSS4TCEN;
+		} else {
+			mrqc |= SXE_MRQC_RSSEN;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	return;
+}
+
+/* Program MRQC RSS field selection; TCP always hashed, UDP only per version. */
+static void sxe_hw_rss_hash_pkt_type_set(struct sxe_hw *hw, u32 version)
+{
+	u32 field = SXE_MRQC_RSS_FIELD_IPV4 |
+		    SXE_MRQC_RSS_FIELD_IPV4_TCP |
+		    SXE_MRQC_RSS_FIELD_IPV6 |
+		    SXE_MRQC_RSS_FIELD_IPV6_TCP;
+
+	if (version == SXE_RSS_IP_VER_4)
+		field |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+
+	if (version == SXE_RSS_IP_VER_6)
+		field |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+
+	SXE_REG_WRITE(hw, SXE_MRQC, field);
+}
+
+/* Update the live MRQC RSS field selection, toggling only the UDP bits. */
+static void sxe_hw_rss_hash_pkt_type_update(struct sxe_hw *hw,
+							u32 version)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc |= SXE_MRQC_RSS_FIELD_IPV4 | SXE_MRQC_RSS_FIELD_IPV4_TCP |
+		SXE_MRQC_RSS_FIELD_IPV6 | SXE_MRQC_RSS_FIELD_IPV6_TCP;
+	mrqc &= ~(SXE_MRQC_RSS_FIELD_IPV4_UDP | SXE_MRQC_RSS_FIELD_IPV6_UDP);
+
+	if (version == SXE_RSS_IP_VER_4)
+		mrqc |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+	else if (version == SXE_RSS_IP_VER_6)
+		mrqc |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Write the per-pool PSRTYPE ring-count encoding for every pool. */
+static void sxe_hw_rss_rings_used_set(struct sxe_hw *hw, u32 rss_num,
+						u16 pool, u16 pf_offset)
+{
+	u32 psrtype = 0;
+
+	/* 4+ RSS rings encodes as 2, 2-3 rings as 1, otherwise 0. */
+	if (rss_num > 3)
+		psrtype = 2u << 29;
+	else if (rss_num > 1)
+		psrtype = 1u << 29;
+
+	while (pool--)
+		SXE_REG_WRITE(hw, SXE_PSRTYPE(pf_offset + pool), psrtype);
+}
+
+/* Load the full RSS hash key into the RSSRK register array. */
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key)
+{
+	u32 idx;
+
+	for (idx = 0; idx < SXE_MAX_RSS_KEY_ENTRIES; idx++)
+		SXE_REG_WRITE(hw, SXE_RSSRK(idx), rss_key[idx]);
+}
+
+/* Write one RETA register; four 8-bit table entries live per register. */
+void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RETA(reg_idx >> 2), value);
+}
+
+/* Pack the byte-wide redirection table four entries at a time into RETA. */
+void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl)
+{
+	u32 entry;
+	u32 packed = 0;
+
+	for (entry = 0; entry < SXE_MAX_RETA_ENTRIES; entry++) {
+		packed |= (u32)redir_tbl[entry] << ((entry & 0x3) * 8);
+		if ((entry & 0x3) == 0x3) {
+			sxe_hw_rss_redir_tbl_reg_write(hw, entry, packed);
+			packed = 0;
+		}
+	}
+}
+
+/* Enable Rx; first restore VT loopback if the last Rx stop parked it. */
+void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw)
+{
+	u32 rxctrl;
+
+	if (hw->mac.set_lben) {
+		u32 gswc = SXE_REG_READ(hw, SXE_PFDTXGSWC);
+
+		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, gswc | SXE_PFDTXGSWC_VT_LBEN);
+		hw->mac.set_lben = false;
+	}
+
+	rxctrl = SXE_REG_READ(hw, SXE_RXCTRL);
+	SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl | SXE_RXCTRL_RXEN);
+}
+
+/* Disable Rx; park VT loopback and remember it for the next Rx start. */
+void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw)
+{
+	u32 rxctrl = SXE_REG_READ(hw, SXE_RXCTRL);
+	u32 gswc;
+
+	if (!(rxctrl & SXE_RXCTRL_RXEN))
+		return;
+
+	gswc = SXE_REG_READ(hw, SXE_PFDTXGSWC);
+	if (gswc & SXE_PFDTXGSWC_VT_LBEN) {
+		gswc &= ~SXE_PFDTXGSWC_VT_LBEN;
+		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, gswc);
+		hw->mac.set_lben = true;
+	} else {
+		hw->mac.set_lben = false;
+	}
+
+	SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl & ~SXE_RXCTRL_RXEN);
+}
+
+/* Turn on the MAC-level Rx function together with EDSEL. */
+static void sxe_hw_rx_func_switch_on(struct sxe_hw *hw)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	SXE_REG_WRITE(hw, SXE_COMCTRL,
+		      ctrl | SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL);
+}
+
+/* Start or stop the Tx packet buffer engine (almost-full config set on start). */
+void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 cfg = SXE_REG_READ(hw, SXE_DTXCFG);
+
+	if (is_on)
+		cfg |= SXE_DTXCFG_DBUTX_START | SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG;
+	else
+		cfg &= ~SXE_DTXCFG_DBUTX_START;
+
+	SXE_REG_WRITE(hw, SXE_DTXCFG, cfg);
+}
+
+/* Split the Tx packet buffer evenly over num_pb buffers; zero the rest. */
+void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb)
+{
+	u32 idx;
+	u32 pb_size;
+
+	if (num_pb == 0)
+		num_pb = 1;
+
+	pb_size = SXE_TX_PBSIZE_MAX / num_pb;
+	for (idx = 0; idx < SXE_PKG_BUF_NUM_MAX; idx++)
+		SXE_REG_WRITE(hw, SXE_TXPBSIZE(idx),
+			      (idx < num_pb) ? pb_size : 0);
+}
+
+/* Enable/disable LRO of ACKs; note the register bit has inverted sense. */
+void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 dbu = SXE_REG_READ(hw, SXE_LRODBU);
+
+	if (is_on)
+		dbu &= ~SXE_LRODBU_LROACKDIS;
+	else
+		dbu |= SXE_LRODBU_LROACKDIS;
+
+	SXE_REG_WRITE(hw, SXE_LRODBU, dbu);
+}
+
+/* Flip one VF's Rx-enable bit in the VFRE register bank. */
+static void sxe_hw_vf_rx_switch(struct sxe_hw *hw,
+				u32 reg_offset, u32 vf_index, bool is_off)
+{
+	u32 vfre = SXE_REG_READ(hw, SXE_VFRE(reg_offset));
+
+	vfre = is_off ? (vfre & ~BIT(vf_index)) : (vfre | BIT(vf_index));
+
+	SXE_REG_WRITE(hw, SXE_VFRE(reg_offset), vfre);
+}
+
+/* Poll FNAVCTRL until the flow navigator reports init completion. */
+STATIC s32 sxe_hw_fnav_wait_init_done(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 poll;
+
+	for (poll = 0; poll < SXE_FNAV_INIT_DONE_POLL; poll++) {
+		if (SXE_REG_READ(hw, SXE_FNAVCTRL) & SXE_FNAVCTRL_INIT_DONE)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	LOG_DEV_DEBUG("flow navigator poll time exceeded!\n");
+	return -SXE_ERR_FNAV_REINIT_FAILED;
+}
+
+/* Program the fnav hash keys and control word, then wait for init. */
+void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl)
+{
+	u32 prev;
+
+	SXE_REG_WRITE(hw, SXE_FNAVHKEY, SXE_FNAV_BUCKET_HASH_KEY);
+	SXE_REG_WRITE(hw, SXE_FNAVSKEY, SXE_FNAV_SAMPLE_HASH_KEY);
+
+	prev = SXE_REG_READ(hw, SXE_FNAVCTRL);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_fnav_wait_init_done(hw);
+
+	/* Mode bits (0x13) changed: drain the stats registers to reset them. */
+	if ((prev & 0x13) != (fnavctrl & 0x13)) {
+		SXE_REG_READ(hw, SXE_FNAVUSTAT);
+		SXE_REG_READ(hw, SXE_FNAVFSTAT);
+		SXE_REG_READ(hw, SXE_FNAVMATCH);
+		SXE_REG_READ(hw, SXE_FNAVMISS);
+		SXE_REG_READ(hw, SXE_FNAVLEN);
+	}
+}
+
+/*
+ * Initialize the flow navigator working mode.
+ *
+ * @fnavctrl: base FNAVCTRL value; flex offset, max length and full
+ *            threshold fields are OR-ed in here.
+ * @sxe_fnav_mode: SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE.
+ *
+ * Return: 0 on success, -SXE_ERR_CONFIG for an unsupported mode.
+ * Fix: the original logged an error for an invalid mode but still
+ * returned 0 (silently leaving fnav unconfigured); the split log
+ * string was also missing a separating space before SXE_FNAV_SAMPLE_MODE.
+ */
+static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw,
+					u32 fnavctrl, u32 sxe_fnav_mode)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("fnavctrl=0x%x, sxe_fnav_mode=%u\n", fnavctrl, sxe_fnav_mode);
+
+	if ((sxe_fnav_mode != SXE_FNAV_SAMPLE_MODE) &&
+		(sxe_fnav_mode != SXE_FNAV_SPECIFIC_MODE)) {
+		LOG_ERROR_BDF("mode[%u] a error fnav mode, fnav do not work. please use "
+			"SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE\n",
+			sxe_fnav_mode);
+		ret = -SXE_ERR_CONFIG;
+		goto l_end;
+	}
+
+	if (sxe_fnav_mode == SXE_FNAV_SPECIFIC_MODE) {
+		/* Specific-match mode: route non-matching flows to the drop queue. */
+		fnavctrl |= SXE_FNAVCTRL_SPECIFIC_MATCH |
+			 (SXE_FNAV_DROP_QUEUE << SXE_FNAVCTRL_DROP_Q_SHIFT);
+	}
+
+	fnavctrl |= (0x6 << SXE_FNAVCTRL_FLEX_SHIFT) |
+		    (0xA << SXE_FNAVCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << SXE_FNAVCTRL_FULL_THRESH_SHIFT);
+
+	sxe_hw_fnav_enable(hw, fnavctrl);
+
+l_end:
+	return ret;
+}
+
+/* Combine src/dst port masks and bit-reverse the 32-bit result. */
+u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask)
+{
+	u32 mask;
+
+	/* Destination mask occupies the upper half-word, source the lower. */
+	mask = ((u32)ntohs(dst_port_mask) << SXE_FNAVTCPM_DPORTM_SHIFT) |
+	       ntohs(src_port_mask);
+
+	/* Bit-reverse: swap adjacent bits, pairs, nibbles, then bytes. */
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	mask = ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+
+	return mask;
+}
+
+/* Translate the VM pool mask into FNAVM bits; only 0x0 and 0x7F are valid. */
+static s32 sxe_hw_fnav_vm_pool_mask_get(struct sxe_hw *hw,
+					u8 vm_pool, u32 *fnavm)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	s32 ret = 0;
+
+	switch (vm_pool & SXE_SAMPLE_VM_POOL_MASK) {
+	case 0x0:
+		/* Pool field not matched: mask it in FNAVM. */
+		*fnavm |= SXE_FNAVM_POOL;
+		fallthrough;
+	case 0x7F:
+		break;
+	default:
+		LOG_DEV_ERR("error on vm pool mask\n");
+		ret = -SXE_ERR_CONFIG;
+		break;
+	}
+
+	return ret;
+}
+
+/* Translate the L4 flow-type mask into FNAVM bits. */
+static s32 sxe_hw_fnav_flow_type_mask_get(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input_mask,
+					u32 *fnavm)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	s32 ret = 0;
+
+	switch (input_mask->ntuple.flow_type & SXE_SAMPLE_L4TYPE_MASK) {
+	case 0x0:
+		/* L4 type unmatched: port masks must then be zero too. */
+		*fnavm |= SXE_FNAVM_L4P;
+		if (input_mask->ntuple.dst_port || input_mask->ntuple.src_port) {
+			LOG_DEV_ERR("error on src/dst port mask\n");
+			ret = -SXE_ERR_CONFIG;
+		}
+		break;
+	case SXE_SAMPLE_L4TYPE_MASK:
+		break;
+	default:
+		LOG_DEV_ERR("error on flow type mask\n");
+		ret = -SXE_ERR_CONFIG;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Translate the VLAN field mask into FNAVM mask bits.
+ *
+ * 0x0000: match neither id nor priority (falls through: both masked).
+ * 0x0FFF: match the id only (priority masked).
+ * 0xE000: match the priority only (id masked; falls through to 0xEFFF).
+ * 0xEFFF: match both id and priority (nothing masked).
+ * Any other value is rejected as an unsupported mask.
+ */
+static s32 sxe_hw_fnav_vlan_mask_get(struct sxe_hw *hw,
+					__be16 vlan_id, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (ntohs(vlan_id) & SXE_SAMPLE_VLAN_MASK) {
+	case 0x0000:
+		*fnavm |= SXE_FNAVM_VLANID;
+		fallthrough;
+	case 0x0FFF:
+		*fnavm |= SXE_FNAVM_VLANP;
+		break;
+	case 0xE000:
+		*fnavm |= SXE_FNAVM_VLANID;
+		fallthrough;
+	case 0xEFFF:
+		break;
+	default:
+		LOG_DEV_ERR("error on VLAN mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+/* Translate the flex-bytes mask into FNAVM bits; all-or-nothing match. */
+static s32 sxe_hw_fnav_flex_bytes_mask_get(struct sxe_hw *hw,
+					__be16 flex_bytes, u32 *fnavm)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	s32 ret = 0;
+
+	switch ((__force u16)flex_bytes & SXE_SAMPLE_FLEX_BYTES_MASK) {
+	case 0x0000:
+		/* Flex bytes unmatched: mask them in FNAVM. */
+		*fnavm |= SXE_FNAVM_FLEX;
+		fallthrough;
+	case 0xFFFF:
+		break;
+	default:
+		LOG_DEV_ERR("error on flexible byte mask\n");
+		ret = -SXE_ERR_CONFIG;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Program the global match masks for specific-mode fnav rules.
+ *
+ * Builds the FNAVM field-mask word from the per-field masks in
+ * @input_mask, then writes the port and IPv4 address masks (the
+ * hardware takes those registers in inverted sense, hence the ~).
+ * Return: 0 on success, -SXE_ERR_CONFIG on any unsupported sub-mask.
+ */
+s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
+				    union sxe_fnav_rule_info *input_mask)
+{
+	s32 ret;
+	u32 fnavm = SXE_FNAVM_DIPv6;
+	u32 fnavtcpm;
+	struct sxe_adapter *adapter = hw->adapter;
+
+
+	/* A non-zero bucket hash in the mask is tolerated but flagged. */
+	if (input_mask->ntuple.bkt_hash) {
+		LOG_DEV_ERR("bucket hash should always be 0 in mask\n");
+	}
+
+	ret = sxe_hw_fnav_vm_pool_mask_get(hw, input_mask->ntuple.vm_pool, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	ret = sxe_hw_fnav_flow_type_mask_get(hw, input_mask,  &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	ret = sxe_hw_fnav_vlan_mask_get(hw, input_mask->ntuple.vlan_id, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	ret = sxe_hw_fnav_flex_bytes_mask_get(hw, input_mask->ntuple.flex_bytes, &fnavm);
+	if (ret) {
+		goto l_err_config;
+	}
+
+	LOG_DEBUG_BDF("fnavm = 0x%x\n", fnavm);
+	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
+
+	/* One combined mask serves both TCP and UDP port registers. */
+	fnavtcpm = sxe_hw_fnav_port_mask_get(input_mask->ntuple.src_port,
+					     input_mask->ntuple.dst_port);
+
+	LOG_DEBUG_BDF("fnavtcpm = 0x%x\n", fnavtcpm);
+	SXE_REG_WRITE(hw, SXE_FNAVTCPM, ~fnavtcpm);
+	SXE_REG_WRITE(hw, SXE_FNAVUDPM, ~fnavtcpm);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIP4M,
+			     ~input_mask->ntuple.src_ip[0]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVDIP4M,
+			     ~input_mask->ntuple.dst_ip[0]);
+
+	return 0;
+
+l_err_config:
+	return -SXE_ERR_CONFIG;
+}
+
+/* Poll FNAVCMD until the command field clears; last value is left in *fnavcmd. */
+STATIC s32 sxe_hw_fnav_cmd_complete_check(struct sxe_hw *hw,
+							u32 *fnavcmd)
+{
+	u32 poll;
+
+	for (poll = 0; poll < SXE_FNAVCMD_CMD_POLL * 10; poll++) {
+		*fnavcmd = SXE_REG_READ(hw, SXE_FNAVCMD);
+		if ((*fnavcmd & SXE_FNAVCMD_CMD_MASK) == 0)
+			return 0;
+
+		udelay(10);
+	}
+
+	return -SXE_ERR_FNAV_CMD_INCOMPLETE;
+}
+
+/* Program the rule's IP match fields into the fnav filter registers. */
+static void sxe_hw_fnav_filter_ip_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input)
+{
+	/* IPv6 source address words. */
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(0), input->ntuple.src_ip[0]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(1), input->ntuple.src_ip[1]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(2), input->ntuple.src_ip[2]);
+
+	/* IPv4 source and destination addresses. */
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPSA, input->ntuple.src_ip[0]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPDA, input->ntuple.dst_ip[0]);
+}
+
+/* Program the rule's L4 ports: destination in the high half, source low. */
+static void sxe_hw_fnav_filter_port_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input)
+{
+	u32 ports;
+
+	ports = (u32)be16_to_cpu(input->ntuple.dst_port)
+			<< SXE_FNAVPORT_DESTINATION_SHIFT;
+	ports |= be16_to_cpu(input->ntuple.src_port);
+	SXE_REG_WRITE(hw, SXE_FNAVPORT, ports);
+}
+
+/* Program flex bytes (high half, byte-swapped) and VLAN id (low half). */
+static void sxe_hw_fnav_filter_vlan_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input)
+{
+	u32 vlan;
+
+	vlan = (u32)ntohs(SXE_SWAP_16(input->ntuple.flex_bytes))
+			<< SXE_FNAVVLAN_FLEX_SHIFT;
+	vlan |= ntohs(input->ntuple.vlan_id);
+	SXE_REG_WRITE(hw, SXE_FNAVVLAN, vlan);
+}
+
+/* Write bucket hash with the software rule index in the signature field. */
+static void sxe_hw_fnav_filter_bkt_hash_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input,
+					u16 soft_id)
+{
+	u32 hash = (__force u32)input->ntuple.bkt_hash;
+
+	hash |= (u32)soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT;
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+}
+
+/* Issue the add-flow command for a programmed filter and wait for completion. */
+static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input,
+					u8 queue)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 cmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+	s32 ret;
+
+#ifndef SXE_DPDK
+	/* Kernel builds map the drop queue onto the drop action bit. */
+	if (queue == SXE_FNAV_DROP_QUEUE)
+		cmd |= SXE_FNAVCMD_DROP;
+#endif
+
+	cmd |= input->ntuple.flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	cmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+	cmd |= (u32)input->ntuple.vm_pool << SXE_FNAVCMD_VT_POOL_SHIFT;
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, cmd);
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &cmd);
+	if (ret)
+		LOG_DEV_ERR("flow navigator command did not complete!\n");
+
+	return ret;
+}
+
+/* Program all match fields of a specific rule, then kick the add command. */
+s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id, u8 queue)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	s32 ret;
+
+	sxe_hw_fnav_filter_ip_set(hw, input);
+	sxe_hw_fnav_filter_port_set(hw, input);
+	sxe_hw_fnav_filter_vlan_set(hw, input);
+	sxe_hw_fnav_filter_bkt_hash_set(hw, input, soft_id);
+
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_filter_cmd_set(hw, input, queue);
+	if (ret)
+		LOG_ERROR_BDF("set fnav filter cmd error. ret=%d\n", ret);
+
+	return ret;
+}
+
+/*
+ * Delete a specific-mode fnav rule identified by bucket hash + soft_id.
+ *
+ * Issues a query command first; only when the hardware reports the
+ * filter valid is the remove-flow command sent. Completion of the
+ * final remove command itself is not polled.
+ * Return: 0 on success or when no matching filter exists; negative
+ * error code when the query command times out.
+ */
+s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id)
+{
+	u32 fnavhash;
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+
+	/* Select the filter: bucket hash plus software index. */
+	fnavhash = (__force u32)input->ntuple.bkt_hash;
+	fnavhash |= soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT;
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_QUERY_REM_FILT);
+
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator command did not complete!\n");
+		return ret;
+	}
+
+	/* Only remove when the query reported an existing, valid filter. */
+	if (fnavcmd & SXE_FNAVCMD_FILTER_VALID) {
+		SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash);
+		SXE_WRITE_FLUSH(hw);
+		SXE_REG_WRITE(hw, SXE_FNAVCMD,
+				SXE_FNAVCMD_CMD_REMOVE_FLOW);
+	}
+
+	return 0;
+}
+
+/* Add a sample-mode rule: one 64-bit write, command high word, hash low. */
+void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
+					  u8 flow_type, u32 hash_value, u8 queue)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 cmd;
+	u64 hash_cmd;
+
+	cmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+	      SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN |
+	      ((u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT) |
+	      ((u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT);
+
+	hash_cmd = ((u64)cmd << 32) | hash_value;
+	SXE_REG64_WRITE(hw, SXE_FNAVHASH, hash_cmd);
+
+	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)hash_cmd);
+}
+
+/* Build (without writing) the 64-bit hash/command word for a sample rule. */
+static u64 sxe_hw_fnav_sample_rule_hash_get(struct sxe_hw *hw,
+					  u8 flow_type, u32 hash_value, u8 queue)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 cmd;
+	u64 hash_cmd;
+
+	cmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+	      SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN |
+	      ((u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT) |
+	      ((u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT);
+
+	hash_cmd = ((u64)cmd << 32) | hash_value;
+
+	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)hash_cmd);
+
+	return hash_cmd;
+}
+
+/* Validate the flow type and compute the sample-rule hash/command word. */
+static s32 sxe_hw_fnav_sample_hash_cmd_get(struct sxe_hw *hw,
+					  u8  flow_type,
+					  u32 hash_value,
+					  u8  queue, u64 *hash_cmd)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u8 pkg_type = flow_type & SXE_SAMPLE_FLOW_TYPE_MASK;
+
+	switch (pkg_type) {
+	case SXE_SAMPLE_FLOW_TYPE_TCPV4:
+	case SXE_SAMPLE_FLOW_TYPE_UDPV4:
+	case SXE_SAMPLE_FLOW_TYPE_SCTPV4:
+	case SXE_SAMPLE_FLOW_TYPE_TCPV6:
+	case SXE_SAMPLE_FLOW_TYPE_UDPV6:
+	case SXE_SAMPLE_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		LOG_DEV_ERR("error on flow type input\n");
+		return -SXE_ERR_CONFIG;
+	}
+
+	*hash_cmd = sxe_hw_fnav_sample_rule_hash_get(hw, pkg_type, hash_value, queue);
+
+	return 0;
+}
+
+/* Remove one sample-mode rule selected by its hash. */
+static s32 sxe_hw_fnav_single_sample_rule_del(struct sxe_hw *hw,
+						u32 hash)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 cmd;
+	s32 ret;
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_REMOVE_FLOW);
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &cmd);
+	if (ret)
+		LOG_DEV_ERR("flow navigator previous command did not complete,"
+			"aborting table re-initialization.\n");
+
+	return ret;
+}
+
+/*
+ * Re-initialize the sample-mode fnav rule table.
+ *
+ * Waits for any in-flight command, zeroes the free-list register,
+ * pulses CLEARHT to flush the hash table, clears FNAVHASH, then
+ * rewrites FNAVCTRL (INIT_DONE cleared) and polls for init completion.
+ * The trailing reads drain the statistics registers (read to reset).
+ * Return: 0 on success, negative error code on command/init timeout.
+ */
+s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw)
+{
+	u32 fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavctrl &= ~SXE_FNAVCTRL_INIT_DONE;
+
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator previous command did not complete,"
+			"aborting table re-initialization.\n");
+		goto l_ret;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FNAVFREE, 0);
+	SXE_WRITE_FLUSH(hw);
+
+	/* Pulse the clear-hash-table bit: set it, flush, then clear it. */
+	SXE_REG_WRITE(hw, SXE_FNAVCMD,
+			(SXE_REG_READ(hw, SXE_FNAVCMD) |
+			 SXE_FNAVCMD_CLEARHT));
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_FNAVCMD,
+			(SXE_REG_READ(hw, SXE_FNAVCMD) &
+			 ~SXE_FNAVCMD_CLEARHT));
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, 0x00);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_wait_init_done(hw);
+	if (ret) {
+		LOG_ERROR_BDF("flow navigator simple poll time exceeded!\n");
+		goto l_ret;
+	}
+
+	/* Drain the statistics registers to reset the counters. */
+	SXE_REG_READ(hw, SXE_FNAVUSTAT);
+	SXE_REG_READ(hw, SXE_FNAVFSTAT);
+	SXE_REG_READ(hw, SXE_FNAVMATCH);
+	SXE_REG_READ(hw, SXE_FNAVMISS);
+	SXE_REG_READ(hw, SXE_FNAVLEN);
+
+l_ret:
+	return ret;
+}
+
+/* Reset the fnav sample statistics by draining the counter registers. */
+static void sxe_hw_fnav_sample_stats_reinit(struct sxe_hw *hw)
+{
+	SXE_REG_READ(hw, SXE_FNAVUSTAT);
+	SXE_REG_READ(hw, SXE_FNAVFSTAT);
+	SXE_REG_READ(hw, SXE_FNAVMATCH);
+	SXE_REG_READ(hw, SXE_FNAVMISS);
+	SXE_REG_READ(hw, SXE_FNAVLEN);
+}
+
+/* Write a PTP frequency adjustment: zero low word, value in the high word. */
+static void sxe_hw_ptp_freq_adjust(struct sxe_hw *hw, u32 adj_freq)
+{
+	SXE_REG_WRITE(hw, SXE_TIMADJL, 0);
+	SXE_REG_WRITE(hw, SXE_TIMADJH, adj_freq);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Read the hardware system time and convert it to nanoseconds. */
+u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 time_lo = SXE_REG_READ(hw, SXE_SYSTIML);
+	u32 time_mid = SXE_REG_READ(hw, SXE_SYSTIMM);
+	u64 ns = SXE_TIME_TO_NS(time_lo, time_mid);
+
+	LOG_DEBUG_BDF("get ptp hw systime systiml=%u, systimm=%u, ns=%"SXE_PRIU64"\n",
+			time_lo, time_mid, ns);
+	return ns;
+}
+
+/* Zero all three words of the hardware system-time counter. */
+void sxe_hw_ptp_systime_init(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_SYSTIML, 0);
+	SXE_REG_WRITE(hw, SXE_SYSTIMM, 0);
+	SXE_REG_WRITE(hw, SXE_SYSTIMH, 0);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Enable PTP v2 timestamping for all packet types and set the time increment. */
+void sxe_hw_ptp_init(struct sxe_hw *hw)
+{
+	u32 regval = SXE_REG_READ(hw, SXE_TSCTRL);
+
+	/* Two-step mode: clear one-step and checksum-update bits. */
+	regval &= ~(SXE_TSCTRL_ONESTEP | SXE_TSCTRL_CSEN);
+	regval |= SXE_TSCTRL_TSEN | SXE_TSCTRL_VER_2 |
+		  SXE_TSCTRL_PTYP_ALL | SXE_TSCTRL_L4_UNICAST;
+	SXE_REG_WRITE(hw, SXE_TSCTRL, regval);
+
+	SXE_REG_WRITE(hw, SXE_TIMINC,
+			SXE_TIMINC_SET(SXE_INCPD, SXE_IV_NS, SXE_IV_SNS));
+}
+
+/* Clear the latched Rx timestamp by reading the high word register. */
+void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw)
+{
+	SXE_REG_READ(hw, SXE_RXSTMPH);
+}
+
+/*
+ * Read the latched Tx timestamp and reconstruct full seconds/ns values.
+ *
+ * Two magic selector writes fetch the nanosecond word and a seconds
+ * word holding 8 low seconds bits plus 24 more seconds bits. The top
+ * 8 seconds bits are borrowed from the running SYSTIMM counter; when
+ * the captured 24-bit seconds lag far behind SYSTIMM the capture
+ * straddled a 24-bit rollover, so the borrowed byte is decremented --
+ * NOTE(review): rollover handling inferred from the arithmetic,
+ * confirm against the timestamp register specification.
+ */
+void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
+						u32 *ts_sec, u32 *ts_ns)
+{
+	u32 reg_sec;
+	u32 reg_ns;
+	u32 sec_8bit;
+	u32 sec_24bit;
+	u32 systimm;
+	u32 systimm_8bit;
+	u32 systimm_24bit;
+
+	SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC0);
+	reg_ns = SXE_REG_READ(hw, SXE_TXSTMP_VAL);
+	SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC1);
+	reg_sec = SXE_REG_READ(hw, SXE_TXSTMP_VAL);
+	systimm = SXE_REG_READ(hw, SXE_SYSTIMM);
+
+
+	/* Split the captured seconds word into its 8-bit and 24-bit parts. */
+	sec_8bit  = reg_sec & 0x000000FF;
+	sec_24bit = (reg_sec >> 8) & 0x00FFFFFF;
+
+	systimm_24bit = systimm & 0x00FFFFFF;
+	systimm_8bit  = systimm & 0xFF000000;
+
+	*ts_ns  = (sec_8bit << 24) | ((reg_ns & 0xFFFFFF00) >> 8);
+
+	if (unlikely((sec_24bit - systimm_24bit) >= 0x00FFFFF0)) {
+		if (systimm_8bit >= 1) {
+			systimm_8bit -= 1;
+		}
+	}
+
+	*ts_sec = systimm_8bit | sec_24bit;
+	return;
+}
+
+/* Read the latched Rx timestamp and convert it to nanoseconds. */
+u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 ts_lo = SXE_REG_READ(hw, SXE_RXSTMPL);
+	u32 ts_hi = SXE_REG_READ(hw, SXE_RXSTMPH);
+	u64 ns = SXE_TIME_TO_NS(ts_lo, ts_hi);
+
+	LOG_DEBUG_BDF("ptp get rx ptp timestamp low=%u, high=%u, ns=%"SXE_PRIU64"\n",
+			ts_lo, ts_hi, ns);
+	return ns;
+}
+
+/* Report whether a latched Rx timestamp is pending (RXTT flag set). */
+bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw)
+{
+	u32 tsyncrxctl = SXE_REG_READ(hw, SXE_TSYNCRXCTL);
+
+	return (tsyncrxctl & SXE_TSYNCRXCTL_RXTT) != 0;
+}
+
+/* Configure L2 1588 filtering, optional TSCTRL bits and the TSES mode. */
+void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
+					bool is_l2, u32 tsctl, u32 tses)
+{
+	u32 regval;
+
+	/* Steer PTP-over-Ethernet frames through the 1588 ethertype filter. */
+	if (is_l2)
+		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588),
+			      SXE_ETQF_FILTER_EN | SXE_ETQF_1588 | ETH_P_1588);
+	else
+		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588), 0);
+
+	if (tsctl) {
+		regval = SXE_REG_READ(hw, SXE_TSCTRL) | tsctl;
+		SXE_REG_WRITE(hw, SXE_TSCTRL, regval);
+	}
+
+	SXE_REG_WRITE(hw, SXE_TSES, tses);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Enable Tx and Rx hardware timestamping. */
+void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw)
+{
+	u32 val;
+
+	val = SXE_REG_READ(hw, SXE_TSYNCTXCTL);
+	SXE_REG_WRITE(hw, SXE_TSYNCTXCTL, val | SXE_TSYNCTXCTL_TEN);
+
+	val = SXE_REG_READ(hw, SXE_TSYNCRXCTL);
+	SXE_REG_WRITE(hw, SXE_TSYNCRXCTL, val | SXE_TSYNCRXCTL_REN);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program RQTC from the number of significant bits in the RSS count. */
+static void sxe_hw_dcb_tc_rss_configure(struct sxe_hw *hw, u16 rss)
+{
+	u32 bits = 0;
+
+	for (; rss != 0; rss >>= 1)
+		bits++;
+
+	SXE_REG_WRITE(hw, SXE_RQTC, bits * SXE_8_TC_MSB);
+}
+
+/* Clear TXDCTL.ENABLE on ring reg_idx and poll with a growing back-off. */
+static void sxe_hw_tx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
+				 unsigned long timeout)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	unsigned long interval = timeout / 100;
+	unsigned long delay = interval;
+	int attempts = SXE_MAX_RX_DESC_POLL;
+	u32 txdctl;
+
+	txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl & ~SXE_TXDCTL_ENABLE);
+
+	while (attempts--) {
+		usleep_range(delay, delay + 10);
+		delay += interval * 2;
+
+		txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		if (!(txdctl & SXE_TXDCTL_ENABLE))
+			return;
+	}
+
+	LOG_MSG_ERR(drv, "register TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+/* Clear RXDCTL.ENABLE on ring reg_idx and poll with a growing back-off. */
+static void sxe_hw_rx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
+				 unsigned long timeout)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	unsigned long interval = timeout / 100;
+	unsigned long delay = interval;
+	int attempts = SXE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl & ~SXE_RXDCTL_ENABLE);
+
+	while (attempts--) {
+		usleep_range(delay, delay + 10);
+		delay += interval * 2;
+
+		rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+		if (!(rxdctl & SXE_RXDCTL_ENABLE))
+			return;
+	}
+
+	LOG_MSG_ERR(drv, "register RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+/* Read the Tx packet-buffer flow-control status register. */
+static u32 sxe_hw_tx_dbu_fc_status_get(struct sxe_hw *hw)
+{
+	u32 status = SXE_REG_READ(hw, SXE_TXPBFCS);
+
+	return status;
+}
+
+/* Write a prebuilt 64-bit hash/command word straight into FNAVHASH. */
+static void sxe_hw_fnav_sample_hash_set(struct sxe_hw *hw, u64 hash)
+{
+	SXE_REG64_WRITE(hw, SXE_FNAVHASH, hash);
+}
+
+/*
+ * DBU (data buffer unit) operations table: packet-buffer sizing and
+ * switching, RSS key/redirection setup, flow-navigator rule handling,
+ * PTP timestamping, and per-ring enable/disable hooks.
+ */
+static const struct sxe_dbu_operations sxe_dbu_ops = {
+	.rx_pkt_buf_size_configure	= sxe_hw_rx_pkt_buf_size_configure,
+	.rx_pkt_buf_switch		= sxe_hw_rx_pkt_buf_switch,
+	.rx_multi_ring_configure	= sxe_hw_rx_multi_ring_configure,
+	.rss_key_set_all		= sxe_hw_rss_key_set_all,
+	.rss_redir_tbl_set_all		= sxe_hw_rss_redir_tbl_set_all,
+	.rx_cap_switch_on		= sxe_hw_rx_cap_switch_on,
+	.rx_cap_switch_off		= sxe_hw_rx_cap_switch_off,
+	.rss_hash_pkt_type_set		= sxe_hw_rss_hash_pkt_type_set,
+	.rss_hash_pkt_type_update	= sxe_hw_rss_hash_pkt_type_update,
+	.rss_rings_used_set		= sxe_hw_rss_rings_used_set,
+	.lro_ack_switch			= sxe_hw_rx_lro_ack_switch,
+
+	.fnav_mode_init			= sxe_hw_fnav_mode_init,
+	.fnav_specific_rule_mask_set	= sxe_hw_fnav_specific_rule_mask_set,
+	.fnav_specific_rule_add		= sxe_hw_fnav_specific_rule_add,
+	.fnav_specific_rule_del		= sxe_hw_fnav_specific_rule_del,
+	.fnav_sample_hash_cmd_get	= sxe_hw_fnav_sample_hash_cmd_get,
+	.fnav_sample_stats_reinit	= sxe_hw_fnav_sample_stats_reinit,
+	.fnav_sample_hash_set		= sxe_hw_fnav_sample_hash_set,
+	.fnav_single_sample_rule_del	= sxe_hw_fnav_single_sample_rule_del,
+
+	.tx_pkt_buf_switch		= sxe_hw_tx_pkt_buf_switch,
+	.tx_pkt_buf_size_configure	= sxe_hw_tx_pkt_buf_size_configure,
+
+	.ptp_init			= sxe_hw_ptp_init,
+	.ptp_freq_adjust		= sxe_hw_ptp_freq_adjust,
+	.ptp_systime_init		= sxe_hw_ptp_systime_init,
+	.ptp_systime_get		= sxe_hw_ptp_systime_get,
+	.ptp_tx_timestamp_get		= sxe_hw_ptp_tx_timestamp_get,
+	.ptp_timestamp_mode_set		= sxe_hw_ptp_timestamp_mode_set,
+	.ptp_timestamp_enable		= sxe_hw_ptp_timestamp_enable,
+	.ptp_rx_timestamp_clear		= sxe_hw_ptp_rx_timestamp_clear,
+	.ptp_rx_timestamp_get		= sxe_hw_ptp_rx_timestamp_get,
+	.ptp_is_rx_timestamp_valid	= sxe_hw_ptp_is_rx_timestamp_valid,
+
+	.dcb_tc_rss_configure		= sxe_hw_dcb_tc_rss_configure,
+	.vf_rx_switch			= sxe_hw_vf_rx_switch,
+	.rx_pkt_buf_size_get		= sxe_hw_rx_pkt_buf_size_get,
+	.rx_func_switch_on		= sxe_hw_rx_func_switch_on,
+
+	.tx_ring_disable		= sxe_hw_tx_ring_disable,
+	.rx_ring_disable		= sxe_hw_rx_ring_disable,
+
+	.tx_dbu_fc_status_get		= sxe_hw_tx_dbu_fc_status_get,
+};
+
+
+/* Initialize Rx DMA control: CRC strip on/off, LRO first-segment size cleared. */
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw, bool crc_strip_on)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
+
+	if (crc_strip_on)
+		ctrl |= SXE_RDRXCTL_CRCSTRIP;
+	else
+		ctrl &= ~SXE_RDRXCTL_CRCSTRIP;
+
+	ctrl &= ~SXE_RDRXCTL_LROFRSTSIZE;
+	SXE_REG_WRITE(hw, SXE_RDRXCTL, ctrl);
+}
+
+/* Set the LRO ACK coalescing bit in the Rx DMA control register. */
+void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
+
+	SXE_REG_WRITE(hw, SXE_RDRXCTL, ctrl | SXE_RDRXCTL_LROACKC);
+}
+
+/*
+ * Program RXDCTL descriptor thresholds for ring @reg_idx.
+ * 0x40: prefetch number, 0x2: descriptor FIFO almost-empty threshold
+ * (per the shift macro names); NOTE(review): meaning of the raw low
+ * bits 0x10 is not visible here -- confirm against the register spec.
+ */
+void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 rxdctl;
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	rxdctl |= 0x40 << SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT;
+	rxdctl |= 0x2 << SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT;
+	rxdctl |= 0x10;
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+	return;
+}
+
+/* Enable/disable an Rx ring and poll until hardware reflects the state. */
+void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 poll = SXE_RING_WAIT_LOOP;
+	u32 rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+
+	if (is_on)
+		rxdctl |= SXE_RXDCTL_ENABLE;
+	else
+		rxdctl &= ~SXE_RXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	} while (--poll && (((rxdctl & SXE_RXDCTL_ENABLE) != 0) != is_on));
+
+	SXE_WRITE_FLUSH(hw);
+
+	if (!poll)
+		LOG_MSG_ERR(drv, "rx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+}
+
+/* Enable/disable an Rx ring without waiting for hardware confirmation. */
+void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+
+	if (is_on)
+		rxdctl |= SXE_RXDCTL_ENABLE;
+	else
+		rxdctl &= ~SXE_RXDCTL_ENABLE;
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program head and tail pointers of an Rx descriptor ring. */
+void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
+					u8 reg_idx, u32 rdh_value,
+					u32 rdt_value)
+{
+	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), rdh_value);
+	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), rdt_value);
+}
+
+/* Reset the Rx ring head pointer to zero. */
+static void sxe_hw_rx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), 0);
+}
+
+/* Reset the Rx ring tail pointer to zero. */
+static void sxe_hw_rx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), 0);
+}
+
+/* Program the Rx ring base address (split 64-bit DMA), length, head and tail. */
+void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len, u64 desc_dma_addr,
+					u8 reg_idx)
+{
+	u32 addr_lo = desc_dma_addr & DMA_BIT_MASK(32);
+	u32 addr_hi = desc_dma_addr >> 32;
+
+	SXE_REG_WRITE(hw, SXE_RDBAL(reg_idx), addr_lo);
+	SXE_REG_WRITE(hw, SXE_RDBAH(reg_idx), addr_hi);
+	SXE_REG_WRITE(hw, SXE_RDLEN(reg_idx), desc_mem_len);
+
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_rx_ring_head_init(hw, reg_idx);
+	sxe_hw_rx_ring_tail_init(hw, reg_idx);
+}
+
+/* Program SRRCTL header and packet buffer size fields for a ring. */
+void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len
+				   )
+{
+	u32 srrctl;
+
+	srrctl = (header_buf_len << SXE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+		 SXE_SRRCTL_BSIZEHDR_MASK;
+	srrctl |= (pkg_buf_len >> SXE_SRRCTL_BSIZEPKT_SHIFT) &
+		  SXE_SRRCTL_BSIZEPKT_MASK;
+
+	SXE_REG_WRITE(hw, SXE_SRRCTL(reg_idx), srrctl);
+}
+
+/* Enable LRO on a ring and set its maximum descriptor count field. */
+void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
+						u8 reg_idx, u32 max_desc)
+{
+	u32 ctl = SXE_REG_READ(hw, SXE_LROCTL(reg_idx));
+
+	SXE_REG_WRITE(hw, SXE_LROCTL(reg_idx),
+		      ctl | SXE_LROCTL_LROEN | max_desc);
+}
+
+/* Read the RXDCTL register of ring reg_idx. */
+static u32 sxe_hw_rx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+
+	return rxdctl;
+}
+
+/* Enable or disable the DCB Tx descriptor-plane arbiter. */
+static void sxe_hw_dcb_arbiter_set(struct sxe_hw *hw, bool is_enable)
+{
+	u32 rttdcs = SXE_REG_READ(hw, SXE_RTTDCS);
+
+	if (is_enable)
+		rttdcs &= ~(SXE_RTTDCS_ARBDIS | SXE_RTTDCS_BPBFSM);
+	else
+		rttdcs |= SXE_RTTDCS_ARBDIS;
+
+	SXE_REG_WRITE(hw, SXE_RTTDCS, rttdcs);
+}
+
+
+/* Select the MTQC Tx queue mapping; the arbiter is stopped around the write. */
+static void sxe_hw_tx_multi_ring_configure(struct sxe_hw *hw, u8 tcs,
+				u16 pool_mask, bool sriov_enable, u16 max_txq)
+{
+	u32 mtqc;
+
+	sxe_hw_dcb_arbiter_set(hw, false);
+
+	if (sriov_enable) {
+		mtqc = SXE_MTQC_VT_ENA;
+		if (tcs > SXE_DCB_4_TC)
+			mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+		else if (tcs > SXE_DCB_1_TC)
+			mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+		else if (pool_mask == SXE_4Q_PER_POOL_MASK)
+			mtqc |= SXE_MTQC_32VF;
+		else
+			mtqc |= SXE_MTQC_64VF;
+	} else if (tcs > SXE_DCB_4_TC) {
+		mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+	} else if (tcs > SXE_DCB_1_TC) {
+		mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+	} else if (max_txq > 63) {
+		mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+	} else {
+		mtqc = SXE_MTQC_64Q_1PB;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, mtqc);
+
+	sxe_hw_dcb_arbiter_set(hw, true);
+}
+
+/* Reset the Tx ring head pointer to zero. */
+void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TDH(reg_idx), 0);
+}
+
+/* Reset the Tx ring tail pointer to zero. */
+void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TDT(reg_idx), 0);
+}
+
+/* Program the Tx ring base (split 64-bit DMA), length, head and tail. */
+void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	/* Disable the queue before touching the ring registers. */
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), 0);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_TDBAL(reg_idx), desc_dma_addr & DMA_BIT_MASK(32));
+	SXE_REG_WRITE(hw, SXE_TDBAH(reg_idx), desc_dma_addr >> 32);
+	SXE_REG_WRITE(hw, SXE_TDLEN(reg_idx), desc_mem_len);
+
+	sxe_hw_tx_ring_head_init(hw, reg_idx);
+	sxe_hw_tx_ring_tail_init(hw, reg_idx);
+}
+
+/* Program TXDCTL write-back/host/prefetch thresholds for TX ring reg_idx.
+ * NOTE(review): the register is rebuilt from 0, so every other TXDCTL
+ * field (including ENABLE) is cleared by this write; callers must
+ * re-enable the ring afterwards if needed.
+ */
+void sxe_hw_tx_desc_thresh_set(
+				struct sxe_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefech_thresh)
+{
+	u32 txdctl = 0;
+
+	txdctl |= (wb_thresh << SXE_TXDCTL_WTHRESH_SHIFT);
+	txdctl |= (host_thresh << SXE_TXDCTL_HTHRESH_SHIFT) | prefech_thresh;
+
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+	return;
+}
+
+/* Clear the ENABLE bit of TXDCTL and RXDCTL for rings 0..ring_max-1,
+ * then flush and wait 1-2 ms for in-flight DMA to settle.
+ */
+void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max)
+{
+	u32 i, value;
+
+	for (i = 0; i < ring_max; i++) {
+		value = SXE_REG_READ(hw, SXE_TXDCTL(i));
+		value &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(i), value);
+
+		value = SXE_REG_READ(hw, SXE_RXDCTL(i));
+		value &= ~SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(i), value);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	usleep_range(1000, 2000);
+
+	return;
+}
+
+/* Enable or disable TX ring reg_idx and poll (up to SXE_RING_WAIT_LOOP
+ * iterations, 1-2 ms apart) until TXDCTL.ENABLE reflects the requested
+ * state; logs a device error on timeout.
+ */
+void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 wait_loop = SXE_RING_WAIT_LOOP;
+	/* presumably referenced by the LOG_DEV_ERR macro below -- confirm */
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u32 txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		} while (--wait_loop && !(txdctl & SXE_TXDCTL_ENABLE));
+	} else {
+		txdctl &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		} while (--wait_loop && (txdctl & SXE_TXDCTL_ENABLE));
+	}
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("tx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+/* Set or clear TXDCTL.ENABLE for TX ring reg_idx without waiting for
+ * the hardware to acknowledge the state change.
+ */
+void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+
+	if (is_on)
+		ctrl |= SXE_TXDCTL_ENABLE;
+	else
+		ctrl &= ~SXE_TXDCTL_ENABLE;
+
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), ctrl);
+
+	return;
+}
+
+/* Split the TX packet buffer evenly across num_pb buffers and program
+ * each buffer's almost-full threshold (in KB, minus the max packet
+ * reservation); unused buffers get threshold 0.
+ */
+void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
+					u8 num_pb, bool dcb_enable)
+{
+	u32 i, tx_pkt_size, tx_pb_thresh;
+
+	if (!num_pb){
+		num_pb = 1;
+	}
+
+	tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb;
+	if (true == dcb_enable) {
+		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_TX_PKT_SIZE_MAX;
+	} else {
+		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_NODCB_TX_PKT_SIZE_MAX;
+	}
+
+	for (i = 0; i < num_pb; i++) {
+		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), tx_pb_thresh);
+	}
+
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), 0);
+	}
+
+	return;
+}
+
+/* Globally enable TX DMA by setting DMATXCTL.TE. */
+void sxe_hw_tx_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_DMATXCTL);
+	ctl |= SXE_DMATXCTL_TE;
+	SXE_REG_WRITE(hw, SXE_DMATXCTL, ctl);
+
+	return;
+}
+
+/* Return the raw TXDCTL register value for TX queue reg_idx. */
+static u32 sxe_hw_tx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+}
+
+/* Zero the TXDCTL write-back threshold of ring reg_idx.
+ * The ring is disabled, flushed, then re-enabled with the 7-bit field at
+ * bit 16 cleared (matches SXE_TXDCTL_WTHRESH_SHIFT usage in
+ * sxe_hw_tx_desc_thresh_set -- presumed to be WTHRESH; confirm).
+ */
+static void sxe_hw_tx_desc_wb_thresh_clear(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 reg_data;
+
+	reg_data = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	reg_data &= ~SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
+	SXE_WRITE_FLUSH(hw);
+	reg_data &= ~(0x7f<<16);
+	reg_data |= SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
+
+	return;
+}
+
+/* Toggle hardware VLAN tag stripping (RXDCTL.VME) on RX queue reg_index. */
+void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
+					u16 reg_index, bool is_enable)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_RXDCTL(reg_index));
+
+	if (is_enable)
+		ctrl |= SXE_RXDCTL_VME;
+	else
+		ctrl &= ~SXE_RXDCTL_VME;
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_index), ctrl);
+
+	return;
+}
+
+/* Program default VLAN insertion (VMVIR) for pool vf: vid plus priority
+ * qos, with the default-VLAN-insert mode bit set.
+ */
+static void sxe_hw_tx_vlan_tag_set(struct sxe_hw *hw,
+				   u16 vid, u16 qos, u32 vf)
+{
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | SXE_VMVIR_VLANA_DEFAULT;
+
+	SXE_REG_WRITE(hw, SXE_VMVIR(vf), vmvir);
+	return;
+}
+
+/* Disable default VLAN insertion for pool vf by zeroing VMVIR. */
+void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf)
+{
+	SXE_REG_WRITE(hw, SXE_VMVIR(vf), 0);
+	return;
+}
+
+/* Return the raw VMVIR (VLAN insertion) register value for pool vf. */
+u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf)
+{
+	return SXE_REG_READ(hw, SXE_VMVIR(vf));
+}
+
+/* Read the current head (TDH) and tail (TDT) pointers of TX ring idx. */
+void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
+				u8 idx, u32 *head, u32 *tail)
+{
+	*head = SXE_REG_READ(hw, SXE_TDH(idx));
+	*tail = SXE_REG_READ(hw, SXE_TDT(idx));
+
+	return;
+}
+
+/* Configure DCB RX packet-plane bandwidth allocation.
+ * refill/max/bwg_id/prio_type: per-TC credit refill, max credits,
+ * bandwidth group and arbitration priority (arrays of
+ * MAX_TRAFFIC_CLASS entries); prio_tc maps user priority to TC for the
+ * first max_priority priorities.
+ * The RX arbiter is held disabled (RTRPCS.ARBDIS) while the per-TC
+ * RTRPT4C registers are rewritten, then re-enabled.
+ */
+void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
+				      u16 *refill,
+				      u16 *max,
+				      u8 *bwg_id,
+				      u8 *prio_type,
+				      u8 *prio_tc,
+				      u8 max_priority)
+{
+	u32    reg;
+	u32    credit_refill;
+	u32    credit_max;
+	u8     i;
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	reg = 0;
+	for (i = 0; i < max_priority; i++) {
+		reg |= (prio_tc[i] << (i * SXE_RTRUP2TC_UP_SHIFT));
+	}
+
+	SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max    = max[i];
+		reg = credit_refill | (credit_max << SXE_RTRPT4C_MCL_SHIFT);
+
+		reg |= (u32)(bwg_id[i]) << SXE_RTRPT4C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_LINK) {
+			reg |= SXE_RTRPT4C_LSP;
+		}
+
+		SXE_REG_WRITE(hw, SXE_RTRPT4C(i), reg);
+	}
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	return;
+}
+
+/* Configure DCB TX descriptor-plane bandwidth allocation.
+ * Clears the per-queue rate-limit credits (RTTDT1C) for all 128 queues,
+ * then programs per-TC refill/max/bandwidth-group/priority into RTTDT2C
+ * and enables the descriptor-plane arbiter (RTTDCS).
+ */
+void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type)
+{
+	u32    reg, max_credits;
+	u8     i;
+
+	for (i = 0; i < 128; i++) {
+		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
+		SXE_REG_WRITE(hw, SXE_RTTDT1C, 0);
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << SXE_RTTDT2C_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << SXE_RTTDT2C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_GROUP) {
+			reg |= SXE_RTTDT2C_GSP;
+		}
+
+		if (prio_type[i] == PRIO_LINK) {
+			reg |= SXE_RTTDT2C_LSP;
+		}
+
+		SXE_REG_WRITE(hw, SXE_RTTDT2C(i), reg);
+	}
+
+	reg = SXE_RTTDCS_TDPAC | SXE_RTTDCS_TDRM;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+	return;
+}
+
+/* Configure DCB TX data-plane (packet) bandwidth allocation.
+ * Mirrors the descriptor-plane setup: the packet-plane arbiter (RTTPCS)
+ * is held disabled while RTTUP2TC (priority->TC map) and the per-TC
+ * RTTPT2C credit registers are rewritten, then re-enabled.
+ */
+void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type,
+					   u8 *prio_tc,
+					   u8 max_priority)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
+	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT) |
+	      SXE_RTTPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
+
+	reg = 0;
+	for (i = 0; i < max_priority; i++) {
+		reg |= (prio_tc[i] << (i * SXE_RTTUP2TC_UP_SHIFT));
+	}
+
+	SXE_REG_WRITE(hw, SXE_RTTUP2TC, reg);
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << SXE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << SXE_RTTPT2C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_GROUP) {
+			reg |= SXE_RTTPT2C_GSP;
+		}
+
+		if (prio_type[i] == PRIO_LINK) {
+			reg |= SXE_RTTPT2C_LSP;
+		}
+
+		SXE_REG_WRITE(hw, SXE_RTTPT2C(i), reg);
+	}
+
+	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
+	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT);
+	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
+
+	return;
+}
+
+/* Configure priority flow control (PFC).
+ * pfc_en:       per-priority PFC enable bitmap (bit i = priority i);
+ * prio_tc:      user priority -> traffic class map;
+ * max_priority: number of valid entries in prio_tc.
+ * Enables PFC in FLCTRL/PFCTOP, programs per-TC XOFF/XON thresholds
+ * from hw->fc.high_water/low_water, and sets the pause timers from
+ * hw->fc.pause_time.
+ */
+void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
+						u8 pfc_en, u8 *prio_tc,
+						u8 max_priority)
+{
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+	u32 reg_val;
+
+	reg_val = SXE_REG_READ(hw, SXE_FLCTRL);
+
+	/* Switch TX flow control from link-level FC to per-priority PFC. */
+	reg_val &= ~SXE_FCTRL_TFCE_MASK;
+	reg_val |= SXE_FCTRL_TFCE_PFC_EN;
+
+	reg_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	/* Cast before shifting: "pfc_en << 24" shifts a promoted signed
+	 * int into the sign bit when bit 7 of pfc_en is set, which is
+	 * undefined behavior.
+	 */
+	reg_val &= ~(SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+	reg_val |= ((u32)pfc_en << 16) & SXE_FCTRL_TFCE_FCEN_MASK;
+	reg_val |= ((u32)pfc_en << 24) & SXE_FCTRL_TFCE_XONE_MASK;
+
+	reg_val &= ~SXE_FCTRL_RFCE_MASK;
+	reg_val |= SXE_FCTRL_RFCE_PFC_EN;
+	SXE_REG_WRITE(hw, SXE_FLCTRL, reg_val);
+
+	reg_val = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg_val &= ~SXE_PFCTOP_FCOP_MASK;
+	reg_val |= SXE_PFCTOP_FCT;
+	reg_val |= SXE_PFCTOP_FCOP_PFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg_val);
+
+	/* Highest TC referenced by the priority map. */
+	for (i = 0; i < max_priority; i++) {
+		if (prio_tc[i] > max_tc) {
+			max_tc = prio_tc[i];
+		}
+	}
+
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		/* TC i gets thresholds only if some PFC-enabled priority
+		 * maps to it.
+		 */
+		for (j = 0; j < max_priority; j++) {
+			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
+		} else {
+			/* PFC off for this TC: derive a high threshold from
+			 * the RX packet buffer size register.
+			 */
+			reg = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		}
+
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), reg);
+	}
+
+	/* Zero thresholds for unused traffic classes. */
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), 0);
+	}
+
+	/* Same pause time in both 16-bit halves of each FCTTV register. */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+	return;
+}
+
+/* Map queue statistic counters for 8-TC DCB with VMDq disabled:
+ * RX counters cover 4 consecutive queues per TC; TX counters follow the
+ * 8-TC queue layout (8/8/4/4/2/2/2/2 queues per TC).
+ */
+static void sxe_hw_dcb_8tc_vmdq_off_stats_configure(struct sxe_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < 32; i++)
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), 0x01010101 * (i / 4));
+
+	for (i = 0; i < 32; i++) {
+		u32 map;
+
+		if (i < 8)
+			map = 0x00000000;
+		else if (i < 16)
+			map = 0x01010101;
+		else if (i < 20)
+			map = 0x02020202;
+		else if (i < 24)
+			map = 0x03030303;
+		else if (i < 26)
+			map = 0x04040404;
+		else if (i < 28)
+			map = 0x05050505;
+		else if (i < 30)
+			map = 0x06060606;
+		else
+			map = 0x07070707;
+
+		SXE_REG_WRITE(hw, SXE_TQSM(i), map);
+	}
+
+	return;
+}
+
+/* Clamp the RX user-priority -> TC map so no priority points at a TC
+ * above "tc": any out-of-range 3-bit field in RTRUP2TC is reset to 0.
+ * Fixes: the original cleared "0x7 << SXE_RTRUP2TC_UP_MASK" -- using
+ * the field MASK as a shift amount -- so it always cleared the same
+ * (wrong) field instead of field i, and it compared the extracted value
+ * without masking off the neighboring fields.
+ */
+static void sxe_hw_dcb_rx_up_tc_map_set(struct sxe_hw *hw, u8 tc)
+{
+	u8 i;
+	u32 reg, rsave;
+
+	reg = SXE_REG_READ(hw, SXE_RTRUP2TC);
+	rsave = reg;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = (reg >> (i * SXE_RTRUP2TC_UP_SHIFT)) &
+						SXE_RTRUP2TC_UP_MASK;
+
+		if (up2tc > tc) {
+			reg &= ~((u32)SXE_RTRUP2TC_UP_MASK <<
+					(i * SXE_RTRUP2TC_UP_SHIFT));
+		}
+	}
+
+	/* Only touch the register if something actually changed. */
+	if (reg != rsave) {
+		SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
+	}
+
+	return;
+}
+
+/* Enable or disable VT pool-to-pool loopback (PFDTXGSWC.VT_LBEN). */
+void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
+							bool is_enable)
+{
+	u32 ctrl = is_enable ? SXE_PFDTXGSWC_VT_LBEN : 0;
+
+	SXE_REG_WRITE(hw, SXE_PFDTXGSWC, ctrl);
+
+	return;
+}
+
+/* Enable RX drop (and VLAN hiding when pf_vlan is set) on every ring of
+ * pool vf_idx via the indirect QDE register: each write carries the
+ * ring index, the WRITE strobe and the enable flags.
+ */
+void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u16 pf_vlan, u8 ring_per_pool)
+{
+	u32 qde = SXE_QDE_ENABLE;
+	u8 i;
+
+	if (pf_vlan) {
+		qde |= SXE_QDE_HIDE_VLAN;
+	}
+
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++)
+	{
+		u32 value;
+
+		SXE_WRITE_FLUSH(hw);
+
+		value = i << SXE_QDE_IDX_SHIFT;
+		value |= qde | SXE_QDE_WRITE;
+
+		SXE_REG_WRITE(hw, SXE_QDE, value);
+	}
+
+	return;
+}
+
+/* Read the RX pool enable bitmap register VFRE[reg_idx]. */
+u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_VFRE(reg_idx));
+}
+
+/* Write the RX pool enable bitmap register VFRE[reg_idx]. */
+void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap)
+{
+	SXE_REG_WRITE(hw, SXE_VFRE(reg_idx), bitmap);
+
+	return;
+}
+
+/* Read the TX pool enable bitmap register VFTE[reg_idx]. */
+u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_VFTE(reg_idx));
+}
+
+/* Write the TX pool enable bitmap register VFTE[reg_idx]. */
+void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap)
+{
+	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), bitmap);
+
+	return;
+}
+
+/* Program the DCB rate-scheduler memory window register (RTTBCNRM). */
+void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RTTBCNRM, value);
+
+	return;
+}
+
+/* Set the TX rate-limit factor (RTTBCNRC) for ring ring_idx; the ring
+ * is selected through the indirect RTTDQSEL register first.
+ */
+void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
+							u32 ring_idx, u32 rate)
+{
+	SXE_REG_WRITE(hw, SXE_RTTDQSEL, ring_idx);
+	SXE_REG_WRITE(hw, SXE_RTTBCNRC, rate);
+
+	return;
+}
+
+/* Set bit bit_index in VMECM[reg_idx] to enable spoof-event counting. */
+void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_index)
+{
+	u32 value = SXE_REG_READ(hw, SXE_VMECM(reg_idx));
+
+	value |= BIT(bit_index);
+
+	SXE_REG_WRITE(hw, SXE_VMECM(reg_idx), value);
+
+	return;
+}
+
+/* Enable or disable MAC anti-spoof checking for pool vf_idx.
+ * Eight pools share each SPOOF register; vf_idx selects register and bit.
+ */
+void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
+							u8 vf_idx, bool status)
+{
+	u8 reg_idx = vf_idx / 8;
+	u8 bit = vf_idx & 0x7;
+	u32 spoof = SXE_REG_READ(hw, SXE_SPOOF(reg_idx));
+
+	if (status)
+		spoof |= BIT(bit);
+	else
+		spoof &= ~BIT(bit);
+
+	SXE_REG_WRITE(hw, SXE_SPOOF(reg_idx), spoof);
+
+	return;
+}
+
+/* Read RTRUP2TC and unpack the 3-bit per-priority TC fields into
+ * map[0..MAX_TRAFFIC_CLASS-1].
+ */
+static void sxe_hw_dcb_rx_up_tc_map_get(struct sxe_hw *hw, u8 *map)
+{
+	u32 reg, i;
+
+	reg = SXE_REG_READ(hw, SXE_RTRUP2TC);
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		map[i] = SXE_RTRUP2TC_UP_MASK &
+			(reg >> (i * SXE_RTRUP2TC_UP_SHIFT));
+	}
+
+	return;
+}
+
+/* Toggle the drop-enable bit (SRRCTL.DROP_EN) of RX queue idx. */
+void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_SRRCTL(idx));
+
+	if (is_enable)
+		ctrl |= SXE_SRRCTL_DROP_EN;
+	else
+		ctrl &= ~SXE_SRRCTL_DROP_EN;
+
+	SXE_REG_WRITE(hw, SXE_SRRCTL(idx), ctrl);
+
+	return;
+}
+
+/* Enable or disable VLAN anti-spoof checking for pool vf_idx; the VLAN
+ * bits live in the upper half of the shared SPOOF registers
+ * (offset SXE_SPOOF_VLAN_SHIFT).
+ */
+static void sxe_hw_pool_vlan_anti_spoof_set(struct sxe_hw *hw,
+							u8 vf_idx, bool status)
+{
+	u8 reg_index = vf_idx >> 3;
+	u8 bit_index = (vf_idx % 8) + SXE_SPOOF_VLAN_SHIFT;
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
+
+	if (status) {
+		value |= BIT(bit_index);
+	} else {
+		value &= ~BIT(bit_index);
+	}
+
+	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
+
+	return;
+}
+
+/* Zero the TX descriptor write-back address registers of every ring
+ * belonging to VF vf_idx.
+ */
+static void sxe_hw_vf_tx_desc_addr_clear(struct sxe_hw *hw,
+						u8 vf_idx, u8 ring_per_pool)
+{
+	u8 i;
+
+	for (i = 0; i < ring_per_pool; i++) {
+		SXE_REG_WRITE(hw, SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, i), 0);
+		SXE_REG_WRITE(hw, SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, i), 0);
+	}
+
+	return;
+}
+
+/* Disable every TX ring of VF vf_idx.
+ * NOTE(review): for rings with a non-zero TXDCTL the code pulses
+ * ENABLE on and then off -- presumably to flush pending ring state
+ * before the disable takes effect; confirm against the hardware spec.
+ */
+static void sxe_hw_vf_tx_ring_disable(struct sxe_hw *hw,
+						u8 ring_per_pool, u8 vf_idx)
+{
+	u32 ring_idx;
+	u32 reg;
+
+	for (ring_idx = 0; ring_idx < ring_per_pool; ring_idx++) {
+		u32 reg_idx = vf_idx * ring_per_pool + ring_idx;
+		reg = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		if (reg) {
+			reg |= SXE_TXDCTL_ENABLE;
+			SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg);
+			reg &= ~SXE_TXDCTL_ENABLE;
+			SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg);
+		}
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Clear the per-ring TX rate limiter (RTTBCNRC) for rings 0..ring_max-1,
+ * selecting each ring through the indirect RTTDQSEL register.
+ */
+void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max)
+{
+	u32 i;
+
+	for (i = 0; i < ring_max; i++) {
+		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
+		SXE_REG_WRITE(hw, SXE_RTTBCNRC, 0);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Steer TX TPH (TLP processing hints) for ring ring_idx to the given
+ * CPU and enable descriptor/data relaxed ordering plus descriptor TPH.
+ */
+static void sxe_hw_tx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
+{
+	u32 value = cpu;
+
+	value <<= SXE_TPH_TXCTRL_CPUID_SHIFT;
+
+	value |= SXE_TPH_TXCTRL_DESC_RRO_EN | \
+		 SXE_TPH_TXCTRL_DATA_RRO_EN | \
+		 SXE_TPH_TXCTRL_DESC_TPH_EN;
+
+	SXE_REG_WRITE(hw, SXE_TPH_TXCTRL(ring_idx), value);
+	return;
+}
+
+/* Steer RX TPH for ring ring_idx to the given CPU and enable descriptor
+ * relaxed ordering plus descriptor/data TPH.
+ */
+static void sxe_hw_rx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
+{
+	u32 value = cpu;
+
+	value <<= SXE_TPH_RXCTRL_CPUID_SHIFT;
+
+	value |= SXE_TPH_RXCTRL_DESC_RRO_EN | \
+		 SXE_TPH_RXCTRL_DATA_TPH_EN | \
+		 SXE_TPH_RXCTRL_DESC_TPH_EN;
+
+	SXE_REG_WRITE(hw, SXE_TPH_RXCTRL(ring_idx), value);
+	return;
+}
+
+/* Globally enable TPH in CB2 mode, or disable it entirely. */
+static void sxe_hw_tph_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 mode = is_enable ? SXE_TPH_CTRL_MODE_CB2 : SXE_TPH_CTRL_DISABLE;
+
+	SXE_REG_WRITE(hw, SXE_TPH_CTRL, mode);
+
+	return;
+}
+
+/* PF DMA operation table: RX/TX ring control, DCB bandwidth/PFC setup,
+ * VMDq/SR-IOV pool management and TPH steering callbacks.
+ */
+static const struct sxe_dma_operations sxe_dma_ops = {
+	.rx_dma_ctrl_init		= sxe_hw_rx_dma_ctrl_init,
+	.rx_ring_switch			= sxe_hw_rx_ring_switch,
+	.rx_ring_switch_not_polling	= sxe_hw_rx_ring_switch_not_polling,
+	.rx_ring_desc_configure		= sxe_hw_rx_ring_desc_configure,
+	.rx_desc_thresh_set		= sxe_hw_rx_desc_thresh_set,
+	.rx_rcv_ctl_configure		= sxe_hw_rx_rcv_ctl_configure,
+	.rx_lro_ctl_configure		= sxe_hw_rx_lro_ctl_configure,
+	.rx_desc_ctrl_get		= sxe_hw_rx_desc_ctrl_get,
+	.rx_dma_lro_ctl_set		= sxe_hw_rx_dma_lro_ctrl_set,
+	.rx_drop_switch			= sxe_hw_rx_drop_switch,
+	.pool_rx_ring_drop_enable	= sxe_hw_pool_rx_ring_drop_enable,
+	.rx_tph_update			= sxe_hw_rx_tph_update,
+
+	.tx_enable			= sxe_hw_tx_enable,
+	.tx_multi_ring_configure	= sxe_hw_tx_multi_ring_configure,
+	.tx_ring_desc_configure		= sxe_hw_tx_ring_desc_configure,
+	.tx_desc_thresh_set		= sxe_hw_tx_desc_thresh_set,
+	.tx_desc_wb_thresh_clear	= sxe_hw_tx_desc_wb_thresh_clear,
+	.tx_ring_switch			= sxe_hw_tx_ring_switch,
+	.tx_ring_switch_not_polling     = sxe_hw_tx_ring_switch_not_polling,
+	.tx_pkt_buf_thresh_configure	= sxe_hw_tx_pkt_buf_thresh_configure,
+	.tx_desc_ctrl_get		= sxe_hw_tx_desc_ctrl_get,
+	.tx_ring_info_get		= sxe_hw_tx_ring_info_get,
+	.tx_tph_update			= sxe_hw_tx_tph_update,
+
+	.tph_switch			= sxe_hw_tph_switch,
+
+	.vlan_tag_strip_switch		= sxe_hw_vlan_tag_strip_switch,
+	.tx_vlan_tag_set		= sxe_hw_tx_vlan_tag_set,
+	.tx_vlan_tag_clear		= sxe_hw_tx_vlan_tag_clear,
+
+	.dcb_rx_bw_alloc_configure	= sxe_hw_dcb_rx_bw_alloc_configure,
+	.dcb_tx_desc_bw_alloc_configure	= sxe_hw_dcb_tx_desc_bw_alloc_configure,
+	.dcb_tx_data_bw_alloc_configure	= sxe_hw_dcb_tx_data_bw_alloc_configure,
+	.dcb_pfc_configure		= sxe_hw_dcb_pfc_configure,
+	.dcb_tc_stats_configure		= sxe_hw_dcb_8tc_vmdq_off_stats_configure,
+	.dcb_rx_up_tc_map_set		= sxe_hw_dcb_rx_up_tc_map_set,
+	.dcb_rx_up_tc_map_get		= sxe_hw_dcb_rx_up_tc_map_get,
+	.dcb_rate_limiter_clear		= sxe_hw_dcb_rate_limiter_clear,
+	.dcb_tx_ring_rate_factor_set	= sxe_hw_dcb_tx_ring_rate_factor_set,
+
+	.vt_pool_loopback_switch	= sxe_hw_vt_pool_loopback_switch,
+	.rx_pool_get			= sxe_hw_rx_pool_bitmap_get,
+	.rx_pool_set			= sxe_hw_rx_pool_bitmap_set,
+	.tx_pool_get			= sxe_hw_tx_pool_bitmap_get,
+	.tx_pool_set			= sxe_hw_tx_pool_bitmap_set,
+
+	.vf_tx_desc_addr_clear		= sxe_hw_vf_tx_desc_addr_clear,
+	.pool_mac_anti_spoof_set	= sxe_hw_pool_mac_anti_spoof_set,
+	.pool_vlan_anti_spoof_set	= sxe_hw_pool_vlan_anti_spoof_set,
+
+	.max_dcb_memory_window_set	= sxe_hw_dcb_max_mem_window_set,
+	.spoof_count_enable		= sxe_hw_spoof_count_enable,
+
+	.vf_tx_ring_disable	        = sxe_hw_vf_tx_ring_disable,
+	.all_ring_disable               = sxe_hw_all_ring_disable,
+	.tx_ring_tail_init 	        = sxe_hw_tx_ring_tail_init,
+};
+
+
+#ifdef SXE_IPSEC_CONFIGURE
+
+/* Commit a pending RX IPsec table entry: select table "type" and slot
+ * idx in IPSRXIDX and pulse the WRITE strobe, preserving the global
+ * IPsec enable bit.
+ */
+static void sxe_hw_ipsec_rx_sa_load(struct sxe_hw *hw, u16 idx,
+					u8 type)
+{
+	u32 reg = SXE_REG_READ(hw, SXE_IPSRXIDX);
+
+	reg &= SXE_RXTXIDX_IPS_EN;
+	reg |= type << SXE_RXIDX_TBL_SHIFT |
+	       idx << SXE_RXTXIDX_IDX_SHIFT |
+	       SXE_RXTXIDX_WRITE;
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Write an IP address (ip_len 32-bit words, little-endian on the wire)
+ * into the RX IPsec IP-address staging registers and commit it to IP
+ * table slot ip_idx.
+ */
+static void sxe_hw_ipsec_rx_ip_store(struct sxe_hw *hw,
+					     __be32 *ip_addr, u8 ip_len, u8 ip_idx)
+{
+	u8 i;
+
+	for (i = 0; i < ip_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSRXIPADDR(i),
+				(__force u32)cpu_to_le32((__force u32)ip_addr[i]));
+	}
+	SXE_WRITE_FLUSH(hw);
+	sxe_hw_ipsec_rx_sa_load(hw, ip_idx, SXE_IPSEC_IP_TABLE);
+
+	return;
+}
+
+/* Stage an SPI plus its IP-table index and commit them to SPI table
+ * slot sa_idx.
+ */
+static void sxe_hw_ipsec_rx_spi_store(struct sxe_hw *hw,
+					     __be32 spi, u8 ip_idx, u16 sa_idx)
+{
+	SXE_REG_WRITE(hw, SXE_IPSRXSPI, (__force u32)cpu_to_le32((__force u32)spi));
+
+	SXE_REG_WRITE(hw, SXE_IPSRXIPIDX, ip_idx);
+
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_SPI_TABLE);
+
+	return;
+}
+
+/* Stage an RX SA key (big-endian words, written in reverse order), salt
+ * and mode, then commit them to key table slot sa_idx.
+ */
+static void sxe_hw_ipsec_rx_key_store(struct sxe_hw *hw,
+			u32 *key,  u8 key_len, u32 salt, u32 mode, u16 sa_idx)
+{
+	u8 i;
+
+	for (i = 0; i < key_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSRXKEY(i),
+				(__force u32)cpu_to_be32(key[(key_len - 1) - i]));
+	}
+
+	SXE_REG_WRITE(hw, SXE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
+	SXE_REG_WRITE(hw, SXE_IPSRXMOD, mode);
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_KEY_TABLE);
+
+	return;
+}
+
+/* Commit a staged TX SA to slot idx via IPSTXIDX, preserving the global
+ * IPsec enable bit.
+ */
+static void sxe_hw_ipsec_tx_sa_load(struct sxe_hw *hw, u16 idx)
+{
+	u32 reg = SXE_REG_READ(hw, SXE_IPSTXIDX);
+
+	reg &= SXE_RXTXIDX_IPS_EN;
+	reg |= idx << SXE_RXTXIDX_IDX_SHIFT | SXE_RXTXIDX_WRITE;
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Stage a TX SA key (big-endian words, written in reverse order) plus
+ * salt, then commit them to TX SA slot sa_idx.
+ */
+static void sxe_hw_ipsec_tx_key_store(struct sxe_hw *hw, u32 *key,
+						u8 key_len, u32 salt, u16 sa_idx)
+{
+	u8 i;
+
+	for (i = 0; i < key_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSTXKEY(i),
+			(__force u32)cpu_to_be32(key[(key_len - 1) - i]));
+	}
+	SXE_REG_WRITE(hw, SXE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_tx_sa_load(hw, sa_idx);
+
+	return;
+}
+
+/* Quiesce the security TX/RX data paths: set the disable bits, then
+ * poll (up to ~200 ms) until both paths report ready/empty. When the
+ * link is down, MAC loopback is temporarily enabled so queued frames
+ * can drain.
+ */
+static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 tx_empty, rx_empty;
+	u32 limit;
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) & SXE_SECTXSTAT_SECTX_RDY;
+	rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) & SXE_SECRXSTAT_SECRX_RDY;
+	if (tx_empty && rx_empty) {
+		goto l_out;
+	}
+
+	if (!is_linkup) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, SXE_LPBKCTRL_EN);
+
+		SXE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	limit = 20;
+	do {
+	   mdelay(10);
+	   tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+		   SXE_SECTXSTAT_SECTX_RDY;
+	   rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+		   SXE_SECRXSTAT_SECRX_RDY;
+	} while (!(tx_empty && rx_empty) && limit--);
+
+	if (!is_linkup) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0);
+
+		SXE_WRITE_FLUSH(hw);
+	}
+
+l_out:
+	return;
+}
+
+/* Start the IPsec offload engine: quiesce the security data path,
+ * program the TX minimum IFG and buffer almost-full threshold, put the
+ * security block in store-and-forward mode, and set the global IPsec
+ * enable bit on both TX and RX index registers.
+ */
+static void sxe_hw_ipsec_engine_start(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 reg;
+
+	sxe_hw_ipsec_sec_data_stop(hw, is_linkup);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXBUFFAF);
+	reg = (reg & 0xfffffc00) | 0x15;
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, 0);
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_STORE_FORWARD);
+
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, SXE_RXTXIDX_IPS_EN);
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, SXE_RXTXIDX_IPS_EN);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Stop the IPsec offload engine: quiesce the security data path, clear
+ * the global IPsec enable bits, disable the security TX/RX blocks,
+ * restore the default buffer threshold and minimum IFG, and bypass the
+ * security blocks entirely.
+ */
+static void sxe_hw_ipsec_engine_stop(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 ctrl;
+
+	sxe_hw_ipsec_sec_data_stop(hw, is_linkup);
+
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
+
+	ctrl = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	ctrl |= SXE_SECTXCTRL_SECTX_DIS;
+	ctrl &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, ctrl);
+
+	ctrl = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	ctrl |= SXE_SECRXCTRL_SECRX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, ctrl);
+
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	ctrl = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	ctrl = (ctrl & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, ctrl);
+
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Return true when either the TX or RX security offload path reports
+ * its offload-disable status bit.
+ */
+bool sxe_hw_ipsec_offload_is_disable(struct sxe_hw *hw)
+{
+	u32 tx_stat = SXE_REG_READ(hw, SXE_SECTXSTAT);
+	u32 rx_stat = SXE_REG_READ(hw, SXE_SECRXSTAT);
+
+	return (tx_stat & SXE_SECTXSTAT_SECTX_OFF_DIS) ||
+	       (rx_stat & SXE_SECRXSTAT_SECRX_OFF_DIS);
+}
+
+/* Clear both IPsec SA index registers, disabling SA processing. */
+void sxe_hw_ipsec_sa_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
+
+	return;
+}
+
+/* Security (IPsec offload) operation table. */
+static const struct sxe_sec_operations sxe_sec_ops = {
+	.ipsec_rx_ip_store		= sxe_hw_ipsec_rx_ip_store,
+	.ipsec_rx_spi_store		= sxe_hw_ipsec_rx_spi_store,
+	.ipsec_rx_key_store		= sxe_hw_ipsec_rx_key_store,
+	.ipsec_tx_key_store		= sxe_hw_ipsec_tx_key_store,
+	.ipsec_sec_data_stop		= sxe_hw_ipsec_sec_data_stop,
+	.ipsec_engine_start		= sxe_hw_ipsec_engine_start,
+	.ipsec_engine_stop		= sxe_hw_ipsec_engine_stop,
+	.ipsec_sa_disable		= sxe_hw_ipsec_sa_disable,
+	.ipsec_offload_is_disable	= sxe_hw_ipsec_offload_is_disable,
+};
+#endif
+
+static const struct sxe_sec_operations sxe_sec_ops = { 0 };
+
+
+/* Clear the hardware statistics by reading every counter register and
+ * discarding the value (counters are presumed read-to-clear -- confirm
+ * against the datasheet).
+ */
+void sxe_hw_stats_regs_clean(struct sxe_hw *hw)
+{
+	u16 i;
+	for (i = 0; i < 16; i++) {
+		SXE_REG_READ(hw, SXE_QPTC(i));
+		SXE_REG_READ(hw, SXE_QPRC(i));
+		SXE_REG_READ(hw, SXE_QBTC_H(i));
+		SXE_REG_READ(hw, SXE_QBTC_L(i));
+		SXE_REG_READ(hw, SXE_QBRC_H(i));
+		SXE_REG_READ(hw, SXE_QBRC_L(i));
+		SXE_REG_READ(hw, SXE_QPRDC(i));
+	}
+
+	SXE_REG_READ(hw, SXE_RXDGBCH);
+	SXE_REG_READ(hw, SXE_RXDGBCL);
+	SXE_REG_READ(hw, SXE_RXDGPC);
+	SXE_REG_READ(hw, SXE_TXDGPC);
+	SXE_REG_READ(hw, SXE_TXDGBCH);
+	SXE_REG_READ(hw, SXE_TXDGBCL);
+	SXE_REG_READ(hw,SXE_RXDDGPC);
+	SXE_REG_READ(hw, SXE_RXDDGBCH);
+	SXE_REG_READ(hw,SXE_RXDDGBCL);
+	SXE_REG_READ(hw,SXE_RXLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXLPBKGBCH);
+	SXE_REG_READ(hw,SXE_RXLPBKGBCL);
+	SXE_REG_READ(hw,SXE_RXDLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXDLPBKGBCH);
+	SXE_REG_READ(hw,SXE_RXDLPBKGBCL);
+	SXE_REG_READ(hw,SXE_RXTPCIN);
+	SXE_REG_READ(hw,SXE_RXTPCOUT);
+	SXE_REG_READ(hw,SXE_RXPRDDC);
+	SXE_REG_READ(hw, SXE_TXSWERR);
+	SXE_REG_READ(hw, SXE_TXSWITCH);
+	SXE_REG_READ(hw, SXE_TXREPEAT);
+	SXE_REG_READ(hw, SXE_TXDESCERR);
+
+	SXE_REG_READ(hw, SXE_CRCERRS);
+	SXE_REG_READ(hw, SXE_ERRBC);
+	SXE_REG_READ(hw, SXE_RLEC);
+	SXE_REG_READ(hw, SXE_PRC64);
+	SXE_REG_READ(hw, SXE_PRC127);
+	SXE_REG_READ(hw, SXE_PRC255);
+	SXE_REG_READ(hw, SXE_PRC511);
+	SXE_REG_READ(hw, SXE_PRC1023);
+	SXE_REG_READ(hw, SXE_PRC1522);
+	SXE_REG_READ(hw, SXE_GPRC);
+	SXE_REG_READ(hw, SXE_BPRC);
+	SXE_REG_READ(hw, SXE_MPRC);
+	SXE_REG_READ(hw, SXE_GPTC);
+	SXE_REG_READ(hw, SXE_GORCL);
+	SXE_REG_READ(hw, SXE_GORCH);
+	SXE_REG_READ(hw, SXE_GOTCL);
+	SXE_REG_READ(hw, SXE_GOTCH);
+	SXE_REG_READ(hw, SXE_RUC);
+	SXE_REG_READ(hw, SXE_RFC);
+	SXE_REG_READ(hw, SXE_ROC);
+	SXE_REG_READ(hw, SXE_RJC);
+	for (i = 0; i < 8; i++) {
+		SXE_REG_READ(hw, SXE_PRCPF(i));
+	}
+	SXE_REG_READ(hw, SXE_TORL);
+	SXE_REG_READ(hw, SXE_TORH);
+	SXE_REG_READ(hw, SXE_TPR);
+	SXE_REG_READ(hw, SXE_TPT);
+	SXE_REG_READ(hw, SXE_PTC64);
+	SXE_REG_READ(hw, SXE_PTC127);
+	SXE_REG_READ(hw, SXE_PTC255);
+	SXE_REG_READ(hw, SXE_PTC511);
+	SXE_REG_READ(hw, SXE_PTC1023);
+	SXE_REG_READ(hw, SXE_PTC1522);
+	SXE_REG_READ(hw, SXE_MPTC);
+	SXE_REG_READ(hw, SXE_BPTC);
+	for (i = 0; i < 8; i++) {
+		SXE_REG_READ(hw, SXE_PFCT(i));
+	}
+
+	return;
+}
+
+/* Accumulate the sequence-sensitive counters (per-priority pause frames
+ * and good TX packet/byte totals) into *stats. In DPDK builds, GOTCH is
+ * re-read until it reads back zero (presumed read-to-clear high half --
+ * confirm) so the next delta is not double counted.
+ */
+static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u8 i;
+	u64 tx_pfc_num = 0;
+#ifdef SXE_DPDK
+	u64 gotch = 0;
+	u32 rycle_cnt = 10;
+#endif
+
+	for (i = 0; i < 8; i++) {
+		stats->prcpf[i] += SXE_REG_READ(hw,SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw,SXE_PFCT(i));
+		stats->pfct[i] += tx_pfc_num;
+		stats->total_tx_pause += tx_pfc_num;
+	}
+
+	stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC);
+	stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) |
+			((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32));
+#ifdef SXE_DPDK
+	do {
+		gotch = SXE_REG_READ(hw, SXE_GOTCH);
+		rycle_cnt--;
+	} while (gotch != 0 && rycle_cnt != 0);
+	if (gotch != 0) {
+		LOG_INFO("GOTCH is not clear!\n");
+	}
+#endif
+
+	return;
+}
+
+/* Final read of the sequence-sensitive counters (good TX totals first,
+ * then per-priority pause frames), accumulating into *stats and leaving
+ * GOTCH drained.
+ */
+void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u8 i;
+	u64 tx_pfc_num = 0;
+	u64 gotch = 0;
+	u32 rycle_cnt = 10;
+
+	stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) |
+			((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32));
+	stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC);
+	do {
+		gotch = SXE_REG_READ(hw, SXE_GOTCH);
+		rycle_cnt--;
+	} while (gotch != 0 && rycle_cnt != 0);
+	if (gotch != 0) {
+		LOG_INFO("GOTCH is not clear!\n");
+	}
+
+	for (i = 0; i < 8; i++) {
+		stats->prcpf[i] += SXE_REG_READ(hw,SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw,SXE_PFCT(i));
+		stats->pfct[i] += tx_pfc_num;
+		stats->total_tx_pause += tx_pfc_num;
+	}
+
+	return;
+}
+
+/* Read all MAC/queue statistic counters and accumulate them into *stats.
+ * High/low counter halves are read H-then-L with a read barrier in
+ * between; in DPDK builds the high halves (GOTCH/GORCH/TORH) are
+ * re-read until zero to drain them, and the DPDK totals are adjusted to
+ * exclude CRC bytes and pause frames.
+ */
+void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u64 rjc;
+	u32 i, rx_dbu_drop, ring_drop = 0;
+	u64 tpr = 0;
+#ifdef SXE_DPDK
+	u32 rycle_cnt = 10;
+	u64 gorch, torh = 0;
+#endif
+
+	for (i = 0; i < 16; i++) {
+		stats->qptc[i] += SXE_REG_READ(hw, SXE_QPTC(i));
+		stats->qprc[i] += SXE_REG_READ(hw, SXE_QPRC(i));
+		ring_drop = SXE_REG_READ(hw, SXE_QPRDC(i));
+		stats->qprdc[i] += ring_drop;
+		stats->hw_rx_no_dma_resources += ring_drop;
+
+		stats->qbtc[i] += ((u64)SXE_REG_READ(hw, SXE_QBTC_H(i)) << 32);
+		SXE_RMB();
+		stats->qbtc[i] += SXE_REG_READ(hw, SXE_QBTC_L(i));
+
+		stats->qbrc[i] += ((u64)SXE_REG_READ(hw, SXE_QBRC_H(i)) << 32);
+		SXE_RMB();
+		stats->qbrc[i] += SXE_REG_READ(hw, SXE_QBRC_L(i));
+	}
+	stats->rxdgbc += ((u64)SXE_REG_READ(hw, SXE_RXDGBCH) << 32) +
+				(SXE_REG_READ(hw, SXE_RXDGBCL));
+
+	stats->rxdgpc += SXE_REG_READ(hw, SXE_RXDGPC);
+	stats->txdgpc += SXE_REG_READ(hw, SXE_TXDGPC);
+	stats->txdgbc += (((u64)SXE_REG_READ(hw, SXE_TXDGBCH) << 32) +
+				SXE_REG_READ(hw, SXE_TXDGBCL));
+
+	stats->rxddpc += SXE_REG_READ(hw,SXE_RXDDGPC);
+	stats->rxddbc += ((u64)SXE_REG_READ(hw, SXE_RXDDGBCH) << 32) +
+				(SXE_REG_READ(hw,SXE_RXDDGBCL));
+
+	stats->rxlpbkpc += SXE_REG_READ(hw,SXE_RXLPBKGPC);
+	stats->rxlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXLPBKGBCH) << 32) +
+			(SXE_REG_READ(hw,SXE_RXLPBKGBCL));
+
+	stats->rxdlpbkpc += SXE_REG_READ(hw,SXE_RXDLPBKGPC);
+	stats->rxdlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXDLPBKGBCH) << 32) +
+				(SXE_REG_READ(hw,SXE_RXDLPBKGBCL));
+	stats->rxtpcing += SXE_REG_READ(hw,SXE_RXTPCIN);
+	stats->rxtpceng += SXE_REG_READ(hw,SXE_RXTPCOUT);
+	stats->prddc += SXE_REG_READ(hw,SXE_RXPRDDC);
+	stats->txswerr += SXE_REG_READ(hw, SXE_TXSWERR);
+	stats->txswitch += SXE_REG_READ(hw, SXE_TXSWITCH);
+	stats->txrepeat += SXE_REG_READ(hw, SXE_TXREPEAT);
+	stats->txdescerr += SXE_REG_READ(hw, SXE_TXDESCERR);
+
+	for (i = 0; i < 8; i++) {
+		stats->dburxtcin[i] += SXE_REG_READ(hw, SXE_DBUDRTCICNT(i));
+		stats->dburxtcout[i] += SXE_REG_READ(hw, SXE_DBUDRTCOCNT(i));
+		stats->dburxgdreecnt[i] += SXE_REG_READ(hw, SXE_DBUDREECNT(i));
+		rx_dbu_drop = SXE_REG_READ(hw, SXE_DBUDROFPCNT(i));
+		stats->dburxdrofpcnt[i] += rx_dbu_drop;
+		stats->dbutxtcin[i] += SXE_REG_READ(hw,SXE_DBUDTTCICNT(i));
+		stats->dbutxtcout[i] += SXE_REG_READ(hw,SXE_DBUDTTCOCNT(i));
+	}
+
+	/* Flow director add/remove counters are packed 16+16 bits. */
+	stats->fnavadd += (SXE_REG_READ(hw, SXE_FNAVUSTAT) & 0xFFFF);
+	stats->fnavrmv += ((SXE_REG_READ(hw, SXE_FNAVUSTAT) >> 16) & 0xFFFF);
+	stats->fnavadderr += (SXE_REG_READ(hw, SXE_FNAVFSTAT) & 0xFFFF);
+	stats->fnavrmverr += ((SXE_REG_READ(hw, SXE_FNAVFSTAT) >> 16) & 0xFFFF);
+	stats->fnavmatch += SXE_REG_READ(hw, SXE_FNAVMATCH);
+	stats->fnavmiss += SXE_REG_READ(hw, SXE_FNAVMISS);
+
+	sxe_hw_stats_seq_get(hw, stats);
+
+	stats->crcerrs += SXE_REG_READ(hw, SXE_CRCERRS);
+	stats->errbc   += SXE_REG_READ(hw, SXE_ERRBC);
+	stats->bprc += SXE_REG_READ(hw, SXE_BPRC);
+	stats->mprc += SXE_REG_READ(hw, SXE_MPRC);
+	stats->roc += SXE_REG_READ(hw, SXE_ROC);
+	stats->prc64 += SXE_REG_READ(hw, SXE_PRC64);
+	stats->prc127 += SXE_REG_READ(hw, SXE_PRC127);
+	stats->prc255 += SXE_REG_READ(hw, SXE_PRC255);
+	stats->prc511 += SXE_REG_READ(hw, SXE_PRC511);
+	stats->prc1023 += SXE_REG_READ(hw, SXE_PRC1023);
+	stats->prc1522 += SXE_REG_READ(hw, SXE_PRC1522);
+	stats->rlec += SXE_REG_READ(hw, SXE_RLEC);
+	stats->mptc += SXE_REG_READ(hw, SXE_MPTC);
+	stats->ruc += SXE_REG_READ(hw, SXE_RUC);
+	stats->rfc += SXE_REG_READ(hw, SXE_RFC);
+
+	/* Jabbers also count toward oversize. */
+	rjc = SXE_REG_READ(hw, SXE_RJC);
+	stats->rjc += rjc;
+	stats->roc += rjc;
+
+	tpr = SXE_REG_READ(hw, SXE_TPR);
+	stats->tpr += tpr;
+	stats->tpt += SXE_REG_READ(hw, SXE_TPT);
+	stats->ptc64 += SXE_REG_READ(hw, SXE_PTC64);
+	stats->ptc127 += SXE_REG_READ(hw, SXE_PTC127);
+	stats->ptc255 += SXE_REG_READ(hw, SXE_PTC255);
+	stats->ptc511 += SXE_REG_READ(hw, SXE_PTC511);
+	stats->ptc1023 += SXE_REG_READ(hw, SXE_PTC1023);
+	stats->ptc1522 += SXE_REG_READ(hw, SXE_PTC1522);
+	stats->bptc += SXE_REG_READ(hw, SXE_BPTC);
+
+	stats->gprc += SXE_REG_READ(hw, SXE_GPRC);
+	stats->gorc += (SXE_REG_READ(hw, SXE_GORCL) |
+			((u64)SXE_REG_READ(hw, SXE_GORCH) << 32));
+#ifdef SXE_DPDK
+	do {
+		gorch = SXE_REG_READ(hw, SXE_GORCH);
+		rycle_cnt--;
+	} while (gorch != 0 && rycle_cnt != 0);
+	if (gorch != 0) {
+		LOG_INFO("GORCH is not clear!\n");
+	}
+#endif
+
+	stats->tor += (SXE_REG_READ(hw, SXE_TORL) |
+			((u64)SXE_REG_READ(hw, SXE_TORH) << 32));
+#ifdef SXE_DPDK
+	rycle_cnt = 10;
+	do {
+		torh = SXE_REG_READ(hw, SXE_TORH);
+		rycle_cnt--;
+	} while (torh != 0 && rycle_cnt != 0);
+	if (torh != 0) {
+		LOG_INFO("TORH is not clear!\n");
+	}
+#endif
+
+#ifdef SXE_DPDK
+	stats->tor -= tpr * RTE_ETHER_CRC_LEN;
+	stats->gptc = stats->total_gptc - stats->total_tx_pause;
+	stats->gotc = stats->total_gotc - stats->total_tx_pause * RTE_ETHER_MIN_LEN
+			- stats->gptc * RTE_ETHER_CRC_LEN;
+#else
+	stats->gptc = stats->total_gptc;
+	stats->gotc = stats->total_gotc;
+#endif
+
+	return;
+}
+
+/* Read the hw tx packet count register (SXE_TXDGPC). */
+static u32 sxe_hw_tx_packets_num_get(struct sxe_hw *hw)
+{
+	u32 tx_pkt_cnt = SXE_REG_READ(hw, SXE_TXDGPC);
+
+	return tx_pkt_cnt;
+}
+
+/* Read the security-violation packet counter (SXE_SSVPC). */
+static u32 sxe_hw_unsec_packets_num_get(struct sxe_hw *hw)
+{
+	u32 unsec_cnt = SXE_REG_READ(hw, SXE_SSVPC);
+
+	return unsec_cnt;
+}
+
+/* Dump the MAC registers listed in mac_regs[] into regs_buff.
+ * buf_size is in bytes; returns the number of u32 entries filled.
+ */
+static u32 sxe_hw_mac_stats_dump(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size)
+{
+	u32 idx;
+	u32 entry_cnt = buf_size / sizeof(u32);
+
+	for (idx = 0; idx < entry_cnt; idx++)
+		regs_buff[idx] = SXE_REG_READ(hw, mac_regs[idx]);
+
+	return idx;
+}
+
+/* Read the DBU-to-MAC tx counter register (SXE_DTMPCNT). */
+static u32 sxe_hw_tx_dbu_to_mac_stats(struct sxe_hw *hw)
+{
+	u32 dtm_cnt = SXE_REG_READ(hw, SXE_DTMPCNT);
+
+	return dtm_cnt;
+}
+
+/* Statistics operations table, installed as hw->stat.ops by
+ * sxe_hw_ops_init().
+ */
+static const struct sxe_stat_operations sxe_stat_ops = {
+	.stats_get			= sxe_hw_stats_get,
+	.stats_clear			= sxe_hw_stats_regs_clean,
+	.mac_stats_dump			= sxe_hw_mac_stats_dump,
+	.tx_packets_num_get		= sxe_hw_tx_packets_num_get,
+	.unsecurity_packets_num_get	= sxe_hw_unsec_packets_num_get,
+	.tx_dbu_to_mac_stats		= sxe_hw_tx_dbu_to_mac_stats,
+};
+
+/* Initialize mailbox message length and retry parameters, and zero
+ * every mailbox statistics counter.
+ */
+void sxe_hw_mbx_init(struct sxe_hw *hw)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+
+	mbx->msg_len = SXE_MBX_MSG_NUM;
+	mbx->interval = SXE_MBX_RETRY_INTERVAL;
+	mbx->retry = SXE_MBX_RETRY_COUNT;
+
+	mbx->stats.rcv_msgs = 0;
+	mbx->stats.send_msgs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.rsts = 0;
+}
+
+/* Test mask against PFMBICR[index]; on a hit, write the mask back to
+ * clear the cause and report true.
+ */
+static bool sxe_hw_vf_irq_check(struct sxe_hw *hw, u32 mask, u32 index)
+{
+	u32 pfmbicr = SXE_REG_READ(hw, SXE_PFMBICR(index));
+	bool pending = (pfmbicr & mask) != 0;
+
+	if (pending)
+		SXE_REG_WRITE(hw, SXE_PFMBICR(index), mask);
+
+	return pending;
+}
+
+/* Check the VFLRE bit of vf_idx; if set, clear it through VFLREC,
+ * count the reset and return true.
+ */
+bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u32 reg_idx = vf_idx >> 5;
+	u32 vf_bit = BIT(vf_idx % 32);
+	u32 vflre = SXE_REG_READ(hw, SXE_VFLRE(reg_idx));
+
+	if (!(vflre & vf_bit))
+		return false;
+
+	SXE_REG_WRITE(hw, SXE_VFLREC(reg_idx), vf_bit);
+	hw->mbx.stats.rsts++;
+
+	return true;
+}
+
+/* Check (and clear) the mailbox request irq bit of vf_idx. */
+bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 reg_idx = vf_idx >> 4;
+	u8 vf_shift = vf_idx % 16;
+	bool req = sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFREQ << vf_shift,
+				       reg_idx);
+
+	if (req)
+		hw->mbx.stats.reqs++;
+
+	return req;
+}
+
+/* Check (and clear) the mailbox ack irq bit of vf_idx. */
+bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 reg_idx = vf_idx >> 4;
+	u8 vf_shift = vf_idx % 16;
+	bool ack = sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFACK << vf_shift,
+				       reg_idx);
+
+	if (ack)
+		hw->mbx.stats.acks++;
+
+	return ack;
+}
+
+/* Try to take the PF side of vf_idx's mailbox.
+ *
+ * Sets SXE_PFMAILBOX_PFU and reads it back to confirm ownership,
+ * retrying up to hw->mbx.retry times with hw->mbx.interval
+ * microseconds between attempts. Returns true when the lock was
+ * acquired.
+ */
+static bool sxe_hw_mbx_lock(struct sxe_hw *hw, u8 vf_idx)
+{
+	u32 value;
+	bool ret = false;
+	u32 retry = hw->mbx.retry;
+
+	while (retry--) {
+		SXE_REG_WRITE(hw, SXE_PFMAILBOX(vf_idx), SXE_PFMAILBOX_PFU);
+
+		value = SXE_REG_READ(hw, SXE_PFMAILBOX(vf_idx));
+		if (value & SXE_PFMAILBOX_PFU) {
+			ret = true;
+			break;
+		}
+
+		udelay(hw->mbx.interval);
+	}
+
+	return ret;
+}
+
+/* Read a VF->PF mailbox message into msg[].
+ *
+ * Reads at most min(msg_len, mbx->msg_len) dwords from the mailbox
+ * memory of VF `index`, then acknowledges the VF via
+ * SXE_PFMAILBOX_ACK. Requires the mailbox lock; returns 0 on success
+ * or -SXE_ERR_MBX_LOCK_FAIL when the lock cannot be taken.
+ */
+s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+	u8 i;
+	s32 ret = 0;
+	u16 msg_entry;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* clamp the read length to the hw mailbox size */
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	if (!sxe_hw_mbx_lock(hw, index)) {
+		ret = -SXE_ERR_MBX_LOCK_FAIL;
+		LOG_ERROR_BDF("vf idx:%d msg_len:%d rcv lock mailbox fail.(err:%d)\n",
+			   index, msg_len, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = SXE_REG_READ(hw, (SXE_PFMBMEM(index) + (i << 2)));
+		LOG_DEBUG_BDF("vf_idx:%u read mbx mem[%u]:0x%x.\n",
+			      index, i, msg[i]);
+	}
+
+	/* ack releases the mailbox back to the VF */
+	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_ACK);
+	mbx->stats.rcv_msgs++;
+
+l_out:
+	return ret;
+}
+
+/* Write a msg_len-dword PF->VF message into the mailbox memory.
+ *
+ * Rejects messages longer than mbx->msg_len, takes the mailbox lock,
+ * merges preserved bits of the previous header into msg[0] (control
+ * messages keep SXE_MSGID_MASK of the old header, others keep
+ * SXE_PFMSG_MASK), then signals the VF with SXE_PFMAILBOX_STS.
+ * Returns 0, -EINVAL on oversize, or -SXE_ERR_MBX_LOCK_FAIL.
+ */
+s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+	u8 i;
+	s32 ret = 0;
+	u32 old;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (msg_len > mbx->msg_len) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("pf reply msg num:%d exceed limit:%d reply fail.(err:%d)\n",
+			  msg_len, mbx->msg_len, ret);
+		goto l_out;
+	}
+
+	if (!sxe_hw_mbx_lock(hw, index)) {
+		ret = -SXE_ERR_MBX_LOCK_FAIL;
+		LOG_ERROR_BDF("send msg len:%u to vf idx:%u msg[0]:0x%x "
+			   "lock mailbox fail.(err:%d)\n",
+			   msg_len, index, msg[0], ret);
+		goto l_out;
+	}
+
+	/* carry over message-id bits from the previous header */
+	old = SXE_REG_READ(hw, (SXE_PFMBMEM(index)));
+	LOG_DEBUG_BDF("original send msg:0x%x. mbx mem[0]:0x%x\n", *msg, old);
+	if (msg[0] & SXE_CTRL_MSG_MASK) {
+		msg[0] |= (old & SXE_MSGID_MASK);
+	} else {
+		msg[0] |= (old & SXE_PFMSG_MASK);
+	}
+
+	for (i = 0; i < msg_len; i++) {
+		SXE_REG_WRITE(hw, (SXE_PFMBMEM(index) + (i << 2)), msg[i]);
+		LOG_DEBUG_BDF("vf_idx:%u write mbx mem[%u]:0x%x.\n",
+			      index, i, msg[i]);
+	}
+
+	/* STS notifies the VF that the reply is ready */
+	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_STS);
+	mbx->stats.send_msgs++;
+
+l_out:
+	return ret;
+}
+
+/* Zero every message slot of vf_idx's PF mailbox memory and flush. */
+void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u8 slot;
+
+	for (slot = 0; slot < hw->mbx.msg_len; slot++)
+		SXE_REG_WRITE_ARRAY(hw, SXE_PFMBMEM(vf_idx), slot, 0);
+
+	SXE_WRITE_FLUSH(hw);
+
+	LOG_INFO_BDF("vf_idx:%u clear mbx mem.\n", vf_idx);
+}
+
+/* Mailbox operations table, installed as hw->mbx.ops by
+ * sxe_hw_ops_init().
+ */
+static const struct sxe_mbx_operations sxe_mbx_ops = {
+	.init		= sxe_hw_mbx_init,
+
+	.req_check	= sxe_hw_vf_req_check,
+	.ack_check	= sxe_hw_vf_ack_check,
+	.rst_check	= sxe_hw_vf_rst_check,
+
+	.msg_send	= sxe_hw_send_msg_to_vf,
+	.msg_rcv	= sxe_hw_rcv_msg_from_vf,
+
+	.mbx_mem_clear	= sxe_hw_mbx_mem_clear,
+};
+
+/* Program the PCIe virtualization mode register (GCR_EXT). */
+void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_GCR_EXT, value);
+}
+
+/* PCIe operations table, installed as hw->pcie.ops by
+ * sxe_hw_ops_init().
+ */
+static const struct sxe_pcie_operations sxe_pcie_ops = {
+	.vt_mode_set	= sxe_hw_pcie_vt_mode_set,
+};
+
+/* Acquire the HDC (host-driver communication channel) lock.
+ *
+ * Releases any stale SW lock first, polls up to `trylock` times for
+ * SXE_HDC_SW_LK_BIT to clear, then verifies that the PF lock bit is
+ * held and records it in hw->hdc.pf_lock_val.
+ * Returns 0 on success or -SXE_ERR_HDC_LOCK_BUSY.
+ *
+ * Fixes vs. original: `val` is initialized (it was logged
+ * uninitialized when trylock == 0) and the loop counter is u32 so a
+ * trylock value above 65535 cannot truncate the bound.
+ */
+s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock)
+{
+	u32 val = 0;
+	u32 i;
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+	SXE_WRITE_FLUSH(hw);
+
+	for (i = 0; i < trylock; i++) {
+		val = SXE_REG_READ(hw, SXE_HDC_SW_LK) & SXE_HDC_SW_LK_BIT;
+		if (!val) {
+			break;
+		}
+
+		udelay(10);
+	}
+
+	if (i >= trylock) {
+		LOG_ERROR_BDF("hdc is busy, reg: 0x%x\n", val);
+		ret = -SXE_ERR_HDC_LOCK_BUSY;
+		goto l_out;
+	}
+
+	val = SXE_REG_READ(hw, SXE_HDC_PF_LK) & SXE_HDC_PF_LK_BIT;
+	if (!val) {
+		SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+		LOG_ERROR_BDF("get hdc lock fail, reg: 0x%x\n", val);
+		ret = -SXE_ERR_HDC_LOCK_BUSY;
+		goto l_out;
+	}
+
+	hw->hdc.pf_lock_val = val;
+	LOG_DEBUG_BDF("hw[%p]'s port[%u] got pf lock\n", hw, val);
+
+l_out:
+	return ret;
+}
+
+/* Release the HDC software lock.
+ *
+ * Writes SXE_HDC_RELEASE_SW_LK until the PF lock bit recorded in
+ * hw->hdc.pf_lock_val reads back clear, retrying at most retry_cnt
+ * additional times; clears pf_lock_val on success.
+ */
+void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	do {
+		SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+		udelay(1);
+		if (!(SXE_REG_READ(hw, SXE_HDC_PF_LK) & hw->hdc.pf_lock_val)) {
+			LOG_DEBUG_BDF("hw[%p]'s port[%u] release pf lock\n", hw,
+				hw->hdc.pf_lock_val);
+			hw->hdc.pf_lock_val = 0;
+			break;
+		}
+	} while((retry_cnt--) > 0);
+
+	return;
+}
+
+/* Zero the HDC firmware-over flag register. */
+void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_FW_OV, 0);
+	return;
+}
+
+/* Report whether firmware has raised SXE_HDC_FW_OV_BIT. */
+bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw)
+{
+	u32 fw_ov_reg = SXE_REG_READ(hw, SXE_HDC_FW_OV);
+
+	return (fw_ov_reg & SXE_HDC_FW_OV_BIT) != 0;
+}
+
+/* Raise the software-over bit to hand the HDC packet to fw, then
+ * flush the posted write.
+ */
+void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_SW_OV, SXE_HDC_SW_OV_BIT);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Write the HDC packet header dword. */
+void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, value);
+}
+
+/* Write one data dword of the outgoing HDC packet. */
+void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
+						u16 dword_index, u32 value)
+{
+	SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index, value);
+}
+
+/* Read back the HDC packet header dword written by firmware. */
+u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw)
+{
+	u32 ack_header = SXE_REG_READ(hw, SXE_HDC_PACKET_HEAD0);
+
+	return ack_header;
+}
+
+/* Read one data dword of the incoming HDC packet. */
+u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+						u16 dword_index)
+{
+	u32 data = SXE_READ_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index);
+
+	return data;
+}
+
+/* Read and log the firmware status register. */
+u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 fw_status = SXE_REG_READ(hw, SXE_FW_STATUS_REG);
+
+	LOG_DEBUG_BDF("fw status[0x%x]\n", fw_status);
+
+	return fw_status;
+}
+
+/* Publish the driver status word to firmware. */
+void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_DRV_STATUS_REG, value);
+}
+
+/* Read and log the firmware HDC channel state register. */
+u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 channel_state = SXE_REG_READ(hw, SXE_FW_HDC_STATE_REG);
+
+	LOG_DEBUG_BDF("hdc channel state[0x%x]\n", channel_state);
+
+	return channel_state;
+}
+
+/* Read and log the HDC MSI status register. */
+STATIC u32 sxe_hw_hdc_irq_event_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 msi_status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
+
+	LOG_DEBUG_BDF("msi status[0x%x]\n", msi_status);
+
+	return msi_status;
+}
+
+/* Clear the given event bits in the HDC MSI status register. */
+static void sxe_hw_hdc_irq_event_clear(struct sxe_hw *hw, u32 event)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 msi_status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
+
+	LOG_DEBUG_BDF("msi status[0x%x] and clear bit=[0x%x]\n", msi_status,
+		      event);
+
+	SXE_REG_WRITE(hw, SXE_HDC_MSI_STATUS_REG, msi_status & ~event);
+}
+
+/* Reset HDC software lock, packet header and all data dwords to 0. */
+static void sxe_hw_hdc_resource_clean(struct sxe_hw *hw)
+{
+	u16 dw;
+
+	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, 0x0);
+	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, 0x0);
+	for (dw = 0; dw < SXE_HDC_DATA_LEN_MAX; dw++)
+		SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dw, 0x0);
+}
+
+/* HDC operations table, installed as hw->hdc.ops by
+ * sxe_hw_ops_init().
+ */
+static const struct sxe_hdc_operations sxe_hdc_ops = {
+	.pf_lock_get            = sxe_hw_hdc_lock_get,
+	.pf_lock_release        = sxe_hw_hdc_lock_release,
+	.is_fw_over_set         = sxe_hw_hdc_is_fw_over_set,
+	.fw_ack_header_rcv      = sxe_hw_hdc_fw_ack_header_get,
+	.packet_send_done       = sxe_hw_hdc_packet_send_done,
+	.packet_header_send     = sxe_hw_hdc_packet_header_send,
+	.packet_data_dword_send = sxe_hw_hdc_packet_data_dword_send,
+	.packet_data_dword_rcv  = sxe_hw_hdc_packet_data_dword_rcv,
+	.fw_status_get          = sxe_hw_hdc_fw_status_get,
+	.drv_status_set         = sxe_hw_hdc_drv_status_set,
+	.irq_event_get          = sxe_hw_hdc_irq_event_get,
+	.irq_event_clear        = sxe_hw_hdc_irq_event_clear,
+	.fw_ov_clear            = sxe_hw_hdc_fw_ov_clear,
+	.channel_state_get      = sxe_hw_hdc_channel_state_get,
+	.resource_clean         = sxe_hw_hdc_resource_clean,
+};
+
+#ifdef SXE_PHY_CONFIGURE
+#define SXE_MDIO_COMMAND_TIMEOUT 100 
+
+/* Write phy_data to a clause-45 PHY register through the MSCA/MSCD
+ * register pair.
+ *
+ * Issues the address cycle and polls for completion, then issues the
+ * write cycle and polls again. Returns 0 on success or
+ * -SXE_ERR_MDIO_CMD_TIMEOUT when either cycle fails to complete.
+ *
+ * Fix vs. original: `ret` is now initialized to 0 — the original
+ * returned an indeterminate value on the fully successful path.
+ */
+static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 phy_data)
+{
+	s32 ret = 0;
+	u32 i, command;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_MSCD, (u32)phy_data);
+
+	/* address cycle */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+			break;
+		}
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	/* write cycle */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_WRITE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+			break;
+		}
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+	}
+
+l_end:
+	return ret;
+}
+
+/* Read a clause-45 PHY register through the MSCA/MSCD register pair.
+ *
+ * Issues the address cycle and polls for completion, then the read
+ * cycle, and finally extracts the data from MSCD. Returns 0 on
+ * success or -SXE_ERR_MDIO_CMD_TIMEOUT.
+ *
+ * Fix vs. original: the read-cycle timeout message said "phy write
+ * cmd" (copy-paste from the write path).
+ */
+static s32 sxe_hw_phy_reg_read(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 *phy_data)
+{
+	s32 ret = 0;
+	u32 i, data, command;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* address cycle */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+			break;
+		}
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy read cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	/* read cycle */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		   (prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		   (SXE_MSCA_READ | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
+			break;
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy read cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	data = SXE_REG_READ(hw, SXE_MSCD);
+	data >>= MDIO_MSCD_RDATA_SHIFT;
+	*phy_data = (u16)(data);
+
+l_end:
+	return ret;
+}
+
+#define SXE_PHY_REVISION_MASK		0x000F
+#define SXE_PHY_ID_HIGH_5_BIT_MASK	0xFC00
+#define SXE_PHY_ID_HIGH_SHIFT		10
+
+/* Assemble the 32-bit PHY identifier from the clause-45 DEVID
+ * registers.
+ *
+ * NOTE(review): MDIO_DEVID1 is read into phy_id_low and MDIO_DEVID2
+ * into phy_id_high, while the error logs describe the opposite
+ * halves — confirm which register carries the upper id bits for this
+ * PHY. SXE_PHY_REVISION_MASK is defined above but unused here.
+ */
+static s32 sxe_hw_phy_id_get(struct sxe_hw *hw, u32 prtad, u32 *id)
+{
+	s32 ret;
+	u16 phy_id_high = 0;
+	u16 phy_id_low = 0;
+
+
+	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID1, MDIO_MMD_PMAPMD,
+				      &phy_id_low);
+
+	if (ret) {
+		LOG_ERROR("get phy id upper 16 bits failed, prtad=%d\n", prtad);
+		goto l_end;
+	}
+
+	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID2, MDIO_MMD_PMAPMD,
+					&phy_id_high);
+	if (ret) {
+		LOG_ERROR("get phy id lower 4 bits failed, prtad=%d\n", prtad);
+		goto l_end;
+	}
+
+	/* keep the top 6 bits of DEVID2 (shift drops the low bits) */
+	*id = (u32)((phy_id_high >> SXE_PHY_ID_HIGH_SHIFT) << 16);
+	*id |= (u32)phy_id_low;
+
+l_end:
+	return ret;
+}
+
+/* Query the PHY speed abilities (clause-45 MDIO_SPEED register) and
+ * translate them into SXE_LINK_SPEED_* bits in *speed.
+ *
+ * Fix vs. original: *speed is cleared up front, so stale caller bits
+ * cannot leak into the result — the original only zeroed it on the
+ * error path and OR'd into an uninitialized value on success.
+ * Returns 0 on success or the reg_read error code.
+ */
+s32 sxe_hw_phy_link_cap_get(struct sxe_hw *hw, u32 prtad, u32 *speed)
+{
+	s32 ret;
+	u16 speed_ability;
+
+	*speed = 0;
+
+	ret = hw->phy.ops->reg_read(hw, prtad, MDIO_SPEED, MDIO_MMD_PMAPMD,
+				      &speed_ability);
+	if (ret) {
+		LOG_ERROR("get phy link cap failed, ret=%d, prtad=%d\n",
+							ret, prtad);
+		goto l_end;
+	}
+
+	if (speed_ability & MDIO_SPEED_10G) {
+		*speed |= SXE_LINK_SPEED_10GB_FULL;
+	}
+
+	if (speed_ability & MDIO_PMA_SPEED_1000) {
+		*speed |= SXE_LINK_SPEED_1GB_FULL;
+	}
+
+	if (speed_ability & MDIO_PMA_SPEED_100) {
+		*speed |= SXE_LINK_SPEED_100_FULL;
+	}
+
+l_end:
+	return ret;
+}
+
+/* Reset the PHY through MDIO_CTRL1 and poll (30 x 100 ms) for the
+ * self-clearing reset bit to drop.
+ * Returns 0, a register access error, or -SXE_ERR_PHY_RESET_FAIL on
+ * timeout.
+ */
+static s32 sxe_hw_phy_ctrl_reset(struct sxe_hw *hw, u32 prtad)
+{
+	u32 i;
+	s32 ret;
+	u16 ctrl;
+
+	ret = sxe_hw_phy_reg_write(hw, prtad, MDIO_CTRL1,
+			 MDIO_MMD_PHYXS, MDIO_CTRL1_RESET);
+	if (ret) {
+		LOG_ERROR("phy reset failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < 30; i++) {
+		msleep(100);
+		ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_CTRL1,
+					MDIO_MMD_PHYXS, &ctrl);
+		if (ret) {
+			goto l_end;
+		}
+
+		if (!(ctrl & MDIO_CTRL1_RESET)) {
+			udelay(2);
+			break;
+		}
+	}
+
+	if (ctrl & MDIO_CTRL1_RESET) {
+		LOG_DEV_ERR("phy reset polling failed to complete\n");
+		return -SXE_ERR_PHY_RESET_FAIL;
+	}
+
+l_end:
+	return ret;
+}
+
+/* PHY operations table, installed as hw->phy.ops by sxe_hw_ops_init()
+ * when SXE_PHY_CONFIGURE is defined.
+ */
+static const struct sxe_phy_operations sxe_phy_hw_ops = {
+	.reg_write	= sxe_hw_phy_reg_write,
+	.reg_read	= sxe_hw_phy_reg_read,
+	.identifier_get	= sxe_hw_phy_id_get,
+	.link_cap_get	= sxe_hw_phy_link_cap_get,
+	.reset		= sxe_hw_phy_ctrl_reset,
+};
+#endif
+
+/* Wire every hw sub-module (setup/irq/mac/dbu/dma/sec/stat/mbx/pcie/
+ * hdc plus the mac and vlan filters, and the phy when enabled) to its
+ * operations table. Must run before any hw op is dispatched.
+ */
+void sxe_hw_ops_init(struct sxe_hw *hw)
+{
+	hw->setup.ops	= &sxe_setup_ops;
+	hw->irq.ops	= &sxe_irq_ops;
+	hw->mac.ops	= &sxe_mac_ops;
+	hw->dbu.ops	= &sxe_dbu_ops;
+	hw->dma.ops	= &sxe_dma_ops;
+	hw->sec.ops	= &sxe_sec_ops;
+	hw->stat.ops	= &sxe_stat_ops;
+	hw->mbx.ops	= &sxe_mbx_ops;
+	hw->pcie.ops	= &sxe_pcie_ops;
+	hw->hdc.ops	= &sxe_hdc_ops;
+#ifdef SXE_PHY_CONFIGURE
+	hw->phy.ops     = &sxe_phy_hw_ops;
+#endif
+
+	hw->filter.mac.ops	= &sxe_filter_mac_ops;
+	hw->filter.vlan.ops	= &sxe_filter_vlan_ops;
+	return;
+}
+
+/* Return RSSRK[reg_idx], or 0 when reg_idx is out of range. */
+u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx)
+{
+	if (reg_idx >= SXE_MAX_RSS_KEY_ENTRIES)
+		return 0;
+
+	return SXE_REG_READ(hw, SXE_RSSRK(reg_idx));
+}
+
+/* True when MRQC has the RSS enable bit set. */
+bool sxe_hw_is_rss_enabled(struct sxe_hw *hw)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	return (mrqc & SXE_MRQC_RSSEN) != 0;
+}
+
+/* Raw MRQC register read. */
+static u32 sxe_hw_mrqc_reg_get(struct sxe_hw *hw)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	return mrqc;
+}
+
+/* Return only the RSS hash-field bits of MRQC. */
+u32 sxe_hw_rss_field_get(struct sxe_hw *hw)
+{
+	return sxe_hw_mrqc_reg_get(hw) & SXE_RSS_FIELD_MASK;
+}
+
+#ifdef SXE_DPDK 
+
+#define SXE_TRAFFIC_CLASS_MAX  8
+
+#define SXE_MR_VLAN_MSB_REG_OFFSET         4
+#define SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET 4
+
+#define SXE_MR_TYPE_MASK                   0x0F
+#define SXE_MR_DST_POOL_OFFSET             8
+
+/* Program tc_idx's RXPBSIZE register with the rx packet buffer path
+ * paused around the update.
+ */
+void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize)
+{
+	u32 pbsize_reg = (u32)pbsize << SXE_RX_PKT_BUF_SIZE_SHIFT;
+
+	sxe_hw_rx_pkt_buf_switch(hw, false);
+	SXE_REG_WRITE(hw, SXE_RXPBSIZE(tc_idx), pbsize_reg);
+	sxe_hw_rx_pkt_buf_switch(hw, true);
+}
+
+/* Configure VMDq+DCB multi-queue mode.
+ *
+ * Splits the rx packet buffer evenly across the TCs implied by
+ * num_pools, zeroes the remaining buffers, selects the matching
+ * VMDq-RT mode in MRQC (8 TCs for 16 pools, otherwise 4 TCs) and
+ * writes SXE_RTRPCS_RRM into the rx packet-plane control register.
+ */
+void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools)
+{
+	u16 pbsize;
+	u8 i, nb_tcs;
+	u32 mrqc;
+
+	nb_tcs = SXE_VMDQ_DCB_NUM_QUEUES / num_pools;
+
+	pbsize = (u8)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
+
+	for (i = 0; i < nb_tcs; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
+	}
+
+	/* unused TC buffers get zero space */
+	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
+	}
+
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
+		SXE_MRQC_VMDQRT8TCEN : SXE_MRQC_VMDQRT4TCEN;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	SXE_REG_WRITE(hw, SXE_RTRPCS, SXE_RTRPCS_RRM);
+
+	return;
+}
+
+/* General control registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_general_group[] = {
+	{SXE_CTRL, 1, 1, "SXE_CTRL"},
+	{SXE_STATUS, 1, 1, "SXE_STATUS"},
+	{SXE_CTRL_EXT, 1, 1, "SXE_CTRL_EXT"},
+	{0, 0, 0, ""}
+};
+
+/* Interrupt registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_interrupt_group[] = {
+	{SXE_EICS, 1, 1, "SXE_EICS"},
+	{SXE_EIMS, 1, 1, "SXE_EIMS"},
+	{SXE_EIMC, 1, 1, "SXE_EIMC"},
+	{SXE_EIAC, 1, 1, "SXE_EIAC"},
+	{SXE_EIAM, 1, 1, "SXE_EIAM"},
+	{SXE_EITR(0), 24, 4, "SXE_EITR"},
+	{SXE_IVAR(0), 24, 4, "SXE_IVAR"},
+	{SXE_GPIE, 1, 1, "SXE_GPIE"},
+	{0, 0, 0, ""}
+};
+
+/* Flow-control registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_fctl_group[] = {
+	{SXE_PFCTOP, 1, 1, "SXE_PFCTOP"},
+	{SXE_FCRTV, 1, 1, "SXE_FCRTV"},
+	{SXE_TFCS, 1, 1, "SXE_TFCS"},
+	{0, 0, 0, ""}
+};
+
+/* Rx DMA / rx ring registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_rxdma_group[] = {
+	{SXE_RDBAL(0), 64, 0x40, "SXE_RDBAL"},
+	{SXE_RDBAH(0), 64, 0x40, "SXE_RDBAH"},
+	{SXE_RDLEN(0), 64, 0x40, "SXE_RDLEN"},
+	{SXE_RDH(0), 64, 0x40, "SXE_RDH"},
+	{SXE_RDT(0), 64, 0x40, "SXE_RDT"},
+	{SXE_RXDCTL(0), 64, 0x40, "SXE_RXDCTL"},
+	{SXE_SRRCTL(0), 16, 0x4, "SXE_SRRCTL"},
+	{SXE_TPH_RXCTRL(0), 16, 4, "SXE_TPH_RXCTRL"},
+	{SXE_RDRXCTL, 1, 1, "SXE_RDRXCTL"},
+	{SXE_RXPBSIZE(0), 8, 4, "SXE_RXPBSIZE"},
+	{SXE_RXCTRL, 1, 1, "SXE_RXCTRL"},
+	{0, 0, 0, ""}
+};
+
+/* Rx filtering registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_rx_group[] = {
+	{SXE_RXCSUM, 1, 1, "SXE_RXCSUM"},
+	{SXE_RFCTL, 1, 1, "SXE_RFCTL"},
+	{SXE_RAL(0), 16, 8, "SXE_RAL"},
+	{SXE_RAH(0), 16, 8, "SXE_RAH"},
+	{SXE_PSRTYPE(0), 1, 4, "SXE_PSRTYPE"},
+	{SXE_FCTRL, 1, 1, "SXE_FCTRL"},
+	{SXE_VLNCTRL, 1, 1, "SXE_VLNCTRL"},
+	{SXE_MCSTCTRL, 1, 1, "SXE_MCSTCTRL"},
+	{SXE_MRQC, 1, 1, "SXE_MRQC"},
+	{SXE_VMD_CTL, 1, 1, "SXE_VMD_CTL"},
+
+	{0, 0, 0, ""}
+};
+
+/* Tx ring registers included in a register dump.
+ * Declared const for consistency with the other register groups
+ * (the table is only ever read via a pointer-to-const).
+ */
+static const struct sxe_reg_info sxe_regs_tx_group[] = {
+	{SXE_TDBAL(0), 32, 0x40, "SXE_TDBAL"},
+	{SXE_TDBAH(0), 32, 0x40, "SXE_TDBAH"},
+	{SXE_TDLEN(0), 32, 0x40, "SXE_TDLEN"},
+	{SXE_TDH(0), 32, 0x40, "SXE_TDH"},
+	{SXE_TDT(0), 32, 0x40, "SXE_TDT"},
+	{SXE_TXDCTL(0), 32, 0x40, "SXE_TXDCTL"},
+	{SXE_TPH_TXCTRL(0), 16, 4, "SXE_TPH_TXCTRL"},
+	{SXE_TXPBSIZE(0), 8, 4, "SXE_TXPBSIZE"},
+	{0, 0, 0, ""}
+};
+
+/* Wake-up registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_wakeup_group[] = {
+	{SXE_WUC, 1, 1, "SXE_WUC"},
+	{SXE_WUFC, 1, 1, "SXE_WUFC"},
+	{SXE_WUS, 1, 1, "SXE_WUS"},
+	{0, 0, 0, ""}
+};
+
+/* DCB register group: currently empty (terminator only). */
+static const struct sxe_reg_info sxe_regs_dcb_group[] = {
+	{0, 0, 0, ""}
+};
+
+/* Diagnostic registers included in a register dump. */
+static const struct sxe_reg_info sxe_regs_diagnostic_group[] = {
+
+	{SXE_MFLCN, 1, 1, "SXE_MFLCN"},
+	{0, 0, 0, ""},
+};
+
+/* NULL-terminated list of all dump groups, in dump order. */
+static const struct sxe_reg_info *sxe_regs_group[] = {
+				sxe_regs_general_group,
+				sxe_regs_interrupt_group,
+				sxe_regs_fctl_group,
+				sxe_regs_rxdma_group,
+				sxe_regs_rx_group,
+				sxe_regs_tx_group,
+				sxe_regs_wakeup_group,
+				sxe_regs_dcb_group,
+				sxe_regs_diagnostic_group,
+				NULL};
+
+/* Sum the register counts of one zero-terminated reg_info group.
+ * Fixes vs. original: counters are u32 (matching the return type)
+ * and the stray semicolon after the function body is removed.
+ */
+static u32 sxe_regs_group_count(const struct sxe_reg_info *regs)
+{
+	u32 i = 0;
+	u32 count = 0;
+
+	while (regs[i].count) {
+		count += regs[i++].count;
+	}
+
+	return count;
+}
+
+/* Read every register of one zero-terminated reg_info group into
+ * reg_buf and return the number of entries written.
+ * Fixes vs. original: the accumulator is u32 (matching the return
+ * type) and the stray semicolon after the function body is removed.
+ */
+static u32 sxe_hw_regs_group_read(struct sxe_hw *hw,
+				const struct sxe_reg_info *regs,
+				u32 *reg_buf)
+{
+	u32 j, i = 0;
+	u32 count = 0;
+
+	while (regs[i].count) {
+		for (j = 0; j < regs[i].count; j++) {
+			reg_buf[count + j] = SXE_REG_READ(hw,
+					regs[i].addr + j * regs[i].stride);
+			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
+		}
+
+		i++;
+		count += j;
+	}
+
+	return count;
+}
+
+/* Total number of registers across all dump groups. */
+u32 sxe_hw_all_regs_group_num_get(void)
+{
+	const struct sxe_reg_info **group;
+	u32 total = 0;
+
+	for (group = sxe_regs_group; *group != NULL; group++)
+		total += sxe_regs_group_count(*group);
+
+	return total;
+}
+
+/* Fill data[] with every dump group's registers and log the count. */
+void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data)
+{
+	const struct sxe_reg_info **group;
+	u32 filled = 0;
+
+	for (group = sxe_regs_group; *group != NULL; group++)
+		filled += sxe_hw_regs_group_read(hw, *group, &data[filled]);
+
+	LOG_INFO("read regs cnt=%u, regs num=%u\n",
+				filled, sxe_hw_all_regs_group_num_get());
+}
+
+/* Enable virtualization in VT_CTL, either pointing the default pool
+ * at default_pool_idx or disabling the default pool entirely.
+ */
+static void sxe_hw_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx)
+{
+	u32 vt_ctl = SXE_VT_CTL_VT_ENABLE | SXE_VT_CTL_REPLEN;
+
+	if (default_pool_enabled)
+		vt_ctl |= (u32)default_pool_idx << SXE_VT_CTL_POOL_SHIFT;
+	else
+		vt_ctl |= SXE_VT_CTL_DIS_DEFPL;
+
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl);
+}
+
+/* DCB+VMDq wrapper around the common default-pool setup. */
+void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx)
+{
+	sxe_hw_default_pool_configure(hw, default_pool_enabled,
+				      default_pool_idx);
+}
+
+/* Read EIMS_EX[0] for idx 0, EIMS_EX[1] for any other idx. */
+u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx)
+{
+	u8 reg = (idx == 0) ? 0 : 1;
+
+	return SXE_REG_READ(hw, SXE_EIMS_EX(reg));
+}
+
+/* Write EIMS_EX[0] for idx 0, EIMS_EX[1] for any other idx. */
+void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	u8 reg = (idx == 0) ? 0 : 1;
+
+	SXE_REG_WRITE(hw, SXE_EIMS_EX(reg), value);
+}
+
+/* Pack the 3-bit user-priority to TC mappings into RTRUP2TC. */
+void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
+						u8 *tc_arr)
+{
+	u32 up2tc = 0;
+	u8 up;
+
+	for (up = 0; up < MAX_USER_PRIORITY; up++)
+		up2tc |= (u32)(tc_arr[up] & 0x07) << (up * 3);
+
+	SXE_REG_WRITE(hw, SXE_RTRUP2TC, up2tc);
+}
+
+/* Read one dword of the unicast hash table (UTA). */
+u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 uta = SXE_REG_READ(hw, SXE_UTA(reg_idx));
+
+	return uta;
+}
+
+/* Write one dword of the unicast hash table (UTA). */
+void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
+				u8 reg_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_UTA(reg_idx), value);
+}
+
+/* Read the vlan control register (VLNCTRL). */
+u32 sxe_hw_vlan_type_get(struct sxe_hw *hw)
+{
+	u32 vlnctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+
+	return vlnctrl;
+}
+
+/* Write the vlan control register (VLNCTRL). */
+void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, value);
+}
+
+/* Configure vlan filtering for DCB+VMDq mode.
+ *
+ * Enables the vlan filter (VLNCTRL.VFE), opens every vlan filter
+ * table entry, enables rx for the first 16 or 32 pools via VFRE(0),
+ * and associates RAR entry 0 with all pools through both MPSAR
+ * registers.
+ */
+void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools)
+{
+	u32 vlanctrl;
+	u8 i;
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	SXE_REG_WRITE(hw, SXE_VFRE(0),
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
+
+	return;
+}
+
+/* Write the extended vlan ethertype register (EXVET). */
+void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EXVET, value);
+}
+
+/* Read the tx DMA control register (DMATXCTL). */
+u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw)
+{
+	u32 dmatxctl = SXE_REG_READ(hw, SXE_DMATXCTL);
+
+	return dmatxctl;
+}
+
+/* Write the tx DMA control register (DMATXCTL). */
+void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_DMATXCTL, value);
+}
+
+/* Read the extended control register (CTRL_EXT). */
+u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw)
+{
+	u32 ctrl_ext = SXE_REG_READ(hw, SXE_CTRL_EXT);
+
+	return ctrl_ext;
+}
+
+/* Write the extended control register (CTRL_EXT). */
+void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
+}
+
+/* Program one rx queue statistics mapping register (RQSMR). */
+void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RQSMR(idx), value);
+}
+
+/* Bind vlan_id to pool_idx: enable the VLVF entry for the vlan and
+ * program the pool bitmap into the matching low VLVFB register.
+ */
+void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map)
+{
+	u32 vlvf = SXE_VLVF_VIEN | (vlan_id & 0xFFF);
+
+	SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), vlvf);
+	SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2), pools_map);
+}
+
+/* Program one tx queue statistics mapping register (TQSM). */
+void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_TQSM(idx), value);
+}
+
+/* Configure the DCB receive path.
+ *
+ * Disables the rx packet-plane arbiter (RTRPCS.ARBDIS) while MRQC is
+ * switched to the 4- or 8-TC mode (VMDq variant when VT is on, RSS
+ * variant with VT_CTL cleared otherwise), programs per-queue drop
+ * enable (drop bit only set when SR-IOV is active), enables vlan
+ * filtering with a fully open vlan table, then re-enables the
+ * arbiter.
+ */
+void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
+					u8 sriov_active, u8 tc_num)
+{
+	u32 reg;
+	u32 vlanctrl;
+	u8 i;
+	u32 q;
+
+	/* park the arbiter while reconfiguring */
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	reg = SXE_REG_READ(hw, SXE_MRQC);
+	if (tc_num == 4) {
+		if (is_vt_on) {
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_VMDQRT4TCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_VT_CTL, 0);
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_RTRSS4TCEN;
+		}
+	}
+
+	if (tc_num == 8) {
+		if (is_vt_on) {
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_VMDQRT8TCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_VT_CTL, 0);
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_RTRSS8TCEN;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, reg);
+
+	if (sriov_active == 0) {
+		for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) {
+			SXE_REG_WRITE(hw, SXE_QDE,
+				(SXE_QDE_WRITE |
+				 (q << SXE_QDE_IDX_SHIFT)));
+		}
+	} else {
+		for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) {
+			SXE_REG_WRITE(hw, SXE_QDE,
+				(SXE_QDE_WRITE |
+				 (q << SXE_QDE_IDX_SHIFT) |
+				 SXE_QDE_ENABLE));
+		}
+	}
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	/* re-enable the arbiter */
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	return;
+}
+
+/* Derive rx/tx pause state from the FLCTRL PFC/LFC enable bits. */
+void sxe_hw_fc_status_get(struct sxe_hw *hw,
+					bool *rx_pause_on, bool *tx_pause_on)
+{
+	u32 flctrl = SXE_REG_READ(hw, SXE_FLCTRL);
+
+	*rx_pause_on = (flctrl & (SXE_FCTRL_RFCE_PFC_EN |
+				  SXE_FCTRL_RFCE_LFC_EN)) != 0;
+	*tx_pause_on = (flctrl & (SXE_FCTRL_TFCE_PFC_EN |
+				  SXE_FCTRL_TFCE_LFC_EN)) != 0;
+}
+
+/* Reset the software flow-control state to defaults: no fc requested
+ * or current, default pause time, autoneg enabled, default per-TC
+ * watermarks, xon enabled.
+ */
+void sxe_hw_fc_base_init(struct sxe_hw *hw)
+{
+	u8 tc;
+
+	hw->fc.requested_mode = SXE_FC_NONE;
+	hw->fc.current_mode = SXE_FC_NONE;
+	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
+	hw->fc.disable_fc_autoneg = false;
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		hw->fc.low_water[tc] = SXE_FC_DEFAULT_LOW_WATER_MARK;
+		hw->fc.high_water[tc] = SXE_FC_DEFAULT_HIGH_WATER_MARK;
+	}
+
+	hw->fc.send_xon = 1;
+}
+
+/* Cached high watermark for one traffic class. */
+u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
+{
+	u32 mark = hw->fc.high_water[tc_idx];
+
+	return mark;
+}
+
+/* Cached low watermark for one traffic class. */
+u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
+{
+	u32 mark = hw->fc.low_water[tc_idx];
+
+	return mark;
+}
+
+/* Cached send-xon flag. */
+u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw)
+{
+	u16 send_xon = hw->fc.send_xon;
+
+	return send_xon;
+}
+
+/* Update the cached send-xon flag. */
+void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon)
+{
+	hw->fc.send_xon = send_xon;
+}
+
+/* Cached flow-control pause time. */
+u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw)
+{
+	u16 pause_time = hw->fc.pause_time;
+
+	return pause_time;
+}
+
+/* Update the cached flow-control pause time. */
+void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time)
+{
+	hw->fc.pause_time = pause_time;
+}
+
+/* Configure the DCB transmit path.
+ *
+ * Disables the tx descriptor-plane arbiter (RTTDCS.ARBDIS), selects
+ * 8TC/8TQ or 4TC/4TQ in MTQC (plus VT enable when requested), then
+ * re-enables the arbiter.
+ */
+void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num)
+{
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_RTTDCS);
+	reg |= SXE_RTTDCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+	if (tc_num == 8) {
+		reg = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+	} else {
+		reg = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+	}
+
+	if (is_vt_on) {
+		reg |= SXE_MTQC_VT_ENA;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, reg);
+
+	reg = SXE_REG_READ(hw, SXE_RTTDCS);
+	reg &= ~SXE_RTTDCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+
+	return;
+}
+
+/* Toggle the IP payload checksum enable bit in RXCSUM. */
+void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
+							bool is_on)
+{
+	u32 rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
+
+	if (is_on)
+		rxcsum |= SXE_RXCSUM_IPPCSE;
+	else
+		rxcsum &= ~SXE_RXCSUM_IPPCSE;
+
+	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
+}
+
+/* Toggle the RSS enable bit in MRQC. */
+void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	if (is_on)
+		mrqc |= SXE_MRQC_RSSEN;
+	else
+		mrqc &= ~SXE_MRQC_RSSEN;
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Enable transmit for 16 or 32 pools via VFTE[reg_idx]. */
+void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num)
+{
+	u32 vfte = (pool_num == RTE_ETH_16_POOLS) ? 0xFFFF : 0xFFFFFFFF;
+
+	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), vfte);
+}
+
+/* Replace the RSS hash-field bits of MRQC with rss_field. */
+void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc = (mrqc & ~SXE_RSS_FIELD_MASK) | rss_field;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Program queue statistics mappings for 4-TC mode with VMDq off.
+ *
+ * Rx: only RQSMR registers whose index i has i % 8 < 4 are written,
+ * with pattern 0x01010101 * (i / 8). Tx: TQSM maps counter 0 to the
+ * first 16 queues, 1 to the next 8, then 2 and 3 to four queues
+ * each.
+ */
+static void sxe_hw_dcb_4tc_vmdq_off_stats_configure(struct sxe_hw *hw)
+{
+	u32 reg;
+	u8  i;
+
+	for (i = 0; i < 32; i++) {
+		if (i % 8 > 3) {
+			continue;
+		}
+
+		reg = 0x01010101 * (i / 8);
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), reg);
+	}
+	for (i = 0; i < 32; i++) {
+		if (i < 16) {
+			reg = 0x00000000;
+		} else if (i < 24) {
+			reg = 0x01010101;
+		} else if (i < 28) {
+			reg = 0x02020202;
+		} else {
+			reg = 0x03030303;
+		}
+
+		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
+	}
+
+	return;
+}
+
+/* Program queue statistics mappings for 4-TC mode with VMDq on:
+ * every RQSMR and TQSM register gets the 0x03020100 pattern.
+ */
+static void sxe_hw_dcb_4tc_vmdq_on_stats_configure(struct sxe_hw *hw)
+{
+	u8 reg;
+
+	for (reg = 0; reg < 32; reg++)
+		SXE_REG_WRITE(hw, SXE_RQSMR(reg), 0x03020100);
+
+	for (reg = 0; reg < 32; reg++)
+		SXE_REG_WRITE(hw, SXE_TQSM(reg), 0x03020100);
+}
+
+/* Public wrapper over the RETA table write.
+ * Fix vs. original: `return expr;` in a void function is an ISO C
+ * constraint violation — call the helper and return plainly.
+ */
+void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	sxe_hw_rss_redir_tbl_reg_write(hw, reg_idx, value);
+}
+
+/* Read the RETA dword holding entry reg_idx (4 entries per dword). */
+static u32 sxe_hw_rss_redir_tbl_reg_read(struct sxe_hw *hw, u16 reg_idx)
+{
+	u32 reta = SXE_REG_READ(hw, SXE_RETA(reg_idx >> 2));
+
+	return reta;
+}
+
+/* Public wrapper over the RETA table read. */
+u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx)
+{
+	return sxe_hw_rss_redir_tbl_reg_read(hw, reg_idx);
+}
+
+/* Stop the PTP clock by zeroing the time increment register. */
+void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TIMINC, 0);
+}
+
+/* Select the stat-mapping layout matching the TC count and VMDq
+ * state (8 TCs with VMDq on has no dedicated layout here).
+ */
+void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
+					u8 tc_num, bool vmdq_active)
+{
+	if (tc_num == 8 && !vmdq_active)
+		sxe_hw_dcb_8tc_vmdq_off_stats_configure(hw);
+	else if (tc_num == 4 && !vmdq_active)
+		sxe_hw_dcb_4tc_vmdq_off_stats_configure(hw);
+	else if (tc_num == 4 && vmdq_active)
+		sxe_hw_dcb_4tc_vmdq_on_stats_configure(hw);
+}
+
+/* Disable PTP timestamping in both directions by clearing the TX and
+ * RX timestamp-enable bits, then flush posted writes.
+ */
+void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TSYNCTXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCTXCTL) &
+			~SXE_TSYNCTXCTL_TEN));
+
+	SXE_REG_WRITE(hw, SXE_TSYNCRXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCRXCTL) &
+			~SXE_TSYNCRXCTL_REN));
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Clear the pool-association bitmap (MPSAR pair) of one RAR entry.
+ * Fix: valid RAR indices are 0..SXE_UC_ENTRY_NUM_MAX-1; the original
+ * ">" check let rar_idx == SXE_UC_ENTRY_NUM_MAX through, one slot
+ * past the end of the MPSAR register array.
+ */
+void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		LOG_ERROR_BDF("rar_idx:%d invalid.(err:%d)\n",
+			  rar_idx, SXE_ERR_PARAM);
+		return;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
+}
+
+/* Select plain VMDq receive-queue assignment (no RSS/DCB) in MRQC. */
+void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_MRQC, SXE_MRQC_VMDQEN);
+}
+
+/* Public wrapper around the generic default-pool configuration. */
+void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx)
+{
+	sxe_hw_default_pool_configure(hw, default_pool_enabled,
+				      default_pool_idx);
+}
+
+/* VMDq VLAN setup: enable VLAN filtering, accept every VLAN ID,
+ * enable receive for all pools, apply rx_mode to each pool, and
+ * associate RAR 0 with every pool.
+ */
+void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools, u32 rx_mode)
+{
+	u32 vlanctrl;
+	u8 i;
+
+	/* Turn on the VLAN filter enable bit. */
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	/* Set every bit of the VLAN filter table: all VLANs pass. */
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	/* Enable receive on pools 0-31, and 32-63 in 64-pool mode. */
+	SXE_REG_WRITE(hw, SXE_VFRE(0), 0xFFFFFFFF);
+	if (num_pools == RTE_ETH_64_POOLS) {
+		SXE_REG_WRITE(hw, SXE_VFRE(1), 0xFFFFFFFF);
+	}
+
+	for (i = 0; i < num_pools; i++) {
+		SXE_REG_WRITE(hw, SXE_VMOLR(i), rx_mode);
+	}
+
+	/* RAR 0 (default MAC) is shared by all pools. */
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+/* Return the raw GCR_EXT register, which holds the PCIe VT mode bits. */
+u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_GCR_EXT);
+}
+
+/* Set per-TC flow-control thresholds: low watermark 0, high watermark
+ * just under the packet-buffer size.
+ * NOTE(review): the "- 32" margin is presumably in the same units as
+ * RXPBSIZE (packet-buffer size register) — confirm against datasheet.
+ */
+void sxe_rx_fc_threshold_set(struct sxe_hw *hw)
+{
+	u8 i;
+	u32 high;
+
+	for (i = 0; i < SXE_TRAFFIC_CLASS_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		high = SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 32;
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), high);
+	}
+
+	return;
+}
+
+/* Bind a VLAN ID to a set of pools: program the VLVF entry and the
+ * matching half of the 64-bit pool bitmap (VLVFB pair).
+ * NOTE(review): only ONE of the two VLVFB halves is written per call
+ * (low half when the high 32 bits are zero, high half otherwise); the
+ * other half is never cleared here — confirm callers rely on that.
+ */
+void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN |
+			(vlan_id & SXE_RXD_VLAN_ID_MASK)));
+
+	if (((pools_map >> 32) & 0xFFFFFFFF) == 0) {
+		SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2),
+			(pools_map & 0xFFFFFFFF));
+	} else {
+		SXE_REG_WRITE(hw, SXE_VLVFB((pool_idx * 2 + 1)),
+			((pools_map >> 32) & 0xFFFFFFFF));
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+/* Enable VT loopback and allow transmit switching for every pool. */
+void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw)
+{
+	u8 reg;
+
+	SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN);
+
+	for (reg = 0; reg < SXE_VMTXSW_REGISTER_COUNT; reg++)
+		SXE_REG_WRITE(hw, SXE_VMTXSW(reg), 0xFFFFFFFF);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program the TX multi-queue mode (MTQC) for the requested SR-IOV /
+ * VMDq / single-queue layout. TX arbitration must be disabled while
+ * MTQC is changed, then re-enabled.
+ */
+void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
+				bool vmdq_enable, bool sriov_enable, u16 pools_num)
+{
+	u32 mtqc;
+
+	/* Disable the DCB arbiter before touching MTQC. */
+	sxe_hw_dcb_arbiter_set(hw, false);
+
+	if (sriov_enable) {
+		switch (pools_num) {
+		case RTE_ETH_64_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF;
+			break;
+		case RTE_ETH_32_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_32VF;
+			break;
+		case RTE_ETH_16_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_RT_ENA |
+				SXE_MTQC_8TC_8TQ;
+			break;
+		default:
+			/* Unknown pool count: fall back to 64 queues, 1 PB. */
+			mtqc = SXE_MTQC_64Q_1PB;
+		}
+	} else {
+		if (vmdq_enable) {
+			u8 queue_idx;
+			/* Enable transmit for all pools and set the drop
+			 * enable bit on every queue.
+			 */
+			SXE_REG_WRITE(hw, SXE_VFTE(0), UINT32_MAX);
+			SXE_REG_WRITE(hw, SXE_VFTE(1), UINT32_MAX);
+
+			for (queue_idx = 0; queue_idx < SXE_HW_TXRX_RING_NUM_MAX;
+			    queue_idx++) {
+				SXE_REG_WRITE(hw, SXE_QDE,
+					(SXE_QDE_WRITE |
+					(queue_idx << SXE_QDE_IDX_SHIFT)));
+			}
+
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF;
+		} else {
+			mtqc = SXE_MTQC_64Q_1PB;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, mtqc);
+
+	/* Re-enable the arbiter once MTQC is committed. */
+	sxe_hw_dcb_arbiter_set(hw, true);
+
+	return;
+}
+
+/* Set the drop-enable bit on every queue belonging to one VF's pool.
+ * NOTE(review): SXE_WRITE_FLUSH is issued BEFORE the QDE write in each
+ * iteration, not after — verify this ordering is intentional.
+ */
+void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u8 ring_per_pool)
+{
+	u32 value;
+	u8 i;
+
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++)
+	{
+		value = SXE_QDE_ENABLE | SXE_QDE_WRITE;
+		SXE_WRITE_FLUSH(hw);
+
+		value |= i << SXE_QDE_IDX_SHIFT;
+
+		SXE_REG_WRITE(hw, SXE_QDE, value);
+	}
+
+	return;
+}
+
+/* Report whether virtualization (pool mode) is enabled in VT_CTL. */
+bool sxe_hw_vt_status(struct sxe_hw *hw)
+{
+	u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	return (vt_ctl & SXE_VMD_CTL_POOL_EN) != 0;
+}
+
+/* Enable or disable one mirror rule: set/clear its type bits and, when
+ * enabling, record the destination pool.
+ */
+void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
+				    u8 mirror_type, u8 dst_pool, bool on)
+{
+	u32 mr_ctl;
+
+	mr_ctl = SXE_REG_READ(hw, SXE_MRCTL(rule_id));
+
+	if (on) {
+		mr_ctl |= mirror_type;
+		/* Keep only the type bits before adding the pool field. */
+		mr_ctl &= SXE_MR_TYPE_MASK;
+		mr_ctl |= dst_pool << SXE_MR_DST_POOL_OFFSET;
+	} else {
+		mr_ctl &= ~(mirror_type & SXE_MR_TYPE_MASK);
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), mr_ctl);
+
+	return;
+}
+
+/* Program the 64-bit pool-mirror bitmap, split across two VMRVM regs. */
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
+{
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), lsb);
+	SXE_REG_WRITE(hw,
+		SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), msb);
+}
+
+/* Program the 64-bit VLAN-mirror bitmap, split across two VMRVLAN regs. */
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
+{
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), lsb);
+	SXE_REG_WRITE(hw,
+		SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), msb);
+}
+
+/* Fully remove one mirror rule: clear its control register and both
+ * halves of its VLAN and pool bitmaps.
+ */
+void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id)
+{
+	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), 0);
+
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), 0);
+	SXE_REG_WRITE(hw,
+		SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), 0);
+
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), 0);
+	SXE_REG_WRITE(hw,
+		SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), 0);
+}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+/* Install a 5-tuple filter (src/dst IP, src/dst port, protocol) at the
+ * slot given by filter->index, directing matches to filter->queue.
+ * Fields whose corresponding *_mask bit is zero are excluded from the
+ * comparison via the FTQF mask bits.
+ */
+void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
+					struct sxe_fivetuple_node_info *filter)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	u32 ftqf, sdpqf;
+	u32 l34timir = 0;
+	u8 mask = 0xff;
+
+	i = filter->index;
+
+	/* Pack destination and source port into one register value. */
+	sdpqf = (u32)(filter->filter_info.dst_port << SXE_SDPQF_DSTPORT_SHIFT);
+	sdpqf = sdpqf | (filter->filter_info.src_port & SXE_SDPQF_SRCPORT);
+
+	ftqf = (u32)(filter->filter_info.protocol & SXE_FTQF_PROTOCOL_MASK);
+	ftqf |= (u32)((filter->filter_info.priority &
+			SXE_FTQF_PRIORITY_MASK) << SXE_FTQF_PRIORITY_SHIFT);
+
+	/* Clearing a mask bit disables that field's comparison. */
+	if (filter->filter_info.src_ip_mask == 0) {
+		mask &= SXE_FTQF_SOURCE_ADDR_MASK;
+	}
+	if (filter->filter_info.dst_ip_mask == 0) {
+		mask &= SXE_FTQF_DEST_ADDR_MASK;
+	}
+	if (filter->filter_info.src_port_mask == 0) {
+		mask &= SXE_FTQF_SOURCE_PORT_MASK;
+	}
+	if (filter->filter_info.dst_port_mask == 0) {
+		mask &= SXE_FTQF_DEST_PORT_MASK;
+	}
+	if (filter->filter_info.proto_mask == 0) {
+		mask &= SXE_FTQF_PROTOCOL_COMP_MASK;
+	}
+	ftqf |= mask << SXE_FTQF_5TUPLE_MASK_SHIFT;
+	ftqf |= SXE_FTQF_POOL_MASK_EN;
+	ftqf |= SXE_FTQF_QUEUE_ENABLE;
+
+	LOG_DEBUG("add fivetuple filter, index[%u], src_ip[0x%x], dst_ip[0x%x]"
+		"src_port[%u], dst_port[%u], ftqf[0x%x], queue[%u]", i, filter->filter_info.src_ip,
+		filter->filter_info.dst_ip, filter->filter_info.src_port, filter->filter_info.dst_port,
+		ftqf, filter->queue);
+
+	SXE_REG_WRITE(hw, SXE_DAQF(i), filter->filter_info.dst_ip);
+	SXE_REG_WRITE(hw, SXE_SAQF(i), filter->filter_info.src_ip);
+	SXE_REG_WRITE(hw, SXE_SDPQF(i), sdpqf);
+	SXE_REG_WRITE(hw, SXE_FTQF(i), ftqf);
+
+	/* Route matches to the requested RX queue. */
+	l34timir |= SXE_L34T_IMIR_RESERVE;
+	l34timir |= (u32)(filter->queue << SXE_L34T_IMIR_QUEUE_SHIFT);
+	SXE_REG_WRITE(hw, SXE_L34T_IMIR(i), l34timir);
+
+	return;
+}
+
+/* Remove a 5-tuple filter by zeroing every register of its slot. */
+void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index)
+{
+	SXE_REG_WRITE(hw, SXE_DAQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_SAQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_SDPQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_FTQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_L34T_IMIR(reg_index), 0);
+}
+
+/* Install an EtherType filter steering matching frames to a queue. */
+void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
+					u8 reg_index, u16 ethertype, u16 queue)
+{
+	u32 etqf = SXE_ETQF_FILTER_EN | (u32)ethertype;
+	u32 etqs = SXE_ETQS_QUEUE_EN |
+		(u32)((queue << SXE_ETQS_RX_QUEUE_SHIFT) & SXE_ETQS_RX_QUEUE);
+
+	SXE_REG_WRITE(hw, SXE_ETQF(reg_index), etqf);
+	SXE_REG_WRITE(hw, SXE_ETQS(reg_index), etqs);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Remove an EtherType filter by clearing its ETQF/ETQS pair. */
+void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type)
+{
+	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), 0);
+	SXE_REG_WRITE(hw, SXE_ETQS(filter_type), 0);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Install the TCP SYN filter: route SYN packets to the given queue,
+ * optionally at elevated priority.
+ */
+void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority)
+{
+	u32 synqf = SXE_SYN_FILTER_ENABLE |
+		(u32)((queue << SXE_SYN_FILTER_QUEUE_SHIFT) &
+			SXE_SYN_FILTER_QUEUE);
+
+	if (priority)
+		synqf |= SXE_SYN_FILTER_SYNQFP;
+
+	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Disable the TCP SYN filter, clearing its queue assignment. */
+void sxe_hw_syn_filter_del(struct sxe_hw *hw)
+{
+	u32 synqf = SXE_REG_READ(hw, SXE_SYNQF);
+
+	synqf &= ~(SXE_SYN_FILTER_QUEUE | SXE_SYN_FILTER_ENABLE);
+	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Carve pbsize out of RX packet buffer 0 for flow-director use and
+ * zero buffers 1-7.
+ * NOTE(review): loop index uses "S32" while the file otherwise uses
+ * lowercase fixed-width types — confirm S32 is intended.
+ */
+void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize)
+{
+	S32 i;
+
+	SXE_REG_WRITE(hw, SXE_RXPBSIZE(0), (SXE_REG_READ(hw, SXE_RXPBSIZE(0)) - pbsize));
+	for (i = 1; i < 8; i++) {
+		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
+	}
+
+	return;
+}
+
+/* Update the flow-director flex-byte mask bit: a mask of all-ones
+ * (UINT16_MAX) means "don't compare flex bytes", so the FLEX mask bit
+ * is cleared; otherwise FNAVM is written back unchanged.
+ */
+void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask)
+{
+	u32 fnavm;
+
+	fnavm = SXE_REG_READ(hw, SXE_FNAVM);
+	if (flex_mask == UINT16_MAX) {
+		fnavm &= ~SXE_FNAVM_FLEX;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
+	return;
+}
+
+/* Program the IPv6 address masks; hardware stores the inverted value
+ * with the destination mask in the high 16 bits.
+ */
+void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask)
+{
+	u32 mask = ((u32)dst_mask << 16) | src_mask;
+
+	SXE_REG_WRITE(hw, SXE_FNAVIP6M, ~mask);
+}
+
+/* Set the flow-director flex-byte offset (in bytes; stored as 16-bit
+ * words, hence >> 1) and wait for re-initialization to finish.
+ * Returns non-zero if the init-done poll times out.
+ */
+s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset)
+{
+	u32 fnavctrl;
+	s32 ret;
+
+	fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	fnavctrl &= ~SXE_FNAVCTRL_FLEX_MASK;
+	fnavctrl |= ((offset >> 1)
+		<< SXE_FNAVCTRL_FLEX_SHIFT);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_wait_init_done(hw);
+	if (ret) {
+		LOG_ERROR("flow director signature poll time exceeded!\n");
+	}
+
+	return ret;
+}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+/* Quiesce the MACsec data path: disable TX/RX security paths, then
+ * poll until both report ready. When the link is down, loopback is
+ * temporarily enabled so in-flight packets can drain.
+ */
+static void sxe_macsec_stop_data(struct sxe_hw *hw, bool link)
+{
+	u32 t_rdy, r_rdy;
+	u32 limit;
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	/* Fast path: already drained, nothing to wait for. */
+	t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+		SXE_SECTXSTAT_SECTX_RDY;
+	r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+		SXE_SECRXSTAT_SECRX_RDY;
+	if (t_rdy && r_rdy)
+		return;
+
+	/* No link: loop packets back so the pipeline can empty. */
+	if (!link) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x1);
+
+		SXE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	/* Poll up to ~200 ms for both directions to report ready. */
+	limit = 20;
+	do {
+		mdelay(10);
+		t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+			SXE_SECTXSTAT_SECTX_RDY;
+		r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+			SXE_SECRXSTAT_SECRX_RDY;
+	} while (!(t_rdy && r_rdy) && limit--);
+
+	if (!link) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x0);
+		SXE_WRITE_FLUSH(hw);
+	}
+
+	return;
+}
+/* Write the RX multi-queue mode register (MRQC) directly. */
+void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc)
+{
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Enable MACsec: quiesce the data path, configure the TX/RX security
+ * blocks and LinkSec control (modes, PN threshold, replay protection),
+ * then re-enable TX/RX.
+ */
+void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
+				u32 rx_mode, u32 pn_trh)
+{
+	u32 reg;
+
+	sxe_macsec_stop_data(hw, is_up);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg &= ~SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	/* NOTE(review): 0x250 is the TX buffer almost-full threshold for
+	 * MACsec operation — confirm against the datasheet.
+	 */
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	/* Minimum inter-frame gap of 3 for MACsec. */
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg &= ~SXE_SECRXCTRL_SECRX_DIS;
+	reg |= SXE_SECRXCTRL_RP;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	/* LinkSec TX: mode, always-include-SCI, PN exhaustion threshold. */
+	reg = tx_mode & SXE_LSECTXCTRL_EN_MASK;
+	reg |= SXE_LSECTXCTRL_AISCI;
+	reg &= ~SXE_LSECTXCTRL_PNTHRSH_MASK;
+	reg |= (pn_trh << SXE_LSECTXCTRL_PNTHRSH_SHIFT);
+	SXE_REG_WRITE(hw, SXE_LSECTXCTRL, reg);
+
+	/* LinkSec RX: mode, replay protection, drop on failure. */
+	reg = (rx_mode << SXE_LSECRXCTRL_EN_SHIFT) & SXE_LSECRXCTRL_EN_MASK;
+	reg |= SXE_LSECRXCTRL_RP;
+	reg |= SXE_LSECRXCTRL_DROP_EN;
+	SXE_REG_WRITE(hw, SXE_LSECRXCTRL, reg);
+
+	/* Re-enable the TX and RX data paths stopped above. */
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg &= ~SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg &= ~SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Disable MACsec: quiesce the data path, disable the TX/RX security
+ * blocks, and restore non-MACsec buffer/IFG settings.
+ */
+void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up)
+{
+	u32 reg;
+
+	sxe_macsec_stop_data(hw, is_up);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_SECRX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	/* Restore the default minimum inter-frame gap (1). */
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+/* Program the 64-bit TX secure-channel identifier (SCI). */
+void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch)
+{
+	SXE_REG_WRITE(hw, SXE_LSECTXSCL, scl);
+	SXE_REG_WRITE(hw, SXE_LSECTXSCH, sch);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program the RX secure-channel identifier, folding the port
+ * identifier into the high SCI register.
+ */
+void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi)
+{
+	u32 sch_val;
+
+	SXE_REG_WRITE(hw, SXE_LSECRXSCL, scl);
+
+	sch_val = sch | ((pi << SXE_LSECRXSCH_PI_SHIFT) &
+			SXE_LSECRXSCH_PI_MASK);
+	SXE_REG_WRITE(hw, SXE_LSECRXSCH, sch_val);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Configure one TX security association (index 0 or 1): select the SA,
+ * load its packet number and 128-bit key, then set its association
+ * number and activate it via the SELSA bit.
+ */
+void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys)
+{
+	u32 reg;
+	u8 i;
+
+	/* Point the SA selector at the slot being programmed. */
+	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
+	reg &= ~SXE_LSECTXSA_SELSA;
+	reg |= (sa_idx << SXE_LSECTXSA_SELSA_SHIFT) & SXE_LSECTXSA_SELSA;
+	SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_LSECTXPN(sa_idx), pn);
+	for (i = 0; i < 4; i++) {
+		SXE_REG_WRITE(hw, SXE_LSECTXKEY(sa_idx, i), keys[i]);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+	/* Set the AN for the slot; SELSA cleared selects SA 0, set
+	 * selects SA 1.
+	 */
+	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
+	if (sa_idx == 0) {
+		reg &= ~SXE_LSECTXSA_AN0_MASK;
+		reg |= (an << SXE_LSECTXSA_AN0_SHIFT) & SXE_LSECTXSA_AN0_MASK;
+		reg &= ~SXE_LSECTXSA_SELSA;
+		SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	} else if (sa_idx == 1) {
+		reg &= ~SXE_LSECTXSA_AN1_MASK;
+		reg |= (an << SXE_LSECTXSA_AN1_SHIFT) & SXE_LSECTXSA_AN1_MASK;
+		reg |= SXE_LSECTXSA_SELSA;
+		SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+
+/* Configure one RX security association: mark the SA invalid while
+ * its packet number and key are loaded, then write the association
+ * number with the valid (SAV) bit set.
+ */
+void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys)
+{
+	u32 reg;
+	u8 i;
+
+	/* Clear the SA-valid bit before reprogramming the entry. */
+	reg = SXE_REG_READ(hw, SXE_LSECRXSA(sa_idx));
+	reg &= ~SXE_LSECRXSA_SAV;
+	reg |= (0 << SXE_LSECRXSA_SAV_SHIFT) & SXE_LSECRXSA_SAV;
+
+	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_LSECRXPN(sa_idx), pn);
+
+	for (i = 0; i < 4; i++) {
+		SXE_REG_WRITE(hw, SXE_LSECRXKEY(sa_idx, i), keys[i]);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+	/* Publish the SA: association number + valid bit. */
+	reg = ((an << SXE_LSECRXSA_AN_SHIFT) & SXE_LSECRXSA_AN_MASK) | SXE_LSECRXSA_SAV;
+	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
+	SXE_WRITE_FLUSH(hw);
+	return;
+}
+#endif
+#endif
diff --git a/drivers/net/sxe/base/sxe_hw.h b/drivers/net/sxe/base/sxe_hw.h
new file mode 100644
index 0000000000..8adc9fc15b
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_hw.h
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_HW_H__
+#define __SXE_HW_H__
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#include <linux/types.h>
+#include <linux/kernel.h>
+#else
+#include "sxe_types.h"
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+#include <inttypes.h>
+#endif
+
+#include "sxe_regs.h"
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#define SXE_PRIU64  "llu"
+#define SXE_PRIX64  "llx"
+#define SXE_PRID64  "lld"
+#define SXE_RMB()     rmb()
+
+#else
+#define SXE_PRIU64  PRIu64
+#define SXE_PRIX64  PRIx64
+#define SXE_PRID64  PRId64
+#define SXE_RMB()     rte_rmb()
+#endif
+
+struct sxe_hw;
+struct sxe_filter_mac;
+struct sxe_fc_info;
+
+#define SXE_MAC_ADDR_LEN 6
+#define SXE_QUEUE_STATS_MAP_REG_NUM 32
+
+#define SXE_FC_DEFAULT_HIGH_WATER_MARK    0x80
+#define SXE_FC_DEFAULT_LOW_WATER_MARK     0x40
+
+#define  SXE_MC_ADDR_EXTRACT_MASK  (0xFFF)
+#define  SXE_MC_ADDR_SHIFT         (5)    
+#define  SXE_MC_ADDR_REG_MASK      (0x7F) 
+#define  SXE_MC_ADDR_BIT_MASK      (0x1F) 
+
+#define SXE_TXTS_POLL_CHECK		3
+#define SXE_TXTS_POLL			5
+#define SXE_TIME_TO_NS(ns, sec)	(((u64)(ns)) + (u64)(((u64)(sec)) * NSEC_PER_SEC))
+
+/* Strict-priority arbitration mode. NOTE(review): trailing whitespace
+ * suggests original per-value comments were stripped; meanings below
+ * inferred from names — confirm.
+ */
+enum sxe_strict_prio_type {
+	PRIO_NONE = 0, 
+	PRIO_GROUP,    
+	PRIO_LINK      
+};
+
+/* Multicast filter hashing type (selects which address bits feed the
+ * MTA hash).
+ */
+enum sxe_mc_filter_type {
+	SXE_MC_FILTER_TYPE0 = 0,  
+	SXE_MC_FILTER_TYPE1,      
+	SXE_MC_FILTER_TYPE2,      
+	SXE_MC_FILTER_TYPE3       
+};
+
+#define SXE_POOLS_NUM_MAX 64
+#define SXE_16_POOL 16
+#define SXE_32_POOL 32
+#define SXE_1_RING_PER_POOL 1
+#define SXE_2_RING_PER_POOL 2
+#define SXE_3_RING_PER_POOL 3
+#define SXE_4_RING_PER_POOL 4
+
+#define SXE_DCB_1_TC 1
+#define SXE_DCB_4_TC 4
+#define SXE_DCB_8_TC 8
+
+#define SXE_8Q_PER_POOL_MASK   0x78
+#define SXE_4Q_PER_POOL_MASK   0x7C
+#define SXE_2Q_PER_POOL_MASK   0x7E
+
+#define SXE_VF_NUM_16		16
+#define SXE_VF_NUM_32		32
+
+#define SXE_TX_DESC_EOP_MASK  0x01000000   
+#define SXE_TX_DESC_RS_MASK   0x08000000   
+#define SXE_TX_DESC_STAT_DD   0x00000001   
+#define SXE_TX_DESC_CMD       (SXE_TX_DESC_EOP_MASK | SXE_TX_DESC_RS_MASK)
+#define SXE_TX_DESC_TYPE_DATA 0x00300000   
+#define SXE_TX_DESC_DEXT      0x20000000   
+#define SXE_TX_DESC_IFCS      0x02000000   
+#define SXE_TX_DESC_VLE       0x40000000 
+#define SXE_TX_DESC_TSTAMP    0x00080000 
+#define SXE_TX_DESC_FLAGS     (SXE_TX_DESC_TYPE_DATA | \
+				SXE_TX_DESC_IFCS | \
+				SXE_TX_DESC_DEXT| \
+				SXE_TX_DESC_EOP_MASK)
+#define SXE_TXD_DTYP_CTXT     0x00200000 
+#define SXE_TXD_DCMD_TSE      0x80000000 
+#define SXE_TXD_MAC_LINKSEC   0x00040000 
+#define SXE_TXD_MAC_1588      0x00080000 
+#define SXE_TX_DESC_PAYLEN_SHIFT     14
+#define SXE_TX_OUTERIPCS_SHIFT	17 
+
+#define SXE_TX_POPTS_IXSM   0x01
+#define SXE_TX_POPTS_TXSM   0x02
+#define SXE_TXD_POPTS_SHIFT 8  
+#define SXE_TXD_POPTS_IXSM  (SXE_TX_POPTS_IXSM << SXE_TXD_POPTS_SHIFT)
+#define SXE_TXD_POPTS_TXSM  (SXE_TX_POPTS_TXSM << SXE_TXD_POPTS_SHIFT)
+#define SXE_TXD_POPTS_IPSEC (0x00000400)
+
+#define SXE_TX_CTXTD_DTYP_CTXT      0x00200000 
+#define SXE_TX_CTXTD_TUCMD_IPV6     0x00000000 
+#define SXE_TX_CTXTD_TUCMD_IPV4     0x00000400 
+#define SXE_TX_CTXTD_TUCMD_L4T_UDP  0x00000000 
+#define SXE_TX_CTXTD_TUCMD_L4T_TCP  0x00000800 
+#define SXE_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000 
+#define SXE_TX_CTXTD_TUCMD_L4T_RSV  0x00001800 
+#define SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP   0x00002000 
+#define SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 
+
+#define SXE_TX_CTXTD_L4LEN_SHIFT          8  
+#define SXE_TX_CTXTD_MSS_SHIFT            16 
+#define SXE_TX_CTXTD_MACLEN_SHIFT         9  
+#define SXE_TX_CTXTD_VLAN_SHIFT           16
+#define SXE_TX_CTXTD_VLAN_MASK            0xffff0000
+#define SXE_TX_CTXTD_MACLEN_MASK          0x0000fE00
+#define SXE_TX_CTXTD_OUTER_IPLEN_SHIFT    16 
+#define SXE_TX_CTXTD_TUNNEL_LEN_SHIFT     24 
+
+#define SXE_VLAN_TAG_SIZE     4
+
+#define SXE_RSS_KEY_SIZE                (40)  
+#define SXE_MAX_RSS_KEY_ENTRIES		(10)  
+#define SXE_MAX_RETA_ENTRIES            (128) 
+
+#define SXE_TIMINC_IV_NS_SHIFT  8
+#define SXE_TIMINC_INCPD_SHIFT  24
+#define SXE_TIMINC_SET(incpd, iv_ns, iv_sns)   \
+	(((incpd) << SXE_TIMINC_INCPD_SHIFT) | \
+	((iv_ns) << SXE_TIMINC_IV_NS_SHIFT) | (iv_sns))
+
+#define PBA_STRATEGY_EQUAL       (0)    
+#define PBA_STRATEGY_WEIGHTED    (1)	
+#define SXE_PKG_BUF_NUM_MAX               (8)
+#define SXE_HW_TXRX_RING_NUM_MAX 128
+#define SXE_VMDQ_DCB_NUM_QUEUES  SXE_HW_TXRX_RING_NUM_MAX
+#define SXE_RX_PKT_BUF_SIZE 			(512)
+
+#define SXE_UC_ENTRY_NUM_MAX   128
+#define SXE_HW_TX_NONE_MODE_Q_NUM 64
+
+#define SXE_MBX_MSG_NUM    16
+#define SXE_MBX_RETRY_INTERVAL   500
+#define SXE_MBX_RETRY_COUNT      2000
+
+#define SXE_VF_UC_ENTRY_NUM_MAX 10
+#define SXE_VF_MC_ENTRY_NUM_MAX 30
+
+#define SXE_UTA_ENTRY_NUM_MAX   128
+#define SXE_MTA_ENTRY_NUM_MAX   128
+#define SXE_HASH_UC_NUM_MAX   4096 
+
+#define  SXE_MAC_ADDR_EXTRACT_MASK  (0xFFF) 
+#define  SXE_MAC_ADDR_SHIFT         (5)     
+#define  SXE_MAC_ADDR_REG_MASK      (0x7F)  
+#define  SXE_MAC_ADDR_BIT_MASK      (0x1F)  
+
+#define  SXE_VFT_TBL_SIZE          (128)   
+#define  SXE_VLAN_ID_SHIFT         (5)     
+#define  SXE_VLAN_ID_REG_MASK      (0x7F)  
+#define  SXE_VLAN_ID_BIT_MASK      (0x1F)  
+
+#define SXE_TX_PBSIZE_MAX    0x00028000 
+#define SXE_TX_PKT_SIZE_MAX  0xA        
+#define SXE_NODCB_TX_PKT_SIZE_MAX 0x14 
+#define SXE_RING_ENABLE_WAIT_LOOP 10
+
+#define VFTA_BLOCK_SIZE 		8
+#define VF_BLOCK_BITS 			(32)
+#define SXE_MAX_MAC_HDR_LEN		127
+#define SXE_MAX_NETWORK_HDR_LEN		511
+#define SXE_MAC_ADDR_LEN		6
+
+#define SXE_FNAV_BUCKET_HASH_KEY    0x3DAD14E2
+#define SXE_FNAV_SAMPLE_HASH_KEY 0x174D3614
+#define SXE_SAMPLE_COMMON_HASH_KEY \
+		(SXE_FNAV_BUCKET_HASH_KEY & SXE_FNAV_SAMPLE_HASH_KEY)
+
+#define SXE_SAMPLE_HASH_MASK		0x7fff
+#define SXE_SAMPLE_L4TYPE_MASK		0x3
+#define SXE_SAMPLE_L4TYPE_UDP		0x1
+#define SXE_SAMPLE_L4TYPE_TCP		0x2
+#define SXE_SAMPLE_L4TYPE_SCTP		0x3
+#define SXE_SAMPLE_L4TYPE_IPV6_MASK	0x4
+#define SXE_SAMPLE_L4TYPE_TUNNEL_MASK	0x10
+#define SXE_SAMPLE_FLOW_TYPE_MASK	0xF
+
+#define SXE_SAMPLE_VM_POOL_MASK		0x7F
+#define SXE_SAMPLE_VLAN_MASK		0xEFFF
+#define SXE_SAMPLE_FLEX_BYTES_MASK	0xFFFF
+
+#define SXE_FNAV_INIT_DONE_POLL               10
+#define SXE_FNAV_DROP_QUEUE                   127
+
+#define MAX_TRAFFIC_CLASS        8
+#define DEF_TRAFFIC_CLASS        1
+
+#define SXE_LINK_SPEED_UNKNOWN   0
+#define SXE_LINK_SPEED_10_FULL   0x0002
+#define SXE_LINK_SPEED_100_FULL  0x0008
+#define SXE_LINK_SPEED_1GB_FULL  0x0020
+#define SXE_LINK_SPEED_10GB_FULL 0x0080
+
+typedef u32 sxe_link_speed;
+#ifdef SXE_TEST
+#define SXE_LINK_MBPS_SPEED_DEFAULT 1000
+#else
+#define SXE_LINK_MBPS_SPEED_DEFAULT 10000
+#endif
+
+#define SXE_LINK_MBPS_SPEED_MIN   (10)
+
+/* IP version selector for RSS hash configuration. */
+enum sxe_rss_ip_version {
+	SXE_RSS_IP_VER_4 = 4,
+	SXE_RSS_IP_VER_6 = 6,
+};
+
+/* Flow navigator (flow director) operating mode: signature (sample)
+ * vs perfect (specific) matching.
+ */
+enum sxe_fnav_mode {
+	SXE_FNAV_SAMPLE_MODE = 1,
+	SXE_FNAV_SPECIFIC_MODE	= 2,
+};
+
+/* Flow-type encoding for flow-director sample rules; bit 2 marks IPv6
+ * (values 0x4-0x7 mirror the IPv4 set 0x0-0x3).
+ */
+enum sxe_sample_type {
+	SXE_SAMPLE_FLOW_TYPE_IPV4   = 0x0,
+	SXE_SAMPLE_FLOW_TYPE_UDPV4  = 0x1,
+	SXE_SAMPLE_FLOW_TYPE_TCPV4  = 0x2,
+	SXE_SAMPLE_FLOW_TYPE_SCTPV4 = 0x3,
+	SXE_SAMPLE_FLOW_TYPE_IPV6   = 0x4,
+	SXE_SAMPLE_FLOW_TYPE_UDPV6  = 0x5,
+	SXE_SAMPLE_FLOW_TYPE_TCPV6  = 0x6,
+	SXE_SAMPLE_FLOW_TYPE_SCTPV6 = 0x7,
+};
+
+/* Result codes for the hardware self-test / diagnostic paths. */
+enum {
+	SXE_DIAG_TEST_PASSED                = 0,
+	SXE_DIAG_TEST_BLOCKED               = 1,
+	SXE_DIAG_STATS_REG_TEST_ERR         = 2,
+	SXE_DIAG_REG_PATTERN_TEST_ERR       = 3,
+	SXE_DIAG_CHECK_REG_TEST_ERR         = 4,
+	SXE_DIAG_DISABLE_IRQ_TEST_ERR       = 5,
+	SXE_DIAG_ENABLE_IRQ_TEST_ERR        = 6,
+	SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR = 7,
+	SXE_DIAG_TX_RING_CONFIGURE_ERR      = 8,
+	SXE_DIAG_RX_RING_CONFIGURE_ERR      = 9,
+	SXE_DIAG_ALLOC_SKB_ERR              = 10,
+	SXE_DIAG_LOOPBACK_SEND_TEST_ERR     = 11,
+	SXE_DIAG_LOOPBACK_RECV_TEST_ERR     = 12,
+};
+
+#define SXE_RXD_STAT_DD       0x01    
+#define SXE_RXD_STAT_EOP      0x02    
+#define SXE_RXD_STAT_FLM      0x04    
+#define SXE_RXD_STAT_VP       0x08    
+#define SXE_RXDADV_NEXTP_MASK   0x000FFFF0 
+#define SXE_RXDADV_NEXTP_SHIFT  0x00000004
+#define SXE_RXD_STAT_UDPCS    0x10    
+#define SXE_RXD_STAT_L4CS     0x20    
+#define SXE_RXD_STAT_IPCS     0x40    
+#define SXE_RXD_STAT_PIF      0x80    
+#define SXE_RXD_STAT_CRCV     0x100   
+#define SXE_RXD_STAT_OUTERIPCS  0x100 
+#define SXE_RXD_STAT_VEXT     0x200   
+#define SXE_RXD_STAT_UDPV     0x400   
+#define SXE_RXD_STAT_DYNINT   0x800   
+#define SXE_RXD_STAT_LLINT    0x800   
+#define SXE_RXD_STAT_TSIP     0x08000 
+#define SXE_RXD_STAT_TS       0x10000 
+#define SXE_RXD_STAT_SECP     0x20000 
+#define SXE_RXD_STAT_LB       0x40000 
+#define SXE_RXD_STAT_ACK      0x8000  
+#define SXE_RXD_ERR_CE        0x01    
+#define SXE_RXD_ERR_LE        0x02    
+#define SXE_RXD_ERR_PE        0x08    
+#define SXE_RXD_ERR_OSE       0x10    
+#define SXE_RXD_ERR_USE       0x20    
+#define SXE_RXD_ERR_TCPE      0x40    
+#define SXE_RXD_ERR_IPE       0x80    
+#define SXE_RXDADV_ERR_MASK           0xfff00000 
+#define SXE_RXDADV_ERR_SHIFT          20         
+#define SXE_RXDADV_ERR_OUTERIPER	0x04000000 
+#define SXE_RXDADV_ERR_FCEOFE         0x80000000 
+#define SXE_RXDADV_ERR_FCERR          0x00700000 
+#define SXE_RXDADV_ERR_FNAV_LEN       0x00100000 
+#define SXE_RXDADV_ERR_FNAV_DROP      0x00200000 
+#define SXE_RXDADV_ERR_FNAV_COLL      0x00400000 
+#define SXE_RXDADV_ERR_HBO    0x00800000 
+#define SXE_RXDADV_ERR_CE     0x01000000 
+#define SXE_RXDADV_ERR_LE     0x02000000 
+#define SXE_RXDADV_ERR_PE     0x08000000 
+#define SXE_RXDADV_ERR_OSE    0x10000000 
+#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL  0x08000000 
+#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH    0x10000000 
+#define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED   0x18000000
+#define SXE_RXDADV_ERR_USE    0x20000000 
+#define SXE_RXDADV_ERR_L4E    0x40000000 
+#define SXE_RXDADV_ERR_IPE    0x80000000 
+#define SXE_RXD_VLAN_ID_MASK  0x0FFF  
+#define SXE_RXD_PRI_MASK      0xE000  
+#define SXE_RXD_PRI_SHIFT     13
+#define SXE_RXD_CFI_MASK      0x1000  
+#define SXE_RXD_CFI_SHIFT     12
+#define SXE_RXDADV_LROCNT_MASK        0x001E0000
+#define SXE_RXDADV_LROCNT_SHIFT       17
+
+#define SXE_RXDADV_STAT_DD            SXE_RXD_STAT_DD  
+#define SXE_RXDADV_STAT_EOP           SXE_RXD_STAT_EOP 
+#define SXE_RXDADV_STAT_FLM           SXE_RXD_STAT_FLM 
+#define SXE_RXDADV_STAT_VP            SXE_RXD_STAT_VP  
+#define SXE_RXDADV_STAT_MASK          0x000fffff 
+#define SXE_RXDADV_STAT_TS		0x00010000 
+#define SXE_RXDADV_STAT_SECP		0x00020000 
+
+#define SXE_RXDADV_PKTTYPE_NONE       0x00000000
+#define SXE_RXDADV_PKTTYPE_IPV4       0x00000010 
+#define SXE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 
+#define SXE_RXDADV_PKTTYPE_IPV6       0x00000040 
+#define SXE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 
+#define SXE_RXDADV_PKTTYPE_TCP        0x00000100 
+#define SXE_RXDADV_PKTTYPE_UDP        0x00000200 
+#define SXE_RXDADV_PKTTYPE_SCTP       0x00000400 
+#define SXE_RXDADV_PKTTYPE_NFS        0x00000800 
+#define SXE_RXDADV_PKTTYPE_VXLAN      0x00000800 
+#define SXE_RXDADV_PKTTYPE_TUNNEL     0x00010000 
+#define SXE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 
+#define SXE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 
+#define SXE_RXDADV_PKTTYPE_LINKSEC    0x00004000 
+#define SXE_RXDADV_PKTTYPE_ETQF       0x00008000 
+#define SXE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 
+#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4          
+
+/* Hardware MAC statistics counters, accumulated by the stats-update
+ * path. Field names follow the hardware register mnemonics (crcerrs =
+ * CRC errors, prcNNN/ptcNNN = packets received/transmitted of size
+ * <= NNN, g/b/m prefixes = good/broadcast/multicast, q* = per-queue,
+ * dbu* = data-buffer unit, fnav* = flow navigator).
+ * NOTE(review): trailing whitespace suggests per-field comments were
+ * stripped from the original source.
+ */
+struct sxe_mac_stats {
+	u64 crcerrs;           
+	u64 errbc;             
+	u64 rlec;              
+	u64 prc64;             
+	u64 prc127;            
+	u64 prc255;            
+	u64 prc511;            
+	u64 prc1023;           
+	u64 prc1522;           
+	u64 gprc;              
+	u64 bprc;              
+	u64 mprc;              
+	u64 gptc;              
+	u64 gorc;              
+	u64 gotc;              
+	u64 ruc;               
+	u64 rfc;               
+	u64 roc;               
+	u64 rjc;               
+	u64 tor;               
+	u64 tpr;               
+	u64 tpt;               
+	u64 ptc64;             
+	u64 ptc127;            
+	u64 ptc255;            
+	u64 ptc511;            
+	u64 ptc1023;           
+	u64 ptc1522;           
+	u64 mptc;              
+	u64 bptc;              
+	u64 qprc[16];          
+	u64 qptc[16];          
+	u64 qbrc[16];          
+	u64 qbtc[16];          
+	u64 qprdc[16];         
+	u64 dburxtcin[8];      
+	u64 dburxtcout[8];     
+	u64 dburxgdreecnt[8];  
+	u64 dburxdrofpcnt[8];  
+	u64 dbutxtcin[8];      
+	u64 dbutxtcout[8];     
+	u64 rxdgpc;            
+	u64 rxdgbc;            
+	u64 rxddpc;            
+	u64 rxddbc;            
+	u64 rxtpcing;          
+	u64 rxtpceng;          
+	u64 rxlpbkpc;          
+	u64 rxlpbkbc;          
+	u64 rxdlpbkpc;         
+	u64 rxdlpbkbc;         
+	u64 prddc;             
+	u64 txdgpc;            
+	u64 txdgbc;            
+	u64 txswerr;           
+	u64 txswitch;          
+	u64 txrepeat;          
+	u64 txdescerr;         
+
+	u64 fnavadd;           
+	u64 fnavrmv;           
+	u64 fnavadderr;        
+	u64 fnavrmverr;        
+	u64 fnavmatch;         
+	u64 fnavmiss;          
+	u64 hw_rx_no_dma_resources; 
+	u64 prcpf[8];          
+	u64 pfct[8];           
+	u64 mpc[8];            
+
+	u64 total_tx_pause;    
+	u64 total_gptc;        
+	u64 total_gotc;        
+};
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+/* L4 protocol selector for 5-tuple filters; NONE disables the
+ * protocol comparison.
+ */
+enum sxe_fivetuple_protocol {
+	SXE_FILTER_PROTOCOL_TCP = 0,
+	SXE_FILTER_PROTOCOL_UDP,
+	SXE_FILTER_PROTOCOL_SCTP,
+	SXE_FILTER_PROTOCOL_NONE,
+};
+
+/* Match criteria for one 5-tuple filter. Each *_mask bit-field enables
+ * (1) or disables (0) comparison of the corresponding field.
+ */
+struct sxe_fivetuple_filter_info {
+	u32 src_ip;
+	u32 dst_ip;
+	u16 src_port;
+	u16 dst_port;
+	enum sxe_fivetuple_protocol protocol;
+	u8 priority;
+	u8 src_ip_mask:1,
+	   dst_ip_mask:1,
+	   src_port_mask:1,
+	   dst_port_mask:1,
+	   proto_mask:1;
+};
+
+/* One installed 5-tuple filter: hardware slot index, target RX queue,
+ * and its match criteria.
+ */
+struct sxe_fivetuple_node_info {
+	u16 index;  
+	u16 queue;  
+	struct sxe_fivetuple_filter_info filter_info;
+};
+#endif
+
+/* Flow-director rule tuple; fast_access allows word-wise iteration
+ * over the same 44 bytes (e.g. for hashing or masking).
+ */
+union sxe_fnav_rule_info {
+	struct {
+		u8     vm_pool;
+		u8     flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} ntuple;
+	__be32 fast_access[11];
+};
+
+/* One 32-bit word of flow-director signature hash input, viewable as
+ * pool/type/VLAN, an IP word, a port pair, or flex bytes.
+ */
+union sxe_sample_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+void sxe_hw_ops_init(struct sxe_hw *hw);
+
+
+/* Description of one register (or register array) for dump/diagnostic
+ * purposes: base address, entry count, address stride, display name.
+ */
+struct sxe_reg_info {
+	u32 addr;        
+	u32 count;       
+	u32 stride;      
+	const s8 *name;  
+};
+
+/* Ops table for basic device setup: reset, raw register access, and
+ * register self-test.
+ */
+struct sxe_setup_operations {
+	s32  (*reset)(struct sxe_hw *);
+	void (*pf_rst_done_set)(struct sxe_hw *);
+	void (*no_snoop_disable)(struct sxe_hw *);
+	u32  (*reg_read)(struct sxe_hw *, u32);
+	void (*reg_write)(struct sxe_hw *, u32, u32);
+	void (*regs_dump)(struct sxe_hw *);
+	void (*regs_flush)(struct sxe_hw *);
+	s32  (*regs_test)(struct sxe_hw *);
+};
+
+/* Binds the setup ops table into struct sxe_hw. */
+struct sxe_hw_setup {
+	const struct sxe_setup_operations *ops;
+};
+
+/* Ops table for interrupt management: cause read/clear, moderation
+ * interval, ring/event vector mapping, enable/disable and self-test.
+ */
+struct sxe_irq_operations {
+	u32  (*pending_irq_read_clear)(struct sxe_hw *hw);
+	void (*pending_irq_write_clear)(struct sxe_hw * hw, u32 value);
+	void (*irq_general_reg_set)(struct sxe_hw *hw, u32 value);
+	u32  (*irq_general_reg_get)(struct sxe_hw *hw);
+	void (*ring_irq_auto_disable)(struct sxe_hw *hw, bool is_misx);
+	void (*set_eitrsel)(struct sxe_hw *hw, u32 value);
+	void (*ring_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx, u32 interval);
+	void (*event_irq_interval_set)(struct sxe_hw * hw, u16 irq_idx, u32 value);
+	void (*event_irq_auto_clear_set)(struct sxe_hw *hw, u32 value);
+	void (*ring_irq_map)(struct sxe_hw *hw, bool is_tx,
+                                u16 reg_idx, u16 irq_idx);
+	void (*event_irq_map)(struct sxe_hw *hw, u8 offset, u16 irq_idx);
+	void (*ring_irq_enable)(struct sxe_hw * hw, u64 qmask);
+	u32  (*irq_cause_get)(struct sxe_hw * hw);
+	void (*event_irq_trigger)(struct sxe_hw * hw);
+	void (*ring_irq_trigger)(struct sxe_hw *hw, u64 eics);
+	void (*specific_irq_disable)(struct sxe_hw *hw, u32 value);
+	void (*specific_irq_enable)(struct sxe_hw *hw, u32 value);
+	void (*all_irq_disable)(struct sxe_hw *hw);
+	void (*spp_configure)(struct sxe_hw *hw, u32 value);
+	s32  (*irq_test)(struct sxe_hw *hw, u32 *icr, bool shared);
+};
+
+struct sxe_irq_info {
+	const struct sxe_irq_operations *ops;
+};
+
+struct sxe_mac_operations { /* MAC link and flow-control ops */
+	bool (*link_up_1g_check)(struct sxe_hw *);
+	bool (*link_state_is_up)(struct sxe_hw *);
+	u32  (*link_speed_get)(struct sxe_hw *);
+	void (*link_speed_set)(struct sxe_hw *, u32 speed);
+	void (*pad_enable)(struct sxe_hw *);
+	s32  (*fc_enable)(struct sxe_hw *);
+	void (*crc_configure)(struct sxe_hw *);
+	void (*loopback_switch)(struct sxe_hw *, bool);
+	void (*txrx_enable)(struct sxe_hw *hw);
+	void (*max_frame_set)(struct sxe_hw *, u32);
+	u32  (*max_frame_get)(struct sxe_hw *);
+	void (*fc_autoneg_localcap_set)(struct sxe_hw *);
+	void (*fc_tc_high_water_mark_set)(struct sxe_hw *, u8, u32);
+	void (*fc_tc_low_water_mark_set)(struct sxe_hw *, u8, u32);
+	void (*fc_param_init)(struct sxe_hw *);
+	/* NOTE(review): enum sxe_fc_mode is defined further down this header;
+	 * relying on the later definition here is fragile — consider moving
+	 * the enum above this struct.
+	 */
+	enum sxe_fc_mode (*fc_current_mode_get)(struct sxe_hw *);
+	enum sxe_fc_mode (*fc_requested_mode_get)(struct sxe_hw *);
+	void (*fc_requested_mode_set)(struct sxe_hw *, enum sxe_fc_mode);
+	bool (*is_fc_autoneg_disabled)(struct sxe_hw *);
+	void (*fc_autoneg_disable_set)(struct sxe_hw *, bool);
+};
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+
+struct sxe_mac_info {
+	const struct sxe_mac_operations	*ops;
+	u8   flags;        /* SXE_FLAGS_* bits */
+	bool set_lben;
+	bool auto_restart;
+};
+
+/* L2 MAC filtering ops: unicast/multicast tables, rx mode, VT control */
+struct sxe_filter_mac_operations {
+	u32 (*rx_mode_get)(struct sxe_hw *);
+	void (*rx_mode_set)(struct sxe_hw *, u32);
+	u32 (*pool_rx_mode_get)(struct sxe_hw *, u16);
+	void (*pool_rx_mode_set)(struct sxe_hw *, u32, u16);
+	void (*rx_lro_enable)(struct sxe_hw *, bool);
+	void (*rx_udp_frag_checksum_disable)(struct sxe_hw *);
+	s32  (*uc_addr_add)(struct sxe_hw *, u32, u8 *, u32);
+	s32  (*uc_addr_del)(struct sxe_hw *, u32);
+	void (*uc_addr_clear)(struct sxe_hw *);
+	void (*mta_hash_table_set)(struct sxe_hw *hw, u8 index, u32 value);
+	void (*mta_hash_table_update)(struct sxe_hw *hw, u8 reg_idx, u8 bit_idx);
+	void (*fc_mac_addr_set)(struct sxe_hw *hw, u8 *mac_addr);
+
+	void (*mc_filter_enable)(struct sxe_hw *);
+
+	void (*mc_filter_disable)(struct sxe_hw *hw);
+
+	void (*rx_nfs_filter_disable)(struct sxe_hw *);
+	void (*ethertype_filter_set)(struct sxe_hw *, u8, u32);
+
+	void (*vt_ctrl_configure)(struct sxe_hw *hw, u8 num_vfs);
+
+#ifdef SXE_WOL_CONFIGURE
+	void (*wol_mode_set)(struct sxe_hw *hw, u32 wol_status);
+	void (*wol_mode_clean)(struct sxe_hw *hw);
+	void (*wol_status_set)(struct sxe_hw *hw);
+#endif
+
+	void (*vt_disable)(struct sxe_hw *hw);
+
+	s32 (*uc_addr_pool_enable)(struct sxe_hw *hw, u8 rar_idx, u8 pool_idx);
+};
+
+struct sxe_filter_mac {
+	const struct sxe_filter_mac_operations *ops;
+};
+
+/* VLAN filtering ops: VLVF pool filters and the VFTA filter array */
+struct sxe_filter_vlan_operations {
+	u32 (*pool_filter_read)(struct sxe_hw *, u16);
+	void (*pool_filter_write)(struct sxe_hw *, u16, u32);
+	u32 (*pool_filter_bitmap_read)(struct sxe_hw *, u16);
+	void (*pool_filter_bitmap_write)(struct sxe_hw *, u16, u32);
+	void (*filter_array_write)(struct sxe_hw *, u16, u32);
+	u32  (*filter_array_read)(struct sxe_hw *, u16);
+	void (*filter_array_clear)(struct sxe_hw *);
+	void (*filter_switch)(struct sxe_hw *, bool);
+	void (*untagged_pkts_rcv_switch)(struct sxe_hw *, u32, bool);
+	s32  (*filter_configure)(struct sxe_hw *, u32, u32, bool, bool);
+};
+
+struct sxe_filter_vlan {
+	const struct sxe_filter_vlan_operations *ops;
+};
+
+struct sxe_filter_info {
+	struct sxe_filter_mac  mac;
+	struct sxe_filter_vlan vlan;
+};
+
+/* data-buffer unit ops: packet buffers, RSS, flow director (fnav), PTP */
+struct sxe_dbu_operations {
+	void (*rx_pkt_buf_size_configure)(struct sxe_hw *, u8, u32, u16);
+	void (*rx_pkt_buf_switch)(struct sxe_hw *, bool);
+	void (*rx_multi_ring_configure)(struct sxe_hw *, u8, bool, bool);
+	void (*rss_key_set_all)(struct sxe_hw *, u32 *);
+	void (*rss_redir_tbl_set_all)(struct sxe_hw *, u8 *);
+	void (*rx_cap_switch_on)(struct sxe_hw *);
+	void (*rss_hash_pkt_type_set)(struct sxe_hw *, u32);
+	void (*rss_hash_pkt_type_update)(struct sxe_hw *, u32);
+	void (*rss_rings_used_set)(struct sxe_hw *, u32, u16, u16);
+	void (*lro_ack_switch)(struct sxe_hw *, bool);
+	void (*vf_rx_switch)(struct sxe_hw *, u32, u32, bool);
+
+	/* flow director rule management */
+	s32  (*fnav_mode_init)(struct sxe_hw *, u32, u32);
+	s32  (*fnav_specific_rule_mask_set)(struct sxe_hw *,
+					union sxe_fnav_rule_info *);
+	s32  (*fnav_specific_rule_add)(struct sxe_hw *,
+					union sxe_fnav_rule_info *,
+					u16, u8);
+	s32  (*fnav_specific_rule_del)(struct sxe_hw *,
+					union sxe_fnav_rule_info *, u16);
+	s32  (*fnav_sample_hash_cmd_get)(struct sxe_hw *,
+					u8, u32, u8, u64 *);
+	void (*fnav_sample_stats_reinit)(struct sxe_hw *hw);
+	void (*fnav_sample_hash_set)(struct sxe_hw *hw, u64 hash);
+	s32  (*fnav_single_sample_rule_del)(struct sxe_hw *, u32);
+
+	/* PTP timestamping */
+	void (*ptp_init)(struct sxe_hw *);
+	void (*ptp_freq_adjust)(struct sxe_hw *, u32);
+	void (*ptp_systime_init)(struct sxe_hw *);
+	u64  (*ptp_systime_get)(struct sxe_hw *);
+	void (*ptp_tx_timestamp_get)(struct sxe_hw *, u32 *ts_sec, u32 *ts_ns);
+	void (*ptp_timestamp_mode_set)(struct sxe_hw *, bool, u32, u32);
+	void (*ptp_rx_timestamp_clear)(struct sxe_hw *);
+	u64  (*ptp_rx_timestamp_get)(struct sxe_hw *);
+	bool (*ptp_is_rx_timestamp_valid)(struct sxe_hw *);
+	void (*ptp_timestamp_enable)(struct sxe_hw *);
+
+	void (*tx_pkt_buf_switch)(struct sxe_hw *, bool);
+
+	void (*dcb_tc_rss_configure)(struct sxe_hw *hw, u16 rss_i);
+
+	void (*tx_pkt_buf_size_configure)(struct sxe_hw *, u8);
+
+	void (*rx_cap_switch_off)(struct sxe_hw *);
+	u32  (*rx_pkt_buf_size_get)(struct sxe_hw *, u8);
+	void (*rx_func_switch_on)(struct sxe_hw *hw);
+
+	void (*tx_ring_disable)(struct sxe_hw *, u8, unsigned long);
+	void (*rx_ring_disable)(struct sxe_hw *, u8, unsigned long);
+
+	u32  (*tx_dbu_fc_status_get)(struct sxe_hw *hw);
+};
+
+struct sxe_dbu_info {
+	const struct sxe_dbu_operations *ops;
+};
+
+
+/* DMA engine ops: rx/tx ring control, VLAN offload, DCB and SR-IOV pools */
+struct sxe_dma_operations {
+	/* rx ring control */
+	void (*rx_dma_ctrl_init)(struct sxe_hw *, bool);
+	void (*rx_ring_disable)(struct sxe_hw *, u8);
+	void (*rx_ring_switch)(struct sxe_hw *, u8, bool);
+	void (*rx_ring_switch_not_polling)(struct sxe_hw *, u8, bool);
+	void (*rx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8);
+	void (*rx_desc_thresh_set)(struct sxe_hw *, u8);
+	void (*rx_rcv_ctl_configure)(struct sxe_hw *, u8, u32, u32);
+	void (*rx_lro_ctl_configure)(struct sxe_hw *, u8, u32);
+	u32  (*rx_desc_ctrl_get)(struct sxe_hw *, u8);
+	void (*rx_dma_lro_ctl_set)(struct sxe_hw *);
+	void (*rx_drop_switch)(struct sxe_hw *, u8, bool);
+	void (*rx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);
+
+	/* tx ring control */
+	void (*tx_enable)(struct sxe_hw *);
+	void (*tx_multi_ring_configure)(struct sxe_hw *, u8, u16, bool, u16);
+	void (*tx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8);
+	void (*tx_desc_thresh_set)(struct sxe_hw *, u8, u32, u32, u32);
+	void (*tx_ring_switch)(struct sxe_hw *, u8, bool);
+	void (*tx_ring_switch_not_polling)(struct sxe_hw *, u8, bool);
+	void (*tx_pkt_buf_thresh_configure)(struct sxe_hw *, u8, bool);
+	u32  (*tx_desc_ctrl_get)(struct sxe_hw *, u8);
+	void (*tx_ring_info_get)(struct sxe_hw *, u8, u32 *, u32 *);
+	void (*tx_desc_wb_thresh_clear)(struct sxe_hw *, u8);
+
+	/* VLAN tag offload */
+	void (*vlan_tag_strip_switch)(struct sxe_hw *, u16, bool);
+	void (*tx_vlan_tag_set)(struct sxe_hw *, u16, u16, u32);
+	void (*tx_vlan_tag_clear)(struct sxe_hw *, u32);
+	void (*tx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);
+
+	void (*tph_switch)(struct sxe_hw *hw, bool is_enable);
+
+	/* DCB bandwidth allocation and priority flow control */
+	void (*dcb_rx_bw_alloc_configure)(struct sxe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *bwg_id,
+					u8 *prio_type,
+					u8 *prio_tc,
+					u8 max_priority);
+	void (*dcb_tx_desc_bw_alloc_configure)(struct sxe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *bwg_id,
+					u8 *prio_type);
+	void (*dcb_tx_data_bw_alloc_configure)(struct sxe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *bwg_id,
+					u8 *prio_type,
+					u8 *prio_tc,
+					u8 max_priority);
+	void (*dcb_pfc_configure)(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc,
+					u8 max_priority);
+	void (*dcb_tc_stats_configure)(struct sxe_hw *hw);
+	void (*dcb_rx_up_tc_map_set)(struct sxe_hw *hw, u8 tc);
+	void (*dcb_rx_up_tc_map_get)(struct sxe_hw *hw, u8 *map);
+	void (*dcb_rate_limiter_clear)(struct sxe_hw *hw, u8 ring_max);
+
+	/* SR-IOV pool and anti-spoof management */
+	void (*vt_pool_loopback_switch)(struct sxe_hw *hw, bool is_enable);
+	u32 (*rx_pool_get)(struct sxe_hw *hw, u8 reg_idx);
+	u32 (*tx_pool_get)(struct sxe_hw *hw, u8 reg_idx);
+	void (*tx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap);
+	void (*rx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap);
+
+	void (*vf_tx_desc_addr_clear)(struct sxe_hw *hw, u8 vf_idx, u8 ring_per_pool);
+	void (*pool_mac_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status);
+	void (*pool_vlan_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status);
+	void (*spoof_count_enable)(struct sxe_hw *hw, u8 reg_idx, u8 bit_index);
+	void (*pool_rx_ring_drop_enable)(struct sxe_hw *hw, u8 vf_idx,
+					u16 pf_vlan, u8 ring_per_pool);
+
+	void (*max_dcb_memory_window_set)(struct sxe_hw *hw, u32 value);
+	void (*dcb_tx_ring_rate_factor_set)(struct sxe_hw *hw, u32 ring_idx, u32 rate);
+
+	void (*vf_tx_ring_disable)(struct sxe_hw *hw, u8 ring_per_pool, u8 vf_idx);
+	void (*all_ring_disable)(struct sxe_hw *hw, u32 ring_max);
+	void (*tx_ring_tail_init)(struct sxe_hw *hw, u8 reg_idx);
+};
+
+struct sxe_dma_info {
+	const struct sxe_dma_operations *ops;
+};
+
+/* IPsec offload ops: SA table programming and engine start/stop */
+struct sxe_sec_operations {
+	void (*ipsec_rx_ip_store)(struct sxe_hw *hw, __be32 *ip_addr, u8 ip_len, u8 ip_idx);
+	void (*ipsec_rx_spi_store)(struct sxe_hw *hw, __be32 spi, u8 ip_idx, u16 idx);
+	void (*ipsec_rx_key_store)(struct sxe_hw *hw, u32 *key, u8 key_len, u32 salt, u32 mode, u16 idx);
+	void (*ipsec_tx_key_store)(struct sxe_hw *hw, u32 *key, u8 key_len, u32 salt, u16 idx);
+	void (*ipsec_sec_data_stop)(struct sxe_hw *hw, bool is_linkup);
+	void (*ipsec_engine_start)(struct sxe_hw *hw, bool is_linkup);
+	void (*ipsec_engine_stop)(struct sxe_hw *hw, bool is_linkup);
+	bool (*ipsec_offload_is_disable)(struct sxe_hw *hw);
+	void (*ipsec_sa_disable)(struct sxe_hw *hw);
+};
+
+struct sxe_sec_info {
+	const struct sxe_sec_operations *ops;
+};
+
+struct sxe_stat_operations { /* hardware statistics ops */
+	void (*stats_clear)(struct sxe_hw *);
+	void (*stats_get)(struct sxe_hw *, struct sxe_mac_stats *);
+
+	u32 (*tx_packets_num_get)(struct sxe_hw *hw);
+	u32 (*unsecurity_packets_num_get)(struct sxe_hw *hw);
+	u32  (*mac_stats_dump)(struct sxe_hw *, u32 *, u32);
+	u32  (*tx_dbu_to_mac_stats)(struct sxe_hw *hw);
+};
+
+struct sxe_stat_info {
+	const struct sxe_stat_operations	*ops;
+};
+
+/* PF <-> VF mailbox ops */
+struct sxe_mbx_operations {
+	void (*init)(struct sxe_hw *hw);
+
+	s32 (*msg_send)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index);
+	s32 (*msg_rcv)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index);
+
+	bool (*req_check)(struct sxe_hw *hw, u8 vf_idx);
+	bool (*ack_check)(struct sxe_hw *hw, u8 vf_idx);
+	bool (*rst_check)(struct sxe_hw *hw, u8 vf_idx);
+
+	void (*mbx_mem_clear)(struct sxe_hw *hw, u8 vf_idx);
+};
+
+struct sxe_mbx_stats {
+	u32 send_msgs; /* mailbox messages sent */
+	u32 rcv_msgs;  /* mailbox messages received */
+
+	u32 reqs;      /* request events observed */
+	u32 acks;      /* ack events observed */
+	u32 rsts;      /* reset events observed */
+};
+
+struct sxe_mbx_info {
+	const struct sxe_mbx_operations *ops;
+	struct sxe_mbx_stats stats;
+	u32 retry;    /* max poll attempts — TODO confirm against mbx code */
+	u32 interval; /* poll interval — TODO confirm units */
+	u32 msg_len;
+};
+
+struct sxe_pcie_operations {
+	void (*vt_mode_set)(struct sxe_hw *hw, u32 value);
+};
+
+struct sxe_pcie_info {
+	const struct sxe_pcie_operations *ops;
+};
+
+enum sxe_hw_state {
+	SXE_HW_STOP,
+	SXE_HW_FAULT, /* set in hw->state when the device is marked faulted */
+};
+
+/* link flow-control mode */
+enum sxe_fc_mode {
+	SXE_FC_NONE = 0,
+	SXE_FC_RX_PAUSE,
+	SXE_FC_TX_PAUSE,
+	SXE_FC_FULL,
+	SXE_FC_DEFAULT,
+};
+
+struct sxe_fc_info {
+	u32 high_water[MAX_TRAFFIC_CLASS]; /* per-TC high watermark */
+	u32 low_water[MAX_TRAFFIC_CLASS];  /* per-TC low watermark */
+	u16 pause_time;
+	bool strict_ieee;
+	bool disable_fc_autoneg; /* true: do not autonegotiate flow control */
+	u16 send_xon;
+	enum sxe_fc_mode current_mode;   /* mode currently in effect */
+	enum sxe_fc_mode requested_mode; /* mode requested by the user */
+};
+
+/* pause capability bits exchanged during flow-control negotiation */
+struct sxe_fc_nego_mode {
+	u32 adv_sym; /* local symmetric-pause advertisement */
+	u32 adv_asm; /* local asymmetric-pause advertisement */
+	u32 lp_sym;  /* link-partner symmetric-pause */
+	u32 lp_asm;  /* link-partner asymmetric-pause */
+};
+
+struct sxe_hdc_operations { /* host-driver communication (driver <-> firmware) channel ops */
+	s32 (*pf_lock_get)(struct sxe_hw *, u32);
+	void (*pf_lock_release)(struct sxe_hw *, u32);
+	bool (*is_fw_over_set)(struct sxe_hw *);
+	u32 (*fw_ack_header_rcv)(struct sxe_hw *);
+	void (*packet_send_done)(struct sxe_hw *);
+	void (*packet_header_send)(struct sxe_hw *, u32);
+	void (*packet_data_dword_send)(struct sxe_hw *, u16, u32);
+	u32  (*packet_data_dword_rcv)(struct sxe_hw *, u16);
+	u32 (*fw_status_get)(struct sxe_hw *);
+	void (*drv_status_set)(struct sxe_hw *, u32);
+	u32 (*irq_event_get)(struct sxe_hw *);
+	void (*irq_event_clear)(struct sxe_hw *, u32);
+	void (*fw_ov_clear)(struct sxe_hw *);
+	u32 (*channel_state_get)(struct sxe_hw *);
+	void (*resource_clean)(struct sxe_hw *);
+};
+
+struct sxe_hdc_info {
+	u32 pf_lock_val; /* value passed to pf_lock_get/release — TODO confirm semantics */
+	const struct sxe_hdc_operations	*ops;
+};
+
+struct sxe_phy_operations { /* PHY register access ops, addressed by port address (prtad) */
+	s32 (*reg_write)(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 phy_data);
+	s32 (*reg_read)(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 *phy_data);
+	s32 (*identifier_get)(struct sxe_hw *hw, u32 prtad, u32 *id);
+	s32 (*link_cap_get)(struct sxe_hw *hw, u32 prtad, u32 *speed);
+	s32 (*reset)(struct sxe_hw *hw, u32 prtad);
+};
+
+struct sxe_phy_reg_info {
+	const struct sxe_phy_operations	*ops;
+};
+
+/* top-level hardware abstraction: register access plus all ops tables */
+struct sxe_hw {
+	u8 __iomem *reg_base_addr; /* mapped register base used by reg_read/reg_write */
+
+	void *adapter;
+	void *priv;          /* argument handed back to fault_handle */
+	unsigned long state; /* bit SXE_HW_FAULT marks a faulted device */
+	void (*fault_handle)(void *priv);
+	u32 (*reg_read)(const volatile void *reg);
+	void (*reg_write)(u32 value, volatile void *reg);
+
+	struct sxe_hw_setup  setup;
+	struct sxe_irq_info  irq;
+	struct sxe_mac_info  mac;
+	struct sxe_filter_info filter;
+	struct sxe_dbu_info  dbu;
+	struct sxe_dma_info  dma;
+	struct sxe_sec_info  sec;
+	struct sxe_stat_info stat;
+	struct sxe_fc_info   fc;
+
+	struct sxe_mbx_info mbx;
+	struct sxe_pcie_info pcie;
+	struct sxe_hdc_info  hdc;
+	struct sxe_phy_reg_info phy;
+};
+
+u16 sxe_mac_reg_num_get(void);
+
+void sxe_hw_fault_handle(struct sxe_hw *hw);
+
+bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw);
+
+/* NOTE(review): duplicate — sxe_hw_ops_init() is already declared earlier
+ * in this header; one of the two declarations should be dropped.
+ */
+void sxe_hw_ops_init(struct sxe_hw *hw);
+
+u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx);
+
+bool sxe_hw_is_rss_enabled(struct sxe_hw *hw);
+
+u32 sxe_hw_rss_field_get(struct sxe_hw *hw);
+
+/* true when the device has been marked faulted (SXE_HW_FAULT set in hw->state) */
+static inline bool sxe_is_hw_fault(struct sxe_hw *hw)
+{
+	return test_bit(SXE_HW_FAULT, &hw->state); /* test_bit presumably from the compat layer — TODO confirm */
+}
+
+/* register the callback invoked with @priv when a hardware fault is detected */
+static inline void sxe_hw_fault_handle_init(struct sxe_hw *hw,
+			void (*handle)(void *), void *priv)
+{
+	hw->priv = priv;
+	hw->fault_handle = handle;
+}
+
+/* install the MMIO accessors used by all register helpers */
+static inline void sxe_hw_reg_handle_init(struct sxe_hw *hw,
+		u32 (*read)(const volatile void *),
+		void (*write)(u32, volatile void *))
+{
+	hw->reg_read  = read;
+	hw->reg_write = write;
+}
+
+#ifdef SXE_DPDK 
+
+void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats);
+
+void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value);
+
+s32 sxe_hw_nic_reset(struct sxe_hw *hw);
+
+u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw);
+
+void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time);
+
+void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw);
+
+u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx);
+
+u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx);
+
+u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw);
+
+void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon);
+
+u32 sxe_hw_rx_mode_get(struct sxe_hw *hw);
+
+void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl);
+
+void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw);
+
+void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx);
+
+void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
+						u16 reg_idx, u16 irq_idx);
+
+void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
+						u16 irq_idx, u32 interval);
+
+void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_all_irq_disable(struct sxe_hw *hw);
+
+void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
+					bool is_msix);
+
+u32 sxe_hw_irq_cause_get(struct sxe_hw *hw);
+
+void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx);
+
+void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value);
+
+s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
+					u8 *addr, u32 pool_idx);
+
+s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index);
+
+u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
+				u8 reg_idx, u32 value);
+
+void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
+						u8 index, u32 value);
+
+void sxe_hw_mc_filter_enable(struct sxe_hw *hw);
+
+void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
+					u16 reg_index, u32 value);
+
+u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable);
+
+u32 sxe_hw_vlan_type_get(struct sxe_hw *hw);
+
+void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_vlan_ext_vet_write(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
+					u16 reg_index, bool is_enable);
+
+void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw);
+
+u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw);
+
+void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_pf_rst_done_set(struct sxe_hw *hw);
+
+u32 sxe_hw_all_regs_group_num_get(void);
+
+void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data);
+
+s32 sxe_hw_fc_enable(struct sxe_hw *hw);
+
+bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw);
+
+void sxe_hw_fc_status_get(struct sxe_hw *hw,
+					bool *rx_pause_on, bool *tx_pause_on);
+
+void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
+						enum sxe_fc_mode mode);
+
+void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark);
+
+void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
+							u8 tc_idx, u32 mark);
+
+void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
+							bool is_disabled);
+
+u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb);
+
+void sxe_hw_ptp_init(struct sxe_hw *hw);
+
+void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
+					bool is_l2, u32 tsctl, u32 tses);
+
+void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw);
+
+void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw);
+
+void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw);
+
+void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw);
+
+bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw);
+
+u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw);
+
+void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
+						u32 *ts_sec, u32 *ts_ns);
+
+u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw);
+
+void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on);
+
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key);
+
+void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field);
+
+void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl);
+
+u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16);
+
+void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
+						u16 reg_idx, u32 value);
+
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw, bool crc_strip_on);
+
+void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame);
+
+void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw);
+
+void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
+							bool is_on);
+
+void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len, u64 desc_dma_addr,
+					u8 reg_idx);
+
+void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len
+				   );
+
+void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable);
+
+void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on);
+
+void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw);
+
+void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw);
+
+void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable);
+
+void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
+						u8 reg_idx, u32 max_desc);
+void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable);
+
+void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw);
+
+void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
+				u8 idx, u32 *head, u32 *tail);
+
+void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+
+void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
+					u8 reg_idx, u32 rdh_value,
+					u32 rdt_value);
+
+u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw);
+
+s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock);
+
+void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt);
+
+bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw);
+
+void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw);
+
+u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw);
+
+void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw);
+
+void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
+						u16 dword_index, u32 value);
+
+u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+						u16 dword_index);
+
+u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw);
+
+u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw);
+
+void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max);
+
+void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_tx_enable(struct sxe_hw *hw);
+
+void sxe_hw_tx_desc_thresh_set(
+				struct sxe_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefech_thresh);
+
+void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on);
+
+void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb);
+
+void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
+					u8 num_pb, bool dcb_enable);
+
+void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx);
+
+void sxe_hw_mac_txrx_enable(struct sxe_hw *hw);
+
+void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw);
+
+void sxe_hw_mac_pad_enable(struct sxe_hw *hw);
+
+bool sxe_hw_is_link_state_up(struct sxe_hw *hw);
+
+u32 sxe_hw_link_speed_get(struct sxe_hw *hw);
+
+void sxe_hw_fc_base_init(struct sxe_hw *hw);
+
+void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats);
+
+void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value);
+
+void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value);
+
+void sxe_hw_uc_addr_clear(struct sxe_hw *hw);
+
+void sxe_hw_vt_disable(struct sxe_hw *hw);
+
+void sxe_hw_stats_regs_clean(struct sxe_hw *hw);
+
+void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed);
+
+void sxe_hw_crc_configure(struct sxe_hw *hw);
+
+void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw);
+
+void sxe_hw_no_snoop_disable(struct sxe_hw *hw);
+
+void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max);
+
+s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx);
+
+void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools);
+
+void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx);
+
+void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
+						u8 *tc_arr);
+
+void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools);
+
+void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map);
+
+void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
+					u8 sriov_active, u8 pg_tcs);
+
+void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 pg_tcs);
+
+void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num);
+
+void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize);
+
+void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
+					u8 tc_count, bool vmdq_active);
+
+void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
+				      u16 *refill,
+				      u16 *max,
+				      u8 *bwg_id,
+				      u8 *prio_type,
+				      u8 *prio_tc,
+				      u8 max_priority);
+
+void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type);
+
+void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type,
+					   u8 *prio_tc,
+					   u8 max_priority);
+
+void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
+						u8 pfc_en, u8 *prio_tc,
+						u8 max_priority);
+
+void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw);
+
+void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
+						u8 default_pool_enabled,
+						u8 default_pool_idx);
+
+void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
+						u8 num_pools, u32 rx_mode);
+
+void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
+						u8 pool_idx, u16 vlan_id,
+						u64 pools_map);
+
+void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw);
+
+void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
+				bool vmdq_enable, bool sriov_enable, u16 pools_num);
+
+void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value);
+
+void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
+							u32 ring_idx, u32 rate);
+
+void sxe_hw_mbx_init(struct sxe_hw *hw);
+
+void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 num_vfs);
+
+void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap);
+
+void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
+						u8 reg_idx, u32 bitmap);
+
+void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
+						bool is_enable);
+
+void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx);
+
+s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
+						u8 rar_idx, u8 pool_idx);
+
+void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value);
+
+u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw);
+
+void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
+							u8 vf_idx, bool status);
+
+void sxe_rx_fc_threshold_set(struct sxe_hw *hw);
+
+void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
+						u8 tcs, bool is_4Q,
+						bool sriov_enable);
+
+void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc);
+
+bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx);
+
+bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx);
+
+bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx);
+
+s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index);
+
+s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index);
+
+void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx);
+
+u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx);
+
+void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
+						u32 vmolr, u16 pool_idx);
+
+void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf);
+
+u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx);
+
+u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u16 pf_vlan, u8 ring_per_pool);
+
+void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_index);
+
+u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf);
+
+bool sxe_hw_vt_status(struct sxe_hw *hw);
+
+s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass);
+
+u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
+
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
+
+void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
+				u8 mirror_type, u8 dst_pool, bool on);
+
+void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id);
+
+u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw);
+
+void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_idx);
+
+void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u8 ring_per_pool);
+void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr);
+
+void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
+				u32 rx_mode, u32 pn_trh);
+
+void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up);
+
+void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch);
+
+void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi);
+
+void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys);
+
+void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+				u8 an, u32 pn, u32 *keys);
+/* NOTE(review): duplicate — already declared earlier in this header */
+void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
+							bool is_enable);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize);
+
+void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask);
+
+void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask);
+
+s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset);
+
+void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
+				struct sxe_fivetuple_node_info *filter);
+
+void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
+					u8 reg_index, u16 ethertype, u16 queue);
+
+void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type);
+
+void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority);
+
+void sxe_hw_syn_filter_del(struct sxe_hw *hw);
+
+/* NOTE(review): duplicate — also declared unconditionally earlier in this header */
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key);
+#endif 
+
+void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl);
+
+s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw);
+
+s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id, u8 queue);
+
+s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input,
+					  u16 soft_id);
+
+void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
+					  u8 flow_type, u32 hash_value, u8 queue);
+
+void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
+						u16 reg_idx, u32 value);
+
+u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask);
+
+s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
+				    union sxe_fnav_rule_info *input_mask);
+
+s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
+					u32 vid, u32 pool,
+					bool vlan_on, bool vlvf_bypass);
+
+void sxe_hw_ptp_systime_init(struct sxe_hw *hw);
+
+#endif 
+#endif
diff --git a/drivers/net/sxe/base/sxe_logs.h b/drivers/net/sxe/base/sxe_logs.h
new file mode 100644
index 0000000000..510d7aae5c
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_logs.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef _SXE_LOGS_H_
+#define _SXE_LOGS_H_
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+#include "sxe_types.h"
+
+#define LOG_FILE_NAME_LEN     256
+#define LOG_FILE_PATH         "/var/log/"
+#define LOG_FILE_PREFIX       "sxepmd.log"
+
+extern s32 sxe_log_init;
+extern s32 sxe_log_rx;
+extern s32 sxe_log_tx;
+extern s32 sxe_log_drv;
+extern s32 sxe_log_hw;
+
+#define INIT sxe_log_init
+#define RX   sxe_log_rx
+#define TX   sxe_log_tx
+#define HW   sxe_log_hw
+#define DRV  sxe_log_drv
+
+#define UNUSED(x)	(void)(x)
+
+/* Format the current wall-clock time into log_time ("YYYY-MM-DD-HH:MM:SS").
+ * log_time must be a char array (sizeof is taken), not a pointer.
+ * Uses localtime_r: the plain localtime() returns a shared static buffer
+ * and is not safe when several lcores log concurrently.
+ */
+#define  TIME(log_time) \
+	do { \
+		struct timeval	tv; \
+		struct tm td; \
+		gettimeofday(&tv, NULL); \
+		localtime_r(&tv.tv_sec, &td); \
+		strftime(log_time, sizeof(log_time), "%Y-%m-%d-%H:%M:%S", &td); \
+	} while(0)
+
+#define filename_printf(x) strrchr((x),'/')?strrchr((x),'/')+1:(x)
+
+#ifdef SXE_DPDK_DEBUG
+#define PMD_LOG_DEBUG(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_DEBUG, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"DEBUG", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_INFO(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_INFO, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"INFO", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_NOTICE(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_NOTICE, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"NOTICE", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_WARN(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_WARNING, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"WARN", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_ERR(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_ERR, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"ERR", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_CRIT(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_CRIT, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"CRIT", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_ALERT(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_ALERT, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"ALERT", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_EMERG(logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_EMERG, logtype, \
+			"[%s][%s][%ld]%s:%d:%s: " fmt "\n", \
+			"EMERG", log_time, pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while(0)
+
+#else
+#define PMD_LOG_DEBUG(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_DEBUG, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_INFO(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_INFO, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_NOTICE(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_NOTICE, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_WARN(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_WARNING, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_ERR(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_ERR, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_CRIT(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_CRIT, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_ALERT(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_ALERT, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#define PMD_LOG_EMERG(logtype, fmt, ...) \
+	do { \
+		rte_log(RTE_LOG_EMERG, logtype, "%s(): " \
+			fmt "\n", __func__, ##__VA_ARGS__); \
+	} while(0)
+
+#endif
+
+#define PMD_INIT_FUNC_TRACE() PMD_LOG_DEBUG(INIT, " >>")
+
+#ifdef SXE_DPDK_DEBUG
+#define LOG_DEBUG(fmt, ...) \
+	do { \
+		PMD_LOG_DEBUG(DRV, fmt, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_INFO(fmt, ...) \
+	do { \
+		PMD_LOG_INFO(DRV, fmt, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_WARN(fmt, ...) \
+	do { \
+		PMD_LOG_WARN(DRV, fmt, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_ERROR(fmt, ...) \
+	do { \
+		PMD_LOG_ERR(DRV, fmt, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_DEBUG_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_DEBUG(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_INFO_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_INFO(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_WARN_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_WARN(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	   } while(0)
+
+#define LOG_ERROR_BDF(fmt, ...) \
+	do { \
+		PMD_LOG_ERR(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
+	   } while(0)
+
+#else
+#define LOG_DEBUG(fmt, ...)
+#define LOG_INFO(fmt, ...)
+#define LOG_WARN(fmt, ...)
+#define LOG_ERROR(fmt, ...)
+#define LOG_DEBUG_BDF(fmt, ...) do { UNUSED(adapter); } while(0)
+#define LOG_INFO_BDF(fmt, ...)  do { UNUSED(adapter); } while(0)
+#define LOG_WARN_BDF(fmt, ...)  do { UNUSED(adapter); } while(0)
+#define LOG_ERROR_BDF(fmt, ...) do { UNUSED(adapter); } while(0)
+#endif
+
+#ifdef SXE_DPDK_DEBUG
+#define LOG_DEV_DEBUG(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_DEV_INFO(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_DEV_WARN(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_DEV_ERR(fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_MSG_INFO(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_MSG_WARN(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#define LOG_MSG_ERR(msglvl, fmt, ...) \
+	do { \
+		UNUSED(adapter); \
+		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+	} while(0)
+
+#else
+#define LOG_DEV_DEBUG(fmt, ...) do { UNUSED(adapter); } while(0)
+#define LOG_DEV_INFO(fmt, ...)  do { UNUSED(adapter); } while(0)
+#define LOG_DEV_WARN(fmt, ...)  do { UNUSED(adapter); } while(0)
+#define LOG_DEV_ERR(fmt, ...)   do { UNUSED(adapter); } while(0)
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) do { UNUSED(adapter); } while(0)
+#define LOG_MSG_INFO(msglvl, fmt, ...)  do { UNUSED(adapter); } while(0)
+#define LOG_MSG_WARN(msglvl, fmt, ...)  do { UNUSED(adapter); } while(0)
+#define LOG_MSG_ERR(msglvl, fmt, ...)   do { UNUSED(adapter); } while(0)
+#endif
+
+void sxe_log_stream_init(void);
+
+#endif 
diff --git a/drivers/net/sxe/base/sxe_offload_common.c b/drivers/net/sxe/base/sxe_offload_common.c
new file mode 100644
index 0000000000..a7075b4669
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_offload_common.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_offload_common.h"
+#include "sxe_compat_version.h"
+
+u64 __sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	/* Per-queue Rx offload capability: only VLAN stripping can be
+	 * toggled per queue; the device argument is deliberately unused.
+	 */
+	u64 queue_offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
+	RTE_SET_USED(dev);
+
+	return queue_offloads;
+}
+
+u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	/* Port-level Rx offload capability set. */
+	u64 capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+#ifdef DEV_RX_JUMBO_FRAME
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+#endif
+		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+		   RTE_ETH_RX_OFFLOAD_SCATTER |
+		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	/* TCP LRO is only offered when SR-IOV is not active on the port. */
+	if (!RTE_ETH_DEV_SRIOV(dev).active)
+		capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
+
+	return capa;
+}
+
+u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	/* Port-level Tx offload capability set; identical for every port. */
+	RTE_SET_USED(dev);
+
+	return RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+	       RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+	       RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+	       RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+	       RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+	       RTE_ETH_TX_OFFLOAD_TCP_TSO |
+	       RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+	       RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
+}
+
diff --git a/drivers/net/sxe/base/sxe_offload_common.h b/drivers/net/sxe/base/sxe_offload_common.h
new file mode 100644
index 0000000000..20083de2e3
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_offload_common.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_OFFLOAD_COMMON_H__
+#define __SXE_OFFLOAD_COMMON_H__
+
+u64 __sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+#endif
+
diff --git a/drivers/net/sxe/base/sxe_queue_common.c b/drivers/net/sxe/base/sxe_queue_common.c
new file mode 100644
index 0000000000..eda73c3f79
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_queue_common.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#include "sxe_compat_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_rx.h"
+#include "sxe_tx.h"
+#include "sxe_logs.h"
+#include "sxe_regs.h"
+#include "sxevf_regs.h"
+#include "sxe.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include <rte_vect.h>
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+#include "sxevf.h"
+#endif
+#include "sxe_queue_common.h"
+#include "sxe_queue.h"
+
+static void sxe_tx_queues_clear(struct rte_eth_dev *dev)
+{
+	/* Release any queued mbufs and restore every configured Tx ring
+	 * to its initial state through the queue's ops table.
+	 */
+	u16 idx;
+
+	for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+		struct sxe_tx_queue *ring = dev->data->tx_queues[idx];
+
+		if (ring == NULL || ring->ops == NULL)
+			continue;
+
+		ring->ops->mbufs_release(ring);
+		ring->ops->init(ring);
+	}
+}
+
+static void sxe_rx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	/* Free all Rx mbufs and re-initialize each configured Rx ring. */
+	u16 idx;
+
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		struct sxe_rx_queue *ring = dev->data->rx_queues[idx];
+
+		if (ring == NULL)
+			continue;
+
+		sxe_rx_queue_mbufs_free(ring);
+		sxe_rx_queue_init(rx_batch_alloc_allowed, ring);
+	}
+}
+
+/* Allocate and initialize one Rx queue: queue structure, descriptor
+ * memzone, software buffer rings and the doorbell register address.
+ * Shared by PF and VF paths; is_vf selects the tail-pointer register.
+ * Returns 0 on success, -EINVAL on bad descriptor count, -ENOMEM on
+ * allocation failure. On failure all partial allocations are freed.
+ */
+s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
+{
+	struct rte_eth_dev *dev = rx_setup->dev;
+	const struct rte_eth_rxconf *rx_conf = rx_setup->rx_conf;
+	u16 queue_idx = rx_setup->queue_idx;
+	u32 socket_id = rx_setup->socket_id;
+	u16 desc_num = rx_setup->desc_num;
+	struct rte_mempool *mp = rx_setup->mp;
+	const struct rte_memzone *rx_mz;
+	struct sxe_rx_queue *rxq;
+	u16 len;
+	u64 offloads;
+	s32 ret = 0;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	/* Same dev_private interpreted per role; only one is valid,
+	 * chosen by is_vf below.
+	 */
+	struct sxe_adapter *pf_adapter = dev->data->dev_private;
+	struct sxevf_adapter *vf_adapter = dev->data->dev_private;
+#endif
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Effective offloads = per-queue request plus port-wide config. */
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Reject ring sizes outside hardware limits or not aligned to the
+	 * descriptor ring granularity.
+	 */
+	if (desc_num % SXE_RX_DESC_RING_ALIGN != 0 ||
+			(desc_num > SXE_MAX_RING_DESC) ||
+			(desc_num < SXE_MIN_RING_DESC)) {
+		PMD_LOG_ERR(INIT, "desc_num %u error",desc_num);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Re-setup: release any queue previously configured at this index. */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		sxe_rx_queue_free(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct sxe_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq == NULL) {
+		PMD_LOG_ERR(INIT, "rxq malloc mem failed");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->mb_pool = mp;
+	rxq->ring_depth = desc_num;
+	rxq->batch_alloc_size = rx_conf->rx_free_thresh;
+	rxq->queue_id = queue_idx;
+	/* With SR-IOV active the hardware queue index is offset into the
+	 * default pool's queue range.
+	 */
+	rxq->reg_idx = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	rxq->port_id = dev->data->port_id;
+	/* When KEEP_CRC is on, received lengths include the 4-byte FCS. */
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	} else {
+		rxq->crc_len = 0;
+	}
+
+	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = offloads;
+
+	rxq->pkt_type_mask = SXE_PACKET_TYPE_MASK;
+
+	/* DMA-able memory for the descriptor ring. */
+	rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+					SXE_RX_RING_SIZE, SXE_ALIGN, socket_id);
+	if (rx_mz == NULL) {
+		PMD_LOG_ERR(INIT, "rxq malloc desc mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->mz = rx_mz;
+
+	memset(rx_mz->addr, 0, SXE_RX_RING_SIZE);
+
+	/* Rx tail (doorbell) register differs between PF and VF BARs. */
+	if (is_vf) {
+		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
+			SXE_VFRDT(rxq->reg_idx));
+	} else {
+		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
+			SXE_RDT(rxq->reg_idx));
+	}
+
+	rxq->base_addr = rx_mz->iova;
+
+	rxq->desc_ring = (union sxe_rx_data_desc *)rx_mz->addr;
+
+	/* Bulk-alloc Rx is a port-wide property: one unsuitable queue
+	 * disables it for the whole port.
+	 */
+	if (!sxe_check_is_rx_batch_alloc_support(rxq)) {
+		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't support rx batch alloc "
+				"- canceling the feature for the whole port[%d]",
+				rxq->queue_id, rxq->port_id);
+		*rx_setup->rx_batch_alloc_allowed = false;
+	}
+
+	/* Bulk alloc may read up to a burst past the ring end; oversize
+	 * the software rings accordingly.
+	 */
+	len = desc_num;
+	if (*rx_setup->rx_batch_alloc_allowed) {
+		len += RTE_PMD_SXE_MAX_RX_BURST;
+	}
+
+	rxq->buffer_ring = rte_zmalloc_socket("rxq->sw_ring",
+					  sizeof(struct sxe_rx_buffer) * len,
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->buffer_ring) {
+		PMD_LOG_ERR(INIT, "rxq malloc buffer mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	/* Second software ring used for scattered (multi-segment) Rx. */
+	rxq->sc_buffer_ring =
+		rte_zmalloc_socket("rxq->sw_sc_ring",
+				   sizeof(struct sxe_rx_buffer) * len,
+				   RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sc_buffer_ring) {
+		PMD_LOG_ERR(INIT, "rxq malloc sc buffer mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	PMD_LOG_DEBUG(INIT, "buffer_ring=%p sc_buffer_ring=%p desc_ring=%p "
+			    "dma_addr=0x%"SXE_PRIX64,
+		     rxq->buffer_ring, rxq->sc_buffer_ring, rxq->desc_ring,
+		     rxq->base_addr);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	/* Vector Rx requires a power-of-two ring; like bulk alloc it is
+	 * disabled port-wide when any queue fails the precondition.
+	 */
+	if (!rte_is_power_of_2(desc_num)) {
+		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't meet Vector Rx "
+				    "preconditions - canceling the feature for "
+				    "the whole port[%d]",
+			     rxq->queue_id, rxq->port_id);
+		if (is_vf) {
+			vf_adapter->rx_vec_allowed = false;
+		} else {
+			pf_adapter->rx_vec_allowed = false;
+		}
+	} else {
+		sxe_rxq_vec_setup(rxq);
+	}
+#endif
+
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	sxe_rx_queue_init(*rx_setup->rx_batch_alloc_allowed, rxq);
+
+l_end:
+	return ret;
+}
+
+/* Allocate and initialize one Tx queue for either PF or VF (is_vf selects
+ * the tail-pointer register). Validates thresholds, allocates the queue and
+ * its rings, programs the doorbell address, selects the Tx burst function
+ * and publishes the queue in dev->data.
+ * Returns 0 on success, a negative errno from validation, or -ENOMEM.
+ */
+int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf)
+{
+	s32 ret;
+	struct rte_eth_dev *dev = tx_setup->dev;
+	const struct rte_eth_txconf *tx_conf = tx_setup->tx_conf;
+	u16 tx_queue_id = tx_setup->queue_idx;
+	u32 socket_id = tx_setup->socket_id;
+	u16 ring_depth = tx_setup->desc_num;
+	struct sxe_tx_queue *txq;
+	u16 rs_thresh, free_thresh;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = sxe_txq_arg_validate(dev, ring_depth, &rs_thresh,
+					&free_thresh, tx_conf);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] arg validate failed", tx_queue_id);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "tx queue[%d] ring_depth=%d, "
+			"rs_thresh=%d, free_thresh=%d", tx_queue_id,
+			ring_depth, rs_thresh, free_thresh);
+
+	txq = sxe_tx_queue_alloc(dev, tx_queue_id, ring_depth, socket_id);
+	if (!txq) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] resource alloc failed", tx_queue_id);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	txq->ops               = sxe_tx_default_ops_get();
+	txq->ring_depth        = ring_depth;
+	txq->queue_idx         = tx_queue_id;
+	txq->port_id           = dev->data->port_id;
+	txq->pthresh           = tx_conf->tx_thresh.pthresh;
+	txq->hthresh           = tx_conf->tx_thresh.hthresh;
+	txq->wthresh           = tx_conf->tx_thresh.wthresh;
+	txq->rs_thresh         = rs_thresh;
+	txq->free_thresh       = free_thresh;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	/* With SR-IOV active the hardware queue index is offset into the
+	 * default pool's queue range.
+	 */
+	txq->reg_idx           = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		tx_queue_id : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + tx_queue_id);
+	txq->offloads          = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	/* tdt_reg_addr maps a device doorbell register: keep the volatile
+	 * qualifier on both paths (the PF path previously cast to plain
+	 * u32 *, silently discarding it).
+	 */
+	if (is_vf) {
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+						SXE_VFTDT(txq->reg_idx));
+	} else {
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+						SXE_TDT(txq->reg_idx));
+	}
+
+	/* base_addr is u64, matching PRIx64 directly - no narrowing cast. */
+	PMD_LOG_INFO(INIT, "buffer_ring=%p desc_ring=%p dma_addr=0x%"PRIx64,
+		     txq->buffer_ring, txq->desc_ring, txq->base_addr);
+	sxe_tx_function_set(dev, txq);
+
+	txq->ops->init(txq);
+
+	dev->data->tx_queues[tx_queue_id] = txq;
+
+l_end:
+	return ret;
+}
+
+void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+					struct rte_eth_rxq_info *qinfo)
+{
+	/* Report one Rx queue's configuration to the ethdev layer. */
+	struct sxe_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->ring_depth;
+	qinfo->conf.rx_free_thresh = rxq->batch_alloc_size;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info)
+{
+	/* Report one Tx queue's configuration to the ethdev layer. */
+	struct sxe_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+	q_info->nb_desc = txq->ring_depth;
+	q_info->conf.tx_thresh.pthresh = txq->pthresh;
+	q_info->conf.tx_thresh.hthresh = txq->hthresh;
+	q_info->conf.tx_thresh.wthresh = txq->wthresh;
+	q_info->conf.tx_free_thresh = txq->free_thresh;
+	q_info->conf.tx_rs_thresh = txq->rs_thresh;
+	q_info->conf.offloads = txq->offloads;
+	q_info->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/* Reclaim up to free_cnt transmitted mbufs from a Tx queue.
+ * Selects the cleanup path matching the Tx burst mode in use: the
+ * simple/vector paths only apply when no offloads are enabled and
+ * rs_thresh is large enough for burst Tx; otherwise the full
+ * (offload-aware) cleanup runs. Returns the reclaimed count or a
+ * negative error from the underlying cleanup routine.
+ */
+s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	int ret;
+	struct sxe_tx_queue *txq = (struct sxe_tx_queue *)tx_queue;
+	if (txq->offloads == 0 && \
+		txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		/* Vector cleanup also requires SIMD support at runtime and,
+		 * in secondary processes, an initialized vector buffer ring.
+		 */
+		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
+#ifndef DPDK_19_11_6
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+#endif
+		    (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+		    txq->buffer_ring_vec != NULL)) {
+		    ret = sxe_tx_done_cleanup_vec(txq, free_cnt);
+		} else{
+			ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
+		}
+#else
+		ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
+#endif
+
+	} else {
+		ret = sxe_tx_done_cleanup_full(txq, free_cnt);
+	}
+
+	return ret;
+}
+
+s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
+{
+	/* Fill every descriptor of the Rx ring with a freshly allocated
+	 * mbuf and program its DMA address. Returns -ENOMEM when the
+	 * mempool runs dry; mbufs placed so far stay in the buffer ring.
+	 */
+	struct sxe_rx_buffer *buf_ring = rxq->buffer_ring;
+	u64 dma_addr;
+	u16 idx;
+
+	for (idx = 0; idx < rxq->ring_depth; idx++) {
+		volatile union sxe_rx_data_desc *desc;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_LOG_ERR(DRV, "rx mbuf alloc failed queue_id=%u",
+					(unsigned) rxq->queue_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		desc = &rxq->desc_ring[idx];
+		desc->read.hdr_addr = 0;
+		desc->read.pkt_addr = dma_addr;
+		buf_ring[idx].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
+void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq)
+{
+	/* Release mbufs, both software rings, the descriptor memzone and
+	 * the queue structure itself. Safe to call with NULL.
+	 */
+	if (rxq == NULL)
+		return;
+
+	sxe_rx_queue_mbufs_free(rxq);
+	rte_free(rxq->buffer_ring);
+	rte_free(rxq->sc_buffer_ring);
+	rte_memzone_free(rxq->mz);
+	rte_free(rxq);
+}
+
+void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq)
+{
+	/* Release queued mbufs, the buffer ring, the descriptor memzone
+	 * and the queue structure. No-op when txq or its ops are NULL.
+	 */
+	if (txq == NULL || txq->ops == NULL)
+		return;
+
+	txq->ops->mbufs_release(txq);
+	txq->ops->buffer_ring_free(txq);
+	rte_memzone_free(txq->mz);
+	rte_free(txq);
+}
+
+void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	/* Reset all Tx then Rx queues back to their post-setup state. */
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_tx_queues_clear(dev);
+	sxe_rx_queues_clear(dev, rx_batch_alloc_allowed);
+}
+
+void __sxe_queues_free(struct rte_eth_dev *dev)
+{
+	/* Free every Rx and Tx queue and record that the port has none. */
+	unsigned int idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		__sxe_rx_queue_free(dev->data->rx_queues[idx]);
+		dev->data->rx_queues[idx] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+		__sxe_tx_queue_free(dev->data->tx_queues[idx]);
+		dev->data->tx_queues[idx] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	/* Secondary process attach: only (re)select the Rx/Tx burst
+	 * functions; queue setup is owned by the primary process.
+	 */
+	if (eth_dev->data->tx_queues) {
+		struct sxe_tx_queue *txq =
+			eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+		sxe_tx_function_set(eth_dev, txq);
+	} else {
+		PMD_LOG_NOTICE(INIT, "No TX queues configured yet. "
+			     "Using default TX function.");
+	}
+
+	sxe_rx_function_set(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
+}
+
diff --git a/drivers/net/sxe/base/sxe_queue_common.h b/drivers/net/sxe/base/sxe_queue_common.h
new file mode 100644
index 0000000000..a38113b643
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_queue_common.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_QUEUE_COMMON_H__
+#define __SXE_QUEUE_COMMON_H__
+
+#include "sxe_types.h"
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define RTE_PMD_SXE_MAX_RX_BURST 32
+
+/* Indices of the Tx context descriptor slots cached per queue. */
+enum sxe_ctxt_num {
+	SXE_CTXT_DESC_0    = 0, /* first context slot */
+	SXE_CTXT_DESC_1    = 1, /* second context slot */
+	SXE_CTXT_DESC_NUM  = 2, /* number of cached context slots */
+};
+
+/* Argument bundle for __sxe_rx_queue_setup (shared PF/VF path). */
+struct rx_setup {
+	struct rte_eth_dev *dev;
+	u16 queue_idx;      /* queue index within the port */
+	u16 desc_num;       /* requested descriptor ring depth */
+	u32 socket_id;      /* NUMA node for allocations */
+	const struct rte_eth_rxconf *rx_conf;
+	struct rte_mempool *mp;  /* mbuf pool backing the queue */
+	u8 __iomem *reg_base_addr;  /* device BAR base for doorbell regs */
+	bool *rx_batch_alloc_allowed;  /* in/out: port-wide bulk-alloc flag */
+};
+
+/* Argument bundle for __sxe_tx_queue_setup (shared PF/VF path). */
+struct tx_setup {
+	struct rte_eth_dev *dev;
+	u16 queue_idx;      /* queue index within the port */
+	u16 desc_num;       /* requested descriptor ring depth */
+	u32 socket_id;      /* NUMA node for allocations */
+	const struct rte_eth_txconf *tx_conf;
+	u8 __iomem *reg_base_addr;  /* device BAR base for doorbell regs */
+};
+
+/* Tx data descriptor: "read" is the host-written layout, "wb" the
+ * hardware write-back layout sharing the same 16 bytes.
+ */
+union sxe_tx_data_desc {
+	struct {
+		__le64 buffer_addr; /* DMA address of the packet buffer */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd;
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Software Rx ring entry: the mbuf backing one descriptor. */
+struct sxe_rx_buffer {
+	struct rte_mbuf *mbuf;
+};
+
+/* Per-queue Rx statistics. */
+struct sxe_rx_queue_stats {
+	u64 csum_err;  /* packets with checksum errors */
+};
+
+/* Rx data descriptor: "read" is the host-programmed layout, "wb" the
+ * hardware write-back layout sharing the same 16 bytes.
+ */
+union sxe_rx_data_desc {
+	struct {
+		__le64 pkt_addr; /* DMA address for packet data */
+		__le64 hdr_addr; /* DMA address for split header (unused: set 0) */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* packet type bits */
+					__le16 hdr_info;
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS hash of the packet */
+				struct {
+					__le16 ip_id;
+					__le16 csum;
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error; /* DD/EOP and error bits */
+			__le16 length;       /* received byte count */
+			__le16 vlan;         /* stripped VLAN tag */
+		} upper;
+	} wb;
+ };
+
+/* Software Tx ring entry tracking one descriptor's mbuf. */
+struct sxe_tx_buffer {
+	struct rte_mbuf *mbuf;
+	u16 next_id;  /* index of the next entry in the chain */
+	u16 last_id;  /* index of the last descriptor of this packet */
+};
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+struct sxe_tx_buffer_vec {
+	struct rte_mbuf *mbuf; 
+};
+#endif
+
+/* Packed Tx offload context (lengths/VLAN) used to detect when a new
+ * hardware context descriptor must be written. data[2] allows fast
+ * whole-value comparison against the cached context.
+ */
+union sxe_tx_offload {
+	u64 data[2];
+	struct {
+		u64 l2_len:7;     /* L2 header length */
+		u64 l3_len:9;     /* L3 header length */
+		u64 l4_len:8;     /* L4 header length */
+		u64 tso_segsz:16; /* TSO segment size */
+		u64 vlan_tci:16;  /* VLAN tag to insert */
+
+		u64 outer_l3_len:8; /* outer L3 length (tunnel) */
+		u64 outer_l2_len:8; /* outer L2 length (tunnel) */
+	};
+};
+
+/* Cached copy of one hardware Tx context descriptor slot. */
+struct sxe_ctxt_info {
+	u64 flags;  /* offload flags active for this context */
+	union sxe_tx_offload tx_offload;       /* cached offload values */
+	union sxe_tx_offload tx_offload_mask;  /* bits relevant for compare */
+};
+
+/* Per-queue Tx state shared by PF and VF data paths. */
+struct sxe_tx_queue {
+	volatile union sxe_tx_data_desc *desc_ring;  /* descriptor ring VA */
+	u64             base_addr;          /* descriptor ring IOVA */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	union {
+		struct sxe_tx_buffer *buffer_ring;          /* scalar path */
+		struct sxe_tx_buffer_vec *buffer_ring_vec;  /* vector path */
+	};
+#else
+	struct sxe_tx_buffer *buffer_ring;	/* software mbuf ring */
+#endif
+	volatile u32   *tdt_reg_addr;       /* Tx tail (doorbell) register */
+	u16            ring_depth;          /* number of descriptors */
+	u16            next_to_use;         /* next descriptor to fill */
+	u16            free_thresh;         /* free when used count reaches this */
+
+	u16            rs_thresh;  /* descriptors between RS-bit requests */
+
+	u16            desc_used_num;
+	u16            next_to_clean;  /* next descriptor to reclaim */
+	u16            desc_free_num;  /* currently free descriptors */
+	u16            next_dd;        /* next expected Descriptor Done index */
+	u16            next_rs;        /* next index to set the RS bit */
+	u16            queue_idx;      /* software queue index */
+	u16            reg_idx;        /* hardware queue (register) index */
+	u16            port_id;        /* owning ethdev port id */
+	u8             pthresh;        /* prefetch threshold */
+	u8             hthresh;        /* host threshold */
+
+	u8             wthresh;  /* write-back threshold */
+	u64            offloads;       /* enabled RTE_ETH_TX_OFFLOAD_* bits */
+	u32            ctx_curr;       /* context slot currently in use */
+	struct sxe_ctxt_info ctx_cache[SXE_CTXT_DESC_NUM]; /* hw context cache */
+	const struct sxe_txq_ops *ops; /* init/release/free callbacks */
+	u8     tx_deferred_start;      /* don't start queue in dev_start */
+	const struct rte_memzone *mz;  /* memzone backing desc_ring */
+};
+
+/* Per-queue Rx state shared by PF and VF data paths. */
+struct sxe_rx_queue {
+	struct rte_mempool  *mb_pool;   /* pool for Rx mbuf allocation */
+	volatile union sxe_rx_data_desc *desc_ring; /* descriptor ring VA */
+	u64  base_addr;                 /* descriptor ring IOVA */
+	volatile u32   *rdt_reg_addr;   /* Rx tail (doorbell) register */
+	struct sxe_rx_buffer *buffer_ring; /* software mbuf ring */
+	struct sxe_rx_buffer *sc_buffer_ring; /* ring for scattered Rx segs */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	struct rte_mbuf *pkt_first_seg; /* first seg of in-progress packet */
+	struct rte_mbuf *pkt_last_seg;  /* last seg of in-progress packet */
+	u64    mbuf_init_value;		/* template for fast mbuf re-init */
+	u8     is_using_sse;		/* vector Rx path selected */
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+	u16    realloc_num; 		/* descs to refill per vector burst */
+	u16    realloc_start;		/* refill start index */
+#endif
+#endif
+	u16    ring_depth;           /* number of descriptors */
+	u16    processing_idx;     /* next descriptor to examine */
+	u16    hold_num;            /* descs held back from hw refill */
+	u16    completed_pkts_num;     /* pkts staged in completed_ring */
+	u16    next_ret_pkg;         /* next staged pkt to hand out */
+	u16    batch_alloc_trigger;  /* index that triggers bulk refill */
+
+	u16    batch_alloc_size;  /* mbufs refilled per bulk alloc */
+	u16    queue_id;            /* software queue index */
+	u16    reg_idx;              /* hardware queue (register) index */
+	u16    pkt_type_mask;        /* mask applied to hw packet-type bits */
+	u16    port_id;              /* owning ethdev port id */
+	u8     crc_len;              /* 4 when KEEP_CRC, else 0 */
+	u8     drop_en;              /* drop packets when no descriptors */
+	u8     deferred_start;       /* don't start queue in dev_start */
+	u64    vlan_flags;           /* mbuf ol_flags to set for VLAN pkts */
+	u64    offloads;             /* enabled RTE_ETH_RX_OFFLOAD_* bits */
+	struct rte_mbuf fake_mbuf;   /* dummy target for vector overreads */
+	struct rte_mbuf *completed_ring[RTE_PMD_SXE_MAX_RX_BURST * 2];
+	const struct rte_memzone *mz;  /* memzone backing desc_ring */
+	struct sxe_rx_queue_stats rx_stats;  /* per-queue counters */
+};
+
+/* Tx queue callbacks, selected per burst mode at setup time. */
+struct sxe_txq_ops {
+	void (*init)(struct sxe_tx_queue *txq);           /* reset ring state */
+	void (*mbufs_release)(struct sxe_tx_queue *txq);  /* free queued mbufs */
+	void (*buffer_ring_free)(struct sxe_tx_queue *txq); /* free sw ring */
+};
+
+s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf);
+
+int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf);
+
+void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+					struct rte_eth_rxq_info *qinfo);
+
+void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info);
+
+s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq);
+
+void sxe_rx_queue_free(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void __sxe_queues_free(struct rte_eth_dev *dev);
+
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_rx_common.c b/drivers/net/sxe/base/sxe_rx_common.c
new file mode 100644
index 0000000000..4472058a29
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_rx_common.c
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include <rte_prefetch.h>
+#include <rte_malloc.h>
+
+#include "sxe.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_queue_common.h"
+#include "sxe_vf.h"
+#include "sxe_errno.h"
+#include "sxe_irq.h"
+#include "sxe_rx_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include "rte_vect.h"
+#endif
+
+/* Prefetch the next mbuf to be processed; every 4th index also prefetch
+ * the upcoming descriptor and buffer-ring cache lines.
+ */
+static inline void sxe_rx_resource_prefetch(u16 next_idx,
+				struct sxe_rx_buffer *buf_ring,
+				volatile union sxe_rx_data_desc *desc_ring)
+{
+	rte_sxe_prefetch(buf_ring[next_idx].mbuf);
+
+	if ((next_idx & 0x3) == 0) {
+		rte_sxe_prefetch(&desc_ring[next_idx]);
+		rte_sxe_prefetch(&buf_ring[next_idx]);
+	}
+
+	return;
+}
+
+/* Select dev->rx_pkt_burst based on LRO/scattered-Rx configuration,
+ * bulk-allocation capability, and (when compiled in) vector Rx support.
+ *
+ * @rx_batch_alloc_allowed: bulk mbuf allocation preconditions are met.
+ * @rx_vec_allowed: in/out; cleared here when vector Rx preconditions fail.
+ */
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	u16  i, is_using_sse;
+
+	/* Vector Rx needs the vec condition check to pass, bulk alloc to be
+	 * allowed and (on newer DPDK) at least 128-bit SIMD enabled.
+	 */
+	if (sxe_rx_vec_condition_check(dev) ||
+	    !rx_batch_alloc_allowed 
+#ifndef DPDK_19_11_6
+		|| rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128
+#endif
+		) {
+		PMD_LOG_DEBUG(INIT, "Port[%d] doesn't meet Vector Rx "
+				    "preconditions", dev->data->port_id);
+		*rx_vec_allowed = false;
+	}
+#else
+	UNUSED(rx_vec_allowed);
+#endif
+
+	if (dev->data->lro) {
+		if (rx_batch_alloc_allowed) {
+			PMD_LOG_DEBUG(INIT, "LRO is requested. Using a bulk "
+					   "allocation version");
+			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
+		} else {
+			PMD_LOG_DEBUG(INIT, "LRO is requested. Using a single "
+					   "allocation version");
+			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
+		}
+	} else if (dev->data->scattered_rx) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		if (*rx_vec_allowed) {
+			PMD_LOG_DEBUG(INIT, "Using Vector Scattered Rx "
+					    "callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_scattered_pkts_vec_recv;
+		} else
+#endif
+		if (rx_batch_alloc_allowed) {
+			PMD_LOG_DEBUG(INIT, "Using a Scattered with bulk "
+					   "allocation callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
+		} else {
+			PMD_LOG_DEBUG(INIT, "Using Regular (non-vector, "
+					    "single allocation) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
+		}
+	}
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	else if (*rx_vec_allowed) {
+		PMD_LOG_DEBUG(INIT, "Vector rx enabled, please make sure RX "
+				    "burst size no less than %d (port=%d).",
+			     SXE_DESCS_PER_LOOP,
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_pkts_vec_recv;
+	}
+#endif
+	else if (rx_batch_alloc_allowed) {
+		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_batch_alloc_pkts_recv;
+	} else {
+		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are not "
+				"satisfied, or Scattered Rx is requested "
+				"(port=%d).",
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_pkts_recv;
+	}
+
+	/* Record in each queue whether the chosen burst function is the
+	 * vectorized one.
+	 */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	is_using_sse =
+		(dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv ||
+		dev->rx_pkt_burst == sxe_pkts_vec_recv);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct sxe_rx_queue *rxq = dev->data->rx_queues[i];
+
+		rxq->is_using_sse = is_using_sse;
+	}
+#endif
+
+	return;
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+/* Legacy rx_descriptor_done ethdev op (only built for older DPDK releases):
+ * returns non-zero when the descriptor @offset entries past processing_idx
+ * has its DD (descriptor done) bit set; 0 otherwise or when @offset is out
+ * of range.
+ */
+s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	volatile union sxe_rx_data_desc *desc;
+	struct sxe_rx_queue *rxq = rx_queue;
+	u32 index;
+	s32 is_done = 0;
+
+	LOG_DEBUG("check rx queue[%u], offset desc[%u]\n",
+			rxq->queue_id, offset);
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("offset=%u >= ring depth=%u\n",
+				offset, rxq->ring_depth);
+		goto l_end;
+	}
+
+	/* Wrap the index around the ring end. */
+	index = rxq->processing_idx + offset;
+	if (index >= rxq->ring_depth) {
+		index -= rxq->ring_depth;
+	}
+
+	desc = &rxq->desc_ring[index];
+	is_done = !!(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD));
+
+l_end:
+	return is_done;
+}
+#endif
+
+/* rx_descriptor_status ethdev op: report the state of the Rx descriptor
+ * @offset entries past the next descriptor to be processed.
+ *
+ * Returns RTE_ETH_RX_DESC_DONE / AVAIL / UNAVAIL, or -EINVAL when @offset
+ * is outside the ring.
+ */
+s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	int ret = RTE_ETH_RX_DESC_AVAIL;
+	struct sxe_rx_queue *rxq = rx_queue;
+	volatile u32 *status;
+	u32 hold_num, desc;
+
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("rx queue[%u] get desc status err,"
+			"offset=%u >= ring_depth=%u\n",
+			rxq->queue_id, offset, rxq->ring_depth);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	/* Vector Rx tracks held descriptors in realloc_num instead. */
+	if (rxq->is_using_sse)
+		hold_num = rxq->realloc_num;
+	else
+#endif
+#endif
+		hold_num = rxq->hold_num;
+	/* Descriptors still held by the driver are not available to the app. */
+	if (offset >= rxq->ring_depth - hold_num) {
+		ret = RTE_ETH_RX_DESC_UNAVAIL;
+		goto l_end;
+	}
+
+	desc = rxq->processing_idx + offset;
+	if (desc >= rxq->ring_depth) {
+		desc -= rxq->ring_depth;
+	}
+
+	status = &rxq->desc_ring[desc].wb.upper.status_error;
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)) {
+		ret = RTE_ETH_RX_DESC_DONE;
+	}
+
+l_end:
+	LOG_DEBUG("rx queue[%u] get desc status=%d\n", rxq->queue_id, ret);
+	return ret;
+}
+
+/* Scalar single-mbuf-per-descriptor Rx burst.
+ *
+ * Walks the descriptor ring from processing_idx while the DD bit is set,
+ * replaces each received mbuf with a freshly allocated one, fills the mbuf
+ * header from the descriptor write-back, and returns up to @pkts_num packets
+ * in @rx_pkts. The RDT tail register is only advanced once more than
+ * batch_alloc_size descriptors have been refilled, to batch MMIO writes.
+ */
+u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num)
+{
+	struct sxe_rx_queue *rxq = (struct sxe_rx_queue *)rx_queue;
+	volatile union sxe_rx_data_desc *desc_ring = rxq->desc_ring;
+	volatile union sxe_rx_data_desc *cur_desc;
+	struct sxe_rx_buffer *buff_ring = rxq->buffer_ring;
+	struct sxe_rx_buffer *cur_buf;
+	struct rte_mbuf *cur_mb;
+	struct rte_mbuf *new_mb;
+	union sxe_rx_data_desc rxd;
+	u16 processing_idx = rxq->processing_idx;
+	u64 dma_addr;
+	u32 staterr;
+	u32 pkt_info;
+	u16 done_num = 0;
+	u16 hold_num = 0;
+	u16 pkt_len;
+
+	while (done_num < pkts_num) {
+		cur_desc = &desc_ring[processing_idx];
+		staterr = cur_desc->wb.upper.status_error;
+		if (!(staterr & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+			break;
+		}
+
+		/* Snapshot the write-back before the slot is recycled below. */
+		rxd = *cur_desc;
+
+		LOG_DEBUG("port_id=%u queue_id=%u processing_idx=%u "
+			   "staterr=0x%08x pkt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) processing_idx, (unsigned) staterr,
+			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+		if (new_mb == NULL) {
+			/* Refill failed: stop the burst, keep the descriptor. */
+			LOG_ERROR("RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		hold_num++;
+		cur_buf = &buff_ring[processing_idx];
+		processing_idx++;
+		if (processing_idx == rxq->ring_depth) {
+			processing_idx = 0;
+		}
+
+		sxe_rx_resource_prefetch(processing_idx, buff_ring, desc_ring);
+
+		/* Hand the slot the new mbuf and re-arm the descriptor. */
+		cur_mb = cur_buf->mbuf;
+		cur_buf->mbuf = new_mb;
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
+		cur_desc->read.hdr_addr = 0;
+		cur_desc->read.pkt_addr = dma_addr;
+
+		cur_mb->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch((char *)cur_mb->buf_addr + cur_mb->data_off);
+		cur_mb->nb_segs = 1;
+		cur_mb->next = NULL;
+		pkt_len = (u16)(rte_le_to_cpu_16(rxd.wb.upper.length) -
+						rxq->crc_len);
+		cur_mb->pkt_len = pkt_len;
+		cur_mb->data_len = pkt_len;
+
+		pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+
+		sxe_rx_mbuf_common_header_fill(rxq, cur_mb, rxd, pkt_info, staterr);
+
+		rx_pkts[done_num++] = cur_mb;
+	}
+
+	rxq->processing_idx = processing_idx;
+
+	/* Defer the tail-register update until enough descriptors have been
+	 * refilled; the tail is written one behind the next slot to process.
+	 */
+	hold_num = (u16) (hold_num + rxq->hold_num);
+	if (hold_num > rxq->batch_alloc_size) {
+		LOG_DEBUG("port_id=%u queue_id=%u rx_tail=%u "
+			   "num_hold=%u num_done=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) processing_idx, (unsigned) hold_num,
+			   (unsigned) done_num);
+		processing_idx = (u16)((processing_idx == 0) ?
+				(rxq->ring_depth - 1) : (processing_idx - 1));
+		SXE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, processing_idx);
+		hold_num = 0;
+	}
+
+	rxq->hold_num = hold_num;
+	return done_num;
+}
+
+/* Return the packet-type array supported by the currently selected Rx burst
+ * callback, or NULL when the active callback is not one of ours.
+ */
+const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	const u32 * ptypes = NULL;
+	static const u32 ptypes_arr[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_TUNNEL_IP,
+		RTE_PTYPE_INNER_L3_IPV6,
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	if (dev->rx_pkt_burst == sxe_pkts_recv ||
+		dev->rx_pkt_burst == sxe_batch_alloc_pkts_recv ||
+		dev->rx_pkt_burst == sxe_single_alloc_lro_pkts_recv ||
+		dev->rx_pkt_burst == sxe_batch_alloc_lro_pkts_recv) {
+		ptypes = ptypes_arr;
+		goto l_end;
+	}
+
+	/* The vector receive paths report the same supported ptypes. */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	if (dev->rx_pkt_burst == sxe_pkts_vec_recv ||
+	    dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv) {
+		ptypes = ptypes_arr;
+	}
+#endif
+#endif
+
+l_end:
+	return ptypes;
+}
+
diff --git a/drivers/net/sxe/base/sxe_rx_common.h b/drivers/net/sxe/base/sxe_rx_common.h
new file mode 100644
index 0000000000..b7eb37f54a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_rx_common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_RX_COMMON_H__
+#define __SXE_RX_COMMON_H__
+
+#include "sxe_dpdk_version.h"
+
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset);
+
+u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num);
+
+const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+#endif
+
diff --git a/drivers/net/sxe/base/sxe_tx_common.c b/drivers/net/sxe/base/sxe_tx_common.c
new file mode 100644
index 0000000000..a47f90109a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_tx_common.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#endif
+#include <rte_net.h>
+
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue_common.h"
+#include "sxe_tx_common.h"
+
+/* tx_descriptor_status ethdev op: report the state of the Tx descriptor
+ * @offset entries past next_to_use.
+ *
+ * Returns RTE_ETH_TX_DESC_DONE / FULL, or -EINVAL when @offset is outside
+ * the ring.
+ */
+int __sxe_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	int ret = RTE_ETH_TX_DESC_FULL;
+	u32 desc_idx;
+	struct sxe_tx_queue *txq = tx_queue;
+	volatile u32 *status;
+
+	if (unlikely(offset >= txq->ring_depth)) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	desc_idx = txq->next_to_use + offset;
+
+	/* Round up to the next rs_thresh boundary: DD is presumably only
+	 * written back on RS descriptors — check against hardware spec.
+	 * The rounded index may wrap the ring up to twice.
+	 */
+	desc_idx = ((desc_idx + txq->rs_thresh - 1) / txq->rs_thresh) * txq->rs_thresh;
+	if (desc_idx >= txq->ring_depth) {
+		desc_idx -= txq->ring_depth;
+		if (desc_idx >= txq->ring_depth) {
+			desc_idx -= txq->ring_depth;
+		}
+	}
+
+	status = &txq->desc_ring[desc_idx].wb.status;
+	if (*status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD)) {
+		ret = RTE_ETH_TX_DESC_DONE;
+	}
+
+l_end:
+	return ret;
+}
+
diff --git a/drivers/net/sxe/base/sxe_tx_common.h b/drivers/net/sxe/base/sxe_tx_common.h
new file mode 100644
index 0000000000..2759ef5a7a
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_tx_common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_TX_COMMON_H__
+#define __SXE_TX_COMMON_H__
+
+int __sxe_tx_descriptor_status(void *tx_queue, u16 offset);
+
+u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+#endif
diff --git a/drivers/net/sxe/base/sxe_types.h b/drivers/net/sxe/base/sxe_types.h
new file mode 100644
index 0000000000..966ee230b3
--- /dev/null
+++ b/drivers/net/sxe/base/sxe_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_DPDK_TYPES_H__
+#define __SXE_DPDK_TYPES_H__
+
+#include <sys/time.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_common.h>
+
+typedef uint8_t		u8;
+typedef uint16_t	u16;
+typedef uint32_t	u32;
+typedef uint64_t	u64; 
+
+typedef char		s8;
+typedef int16_t		s16;
+typedef int32_t		s32;
+typedef int64_t		s64;
+
+typedef s8		S8;
+typedef s16		S16;
+typedef s32		S32;
+
+#define __le16  u16
+#define __le32  u32
+#define __le64  u64
+
+#define __be16  u16
+#define __be32  u32
+#define __be64  u64
+
+#endif
diff --git a/drivers/net/sxe/base/sxevf_hw.c b/drivers/net/sxe/base/sxevf_hw.c
new file mode 100644
index 0000000000..75ac9dd25b
--- /dev/null
+++ b/drivers/net/sxe/base/sxevf_hw.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) 
+#include <linux/etherdevice.h>
+
+#include "sxevf_hw.h"
+#include "sxevf_regs.h"
+#include "sxe_log.h"
+#include "sxevf_irq.h"
+#include "sxevf_msg.h"
+#include "sxevf_ring.h"
+#include "sxevf.h"
+#include "sxevf_rx_proc.h"
+#else 
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+#include "sxe_dpdk_version.h"
+#include "sxe_compat_version.h"
+#include "sxevf.h"
+#include "sxevf_hw.h"
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+struct sxevf_adapter;
+#endif
+
+#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#define DMA_MASK_NONE	0x0ULL
+
+#define  SXEVF_REG_READ_CNT    5
+
+#define SXE_REG_READ_FAIL       0xffffffffU
+
+#define SXEVF_RING_WAIT_LOOP                   (100)
+#define SXEVF_MAX_RX_DESC_POLL                 (10)
+
+
+#define SXEVF_REG_READ(hw, addr)        sxevf_reg_read(hw, addr)
+#define SXEVF_REG_WRITE(hw, reg, value) sxevf_reg_write(hw, reg, value)
+#define SXEVF_WRITE_FLUSH(a) sxevf_reg_read(a, SXE_VFSTATUS)
+
+#ifndef SXE_DPDK 
+/* Mark the VF hardware as faulted (idempotent) and invoke the registered
+ * fault callback, if any.
+ */
+void sxevf_hw_fault_handle(struct sxevf_hw *hw)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (test_bit(SXEVF_HW_FAULT, &hw->state)) {
+		goto l_ret;
+	}
+
+	set_bit(SXEVF_HW_FAULT, &hw->state);
+
+	LOG_DEV_ERR("sxe nic hw fault\n");
+
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL) ) {
+		hw->fault_handle(hw->priv);
+	}
+
+l_ret:
+	return;
+}
+
+/* After a failed read of @reg, decide whether the whole device is gone:
+ * poll SXE_VFSTATUS up to SXEVF_REG_READ_CNT times; if it still reads as
+ * all-ones, declare a hardware fault. A failed read of SXE_VFSTATUS itself
+ * is treated as a fault immediately.
+ */
+static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg)
+{
+	u32 value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxevf_adapter *adapter = hw->adapter;
+	u8 i;
+
+	if (reg == SXE_VFSTATUS) {
+		sxevf_hw_fault_handle(hw);
+		return;
+	}
+
+
+	for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
+		value = hw->reg_read(base_addr + SXE_VFSTATUS);
+
+		if (value != SXEVF_REG_READ_FAIL) {
+			break;
+		}
+
+		mdelay(20);
+	}
+
+	LOG_INFO_BDF("retry done i:%d value:0x%x\n", i, value);
+
+	if (value == SXEVF_REG_READ_FAIL) {
+		sxevf_hw_fault_handle(hw);
+	}
+
+	return;
+}
+
+/* Kernel-side register read: short-circuits to the all-ones fail value when
+ * the device is already marked faulted; escalates new all-ones reads to
+ * sxevf_hw_fault_check().
+ */
+STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+{
+	u32 value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		value = SXEVF_REG_READ_FAIL;
+		goto l_ret;
+	}
+
+	value = hw->reg_read(base_addr + reg);
+	if (unlikely(SXEVF_REG_READ_FAIL == value)) {
+		LOG_ERROR_BDF("reg[0x%x] read failed, value=%#x\n", reg, value);
+		sxevf_hw_fault_check(hw, reg);
+	}
+
+l_ret:
+	return value;
+}
+
+/* Kernel-side register write; silently dropped once the device is marked
+ * faulted.
+ */
+STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	if (sxevf_is_hw_fault(hw)) {
+		goto l_ret;
+	}
+
+	hw->reg_write(value, base_addr + reg);
+
+l_ret:
+	return;
+}
+
+#else 
+
+/* DPDK-side register read (little-endian MMIO). An all-ones result is
+ * retried up to SXEVF_REG_READ_CNT times with a 3 ms delay; the last value
+ * read is returned either way.
+ */
+STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8  __iomem *base_addr = hw->reg_base_addr;
+
+	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+	if (unlikely(SXEVF_REG_READ_FAIL == value)) {
+		for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
+			LOG_ERROR("reg[0x%x] read failed, value=%#x\n",
+							reg, value);
+			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+			if (value != SXEVF_REG_READ_FAIL) {
+				LOG_INFO("reg[0x%x] read ok, value=%#x\n",
+								reg, value);
+				break;
+			}
+
+			mdelay(3);
+		}
+	}
+
+	return value;
+}
+
+/* DPDK-side register write (little-endian MMIO). */
+STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
+
+	return;
+}
+#endif
+
+/* Quiesce the VF datapath: disable every Rx ring, mask and clear all
+ * interrupts, then disable every Tx ring.
+ */
+void sxevf_hw_stop(struct sxevf_hw *hw)
+{
+	u8 i;
+	u32 value;
+
+	for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
+		value = SXEVF_REG_READ(hw, SXE_VFRXDCTL(i));
+		if (value & SXE_VFRXDCTL_ENABLE) {
+			value &= ~SXE_VFRXDCTL_ENABLE;
+			SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), value);
+		}
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	/* Mask all event interrupts and clear any pending cause. */
+	SXEVF_REG_WRITE(hw, SXE_VFEIMC, SXEVF_VFEIMC_IRQ_MASK);
+	SXEVF_REG_READ(hw, SXE_VFEICR);
+
+	for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
+		value = SXEVF_REG_READ(hw, SXE_VFTXDCTL(i));
+		if (value & SXE_VFTXDCTL_ENABLE) {
+			value &= ~SXE_VFTXDCTL_ENABLE;
+			SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), value);
+		}
+	}
+
+	return;
+}
+
+/* Write one 32-bit word into slot @index of the VF mailbox memory. */
+void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	SXEVF_REG_WRITE(hw, SXE_VFMBMEM + (index << 2), msg);
+
+	LOG_DEBUG_BDF("index:%u write mbx mem:0x%x.\n", index, msg);
+
+	return;
+}
+
+/* Read one 32-bit word from slot @index of the VF mailbox memory. */
+u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index)
+{
+	u32 value = SXEVF_REG_READ(hw, SXE_VFMBMEM + (index << 2));
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("index:%u read mbx mem:0x%x.\n", index, value);
+
+	return value;
+}
+
+/* Read the VF mailbox control/status register. */
+u32 sxevf_mailbox_read(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFMAILBOX);
+}
+
+/* Write the VF mailbox control/status register. */
+void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, value);
+	return;
+}
+
+/* Raise the REQ bit in the VF mailbox to signal a message request to the PF. */
+void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_REQ);
+
+	return;
+}
+
+/* Raise the ACK bit in the VF mailbox to acknowledge a PF message. */
+void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_ACK);
+
+	return;
+}
+
+/* Map the misc/event (non-queue) interrupt cause to MSI-X @vector via the
+ * low byte of the misc IVAR register; the ALLOC_VALID bit marks the entry
+ * as in use.
+ */
+void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector)
+{
+	u8  allocation;
+	u32 ivar;
+
+	allocation = vector | SXEVF_IVAR_ALLOC_VALID;
+
+	ivar = SXEVF_REG_READ(hw, SXE_VFIVAR_MISC);
+	ivar &= ~0xFF;
+	ivar |= allocation;
+
+	SXEVF_REG_WRITE(hw, SXE_VFIVAR_MISC, ivar);
+
+	return;
+}
+
+/* Unmask the interrupt causes given in @value (EIMS set register). */
+void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIMS, value);
+
+	return;
+}
+
+/* Enable auto-mask and unmask the interrupt causes in @mask. */
+void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIAM, mask);
+	SXEVF_REG_WRITE(hw, SXE_VFEIMS, mask);
+
+	return;
+}
+
+/* Disable auto-mask and mask every interrupt cause, then flush the writes. */
+void sxevf_irq_disable(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIAM, 0);
+	SXEVF_REG_WRITE(hw, SXE_VFEIMC, ~0);
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Map ring @hw_ring_idx (Rx or Tx per @is_tx) to MSI-X @vector. Each IVAR
+ * register covers two rings with an 8-bit field per ring per direction:
+ * bit position = (ring parity * 16) + (is_tx * 8).
+ */
+void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector)
+{
+	u8  allocation;
+	u32 ivar, position;
+
+	allocation = vector | SXEVF_IVAR_ALLOC_VALID;
+
+	position = ((hw_ring_idx & 1) * 16) + (8 * is_tx);
+
+	ivar = SXEVF_REG_READ(hw, SXE_VFIVAR(hw_ring_idx >> 1));
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXEVF_REG_WRITE(hw, SXE_VFIVAR(hw_ring_idx >> 1), ivar);
+
+	return;
+}
+
+/* Program the throttle interval for ring vector @irq_idx; CNT_WDIS disables
+ * the counter-based write-back side effect of writing EITR.
+ */
+void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval)
+{
+	u32 eitr = interval & SXEVF_EITR_ITR_MASK;
+
+	eitr |= SXEVF_EITR_CNT_WDIS;
+
+	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), eitr);
+
+	return;
+}
+
+/* Program the raw EITR value for event vector @irq_idx. */
+static void sxevf_event_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), value);
+
+	return;
+}
+
+/* Clear pending interrupt causes; EICR is read-to-clear. */
+static void sxevf_pending_irq_clear(struct sxevf_hw *hw)
+{
+	SXEVF_REG_READ(hw, SXE_VFEICR);
+
+	return;
+}
+
+/* Software-trigger the interrupt causes given in @eics (EICS register). */
+static void sxevf_ring_irq_trigger(struct sxevf_hw *hw, u64 eics)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEICS, eics);
+
+	return;
+}
+
+/* VF interrupt operation table wired into struct sxevf_hw. */
+static const struct sxevf_irq_operations sxevf_irq_ops = {
+	.ring_irq_interval_set   = sxevf_ring_irq_interval_set,
+	.event_irq_interval_set  = sxevf_event_irq_interval_set,
+	.ring_irq_map         = sxevf_hw_ring_irq_map,
+	.event_irq_map           = sxevf_event_irq_map,
+	.pending_irq_clear       = sxevf_pending_irq_clear,
+	.ring_irq_trigger        = sxevf_ring_irq_trigger,
+	.specific_irq_enable     = sxevf_specific_irq_enable,
+	.irq_enable              = sxevf_irq_enable,
+	.irq_disable             = sxevf_irq_disable,
+};
+
+/* Request a VF function reset and flush the posted write. */
+void sxevf_hw_reset(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFCTRL, SXE_VFCTRL_RST);
+	SXEVF_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* True once the RST bit has self-cleared, i.e. the reset completed. */
+STATIC bool sxevf_hw_rst_done(struct sxevf_hw *hw)
+{
+	return !(SXEVF_REG_READ(hw, SXE_VFCTRL) & SXE_VFCTRL_RST);
+}
+
+/* Return the raw VFLINKS link-status register value. */
+u32 sxevf_link_state_get(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFLINKS);
+}
+
+/* Registers captured by the regs_dump op. */
+u32 dump_regs[] = {
+	SXE_VFCTRL,
+};
+
+/* Number of registers a full register dump produces. */
+u16 sxevf_reg_dump_num_get(void)
+{
+	return ARRAY_SIZE(dump_regs);
+}
+
+/* Dump the registers listed in dump_regs[] into @regs_buff.
+ *
+ * @buf_size is the caller's buffer size in bytes. The derived register
+ * count is clamped to the dump_regs[] length so an oversized buffer can
+ * never cause an out-of-bounds read of the table.
+ * Returns the number of registers written.
+ */
+static u32 sxevf_reg_dump(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size)
+{
+	u32 i;
+	u32 regs_num = buf_size / sizeof(u32);
+
+	if (regs_num > ARRAY_SIZE(dump_regs)) {
+		regs_num = ARRAY_SIZE(dump_regs);
+	}
+
+	for (i = 0; i < regs_num; i++) {
+		regs_buff[i] = SXEVF_REG_READ(hw, dump_regs[i]);
+	}
+
+	return i;
+}
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+/* One self-test entry: register base, number of instances (0x40 apart for
+ * ring registers), test type (PATTERN_TEST etc.), writable-bit mask and
+ * the value to write.
+ */
+struct sxevf_self_test_reg {
+	u32 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* Register self-test table; terminated by a zero .reg entry. */
+static const struct sxevf_self_test_reg self_test_reg[] = {
+	{ SXE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ SXE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
+	{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, SXEVF_RXDCTL_ENABLE },
+	{ SXE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+	{ SXE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ SXE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ .reg = 0 }
+};
+
+/* Write each test pattern (masked by @write) to @reg, read it back and
+ * compare against the pattern masked by @write & @mask. The original
+ * register value is restored after every pattern. Returns 0 on success or
+ * a negative diagnostic error code.
+ */
+static s32 sxevf_reg_pattern_test(struct sxevf_hw *hw, u32 reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 pat, val, before;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXEVF_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = SXEVF_REG_READ(hw, reg);
+
+		SXEVF_REG_WRITE(hw, reg, test_pattern[pat] & write);
+		val = SXEVF_REG_READ(hw, reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			LOG_MSG_ERR(drv, "pattern test reg %04X failed: "
+					"got 0x%08X expected 0x%08X\n",
+				reg, val, (test_pattern[pat] & write & mask));
+			SXEVF_REG_WRITE(hw, reg, before);
+			ret = -SXEVF_DIAG_REG_PATTERN_TEST_ERR;
+			goto l_end;
+		}
+
+		SXEVF_REG_WRITE(hw, reg, before);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Write @write (masked) to @reg, read back and verify the masked bits took
+ * effect; the original value is restored afterwards. Returns 0 on success
+ * or a negative diagnostic error code.
+ */
+static s32 sxevf_reg_set_and_check(struct sxevf_hw *hw, int reg,
+				u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 val, before;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXEVF_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	before = SXEVF_REG_READ(hw, reg);
+	SXEVF_REG_WRITE(hw, reg, write & mask);
+	val = SXEVF_REG_READ(hw, reg);
+	if ((write & mask) != (val & mask)) {
+		LOG_DEV_ERR("set/check reg %04X test failed: "
+				"got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
+		SXEVF_REG_WRITE(hw, reg, before);
+		ret = -SXEVF_DIAG_CHECK_REG_TEST_ERR;
+		goto l_end;
+	}
+
+	SXEVF_REG_WRITE(hw, reg, before);
+
+l_end:
+	return ret;
+}
+
+/* Run the self_test_reg[] table: for each entry, test every instance using
+ * the stride implied by its test type (0x40 for ring registers, 4 for
+ * 32-bit tables, 8 for 64-bit tables). Stops at the first failure.
+ */
+STATIC s32 sxevf_regs_test(struct sxevf_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	const struct sxevf_self_test_reg *test = self_test_reg;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case TABLE32_TEST:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 4),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 8),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				ret = sxevf_reg_pattern_test(hw,
+					(test->reg + 4) + (i * 8),
+					test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				ret = sxevf_reg_set_and_check(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				SXEVF_REG_WRITE(hw, test->reg + (i * 0x40),
+						test->write);
+				break;
+			default:
+				LOG_ERROR_BDF("reg test mod err, type=%d\n",
+						test->test_type);
+				break;
+			}
+
+			if (ret) {
+				goto l_end;
+			}
+
+		}
+		test++;
+	}
+
+l_end:
+	return ret;
+}
+
+/* VF setup/diagnostic operation table wired into struct sxevf_hw. */
+static const struct sxevf_setup_operations sxevf_setup_ops = {
+	.reset		= sxevf_hw_reset,
+	.hw_stop	= sxevf_hw_stop,
+	.regs_test	= sxevf_regs_test,
+	.regs_dump	= sxevf_reg_dump,
+	.link_state_get	= sxevf_link_state_get,
+	.reset_done = sxevf_hw_rst_done,
+};
+
+/* Program the Tx descriptor ring of @reg_idx: 64-bit DMA base address split
+ * across TDBAL/TDBAH, ring length in bytes, and head/tail reset to 0.
+ */
+static void sxevf_tx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr & \
+						DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
+	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
+
+	return;
+}
+
+/* Disable head write-back for Tx ring @reg_idx by zeroing TDWBAH/TDWBAL. */
+static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDWBAH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDWBAL(reg_idx), 0);
+
+	return;
+}
+
+/* Program the Tx descriptor thresholds (write-back, host and prefetch) into
+ * TXDCTL for ring @reg_idx.
+ */
+static void sxevf_tx_desc_thresh_set(
+				struct sxevf_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefech_thresh)
+{
+	u32 txdctl = 0;
+
+	txdctl |= (wb_thresh << SXEVF_TXDCTL_WTHRESH_SHIFT);
+	txdctl |= (host_thresh << SXEVF_TXDCTL_HTHRESH_SHIFT) |
+			prefech_thresh;
+
+	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+	return;
+}
+
+/* Enable or disable Tx ring @reg_idx and poll TXDCTL until the ENABLE bit
+ * reflects the requested state; logs an error if the poll budget runs out.
+ */
+void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 wait_loop = SXEVF_MAX_TXRX_DESC_POLL;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXEVF_TXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+		} while (--wait_loop && !(txdctl & SXEVF_TXDCTL_ENABLE));
+	} else {
+		txdctl &= ~SXEVF_TXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+		} while (--wait_loop && (txdctl & SXEVF_TXDCTL_ENABLE));
+	}
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("tx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+/* Disable Rx ring @reg_idx and poll RXDCTL until the ENABLE bit clears.
+ * No-op when the register BAR is not mapped.
+ */
+static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXEVF_RX_RING_POLL_MAX;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (!hw->reg_base_addr) {
+		goto l_end;
+	}
+
+	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	rxdctl &= ~SXE_VFRXDCTL_ENABLE;
+	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+	do {
+		udelay(10);
+		rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & SXE_VFRXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		LOG_ERROR_BDF("RXDCTL.ENABLE queue %d not cleared while polling\n",
+				reg_idx);
+	}
+
+l_end:
+	return;
+}
+
+/* Enable or disable Rx ring @reg_idx and poll RXDCTL until the ENABLE bit
+ * reflects the requested state. On enable, the VME bit is set alongside
+ * ENABLE. Logs an error if the poll budget runs out.
+ */
+void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXEVF_RING_WAIT_LOOP;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	if (is_on) {
+		rxdctl |= SXEVF_RXDCTL_ENABLE | SXEVF_RXDCTL_VME;
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+		} while (--wait_loop && !(rxdctl & SXEVF_RXDCTL_ENABLE));
+	} else {
+		rxdctl &= ~SXEVF_RXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+		} while (--wait_loop && (rxdctl & SXEVF_RXDCTL_ENABLE));
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("rx ring %u switch %u failed within "
+			  "the polling period\n", reg_idx, is_on);
+	}
+
+	return;
+}
+
+/* Program the Rx descriptor ring of @reg_idx: 64-bit DMA base split across
+ * RDBAL/RDBAH, ring length in bytes (flushed before touching head/tail),
+ * then head/tail reset to 0.
+ */
+void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRDBAL(reg_idx),
+			(desc_dma_addr & DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXE_VFRDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXE_VFRDLEN(reg_idx), desc_mem_len);
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	SXEVF_REG_WRITE(hw, SXE_VFRDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), 0);
+
+	return;
+}
+
+/* Program SRRCTL for Rx ring @reg_idx: optional no-descriptor drop, header
+ * buffer length and packet buffer length (the latter in SRRCTL's scaled
+ * units via BSIZEPKT_SHIFT).
+ */
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len, bool drop_en)
+{
+	u32 srrctl = 0;
+
+	if (drop_en) {
+		srrctl = SXEVF_SRRCTL_DROP_EN;
+	}
+
+	srrctl |= ((header_buf_len << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+			SXEVF_SRRCTL_BSIZEHDR_MASK);
+	srrctl |= ((pkg_buf_len >> SXEVF_SRRCTL_BSIZEPKT_SHIFT) &
+			SXEVF_SRRCTL_BSIZEPKT_MASK);
+
+	SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(reg_idx), srrctl);
+
+	return;
+}
+
+/* Read the current head and tail pointers of Tx ring @idx. */
+static void sxevf_tx_ring_info_get(struct sxevf_hw *hw,
+				u8 idx, u32 *head, u32 *tail)
+{
+	*head = SXEVF_REG_READ(hw, SXE_VFTDH(idx));
+	*tail = SXEVF_REG_READ(hw, SXE_VFTDT(idx));
+
+	return;
+}
+
+/* VF DMA/ring operation table wired into struct sxevf_hw. */
+static const struct sxevf_dma_operations sxevf_dma_ops = {
+	.tx_ring_desc_configure  = sxevf_tx_ring_desc_configure,
+	.tx_writeback_off      = sxevf_tx_writeback_off,
+	.tx_desc_thresh_set    = sxevf_tx_desc_thresh_set,
+	.tx_ring_switch        = sxevf_tx_ring_switch,
+	.tx_ring_info_get      = sxevf_tx_ring_info_get,
+
+	.rx_disable          = sxevf_rx_disable,
+	.rx_ring_switch      = sxevf_rx_ring_switch,
+	.rx_ring_desc_configure= sxevf_rx_ring_desc_configure,
+	.rx_rcv_ctl_configure  = sxevf_rx_rcv_ctl_configure,
+};
+
+#ifdef SXE_DPDK
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last, cur)                          \
+	{																\
+		u32 latest = SXEVF_REG_READ(hw, reg);				\
+		cur += (latest - last) & UINT_MAX;						\
+		last = latest;											\
+	}
+	
+#define SXEVF_36BIT_COUNTER_UPDATE(lsb, msb, last, cur)                \
+	{																 \
+		u64 new_lsb = SXEVF_REG_READ(hw, lsb);					 \
+		u64 new_msb = SXEVF_REG_READ(hw, msb);					 \
+		u64 latest = ((new_msb << 32) | new_lsb);				 \
+		cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+		last = latest;											 \
+	}
+
+#else
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last_counter, counter)	\
+	{							\
+		u32 current_counter = SXEVF_REG_READ(hw, reg);	\
+		if (current_counter < last_counter)		\
+			counter += 0x100000000LL;		\
+		last_counter = current_counter;			\
+		counter &= 0xFFFFFFFF00000000LL;		\
+		counter |= current_counter;			\
+	}
+
+#define SXEVF_36BIT_COUNTER_UPDATE(reg_lsb, reg_msb, last_counter, counter) \
+	{								 \
+		u64 current_counter_lsb = SXEVF_REG_READ(hw, reg_lsb);	 \
+		u64 current_counter_msb = SXEVF_REG_READ(hw, reg_msb);	 \
+		u64 current_counter = (current_counter_msb << 32) |	 \
+			current_counter_lsb;				 \
+		if (current_counter < last_counter)			 \
+			counter += 0x1000000000LL;			 \
+		last_counter = current_counter;				 \
+		counter &= 0xFFFFFFF000000000LL;			 \
+		counter |= current_counter;				 \
+	}
+#endif
+
+/*
+ * Fold the current VF packet/octet counters into the accumulated
+ * software statistics.  Each SXEVF_*_COUNTER_UPDATE macro reads the
+ * register through the implicit local 'hw', computes a wrap-aware
+ * delta against stats->last_* and updates the running total.
+ */
+void sxevf_packet_stats_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats)
+{
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPRC, stats->last_vfgprc,
+				stats->vfgprc);
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPTC, stats->last_vfgptc,
+				stats->vfgptc);
+	SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGORC_LSB, SXEVF_VFGORC_MSB,
+				stats->last_vfgorc,
+				stats->vfgorc);
+	SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGOTC_LSB, SXEVF_VFGOTC_MSB,
+				stats->last_vfgotc,
+				stats->vfgotc);
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFMPRC, stats->last_vfmprc,
+				stats->vfmprc);
+
+	return;
+}
+
+/*
+ * Latch the current hardware counter values as the baseline for the
+ * next sxevf_packet_stats_get() delta computation.
+ * NOTE(review): each 36-bit counter reads LSB before MSB; whether the
+ * hardware latches the MSB on the LSB read (preventing tearing) is not
+ * visible here - confirm against the datasheet.
+ */
+void sxevf_stats_init_value_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats)
+{
+	stats->last_vfgprc = SXEVF_REG_READ(hw, SXE_VFGPRC);
+	stats->last_vfgorc = SXEVF_REG_READ(hw, SXE_VFGORC_LSB);
+	stats->last_vfgorc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGORC_MSB))) << 32);
+	stats->last_vfgptc = SXEVF_REG_READ(hw, SXE_VFGPTC);
+	stats->last_vfgotc = SXEVF_REG_READ(hw, SXE_VFGOTC_LSB);
+	stats->last_vfgotc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGOTC_MSB))) << 32);
+	stats->last_vfmprc = SXEVF_REG_READ(hw, SXE_VFMPRC);
+
+	return;
+}
+/* statistics hooks published through hw->stat.ops */
+static const struct sxevf_stat_operations sxevf_stat_ops = {
+	.packet_stats_get	= sxevf_packet_stats_get,
+	.stats_init_value_get	= sxevf_stats_init_value_get,
+};
+
+/*
+ * Program VFPSRTYPE according to the number of RX rings in use.
+ * Fix vs. original: the bare BIT(29) is replaced by the named
+ * SXEVF_PSRTYPE_RQPL_SHIFT constant declared in sxevf_regs.h, so the
+ * register layout stays self-documenting.
+ */
+static void sxevf_rx_max_used_ring_set(struct sxevf_hw *hw, u16 max_rx_ring)
+{
+	u32 psrtype = 0;
+
+	if (max_rx_ring > 1)
+		psrtype |= BIT(SXEVF_PSRTYPE_RQPL_SHIFT);
+
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, psrtype);
+}
+
+/* dbu (presumably "data buffer unit" - confirm) hooks for hw->dbu.ops */
+static const struct sxevf_dbu_operations sxevf_dbu_ops = {
+	.rx_max_used_ring_set = sxevf_rx_max_used_ring_set,
+};
+
+/* PF<->VF mailbox primitives published through hw->mbx.ops */
+static const struct sxevf_mbx_operations sxevf_mbx_ops = {
+	.mailbox_read       = sxevf_mailbox_read,
+	.mailbox_write      = sxevf_mailbox_write,
+	.msg_write          = sxevf_msg_write,
+	.msg_read           = sxevf_msg_read,
+	.pf_req_irq_trigger = sxevf_pf_req_irq_trigger,
+	.pf_ack_irq_trigger = sxevf_pf_ack_irq_trigger,
+};
+
+/* Attach all per-subsystem VF operation tables to the hw handle. */
+void sxevf_hw_ops_init(struct sxevf_hw *hw)
+{
+	hw->setup.ops = &sxevf_setup_ops;
+	hw->irq.ops   = &sxevf_irq_ops;
+	hw->mbx.ops   = &sxevf_mbx_ops;
+	hw->dma.ops   = &sxevf_dma_ops;
+	hw->stat.ops  = &sxevf_stat_ops;
+	hw->dbu.ops   = &sxevf_dbu_ops;
+}
+
+#ifdef SXE_DPDK
+
+/* VFMRQC: upper 16 bits select RSS hash fields, bit 0 enables RSS
+ * (see sxevf_hw_rss_field_get/set and sxevf_hw_is_rss_enabled below).
+ * Fix vs. original: trailing whitespace removed (checkpatch).
+ */
+#define SXEVF_RSS_FIELD_MASK        0xffff0000
+#define SXEVF_MRQC_RSSEN            0x00000001
+
+/* 40-byte RSS key == 10 32-bit VFRSSRK register entries */
+#define SXEVF_RSS_KEY_SIZE          (40)
+#define SXEVF_MAX_RSS_KEY_ENTRIES   (10)
+#define SXEVF_MAX_RETA_ENTRIES      (128)
+
+/*
+ * Put the VF RX/TX queue registers into a known post-reset state:
+ * clear VFPSRTYPE, zero every ring's head/tail/control/write-back
+ * registers and program a default SRRCTL value.
+ * NOTE(review): the loop covers rings 0-6 while SXEVF_TXRX_RING_NUM_MAX
+ * is 8 - confirm the intended ring count.
+ */
+void sxevf_rxtx_reg_init(struct sxevf_hw *hw)
+{
+	int i;
+	u32 vfsrrctl;
+
+	/* presumably 0x100 encodes the header buffer size and 0x800 the
+	 * 2 KB packet buffer in SRRCTL units - verify against the datasheet
+	 */
+	vfsrrctl = 0x100 << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	vfsrrctl |= 0x800 >> SXEVF_SRRCTL_BSIZEPKT_SHIFT;
+
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, 0);
+
+	for (i = 0; i < 7; i++) {
+		SXEVF_REG_WRITE(hw, SXE_VFRDH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFRDT(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(i), vfsrrctl);
+		SXEVF_REG_WRITE(hw, SXE_VFTDH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDT(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDWBAH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDWBAL(i), 0);
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	return;
+}
+
+/* Return the current VF event/interrupt cause register (EICR) value. */
+u32 sxevf_irq_cause_get(struct sxevf_hw *hw)
+{
+	u32 eicr = SXEVF_REG_READ(hw, SXE_VFEICR);
+
+	return eicr;
+}
+
+/*
+ * Program the descriptor base address and length of TX ring <reg_idx>
+ * and reset its head/tail pointers to zero.
+ */
+void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx)
+{
+	u32 addr_low = (u32)(desc_dma_addr & DMA_BIT_MASK(32));
+	u32 addr_high = (u32)(desc_dma_addr >> 32);
+
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), addr_low);
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), addr_high);
+	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
+	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
+}
+
+/* Write the caller-supplied RSS queue configuration into VFPSRTYPE. */
+void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, value);
+}
+
+/* Toggle hardware VLAN tag stripping (VME bit) on RX ring <reg_index>. */
+void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
+					u16 reg_index, bool is_enable)
+{
+	u32 rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_index));
+
+	if (is_enable)
+		rxdctl |= SXEVF_RXDCTL_VME;
+	else
+		rxdctl &= ~SXEVF_RXDCTL_VME;
+
+	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_index), rxdctl);
+}
+
+/*
+ * Merge the prefetch/host/write-back thresholds into TXDCTL for TX
+ * ring <reg_idx>; each field is 7 bits wide at its architected shift.
+ */
+void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
+				u32 prefech_thresh, u32 host_thresh, u32 wb_thresh)
+{
+	u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+
+	txdctl |= (prefech_thresh & SXEVF_TXDCTL_THRESH_MASK);
+	txdctl |= (host_thresh & SXEVF_TXDCTL_THRESH_MASK)
+			<< SXEVF_TXDCTL_HTHRESH_SHIFT;
+	txdctl |= (wb_thresh & SXEVF_TXDCTL_THRESH_MASK)
+			<< SXEVF_TXDCTL_WTHRESH_SHIFT;
+
+	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+}
+
+/* Advance the RX descriptor tail pointer of ring <reg_idx>. */
+void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), value);
+}
+
+/* Read the RETA dword holding redirection entry <reg_idx> (4 per dword). */
+u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx)
+{
+	u32 reta = SXEVF_REG_READ(hw, SXE_VFRETA(reg_idx >> 2));
+
+	return reta;
+}
+
+/* Write the RETA dword holding redirection entry <reg_idx> (4 per dword). */
+void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRETA(reg_idx >> 2), value);
+}
+
+/* Return RSS key dword <reg_idx>, or 0 when the index is out of range. */
+u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx)
+{
+	return (reg_idx < SXEVF_MAX_RSS_KEY_ENTRIES) ?
+			SXEVF_REG_READ(hw, SXE_VFRSSRK(reg_idx)) : 0;
+}
+
+/* Extract the RSS hash-field selection bits (upper 16) from VFMRQC. */
+u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFMRQC) & SXEVF_RSS_FIELD_MASK;
+}
+
+/* Report whether the RSS enable bit is currently set in VFMRQC. */
+bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+
+	return (mrqc & SXEVF_MRQC_RSSEN) != 0;
+}
+
+/* Program the full RSS hash key, one 32-bit VFRSSRK register at a time. */
+void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key)
+{
+	u32 idx;
+
+	for (idx = 0; idx < SXEVF_MAX_RSS_KEY_ENTRIES; idx++)
+		SXEVF_REG_WRITE(hw, SXE_VFRSSRK(idx), rss_key[idx]);
+}
+
+/* Set or clear the RSS enable bit in VFMRQC, preserving other fields. */
+void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+
+	if (is_on)
+		mrqc |= SXEVF_MRQC_RSSEN;
+	else
+		mrqc &= ~SXEVF_MRQC_RSSEN;
+
+	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
+}
+
+/* Replace the RSS hash-field selection bits of VFMRQC with rss_field. */
+void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+
+	mrqc = (mrqc & ~SXEVF_RSS_FIELD_MASK) | rss_field;
+	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
+}
+
+/*
+ * Dump every register described by the 'regs' table (terminated by an
+ * entry with count == 0) into reg_buf and return the number of dwords
+ * stored.
+ * Fixes vs. original: the accumulator is u32 to match the return type
+ * (was signed int), the stray ';' after the function body is dropped,
+ * and the space before ',' in the log call is removed.
+ */
+u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
+				const struct sxevf_reg_info *regs,
+				u32 *reg_buf)
+{
+	u32 i = 0;
+	u32 count = 0;
+
+	while (regs[i].count) {
+		u32 j;
+
+		for (j = 0; j < regs[i].count; j++) {
+			reg_buf[count + j] = SXEVF_REG_READ(hw,
+					regs[i].addr + j * regs[i].stride);
+			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
+		}
+
+		count += j;
+		i++;
+	}
+
+	return count;
+}
+
+#endif
diff --git a/drivers/net/sxe/base/sxevf_hw.h b/drivers/net/sxe/base/sxevf_hw.h
new file mode 100644
index 0000000000..67d711d5b8
--- /dev/null
+++ b/drivers/net/sxe/base/sxevf_hw.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_HW_H__
+#define __SXEVF_HW_H__
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#else
+#include "sxe_compat_platform.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+#endif
+
+#include "sxevf_regs.h"
+
+#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#define SXE_PRIU64  "llu"
+#define SXE_PRIX64  "llx"
+#define SXE_PRID64  "lld"
+#else
+#define SXE_PRIU64  PRIu64
+#define SXE_PRIX64  PRIx64
+#define SXE_PRID64  PRId64
+#endif
+
+/* VF queue limits and TXDCTL threshold field layout.
+ * Fix vs. original: trailing whitespace removed (checkpatch).
+ */
+#define SXEVF_TXRX_RING_NUM_MAX           8
+#define SXEVF_MAX_TXRX_DESC_POLL          (10)
+#define SXEVF_TX_DESC_PREFETCH_THRESH_32  (32)
+#define SXEVF_TX_DESC_HOST_THRESH_1       (1)
+#define SXEVF_TX_DESC_WRITEBACK_THRESH_8  (8)
+#define SXEVF_TXDCTL_HTHRESH_SHIFT        (8)
+#define SXEVF_TXDCTL_WTHRESH_SHIFT        (16)
+
+#define SXEVF_TXDCTL_THRESH_MASK          (0x7F)
+
+#define SXEVF_RX_RING_POLL_MAX           (10)
+
+#define SXEVF_MAC_HDR_LEN_MAX           (127)
+#define SXEVF_NETWORK_HDR_LEN_MAX       (511)
+
+/* link speed encodings */
+#define SXEVF_LINK_SPEED_UNKNOWN        0
+#define SXEVF_LINK_SPEED_1GB_FULL	0x0020
+#define SXEVF_LINK_SPEED_10GB_FULL	0x0080
+#define SXEVF_LINK_SPEED_100_FULL	0x0008
+
+#define SXEVF_VFT_TBL_SIZE           (128)
+#define SXEVF_HW_TXRX_RING_NUM_MAX   (128)
+
+#define SXEVF_VLAN_TAG_SIZE          (4)
+
+#define SXEVF_HW_UC_ENTRY_NUM_MAX   128
+
+/* NOTE(review): presumably link-state handling actions (poll PHY, force
+ * down, reinitialize) - confirm against the callers.
+ */
+enum {
+	SXEVF_LINK_TO_PHY   = 0,
+	SXEVF_LINK_TO_DOWN,
+	SXEVF_LINK_TO_REINIT,
+};
+
+/* diagnostic self-test result codes */
+enum {
+	SXEVF_DIAG_TEST_PASSED			= 0,
+	SXEVF_DIAG_TEST_BLOCKED			= 1,
+	SXEVF_DIAG_REG_PATTERN_TEST_ERR		= 2,
+	SXEVF_DIAG_CHECK_REG_TEST_ERR           = 3,
+};
+
+struct sxevf_hw;
+
+/*
+ * VF statistics accumulators.  Mnemonics follow the hardware counter
+ * registers (VFGPRC etc.); presumably gprc/gptc = good packets rx/tx,
+ * gorc/gotc = good octets rx/tx, mprc = multicast packets rx - confirm
+ * with the datasheet.  last_* holds the previous register snapshot used
+ * for wrap-safe deltas (see SXEVF_*_COUNTER_UPDATE in sxevf_hw.c);
+ * base_* and saved_reset_* appear to be values latched around resets -
+ * verify against callers outside this file.
+ */
+struct sxevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+void sxevf_hw_ops_init(struct sxevf_hw *hw);
+
+
+/* basic device setup/control hooks implemented by the VF register layer */
+struct sxevf_setup_operations {
+	void (*reset)(struct sxevf_hw *);
+	void (*hw_stop)(struct sxevf_hw *hw);
+	s32  (*regs_test)(struct sxevf_hw *hw);
+	u32  (*link_state_get)(struct sxevf_hw *hw);
+	u32  (*regs_dump)(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size);
+	bool (*reset_done)(struct sxevf_hw *);
+};
+
+/* container so hw->setup carries its ops table as one unit */
+struct sxevf_hw_setup {
+	const struct sxevf_setup_operations *ops;
+};
+
+/* interrupt management hooks implemented by the VF register layer.
+ * Fix vs. original: "struct sxevf_hw * hw" pointer spacing corrected
+ * to "*hw" (checkpatch error).
+ */
+struct sxevf_irq_operations {
+	void (*pending_irq_clear)(struct sxevf_hw *hw);
+	void (*ring_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
+	void (*event_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 value);
+	void (*ring_irq_map)(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 irq_idx);
+	void (*event_irq_map)(struct sxevf_hw *hw, u16 irq_idx);
+	void (*ring_irq_trigger)(struct sxevf_hw *hw, u64 eics);
+	void (*irq_enable)(struct sxevf_hw *hw, u32 mask);
+	void (*specific_irq_enable)(struct sxevf_hw *hw, u32 value);
+	void (*irq_disable)(struct sxevf_hw *hw);
+	void (*irq_off)(struct sxevf_hw *hw);
+};
+
+struct sxevf_irq_info {
+	const struct sxevf_irq_operations *ops;
+};
+
+/* PF<->VF mailbox primitives implemented by the VF register layer */
+struct sxevf_mbx_operations {
+
+	u32 (*mailbox_read)(struct sxevf_hw *hw);
+	void (*mailbox_write)(struct sxevf_hw *hw, u32 value);
+
+	void (*msg_write)(struct sxevf_hw *hw, u8 index, u32 msg);
+	u32 (*msg_read)(struct sxevf_hw *hw, u8 index);
+
+	void (*pf_req_irq_trigger)(struct sxevf_hw *hw);
+	void (*pf_ack_irq_trigger)(struct sxevf_hw *hw);
+};
+
+/* mailbox traffic counters; by name: messages sent/received and
+ * req/ack/reset events - confirm semantics against the mailbox code
+ */
+struct sxevf_mbx_stats {
+	u32 send_msgs;
+	u32 rcv_msgs;
+
+	u32 reqs;
+	u32 acks;
+	u32 rsts;
+};
+
+/* mailbox state attached to hw->mbx */
+struct sxevf_mbx_info {
+	const struct sxevf_mbx_operations *ops;
+
+	struct sxevf_mbx_stats stats;
+	u32 msg_len;
+	u32 retry;
+	u32 interval;
+	u32 reg_value;
+	u32 api_version;
+};
+
+/* descriptor-ring (DMA) hooks implemented by the VF register layer.
+ * Fixes vs. original: "(* name)" spacing corrected to "(*name)"
+ * (checkpatch) and parameter names added for readability; names follow
+ * the implementations in sxevf_hw.c.
+ */
+struct sxevf_dma_operations {
+	void (*tx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx);
+	void (*tx_writeback_off)(struct sxevf_hw *hw, u8 reg_idx);
+	void (*tx_desc_thresh_set)(struct sxevf_hw *hw, u8 reg_idx,
+					u32 prefetch_thresh, u32 host_thresh,
+					u32 wb_thresh);
+	void (*tx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+	void (*tx_desc_wb_flush)(struct sxevf_hw *hw, u8 reg_idx);
+	void (*tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx,
+					u32 *head, u32 *tail);
+	void (*rx_disable)(struct sxevf_hw *hw, u8 reg_idx);
+	void (*rx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+	void (*rx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx);
+	void (*rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len, bool drop_en);
+};
+
+struct sxevf_dma_info {
+	const struct sxevf_dma_operations *ops;
+};
+
+/* statistics collection hooks */
+struct sxevf_stat_operations {
+	void (*packet_stats_get)(struct sxevf_hw *,
+			struct sxevf_hw_stats *);
+	void (*stats_init_value_get)(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats);
+};
+
+struct sxevf_stat_info {
+	const struct sxevf_stat_operations *ops;
+};
+
+/* dbu (presumably "data buffer unit" - confirm) hooks */
+struct sxevf_dbu_operations {
+	void (*rx_max_used_ring_set)(struct sxevf_hw *, u16);
+
+};
+
+struct sxevf_dbu_info {
+	const struct sxevf_dbu_operations	*ops;
+};
+
+/* bit indices used within hw->state (see sxevf_is_hw_fault) */
+enum sxevf_hw_state {
+	SXEVF_HW_STOP,
+	SXEVF_HW_FAULT,
+};
+
+/*
+ * Per-device VF hardware handle: mapped register base, low-level
+ * register accessors, fault callback and the per-subsystem operation
+ * tables installed by sxevf_hw_ops_init().
+ */
+struct sxevf_hw {
+	u8 __iomem *reg_base_addr;	/* mapped device register space */
+	void *adapter;
+
+	void *priv;	/* context handed to fault_handle */
+	unsigned long state;	/* SXEVF_HW_* bit flags */
+	void (*fault_handle)(void *priv);
+	u32 (*reg_read)(const volatile void *reg);
+	void (*reg_write)(u32 value, volatile void *reg);
+	s32	board_type;
+
+	struct sxevf_hw_setup   setup;
+	struct sxevf_irq_info   irq;
+	struct sxevf_mbx_info   mbx;
+
+	struct sxevf_dma_info    dma;
+	struct sxevf_stat_info   stat;
+	struct sxevf_dbu_info    dbu;
+};
+
+/* one row of a register-dump table; a count of 0 terminates the table
+ * (see sxevf_hw_regs_group_read)
+ */
+struct sxevf_reg_info {
+	u32 addr;	/* offset of the first register in the group */
+	u32 count;	/* number of consecutive registers */
+	u32 stride;	/* byte distance between registers */
+	const s8 *name;	/* label used for log output */
+};
+
+u16 sxevf_reg_dump_num_get(void);
+
+void sxevf_hw_fault_handle(struct sxevf_hw *hw);
+
+/* True when the SXEVF_HW_FAULT bit is set in hw->state. */
+static inline bool sxevf_is_hw_fault(struct sxevf_hw *hw)
+{
+	bool faulted = test_bit(SXEVF_HW_FAULT, &hw->state);
+
+	return faulted;
+}
+
+/* Register the fault callback and its private context on the hw handle. */
+static inline void sxevf_hw_fault_handle_init(struct sxevf_hw *hw,
+			void (*handle)(void *), void *priv)
+{
+	hw->fault_handle = handle;
+	hw->priv = priv;
+}
+
+/* Install the low-level register read/write accessors. */
+static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw,
+		u32 (*read)(const volatile void *),
+		void (*write)(u32, volatile void *))
+{
+	hw->reg_write = write;
+	hw->reg_read = read;
+}
+
+#ifdef SXE_DPDK 
+
+void sxevf_irq_disable(struct sxevf_hw *hw);
+
+void sxevf_hw_stop(struct sxevf_hw *hw);
+
+void sxevf_hw_reset(struct sxevf_hw *hw);
+
+void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg);
+
+u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index);
+
+u32 sxevf_mailbox_read(struct sxevf_hw *hw);
+
+void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value);
+
+void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw);
+
+void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw);
+
+void sxevf_rxtx_reg_init(struct sxevf_hw *hw);
+
+void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask);
+
+u32 sxevf_irq_cause_get(struct sxevf_hw *hw);
+
+void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector);
+
+void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector);
+
+void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
+
+void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx);
+
+void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx);
+
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
+				   u32 header_buf_len, u32 pkg_buf_len,
+				   bool drop_en);
+
+void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value);
+
+void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
+					u16 reg_index, bool is_enable);
+
+void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
+				u32 prefech_thresh, u32 host_thresh, u32 wb_thresh);
+
+void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+
+void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+
+void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value);
+
+void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value);
+
+void sxevf_packet_stats_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats);
+
+void sxevf_stats_init_value_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats);
+
+u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx);
+
+void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
+						u16 reg_idx, u32 value);
+
+u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx);
+
+u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw);
+
+void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field);
+
+void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on);
+
+void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key);
+
+bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw);
+
+u32 sxevf_link_state_get(struct sxevf_hw *hw);
+
+u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
+				const struct sxevf_reg_info *regs,
+				u32 *reg_buf);
+
+#endif 
+#endif
diff --git a/drivers/net/sxe/base/sxevf_regs.h b/drivers/net/sxe/base/sxevf_regs.h
new file mode 100644
index 0000000000..43486db526
--- /dev/null
+++ b/drivers/net/sxe/base/sxevf_regs.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_REGS_H__
+#define __SXEVF_REGS_H__
+
+#define SXEVF_REG_READ_FAIL  0xffffffffU
+#define SXEVF_REG_READ_RETRY 5
+
+#define SXE_VFLINKS_UP		0x00000008
+#define SXE_VFLINKS_SPEED	0x00000006
+#define SXE_VFLINKS_SPEED_10G	0x00000006
+#define SXE_VFLINKS_SPEED_1G	0x00000004
+#define SXE_VFLINKS_SPEED_100	0x00000002
+
+#define SXE_VFCTRL        0x00000
+#define SXE_VFSTATUS      0x00008
+#define SXE_VFLINKS       0x00018
+#define SXE_VFFRTIMER     0x00048
+#define SXE_VFRXMEMWRAP   0x03190
+#define SXE_VFEICR        0x00100
+#define SXE_VFEICS        0x00104
+#define SXE_VFEIMS        0x00108
+#define SXE_VFEIMC        0x0010C
+#define SXE_VFEIAM        0x00114
+#define SXE_VFEITR(x)     (0x00820 + (4 * (x)))
+#define SXE_VFIVAR(x)     (0x00120 + (4 * (x)))
+#define SXE_VFIVAR_MISC    0x00140
+#define SXE_VFRDBAL(x)    (0x01000 + (0x40 * (x)))
+#define SXE_VFRDBAH(x)    (0x01004 + (0x40 * (x)))
+#define SXE_VFRDLEN(x)    (0x01008 + (0x40 * (x)))
+#define SXE_VFRDH(x)      (0x01010 + (0x40 * (x)))
+#define SXE_VFRDT(x)      (0x01018 + (0x40 * (x)))
+#define SXE_VFRXDCTL(x)   (0x01028 + (0x40 * (x)))
+#define SXE_VFSRRCTL(x)   (0x01014 + (0x40 * (x)))
+#define SXE_VFLROCTL(x)   (0x0102C + (0x40 * (x)))
+#define SXE_VFPSRTYPE     0x00300
+#define SXE_VFTDBAL(x)    (0x02000 + (0x40 * (x)))
+#define SXE_VFTDBAH(x)    (0x02004 + (0x40 * (x)))
+#define SXE_VFTDLEN(x)    (0x02008 + (0x40 * (x)))
+#define SXE_VFTDH(x)      (0x02010 + (0x40 * (x)))
+#define SXE_VFTDT(x)      (0x02018 + (0x40 * (x)))
+#define SXE_VFTXDCTL(x)   (0x02028 + (0x40 * (x)))
+#define SXE_VFTDWBAL(x)   (0x02038 + (0x40 * (x)))
+#define SXE_VFTDWBAH(x)   (0x0203C + (0x40 * (x)))
+#define SXE_VFDCA_RXCTRL(x)    (0x0100C + (0x40 * (x)))
+#define SXE_VFDCA_TXCTRL(x)    (0x0200c + (0x40 * (x)))
+#define SXE_VFGPRC        0x0101C
+#define SXE_VFGPTC        0x0201C
+#define SXE_VFGORC_LSB    0x01020
+#define SXE_VFGORC_MSB    0x01024
+#define SXE_VFGOTC_LSB    0x02020
+#define SXE_VFGOTC_MSB    0x02024
+#define SXE_VFMPRC        0x01034
+#define SXE_VFMRQC        0x3000
+#define SXE_VFRSSRK(x)    (0x3100 + ((x) * 4))
+#define SXE_VFRETA(x)     (0x3200 + ((x) * 4))
+
+#define SXEVF_VFEIMC_IRQ_MASK            (7)
+#define SXEVF_IVAR_ALLOC_VALID    (0x80)
+
+#define SXEVF_EITR_CNT_WDIS       (0x80000000)
+#define SXEVF_EITR_ITR_MASK       (0x00000FF8)
+#define SXEVF_EITR_ITR_SHIFT      (2)
+#define SXEVF_EITR_ITR_MAX        (SXEVF_EITR_ITR_MASK >> SXEVF_EITR_ITR_SHIFT)
+
+#define SXE_VFRXDCTL_ENABLE  0x02000000
+#define SXE_VFTXDCTL_ENABLE  0x02000000
+#define SXE_VFCTRL_RST       0x04000000
+
+#define SXEVF_RXDCTL_ENABLE     0x02000000  
+#define SXEVF_RXDCTL_VME	0x40000000  
+
+#define SXEVF_PSRTYPE_RQPL_SHIFT               29 
+
+#define SXEVF_SRRCTL_DROP_EN                   0x10000000
+#define SXEVF_SRRCTL_DESCTYPE_DATA_ONEBUF      0x02000000
+#define SXEVF_SRRCTL_BSIZEPKT_SHIFT            (10)
+#define SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT        (2)
+#define SXEVF_SRRCTL_BSIZEPKT_MASK	       0x0000007F
+#define SXEVF_SRRCTL_BSIZEHDR_MASK	       0x00003F00
+
+#define SXE_VFMAILBOX       0x002FC
+#define SXE_VFMBMEM         0x00200
+
+#define SXE_VFMAILBOX_REQ     0x00000001 
+#define SXE_VFMAILBOX_ACK     0x00000002 
+#define SXE_VFMAILBOX_VFU     0x00000004 
+#define SXE_VFMAILBOX_PFU     0x00000008 
+#define SXE_VFMAILBOX_PFSTS   0x00000010 
+#define SXE_VFMAILBOX_PFACK   0x00000020 
+#define SXE_VFMAILBOX_RSTI    0x00000040 
+#define SXE_VFMAILBOX_RSTD    0x00000080 
+#define SXE_VFMAILBOX_RC_BIT  0x000000B0  
+
+#define SXEVF_TDBAL(_i)      (0x02000 + ((_i) * 0x40))
+#define SXEVF_TDBAH(_i)      (0x02004 + ((_i) * 0x40))
+#define SXEVF_TDLEN(_i)      (0x02008 + ((_i) * 0x40))
+#define SXEVF_TDH(_i)        (0x02010 + ((_i) * 0x40))
+#define SXEVF_TDT(_i)        (0x02018 + ((_i) * 0x40))
+#define SXEVF_TXDCTL(_i)     (0x02028 + ((_i) * 0x40))
+#define SXEVF_TDWBAL(_i)     (0x02038 + ((_i) * 0x40))
+#define SXEVF_TDWBAH(_i)     (0x0203C + ((_i) * 0x40))
+
+#define SXEVF_TXDCTL_SWFLSH  (0x02000000)  
+#define SXEVF_TXDCTL_ENABLE  (0x02000000) 
+
+#define SXEVF_VFGPRC          0x0101C
+#define SXEVF_VFGPTC          0x0201C
+#define SXEVF_VFGORC_LSB      0x01020
+#define SXEVF_VFGORC_MSB      0x01024
+#define SXEVF_VFGOTC_LSB      0x02020
+#define SXEVF_VFGOTC_MSB      0x02024
+#define SXEVF_VFMPRC          0x01034
+
+#define SXEVF_EICR_MASK       0x07
+
+#endif
diff --git a/drivers/net/sxe/include/drv_msg.h b/drivers/net/sxe/include/drv_msg.h
new file mode 100644
index 0000000000..9f06624cc3
--- /dev/null
+++ b/drivers/net/sxe/include/drv_msg.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __DRV_MSG_H__
+#define __DRV_MSG_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_VERSION_LEN 32
+
+
+
+
+
+/* firmware version string returned over the driver-message channel */
+typedef struct sxe_version_resp {
+    U8 fw_version[SXE_VERSION_LEN];
+} sxe_version_resp_s;
+
+#endif 
diff --git a/drivers/net/sxe/include/readme.txt b/drivers/net/sxe/include/readme.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/drivers/net/sxe/include/sxe/mgl/sxe_port.h b/drivers/net/sxe/include/sxe/mgl/sxe_port.h
new file mode 100644
index 0000000000..e41cb9e87b
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/mgl/sxe_port.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_PORT_H__
+#define __SXE_PORT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "mgc_types.h"
+#include "ps3_types.h"
+
+/* management-layer (MGL) SET command codes for the port module */
+typedef enum MglPortCmdSetCode{
+    MGL_CMD_PORT_SET_BASE      = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0),
+    MGL_CMD_PORT_SET_REG       = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1),
+    MGL_CMD_PORT_SET_LED       = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2),
+    MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3),
+    MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4),
+    MGL_CMD_SXE_SOC_RST        = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5),
+    MGL_CMD_SXE_SET_MFGINFO    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6),
+    MGL_CMD_SXE_SET_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7),
+    MGL_CMD_SXE_OPT_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8),
+} MglPortCmdSetCode_e;
+
+/* management-layer (MGL) GET command codes for the port module */
+typedef enum MglPortCmdGetCode{
+    MGL_CMD_SXE_GET_REG        = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0),
+    MGL_CMD_SXE_GET_SOC_INFO   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1),
+    MGL_CMD_SXE_LOG_EXPORT     = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2),
+    MGL_CMD_SXE_REGS_DUMP      = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3),
+    MGL_CMD_SXE_GET_MFGINFO    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4),
+    MGL_CMD_SXE_MAC_ADDR_GET   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5),
+    MGL_CMD_SXE_GET_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6),
+} MglPortCmdGetCode_e;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_cli.h b/drivers/net/sxe/include/sxe/sxe_cli.h
new file mode 100644
index 0000000000..206cc48542
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_cli.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_CLI_H__
+#define __SXE_CLI_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_VERION_LEN                  (32)
+#define SXE_MAC_NUM                     (128)
+#define SXE_PORT_TRANSCEIVER_LEN        (32)
+#define SXE_PORT_VENDOR_LEN             (32)
+#define SXE_CHIP_TYPE_LEN               (32)
+#define SXE_VPD_SN_LEN                  (16)
+#define SXE_SOC_RST_TIME                (0x93A80)  
+#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)        
+#define MGC_TERMLOG_INFO_MAX_LEN        (12 * 1024)
+#define SXE_REGS_DUMP_MAX_LEN           (12 * 1024)
+#define SXE_PRODUCT_NAME_LEN        (32)       
+
+/* identify-LED control modes */
+typedef enum sxe_led_mode {
+    SXE_IDENTIFY_LED_BLINK_ON   = 0,
+    SXE_IDENTIFY_LED_BLINK_OFF,
+    SXE_IDENTIFY_LED_ON,
+    SXE_IDENTIFY_LED_OFF,
+    SXE_IDENTIFY_LED_RESET,
+} sxe_led_mode_s;
+
+/* LED control request.  NOTE(review): the unit of 'duration' is not
+ * visible here - confirm (seconds vs. milliseconds).
+ */
+typedef struct sxe_led_ctrl {
+    U32    mode;
+    U32    duration;
+
+} sxe_led_ctrl_s;
+
+/* LED control response: acknowledgement code from the firmware */
+typedef struct sxe_led_ctrl_resp {
+    U32    ack;
+} sxe_led_ctrl_resp_s;
+
+typedef enum PortLinkSpeed {
+    PORT_LINK_NO            = 0,     
+    PORT_LINK_100M          = 1,     
+    PORT_LINK_1G            = 2,     
+    PORT_LINK_10G           = 3,     
+} PortLinkSpeed_e;
+
+typedef struct SysSocInfo {
+    S8     fwVer[SXE_VERION_LEN];        
+    S8     optVer[SXE_VERION_LEN];       
+    U8     socStatus;                    
+    U8     pad[3];
+    S32    socTemp;                      
+    U64    chipId;                       
+    S8     chipType[SXE_CHIP_TYPE_LEN];  
+    S8     pba[SXE_VPD_SN_LEN];          
+    S8     productName[SXE_PRODUCT_NAME_LEN];   
+} SysSocInfo_s;
+
+typedef struct SysPortInfo {
+    U64    mac[SXE_MAC_NUM];         
+    U8     isPortAbs;                
+    U8     linkStat;                 
+    U8     linkSpeed;                
+
+
+    U8     isSfp:1;                                     
+    U8     isGetInfo:1;                                 
+    U8     rvd:6;                                       
+    S8     opticalModTemp;                              
+    U8     pad[3];
+    S8     transceiverType[SXE_PORT_TRANSCEIVER_LEN];   
+    S8     vendorName[SXE_PORT_VENDOR_LEN];             
+    S8     vendorPn[SXE_PORT_VENDOR_LEN];               
+} SysPortInfo_s;
+
+typedef struct SysInfoResp {
+    SysSocInfo_s     socInfo;        
+    SysPortInfo_s    portInfo;       
+} SysInfoResp_s;
+
+typedef enum SfpTempTdMode {
+    SFP_TEMP_THRESHOLD_MODE_ALARM   = 0,
+    SFP_TEMP_THRESHOLD_MODE_WARN,
+} SfpTempTdMode_e;
+
+typedef struct SfpTempTdSet{
+    U8     mode;             
+    U8     pad[3];
+    S8     hthreshold;       
+    S8     lthreshold;       
+} SfpTempTdSet_s;
+
+typedef struct SxeLogExportResp {
+    U16    curLogLen;       
+    U8     isEnd;
+    U8     pad;
+    S32    sessionId;       
+    S8     data[0];
+} SxeLogExportResp_s;
+
+typedef enum SxeLogExportType  {
+    SXE_LOG_EXPORT_REQ    = 0,     
+    SXE_LOG_EXPORT_FIN,            
+    SXE_LOG_EXPORT_ABORT,          
+} SxeLogExportType_e;
+
+typedef struct SxeLogExportReq {
+    U8     isALLlog;       
+    U8     cmdtype;        
+    U8     isBegin;        
+    U8     pad;
+    S32    sessionId;      
+    U32    logLen;         
+} SxeLogExportReq_s;
+
+typedef struct SocRstReq {
+    U32    time;        
+} SocRstReq_s;
+
+typedef struct RegsDumpResp {
+    U32    curdwLen;    
+    U8     data[0];
+} RegsDumpResp_s;
+
+enum {
+    SXE_MFG_PART_NUMBER_LEN   = 8,
+    SXE_MFG_SERIAL_NUMBER_LEN = 16,
+    SXE_MFG_REVISION_LEN      = 4,
+    SXE_MFG_OEM_STR_LEN       = 64,
+    SXE_MFG_SXE_BOARD_ASSEMBLY_LEN  = 32,
+    SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
+    SXE_MFG_SXE_MAC_ADDR_CNT        = 2,
+};
+
+typedef struct sxeMfgInfo {
+    U8 partNumber[SXE_MFG_PART_NUMBER_LEN];      
+    U8 serialNumber [SXE_MFG_SERIAL_NUMBER_LEN]; 
+    U32 mfgDate;                               
+    U8 revision[SXE_MFG_REVISION_LEN];         
+    U32 reworkDate;                            
+    U8 pad[4];
+    U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT];             
+    U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN]; 
+    U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];  
+    U8 extra1[SXE_MFG_OEM_STR_LEN];                    
+    U8 extra2[SXE_MFG_OEM_STR_LEN];                    
+} sxeMfgInfo_t;
+
+typedef struct RegsDumpReq {
+    U32    baseAddr;    
+    U32    dwLen;       
+} RegsDumpReq_s;
+
+typedef enum sxe_pcs_mode {
+    SXE_PCS_MODE_1000BASE_KX_WO = 0, 
+    SXE_PCS_MODE_1000BASE_KX_W,      
+    SXE_PCS_MODE_SGMII,              
+    SXE_PCS_MODE_10GBASE_KR_WO,      
+    SXE_PCS_MODE_AUTO_NEGT_73,       
+    SXE_PCS_MODE_LPBK_PHY_TX2RX,     
+    SXE_PCS_MODE_LPBK_PHY_RX2TX,     
+    SXE_PCS_MODE_LPBK_PCS_RX2TX,     
+    SXE_PCS_MODE_BUTT,               
+} sxe_pcs_mode_e;
+
+typedef enum sxe_remote_fault_mode {
+	SXE_REMOTE_FALUT_NO_ERROR		= 0,
+	SXE_REMOTE_FALUT_OFFLINE,
+	SXE_REMOTE_FALUT_LINK_FAILURE,
+	SXE_REMOTE_FALUT_AUTO_NEGOTIATION,
+	SXE_REMOTE_UNKNOWN,
+} sxe_remote_fault_e;
+
+typedef struct sxe_phy_cfg {
+    sxe_pcs_mode_e mode;          
+    U32 mtu;
+} sxe_pcs_cfg_s;
+
+typedef enum sxe_an_speed {
+    SXE_AN_SPEED_NO_LINK = 0,
+    SXE_AN_SPEED_100M,
+    SXE_AN_SPEED_1G,      
+    SXE_AN_SPEED_10G,     
+    SXE_AN_SPEED_UNKNOWN,
+} sxe_an_speed_e;
+
+typedef enum sxe_phy_pause_cap {
+    SXE_PAUSE_CAP_NO_PAUSE    = 0,   
+    SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,  
+    SXE_PAUSE_CAP_SYMMETRIC_PAUSE,   
+    SXE_PAUSE_CAP_BOTH_PAUSE,        
+    SXE_PAUSE_CAP_UNKNOWN,
+} sxe_phy_pause_cap_e;
+
+typedef enum sxe_phy_duplex_type {
+    SXE_FULL_DUPLEX	= 0,	  
+    SXE_HALF_DUPLEX	= 1,	  
+    SXE_UNKNOWN_DUPLEX,
+} sxe_phy_duplex_type_e;
+
+typedef struct sxe_phy_an_cap {
+    sxe_remote_fault_e   remote_fault; 
+    sxe_phy_pause_cap_e  pause_cap;    
+    sxe_phy_duplex_type_e duplex_cap;  
+} sxe_phy_an_cap_s;
+
+typedef struct sxe_an_cap {
+    sxe_phy_an_cap_s local;     
+    sxe_phy_an_cap_s peer;      
+} sxe_an_cap_s;
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_hdc.h b/drivers/net/sxe/include/sxe/sxe_hdc.h
new file mode 100644
index 0000000000..bbdc273bf9
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_hdc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_HDC_H__
+#define __SXE_HDC_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define HDC_CACHE_TOTAL_LEN     (16 *1024)    
+#define ONE_PACKET_LEN_MAX      (1024)        
+#define DWORD_NUM               (256)         
+#define HDC_TRANS_RETRY_COUNT   (3)           
+
+
+typedef enum SxeHdcErrnoCode {
+    PKG_OK            = 0,     
+    PKG_ERR_REQ_LEN,           
+    PKG_ERR_RESP_LEN,          
+    PKG_ERR_PKG_SKIP,          
+    PKG_ERR_NODATA,            
+    PKG_ERR_PF_LK,             
+    PKG_ERR_OTHER,
+} SxeHdcErrnoCode_e;
+
+/*
+ * Host-driver-communication packet header, accessible either as the
+ * individual bitfields or as the raw dword dw0.
+ * NOTE(review): bitfield layout within a storage unit is
+ * implementation-defined in C; this presumably assumes the GCC
+ * little-endian ordering - confirm before relying on dw0 portability.
+ */
+typedef union HdcHeader {
+    struct {
+        U8 pid:4;
+        U8 errCode:4;
+        U8 len;
+        U16 startPkg:1;
+        U16 endPkg:1;
+        U16 isRd:1;
+        U16 msi:1;
+        U16 totalLen:12;
+    } head;
+    U32 dw0;
+} HdcHeader_u;
+
+#endif 
+
diff --git a/drivers/net/sxe/include/sxe/sxe_ioctl.h b/drivers/net/sxe/include/sxe/sxe_ioctl.h
new file mode 100644
index 0000000000..4f39b0f92c
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_ioctl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef _SXE_IOCTL_H_
+#define _SXE_IOCTL_H_
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+struct SxeIoctlSyncCmd {
+    U64   traceid;     /* trace id carried through the command path */
+    void *inData;      /* input buffer */
+    U32   inLen;       /* length of inData */
+    void *outData;     /* output buffer */
+    U32   outLen;      /* length of outData */
+};
+
+#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct SxeIoctlSyncCmd)
+
+#endif
diff --git a/drivers/net/sxe/include/sxe/sxe_msg.h b/drivers/net/sxe/include/sxe/sxe_msg.h
new file mode 100644
index 0000000000..3db4e60ce5
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_msg.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_MSG_H__
+#define __SXE_MSG_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_MAC_ADDR_LEN 6
+
+#define SXE_HDC_CMD_HDR_SIZE  sizeof(struct sxe_hdc_cmd_hdr)
+#define SXE_HDC_MSG_HDR_SIZE  sizeof(struct sxe_hdc_drv_cmd_msg)
+
+enum sxe_cmd_type {
+    SXE_CMD_TYPE_CLI,      /* command originated from CLI tooling (per name) */
+    SXE_CMD_TYPE_DRV,      /* command originated from the driver (per name) */
+    SXE_CMD_TYPE_UNKOWN,   /* NOTE(review): typo for "UNKNOWN" — rename across the driver before merge */
+};
+
+/* Header prefixed to every HDC command; 8 bytes total (1 + 1 + 6 pad). */
+typedef struct sxe_hdc_cmd_hdr {
+    U8 cmd_type;       /* presumably an enum sxe_cmd_type value */
+    U8 cmd_sub_type;   /* sub-opcode within cmd_type — confirm against firmware spec */
+} sxe_hdc_cmd_hdr_s;
+
+
+
+typedef enum SxeFWState {
+    SXE_FW_START_STATE_UNDEFINED    = 0x00,   /* firmware state not yet reported */
+    SXE_FW_START_STATE_INIT_BASE    = 0x10,   /* base initialization phase */
+    SXE_FW_START_STATE_SCAN_DEVICE  = 0x20,   /* device scan phase */
+    SXE_FW_START_STATE_FINISHED     = 0x30,   /* firmware start complete */
+    SXE_FW_START_STATE_UPGRADE      = 0x31,   /* firmware upgrade in progress */
+    SXE_FW_RUNNING_STATE_ABNOMAL    = 0x40,   /* NOTE(review): typo for "ABNORMAL" — rename before merge */
+    SXE_FW_START_STATE_MASK         = 0xF0,   /* mask for the major state nibble */
+}SxeFWState_e;
+
+typedef struct SxeFWStateInfo {
+    U8 socStatus;          /* SoC status code (per name — confirm semantics) */
+    char statBuff[32];       /* status text buffer */
+} SxeFWStateInfo_s;
+
+
+typedef enum MsiEvt {
+    MSI_EVT_SOC_STATUS          = 0x1,   /* SoC status change event */
+    MSI_EVT_HDC_FWOV            = 0x2,   /* HDC firmware event (FWOV — confirm meaning) */
+    MSI_EVT_HDC_TIME_SYNC       = 0x4,   /* HDC time synchronization event */
+
+    MSI_EVT_MAX                 = 0x80000000,   /* forces 32-bit enum width */
+} MsiEvt_u;   /* NOTE(review): "_u" suffix suggests a union but this is an enum */
+
+
+typedef enum SxeFwHdcState {
+    SXE_FW_HDC_TRANSACTION_IDLE = 0x01,   /* HDC channel free */
+    SXE_FW_HDC_TRANSACTION_BUSY,          /* HDC transaction in progress */
+
+    SXE_FW_HDC_TRANSACTION_ERR,           /* HDC channel in error state */
+} SxeFwHdcState_e;
+
+enum sxe_hdc_cmd_opcode {   /* opcodes carried in sxe_hdc_drv_cmd_msg.opcode */
+    SXE_CMD_SET_WOL         = 1,
+    SXE_CMD_LED_CTRL,
+    SXE_CMD_SFP_READ,
+    SXE_CMD_SFP_WRITE,
+    SXE_CMD_TX_DIS_CTRL     = 5,
+    SXE_CMD_TINE_SYNC,   /* NOTE(review): typo for "TIME_SYNC" — rename before merge */
+    SXE_CMD_RATE_SELECT,
+    SXE_CMD_R0_MAC_GET,
+    SXE_CMD_LOG_EXPORT,
+    SXE_CMD_FW_VER_GET  = 10,
+    SXE_CMD_PCS_SDS_INIT,         /* PCS/SerDes initialization */
+    SXE_CMD_AN_SPEED_GET,         /* query autoneg speed */
+    SXE_CMD_AN_CAP_GET,           /* query autoneg capabilities (sxe_an_cap_s) */
+    SXE_CMD_GET_SOC_INFO,         /* query SoC information */
+    SXE_CMD_MNG_RST = 15,         /* management reset (sxe_mng_rst_s) */
+
+    SXE_CMD_MAX,
+};
+
+enum sxe_hdc_cmd_errcode {
+    SXE_ERR_INVALID_PARAM = 1,
+};
+
+typedef struct sxe_hdc_drv_cmd_msg {
+    /* Driver command message header, followed by a variable-length body. */
+    U16 opcode;        /* enum sxe_hdc_cmd_opcode */
+    U16 errcode;       /* enum sxe_hdc_cmd_errcode */
+    union dataLength {
+        U16 req_len;   /* body length when used as a request */
+        U16 ack_len;   /* body length when used as an ack/response */
+    } length;
+    U8 reserve[8];
+    U64 traceid;       /* trace id echoed between driver and firmware */
+    U8 body[];         /* C99 flexible array member (was GNU zero-length [0]) */
+} sxe_hdc_drv_cmd_msg_s;
+
+
+typedef struct sxe_sfp_rw_req {
+    U16 offset;        /* SFP access offset */
+    U16 len;           /* number of bytes to read or write */
+    U8  write_data[];  /* C99 flexible array member (was GNU zero-length [0]) */
+} sxe_sfp_rw_req_s;
+
+
+typedef struct sxe_sfp_read_resp {
+    U16 len;     /* number of valid bytes in resp */
+    U8  resp[];  /* C99 flexible array member (was GNU zero-length [0]) */
+} sxe_sfp_read_resp_s;
+
+typedef enum sxe_sfp_rate {
+    SXE_SFP_RATE_1G     = 0,   /* 1 Gb/s */
+    SXE_SFP_RATE_10G    = 1,   /* 10 Gb/s */
+} sxe_sfp_rate_e;
+
+
+typedef struct sxe_sfp_rate_able {
+    sxe_sfp_rate_e rate;       /* requested SFP rate selection */
+} sxe_sfp_rate_able_s;
+
+
+typedef struct sxe_spp_tx_able {
+    BOOL isDisable;       /* TRUE to disable transmit (per name) */
+} sxe_spp_tx_able_s;
+
+
+typedef struct sxe_default_mac_addr_resp {
+    U8  addr[SXE_MAC_ADDR_LEN]; /* factory default MAC address */
+} sxe_default_mac_addr_resp_s;
+
+
+typedef struct sxe_mng_rst {
+    BOOL enable;       /* enable/disable management reset (per name) */
+} sxe_mng_rst_s;
+
+#endif 
+
diff --git a/drivers/net/sxe/include/sxe/sxe_regs.h b/drivers/net/sxe/include/sxe/sxe_regs.h
new file mode 100644
index 0000000000..0652cd4906
--- /dev/null
+++ b/drivers/net/sxe/include/sxe/sxe_regs.h
@@ -0,0 +1,1276 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+ 
+#ifndef __SXE_REGS_H__
+#define __SXE_REGS_H__
+
+#define SXE_LINKSEC_MAX_SC_COUNT 1
+#define SXE_LINKSEC_MAX_SA_COUNT 2
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+
+
+#define SXE_REG_READ_FAIL	0xffffffffU
+#define SXE_REG_READ_RETRY	5
+#ifdef SXE_TEST
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	(1)
+#else
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	(800)
+#endif
+
+
+#define SXE_CTRL	0x00000 
+#define SXE_STATUS	0x00008 
+#define SXE_CTRL_EXT	0x00018 
+
+
+#define SXE_CTRL_LNK_RST	0x00000008
+#define SXE_CTRL_RST		0x04000000
+
+#ifdef SXE_TEST
+#define SXE_CTRL_RST_MASK	(0)
+#define SXE_CTRL_GIO_DIS	(0)
+#else
+#define SXE_CTRL_RST_MASK	(SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+#define SXE_CTRL_GIO_DIS	0x00000004
+#endif
+
+
+#define SXE_STATUS_GIO		0x00080000
+
+
+#define SXE_CTRL_EXT_PFRSTD	0x00004000
+#define SXE_CTRL_EXT_NS_DIS	0x00010000
+#define SXE_CTRL_EXT_DRV_LOAD	0x10000000
+
+
+#define SXE_FCRTL(_i)	(0x03220 + ((_i) * 4))
+#define SXE_FCRTH(_i)	(0x03260 + ((_i) * 4))
+#define SXE_FCCFG	0x03D00
+
+
+#define SXE_FCRTL_XONE		0x80000000
+#define SXE_FCRTH_FCEN		0x80000000
+
+#define SXE_FCCFG_TFCE_802_3X	0x00000008
+#define SXE_FCCFG_TFCE_PRIORITY	0x00000010
+
+
+#define SXE_GCR_EXT           0x11050 
+
+
+#define SXE_GCR_CMPL_TMOUT_MASK		0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms		0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND	0x00010000
+#define SXE_GCR_CAP_VER2		0x00040000
+#define SXE_GCR_EXT_MSIX_EN		0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR	0x40000000
+#define SXE_GCR_EXT_VT_MODE_16		0x00000001
+#define SXE_GCR_EXT_VT_MODE_32		0x00000002
+#define SXE_GCR_EXT_VT_MODE_64		0x00000003
+#define SXE_GCR_EXT_VT_MODE_MASK	0x00000003
+#define SXE_GCR_EXT_SRIOV		(SXE_GCR_EXT_MSIX_EN | \
+					SXE_GCR_EXT_VT_MODE_64)
+
+#define SXE_PCI_DEVICE_STATUS		0x7A
+#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
+#define SXE_PCI_LINK_STATUS		0x82
+#define SXE_PCI_DEVICE_CONTROL2		0x98
+#define SXE_PCI_LINK_WIDTH		0x3F0
+#define SXE_PCI_LINK_WIDTH_1		0x10
+#define SXE_PCI_LINK_WIDTH_2		0x20
+#define SXE_PCI_LINK_WIDTH_4		0x40
+#define SXE_PCI_LINK_WIDTH_8		0x80
+#define SXE_PCI_LINK_SPEED		0xF
+#define SXE_PCI_LINK_SPEED_2500		0x1
+#define SXE_PCI_LINK_SPEED_5000		0x2
+#define SXE_PCI_LINK_SPEED_8000		0x3
+#define SXE_PCI_HEADER_TYPE_REGISTER	0x0E
+#define SXE_PCI_HEADER_TYPE_MULTIFUNC	0x80
+#define SXE_PCI_DEVICE_CONTROL2_16ms	0x0005
+
+#define SXE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define SXE_PCIDEVCTRL2_16_32ms_def	0x0
+#define SXE_PCIDEVCTRL2_50_100us	0x1
+#define SXE_PCIDEVCTRL2_1_2ms		0x2
+#define SXE_PCIDEVCTRL2_16_32ms		0x5
+#define SXE_PCIDEVCTRL2_65_130ms	0x6
+#define SXE_PCIDEVCTRL2_260_520ms	0x9
+#define SXE_PCIDEVCTRL2_1_2s		0xa
+#define SXE_PCIDEVCTRL2_4_8s		0xd
+#define SXE_PCIDEVCTRL2_17_34s		0xe
+
+
+#define SXE_EICR	0x00800
+#define SXE_EICS	0x00808
+#define SXE_EIMS	0x00880
+#define SXE_EIMC	0x00888
+#define SXE_EIAC	0x00810
+#define SXE_EIAM	0x00890
+#define SXE_EITRSEL	0x00894
+#define SXE_GPIE	0x00898
+#define SXE_IVAR(i)	(0x00900 + (i) * 4)
+#define SXE_IVAR_MISC	0x00A00
+#define SXE_EICS_EX(i)	(0x00A90 + (i) * 4)
+#define SXE_EIMS_EX(i)	(0x00AA0 + (i) * 4)
+#define SXE_EIMC_EX(i)	(0x00AB0 + (i) * 4)
+#define SXE_EIAM_EX(i)	(0x00AD0 + (i) * 4)
+#define SXE_EITR(i)	(((i) <= 23) ? (0x00820 + ((i) * 4)) : \
+                        	(0x012300 + (((i) - 24) * 4)))
+
+#define SXE_SPP_PROC	0x00AD8
+#define SXE_SPP_STATE	0x00AF4
+
+
+
+#define SXE_EICR_RTX_QUEUE	0x0000FFFF
+#define SXE_EICR_FLOW_NAV	0x00010000
+#define SXE_EICR_MAILBOX	0x00080000
+#define SXE_EICR_LSC		0x00100000
+#define SXE_EICR_LINKSEC	0x00200000
+#define SXE_EICR_ECC		0x10000000
+#define SXE_EICR_HDC		0x20000000
+#define SXE_EICR_TCP_TIMER	0x40000000
+#define SXE_EICR_OTHER		0x80000000
+
+
+#define SXE_EICS_RTX_QUEUE	SXE_EICR_RTX_QUEUE
+#define SXE_EICS_FLOW_NAV	SXE_EICR_FLOW_NAV 
+#define SXE_EICS_MAILBOX	SXE_EICR_MAILBOX  
+#define SXE_EICS_LSC		SXE_EICR_LSC      
+#define SXE_EICS_ECC		SXE_EICR_ECC      
+#define SXE_EICS_HDC		SXE_EICR_HDC      
+#define SXE_EICS_TCP_TIMER	SXE_EICR_TCP_TIMER
+#define SXE_EICS_OTHER		SXE_EICR_OTHER    
+
+
+#define SXE_EIMS_RTX_QUEUE	SXE_EICR_RTX_QUEUE
+#define SXE_EIMS_FLOW_NAV	SXE_EICR_FLOW_NAV
+#define SXE_EIMS_MAILBOX	SXE_EICR_MAILBOX
+#define SXE_EIMS_LSC		SXE_EICR_LSC
+#define SXE_EIMS_ECC		SXE_EICR_ECC
+#define SXE_EIMS_HDC		SXE_EICR_HDC
+#define SXE_EIMS_TCP_TIMER	SXE_EICR_TCP_TIMER
+#define SXE_EIMS_OTHER		SXE_EICR_OTHER
+#define SXE_EIMS_ENABLE_MASK	(SXE_EIMS_RTX_QUEUE | SXE_EIMS_LSC | \
+					SXE_EIMS_TCP_TIMER | SXE_EIMS_OTHER)
+
+#define SXE_EIMC_FLOW_NAV	SXE_EICR_FLOW_NAV 
+#define SXE_EIMC_LSC		SXE_EICR_LSC      
+#define SXE_EIMC_HDC		SXE_EICR_HDC      
+
+
+#define SXE_GPIE_SPP0_EN	0x00000001
+#define SXE_GPIE_SPP1_EN	0x00000002
+#define SXE_GPIE_SPP2_EN	0x00000004
+#define SXE_GPIE_MSIX_MODE	0x00000010
+#define SXE_GPIE_OCD		0x00000020
+#define SXE_GPIE_EIMEN		0x00000040
+#define SXE_GPIE_EIAME		0x40000000
+#define SXE_GPIE_PBA_SUPPORT	0x80000000
+#define SXE_GPIE_VTMODE_MASK	0x0000C000
+#define SXE_GPIE_VTMODE_16	0x00004000
+#define SXE_GPIE_VTMODE_32	0x00008000
+#define SXE_GPIE_VTMODE_64	0x0000C000
+
+
+#define SXE_IVAR_ALLOC_VALID	0x80
+
+
+#define SXE_EITR_CNT_WDIS	0x80000000
+#define SXE_EITR_ITR_MASK	0x00000FF8
+#define SXE_EITR_ITR_SHIFT	2
+#define SXE_EITR_ITR_MAX	(SXE_EITR_ITR_MASK >> SXE_EITR_ITR_SHIFT)
+
+
+#define SXE_EICR_GPI_SPP0	0x01000000
+#define SXE_EICR_GPI_SPP1	0x02000000
+#define SXE_EICR_GPI_SPP2	0x04000000
+#define SXE_EIMS_GPI_SPP0	SXE_EICR_GPI_SPP0
+#define SXE_EIMS_GPI_SPP1	SXE_EICR_GPI_SPP1
+#define SXE_EIMS_GPI_SPP2	SXE_EICR_GPI_SPP2
+
+
+#define SXE_SPP_PROC_SPP2_TRIGGER	0x00300000
+#define SXE_SPP_PROC_SPP2_TRIGGER_MASK	0xFFCFFFFF
+#define SXE_SPP_PROC_DELAY_US_MASK	0x0000FFFF
+#define SXE_SPP_PROC_DELAY_US		0x00000007
+
+
+#define SXE_IRQ_CLEAR_MASK	0xFFFFFFFF
+
+
+#define SXE_RXCSUM		0x05000
+#define SXE_RFCTL		0x05008
+#define SXE_FCTRL		0x05080
+#define SXE_EXVET               0x05078
+#define SXE_VLNCTRL		0x05088
+#define SXE_MCSTCTRL		0x05090
+#define SXE_ETQF(_i)		(0x05128 + ((_i) * 4))
+#define SXE_ETQS(_i)		(0x0EC00 + ((_i) * 4))
+#define SXE_SYNQF		0x0EC30
+#define SXE_MTA(_i)		(0x05200 + ((_i) * 4))
+#define SXE_UTA(_i)		(0x0F400 + ((_i) * 4))
+#define SXE_VFTA(_i)		(0x0A000 + ((_i) * 4))
+#define SXE_RAL(_i)		(0x0A200 + ((_i) * 8))
+#define SXE_RAH(_i)		(0x0A204 + ((_i) * 8))
+#define SXE_MPSAR_LOW(_i)	(0x0A600 + ((_i) * 8))
+#define SXE_MPSAR_HIGH(_i)	(0x0A604 + ((_i) * 8))
+#define SXE_PSRTYPE(_i)		(0x0EA00 + ((_i) * 4))
+#define SXE_RETA(_i)		(0x0EB00 + ((_i) * 4)) 
+#define SXE_RSSRK(_i)		(0x0EB80 + ((_i) * 4)) 
+#define SXE_RQTC		0x0EC70
+#define SXE_MRQC		0x0EC80
+#define SXE_IEOI		0x0F654
+#define SXE_PL			0x0F658
+#define SXE_LPL			0x0F65C
+
+
+#define SXE_ETQF_CNT			8
+#define SXE_MTA_CNT				128
+#define SXE_UTA_CNT				128
+#define SXE_VFTA_CNT			128
+#define SXE_RAR_CNT				128
+#define SXE_MPSAR_CNT			128
+
+
+#define SXE_EXVET_DEFAULT		0x81000000
+#define SXE_VLNCTRL_DEFAULT		0x8100
+#define SXE_IEOI_DEFAULT		0x060005DC
+#define SXE_PL_DEFAULT			0x3e000016
+#define SXE_LPL_DEFAULT			0x26000000
+
+
+#define SXE_RXCSUM_IPPCSE	0x00001000  
+#define SXE_RXCSUM_PCSD		0x00002000  
+
+
+#define SXE_RFCTL_LRO_DIS	0x00000020
+#define SXE_RFCTL_NFSW_DIS	0x00000040
+#define SXE_RFCTL_NFSR_DIS	0x00000080
+
+
+#define SXE_FCTRL_SBP		0x00000002
+#define SXE_FCTRL_MPE		0x00000100
+#define SXE_FCTRL_UPE		0x00000200
+#define SXE_FCTRL_BAM		0x00000400
+#define SXE_FCTRL_PMCF		0x00001000
+#define SXE_FCTRL_DPF		0x00002000
+
+
+#define SXE_VLNCTRL_VET		0x0000FFFF 
+#define SXE_VLNCTRL_CFI		0x10000000 
+#define SXE_VLNCTRL_CFIEN	0x20000000 
+#define SXE_VLNCTRL_VFE		0x40000000 
+#define SXE_VLNCTRL_VME		0x80000000 
+
+#define SXE_EXVET_VET_EXT_SHIFT              16
+#define SXE_EXTENDED_VLAN	             (1 << 26)
+
+
+#define SXE_MCSTCTRL_MFE	4
+
+#define SXE_ETQF_FILTER_EAPOL	0
+#define SXE_ETQF_FILTER_1588	3
+#define SXE_ETQF_FILTER_FIP	4
+#define SXE_ETQF_FILTER_LLDP	5
+#define SXE_ETQF_FILTER_LACP	6
+#define SXE_ETQF_FILTER_FC	7
+#define SXE_MAX_ETQF_FILTERS	8
+#define SXE_ETQF_1588		0x40000000
+#define SXE_ETQF_FILTER_EN	0x80000000
+#define SXE_ETQF_POOL_ENABLE	BIT(26)
+#define SXE_ETQF_POOL_SHIFT	20
+
+
+#define SXE_ETQS_RX_QUEUE	0x007F0000
+#define SXE_ETQS_RX_QUEUE_SHIFT	16
+#define SXE_ETQS_LLI		0x20000000
+#define SXE_ETQS_QUEUE_EN	0x80000000
+
+
+#define SXE_SYN_FILTER_ENABLE         0x00000001
+#define SXE_SYN_FILTER_QUEUE          0x000000FE
+#define SXE_SYN_FILTER_QUEUE_SHIFT    1
+#define SXE_SYN_FILTER_SYNQFP         0x80000000
+
+
+#define SXE_RAH_VIND_MASK	0x003C0000
+#define SXE_RAH_VIND_SHIFT	18
+#define SXE_RAH_AV		0x80000000
+#define SXE_CLEAR_VMDQ_ALL	0xFFFFFFFF
+
+
+#define SXE_PSRTYPE_TCPHDR	0x00000010
+#define SXE_PSRTYPE_UDPHDR	0x00000020
+#define SXE_PSRTYPE_IPV4HDR	0x00000100
+#define SXE_PSRTYPE_IPV6HDR	0x00000200
+#define SXE_PSRTYPE_L2HDR	0x00001000
+
+
+#define SXE_MRQC_RSSEN                 0x00000001 
+#define SXE_MRQC_MRQE_MASK                    0xF
+#define SXE_MRQC_RT8TCEN               0x00000002
+#define SXE_MRQC_RT4TCEN               0x00000003
+#define SXE_MRQC_RTRSS8TCEN            0x00000004
+#define SXE_MRQC_RTRSS4TCEN            0x00000005
+#define SXE_MRQC_VMDQEN                0x00000008
+#define SXE_MRQC_VMDQRSS32EN           0x0000000A
+#define SXE_MRQC_VMDQRSS64EN           0x0000000B
+#define SXE_MRQC_VMDQRT8TCEN           0x0000000C
+#define SXE_MRQC_VMDQRT4TCEN           0x0000000D
+#define SXE_MRQC_RSS_FIELD_MASK        0xFFFF0000
+#define SXE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
+#define SXE_MRQC_RSS_FIELD_IPV4        0x00020000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX     0x00080000
+#define SXE_MRQC_RSS_FIELD_IPV6        0x00100000
+#define SXE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000
+#define SXE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
+#define SXE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+
+
+#define SXE_RDBAL(_i)		(((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+					(0x0D000 + (((_i) - 64) * 0x40)))
+#define SXE_RDBAH(_i)		(((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+					(0x0D004 + (((_i) - 64) * 0x40)))
+#define SXE_RDLEN(_i)		(((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+					(0x0D008 + (((_i) - 64) * 0x40)))
+#define SXE_RDH(_i)		(((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+					(0x0D010 + (((_i) - 64) * 0x40)))
+#define SXE_SRRCTL(_i)		(((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+					(0x0D014 + (((_i) - 64) * 0x40)))
+#define SXE_RDT(_i)		(((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+					(0x0D018 + (((_i) - 64) * 0x40)))
+#define SXE_RXDCTL(_i)		(((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+					(0x0D028 + (((_i) - 64) * 0x40)))
+#define SXE_LROCTL(_i)		(((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+					(0x0D02C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL		0x02F00  
+#define SXE_RXCTRL		0x03000 
+#define SXE_LRODBU 		0x03028  
+#define SXE_RXPBSIZE(_i)	(0x03C00 + ((_i) * 4))
+
+#define SXE_DRXCFG		(0x03C20)
+
+
+#define SXE_RXDCTL_CNT			128
+
+
+#define SXE_RXDCTL_DEFAULT		0x40210
+
+
+#define SXE_SRRCTL_DROP_EN		0x10000000
+#define SXE_SRRCTL_BSIZEPKT_SHIFT	(10)
+#define SXE_SRRCTL_BSIZEHDRSIZE_SHIFT	(2)
+#define SXE_SRRCTL_DESCTYPE_DATA_ONEBUF	0x02000000
+#define SXE_SRRCTL_BSIZEPKT_MASK	0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK	0x00003F00
+
+
+#define SXE_RXDCTL_ENABLE	0x02000000 
+#define SXE_RXDCTL_SWFLSH	0x04000000 
+#define SXE_RXDCTL_VME		0x40000000 
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT	8
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT	16
+
+
+#define SXE_LROCTL_LROEN	0x01
+#define SXE_LROCTL_MAXDESC_1	0x00
+#define SXE_LROCTL_MAXDESC_4	0x04
+#define SXE_LROCTL_MAXDESC_8	0x08
+#define SXE_LROCTL_MAXDESC_16	0x0C
+
+
+#define SXE_RDRXCTL_RDMTS_1_2	0x00000000
+#define SXE_RDRXCTL_RDMTS_EN	0x00200000
+#define SXE_RDRXCTL_CRCSTRIP	0x00000002
+#define SXE_RDRXCTL_PSP		0x00000004
+#define SXE_RDRXCTL_MVMEN	0x00000020
+#define SXE_RDRXCTL_DMAIDONE	0x00000008
+#define SXE_RDRXCTL_AGGDIS	0x00010000
+#define SXE_RDRXCTL_LROFRSTSIZE	0x003E0000
+#define SXE_RDRXCTL_LROLLIDIS	0x00800000
+#define SXE_RDRXCTL_LROACKC	0x02000000
+#define SXE_RDRXCTL_FCOE_WRFIX	0x04000000
+#define SXE_RDRXCTL_MBINTEN	0x10000000
+#define SXE_RDRXCTL_MDP_EN	0x20000000
+#define SXE_RDRXCTL_MPBEN	0x00000010
+
+#define SXE_RDRXCTL_MCEN	0x00000040
+
+
+
+#define SXE_RXCTRL_RXEN		0x00000001
+
+
+#define SXE_LRODBU_LROACKDIS	0x00000080
+
+
+#define SXE_DRXCFG_GSP_ZERO    0x00000002
+#define SXE_DRXCFG_DBURX_START 0x00000001
+
+
+#define SXE_DMATXCTL		0x04A80   
+#define SXE_TDBAL(_i)		(0x06000 + ((_i) * 0x40))
+#define SXE_TDBAH(_i)		(0x06004 + ((_i) * 0x40))
+#define SXE_TDLEN(_i)		(0x06008 + ((_i) * 0x40))
+#define SXE_TDH(_i)		(0x06010 + ((_i) * 0x40))
+#define SXE_TDT(_i)		(0x06018 + ((_i) * 0x40))
+#define SXE_TXDCTL(_i)		(0x06028 + ((_i) * 0x40))
+#define SXE_PVFTDWBAL(p)	(0x06038 + (0x40 * (p)))
+#define SXE_PVFTDWBAH(p)	(0x0603C + (0x40 * (p)))
+#define SXE_TXPBSIZE(_i)	(0x0CC00 + ((_i) * 4))
+#define SXE_TXPBTHRESH(_i)	(0x04950 + ((_i) * 4))
+#define SXE_MTQC		0x08120               
+#define SXE_TXPBFCS		0x0CE00               
+#define SXE_DTXCFG		0x0CE08               
+#define SXE_DTMPCNT		0x0CE98               
+
+
+#define SXE_DMATXCTL_DEFAULT		0x81000000
+
+
+#define SXE_DMATXCTL_TE		0x1       
+#define SXE_DMATXCTL_GDV	0x8       
+#define SXE_DMATXCTL_VT_SHIFT	16        
+#define SXE_DMATXCTL_VT_MASK    0xFFFF0000
+
+
+#define SXE_TXDCTL_HTHRESH_SHIFT 8
+#define SXE_TXDCTL_WTHRESH_SHIFT 16
+#define SXE_TXDCTL_ENABLE     0x02000000
+#define SXE_TXDCTL_SWFLSH     0x04000000
+
+#define SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, vf_ring_idx) \
+		SXE_PVFTDWBAL((ring_per_pool) * (vf_idx) + vf_ring_idx)
+#define SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, vf_ring_idx) \
+		SXE_PVFTDWBAH((ring_per_pool) * (vf_idx) + vf_ring_idx)
+
+
+#define SXE_MTQC_RT_ENA		0x1
+#define SXE_MTQC_VT_ENA		0x2
+#define SXE_MTQC_64Q_1PB	0x0
+#define SXE_MTQC_32VF		0x8
+#define SXE_MTQC_64VF		0x4
+#define SXE_MTQC_8TC_8TQ	0xC
+#define SXE_MTQC_4TC_4TQ	0x8
+
+
+#define SXE_TFCS_PB0_MASK	0x1
+#define SXE_TFCS_PB1_MASK	0x2
+#define SXE_TFCS_PB2_MASK	0x4
+#define SXE_TFCS_PB3_MASK	0x8
+#define SXE_TFCS_PB4_MASK	0x10
+#define SXE_TFCS_PB5_MASK	0x20
+#define SXE_TFCS_PB6_MASK	0x40
+#define SXE_TFCS_PB7_MASK	0x80
+#define SXE_TFCS_PB_MASK	0xff
+
+
+#define SXE_DTXCFG_DBUTX_START	0x00000001   
+#define SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG	0x20
+
+
+#define SXE_RTRPCS		0x02430
+#define SXE_RTRPT4C(_i)		(0x02140 + ((_i) * 4))
+#define SXE_RTRUP2TC		0x03020
+#define SXE_RTTDCS		0x04900
+#define SXE_RTTDQSEL		0x04904
+#define SXE_RTTDT1C		0x04908
+#define SXE_RTTDT2C(_i)		(0x04910 + ((_i) * 4))
+#define SXE_RTTBCNRM		0x04980
+#define SXE_RTTBCNRC		0x04984
+#define SXE_RTTUP2TC		0x0C800
+#define SXE_RTTPCS		0x0CD00
+#define SXE_RTTPT2C(_i)		(0x0CD20 + ((_i) * 4))
+
+
+#define SXE_RTRPCS_RRM		0x00000002
+#define SXE_RTRPCS_RAC		0x00000004
+#define SXE_RTRPCS_ARBDIS	0x00000040
+
+
+#define SXE_RTRPT4C_MCL_SHIFT	12
+#define SXE_RTRPT4C_BWG_SHIFT	9 
+#define SXE_RTRPT4C_GSP		0x40000000
+#define SXE_RTRPT4C_LSP		0x80000000
+
+
+#define SXE_RTRUP2TC_UP_SHIFT 3
+#define SXE_RTRUP2TC_UP_MASK	7
+
+
+#define SXE_RTTDCS_ARBDIS	0x00000040
+#define SXE_RTTDCS_TDPAC	0x00000001
+
+#define SXE_RTTDCS_VMPAC	0x00000002
+
+#define SXE_RTTDCS_TDRM		0x00000010
+/* (duplicate SXE_RTTDCS_ARBDIS definition removed; see above) */
+#define SXE_RTTDCS_BDPM		0x00400000
+#define SXE_RTTDCS_BPBFSM	0x00800000
+
+#define SXE_RTTDCS_SPEED_CHG	0x80000000
+
+
+#define SXE_RTTDT2C_MCL_SHIFT	12
+#define SXE_RTTDT2C_BWG_SHIFT	9
+#define SXE_RTTDT2C_GSP		0x40000000
+#define SXE_RTTDT2C_LSP		0x80000000
+
+
+#define SXE_RTTBCNRC_RS_ENA		0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK	0x00003FFF
+#define SXE_RTTBCNRC_RF_INT_SHIFT	14
+#define SXE_RTTBCNRC_RF_INT_MASK	\
+			(SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+
+
+#define SXE_RTTUP2TC_UP_SHIFT	3
+
+
+#define SXE_RTTPCS_TPPAC	0x00000020
+
+#define SXE_RTTPCS_ARBDIS	0x00000040
+#define SXE_RTTPCS_TPRM		0x00000100
+#define SXE_RTTPCS_ARBD_SHIFT	22
+#define SXE_RTTPCS_ARBD_DCB	0x4       
+
+
+#define SXE_RTTPT2C_MCL_SHIFT 12
+#define SXE_RTTPT2C_BWG_SHIFT 9
+#define SXE_RTTPT2C_GSP       0x40000000
+#define SXE_RTTPT2C_LSP       0x80000000
+
+
+#define SXE_TPH_CTRL		0x11074
+#define SXE_TPH_TXCTRL(_i)      (0x0600C + ((_i) * 0x40))
+#define SXE_TPH_RXCTRL(_i)	(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+				 (0x0D00C + (((_i) - 64) * 0x40)))
+
+
+#define SXE_TPH_CTRL_ENABLE		0x00000000
+#define SXE_TPH_CTRL_DISABLE		0x00000001
+#define SXE_TPH_CTRL_MODE_CB1		0x00      
+#define SXE_TPH_CTRL_MODE_CB2		0x02      
+
+
+#define SXE_TPH_RXCTRL_DESC_TPH_EN	BIT(5) 
+#define SXE_TPH_RXCTRL_HEAD_TPH_EN	BIT(6) 
+#define SXE_TPH_RXCTRL_DATA_TPH_EN	BIT(7) 
+#define SXE_TPH_RXCTRL_DESC_RRO_EN	BIT(9) 
+#define SXE_TPH_RXCTRL_DATA_WRO_EN	BIT(13)
+#define SXE_TPH_RXCTRL_HEAD_WRO_EN	BIT(15)
+#define SXE_TPH_RXCTRL_CPUID_SHIFT	24     
+
+#define SXE_TPH_TXCTRL_DESC_TPH_EN	BIT(5) 
+#define SXE_TPH_TXCTRL_DESC_RRO_EN	BIT(9) 
+#define SXE_TPH_TXCTRL_DESC_WRO_EN	BIT(11)
+#define SXE_TPH_TXCTRL_DATA_RRO_EN	BIT(13)
+#define SXE_TPH_TXCTRL_CPUID_SHIFT	24     
+
+
+#define SXE_SECTXCTRL		0x08800
+#define SXE_SECTXSTAT		0x08804
+#define SXE_SECTXBUFFAF		0x08808
+#define SXE_SECTXMINIFG		0x08810
+#define SXE_SECRXCTRL		0x08D00
+#define SXE_SECRXSTAT		0x08D04
+#define SXE_LSECTXCTRL            0x08A04
+#define SXE_LSECTXSCL             0x08A08
+#define SXE_LSECTXSCH             0x08A0C
+#define SXE_LSECTXSA              0x08A10
+#define SXE_LSECTXPN(_n)          (0x08A14 + (4 * (_n)))
+#define SXE_LSECTXKEY(_n, _m)     (0x08A1C + ((0x10 * (_n)) + (4 * (_m))))
+#define SXE_LSECRXCTRL            0x08B04
+#define SXE_LSECRXSCL             0x08B08
+#define SXE_LSECRXSCH             0x08B0C
+#define SXE_LSECRXSA(_i)          (0x08B10 + (4 * (_i)))
+#define SXE_LSECRXPN(_i)          (0x08B18 + (4 * (_i)))
+#define SXE_LSECRXKEY(_n, _m)     (0x08B20 + ((0x10 * (_n)) + (4 * (_m))))  
+
+
+#define SXE_SECTXCTRL_SECTX_DIS		0x00000001
+#define SXE_SECTXCTRL_TX_DIS		0x00000002
+#define SXE_SECTXCTRL_STORE_FORWARD	0x00000004
+
+
+#define SXE_SECTXSTAT_SECTX_RDY		0x00000001
+#define SXE_SECTXSTAT_SECTX_OFF_DIS	0x00000002
+#define SXE_SECTXSTAT_ECC_TXERR		0x00000004
+
+
+#define SXE_SECRXCTRL_SECRX_DIS		0x00000001
+#define SXE_SECRXCTRL_RX_DIS		0x00000002
+#define SXE_SECRXCTRL_RP              0x00000080
+
+
+#define SXE_SECRXSTAT_SECRX_RDY		0x00000001
+#define SXE_SECRXSTAT_SECRX_OFF_DIS	0x00000002
+#define SXE_SECRXSTAT_ECC_RXERR		0x00000004
+
+#define SXE_SECTX_DCB_ENABLE_MASK	0x00001F00
+
+#define SXE_LSECTXCTRL_EN_MASK        0x00000003
+#define SXE_LSECTXCTRL_EN_SHIFT       0
+#define SXE_LSECTXCTRL_ES             0x00000010
+#define SXE_LSECTXCTRL_AISCI          0x00000020
+#define SXE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
+#define SXE_LSECTXCTRL_PNTHRSH_SHIFT  8
+#define SXE_LSECTXCTRL_RSV_MASK       0x000000D8
+
+#define SXE_LSECRXCTRL_EN_MASK        0x0000000C
+#define SXE_LSECRXCTRL_EN_SHIFT       2
+#define SXE_LSECRXCTRL_DROP_EN        0x00000010
+#define SXE_LSECRXCTRL_DROP_EN_SHIFT  4
+#define SXE_LSECRXCTRL_PLSH           0x00000040
+#define SXE_LSECRXCTRL_PLSH_SHIFT     6
+#define SXE_LSECRXCTRL_RP             0x00000080
+#define SXE_LSECRXCTRL_RP_SHIFT       7
+#define SXE_LSECRXCTRL_RSV_MASK       0xFFFFFF33
+
+#define SXE_LSECTXSA_AN0_MASK         0x00000003
+#define SXE_LSECTXSA_AN0_SHIFT        0
+#define SXE_LSECTXSA_AN1_MASK         0x0000000C
+#define SXE_LSECTXSA_AN1_SHIFT        2
+#define SXE_LSECTXSA_SELSA            0x00000010
+#define SXE_LSECTXSA_SELSA_SHIFT      4
+#define SXE_LSECTXSA_ACTSA            0x00000020
+
+#define SXE_LSECRXSA_AN_MASK          0x00000003
+#define SXE_LSECRXSA_AN_SHIFT         0
+#define SXE_LSECRXSA_SAV              0x00000004
+#define SXE_LSECRXSA_SAV_SHIFT        2
+#define SXE_LSECRXSA_RETIRED          0x00000010
+#define SXE_LSECRXSA_RETIRED_SHIFT    4
+
+#define SXE_LSECRXSCH_PI_MASK         0xFFFF0000
+#define SXE_LSECRXSCH_PI_SHIFT        16
+
+#define SXE_LSECTXCTRL_DISABLE	0x0
+#define SXE_LSECTXCTRL_AUTH		0x1
+#define SXE_LSECTXCTRL_AUTH_ENCRYPT	0x2
+
+#define SXE_LSECRXCTRL_DISABLE	0x0
+#define SXE_LSECRXCTRL_CHECK		0x1
+#define SXE_LSECRXCTRL_STRICT		0x2
+#define SXE_LSECRXCTRL_DROP		0x3
+#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
+
+
+
+#define SXE_IPSTXIDX		0x08900
+#define SXE_IPSTXSALT		0x08904
+#define SXE_IPSTXKEY(_i)	(0x08908 + (4 * (_i)))
+#define SXE_IPSRXIDX		0x08E00
+#define SXE_IPSRXIPADDR(_i)	(0x08E04 + (4 * (_i)))
+#define SXE_IPSRXSPI		0x08E14
+#define SXE_IPSRXIPIDX		0x08E18
+#define SXE_IPSRXKEY(_i)	(0x08E1C + (4 * (_i)))
+#define SXE_IPSRXSALT		0x08E2C
+#define SXE_IPSRXMOD		0x08E30
+
+
+
+#define SXE_FNAVCTRL		0x0EE00
+#define SXE_FNAVHKEY		0x0EE68
+#define SXE_FNAVSKEY		0x0EE6C
+#define SXE_FNAVDIP4M		0x0EE3C
+#define SXE_FNAVSIP4M		0x0EE40
+#define SXE_FNAVTCPM		0x0EE44
+#define SXE_FNAVUDPM		0x0EE48
+#define SXE_FNAVIP6M		0x0EE74
+#define SXE_FNAVM		0x0EE70
+
+#define SXE_FNAVFREE		0x0EE38
+#define SXE_FNAVLEN		0x0EE4C
+#define SXE_FNAVUSTAT		0x0EE50
+#define SXE_FNAVFSTAT		0x0EE54
+#define SXE_FNAVMATCH		0x0EE58
+#define SXE_FNAVMISS		0x0EE5C
+
+#define SXE_FNAVSIPv6(_i)	(0x0EE0C + ((_i) * 4))
+#define SXE_FNAVIPSA		0x0EE18
+#define SXE_FNAVIPDA		0x0EE1C
+#define SXE_FNAVPORT		0x0EE20
+#define SXE_FNAVVLAN		0x0EE24
+#define SXE_FNAVHASH		0x0EE28
+#define SXE_FNAVCMD		0x0EE2C
+
+
+#define SXE_FNAVCTRL_FLEX_SHIFT			16
+#define SXE_FNAVCTRL_MAX_LENGTH_SHIFT		24
+#define SXE_FNAVCTRL_FULL_THRESH_SHIFT		28
+#define SXE_FNAVCTRL_DROP_Q_SHIFT		8
+#define SXE_FNAVCTRL_PBALLOC_64K		0x00000001
+#define SXE_FNAVCTRL_PBALLOC_128K		0x00000002
+#define SXE_FNAVCTRL_PBALLOC_256K		0x00000003
+#define SXE_FNAVCTRL_INIT_DONE			0x00000008
+#define SXE_FNAVCTRL_SPECIFIC_MATCH		0x00000010
+#define SXE_FNAVCTRL_REPORT_STATUS		0x00000020
+#define SXE_FNAVCTRL_REPORT_STATUS_ALWAYS	0x00000080
+
+#define SXE_FNAVCTRL_FLEX_MASK			(0x1F << SXE_FNAVCTRL_FLEX_SHIFT)
+
+#define SXE_FNAVTCPM_DPORTM_SHIFT		16
+
+#define SXE_FNAVM_VLANID			0x00000001
+#define SXE_FNAVM_VLANP				0x00000002
+#define SXE_FNAVM_POOL				0x00000004
+#define SXE_FNAVM_L4P				0x00000008
+#define SXE_FNAVM_FLEX				0x00000010
+#define SXE_FNAVM_DIPv6				0x00000020
+
+#define SXE_FNAVPORT_DESTINATION_SHIFT		16
+#define SXE_FNAVVLAN_FLEX_SHIFT			16
+#define SXE_FNAVHASH_SIG_SW_INDEX_SHIFT		16
+
+#define SXE_FNAVCMD_CMD_MASK			0x00000003
+#define SXE_FNAVCMD_CMD_ADD_FLOW		0x00000001
+#define SXE_FNAVCMD_CMD_REMOVE_FLOW		0x00000002
+#define SXE_FNAVCMD_CMD_QUERY_REM_FILT		0x00000003
+#define SXE_FNAVCMD_FILTER_VALID		0x00000004
+#define SXE_FNAVCMD_FILTER_UPDATE		0x00000008
+#define SXE_FNAVCMD_IPv6DMATCH			0x00000010
+#define SXE_FNAVCMD_L4TYPE_UDP			0x00000020
+#define SXE_FNAVCMD_L4TYPE_TCP			0x00000040
+#define SXE_FNAVCMD_L4TYPE_SCTP			0x00000060
+#define SXE_FNAVCMD_IPV6			0x00000080
+#define SXE_FNAVCMD_CLEARHT			0x00000100
+#define SXE_FNAVCMD_DROP			0x00000200
+#define SXE_FNAVCMD_INT				0x00000400
+#define SXE_FNAVCMD_LAST			0x00000800
+#define SXE_FNAVCMD_COLLISION			0x00001000
+#define SXE_FNAVCMD_QUEUE_EN			0x00008000
+#define SXE_FNAVCMD_FLOW_TYPE_SHIFT		5
+#define SXE_FNAVCMD_RX_QUEUE_SHIFT		16
+#define SXE_FNAVCMD_RX_TUNNEL_FILTER_SHIFT	23
+#define SXE_FNAVCMD_VT_POOL_SHIFT		24
+#define SXE_FNAVCMD_CMD_POLL			10
+#define SXE_FNAVCMD_TUNNEL_FILTER		0x00800000
+
+
+#define SXE_LXOFFRXCNT		0x041A8
+#define SXE_PXOFFRXCNT(_i)	(0x04160 + ((_i) * 4))
+
+#define SXE_EPC_GPRC		0x050E0
+#define SXE_RXDGPC              0x02F50
+#define SXE_RXDGBCL             0x02F54
+#define SXE_RXDGBCH             0x02F58
+#define SXE_RXDDGPC             0x02F5C
+#define SXE_RXDDGBCL            0x02F60
+#define SXE_RXDDGBCH            0x02F64
+#define SXE_RXLPBKGPC           0x02F68
+#define SXE_RXLPBKGBCL          0x02F6C
+#define SXE_RXLPBKGBCH          0x02F70
+#define SXE_RXDLPBKGPC          0x02F74
+#define SXE_RXDLPBKGBCL         0x02F78
+#define SXE_RXDLPBKGBCH         0x02F7C
+
+#define SXE_RXTPCIN             0x02F88
+#define SXE_RXTPCOUT            0x02F8C
+#define SXE_RXPRDDC             0x02F9C
+
+#define SXE_TXDGPC		0x087A0
+#define SXE_TXDGBCL             0x087A4
+#define SXE_TXDGBCH             0x087A8
+#define SXE_TXSWERR             0x087B0
+#define SXE_TXSWITCH            0x087B4
+#define SXE_TXREPEAT            0x087B8
+#define SXE_TXDESCERR           0x087BC
+#define SXE_MNGPRC		0x040B4
+#define SXE_MNGPDC		0x040B8
+#define SXE_RQSMR(_i)		(0x02300 + ((_i) * 4))   
+#define SXE_TQSM(_i)		(0x08600 + ((_i) * 4))   
+#define SXE_QPRC(_i)		(0x01030 + ((_i) * 0x40))
+#define SXE_QBRC_L(_i)		(0x01034 + ((_i) * 0x40))
+#define SXE_QBRC_H(_i)		(0x01038 + ((_i) * 0x40))
+
+
+#define SXE_QPRDC(_i)		(0x01430 + ((_i) * 0x40))
+#define SXE_QPTC(_i)		(0x08680 + ((_i) * 0x4))
+#define SXE_QBTC_L(_i)		(0x08700 + ((_i) * 0x8)) 
+#define SXE_QBTC_H(_i)		(0x08704 + ((_i) * 0x8)) 
+#define SXE_SSVPC		0x08780                  
+#define SXE_MNGPTC		0x0CF90
+#define SXE_MPC(_i)		(0x03FA0 + ((_i) * 4))
+
+#define SXE_DBUDRTCICNT(_i)	(0x03C6C + ((_i) * 4))
+#define SXE_DBUDRTCOCNT(_i)	(0x03C8C + ((_i) * 4))
+#define SXE_DBUDRBDPCNT(_i)	(0x03D20 + ((_i) * 4))
+#define SXE_DBUDREECNT(_i)	(0x03D40 + ((_i) * 4))
+#define SXE_DBUDROFPCNT(_i)	(0x03D60 + ((_i) * 4))
+#define SXE_DBUDTTCICNT(_i)	(0x0CE54 + ((_i) * 4))
+#define SXE_DBUDTTCOCNT(_i)	(0x0CE74 + ((_i) * 4))
+
+
+
+#define SXE_WUC                       0x05800
+#define SXE_WUFC                      0x05808
+#define SXE_WUS                       0x05810
+#define SXE_IP6AT(_i)                 (0x05880 + ((_i) * 4))   
+
+
+#define SXE_IP6AT_CNT                 4
+
+
+#define SXE_WUC_PME_EN                0x00000002
+#define SXE_WUC_PME_STATUS            0x00000004
+#define SXE_WUC_WKEN                  0x00000010
+#define SXE_WUC_APME                  0x00000020
+
+
+#define SXE_WUFC_LNKC                 0x00000001
+#define SXE_WUFC_MAG                  0x00000002
+#define SXE_WUFC_EX                   0x00000004
+#define SXE_WUFC_MC                   0x00000008
+#define SXE_WUFC_BC                   0x00000010
+#define SXE_WUFC_ARP                  0x00000020
+#define SXE_WUFC_IPV4                 0x00000040
+#define SXE_WUFC_IPV6                 0x00000080
+#define SXE_WUFC_MNG                  0x00000100
+
+
+
+
+#define SXE_TSCTRL              0x14800
+#define SXE_TSES                0x14804
+#define SXE_TSYNCTXCTL          0x14810
+#define SXE_TSYNCRXCTL          0x14820
+#define SXE_RXSTMPL             0x14824
+#define SXE_RXSTMPH             0x14828
+#define SXE_SYSTIML             0x14840
+#define SXE_SYSTIMM             0x14844
+#define SXE_SYSTIMH             0x14848
+#define SXE_TIMADJL             0x14850
+#define SXE_TIMADJH             0x14854
+#define SXE_TIMINC              0x14860
+
+
+#define SXE_TSYNCTXCTL_TXTT     0x0001
+#define SXE_TSYNCTXCTL_TEN      0x0010
+
+
+#define SXE_TSYNCRXCTL_RXTT     0x0001
+#define SXE_TSYNCRXCTL_REN      0x0010
+
+
+#define SXE_TSCTRL_TSSEL        0x00001
+#define SXE_TSCTRL_TSEN         0x00002
+#define SXE_TSCTRL_VER_2        0x00010
+#define SXE_TSCTRL_ONESTEP      0x00100
+#define SXE_TSCTRL_CSEN         0x01000
+#define SXE_TSCTRL_PTYP_ALL     0x00C00
+#define SXE_TSCTRL_L4_UNICAST   0x08000
+
+
+#define SXE_TSES_TXES                   0x00200
+#define SXE_TSES_RXES                   0x00800
+#define SXE_TSES_TXES_V1_SYNC           0x00000
+#define SXE_TSES_TXES_V1_DELAY_REQ      0x00100
+#define SXE_TSES_TXES_V1_ALL            0x00200
+#define SXE_TSES_RXES_V1_SYNC           0x00000
+#define SXE_TSES_RXES_V1_DELAY_REQ      0x00400
+#define SXE_TSES_RXES_V1_ALL            0x00800
+#define SXE_TSES_TXES_V2_ALL            0x00200
+#define SXE_TSES_RXES_V2_ALL            0x00800
+
+#define SXE_IV_SNS              0
+#define SXE_IV_NS               8
+#define SXE_INCPD               0
+#define SXE_BASE_INCVAL         8
+
+
+#define SXE_VT_CTL		0x051B0
+#define SXE_PFMAILBOX(_i)	(0x04B00 + (4 * (_i)))
+
+#define SXE_PFMBICR(_i)		(0x00710 + (4 * (_i)))
+#define SXE_VFLRE(i)		((i & 1)? 0x001C0 : 0x00600)
+#define SXE_VFLREC(i)		(0x00700 + (i * 4))
+#define SXE_VFRE(_i)		(0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i)		(0x08110 + ((_i) * 4))
+#define SXE_QDE			(0x02F04)             
+#define SXE_SPOOF(_i)		(0x08200 + (_i) * 4)
+#define SXE_PFDTXGSWC		0x08220
+#define SXE_VMVIR(_i)		(0x08000 + ((_i) * 4))
+#define SXE_VMOLR(_i)		(0x0F000 + ((_i) * 4))
+#define SXE_VLVF(_i)		(0x0F100 + ((_i) * 4))
+#define SXE_VLVFB(_i)		(0x0F200 + ((_i) * 4))
+#define SXE_MRCTL(_i)		(0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i)	        (0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i)		(0x0F630 + ((_i) * 4))
+#define SXE_VMECM(_i)		(0x08790 + ((_i) * 4))
+#define SXE_PFMBMEM(_i)		(0x13000 + (64 * (_i)))
+
+
+#define SXE_VMOLR_CNT			64
+#define SXE_VLVF_CNT			64
+#define SXE_VLVFB_CNT			128
+#define SXE_MRCTL_CNT			4
+#define SXE_VMRVLAN_CNT			8
+#define SXE_VMRVM_CNT			8
+#define SXE_SPOOF_CNT			8
+#define SXE_VMVIR_CNT			64
+#define SXE_VFRE_CNT			2
+
+
+#define SXE_VMVIR_VLANA_MASK	0xC0000000
+#define SXE_VMVIR_VLAN_VID_MASK	0x00000FFF
+#define SXE_VMVIR_VLAN_UP_MASK	0x0000E000
+
+
+#define SXE_MRCTL_VPME  0x01
+
+#define SXE_MRCTL_UPME  0x02
+
+#define SXE_MRCTL_DPME  0x04
+
+#define SXE_MRCTL_VLME  0x08
+
+
+#define SXE_VT_CTL_DIS_DEFPL  0x20000000
+#define SXE_VT_CTL_REPLEN     0x40000000
+#define SXE_VT_CTL_VT_ENABLE  0x00000001 
+#define SXE_VT_CTL_POOL_SHIFT 7
+#define SXE_VT_CTL_POOL_MASK  (0x3F << SXE_VT_CTL_POOL_SHIFT)
+
+
+#define SXE_PFMAILBOX_STS         0x00000001
+#define SXE_PFMAILBOX_ACK         0x00000002
+#define SXE_PFMAILBOX_VFU         0x00000004
+#define SXE_PFMAILBOX_PFU         0x00000008
+#define SXE_PFMAILBOX_RVFU        0x00000010
+
+
+#define SXE_PFMBICR_VFREQ         0x00000001
+#define SXE_PFMBICR_VFACK         0x00010000
+#define SXE_PFMBICR_VFREQ_MASK    0x0000FFFF
+#define SXE_PFMBICR_VFACK_MASK    0xFFFF0000
+
+
+#define SXE_QDE_ENABLE		(0x00000001)
+#define SXE_QDE_HIDE_VLAN	(0x00000002)
+#define SXE_QDE_IDX_MASK	(0x00007F00)
+#define SXE_QDE_IDX_SHIFT	(8)
+#define SXE_QDE_WRITE		(0x00010000)
+
+
+
+#define SXE_SPOOF_VLAN_SHIFT  (8)
+
+
+#define SXE_PFDTXGSWC_VT_LBEN	0x1 
+
+
+#define SXE_VMVIR_VLANA_DEFAULT 0x40000000
+#define SXE_VMVIR_VLANA_NEVER   0x80000000
+
+
+#define SXE_VMOLR_UPE		0x00400000
+#define SXE_VMOLR_VPE		0x00800000
+#define SXE_VMOLR_AUPE		0x01000000
+#define SXE_VMOLR_ROMPE		0x02000000
+#define SXE_VMOLR_ROPE		0x04000000
+#define SXE_VMOLR_BAM		0x08000000
+#define SXE_VMOLR_MPE		0x10000000
+
+
+#define SXE_VLVF_VIEN         0x80000000 
+#define SXE_VLVF_ENTRIES      64
+#define SXE_VLVF_VLANID_MASK  0x00000FFF
+
+
+#define SXE_HDC_HOST_BASE       0x16000
+#define SXE_HDC_SW_LK           (SXE_HDC_HOST_BASE + 0x00)
+#define SXE_HDC_PF_LK           (SXE_HDC_HOST_BASE + 0x04)
+#define SXE_HDC_SW_OV           (SXE_HDC_HOST_BASE + 0x08)
+#define SXE_HDC_FW_OV           (SXE_HDC_HOST_BASE + 0x0C)
+#define SXE_HDC_PACKET_HEAD0    (SXE_HDC_HOST_BASE + 0x10)
+
+#define SXE_HDC_PACKET_DATA0    (SXE_HDC_HOST_BASE + 0x20)
+
+
+#define SXE_HDC_MSI_STATUS_REG  0x17000
+#define SXE_FW_STATUS_REG       0x17004
+#define SXE_DRV_STATUS_REG      0x17008
+#define SXE_FW_HDC_STATE_REG    0x1700C
+#define SXE_R0_MAC_ADDR_RAL     0x17010
+#define SXE_R0_MAC_ADDR_RAH     0x17014
+#define SXE_CRC_STRIP_REG		0x17018
+
+
+#define SXE_HDC_SW_LK_BIT       0x0001
+#define SXE_HDC_PF_LK_BIT       0x0003
+#define SXE_HDC_SW_OV_BIT       0x0001
+#define SXE_HDC_FW_OV_BIT       0x0001
+#define SXE_HDC_RELEASE_SW_LK   0x0000
+
+#define SXE_HDC_LEN_TO_REG(n)        ((n) - 1)  /* parenthesize arg: (x << 2) - 1, not x << (2 - 1) */
+#define SXE_HDC_LEN_FROM_REG(n)      ((n) + 1)  /* inverse of SXE_HDC_LEN_TO_REG */
+
+
+#define SXE_RX_PKT_BUF_SIZE_SHIFT    10
+#define SXE_TX_PKT_BUF_SIZE_SHIFT    10
+
+#define SXE_RXIDX_TBL_SHIFT           1
+#define SXE_RXTXIDX_IPS_EN            0x00000001
+#define SXE_RXTXIDX_IDX_SHIFT         3
+#define SXE_RXTXIDX_READ              0x40000000
+#define SXE_RXTXIDX_WRITE             0x80000000
+
+
+#define SXE_KEEP_CRC_EN		      0x00000001
+
+
+#define SXE_VMD_CTL			0x0581C
+
+
+#define SXE_VMD_CTL_POOL_EN		0x00000001
+#define SXE_VMD_CTL_POOL_FILTER		0x00000002
+
+
+#define SXE_FLCTRL                    0x14300
+#define SXE_PFCTOP                    0x14304
+#define SXE_FCTTV0                    0x14310
+#define SXE_FCTTV(_i)                (SXE_FCTTV0 + ((_i) * 4))
+#define SXE_FCRTV                     0x14320
+#define SXE_TFCS                      0x14324
+
+
+#define SXE_FCTRL_TFCE_MASK           0x0018
+#define SXE_FCTRL_TFCE_LFC_EN         0x0008
+#define SXE_FCTRL_TFCE_PFC_EN         0x0010
+#define SXE_FCTRL_TFCE_DPF_EN         0x0020
+#define SXE_FCTRL_RFCE_MASK           0x0300
+#define SXE_FCTRL_RFCE_LFC_EN         0x0100
+#define SXE_FCTRL_RFCE_PFC_EN         0x0200
+
+#define SXE_FCTRL_TFCE_FCEN_MASK      0x00FF0000
+#define SXE_FCTRL_TFCE_XONE_MASK      0xFF000000
+
+
+#define SXE_PFCTOP_FCT               0x8808
+#define SXE_PFCTOP_FCOP_MASK         0xFFFF0000
+#define SXE_PFCTOP_FCOP_PFC          0x01010000
+#define SXE_PFCTOP_FCOP_LFC          0x00010000
+
+
+#define SXE_COMCTRL                   0x14400
+#define SXE_PCCTRL                    0x14404
+#define SXE_LPBKCTRL                  0x1440C
+#define SXE_MAXFS                     0x14410
+#define SXE_SACONH                    0x14420
+#define SXE_SACONL                    0x14424
+#define SXE_VLANCTRL                  0x14430
+#define SXE_VLANID                    0x14434
+#define SXE_LINKS                     0x14454
+#define SXE_FPGA_SDS_STS	      0x14704
+#define SXE_MSCA                      0x14500
+#define SXE_MSCD                      0x14504
+
+#define SXE_HLREG0                    0x04240
+#define SXE_MFLCN                     0x04294
+#define SXE_MACC                      0x04330
+
+#define SXE_PCS1GLSTA                 0x0420C
+/* SXE_MFLCN (0x04294) already defined above; duplicate removed */
+#define SXE_PCS1GANA                  0x04850
+#define SXE_PCS1GANLP                 0x04854
+
+
+#define SXE_LPBKCTRL_EN               0x00000001
+
+
+#define SXE_MAC_ADDR_SACONH_SHIFT     32
+#define SXE_MAC_ADDR_SACONL_MASK      0xFFFFFFFF
+
+
+#define SXE_PCS1GLSTA_AN_COMPLETE     0x10000
+#define SXE_PCS1GLSTA_AN_PAGE_RX      0x20000
+#define SXE_PCS1GLSTA_AN_TIMED_OUT    0x40000
+#define SXE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define SXE_PCS1GLSTA_AN_ERROR_RWS    0x100000
+
+#define SXE_PCS1GANA_SYM_PAUSE        0x100
+#define SXE_PCS1GANA_ASM_PAUSE        0x80 
+
+
+#define SXE_LKSTS_PCS_LKSTS_UP        0x00000001
+#define SXE_LINK_UP_TIME              90
+#define SXE_AUTO_NEG_TIME             45
+
+
+#define SXE_MSCA_NP_ADDR_MASK      0x0000FFFF
+#define SXE_MSCA_NP_ADDR_SHIFT     0
+#define SXE_MSCA_DEV_TYPE_MASK     0x001F0000
+#define SXE_MSCA_DEV_TYPE_SHIFT    16        
+#define SXE_MSCA_PHY_ADDR_MASK     0x03E00000
+#define SXE_MSCA_PHY_ADDR_SHIFT    21        
+#define SXE_MSCA_OP_CODE_MASK      0x0C000000
+#define SXE_MSCA_OP_CODE_SHIFT     26        
+#define SXE_MSCA_ADDR_CYCLE        0x00000000
+#define SXE_MSCA_WRITE             0x04000000
+#define SXE_MSCA_READ              0x0C000000
+#define SXE_MSCA_READ_AUTOINC      0x08000000
+#define SXE_MSCA_ST_CODE_MASK      0x30000000
+#define SXE_MSCA_ST_CODE_SHIFT     28        
+#define SXE_MSCA_NEW_PROTOCOL      0x00000000
+#define SXE_MSCA_OLD_PROTOCOL      0x10000000
+#define SXE_MSCA_BYPASSRA_C45      0x40000000
+#define SXE_MSCA_MDI_CMD_ON_PROG   0x80000000
+
+
+#define MDIO_MSCD_RDATA_LEN        16
+#define MDIO_MSCD_RDATA_SHIFT      16
+
+
+#define SXE_CRCERRS                   0x14A04
+#define SXE_ERRBC                     0x14A10
+#define SXE_RLEC                      0x14A14
+#define SXE_PRC64                     0x14A18
+#define SXE_PRC127                    0x14A1C
+#define SXE_PRC255                    0x14A20
+#define SXE_PRC511                    0x14A24
+#define SXE_PRC1023                   0x14A28
+#define SXE_PRC1522                   0x14A2C
+#define SXE_BPRC                      0x14A30
+#define SXE_MPRC                      0x14A34
+#define SXE_GPRC                      0x14A38
+#define SXE_GORCL                     0x14A3C
+#define SXE_GORCH                     0x14A40
+#define SXE_RUC                       0x14A44
+#define SXE_RFC                       0x14A48
+#define SXE_ROC                       0x14A4C
+#define SXE_RJC                       0x14A50
+#define SXE_TORL                      0x14A54
+#define SXE_TORH                      0x14A58
+#define SXE_TPR                       0x14A5C
+#define SXE_PRCPF(_i)                 (0x14A60 + ((_i) * 4))
+#define SXE_GPTC                      0x14B00
+#define SXE_GOTCL                     0x14B04
+#define SXE_GOTCH                     0x14B08
+#define SXE_TPT                       0x14B0C
+#define SXE_PTC64                     0x14B10
+#define SXE_PTC127                    0x14B14
+#define SXE_PTC255                    0x14B18
+#define SXE_PTC511                    0x14B1C
+#define SXE_PTC1023                   0x14B20
+#define SXE_PTC1522                   0x14B24
+#define SXE_MPTC                      0x14B28
+#define SXE_BPTC                      0x14B2C
+#define SXE_PFCT(_i)                  (0x14B30 + ((_i) * 4))
+
+#define SXE_MACCFG                    0x0CE04
+#define SXE_MACCFG_PAD_EN             0x00000001
+
+
+#define SXE_COMCTRL_TXEN	      0x0001        
+#define SXE_COMCTRL_RXEN	      0x0002        
+#define SXE_COMCTRL_EDSEL	      0x0004        
+#define SXE_COMCTRL_SPEED_1G	      0x0200        
+#define SXE_COMCTRL_SPEED_10G	      0x0300        
+
+
+#define SXE_PCCTRL_TXCE		      0x0001        
+#define SXE_PCCTRL_RXCE		      0x0002        
+#define SXE_PCCTRL_PEN		      0x0100        
+#define SXE_PCCTRL_PCSC_ALL	      0x30000       
+
+
+#define SXE_MAXFS_TFSEL		      0x0001        
+#define SXE_MAXFS_RFSEL		      0x0002        
+#define SXE_MAXFS_MFS_MASK	      0xFFFF0000    
+#define SXE_MAXFS_MFS		      0x40000000    
+#define SXE_MAXFS_MFS_SHIFT	      16            
+
+
+#define SXE_LINKS_UP 	              0x00000001    
+
+#define SXE_10G_LINKS_DOWN            0x00000006
+
+
+#define SXE_LINK_SPEED_UNKNOWN        0             
+#define SXE_LINK_SPEED_10_FULL        0x0002        
+#define SXE_LINK_SPEED_100_FULL       0x0008        
+#define SXE_LINK_SPEED_1GB_FULL       0x0020        
+#define SXE_LINK_SPEED_10GB_FULL      0x0080        
+
+
+#define SXE_HLREG0_TXCRCEN            0x00000001  
+#define SXE_HLREG0_RXCRCSTRP          0x00000002  
+#define SXE_HLREG0_JUMBOEN            0x00000004  
+#define SXE_HLREG0_TXPADEN            0x00000400  
+#define SXE_HLREG0_TXPAUSEEN          0x00001000  
+#define SXE_HLREG0_RXPAUSEEN          0x00004000  
+#define SXE_HLREG0_LPBK               0x00008000  
+#define SXE_HLREG0_MDCSPD             0x00010000  
+#define SXE_HLREG0_CONTMDC            0x00020000  
+#define SXE_HLREG0_CTRLFLTR           0x00040000  
+#define SXE_HLREG0_PREPEND            0x00F00000  
+#define SXE_HLREG0_PRIPAUSEEN         0x01000000  
+#define SXE_HLREG0_RXPAUSERECDA       0x06000000  
+#define SXE_HLREG0_RXLNGTHERREN       0x08000000  
+#define SXE_HLREG0_RXPADSTRIPEN       0x10000000  
+
+#define SXE_MFLCN_PMCF                0x00000001  
+#define SXE_MFLCN_DPF                 0x00000002  
+#define SXE_MFLCN_RPFCE               0x00000004  
+#define SXE_MFLCN_RFCE                0x00000008  
+#define SXE_MFLCN_RPFCE_MASK	      0x00000FF4  
+#define SXE_MFLCN_RPFCE_SHIFT         4
+
+#define SXE_MACC_FLU                  0x00000001
+#define SXE_MACC_FSV_10G              0x00030000
+#define SXE_MACC_FS                   0x00040000
+
+#define SXE_DEFAULT_FCPAUSE           0xFFFF
+
+
+#define SXE_SAQF(_i)		(0x0E000 + ((_i) * 4)) 
+#define SXE_DAQF(_i)		(0x0E200 + ((_i) * 4)) 
+#define SXE_SDPQF(_i)		(0x0E400 + ((_i) * 4)) 
+#define SXE_FTQF(_i)		(0x0E600 + ((_i) * 4)) 
+#define SXE_L34T_IMIR(_i)	(0x0E800 + ((_i) * 4)) 
+
+#define SXE_MAX_FTQF_FILTERS		128
+#define SXE_FTQF_PROTOCOL_MASK		0x00000003
+#define SXE_FTQF_PROTOCOL_TCP		0x00000000
+#define SXE_FTQF_PROTOCOL_UDP		0x00000001
+#define SXE_FTQF_PROTOCOL_SCTP		0x00000002  /* same value; hex form matches sibling TCP/UDP defines */
+#define SXE_FTQF_PRIORITY_MASK		0x00000007
+#define SXE_FTQF_PRIORITY_SHIFT		2
+#define SXE_FTQF_POOL_MASK		0x0000003F
+#define SXE_FTQF_POOL_SHIFT		8
+#define SXE_FTQF_5TUPLE_MASK_MASK	0x0000001F
+#define SXE_FTQF_5TUPLE_MASK_SHIFT	25
+#define SXE_FTQF_SOURCE_ADDR_MASK	0x1E
+#define SXE_FTQF_DEST_ADDR_MASK		0x1D
+#define SXE_FTQF_SOURCE_PORT_MASK	0x1B
+#define SXE_FTQF_DEST_PORT_MASK		0x17
+#define SXE_FTQF_PROTOCOL_COMP_MASK	0x0F
+#define SXE_FTQF_POOL_MASK_EN		0x40000000
+#define SXE_FTQF_QUEUE_ENABLE		0x80000000
+
+#define SXE_SDPQF_DSTPORT		0xFFFF0000
+#define SXE_SDPQF_DSTPORT_SHIFT		16
+#define SXE_SDPQF_SRCPORT		0x0000FFFF
+
+#define SXE_L34T_IMIR_SIZE_BP		0x00001000
+#define SXE_L34T_IMIR_RESERVE		0x00080000
+#define SXE_L34T_IMIR_LLI			0x00100000
+#define SXE_L34T_IMIR_QUEUE			0x0FE00000
+#define SXE_L34T_IMIR_QUEUE_SHIFT	21
+
+#define SXE_VMTXSW(_i)                (0x05180 + ((_i) * 4))   
+#define SXE_VMTXSW_REGISTER_COUNT     2
+
+#define SXE_TXSTMP_SEL		0x14510  
+#define SXE_TXSTMP_VAL		0x1451c  
+
+#define SXE_TXTS_MAGIC0		0x005a005900580057
+#define SXE_TXTS_MAGIC1		0x005e005d005c005b
+
+#endif
diff --git a/drivers/net/sxe/include/sxe_type.h b/drivers/net/sxe/include/sxe_type.h
new file mode 100644
index 0000000000..433385a0c9
--- /dev/null
+++ b/drivers/net/sxe/include/sxe_type.h
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_TYPE_H__
+#define __SXE_TYPE_H__
+
+#define SXE_TXD_CMD_EOP   0x01000000  
+#define SXE_TXD_CMD_RS    0x08000000  
+#define SXE_TXD_STAT_DD   0x00000001  
+
+#define SXE_TXD_CMD       (SXE_TXD_CMD_EOP | SXE_TXD_CMD_RS)
+
+
+typedef union sxe_adv_tx_desc {
+	struct {
+		U64 buffer_addr;
+		U32 cmd_type_len;
+		U32 olinfo_status;
+	} read;
+	struct {
+		U64 rsvd;
+		U32 nxtseq_seed;
+		U32 status;
+	} wb;
+} sxe_adv_tx_desc_u;
+
+typedef union sxe_adv_rx_desc {
+	struct {
+		U64 pkt_addr;
+		U64 hdr_addr;
+	} read;
+	struct {
+		struct {
+			union {
+				U32 data;
+				struct {
+					U16 pkt_info;
+					U16 hdr_info;
+				} hs_rss;
+			} lo_dword;
+			union {
+				U32 rss;
+				struct {
+					U16 ip_id;
+					U16 csum;
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			U32 status_error;
+			U16 length;
+			U16 vlan;
+		} upper;
+	} wb;
+} sxe_adv_rx_desc_u;
+
+#define SXE_RXD_STAT_DD    0x01  
+#define SXE_RXD_STAT_EOP   0x02  
+
+
+#define PCI_VENDOR_ID_STARS		 0x1FF2
+#define SXE_DEV_ID_FPGA			 0x1160
+
+
+#define SXE_CTRL      0x00000
+#define SXE_STATUS    0x00008
+#define SXE_CTRL_EXT  0x00018
+#define SXE_ESDP      0x00020
+#define SXE_EODSDP    0x00028
+
+#define SXE_I2CCTL_8259X	0x00028
+#define SXE_I2CCTL_X540	SXE_I2CCTL_8259X
+#define SXE_I2CCTL_X550	0x15F5C
+#define SXE_I2CCTL_X550EM_x	SXE_I2CCTL_X550
+#define SXE_I2CCTL_X550EM_a	SXE_I2CCTL_X550
+#define SXE_I2CCTL(_hw)	SXE_BY_MAC((_hw), I2CCTL)
+
+#define SXE_LEDCTL    0x00200
+#define SXE_FRTIMER   0x00048
+#define SXE_TCPTIMER  0x0004C
+#define SXE_CORESPARE 0x00600
+#define SXE_EXVET     0x05078
+
+
+#define SXE_EICR      0x00800
+#define SXE_EICS      0x00808
+#define SXE_EIMS      0x00880
+#define SXE_EIMC      0x00888
+#define SXE_EIAC      0x00810
+#define SXE_EIAM      0x00890
+#define SXE_EICR_EX(_i)   (0x00A80 + (_i) * 4)
+#define SXE_EICS_EX(_i)   (0x00A90 + (_i) * 4)
+#define SXE_EIMS_EX(_i)   (0x00AA0 + (_i) * 4)
+#define SXE_EIMC_EX(_i)   (0x00AB0 + (_i) * 4)
+#define SXE_EIAM_EX(_i)   (0x00AD0 + (_i) * 4)
+
+
+#define SXE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+			 (0x0D000 + (((_i) - 64) * 0x40)))
+#define SXE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+			 (0x0D004 + (((_i) - 64) * 0x40)))
+#define SXE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+			 (0x0D008 + (((_i) - 64) * 0x40)))
+#define SXE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+			 (0x0D010 + (((_i) - 64) * 0x40)))
+#define SXE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+			 (0x0D018 + (((_i) - 64) * 0x40)))
+#define SXE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+			 (0x0D028 + (((_i) - 64) * 0x40)))
+#define SXE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+			 (0x0D02C + (((_i) - 64) * 0x40)))
+#define SXE_RSCDBU     0x03028
+#define SXE_RDDCC      0x02F20
+#define SXE_RXMEMWRAP  0x03190
+#define SXE_STARCTRL   0x03024
+
+#define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : (0x0D014 + (((_i) - 64) * 0x40)))
+
+#define SXE_DCA_RXCTRL(_i)    (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : (0x0D00C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL           0x02F00
+#define SXE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))    
+#define SXE_DRXCFG	0x03C20
+#define SXE_RXCTRL    0x03000
+#define SXE_DROPEN    0x03D04
+#define SXE_RXPBSIZE_SHIFT 10
+#define SXE_DRXCFG_GSP_ZERO    0x00000002
+#define SXE_DRXCFG_DBURX_START 0x00000001
+
+
+#define SXE_RXCSUM    0x05000
+#define SXE_RFCTL     0x05008
+#define SXE_DRECCCTL  0x02F08
+#define SXE_DRECCCTL_DISABLE 0
+
+
+#define SXE_MTA(_i)   (0x05200 + ((_i) * 4))
+#define SXE_RAL(_i)   (0x0A200 + ((_i) * 8))
+#define SXE_RAH(_i)   (0x0A204 + ((_i) * 8))
+#define SXE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define SXE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+
+
+#define SXE_PSRTYPE(_i)    (0x0EA00 + ((_i) * 4))
+
+
+#define SXE_VFTA(_i)  (0x0A000 + ((_i) * 4))
+
+
+#define SXE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define SXE_FCTRL     0x05080
+#define SXE_VLNCTRL   0x05088
+#define SXE_MCSTCTRL  0x05090
+#define SXE_MRQC      0x0EC80
+#define SXE_SAQF(_i)  (0x0E000 + ((_i) * 4)) 
+#define SXE_DAQF(_i)  (0x0E200 + ((_i) * 4)) 
+#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4)) 
+#define SXE_FTQF(_i)  (0x0E600 + ((_i) * 4)) 
+#define SXE_ETQF(_i)  (0x05128 + ((_i) * 4)) 
+#define SXE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) 
+#define SXE_SYNQF     0x0EC30                 
+#define SXE_RQTC      0x0EC70
+#define SXE_MTQC      0x08120
+#define SXE_VLVF(_i)  (0x0F100 + ((_i) * 4)) 
+#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4)) 
+#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4)) 
+#define SXE_PFFLPL     0x050B0
+#define SXE_PFFLPH     0x050B4
+#define SXE_VT_CTL         0x051B0
+#define SXE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i)))   
+#define SXE_PFMBMEM(_i)    (0x13000 + (64 * (_i)))  
+#define SXE_PFMBICR(_i)    (0x00710 + (4 * (_i)))   
+#define SXE_PFMBIMR(_i)    (0x00720 + (4 * (_i)))   
+#define SXE_VFRE(_i)       (0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i)       (0x08110 + ((_i) * 4))
+#define SXE_VMECM(_i)      (0x08790 + ((_i) * 4))
+#define SXE_QDE            0x2F04
+#define SXE_VMTXSW(_i)     (0x05180 + ((_i) * 4))   
+#define SXE_VMOLR(_i)      (0x0F000 + ((_i) * 4))    
+#define SXE_UTA(_i)        (0x0F400 + ((_i) * 4))
+#define SXE_MRCTL(_i)      (0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i)      (0x0F630 + ((_i) * 4))
+#define SXE_WQBR_RX(_i)    (0x2FB0 + ((_i) * 4))    
+#define SXE_WQBR_TX(_i)    (0x8130 + ((_i) * 4))    
+#define SXE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4))   
+#define SXE_RXFECCERR0         0x051B8
+#define SXE_LLITHRESH 0x0EC90
+#define SXE_IMIR(_i)  (0x05A80 + ((_i) * 4))         
+#define SXE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))
+#define SXE_IMIRVP    0x0EC60
+#define SXE_VMD_CTL   0x0581C
+#define SXE_RETA(_i)  (0x0EB00 + ((_i) * 4))        
+#define SXE_ERETA(_i)	(0x0EE80 + ((_i) * 4))     
+#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4))       
+
+
+#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40))  
+#define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define SXE_TDH(_i)   (0x06010 + ((_i) * 0x40))
+#define SXE_TDT(_i)   (0x06018 + ((_i) * 0x40))
+#define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define SXE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define SXE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define SXE_DTXCTL    0x07E00
+
+#define SXE_DMATXCTL      0x04A80
+#define SXE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4))  
+#define SXE_PFDTXGSWC     0x08220
+#define SXE_DTXMXSZRQ     0x08100
+#define SXE_DTXTCPFLGL    0x04A88
+#define SXE_DTXTCPFLGH    0x04A8C
+#define SXE_LBDRPEN       0x0CA00
+#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4))  
+
+#define SXE_DMATXCTL_TE       0x1   
+#define SXE_DMATXCTL_NS       0x2   
+#define SXE_DMATXCTL_GDV      0x8   
+#define SXE_DMATXCTL_MDP_EN   0x20  
+#define SXE_DMATXCTL_MBINTEN  0x40  
+#define SXE_DMATXCTL_VT_SHIFT 16    
+
+#define SXE_PFDTXGSWC_VT_LBEN 0x1   
+
+
+#define SXE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
+#define SXE_TIPG      0x0CB00
+#define SXE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4))  
+#define SXE_DTXCFG	0x0CE08
+#define SXE_MNGTXMAP  0x0CD10
+#define SXE_TIPG_FIBER_DEFAULT 3
+#define SXE_TXPBSIZE_SHIFT    10
+#define SXE_DTXCFG_DBUTX_START  0x00000001
+
+
+#define SXE_RTRPCS      0x02430
+#define SXE_RTTDCS      0x04900
+#define SXE_RTTDCS_ARBDIS     0x00000040   
+#define SXE_RTTPCS      0x0CD00
+#define SXE_RTRUP2TC    0x03020
+#define SXE_RTTUP2TC    0x0C800
+#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4))  
+#define SXE_TXLLQ(_i)   (0x082E0 + ((_i) * 4))  
+#define SXE_RTRPT4S(_i) (0x02160 + ((_i) * 4))  
+#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4))  
+#define SXE_RTTDT2S(_i) (0x04930 + ((_i) * 4))  
+#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4))  
+#define SXE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4))  
+#define SXE_RTTDQSEL    0x04904
+#define SXE_RTTDT1C     0x04908
+#define SXE_RTTDT1S     0x0490C
+
+
+#define SXE_RTTQCNCR                0x08B00
+#define SXE_RTTQCNTG                0x04A90
+#define SXE_RTTBCNRD                0x0498C
+#define SXE_RTTQCNRR                0x0498C
+#define SXE_RTTDTECC                0x04990
+#define SXE_RTTDTECC_NO_BCN         0x00000100
+#define SXE_RTTBCNRC                0x04984
+#define SXE_RTTBCNRC_RS_ENA         0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK    0x00003FFF
+#define SXE_RTTBCNRC_RF_INT_SHIFT   14
+#define SXE_RTTBCNRC_RF_INT_MASK    (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+#define SXE_RTTBCNRM                0x04980
+#define SXE_RTTQCNRM                0x04980
+
+
+#define SXE_MACCFG      0x0CE04
+
+
+#define SXE_GCR_EXT           0x11050
+#define SXE_GSCL_5_82599      0x11030
+#define SXE_GSCL_6_82599      0x11034
+#define SXE_GSCL_7_82599      0x11038
+#define SXE_GSCL_8_82599      0x1103C
+#define SXE_PHYADR_82599      0x11040
+#define SXE_PHYDAT_82599      0x11044
+#define SXE_PHYCTL_82599      0x11048
+#define SXE_PBACLR_82599      0x11068
+
+#define SXE_CIAA_8259X	0x11088
+
+
+#define SXE_CIAD_8259X	0x1108C
+
+
+#define SXE_PICAUSE           0x110B0
+#define SXE_PIENA             0x110B8
+#define SXE_CDQ_MBR_82599     0x110B4
+#define SXE_PCIESPARE         0x110BC
+#define SXE_MISC_REG_82599    0x110F0
+#define SXE_ECC_CTRL_0_82599  0x11100
+#define SXE_ECC_CTRL_1_82599  0x11104
+#define SXE_ECC_STATUS_82599  0x110E0
+#define SXE_BAR_CTRL_82599    0x110F4
+
+
+#define SXE_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms       0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define SXE_GCR_CAP_VER2              0x00040000
+
+#define SXE_GCR_EXT_MSIX_EN           0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR     0x40000000
+#define SXE_GCR_EXT_VT_MODE_16        0x00000001
+#define SXE_GCR_EXT_VT_MODE_32        0x00000002
+#define SXE_GCR_EXT_VT_MODE_64        0x00000003
+#define SXE_GCR_EXT_SRIOV             (SXE_GCR_EXT_MSIX_EN | \
+					 SXE_GCR_EXT_VT_MODE_64)
+
+
+#define SXE_PCS1GCFIG 0x04200
+#define SXE_PCS1GLCTL 0x04208
+#define SXE_PCS1GLSTA 0x0420C
+#define SXE_PCS1GDBG0 0x04210
+#define SXE_PCS1GDBG1 0x04214
+#define SXE_PCS1GANA  0x04218
+#define SXE_PCS1GANLP 0x0421C
+#define SXE_PCS1GANNP 0x04220
+#define SXE_PCS1GANLPNP 0x04224
+#define SXE_HLREG0    0x04240
+#define SXE_HLREG1    0x04244
+#define SXE_PAP       0x04248
+#define SXE_MACA      0x0424C
+#define SXE_APAE      0x04250
+#define SXE_ARD       0x04254
+#define SXE_AIS       0x04258
+#define SXE_MSCA      0x0425C
+#define SXE_MSRWD     0x04260
+#define SXE_MLADD     0x04264
+#define SXE_MHADD     0x04268
+#define SXE_MAXFRS    0x04268
+#define SXE_TREG      0x0426C
+#define SXE_PCSS1     0x04288
+#define SXE_PCSS2     0x0428C
+#define SXE_XPCSS     0x04290
+#define SXE_MFLCN     0x04294
+#define SXE_SERDESC   0x04298
+#define SXE_MAC_SGMII_BUSY 0x04298
+#define SXE_MACS      0x0429C
+#define SXE_AUTOC     0x042A0
+#define SXE_LINKS     0x042A4
+#define SXE_LINKS2    0x04324
+#define SXE_AUTOC2    0x042A8
+#define SXE_AUTOC3    0x042AC
+#define SXE_ANLP1     0x042B0
+#define SXE_ANLP2     0x042B4
+#define SXE_MACC      0x04330
+#define SXE_ATLASCTL  0x04800
+#define SXE_MMNGC     0x042D0
+#define SXE_ANLPNP1   0x042D4
+#define SXE_ANLPNP2   0x042D8
+#define SXE_KRPCSFC   0x042E0
+#define SXE_KRPCSS    0x042E4
+#define SXE_FECS1     0x042E8
+#define SXE_FECS2     0x042EC
+#define SXE_SMADARCTL 0x14F10
+#define SXE_MPVC      0x04318
+#define SXE_SGMIIC    0x04314
+
+
+#define SXE_COMCTRL             0x14400
+#define SXE_PCCTRL              0x14404
+#define SXE_LPBKCTRL            0x1440C
+#define SXE_MAXFS               0x14410
+#define SXE_SACONH              0x14420
+#define SXE_VLANCTRL            0x14430
+#define SXE_VLANID              0x14434
+/* SXE_VLANCTRL (0x14430) already defined above; duplicate removed */
+#define SXE_FPAG_SDS_CON        0x14700
+
+
+#define SXE_COMCTRL_TXEN        0x0001
+#define SXE_COMCTRL_RXEN        0x0002
+#define SXE_COMCTRL_EDSEL       0x0004
+#define SXE_COMCTRL_SPEED_1G    0x0200
+#define SXE_COMCTRL_SPEED_10G   0x0300
+
+
+#define SXE_PCCTRL_TXCE         0x0001
+#define SXE_PCCTRL_RXCE         0x0002
+#define SXE_PCCTRL_PEN          0x0100
+#define SXE_PCCTRL_PCSC_ALL     0x30000
+
+
+#define SXE_MAXFS_TFSEL         0x0001
+#define SXE_MAXFS_RFSEL         0x0002
+#define SXE_MAXFS_MFS_MASK      0xFFFF0000
+#define SXE_MAXFS_MFS           0x40000000
+#define SXE_MAXFS_MFS_SHIFT     16
+
+
+#define SXE_FPGA_SDS_CON_FULL_DUPLEX_MODE    0x00200000
+#define SXE_FPGA_SDS_CON_ANRESTART           0x00008000
+#define SXE_FPGA_SDS_CON_AN_ENABLE           0x00001000
+
+
+#define SXE_RSCDBU_RSCSMALDIS_MASK    0x0000007F
+#define SXE_RSCDBU_RSCACKDIS          0x00000080
+
+
+#define SXE_RDRXCTL_RDMTS_1_2     0x00000000  
+#define SXE_RDRXCTL_CRCSTRIP      0x00000002  
+#define SXE_RDRXCTL_PSP           0x00000004  
+#define SXE_RDRXCTL_MVMEN         0x00000020
+#define SXE_RDRXCTL_DMAIDONE      0x00000008  
+#define SXE_RDRXCTL_AGGDIS        0x00010000  
+#define SXE_RDRXCTL_RSCFRSTSIZE   0x003E0000  
+#define SXE_RDRXCTL_RSCLLIDIS     0x00800000  
+#define SXE_RDRXCTL_RSCACKC       0x02000000  
+#define SXE_RDRXCTL_FCOE_WRFIX    0x04000000  
+#define SXE_RDRXCTL_MBINTEN       0x10000000
+#define SXE_RDRXCTL_MDP_EN        0x20000000
+
+
+#define SXE_CTRL_GIO_DIS      0x00000004
+#define SXE_CTRL_LNK_RST      0x00000008
+#define SXE_CTRL_RST          0x04000000
+#define SXE_CTRL_RST_MASK     (SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+
+
+#define SXE_MHADD_MFS_MASK    0xFFFF0000
+#define SXE_MHADD_MFS_SHIFT   16
+
+
+#define SXE_CTRL_EXT_PFRSTD   0x00004000
+#define SXE_CTRL_EXT_NS_DIS   0x00010000
+#define SXE_CTRL_EXT_RO_DIS   0x00020000
+#define SXE_CTRL_EXT_DRV_LOAD 0x10000000
+
+
+#define SXE_TXPBSIZE_20KB     0x00005000  
+#define SXE_TXPBSIZE_40KB     0x0000A000  
+#define SXE_RXPBSIZE_48KB     0x0000C000  
+#define SXE_RXPBSIZE_64KB     0x00010000  
+#define SXE_RXPBSIZE_80KB     0x00014000  
+#define SXE_RXPBSIZE_128KB    0x00020000  
+#define SXE_RXPBSIZE_MAX      0x00080000  
+#define SXE_TXPBSIZE_MAX      0x00028000  
+
+#define SXE_TXPKT_SIZE_MAX    0xA         
+#define SXE_MAX_PB		8
+
+
+#define SXE_HLREG0_TXCRCEN      0x00000001  
+#define SXE_HLREG0_RXCRCSTRP    0x00000002  
+#define SXE_HLREG0_JUMBOEN      0x00000004  
+#define SXE_HLREG0_TXPADEN      0x00000400  
+#define SXE_HLREG0_TXPAUSEEN    0x00001000  
+#define SXE_HLREG0_RXPAUSEEN    0x00004000  
+#define SXE_HLREG0_LPBK         0x00008000  
+#define SXE_HLREG0_MDCSPD       0x00010000  
+#define SXE_HLREG0_CONTMDC      0x00020000  
+#define SXE_HLREG0_CTRLFLTR     0x00040000  
+#define SXE_HLREG0_PREPEND      0x00F00000  
+#define SXE_HLREG0_PRIPAUSEEN   0x01000000  
+#define SXE_HLREG0_RXPAUSERECDA 0x06000000  
+#define SXE_HLREG0_RXLNGTHERREN 0x08000000  
+#define SXE_HLREG0_RXPADSTRIPEN 0x10000000  
+
+
+#define SXE_VMOLR_UPE		  0x00400000
+#define SXE_VMOLR_VPE		  0x00800000
+#define SXE_VMOLR_AUPE        0x01000000
+#define SXE_VMOLR_ROMPE       0x02000000
+#define SXE_VMOLR_ROPE        0x04000000
+#define SXE_VMOLR_BAM         0x08000000
+#define SXE_VMOLR_MPE         0x10000000
+
+
+#define SXE_RXCSUM_IPPCSE     0x00001000  
+#define SXE_RXCSUM_PCSD       0x00002000  
+
+
+#define SXE_VMD_CTL_VMDQ_EN     0x00000001
+#define SXE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+
+#define	SXE_MACCFG_PAD_EN       0x00000001
+
+
+#define SXE_IRQ_CLEAR_MASK    0xFFFFFFFF
+
+
+#define SXE_STATUS_LAN_ID         0x0000000C
+#define SXE_STATUS_LAN_ID_SHIFT   2         
+#define SXE_STATUS_GIO            0x00080000
+
+
+#define SXE_LINKS_KX_AN_COMP  0x80000000
+#define SXE_LINKS_UP          0x40000000
+#define SXE_LINKS_SPEED       0x20000000
+#define SXE_LINKS_MODE        0x18000000
+#define SXE_LINKS_RX_MODE     0x06000000
+#define SXE_LINKS_TX_MODE     0x01800000
+#define SXE_LINKS_XGXS_EN     0x00400000
+#define SXE_LINKS_SGMII_EN    0x02000000
+#define SXE_LINKS_PCS_1G_EN   0x00200000
+#define SXE_LINKS_1G_AN_EN    0x00100000
+#define SXE_LINKS_KX_AN_IDLE  0x00080000
+#define SXE_LINKS_1G_SYNC     0x00040000
+#define SXE_LINKS_10G_ALIGN   0x00020000
+#define SXE_LINKS_10G_LANE_SYNC 0x00017000
+#define SXE_LINKS_TL_FAULT    0x00001000
+#define SXE_LINKS_SIGNAL      0x00000F00
+
+
+#define SXE_PCI_DEVICE_STATUS   0x7A 
+#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
+#define SXE_PCI_LINK_STATUS     0x82 
+#define SXE_PCI_DEVICE_CONTROL2 0x98 
+#define SXE_PCI_LINK_WIDTH      0x3F0
+#define SXE_PCI_LINK_WIDTH_1    0x10
+#define SXE_PCI_LINK_WIDTH_2    0x20
+#define SXE_PCI_LINK_WIDTH_4    0x40
+#define SXE_PCI_LINK_WIDTH_8    0x80
+#define SXE_PCI_LINK_SPEED      0xF
+#define SXE_PCI_LINK_SPEED_2500 0x1
+#define SXE_PCI_LINK_SPEED_5000 0x2
+#define SXE_PCI_LINK_SPEED_8000 0x3
+#define SXE_PCI_HEADER_TYPE_REGISTER  0x0E
+#define SXE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define SXE_PCI_DEVICE_CONTROL2_16ms  0x0005
+
+#define SXE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define SXE_PCIDEVCTRL2_16_32ms_def	0x0
+#define SXE_PCIDEVCTRL2_50_100us	0x1
+#define SXE_PCIDEVCTRL2_1_2ms		0x2
+#define SXE_PCIDEVCTRL2_16_32ms	0x5
+#define SXE_PCIDEVCTRL2_65_130ms	0x6
+#define SXE_PCIDEVCTRL2_260_520ms	0x9
+#define SXE_PCIDEVCTRL2_1_2s		0xa
+#define SXE_PCIDEVCTRL2_4_8s		0xd
+#define SXE_PCIDEVCTRL2_17_34s	0xe
+
+
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	800
+
+
+#define SXE_RAH_VIND_MASK     0x003C0000
+#define SXE_RAH_VIND_SHIFT    18
+#define SXE_RAH_AV            0x80000000
+#define SXE_CLEAR_VMDQ_ALL    0xFFFFFFFF
+
+
+#define SXE_RFCTL_ISCSI_DIS       0x00000001
+#define SXE_RFCTL_ISCSI_DWC_MASK  0x0000003E
+#define SXE_RFCTL_ISCSI_DWC_SHIFT 1
+#define SXE_RFCTL_RSC_DIS		0x00000020
+#define SXE_RFCTL_NFSW_DIS        0x00000040
+#define SXE_RFCTL_NFSR_DIS        0x00000080
+#define SXE_RFCTL_NFS_VER_MASK    0x00000300
+#define SXE_RFCTL_NFS_VER_SHIFT   8
+#define SXE_RFCTL_NFS_VER_2       0
+#define SXE_RFCTL_NFS_VER_3       1
+#define SXE_RFCTL_NFS_VER_4       2
+#define SXE_RFCTL_IPV6_DIS        0x00000400
+#define SXE_RFCTL_IPV6_XSUM_DIS   0x00000800
+#define SXE_RFCTL_IPFRSP_DIS      0x00004000
+#define SXE_RFCTL_IPV6_EX_DIS     0x00010000
+#define SXE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+
+#define SXE_TXDCTL_ENABLE     0x02000000   
+#define SXE_TXDCTL_SWFLSH     0x04000000   
+#define SXE_TXDCTL_WTHRESH_SHIFT      16   
+
+
+#define SXE_RXCTRL_RXEN       0x00000001 
+#define SXE_RXCTRL_DMBYPS     0x00000002 
+#define SXE_RXDCTL_ENABLE     0x02000000 
+#define SXE_RXDCTL_SWFLSH     0x04000000 
+
+
+#define SXE_RXDCTL_DESC_FIFO_AFUL_TH_MASK 0x0000001F
+#define SXE_RXDCTL_AFUL_CFG_ERR		  0x00000020
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_MASK   0x00001F00
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT  8
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_MASK  0x001F0000
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT 16
+
+
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT	800
+
+
+#define SXE_FCTRL_SBP 0x00000002  
+#define SXE_FCTRL_MPE 0x00000100  
+#define SXE_FCTRL_UPE 0x00000200  
+#define SXE_FCTRL_BAM 0x00000400  
+#define SXE_FCTRL_PMCF 0x00001000 
+#define SXE_FCTRL_DPF 0x00002000  
+
+
+#define SXE_QDE_ENABLE	0x00000001
+#define SXE_QDE_HIDE_VLAN	0x00000002
+#define SXE_QDE_IDX_MASK	0x00007F00
+#define SXE_QDE_IDX_SHIFT	8
+#define SXE_QDE_WRITE		0x00010000
+
+#define SXE_TXD_POPTS_IXSM 0x01      
+#define SXE_TXD_POPTS_TXSM 0x02      
+#define SXE_TXD_CMD_EOP    0x01000000
+#define SXE_TXD_CMD_IFCS   0x02000000
+#define SXE_TXD_CMD_IC     0x04000000
+#define SXE_TXD_CMD_RS     0x08000000
+#define SXE_TXD_CMD_DEXT   0x20000000
+#define SXE_TXD_CMD_VLE    0x40000000
+#define SXE_TXD_STAT_DD    0x00000001
+
+
+#define SXE_SRRCTL_BSIZEPKT_SHIFT     10          
+#define SXE_SRRCTL_RDMTS_SHIFT        22
+#define SXE_SRRCTL_RDMTS_MASK         0x01C00000
+#define SXE_SRRCTL_DROP_EN            0x10000000
+#define SXE_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define SXE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define SXE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define SXE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define SXE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define SXE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define SXE_SRRCTL_DESCTYPE_MASK      0x0E000000
+
+#define SXE_RXDPS_HDRSTAT_HDRSP       0x00008000
+#define SXE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define SXE_RXDADV_RSSTYPE_MASK       0x0000000F
+#define SXE_RXDADV_PKTTYPE_MASK       0x0000FFF0
+#define SXE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
+#define SXE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
+#define SXE_RXDADV_RSCCNT_MASK        0x001E0000
+#define SXE_RXDADV_RSCCNT_SHIFT       17
+#define SXE_RXDADV_HDRBUFLEN_SHIFT    5
+#define SXE_RXDADV_SPLITHEADER_EN     0x00001000
+#define SXE_RXDADV_SPH                0x8000
+
+
+#define SXE_ADVTXD_DTYP_DATA  0x00300000        
+#define SXE_ADVTXD_DCMD_IFCS  SXE_TXD_CMD_IFCS  
+#define SXE_ADVTXD_DCMD_DEXT  SXE_TXD_CMD_DEXT  
+#define SXE_ADVTXD_PAYLEN_SHIFT    14           
+
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+
+
+#define SXE_ERR_EEPROM                        -1
+#define SXE_ERR_EEPROM_CHECKSUM               -2
+#define SXE_ERR_PHY                           -3
+#define SXE_ERR_CONFIG                        -4
+#define SXE_ERR_PARAM                         -5
+#define SXE_ERR_MAC_TYPE                      -6
+#define SXE_ERR_UNKNOWN_PHY                   -7
+#define SXE_ERR_LINK_SETUP                    -8
+#define SXE_ERR_ADAPTER_STOPPED               -9
+#define SXE_ERR_INVALID_MAC_ADDR              -10
+#define SXE_ERR_DEVICE_NOT_SUPPORTED          -11
+#define SXE_ERR_MASTER_REQUESTS_PENDING       -12
+#define SXE_ERR_INVALID_LINK_SETTINGS         -13
+#define SXE_ERR_AUTONEG_NOT_COMPLETE          -14
+#define SXE_ERR_RESET_FAILED                  -15
+#define SXE_ERR_SWFW_SYNC                     -16
+#define SXE_ERR_PHY_ADDR_INVALID              -17
+#define SXE_ERR_I2C                           -18
+#define SXE_ERR_SFP_NOT_SUPPORTED             -19
+#define SXE_ERR_SFP_NOT_PRESENT               -20
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
+#define SXE_ERR_NO_SAN_ADDR_PTR               -22
+#define SXE_ERR_FDIR_REINIT_FAILED            -23
+#define SXE_ERR_EEPROM_VERSION                -24
+#define SXE_ERR_NO_SPACE                      -25
+#define SXE_ERR_OVERTEMP                      -26
+#define SXE_ERR_FC_NOT_NEGOTIATED             -27
+#define SXE_ERR_FC_NOT_SUPPORTED              -28
+#define SXE_ERR_SFP_SETUP_NOT_COMPLETE        -30
+#define SXE_ERR_PBA_SECTION                   -31
+#define SXE_ERR_INVALID_ARGUMENT              -32
+#define SXE_ERR_HOST_INTERFACE_COMMAND        -33
+#define SXE_ERR_FDIR_CMD_INCOMPLETE		-38
+#define SXE_ERR_FW_RESP_INVALID		-39
+#define SXE_ERR_TOKEN_RETRY			-40
+#define SXE_NOT_IMPLEMENTED                   0x7FFFFFFF
+
+#define SXE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
+#define SXE_FUSES0_300MHZ		BIT(5)
+#define SXE_FUSES0_REV_MASK		(3u << 6)
+
+#define SXE_KRM_PORT_CAR_GEN_CTRL(P)	((P) ? 0x8010 : 0x4010)
+#define SXE_KRM_LINK_S1(P)		((P) ? 0x8200 : 0x4200)
+#define SXE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+#define SXE_KRM_AN_CNTL_1(P)		((P) ? 0x822C : 0x422C)
+#define SXE_KRM_AN_CNTL_8(P)		((P) ? 0x8248 : 0x4248)
+#define SXE_KRM_SGMII_CTRL(P)		((P) ? 0x82A0 : 0x42A0)
+#define SXE_KRM_LP_BASE_PAGE_HIGH(P)	((P) ? 0x836C : 0x436C)
+#define SXE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
+#define SXE_KRM_DSP_TXFFE_STATE_5(P)	((P) ? 0x8638 : 0x4638)
+#define SXE_KRM_RX_TRN_LINKUP_CTRL(P)	((P) ? 0x8B00 : 0x4B00)
+#define SXE_KRM_PMD_DFX_BURNIN(P)	((P) ? 0x8E00 : 0x4E00)
+#define SXE_KRM_PMD_FLX_MASK_ST20(P)	((P) ? 0x9054 : 0x5054)
+#define SXE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
+#define SXE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
+
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA		~(0x3 << 20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR		BIT(20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR		(0x2 << 20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SGMII_EN		BIT(25)
+#define SXE_KRM_PMD_FLX_MASK_ST20_AN37_EN		BIT(26)
+#define SXE_KRM_PMD_FLX_MASK_ST20_AN_EN		BIT(27)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_10M		~(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_100M		BIT(28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_1G		(0x2 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_10G		(0x3 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_AN		(0x4 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G		(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK		(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART	BIT(31)
+
+#define SXE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		BIT(9)
+#define SXE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		BIT(11)
+
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK	(7u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G	(2u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G	(4u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN		BIT(12)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN	BIT(13)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ		BIT(14)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC		BIT(15)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX		BIT(16)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR		BIT(18)
+#define SXE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX		BIT(24)
+#define SXE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR		BIT(26)
+#define SXE_KRM_LINK_S1_MAC_AN_COMPLETE		BIT(28)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_ENABLE		BIT(29)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_RESTART		BIT(31)
+
+#define SXE_KRM_AN_CNTL_1_SYM_PAUSE			BIT(28)
+#define SXE_KRM_AN_CNTL_1_ASM_PAUSE			BIT(29)
+
+#define SXE_KRM_AN_CNTL_8_LINEAR			BIT(0)
+#define SXE_KRM_AN_CNTL_8_LIMITING			BIT(1)
+
+#define SXE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE		BIT(10)
+#define SXE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE		BIT(11)
+#define SXE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D	BIT(12)
+#define SXE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D		BIT(19)
+
+#define SXE_KRM_DSP_TXFFE_STATE_C0_EN			BIT(6)
+#define SXE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN		BIT(15)
+#define SXE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN		BIT(16)
+
+#define SXE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL	BIT(4)
+#define SXE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS	BIT(2)
+
+#define SXE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK	(3u << 16)
+
+#define SXE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN	BIT(1)
+#define SXE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN	BIT(2)
+#define SXE_KRM_TX_COEFF_CTRL_1_CZERO_EN		BIT(3)
+#define SXE_KRM_TX_COEFF_CTRL_1_OVRRD_EN		BIT(31)
+
+#define SXE_SB_IOSF_INDIRECT_CTRL		0x00011144
+#define SXE_SB_IOSF_INDIRECT_DATA		0x00011148
+
+#define SXE_SB_IOSF_CTRL_ADDR_SHIFT		0
+#define SXE_SB_IOSF_CTRL_ADDR_MASK		0xFF
+#define SXE_SB_IOSF_CTRL_RESP_STAT_SHIFT	18
+#define SXE_SB_IOSF_CTRL_RESP_STAT_MASK \
+				(0x3 << SXE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define SXE_SB_IOSF_CTRL_CMPL_ERR_SHIFT	20
+#define SXE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+				(0xFF << SXE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define SXE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT	28
+#define SXE_SB_IOSF_CTRL_TARGET_SELECT_MASK	0x7
+#define SXE_SB_IOSF_CTRL_BUSY_SHIFT		31
+#define SXE_SB_IOSF_CTRL_BUSY		BIT(SXE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define SXE_SB_IOSF_TARGET_KR_PHY	0
+
+#define SXE_NW_MNG_IF_SEL		0x00011178
+#define SXE_NW_MNG_IF_SEL_MDIO_ACT		BIT(1)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_10M	BIT(17)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_100M	BIT(18)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_1G	BIT(19)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_2_5G	BIT(20)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_10G	BIT(21)
+#define SXE_NW_MNG_IF_SEL_SGMII_ENABLE	BIT(25)
+#define SXE_NW_MNG_IF_SEL_INT_PHY_MODE	BIT(24)
+#define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT	3
+#define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD	\
+				(0x1F << SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
+
+#endif 
+
diff --git a/drivers/net/sxe/include/sxe_version.h b/drivers/net/sxe/include/sxe_version.h
new file mode 100644
index 0000000000..50afd69a63
--- /dev/null
+++ b/drivers/net/sxe/include/sxe_version.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_VER_H__
+#define __SXE_VER_H__
+
+/* Build identification strings stamped in by the build system. */
+#define SXE_VERSION                "0.0.0.0"
+#define SXE_COMMIT_ID              "13cf402"
+#define SXE_BRANCH                 "feature/sagitta-1.3.0-P3-dpdk_patch_rwy"
+#define SXE_BUILD_TIME             "2024-08-24 11:02:12"
+
+
+/* Driver identity strings exposed to users and logs. */
+#define SXE_DRV_NAME                   "sxe"
+#define SXEVF_DRV_NAME                 "sxevf"
+/* NOTE(review): file header is SPDX BSD-3-Clause but this says GPL v2 —
+ * confirm which license actually applies to the DPDK PMD build.
+ */
+#define SXE_DRV_LICENSE                "GPL v2"
+#define SXE_DRV_COPYRIGHT              "Copyright (C), 2022, Linkdata Technology Co., Ltd."
+#define SXE_DRV_AUTHOR                 "Linkdata Technology Corporation"
+#define SXE_DRV_DESCRIPTION            "LD 1160-2X 2-port 10G SFP+ NIC"
+#define SXEVF_DRV_DESCRIPTION          "LD 1160-2X Virtual Function"
+#define SXE_DRV_CONNECTION             "Linkdata Technology 10G Network Connection"
+
+
+/* Firmware image identification. */
+#define SXE_FW_NAME                     "soc"
+#define SXE_FW_ARCH                     "arm32"
+
+/* Firmware build flavour; PS3_CFG_RELEASE is set by release builds. */
+#ifndef PS3_CFG_RELEASE
+#define PS3_SXE_FW_BUILD_MODE             "debug"
+#else
+#define PS3_SXE_FW_BUILD_MODE             "release"
+#endif
+
+#endif
diff --git a/drivers/net/sxe/meson.build b/drivers/net/sxe/meson.build
new file mode 100644
index 0000000000..5e7b49dcf6
--- /dev/null
+++ b/drivers/net/sxe/meson.build
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C), 2020, Wuxi Stars Micro System Technologies Co., Ltd.
+# NOTE(review): copyright holder differs from the rest of the series
+# (Linkdata Technology) — confirm which is intended.
+
+# Feature-selection compile flags for the sxe PMD.
+cflags += ['-DSXE_DPDK']
+cflags += ['-DSXE_HOST_DRIVER']
+cflags += ['-DSXE_DPDK_L4_FEATURES']
+cflags += ['-DSXE_DPDK_SRIOV']
+
+# NOTE(review): base/ sources are listed directly below instead of via a
+# base subdir build — remove these commented lines or enable the subdir.
+#subdir('base')
+#objs = [base_objs]
+
+deps += ['hash']
+sources = files(
+	'pf/sxe_main.c',
+	'pf/sxe_filter.c',
+	'pf/sxe_flow_ctrl.c',
+	'pf/sxe_irq.c',
+	'pf/sxe_ethdev.c',
+	'pf/sxe_offload.c',
+	'pf/sxe_queue.c',
+	'pf/sxe_rx.c',
+	'pf/sxe_tx.c',
+	'pf/sxe_stats.c',
+	'pf/sxe_pmd_hdc.c',
+	'pf/sxe_phy.c',
+	'pf/sxe_ptp.c',
+	'pf/sxe_vf.c',
+	'pf/sxe_dcb.c',
+	'vf/sxevf_main.c',
+	'vf/sxevf_filter.c',
+	'vf/sxevf_irq.c',
+	'vf/sxevf_msg.c',
+	'vf/sxevf_ethdev.c',
+	'vf/sxevf_stats.c',
+	'vf/sxevf_rx.c',
+	'vf/sxevf_tx.c',
+	'vf/sxevf_queue.c',
+	'vf/sxevf_offload.c',
+	'base/sxe_queue_common.c',
+	'base/sxe_rx_common.c',
+	'base/sxe_tx_common.c',
+	'base/sxe_offload_common.c',
+	'base/sxe_common.c',
+	'base/sxe_hw.c',
+	'base/sxevf_hw.c',
+)
+
+# Driver-specific testpmd command extensions.
+testpmd_sources = files('sxe_testpmd.c')
+
+includes += include_directories('base')
+includes += include_directories('pf')
+includes += include_directories('vf')
+includes += include_directories('include/sxe/')
+includes += include_directories('include/')
+
diff --git a/drivers/net/sxe/pf/rte_pmd_sxe.h b/drivers/net/sxe/pf/rte_pmd_sxe.h
new file mode 100644
index 0000000000..70d342d433
--- /dev/null
+++ b/drivers/net/sxe/pf/rte_pmd_sxe.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+/* Public, driver-specific API of the sxe PMD (loopback, TC bandwidth
+ * and MACsec control). All functions take an ethdev port id.
+ */
+#ifndef __PMD_SXE_H__
+#define __PMD_SXE_H__
+
+typedef uint8_t		u8;
+typedef uint16_t	u16;
+typedef uint32_t	u32;
+typedef int32_t		s32;
+
+/* Enable/disable TX loopback on the given port. */
+s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on);
+
+/* Set per-TC bandwidth weights; bw_weight has tc_num entries.
+ * NOTE(review): port is u8 here but u16 in every sibling API —
+ * confirm this is intentional (DPDK port ids are 16-bit).
+ */
+s32 rte_pmd_sxe_tc_bw_set(u8 port,
+				u8 tc_num, u8 *bw_weight);
+
+/* Enable MACsec offload; en selects encryption, rp_en replay protection. */
+s32 rte_pmd_sxe_macsec_enable(u16 port, u8 en, u8 rp_en);
+
+s32 rte_pmd_sxe_macsec_disable(u16 port);
+
+/* Configure TX secure channel with the given source MAC. */
+s32 rte_pmd_sxe_macsec_txsc_configure(u16 port, u8 *mac);
+
+/* Configure RX secure channel; pi is the port identifier of the SC. */
+s32 rte_pmd_sxe_macsec_rxsc_configure(u16 port, u8 *mac, u16 pi);
+
+/* Configure a TX secure association: index, association number,
+ * next packet number and key material.
+ */
+s32 rte_pmd_sxe_macsec_txsa_configure(u16 port, u8 sa_idx, u8 an,
+				 u32 pn, u8 *keys);
+
+/* Configure an RX secure association (same parameters as TX). */
+s32 rte_pmd_sxe_macsec_rxsa_configure(u16 port, u8 sa_idx, u8 an,
+				 u32 pn, u8 *keys);
+
+#endif
+
diff --git a/drivers/net/sxe/pf/sxe.h b/drivers/net/sxe/pf/sxe.h
new file mode 100644
index 0000000000..139480e90d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_H__
+#define __SXE_H__
+
+#include <rte_pci.h>
+#include <rte_time.h>
+
+#include "sxe_types.h"
+#include "sxe_filter.h"
+#include "sxe_irq.h"
+#include "sxe_stats.h"
+#include "sxe_phy.h"
+#include "sxe_vf.h"
+#include "sxe_dcb.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+#include "sxe_macsec.h"
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include "sxe_filter_ctrl.h"
+#include "sxe_fnav.h"
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include "sxe_tm.h"
+#endif
+
+struct sxe_hw;
+struct sxe_vlan_context;
+
+#define SXE_LPBK_DISABLED   0x0 
+#define SXE_LPBK_ENABLED    0x1 
+
+#define PCI_VENDOR_ID_STARS      0x1FF2
+#define SXE_DEV_ID_ASIC          0x10a1
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8*)(x))[0],((u8*)(x))[1], \
+	           ((u8*)(x))[2],((u8*)(x))[3], \
+	           ((u8*)(x))[4],((u8*)(x))[5]
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p)  rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p)  do {} while(0)
+#endif
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_sxe_prefetch(p)   rte_prefetch0(p)
+#else
+#define rte_sxe_prefetch(p)   do {} while (0)
+#endif
+
+/* IEEE 1588/PTP timestamping state for one port. */
+struct sxe_ptp_context {
+	struct rte_timecounter      systime_tc;	/* system time counter */
+	struct rte_timecounter      rx_tstamp_tc;	/* RX timestamp counter */
+	struct rte_timecounter      tx_tstamp_tc;	/* TX timestamp counter */
+	u32 tx_hwtstamp_sec;	/* last TX HW timestamp, seconds part */
+	u32 tx_hwtstamp_nsec;	/* last TX HW timestamp, nanoseconds part */
+};
+
+/* Per-device private data of the sxe PF driver; stored in
+ * rte_eth_dev.data->dev_private.
+ */
+struct sxe_adapter {
+	struct sxe_hw hw;	/* register access and HW state */
+
+	struct sxe_irq_context irq_ctxt;	/* interrupt handling state */
+
+	struct sxe_vlan_context vlan_ctxt;
+	struct sxe_mac_filter_context mac_filter_ctxt;
+#ifdef RTE_ADAPTER_HAVE_FNAV_CONF
+	struct rte_eth_fdir_conf fnav_conf;	/* flow-director config */
+#endif
+	struct sxe_ptp_context ptp_ctxt;	/* PTP timestamping */
+	struct sxe_phy_context phy_ctxt;	/* link/PHY management */
+	struct sxe_virtual_context vt_ctxt; 
+
+	struct sxe_stats_info stats_info;
+	struct sxe_dcb_context dcb_ctxt;	/* DCB configuration */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	struct sxe_macsec_context macsec_ctxt;
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	struct sxe_tm_context tm_ctxt;	/* traffic-manager state */
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	struct sxe_filter_context filter_ctxt;
+
+	struct sxe_fnav_context fnav_ctxt;
+#endif
+
+	/* true when bulk mbuf allocation is usable on the RX path */
+	bool rx_batch_alloc_allowed;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	bool rx_vec_allowed;	/* vectorized RX path usable */
+#endif
+	/* PCI address string used as device name (+1 for NUL) */
+	s8 name[PCI_PRI_STR_SIZE+1]; 
+
+	u32 mtu;
+
+	/* set once the RSS redirection table has been user-modified */
+	bool rss_reta_updated;
+
+	/* link-thread bookkeeping: running flag, thread id, stop request */
+	rte_atomic32_t link_thread_running;
+	pthread_t link_thread_tid;
+	bool is_stopped;
+};
+
+s32 sxe_hw_reset(struct sxe_hw *hw);
+
+void sxe_hw_start(struct sxe_hw *hw);
+
+bool is_sxe_supported(struct rte_eth_dev *dev);
+
+#endif 
diff --git a/drivers/net/sxe/pf/sxe_dcb.c b/drivers/net/sxe/pf/sxe_dcb.c
new file mode 100644
index 0000000000..5217cc655f
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_dcb.c
@@ -0,0 +1,1014 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_phy.h"
+#include "sxe_errno.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+#include "sxe_compat_version.h"
+#include "rte_pmd_sxe.h"
+
+#define DCB_RX_CONFIG  1
+#define DCB_TX_CONFIG  1
+
+#define DCB_CREDIT_QUANTUM	64   
+#define MAX_CREDIT_REFILL       511  
+#define MAX_CREDIT              4095 
+
+/* Initialize the DCB context with sane defaults: all TCs share the
+ * link evenly, every user priority maps to TC 0 and PFC is off.
+ */
+void sxe_dcb_init(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_dcb_config *cfg = &adapter->dcb_ctxt.config;
+	const u8 max_tc = SXE_DCB_MAX_TRAFFIC_CLASS;
+	struct sxe_tc_config *tc;
+	u8 idx;
+
+	/* Start from a clean slate, then fill in defaults. */
+	memset(cfg, 0, sizeof(*cfg));
+
+	cfg->num_tcs.pg_tcs = max_tc;
+	cfg->num_tcs.pfc_tcs = max_tc;
+
+	for (idx = 0; idx < max_tc; idx++) {
+		/* Even split; odd TCs absorb the rounding remainder. */
+		u8 share = (u8)(100 / max_tc + (idx & 1));
+
+		tc = &cfg->tc_config[idx];
+		tc->channel[DCB_PATH_TX].bwg_id = idx;
+		tc->channel[DCB_PATH_TX].bwg_percent = share;
+		tc->channel[DCB_PATH_RX].bwg_id = idx;
+		tc->channel[DCB_PATH_RX].bwg_percent = share;
+		tc->pfc_type = pfc_disabled;
+	}
+
+	/* All user priorities funnel into TC 0 by default. */
+	tc = &cfg->tc_config[0];
+	tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0xFF;
+	tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0xFF;
+
+	for (idx = 0; idx < MAX_BW_GROUP; idx++) {
+		cfg->bwg_link_percent[DCB_PATH_TX][idx] = 100;
+		cfg->bwg_link_percent[DCB_PATH_RX][idx] = 100;
+	}
+
+	cfg->rx_pba_config = SXE_DCB_PBA_EQUAL;
+	cfg->pfc_mode_enable = false;
+	cfg->vmdq_active = true;
+	cfg->round_robin_enable = false;
+}
+
+/* Resolve a user priority (0-7) to its traffic class for the given
+ * direction by scanning each TC's up_to_tc_bitmap from the highest
+ * configured TC down. TC 0 is the implicit fallback: the loop stops
+ * at tc == 0 without testing its bitmap, so an unmapped priority
+ * lands in TC 0. Returns 0 as well when no TCs are configured.
+ */
+static u8 sxe_dcb_get_tc_from_up(struct sxe_dcb_config *cfg,
+					u8 direction, u8 up)
+{
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+	u8 prio_mask = BIT(up);
+	u8 tc = cfg->num_tcs.pg_tcs;
+
+	if (!tc) {
+		goto l_ret;
+	}
+
+	/* Highest-numbered TC claiming this priority wins. */
+	for (tc--; tc; tc--) {
+		if (prio_mask & tc_config[tc].channel[direction].up_to_tc_bitmap) {
+			break;
+		}
+	}
+
+l_ret:
+	LOG_DEBUG("up[%u] to tc[%u]\n", up, tc);
+	return tc;
+}
+
+/* Fill map[] with the traffic class of every user priority. */
+static void sxe_dcb_up2tc_map_parse(struct sxe_dcb_config *cfg,
+						u8 direction, u8 *map)
+{
+	u8 prio;
+
+	for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
+		map[prio] = sxe_dcb_get_tc_from_up(cfg, direction, prio);
+		LOG_DEBUG("up[%u] --- up2tc_map[%u]\n", prio, map[prio]);
+	}
+}
+
+/* Configure priority flow control (802.1Qbb) for the TC that the
+ * requested priority maps to. Validates the high/low watermarks
+ * against the TC's RX packet buffer, programs the flow-control
+ * registers, then enables PFC on that TC.
+ * Returns 0 on success, -EINVAL on bad watermarks, -EIO on HW failure.
+ */
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+					struct rte_eth_pfc_conf *pfc_conf)
+{
+	s32 ret;
+	u32 rx_buf_size;
+	u32 max_high_water;
+	u8 tc_idx;
+	u8  up2tc_map[MAX_USER_PRIORITY] = { 0 };
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	struct sxe_dcb_config *dcb_config = &adapter->dcb_ctxt.config;
+
+	/* Index order matches enum rte_eth_fc_mode. */
+	static const enum sxe_fc_mode fcmode[] = {
+		SXE_FC_NONE,
+		SXE_FC_RX_PAUSE,
+		SXE_FC_TX_PAUSE,
+		SXE_FC_FULL,
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Translate the requested priority into a traffic class. */
+	sxe_dcb_up2tc_map_parse(dcb_config, DCB_PATH_RX, up2tc_map);
+	tc_idx = up2tc_map[pfc_conf->priority];
+	rx_buf_size = sxe_hw_rx_pkt_buf_size_get(hw, tc_idx);
+	PMD_LOG_DEBUG(INIT, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	/* High water must leave room for one max-size frame and
+	 * exceed the low water mark.
+	 */
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> SXE_RX_PKT_BUF_SIZE_SHIFT;
+	if ((pfc_conf->fc.high_water > max_high_water) ||
+	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+		PMD_LOG_ERR(INIT, "Invalid high/low water setup value in KB, "
+			    "high water=0x%x, low water=0x%x",
+			    pfc_conf->fc.high_water, pfc_conf->fc.low_water);
+		PMD_LOG_ERR(INIT, "High_water must <= 0x%x", max_high_water);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Program mode, pause timing and per-TC watermarks. */
+	sxe_hw_fc_requested_mode_set(hw, fcmode[pfc_conf->fc.mode]);
+	sxe_hw_fc_pause_time_set(hw, pfc_conf->fc.pause_time);
+	sxe_hw_fc_send_xon_set(hw, pfc_conf->fc.send_xon);
+	sxe_hw_fc_tc_low_water_mark_set(hw, tc_idx, pfc_conf->fc.low_water);
+	sxe_hw_fc_tc_high_water_mark_set(hw, tc_idx, pfc_conf->fc.high_water);
+
+	ret = sxe_pfc_enable(adapter, tc_idx);
+
+	/* Not-negotiated is treated as best-effort success. */
+	if ((ret == 0) || (ret == SXE_ERR_FC_NOT_NEGOTIATED)) {
+		PMD_LOG_DEBUG(INIT, "pfc set end ret = %d", ret);
+		ret = 0;
+		goto l_end;
+	}
+
+	PMD_LOG_ERR(INIT, "sxe_dcb_pfc_enable = 0x%x", ret);
+	ret = -EIO;
+l_end:
+	return ret;
+}
+
+/* Report the current DCB configuration (TC count, priority-to-TC map,
+ * per-TC queue ranges and bandwidth shares) via rte_eth_dcb_info.
+ * Queue layout depends on whether VMDq and/or SR-IOV are active; the
+ * fixed base/count tables below mirror the HW queue partitioning for
+ * 4-TC and 8-TC modes. Always returns 0.
+ */
+s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_dcb_config *dcb_config = &adapter->dcb_ctxt.config;
+
+	struct sxe_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	u8 tcs_num;
+	u8 i, j;
+
+	/* Without DCB RX mode only a single TC is exposed. */
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
+		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+	} else {
+		dcb_info->nb_tcs = 1;
+	}
+
+	tc_queue = &dcb_info->tc_queue;
+	tcs_num = dcb_info->nb_tcs;
+
+	if (dcb_config->vmdq_active) { 
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+		}
+
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			/* SR-IOV: the PF owns one queue per TC. */
+			for (j = 0; j < tcs_num; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			/* VMDq: queues laid out pool-major, one per TC. */
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < tcs_num; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * tcs_num + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * tcs_num + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
+			}
+		}
+	} else { 
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+		}
+
+		/* Fixed HW queue partitioning: RX is split evenly,
+		 * TX gives lower TCs larger contiguous ranges.
+		 */
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 64;
+			dcb_info->tc_queue.tc_txq[0][2].base = 96;
+			dcb_info->tc_queue.tc_txq[0][3].base = 112;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 32;
+			dcb_info->tc_queue.tc_txq[0][2].base = 64;
+			dcb_info->tc_queue.tc_txq[0][3].base = 80;
+			dcb_info->tc_queue.tc_txq[0][4].base = 96;
+			dcb_info->tc_queue.tc_txq[0][5].base = 104;
+			dcb_info->tc_queue.tc_txq[0][6].base = 112;
+			dcb_info->tc_queue.tc_txq[0][7].base = 120;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+		}
+	}
+
+	/* Report each TC's TX bandwidth-group share. */
+	for (i = 0; i < dcb_info->nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		dcb_info->tc_bws[i] = tc->channel[DCB_PATH_TX].bwg_percent;
+	}
+
+	return 0;
+}
+
+/* Derive RX DCB parameters (TC count, priority-to-TC bitmaps) from
+ * the application's VMDq+DCB RX configuration.
+ */
+static void sxe_dcb_vmdq_rx_param_get(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	u8 up, tc_id;
+
+	/* 16 pools carry 8 TCs each; any other pool count means 4 TCs. */
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
+	}
+
+	/* Drop stale RX priority mappings before rebuilding them. */
+	for (tc_id = 0; tc_id < MAX_TRAFFIC_CLASS; tc_id++)
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_RX].up_to_tc_bitmap = 0;
+
+	for (up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++) {
+		tc_id = vmdq_rx_conf->dcb_tc[up];
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_RX].up_to_tc_bitmap |=
+						(u8)(1 << up);
+	}
+}
+
+/* Program the RX half of VMDq+DCB: multi-queue mode, default pool,
+ * priority-to-TC mapping and the VLAN-to-pool filter table.
+ */
+void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct rte_eth_vmdq_dcb_conf *cfg =
+			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	enum rte_eth_nb_pools num_pools = cfg->nb_queue_pools;
+	u16 map_idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Only 16 or 32 pools are valid for VMDq+DCB; otherwise
+	 * fall back to non-RSS operation.
+	 */
+	if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
+		sxe_rss_disable(dev);
+		return;
+	}
+
+	sxe_hw_dcb_vmdq_mq_configure(hw, num_pools);
+	sxe_hw_dcb_vmdq_default_pool_configure(hw,
+						cfg->enable_default_pool,
+						cfg->default_pool);
+	sxe_hw_dcb_vmdq_up_2_tc_configure(hw, cfg->dcb_tc);
+	sxe_hw_dcb_vmdq_vlan_configure(hw, num_pools);
+
+	/* Install each VLAN-to-pool filter entry. */
+	for (map_idx = 0; map_idx < cfg->nb_pool_maps; map_idx++)
+		sxe_hw_dcb_vmdq_pool_configure(hw, map_idx,
+					cfg->pool_map[map_idx].vlan_id,
+					cfg->pool_map[map_idx].pools);
+}
+
+/* Derive RX DCB parameters from the plain (non-VMDq) DCB RX config. */
+static void sxe_dcb_rx_param_get(struct rte_eth_dev *dev,
+		struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_rx_conf *rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	u8 up, tc_id;
+
+	dcb_config->num_tcs.pg_tcs = (u8)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (u8)rx_conf->nb_tcs;
+
+	/* Reset every RX priority-to-TC bitmap before rebuilding. */
+	for (tc_id = 0; tc_id < MAX_TRAFFIC_CLASS; tc_id++)
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_RX].up_to_tc_bitmap = 0;
+
+	for (up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++) {
+		tc_id = rx_conf->dcb_tc[up];
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_RX].up_to_tc_bitmap |=
+						(u8)(1 << up);
+	}
+}
+
+/* Push the RX DCB mode (VMDq/SR-IOV aware) and TC count to hardware. */
+static void sxe_dcb_rx_hw_configure(struct rte_eth_dev *dev,
+		       struct sxe_dcb_config *dcb_config)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_dcb_rx_configure(&adapter->hw, dcb_config->vmdq_active,
+				RTE_ETH_DEV_SRIOV(dev).active,
+				dcb_config->num_tcs.pg_tcs);
+}
+
+/* Derive TX DCB parameters (TC count, priority-to-TC bitmaps) from
+ * the application's VMDq+DCB TX configuration.
+ */
+static void sxe_dcb_vmdq_tx_param_get(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	u8 up, tc_id;
+
+	/* 16 pools carry 8 TCs each; any other pool count means 4 TCs. */
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
+	}
+
+	/* Drop stale TX priority mappings before rebuilding them. */
+	for (tc_id = 0; tc_id < MAX_TRAFFIC_CLASS; tc_id++)
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_TX].up_to_tc_bitmap = 0;
+
+	for (up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++) {
+		tc_id = vmdq_tx_conf->dcb_tc[up];
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_TX].up_to_tc_bitmap |=
+						(u8)(1 << up);
+	}
+}
+
+/* Program the TX half of VMDq+DCB: enable transmit on all pools and
+ * apply the TX DCB mode with the configured TC count.
+ */
+static void sxe_dcb_vmdq_tx_hw_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Transmit is enabled for every pool starting at pool 0. */
+	sxe_hw_pool_xmit_enable(&adapter->hw, 0,
+				(u8)vmdq_tx_conf->nb_queue_pools);
+
+	sxe_hw_dcb_tx_configure(&adapter->hw, dcb_config->vmdq_active,
+				dcb_config->num_tcs.pg_tcs);
+}
+
+/* Derive TX DCB parameters from the plain (non-VMDq) DCB TX config. */
+static void sxe_dcb_tx_param_get(struct rte_eth_dev *dev,
+		struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_tx_conf *tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+	u8 up, tc_id;
+
+	dcb_config->num_tcs.pg_tcs = (u8)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (u8)tx_conf->nb_tcs;
+
+	/* Reset every TX priority-to-TC bitmap before rebuilding. */
+	for (tc_id = 0; tc_id < MAX_TRAFFIC_CLASS; tc_id++)
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_TX].up_to_tc_bitmap = 0;
+
+	for (up = 0; up < RTE_ETH_DCB_NUM_USER_PRIORITIES; up++) {
+		tc_id = tx_conf->dcb_tc[up];
+		dcb_config->tc_config[tc_id].channel[DCB_PATH_TX].up_to_tc_bitmap |=
+						(u8)(1 << up);
+	}
+}
+
+/* Credits needed to cover half a max-size frame, rounded up to a
+ * whole 64-byte credit quantum.
+ */
+static u32 sxe_dcb_min_credit_get(u32 max_frame)
+{
+	u32 half_frame = max_frame / 2;
+
+	return (half_frame + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM;
+}
+
+/* Effective link share of one TC: its percentage within its bandwidth
+ * group scaled by the group's percentage of the link.
+ */
+static u16 sxe_dcb_cee_tc_link_percent_get(
+			struct sxe_dcb_config *cee_config,
+			u8 direction, u8 tc_index)
+{
+	struct sxe_tc_bw_alloc *chan =
+		&cee_config->tc_config[tc_index].channel[direction];
+	u16 group_percent =
+		cee_config->bwg_link_percent[direction][chan->bwg_id];
+
+	return (group_percent * chan->bwg_percent) / 100;
+}
+
+/* Smallest non-zero per-TC link share in the given direction;
+ * defaults to 100 when every TC's share is zero.
+ */
+static u32 sxe_dcb_cee_min_link_percent_get(
+			struct sxe_dcb_config *cee_config, u8 direction)
+{
+	u32 smallest = 100;
+	u8 tc_id;
+
+	for (tc_id = 0; tc_id < MAX_TRAFFIC_CLASS; tc_id++) {
+		u16 share = sxe_dcb_cee_tc_link_percent_get(
+					cee_config, direction, tc_id);
+
+		if (share && share < smallest)
+			smallest = share;
+	}
+
+	return smallest;
+}
+
+/* Compute per-TC CEE credit parameters (refill and max) from the
+ * bandwidth-group percentages. Credits are clamped so every TC can
+ * always send at least half a max-size frame (min_credit) and refill
+ * never exceeds MAX_CREDIT_REFILL. Always returns 0.
+ */
+static s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
+				   struct sxe_dcb_config *cee_config,
+				   u32 max_frame, u8 direction)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+	struct sxe_tc_bw_alloc *tc_info;
+	u32 min_credit;
+	u32 total_credit;
+	u32 min_link_percent;
+	u32 credit_refill;
+	u32 credit_max;
+	u16 link_percentage;
+	u8  tc_index;
+
+	LOG_DEBUG_BDF("cee_config[%p] input max_frame[%u] direction[%s]\n",
+			cee_config, max_frame, direction ? "RX" : "TX");
+
+	min_credit = sxe_dcb_min_credit_get(max_frame);
+	LOG_DEBUG_BDF("cee_config[%p] max_frame[%u] got min_credit[%u]\n",
+			cee_config, max_frame, min_credit);
+
+	/* min_link_percent is >= 1 (zero shares are skipped), so the
+	 * division below cannot fault.
+	 */
+	min_link_percent = sxe_dcb_cee_min_link_percent_get(cee_config, direction);
+	LOG_DEBUG_BDF("cee_config[%p] direction[%s] got min_link_percent[%u]\n",
+			cee_config, direction ? "RX" : "TX", min_link_percent);
+
+	/* Size the total credit pool so the smallest TC still gets
+	 * at least min_credit when shares are applied.
+	 */
+	total_credit = (min_credit / min_link_percent) + 1;
+	LOG_DEBUG_BDF("cee_config[%p] total_credit=%u\n", cee_config, total_credit);
+
+	for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
+		tc_info = &cee_config->tc_config[tc_index].channel[direction];
+
+		link_percentage = sxe_dcb_cee_tc_link_percent_get(
+					cee_config, direction, tc_index);
+		LOG_DEBUG_BDF("tc[%u] bwg_percent=%u, link_percentage=%u\n",
+			tc_index, tc_info->bwg_percent, link_percentage);
+
+		/* A TC with a non-zero group share never rounds to 0%. */
+		if (tc_info->bwg_percent > 0 && link_percentage == 0) {
+			link_percentage = 1;
+		}
+
+		tc_info->link_percent = (u8)link_percentage;
+
+		/* Refill: proportional share, clamped to HW maximum
+		 * and floored at min_credit.
+		 */
+		credit_refill = min(link_percentage * total_credit,
+				    (u32)MAX_CREDIT_REFILL);
+
+		if (credit_refill < min_credit) {
+			credit_refill = min_credit;
+		}
+
+		tc_info->data_credits_refill = (u16)credit_refill;
+		LOG_DEBUG_BDF("tc[%u] credit_refill=%u\n",
+					tc_index, credit_refill);
+
+		/* Max credits: proportional slice of MAX_CREDIT,
+		 * also floored at min_credit.
+		 */
+		credit_max = (link_percentage * MAX_CREDIT) / 100;
+
+		if (credit_max < min_credit) {
+			credit_max = min_credit;
+		}
+		LOG_DEBUG_BDF("tc[%u] credit_max=%u\n",
+					tc_index, credit_max);
+
+		/* Descriptor credits only apply on the TX path. */
+		if (direction == DCB_PATH_TX) {
+			cee_config->tc_config[tc_index].desc_credits_max =
+				(u16)credit_max;
+		}
+
+		tc_info->data_credits_max = (u16)credit_max;
+	}
+
+	return ret;
+}
+
+static void sxe_dcb_cee_refill_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u16 *refill)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		refill[tc] = tc_config[tc]. \
+			channel[direction].data_credits_refill;
+		LOG_DEBUG("tc[%u] --- refill[%u]\n", tc, refill[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_config *cfg,
+						u16 *max_credits)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		max_credits[tc] = tc_config[tc].desc_credits_max;
+		LOG_DEBUG("tc[%u] --- max_credits[%u]\n", tc, max_credits[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *bwgid)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		bwgid[tc] = tc_config[tc].channel[direction].bwg_id;
+		LOG_DEBUG("tc[%u] --- bwgid[%u]\n", tc, bwgid[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_prio_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *ptype)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		ptype[tc] = tc_config[tc].channel[direction].prio_type;
+		LOG_DEBUG("tc[%u] --- ptype[%u]\n", tc, ptype[tc]);
+	}
+
+	return;
+}
+
+static void sxe_dcb_cee_pfc_parse(struct sxe_dcb_config *cfg,
+						u8 *map, u8 *pfc_en)
+{
+	u32 up;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (*pfc_en = 0, up = 0; up < MAX_TRAFFIC_CLASS; up++) {
+		if (tc_config[map[up]].pfc_type != pfc_disabled) {
+			*pfc_en |= BIT(up);
+		}
+	}
+	LOG_DEBUG("cfg[%p] pfc_en[0x%x]\n", cfg, *pfc_en);
+
+	return;
+}
+
+static s32 sxe_dcb_tc_stats_configure(struct sxe_hw *hw,
+					struct sxe_dcb_config *dcb_config)
+{
+	s32 ret;
+	u8 tc_count = 8;
+	bool vmdq_active = false;
+
+	if (dcb_config != NULL) {
+		tc_count = dcb_config->num_tcs.pg_tcs;
+		vmdq_active = dcb_config->vmdq_active;
+	}
+
+	if (!((tc_count == 8 && vmdq_active == false) || tc_count == 4)) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(INIT, "dcb tc stats configure failed, "
+				"tc_num = %u, vmdq_active = %s",
+				tc_count, vmdq_active ? "on" : "off");
+		goto l_end;
+	}
+
+	sxe_hw_dcb_tc_stats_configure(hw, tc_count, vmdq_active);
+
+l_end:
+	return ret;
+}
+
+static void sxe_dcb_rx_mq_mode_configure(struct rte_eth_dev *dev,
+					struct sxe_dcb_config *dcb_config,
+					u8 *rx_configed)
+{
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		dcb_config->vmdq_active = true;
+		*rx_configed = DCB_RX_CONFIG;
+
+		sxe_dcb_vmdq_rx_param_get(dev, dcb_config);
+		sxe_dcb_vmdq_rx_hw_configure(dev);
+		break;
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
+		dcb_config->vmdq_active = false;
+		*rx_configed = DCB_RX_CONFIG;
+
+		sxe_dcb_rx_param_get(dev, dcb_config);
+		sxe_dcb_rx_hw_configure(dev, dcb_config);
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "Incorrect DCB RX mode configuration");
+		break;
+	}
+
+	return;
+}
+
+static void sxe_dcb_tx_mq_mode_configure(struct rte_eth_dev *dev,
+					struct sxe_dcb_config *dcb_config,
+					u8 *tx_configed)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		dcb_config->vmdq_active = true;
+		*tx_configed = DCB_TX_CONFIG;
+
+		sxe_dcb_vmdq_tx_param_get(dev, dcb_config);
+		sxe_dcb_vmdq_tx_hw_configure(dev, dcb_config);
+		break;
+
+	case RTE_ETH_MQ_TX_DCB:
+		dcb_config->vmdq_active = false;
+		*tx_configed = DCB_TX_CONFIG;
+
+		sxe_dcb_tx_param_get(dev, dcb_config);
+		sxe_hw_dcb_tx_configure(hw, dcb_config->vmdq_active,
+				dcb_config->num_tcs.pg_tcs);
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "Incorrect DCB TX mode configuration");
+		break;
+	}
+
+	return;
+}
+
+static void sxe_dcb_bwg_percentage_alloc(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	u8 i;
+	struct sxe_tc_config *tc;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_bw_config *bw_conf = &adapter->dcb_ctxt.bw_config;
+
+	u8 nb_tcs = dcb_config->num_tcs.pfc_tcs;
+
+	if (nb_tcs == RTE_ETH_4_TCS) {
+
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if (map[i] >= nb_tcs) {
+			PMD_LOG_INFO(DRV, "map[up-%u] to tc[%u] not exist, "
+					"change to tc 0", i, map[i]);
+			map[i] = 0;
+		}
+	}
+
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs) {
+				tc->channel[DCB_PATH_TX].bwg_percent =
+					(u8)(100 / nb_tcs);
+			}
+
+			tc->channel[DCB_PATH_RX].bwg_percent =
+						(u8)(100 / nb_tcs);
+		}
+		for (; i < MAX_TRAFFIC_CLASS; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->channel[DCB_PATH_TX].bwg_percent = 0;
+			tc->channel[DCB_PATH_RX].bwg_percent = 0;
+		}
+	} else {
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs) {
+				tc->channel[DCB_PATH_TX].bwg_percent =
+					(u8)(100 / nb_tcs + (i & 1));
+			}
+
+			tc->channel[DCB_PATH_RX].bwg_percent =
+				(u8)(100 / nb_tcs + (i & 1));
+		}
+	}
+
+	return;
+}
+
+static void sxe_dcb_rx_pkt_buf_configure(struct sxe_hw *hw,
+						u16 rx_buffer_size, u8 tcs_num)
+{
+	u8 i;
+	u16 pbsize;
+
+	pbsize = (u16)(rx_buffer_size / tcs_num);
+
+	for (i = 0; i < tcs_num; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
+	}
+
+	for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
+	}
+
+	return;
+}
+
+static void sxe_dcb_tx_pkt_buf_configure(struct sxe_hw *hw, u8 tcs_num)
+{
+	sxe_hw_tx_pkt_buf_switch(hw, false);
+
+	sxe_hw_tx_pkt_buf_size_configure(hw, tcs_num);
+	sxe_hw_tx_pkt_buf_thresh_configure(hw, tcs_num, true);
+
+	sxe_hw_tx_pkt_buf_switch(hw, true);
+	return;
+}
+
+static void sxe_dcb_rx_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
+	u16 refill[MAX_TRAFFIC_CLASS] = {0};
+	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+
+	sxe_dcb_rx_pkt_buf_configure(hw, SXE_RX_PKT_BUF_SIZE, dcb_config->num_tcs.pg_tcs);
+
+	sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_RX, refill);
+	sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_RX, bwgid);
+	sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_RX, tsa);
+	sxe_dcb_cee_max_credits_parse(dcb_config, max);
+
+	sxe_hw_dcb_rx_bw_alloc_configure(hw, refill, max,
+				bwgid, tsa, map, MAX_USER_PRIORITY);
+	return;
+}
+
+static void sxe_dcb_tx_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
+	u16 refill[MAX_TRAFFIC_CLASS] = {0};
+	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+
+	sxe_dcb_tx_pkt_buf_configure(hw, dcb_config->num_tcs.pg_tcs);
+
+	sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_TX, refill);
+	sxe_dcb_cee_max_credits_parse(dcb_config, max);
+	sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_TX, bwgid);
+	sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_TX, tsa);
+
+	sxe_hw_dcb_tx_desc_bw_alloc_configure(hw, refill, max, bwgid, tsa);
+	sxe_hw_dcb_tx_data_bw_alloc_configure(hw, refill, max,
+				bwgid, tsa, map, MAX_USER_PRIORITY);
+
+	return;
+}
+
+static void sxe_dcb_pfc_configure(struct sxe_hw *hw,
+					struct sxe_dcb_config *dcb_config,
+					u8 *map)
+{
+	u8 nb_tcs = dcb_config->num_tcs.pg_tcs;
+	u16 pbsize;
+	u8 i, pfc_en;
+	struct sxe_tc_config *tc;
+
+	pbsize = (u16)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
+	for (i = 0; i < nb_tcs; i++) {
+		sxe_hw_fc_tc_high_water_mark_set(hw, i, (pbsize * 3) / 4);
+		sxe_hw_fc_tc_low_water_mark_set(hw, i, pbsize / 4);
+
+		tc = &dcb_config->tc_config[i];
+		tc->pfc_type = pfc_enabled_full;
+	}
+
+	sxe_dcb_cee_pfc_parse(dcb_config, map, &pfc_en);
+	if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS) {
+		pfc_en &= 0x0F;
+	}
+
+	sxe_hw_dcb_pfc_configure(hw, pfc_en, map, MAX_USER_PRIORITY);
+
+	return;
+}
+
+static void sxe_dcb_hw_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config)
+{
+	u8 rx_configed = 0;
+	u8 tx_configed = 0;
+	u8 map[MAX_TRAFFIC_CLASS] = {0};
+	u32 max_frame = dev->data->mtu + SXE_ETH_DEAD_LOAD;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_dcb_rx_mq_mode_configure(dev, dcb_config, &rx_configed);
+	sxe_dcb_tx_mq_mode_configure(dev, dcb_config, &tx_configed);
+
+	sxe_dcb_up2tc_map_parse(dcb_config, DCB_PATH_RX, map);
+
+	sxe_dcb_bwg_percentage_alloc(dev, dcb_config, map);
+
+	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_TX);
+	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_RX);
+
+	if (rx_configed) {
+		sxe_dcb_rx_configure(dev, dcb_config, map);
+	}
+
+	if (tx_configed) {
+		sxe_dcb_tx_configure(dev, dcb_config, map);
+	}
+
+	sxe_dcb_tc_stats_configure(hw, dcb_config);
+
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
+		sxe_dcb_pfc_configure(hw, dcb_config, map);
+	}
+
+	return;
+}
+
+void sxe_dcb_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	struct sxe_dcb_config *dcb_cfg = &adapter->dcb_ctxt.config;
+	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if ((dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB) &&
+		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB) &&
+		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)) {
+		PMD_LOG_INFO(INIT, "dcb config failed, cause mq_mode=0x%x", 
+				(u8)dev_conf->rxmode.mq_mode);
+		goto l_end;
+	}
+
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES) {
+		PMD_LOG_INFO(INIT, "dcb config failed, cause nb_rx_queues=%u > %u", 
+			dev->data->nb_rx_queues, RTE_ETH_DCB_NUM_QUEUES);
+		goto l_end;
+	}
+
+	sxe_dcb_hw_configure(dev, dcb_cfg);
+
+l_end:
+	return;
+}
+
+s32 rte_pmd_sxe_tc_bw_set(u8 port,
+				u8 tc_num, u8 *bw_weight)
+{
+	struct sxe_adapter *adapter;
+	struct rte_eth_dev *dev;
+	struct sxe_dcb_config *dcb_config;
+	struct sxe_tc_config *tc;
+	struct rte_eth_conf *eth_conf;
+	struct sxe_bw_config *bw_conf;
+	u8 i;
+	u8 nb_tcs;
+	u16 sum;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		goto l_end;
+	}
+
+	if (tc_num > MAX_TRAFFIC_CLASS) {
+		PMD_LOG_ERR(DRV, "TCs should be no more than %d.",
+				MAX_TRAFFIC_CLASS);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	adapter = dev->data->dev_private;
+	dcb_config = &adapter->dcb_ctxt.config;
+	bw_conf = &adapter->dcb_ctxt.bw_config;
+	eth_conf = &dev->data->dev_conf;
+
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
+		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_32_POOLS) {
+			nb_tcs = RTE_ETH_4_TCS;
+		} else {
+			nb_tcs = RTE_ETH_8_TCS;
+		}
+	} else {
+		nb_tcs = 1;
+	}
+
+	if (nb_tcs != tc_num) {
+		PMD_LOG_ERR(DRV,
+			    "Weight should be set for all %d enabled TCs.",
+			    nb_tcs);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sum = 0;
+	for (i = 0; i < nb_tcs; i++) {
+		sum += bw_weight[i];
+	}
+
+	if (sum != 100) {
+		PMD_LOG_ERR(DRV,
+			    "The summary of the TC weight should be 100.");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	for (i = 0; i < nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_percent = bw_weight[i];
+	}
+
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_percent = 0;
+	}
+
+	bw_conf->tc_num = nb_tcs;
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_dcb.h b/drivers/net/sxe/pf/sxe_dcb.h
new file mode 100644
index 0000000000..accfc930af
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_dcb.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+ 
+#ifndef __SXE_DCB_H__
+#define __SXE_DCB_H__
+#include <stdbool.h>
+
+#define PBA_STRATEGY_EQUAL       (0)    
+#define PBA_STRATEGY_WEIGHTED    (1)	
+#define MAX_BW_GROUP             8
+#define MAX_USER_PRIORITY        8
+#define SXE_DCB_MAX_TRAFFIC_CLASS        8
+
+enum sxe_dcb_strict_prio_type {
+	DCB_PRIO_NONE = 0, 
+	DCB_PRIO_GROUP,    
+	DCB_PRIO_LINK      
+};
+enum {
+	DCB_PATH_TX   =  0,
+	DCB_PATH_RX   =  1,
+	DCB_PATH_NUM  =  DCB_PATH_RX + 1,
+};
+
+enum sxe_dcb_tsa {
+	sxe_dcb_tsa_ets = 0,
+	sxe_dcb_tsa_group_strict_cee,
+	sxe_dcb_tsa_strict
+};
+
+enum sxe_dcb_pba_config {
+	SXE_DCB_PBA_EQUAL = PBA_STRATEGY_EQUAL,
+	SXE_DCB_PBA_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct sxe_dcb_num_tcs {
+	u8 pg_tcs;	
+	u8 pfc_tcs;
+};
+
+struct sxe_tc_bw_alloc {
+	u8 bwg_id;		  
+	u8 bwg_percent;		  
+	u8 link_percent;	  
+	u8 up_to_tc_bitmap;	  
+	u16 data_credits_refill;  
+	u16 data_credits_max;	  
+	enum sxe_dcb_strict_prio_type prio_type; 
+};
+
+enum sxe_dcb_pfc_type {
+	pfc_disabled = 0,
+	pfc_enabled_full,
+	pfc_enabled_tx,
+	pfc_enabled_rx
+};
+
+struct sxe_tc_config {
+	struct sxe_tc_bw_alloc channel[DCB_PATH_NUM]; 
+	enum sxe_dcb_pfc_type  pfc_type; 
+
+	u16 desc_credits_max; 
+	u8 tc; 
+};
+
+struct sxe_dcb_config {
+	struct sxe_tc_config tc_config[SXE_DCB_MAX_TRAFFIC_CLASS];
+	struct sxe_dcb_num_tcs num_tcs;
+	u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP]; 
+	bool pfc_mode_enable;
+	bool round_robin_enable;
+
+	enum sxe_dcb_pba_config rx_pba_config;
+	bool vmdq_active;
+};
+
+struct sxe_bw_config {
+	u8 tc_num; 
+};
+
+struct sxe_dcb_context {
+	struct sxe_dcb_config config;
+	struct sxe_bw_config bw_config;
+};
+
+void sxe_dcb_init(struct rte_eth_dev *dev);
+
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 
+					struct rte_eth_pfc_conf *pfc_conf);
+
+s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info);
+
+void sxe_dcb_configure(struct rte_eth_dev *dev);
+
+void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_ethdev.c b/drivers/net/sxe/pf/sxe_ethdev.c
new file mode 100644
index 0000000000..00c6674f75
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ethdev.c
@@ -0,0 +1,1109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#include <ethdev_pci.h>
+#else
+#include <bus_pci_driver.h>
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#include <ethdev_pci.h>
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_pmd_sxe.h>
+#include <rte_alarm.h>
+
+#include "sxe_types.h"
+#include "sxe_logs.h"
+#include "sxe_compat_platform.h"
+#include "sxe_errno.h"
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_ethdev.h"
+#include "sxe_filter.h"
+#include "sxe_rx.h"
+#include "sxe_tx.h"
+#include "sxe_offload.h"
+#include "sxe_queue.h"
+#include "sxe_irq.h"
+#include "sxe_stats.h"
+#include "sxe_phy.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_flow_ctrl.h"
+#include "sxe_ptp.h"
+#include "sxe_cli.h"
+#include "drv_msg.h"
+#include "sxe_vf.h"
+#include "sxe_dcb.h"
+#include "sxe_version.h"
+#include "sxe_compat_version.h"
+#include <rte_string_fns.h>
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include "sxe_tm.h"
+#endif
+
+#define SXE_DEFAULT_MTU             1500
+#define SXE_ETH_HLEN                14
+#define SXE_ETH_FCS_LEN             4
+#define SXE_ETH_FRAME_LEN           1514
+
+#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD) 
+
/* Rx descriptor ring limits reported to applications via dev_infos_get. */
STATIC const struct rte_eth_desc_lim sxe_rx_desc_lim = {
	.nb_max = SXE_MAX_RING_DESC,
	.nb_min = SXE_MIN_RING_DESC,
	.nb_align = SXE_RX_DESC_RING_ALIGN,
};
+
/* Tx descriptor ring limits (including segment limits) reported to
 * applications via dev_infos_get. */
STATIC const struct rte_eth_desc_lim sxe_tx_desc_lim = {
	.nb_max = SXE_MAX_RING_DESC,
	.nb_min = SXE_MIN_RING_DESC,
	.nb_align = SXE_TX_DESC_RING_ALIGN,
	.nb_seg_max = SXE_TX_MAX_SEG,
	.nb_mtu_seg_max = SXE_TX_MAX_SEG,
};
+
+s32 sxe_dev_reset(struct rte_eth_dev *eth_dev);
+
+STATIC s32 sxe_dev_configure(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Rx mode check */
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		PMD_LOG_DEBUG(INIT, "rx offload rss hash");
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
+
+	/* Multi queue mode check */
+	ret  = sxe_mq_mode_check(dev);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "sxe mq mode check fails with %d.",
+			    ret);
+		goto l_end;
+	}
+
+	irq->action |= SXE_IRQ_LINK_UPDATE;
+
+	/* Default use batch alloc  */
+	adapter->rx_batch_alloc_allowed = true;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	adapter->rx_vec_allowed = true;
+#endif
+
+l_end:
+	return ret;
+}
+
+static void sxe_txrx_start(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+
+	sxe_hw_rx_cap_switch_on(hw);
+
+	sxe_hw_mac_txrx_enable(hw);
+
+	return;
+}
+
+static s32 sxe_link_configure(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	bool link_up = false;
+	u32 conf_speeds;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	/* Disable loopback */
+	sxe_hw_loopback_switch(hw, false);
+
+	sxe_sfp_tx_laser_enable(adapter);
+
+	dev->data->dev_link.link_status = link_up;
+
+	/* Rate of obtaining user configuration */
+	ret = sxe_conf_speed_get(dev, &conf_speeds);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "invalid link setting");
+		goto l_end;
+	}
+
+	if (adapter->phy_ctxt.sfp_info.multispeed_fiber) {
+		ret = sxe_multispeed_sfp_link_configure(dev, conf_speeds, false);
+	} else {
+		ret = sxe_sfp_link_configure(dev);
+	}
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link config failed, speed=%x",
+						conf_speeds);
+		ret = -EIO;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_loopback_pcs_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame)
+{
+	s32 ret;
+	sxe_pcs_cfg_s pcs_cfg;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	pcs_cfg.mode = mode;
+	pcs_cfg.mtu  = max_frame;
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_PCS_SDS_INIT,
+				(void *)&pcs_cfg, sizeof(pcs_cfg),
+				NULL, 0);
+	irq->to_pcs_init = false;
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret);
+		goto l_end;
+	}
+
+	/* Set flow control mac address */
+	sxe_fc_mac_addr_set(adapter);
+
+	LOG_INFO_BDF("mode:%u max_frame:0x%x loopback pcs init done.\n",
+		     mode, max_frame);
+l_end:
+	return ret;
+}
+
+static s32 sxe_loopback_configure(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	u32 max_frame = SXE_DEFAULT_MTU + SXE_ETH_DEAD_LOAD;
+
+	(void)sxe_sfp_tx_laser_disable(adapter);
+
+	/* Initialize sds and pcs modules */
+	ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO, max_frame);
+	if (ret) {
+		LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n",
+					SXE_PCS_MODE_10GBASE_KR_WO, ret);
+		goto l_out;
+	}
+
+	ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_LPBK_PHY_TX2RX, max_frame);
+	if (ret) {
+		LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n",
+					SXE_PCS_MODE_LPBK_PHY_TX2RX, ret);
+		goto l_out;
+	}
+
+	usleep_range(10000, 20000);
+
+	LOG_DEBUG_BDF("loolback configure success max_frame:0x%x.", max_frame);
+
+l_out:
+	return ret;
+
+}
+
/*
 * dev_start callback: sync firmware time, reset and start the hw,
 * configure queues, interrupts, link (or loopback), flow control,
 * DCB and optional offloads. Returns 0 on success, negative errno
 * on failure. The order of hw calls below is significant.
 */
static s32 sxe_dev_start(struct rte_eth_dev *dev)
{
	s32 ret;
	struct sxe_adapter *adapter = dev->data->dev_private;
	struct sxe_hw *hw = &adapter->hw;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
	struct sxe_irq_context *irq = &adapter->irq_ctxt;
#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
	struct sxe_macsec_context *macsec_ctxt = &adapter->macsec_ctxt;
#endif

	/* NOTE(review): the fw time sync result is never checked; ret is
	 * overwritten below - confirm this is intentional. */
	ret = sxe_fw_time_sync(hw);

	sxe_wait_setup_link_complete(dev, 0);

	rte_intr_disable(handle);

	adapter->is_stopped = false;

	ret = sxe_phy_init(adapter);
	if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) {
		/* Unsupported SFP module: abort the start. */
		PMD_LOG_ERR(INIT, "sfp is not sfp+, not supported, ret=%d\n", ret);
		ret = -EPERM;
		goto l_end;
	} else if (ret) {
		/* Other phy init errors are logged but not fatal here. */
		PMD_LOG_ERR(INIT, "phy init failed, ret=%d", ret);
	}

	ret = sxe_hw_reset(hw);
	if (ret < 0) {
		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
		goto l_end;
	}

	sxe_hw_start(hw);

	sxe_mac_addr_set(dev, &dev->data->mac_addrs[0]);

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
	sxe_hw_pf_rst_done_set(hw);

	/* Configure virtualization */
	sxe_vt_configure(dev);
#endif

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
	/* Flow-director (fnav) filters, when enabled in the fdir conf. */
	if (SXE_DEV_FNAV_CONF(dev)->mode != RTE_FDIR_MODE_NONE) {
		ret = sxe_fnav_filter_configure(dev);
		if (ret) {
			PMD_LOG_ERR(INIT, "fnav config fail.");
			goto l_end;
		}
	}
#endif

	sxe_tx_configure(dev);

	ret = sxe_rx_configure(dev);
	if (ret) {
		PMD_LOG_ERR(INIT, "unable to initialize RX hardware");
		goto l_error;
	}

	ret = sxe_irq_configure(dev);
	if (ret) {
		PMD_LOG_ERR(INIT, "irq config fail.");
		goto l_error;
	}

	sxe_vlan_filter_configure(dev);

	sxe_queue_stats_map_restore(dev);

	sxe_txrx_start(dev);

	irq->to_pcs_init = true;

	/* Normal link bring-up or phy loopback, selected by lpbk_mode. */
	if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_DISABLED) {
		sxe_link_configure(dev);
	} else if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_ENABLED){
	       sxe_loopback_configure(adapter);
	} else {
		ret = -ENOTSUP;
		PMD_LOG_ERR(INIT, "unsupport loopback mode:%u.",
			    dev->data->dev_conf.lpbk_mode);
		goto l_end;
	}

	sxe_link_update(dev, false);

	ret = sxe_flow_ctrl_enable(dev);
	if (ret < 0) {
		PMD_LOG_ERR(INIT, "enable flow ctrl err");
		goto l_error;
	}

	sxe_dcb_configure(dev);

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
	if (macsec_ctxt->offload_en) {
		sxe_macsec_enable(dev, macsec_ctxt);
	}
#endif

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
	sxe_filter_restore(dev);
#endif

l_end:
	return ret;

l_error:
	/* Roll back queue and irq setup on fatal configuration errors. */
	PMD_LOG_ERR(INIT, "dev start err, ret=%d", ret);
	sxe_irq_vec_free(handle);
	sxe_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
	ret = -EIO;
	goto l_end;
}
+
/*
 * dev_stop callback: quiesce interrupts and the laser, reset the hw,
 * tear down queues and report link down. On DPDK 19.11 the ethdev API
 * expects a void return, hence the #ifdef on the signature.
 */
#ifdef DPDK_19_11_6
static void sxe_dev_stop(struct rte_eth_dev *dev)
#else
static s32 sxe_dev_stop(struct rte_eth_dev *dev)
#endif
{
	s32 ret = 0;
	s32 num;
	struct rte_eth_link link;
	struct sxe_adapter *adapter = dev->data->dev_private;
	struct sxe_hw *hw = &adapter->hw;

	PMD_INIT_FUNC_TRACE();

	/* Nothing to do when the port is already stopped. */
	if (adapter->is_stopped) {
		LOG_ERROR("adapter[%p] is stopped", adapter);
		goto l_end;
	}

	/* Quiesce the hardware before resetting it. */
	sxe_hw_all_irq_disable(hw);

	sxe_sfp_tx_laser_disable(adapter);

	sxe_wait_setup_link_complete(dev, 0);

	ret = sxe_hw_reset(hw);
	if (ret < 0) {
		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
		goto l_end;
	}

	/* Restore the primary mac address after the reset. */
	sxe_mac_addr_set(dev, &dev->data->mac_addrs[0]);

	sxe_irq_disable(dev);

	sxe_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);

	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Publish link down to the ethdev layer. */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	adapter->rss_reta_updated = false;

	dev->data->dev_started = 0;
	adapter->is_stopped = true;

	/* Cancel pending delayed irq work; if any was outstanding,
	 * deliver a final link-state-change callback. */
	num = rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, dev);
	if (num > 0) {
		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}

	LOG_DEBUG_BDF("dev stop success.");

l_end:
#ifdef DPDK_19_11_6
	return;
#else
	return ret;
#endif
}
+
/*
 * dev_close callback: reset the hw, stop the port and release all
 * driver resources (queues, irqs, VT, filters, TM). Only the primary
 * process performs the teardown. Returns 0 on success (void on 19.11).
 */
#ifdef DPDK_19_11_6
static void sxe_dev_close(struct rte_eth_dev *dev)
#else
static s32 sxe_dev_close(struct rte_eth_dev *dev)
#endif
{
	struct sxe_adapter *adapter = dev->data->dev_private;
	struct sxe_hw *hw = &adapter->hw;
	s32 ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns the hardware teardown. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_LOG_INFO(INIT, "not primary, do nothing");
		 goto l_end;
	}

	/* Tell the firmware the driver is going away. */
	sxe_hw_hdc_drv_status_set(hw, (u32)false);

	ret = sxe_hw_reset(hw);
	if (ret < 0) {
		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
		goto l_end;
	}

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
	sxe_hw_pf_rst_done_set(hw);
#endif

	/* Stop the port; on newer DPDK propagate its error code. */
#ifdef DPDK_19_11_6
	sxe_dev_stop(dev);
#else
	ret = sxe_dev_stop(dev);
	if (ret) {
		PMD_LOG_ERR(INIT, "dev stop fail.(err:%d)", ret);
	}
#endif

	sxe_queues_free(dev);

	/* Restore the permanent mac address and release irq resources. */
	sxe_mac_addr_set(dev, &adapter->mac_filter_ctxt.def_mac_addr);
	sxe_irq_uninit(dev);

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
	sxe_vt_uninit(dev);
#endif

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
	sxe_fnav_filter_uninit(dev);
	sxe_fivetuple_filter_uninit(dev);
#endif

#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
	sxe_tm_ctxt_uninit(dev);
#endif

l_end:
#ifdef DPDK_19_11_6
	return;
#else
	return ret;
#endif
}
+
+static s32 sxe_dev_infos_get(struct rte_eth_dev *dev,
+					struct rte_eth_dev_info *dev_info)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	dev_info->max_rx_queues = SXE_HW_TXRX_RING_NUM_MAX;
+	dev_info->max_tx_queues = SXE_HW_TXRX_RING_NUM_MAX;
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE) {
+			dev_info->max_tx_queues = SXE_HW_TX_NONE_MODE_Q_NUM;
+		}
+	}
+
+	dev_info->min_rx_bufsize = 1024;
+	dev_info->max_rx_pktlen = 15872; 
+	dev_info->max_mac_addrs = SXE_UC_ENTRY_NUM_MAX;
+	dev_info->max_hash_mac_addrs = SXE_HASH_UC_NUM_MAX;
+	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->max_mtu =  dev_info->max_rx_pktlen - SXE_ETH_OVERHEAD;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+
+	dev_info->rx_queue_offload_capa = sxe_rx_queue_offload_capa_get(dev);
+	dev_info->rx_offload_capa = (sxe_rx_port_offload_capa_get(dev) |
+				     dev_info->rx_queue_offload_capa);
+	dev_info->tx_queue_offload_capa = sxe_tx_queue_offload_capa_get(dev);
+	dev_info->tx_offload_capa = sxe_tx_port_offload_capa_get(dev);
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = SXE_DEFAULT_RX_PTHRESH,
+			.hthresh = SXE_DEFAULT_RX_HTHRESH,
+			.wthresh = SXE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = SXE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = SXE_DEFAULT_TX_PTHRESH,
+			.hthresh = SXE_DEFAULT_TX_HTHRESH,
+			.wthresh = SXE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = SXE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = SXE_DEFAULT_TX_RSBIT_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = sxe_rx_desc_lim;
+	dev_info->tx_desc_lim = sxe_tx_desc_lim;
+
+	dev_info->hash_key_size = SXE_HKEY_MAX_INDEX * sizeof(u32);
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
+	dev_info->flow_type_rss_offloads = SXE_RSS_OFFLOAD_ALL;
+
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+
+	dev_info->default_rxportconf.burst_size = 32;
+	dev_info->default_txportconf.burst_size = 32;
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_rxportconf.ring_size = 256;
+	dev_info->default_txportconf.ring_size = 256;
+
+	return 0;
+}
+
+static s32 sxe_mtu_set(struct rte_eth_dev *dev, u16 mtu)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_dev_info dev_info;
+	u32 frame_size = mtu + SXE_ETH_OVERHEAD;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	s32 ret;
+
+	ret = sxe_dev_infos_get(dev, &dev_info);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "get dev info fails with ret=%d",ret);
+		goto l_end;
+	}
+
+	if (mtu < RTE_ETHER_MTU || frame_size > dev_info.max_rx_pktlen) {
+		PMD_LOG_ERR(INIT, "mtu=%u < %u or frame_size=%u > max_rx_pktlen=%u",
+			mtu, RTE_ETHER_MTU, frame_size, dev_info.max_rx_pktlen);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (dev_data->dev_started && !dev_data->scattered_rx &&
+		(frame_size + 2 * SXE_VLAN_TAG_SIZE >
+		dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+		PMD_LOG_ERR(INIT, "stop port first.");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	if (frame_size > SXE_ETH_MAX_LEN) {
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
+	} else {
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+	}
+
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+#endif
+	adapter->mtu = mtu;
+	PMD_LOG_NOTICE(DRV, "mtu set success, take effect after port-restart.");
+
+l_end:
+	return ret;
+}
+
+static int sxe_get_regs(struct rte_eth_dev *dev,
+	      struct rte_dev_reg_info *regs)
+{
+	s32 ret = 0;
+	u32 *data = regs->data;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 length = sxe_hw_all_regs_group_num_get();
+
+	if (data == NULL) {
+		regs->length = length;
+		regs->width = sizeof(uint32_t);
+		goto l_end;
+	}
+
+	if ((regs->length == 0) || (regs->length == length)) {
+		sxe_hw_all_regs_group_read(hw, data);
+
+		goto l_end;
+	}
+
+	ret = -ENOTSUP;
+	LOG_ERROR("get regs: inval param: regs_len=%u, regs->data=%p, "
+			"regs_offset=%u,  regs_width=%u, regs_version=%u",
+			regs->length, regs->data,
+			regs->offset, regs->width,
+			regs->version);
+
+l_end:
+	return ret;
+}
+
+static s32 sxe_led_reset(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	s32 resp;
+	struct sxe_led_ctrl ctrl;
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+
+	ctrl.mode = SXE_IDENTIFY_LED_RESET; 
+	ctrl.duration = 0;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
+				(void *)&ctrl, sizeof(ctrl),
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led reset", ret);
+		ret = -EIO;
+	} else {
+		LOG_DEBUG_BDF("led reset sucess");
+	}
+
+	return ret;
+}
+
+static s32 sxe_led_ctrl(struct sxe_adapter *adapter, bool is_on)
+{
+	s32 ret;
+	s32 resp;
+	struct sxe_led_ctrl ctrl;
+	struct sxe_hw *hw = &adapter->hw;
+
+	ctrl.mode = (true == is_on) ? SXE_IDENTIFY_LED_ON : \
+					SXE_IDENTIFY_LED_OFF;
+	ctrl.duration = 0;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
+				(void *)&ctrl, sizeof(ctrl),
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led ctrl\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int sxe_led_on(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_led_ctrl(adapter, true);
+
+	return ret;
+}
+
+static int sxe_led_off(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_led_ctrl(adapter, false);
+
+	return ret;
+}
+
+/* eth_dev_ops .fw_version_get callback.
+ * Per the DPDK convention: returns 0 when the version string fits in
+ * fw_size, a positive number of bytes (including the terminating NUL)
+ * needed when the buffer is too small, or a negative errno on failure.
+ */
+static int sxe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+						size_t fw_size)
+{
+	int ret;
+	sxe_version_resp_s resp;
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+
+	/* Query the firmware version string over the HDC channel. */
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_FW_VER_GET,
+				NULL, 0,
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("get version failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_end;
+	}
+
+	/* snprintf returns the length it would have written, excluding NUL. */
+	ret = snprintf(fw_version, fw_size, "%s", resp.fw_version);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret += 1; 
+
+	/* Buffer was large enough: report success instead of the length. */
+	if (fw_size >= (size_t)ret) {
+		ret = 0;
+	}
+
+l_end:
+	return ret;
+}
+
+/* PF ethdev operation table registered with the DPDK ethdev layer.
+ * Conditionally-compiled entries track API differences between the
+ * supported DPDK releases (see sxe_compat_version.h feature macros).
+ */
+static const struct eth_dev_ops sxe_eth_dev_ops = {
+	.dev_configure		= sxe_dev_configure,
+	.dev_start		= sxe_dev_start,
+	.dev_stop		= sxe_dev_stop,
+	.dev_close		= sxe_dev_close,
+	.dev_reset		= sxe_dev_reset,
+
+	.rx_queue_start		= sxe_rx_queue_start,
+	.rx_queue_stop		= sxe_rx_queue_stop,
+	.rx_queue_setup		= sxe_rx_queue_setup,
+	.rx_queue_release	= sxe_rx_queue_release,
+	.rxq_info_get		= sxe_rx_queue_info_get,
+	.dev_infos_get		= sxe_dev_infos_get,
+
+	.tx_queue_start		= sxe_tx_queue_start,
+	.tx_queue_stop		= sxe_tx_queue_stop,
+	.tx_queue_setup		= sxe_tx_queue_setup,
+	.tx_queue_release	= sxe_tx_queue_release,
+	.tx_done_cleanup	= sxe_tx_done_cleanup,
+	.txq_info_get		= sxe_tx_queue_info_get,
+
+	.promiscuous_enable	= sxe_promiscuous_enable,
+	.promiscuous_disable	= sxe_promiscuous_disable,
+	.allmulticast_enable	= sxe_allmulticast_enable,
+	.allmulticast_disable	= sxe_allmulticast_disable,
+
+	.rx_queue_intr_enable	= sxe_rx_queue_intr_enable,
+	.rx_queue_intr_disable	= sxe_rx_queue_intr_disable,
+
+	.mtu_set		= sxe_mtu_set,
+	.reta_update		= sxe_rss_reta_update,
+	.reta_query		= sxe_rss_reta_query,
+	.rss_hash_update	= sxe_rss_hash_update,
+	.rss_hash_conf_get	= sxe_rss_hash_conf_get,
+
+	.mac_addr_add		= sxe_mac_addr_add,
+	.mac_addr_remove	= sxe_mac_addr_remove,
+	.mac_addr_set		= sxe_mac_addr_set,
+
+	.uc_hash_table_set	= sxe_uc_hash_table_set,
+	.uc_all_hash_table_set	= sxe_uc_all_hash_table_set,
+
+	.set_mc_addr_list	= sxe_set_mc_addr_list,
+
+	.stats_get		= sxe_eth_stats_get,
+	.stats_reset		= sxe_stats_reset,
+
+	.xstats_get		= sxe_xstats_get,
+	.xstats_reset		= sxe_xstats_reset,
+	.xstats_get_by_id	= sxe_xstats_get_by_id,
+	.xstats_get_names	= sxe_xstats_names_get,
+	.xstats_get_names_by_id	= sxe_xstats_names_get_by_id,
+	.queue_stats_mapping_set = sxe_queue_stats_mapping_set,
+
+	.get_module_info	= sxe_get_module_info,
+	.get_module_eeprom	= sxe_get_module_eeprom,
+
+	.flow_ctrl_get		= sxe_flow_ctrl_get,
+	.flow_ctrl_set		= sxe_flow_ctrl_set,
+	.priority_flow_ctrl_set = sxe_priority_flow_ctrl_set,
+
+	.timesync_enable	= sxe_timesync_enable,
+	.timesync_disable	= sxe_timesync_disable,
+	.timesync_read_rx_timestamp = sxe_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = sxe_timesync_read_tx_timestamp,
+	.timesync_adjust_time	= sxe_timesync_adjust_time,
+	.timesync_read_time	= sxe_timesync_read_time,
+	.timesync_write_time	= sxe_timesync_write_time,
+
+	.vlan_filter_set      = sxe_vlan_filter_set,
+	.vlan_tpid_set        = sxe_vlan_tpid_set,
+	.vlan_offload_set     = sxe_vlan_offload_set,
+	.vlan_strip_queue_set = sxe_vlan_strip_queue_set,
+
+	.get_reg		= sxe_get_regs,
+
+	.dev_set_link_up	= sxe_dev_set_link_up,
+	.dev_set_link_down	= sxe_dev_set_link_down,
+	.dev_led_on		= sxe_led_on,
+	.dev_led_off		= sxe_led_off,
+	.link_update		= sxe_link_update,
+
+	.dev_supported_ptypes_get = sxe_dev_supported_ptypes_get,
+
+	.get_dcb_info		= sxe_get_dcb_info,
+
+	.set_queue_rate_limit	= sxe_queue_rate_limit_set,
+	.fw_version_get		= sxe_fw_version_get,
+
+/* Mirror-rule ops only exist on DPDK versions that still define them. */
+#ifdef ETH_DEV_MIRROR_RULE
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	.mirror_rule_set        = sxe_mirror_rule_set,
+	.mirror_rule_reset      = sxe_mirror_rule_reset,
+#endif
+#endif
+
+/* Older DPDK uses .filter_ctrl, newer uses .flow_ops_get. */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#ifdef ETH_DEV_OPS_FILTER_CTRL
+	.filter_ctrl		= sxe_filter_ctrl,
+#else
+	.flow_ops_get		= sxe_flow_ops_get,
+#endif
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	.tm_ops_get		= sxe_tm_ops_get,
+#endif
+
+#ifdef ETH_DEV_OPS_MONITOR
+	.get_monitor_addr	= sxe_monitor_addr_get,
+#endif
+/* On some DPDK versions the descriptor ops live in eth_dev_ops;
+ * otherwise they are set directly on rte_eth_dev in sxe_ethdev_init().
+ */
+#ifdef ETH_DEV_OPS_HAS_DESC_RELATE
+	.rx_queue_count	   = sxe_rx_queue_count,
+	.rx_descriptor_status = sxe_rx_descriptor_status,
+	.tx_descriptor_status = sxe_tx_descriptor_status,
+#ifdef ETH_DEV_RX_DESC_DONE
+	.rx_descriptor_done   = sxe_rx_descriptor_done,
+#endif
+#endif
+};
+
+/* One-time hardware bring-up for the PF: map BAR0 registers, announce
+ * driver presence to firmware, init the PHY, reset/start the MAC,
+ * program the default MAC address and flow-control base settings.
+ * On any failure the firmware driver-status flag is cleared again.
+ */
+static s32 sxe_hw_base_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+
+	hw->reg_base_addr = (void *)pci_dev->mem_resource[0].addr;
+	PMD_LOG_INFO(INIT, "eth_dev[%u] got reg_base_addr=%p",
+			eth_dev->data->port_id, hw->reg_base_addr);
+	hw->adapter = adapter;
+
+	/* strlcpy() takes the full destination size and always
+	 * NUL-terminates; the previous "- 1" truncated one byte.
+	 */
+	strlcpy(adapter->name, pci_dev->device.name, sizeof(adapter->name));
+
+	/* Tell firmware the host driver is attached. */
+	sxe_hw_hdc_drv_status_set(hw, (u32)true);
+
+	ret = sxe_phy_init(adapter);
+	if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) {
+		PMD_LOG_ERR(INIT, "sfp is not sfp+, not supported, ret=%d\n", ret);
+		ret = -EPERM;
+		goto l_out;
+	} else if (ret) {
+		/* Other PHY errors are logged but not fatal here. */
+		PMD_LOG_ERR(INIT, "phy init failed, ret=%d\n", ret);
+	}
+
+	ret = sxe_hw_reset(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_out;
+	} else {
+		sxe_hw_start(hw);
+	}
+
+	ret = sxe_mac_addr_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mac addr init fail, ret=%d", ret);
+		goto l_out;
+	}
+
+	sxe_hw_fc_base_init(hw);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+#endif
+
+l_out:
+	if (ret) {
+		/* Undo the driver-attached announcement on failure. */
+		sxe_hw_hdc_drv_status_set(hw, (u32)false);
+	}
+	return ret;
+}
+
+/* Secondary-process init: only re-resolves burst function pointers via
+ * the shared base layer; no hardware access is performed here.
+ */
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	__sxe_secondary_proc_init(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
+
+	return;
+}
+
+/* Release all MAC-address tables owned by the port: the ethdev rar
+ * array, the UTA hash array and the driver's software UC shadow table.
+ * Pointers are cleared so a repeated call is a no-op.
+ * NOTE(review): rte_free(NULL) is a no-op, so the guards are only
+ * defensive style, not required.
+ */
+STATIC void sxe_ethdev_mac_mem_free(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+
+	if (eth_dev->data->mac_addrs) {
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+	}
+
+	if (eth_dev->data->hash_mac_addrs) {
+		rte_free(eth_dev->data->hash_mac_addrs);
+		eth_dev->data->hash_mac_addrs = NULL;
+	}
+
+	if (adapter->mac_filter_ctxt.uc_addr_table) {
+		rte_free(adapter->mac_filter_ctxt.uc_addr_table);
+		adapter->mac_filter_ctxt.uc_addr_table = NULL;
+	}
+
+	return;
+}
+
+#ifdef  DPDK_19_11_6
+/* DPDK 19.11 does not zero dev_private on reset paths; clear the
+ * software contexts explicitly.
+ */
+static void sxe_pf_init(struct sxe_adapter *adapter)
+{
+	memset(&adapter->vlan_ctxt, 0, sizeof(adapter->vlan_ctxt));
+	memset(&adapter->mac_filter_ctxt.uta_hash_table, 0, \
+		sizeof(adapter->mac_filter_ctxt.uta_hash_table));
+	memset(&adapter->dcb_ctxt.config, 0, sizeof(adapter->dcb_ctxt.config));
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	memset(&adapter->filter_ctxt, 0, sizeof(adapter->filter_ctxt));
+#endif
+
+	return;
+}
+#endif
+
+/* Main PF probe entry (called from the PCI probe path).
+ * Installs ops and burst handlers, then — in the primary process only —
+ * performs hardware bring-up, LED/DCB/stats/VT/irq/filter/TM init.
+ * The ordering below is significant: hw base init must precede
+ * everything that touches registers, and irq init must precede
+ * fnav filter init (whose error path unwinds irq).
+ */
+s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	struct sxe_filter_context *filter_info = &adapter->filter_ctxt;
+#endif
+
+	eth_dev->dev_ops = &sxe_eth_dev_ops;
+
+/* On newer DPDK the descriptor ops are fields of rte_eth_dev itself. */
+#ifndef ETH_DEV_OPS_HAS_DESC_RELATE
+	eth_dev->rx_queue_count       = sxe_rx_queue_count;
+	eth_dev->rx_descriptor_status = sxe_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = sxe_tx_descriptor_status;
+#ifdef ETH_DEV_RX_DESC_DONE
+	eth_dev->rx_descriptor_done   = sxe_rx_descriptor_done;
+#endif
+#endif
+
+	eth_dev->rx_pkt_burst		  = &sxe_pkts_recv;
+	eth_dev->tx_pkt_burst = &sxe_pkts_xmit_with_offload;
+	eth_dev->tx_pkt_prepare = &sxe_prep_pkts;
+
+	/* Secondary processes only attach to the primary's state. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+#else
+		bool rx_vec_allowed = 0;
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &rx_vec_allowed);
+#endif
+		goto l_out;
+	}
+
+	rte_atomic32_clear(&adapter->link_thread_running);
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+#ifdef  DPDK_19_11_6
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+	sxe_pf_init(adapter);
+#endif
+	ret = sxe_hw_base_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hw base init fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	/* Best effort; failure to reset the LED is not fatal. */
+	sxe_led_reset(eth_dev);
+
+	sxe_dcb_init(eth_dev);
+
+	/* Reset stats info */
+	sxe_stats_reset(eth_dev);
+
+	sxe_queue_stats_map_reset(eth_dev);
+
+
+#ifdef SET_AUTOFILL_QUEUE_XSTATS
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_vt_init(eth_dev);
+#endif
+	adapter->mtu = RTE_ETHER_MTU;
+
+	sxe_irq_init(eth_dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	memset(filter_info, 0, sizeof(struct sxe_filter_context));
+	TAILQ_INIT(&filter_info->fivetuple_list);
+	ret = sxe_fnav_filter_init(eth_dev);
+	if (ret) {
+		/* Unwind everything set up above on filter init failure. */
+		sxe_ethdev_mac_mem_free(eth_dev);
+		sxe_irq_uninit(eth_dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+		sxe_vt_uninit(eth_dev);
+#endif
+		goto l_out;
+	}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	sxe_tm_ctxt_init(eth_dev);
+#endif
+
+	PMD_LOG_INFO(INIT, "sxe eth dev init done.");
+
+l_out:
+	return ret;
+}
+
+/* Remove path: close the device and free MAC tables.  Secondary
+ * processes must not touch shared state, so they return immediately.
+ */
+s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_INFO(INIT, "not primary process ,do nothing");
+		goto l_end;
+	}
+
+	sxe_dev_close(eth_dev);
+
+	sxe_ethdev_mac_mem_free(eth_dev);
+
+l_end:
+	return 0;
+}
+
+/* eth_dev_ops .dev_reset callback: full uninit followed by re-init.
+ * Refused while SR-IOV is active because VFs depend on PF state.
+ */
+s32 sxe_dev_reset(struct rte_eth_dev *eth_dev)
+{
+	s32 ret;
+
+	if (eth_dev->data->sriov.active) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "sriov actived, not support reset pf port[%u]",
+			eth_dev->data->port_id);
+		goto l_end;
+	}
+
+	ret = sxe_ethdev_uninit(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "port[%u] dev uninit failed",
+			eth_dev->data->port_id);
+		goto l_end;
+	}
+
+	ret = sxe_ethdev_init(eth_dev, NULL);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "port[%u] dev init failed",
+			eth_dev->data->port_id);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Public PMD-specific API: enable/disable VT pool TX loopback.
+ * @param port  ethdev port id (must be a sxe port)
+ * @param on    1 to enable loopback, 0 to disable
+ * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on error.
+ */
+s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "port:%u not support tx loopback set.", port);
+		goto l_out;
+	}
+
+	if (on > 1) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "port:%u invalid user configure value:%u.",
+		            port, on);
+		goto l_out;
+	}
+
+	adapter = dev->data->dev_private;
+
+	sxe_hw_vt_pool_loopback_switch(&adapter->hw, on);
+
+	/* Success message was previously logged at ERR level. */
+	PMD_LOG_INFO(DRV, "port:%u set tx loopback:%u success.", port, on);
+
+l_out:
+	return ret;
+
+}
+
diff --git a/drivers/net/sxe/pf/sxe_ethdev.h b/drivers/net/sxe/pf/sxe_ethdev.h
new file mode 100644
index 0000000000..f1165e0413
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ethdev.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_ETHDEV_H__
+#define __SXE_ETHDEV_H__
+
+#include "sxe.h"
+
+/* Max memory window (MMW) sizes; jumbo frames need a larger window. */
+#define SXE_MMW_SIZE_DEFAULT        0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600 
+
+/* Max frame length at the default MTU, including L2 overhead. */
+#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD) 
+
+#define SXE_HKEY_MAX_INDEX 10
+/* L2 overhead: Ethernet header + CRC; dead load adds two VLAN tags. */
+#define SXE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+#define SXE_ETH_DEAD_LOAD (SXE_ETH_OVERHEAD + 2 * SXE_VLAN_TAG_SIZE)
+
+struct sxe_adapter;
+s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused);
+
+s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev);
+
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_filter.c b/drivers/net/sxe/pf/sxe_filter.c
new file mode 100644
index 0000000000..e323af94f8
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_filter.c
@@ -0,0 +1,826 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+
+#include "sxe_filter.h"
+#include "sxe_logs.h"
+#include "sxe.h"
+#include "sxe_queue.h"
+#include "drv_msg.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_cli.h"
+#include "sxe_compat_version.h"
+
+/* PF pool index is the identity mapping on this device. */
+#define PF_POOL_INDEX(p)        (p)
+
+/* Per-queue VLAN-strip bitmap helpers: each u32 in strip_bitmap holds
+ * 32 queue flags; (q) selects word `idx` and bit `bit` within it.
+ */
+#define SXE_STRIP_BITMAP_SET(h, q) \
+	do { \
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] |= 1 << bit;\
+	} while (0)
+
+#define SXE_STRIP_BITMAP_CLEAR(h, q) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] &= ~(1 << bit);\
+	} while (0)
+
+/* Reads the queue's strip flag into (r) as 0 or 1. */
+#define SXE_STRIP_BITMAP_GET(h, q, r) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(r) = (h)->strip_bitmap[idx] >> bit & 1;\
+	} while (0)
+
+/* Fetch the factory-default MAC address from firmware over HDC.
+ * On success copies SXE_MAC_ADDR_LEN bytes into mac_addr; returns
+ * -EIO if the command transfer fails.
+ */
+static s32 sxe_get_mac_addr_from_fw(struct sxe_adapter *adapter,
+						u8 *mac_addr)
+{
+	s32 ret;
+	struct sxe_default_mac_addr_resp mac;
+	struct sxe_hw *hw = &adapter->hw;
+
+	/* Get default mac address from firmware */
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_R0_MAC_GET, NULL, 0,
+				(void *)&mac, sizeof(mac));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:mac addr get\n", ret);
+		ret = -EIO;
+	} else {
+		memcpy(mac_addr, mac.addr, SXE_MAC_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+/* Populate def_mac_addr and fc_mac_addr from the firmware default.
+ * If firmware returns an error or an invalid/multicast address, the
+ * contexts are left untouched.
+ */
+static void sxe_default_mac_addr_get(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	struct rte_ether_addr mac_addr = { {0} };
+
+	ret = sxe_get_mac_addr_from_fw(adapter, mac_addr.addr_bytes);
+	if (ret || !rte_is_valid_assigned_ether_addr(&mac_addr)) {
+		LOG_DEBUG("invalid default mac addr:"MAC_FMT" result:%d\n",
+			      MAC_ADDR(mac_addr.addr_bytes), ret);
+		goto l_out;
+	}
+
+	LOG_DEBUG("default mac addr = "MAC_FMT"\n", MAC_ADDR(mac_addr.addr_bytes));
+	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.def_mac_addr);
+	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
+
+l_out:
+	return;
+}
+
+/* Claim the first free slot in the software UC shadow table for a PF
+ * address and return its index (used as the hardware RAR index).
+ * Returns SXE_UC_ENTRY_NUM_MAX when the table is full — callers are
+ * expected to handle that; NOTE(review): current callers do not check.
+ */
+static u8 sxe_sw_uc_entry_add(struct sxe_adapter *adapter, u8 index,
+				u8 *mac_addr)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used) {
+			uc_table[i].used = true;
+			uc_table[i].rar_idx = i;
+			uc_table[i].original_index = index;
+			uc_table[i].type = SXE_PF;
+			rte_memcpy(uc_table[i].addr, mac_addr, SXE_MAC_ADDR_LEN);
+			break;
+		}
+	}
+
+	return i;
+}
+
+/* Release the PF entry that was added with ethdev index `index`.
+ * Returns the freed slot index, or SXE_UC_ENTRY_NUM_MAX if not found.
+ */
+static u8 sxe_sw_uc_entry_del(struct sxe_adapter *adapter, u8 index)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used || (uc_table[i].type != SXE_PF)) {
+			continue;
+		}
+
+		if (uc_table[i].original_index == index) {
+			uc_table[i].used = false;
+			break;
+		}
+	}
+
+	return i;
+}
+
+/* Claim a slot for a VF (or VF MACVLAN) address; analogous to
+ * sxe_sw_uc_entry_add but tagged with the owning VF index.
+ */
+u8 sxe_sw_uc_entry_vf_add(struct sxe_adapter *adapter,
+				u8 vf_idx, u8 *mac_addr, bool macvlan)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used) {
+			uc_table[i].used = true;
+			uc_table[i].rar_idx = i;
+			uc_table[i].vf_idx = vf_idx;
+			uc_table[i].type = macvlan ? SXE_VF_MACVLAN : SXE_VF;
+			rte_memcpy(uc_table[i].addr, mac_addr, SXE_MAC_ADDR_LEN);
+			break;
+		}
+	}
+
+	return i;
+}
+
+/* Remove a VF's address entries and the matching hardware RAR slots.
+ * With macvlan=false only the first match is removed; with
+ * macvlan=true all of the VF's entries are purged.
+ */
+s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
+					bool macvlan)
+{
+	u8 i;
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used || (uc_table[i].type == SXE_PF)) {
+			continue;
+		}
+
+		if (uc_table[i].vf_idx == vf_idx) {
+			uc_table[i].used = false;
+			sxe_hw_uc_addr_del(&adapter->hw, i);
+			if (!macvlan) {
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Allocate the port's MAC-address tables (ethdev rar array, UTA hash
+ * array, software UC shadow table), read the default MAC from firmware
+ * and program it into RAR slot 0.
+ * Returns 0 on success or -ENOMEM; on failure everything allocated so
+ * far is released via the goto-unwind labels at the bottom.
+ */
+s32 sxe_mac_addr_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u8 rar_idx;
+
+	eth_dev->data->mac_addrs = rte_zmalloc("sxe",
+				RTE_ETHER_ADDR_LEN * SXE_UC_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		LOG_ERROR("mac addr allocate %u B fail.",
+			RTE_ETHER_ADDR_LEN * SXE_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	eth_dev->data->hash_mac_addrs = rte_zmalloc("sxe",
+				RTE_ETHER_ADDR_LEN * SXE_UTA_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->hash_mac_addrs == NULL) {
+		LOG_ERROR("uta table allocate %u B fail.",
+			RTE_ETHER_ADDR_LEN * SXE_UTA_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_free_mac_addr;
+	}
+
+	adapter->mac_filter_ctxt.uc_addr_table = rte_zmalloc("sxe",
+		sizeof(struct sxe_uc_addr_table) * SXE_UC_ENTRY_NUM_MAX, 0);
+	if (adapter->mac_filter_ctxt.uc_addr_table == NULL) {
+		/* %zu: the size expression is size_t; %lu breaks 32-bit. */
+		LOG_ERROR("uc table allocate %zu B fail.",
+			sizeof(struct sxe_uc_addr_table) * SXE_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_free_hash_mac;
+	}
+
+	sxe_default_mac_addr_get(adapter);
+
+	rte_ether_addr_copy(&adapter->mac_filter_ctxt.def_mac_addr,
+				eth_dev->data->mac_addrs);
+
+	rte_ether_addr_copy(&adapter->mac_filter_ctxt.def_mac_addr,
+				&adapter->mac_filter_ctxt.cur_mac_addr);
+
+	rar_idx = sxe_sw_uc_entry_add(adapter, 0, adapter->mac_filter_ctxt.def_mac_addr.addr_bytes);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_uc_addr_add(hw, rar_idx,
+			adapter->mac_filter_ctxt.def_mac_addr.addr_bytes,
+			sxe_vf_num_get(eth_dev));
+#else
+	sxe_hw_uc_addr_add(hw, rar_idx,
+		adapter->mac_filter_ctxt.def_mac_addr.addr_bytes,
+		0);
+#endif
+
+l_out:
+	return ret;
+
+l_free_hash_mac:
+	rte_free(eth_dev->data->hash_mac_addrs);
+	eth_dev->data->hash_mac_addrs = NULL;
+
+l_free_mac_addr:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	goto l_out;
+}
+
+/* Enable promiscuous mode: set both unicast (UPE) and multicast (MPE)
+ * promiscuous bits in the RX filter control register.
+ */
+s32 sxe_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+
+	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+/* Disable promiscuous mode, keeping MPE set if allmulticast is still
+ * enabled at the ethdev level.
+ */
+s32 sxe_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl &= (~SXE_FCTRL_UPE);
+	if (dev->data->all_multicast == 1) {
+		flt_ctrl |= SXE_FCTRL_MPE;
+	} else {
+		flt_ctrl &= (~SXE_FCTRL_MPE);
+	}
+
+	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+/* Enable reception of all multicast frames (MPE bit). */
+s32 sxe_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl |= SXE_FCTRL_MPE;
+
+	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+/* Disable allmulticast.  A no-op while promiscuous mode is on, since
+ * promiscuous implies multicast promiscuity.
+ */
+s32 sxe_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	if (dev->data->promiscuous == 1) {
+		PMD_LOG_DEBUG(DRV,"promiscuous is enable, allmulticast must be enabled.\n");
+		goto l_out;
+	}
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl &= (~SXE_FCTRL_MPE);
+
+	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+l_out:
+	return 0;
+}
+
+/* eth_dev_ops .mac_addr_add callback: record the address in the
+ * software UC table and program the corresponding hardware RAR slot.
+ * @param index  ethdev-level address index
+ * @param pool   VMDq pool the address belongs to
+ */
+s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     u32 index, u32 pool)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u8 rar_idx = sxe_sw_uc_entry_add(adapter, index, mac_addr->addr_bytes);
+
+	/* The caller-supplied pool index is used in both builds.  The
+	 * previous non-SR-IOV branch called sxe_vf_num_get(), an
+	 * SR-IOV-only helper, which cannot link in non-SR-IOV builds.
+	 */
+	ret = sxe_hw_uc_addr_add(hw, rar_idx,
+					mac_addr->addr_bytes, pool);
+	if (ret) {
+		LOG_ERROR("rar_idx:%u pool:%u mac_addr:"MAC_FMT
+				"add fail.(err:%d)",
+				rar_idx, pool,
+				MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "rar_idx:%u pool:%u mac_addr:"MAC_FMT" add done",
+			rar_idx, pool,
+			MAC_ADDR(mac_addr->addr_bytes));
+
+l_out:
+	return ret;
+}
+
+/* eth_dev_ops .mac_addr_remove callback: free the software UC entry
+ * added with ethdev index `index` and clear its hardware RAR slot.
+ */
+void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u8 rar_idx = sxe_sw_uc_entry_del(adapter, index);
+
+	ret = sxe_hw_uc_addr_del(hw, rar_idx);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "rar_idx:%u remove fail.(err:%d)",
+				rar_idx, ret);
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "rar_idx:%u mac_addr:"MAC_FMT" remove done",
+			rar_idx,
+			MAC_ADDR(&dev->data->mac_addrs[rar_idx]));
+
+l_out:
+	return;
+}
+
+/* Program the stored flow-control MAC address into the hardware. */
+void sxe_fc_mac_addr_set(struct sxe_adapter *adapter)
+{
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_hw_fc_mac_addr_set(hw,
+			adapter->mac_filter_ctxt.fc_mac_addr.addr_bytes);
+
+	return;
+}
+
+/* eth_dev_ops .mac_addr_set callback: replace the default (slot 0)
+ * address and update the flow-control MAC to match.
+ */
+s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr)
+{
+	u8 pool_idx;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	sxe_mac_addr_remove(dev, 0);
+
+/* With SR-IOV the PF's default pool sits after all VF pools. */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	pool_idx = pci_dev->max_vfs;
+#else
+	pool_idx = 0;
+#endif
+
+	sxe_mac_addr_add(dev, mac_addr, 0, pool_idx);
+	rte_ether_addr_copy(mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
+
+	sxe_fc_mac_addr_set(adapter);
+
+	PMD_LOG_INFO(DRV, "pool:%u mac_addr:"MAC_FMT" set to be cur mac addr done",
+			pool_idx,
+			MAC_ADDR(mac_addr));
+
+	return 0;
+}
+
+/* Map a MAC address onto its hash-filter position: a 12-bit value
+ * built from the top bits of bytes 4-5 is split into a register index
+ * and a bit index within that register.
+ */
+static void sxe_hash_mac_addr_parse(u8 *mac_addr, u16 *reg_idx,
+						u16 *bit_idx)
+{
+	u16 extracted;
+
+	extracted = ((mac_addr[4] >> 4) |
+			(((u16)mac_addr[5]) << 4));
+
+	extracted &= SXE_MAC_ADDR_EXTRACT_MASK;
+
+	*reg_idx = (extracted >> SXE_MAC_ADDR_SHIFT) & SXE_MAC_ADDR_REG_MASK;
+
+	*bit_idx = extracted & SXE_MAC_ADDR_BIT_MASK;
+
+	PMD_LOG_DEBUG(DRV, "mac_addr:"MAC_FMT" hash reg_idx:%u bit_idx:%u",
+			 MAC_ADDR(mac_addr), *reg_idx, *bit_idx);
+
+	return;
+}
+
+/* eth_dev_ops .uc_hash_table_set callback: toggle a single unicast
+ * address in the UTA hash filter, mirroring the state in software.
+ */
+s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr, u8 on)
+{
+	u16 bit_idx;
+	u16 reg_idx;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 value;
+	s32 ret = 0;
+
+	sxe_hash_mac_addr_parse(mac_addr->addr_bytes, &reg_idx, &bit_idx);
+
+	/* Already in the requested state: nothing to do. */
+	value = (mac_filter->uta_hash_table[reg_idx] >> bit_idx) & 0x1;
+	if (value == on) {
+		goto l_out;
+	}
+
+	value = sxe_hw_uta_hash_table_get(hw, reg_idx);
+	if (on) {
+		mac_filter->uta_used_count++;
+		value |= (0x1 << bit_idx);
+		mac_filter->uta_hash_table[reg_idx] |= (0x1 << bit_idx);
+	} else { 
+		mac_filter->uta_used_count--;
+		value &= ~(0x1 << bit_idx);
+		mac_filter->uta_hash_table[reg_idx] &= ~(0x1 << bit_idx);
+	}
+
+	sxe_hw_uta_hash_table_set(hw, reg_idx, value);
+
+	PMD_LOG_INFO(DRV, "mac_addr:"MAC_FMT" uta reg_idx:%u bit_idx:%u"
+			  " %s done, uta_used_count:%u",
+			 MAC_ADDR(mac_addr->addr_bytes),
+			 reg_idx, bit_idx,
+			 on ? "set" : "clear",
+			 mac_filter->uta_used_count);
+
+l_out:
+	return ret;
+}
+
+/* eth_dev_ops .uc_all_hash_table_set callback: set or clear every UTA
+ * hash entry at once (all-ones or all-zero per register).
+ */
+s32 sxe_uc_all_hash_table_set(struct rte_eth_dev *dev, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 value;
+	u8 i;
+
+	value = on ? (~0) : 0;
+
+	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) {
+		mac_filter->uta_hash_table[i] = value;
+		sxe_hw_uta_hash_table_set(hw, i, value);
+	}
+
+	PMD_LOG_INFO(DRV, "uta table all entry %s done.",
+			  on ? "set" : "clear");
+
+	return 0;
+}
+
+/* eth_dev_ops .set_mc_addr_list callback: rebuild the MTA hash table
+ * from the supplied multicast list and write all registers to hardware.
+ * NOTE(review): when nb_mc_addr is 0 the table is cleared but the MC
+ * filter is not explicitly disabled — confirm this is intended.
+ */
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 i;
+	u16 bit_idx;
+	u16 reg_idx;
+
+	memset(&mac_filter->mta_hash_table, 0, sizeof(mac_filter->mta_hash_table));
+	for (i = 0; i < nb_mc_addr; i++) {
+		sxe_hash_mac_addr_parse(mc_addr_list->addr_bytes, &reg_idx, &bit_idx);
+		mc_addr_list++;
+		mac_filter->mta_hash_table[reg_idx] |= (0x1 << bit_idx);
+	}
+
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
+		sxe_hw_mta_hash_table_set(hw, i, mac_filter->mta_hash_table[i]);
+	}
+
+	if (nb_mc_addr) {
+		sxe_hw_mc_filter_enable(hw);
+	}
+
+	PMD_LOG_INFO(DRV, "mc addr list cnt:%u set to mta done.", nb_mc_addr);
+
+	return 0;
+}
+
+/* eth_dev_ops .vlan_filter_set callback: set or clear one VLAN id in
+ * the hardware VLAN filter array, mirroring the word in software.
+ */
+s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u8 reg_idx;
+	u8 bit_idx;
+	u32 value;
+
+	/* 12-bit VLAN id -> filter-array word index + bit within word. */
+	reg_idx = (vlan_id >> SXE_VLAN_ID_SHIFT) & SXE_VLAN_ID_REG_MASK;
+	bit_idx = (vlan_id & SXE_VLAN_ID_BIT_MASK);
+
+	value = sxe_hw_vlan_filter_array_read(hw, reg_idx);
+	if (on) {
+		value |= (1 << bit_idx);
+	} else {
+		value &= ~(1 << bit_idx);
+	}
+
+	sxe_hw_vlan_filter_array_write(hw, reg_idx, value);
+
+	vlan_ctxt->vlan_hash_table[reg_idx] = value;
+
+	PMD_LOG_INFO(DRV, "vlan_id:0x%x on:%d set done", vlan_id, on);
+
+	return 0;
+}
+
+/* Write the VLAN ethertype into both the RX (VLNCTRL) and TX
+ * (DMATXCTL) registers, preserving the other bits.
+ */
+static void sxe_vlan_tpid_write(struct sxe_hw *hw, u16 tpid)
+{
+	u32 value;
+
+	value = sxe_hw_vlan_type_get(hw);
+	value = (value & (~SXE_VLNCTRL_VET)) | tpid;
+	sxe_hw_vlan_type_set(hw, value);
+
+	value = sxe_hw_txctl_vlan_type_get(hw);
+	value = (value & (~SXE_DMATXCTL_VT_MASK)) |
+		(tpid << SXE_DMATXCTL_VT_SHIFT);
+	sxe_hw_txctl_vlan_type_set(hw, value);
+
+	return;
+}
+
+/* eth_dev_ops .vlan_tpid_set callback.  Which register the TPID lands
+ * in depends on whether global double VLAN (QinQ) is enabled.
+ */
+s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+		    enum rte_vlan_type vlan_type, u16 tpid)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u32 txctl;
+	bool double_vlan;
+
+	txctl = sxe_hw_txctl_vlan_type_get(hw);
+	double_vlan = txctl & SXE_DMATXCTL_GDV;
+
+	switch (vlan_type) {
+	case RTE_ETH_VLAN_TYPE_INNER:
+		if (double_vlan) {
+			sxe_vlan_tpid_write(hw, tpid);
+		} else {
+			ret = -ENOTSUP;
+			PMD_LOG_ERR(DRV, "unsupport inner vlan without "
+				     "global double vlan.");
+		}
+		break;
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		if (double_vlan) {
+			sxe_hw_vlan_ext_type_set(hw,
+				(tpid << SXE_EXVET_VET_EXT_SHIFT));
+		} else {
+			sxe_vlan_tpid_write(hw, tpid);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "Unsupported VLAN type %d", vlan_type);
+		break;
+	}
+
+	PMD_LOG_INFO(DRV, "double_vlan:%d vlan_type:%d tpid:0x%x set done ret:%d",
+			   double_vlan, vlan_type, tpid, ret);
+	return ret;
+}
+
+/* Record the per-queue VLAN-strip state in the software bitmap and
+ * update the queue's mbuf vlan_flags/offloads to match.
+ */
+static void sxe_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bool on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+
+	sxe_rx_queue_s *rxq;
+
+	if ((queue_idx >= SXE_HW_TXRX_RING_NUM_MAX) ||
+	    (queue_idx >= dev->data->nb_rx_queues)) {
+		PMD_LOG_ERR(DRV, "invalid queue idx:%u exceed max"
+			   " queue number:%u or nb_rx_queues:%u.",
+			   queue_idx, SXE_HW_TXRX_RING_NUM_MAX,
+			   dev->data->nb_rx_queues);
+		goto l_out;
+	}
+
+	if (on) {
+		SXE_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
+	} else {
+		SXE_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
+	}
+
+	rxq = dev->data->rx_queues[queue_idx];
+
+	if (on) {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	} else {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+
+	PMD_LOG_INFO(DRV, "queue idx:%u vlan strip on:%d set bitmap and offload done.",
+		     queue_idx, on);
+
+l_out:
+	return;
+}
+
+/* Push every RX queue's configured VLAN-strip offload down to the
+ * hardware and the software bitmap.
+ */
+void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	sxe_rx_queue_s *rxq;
+	bool on;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			on = true;
+		} else {
+			on = false;
+		}
+		sxe_hw_vlan_tag_strip_switch(hw, i, on);
+
+		sxe_vlan_strip_bitmap_set(dev, i, on);
+	}
+
+	return;
+}
+
+/* Turn off hardware VLAN filtering entirely. */
+static void sxe_vlan_filter_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_vlan_filter_switch(hw, 0);
+
+	return;
+}
+
+/* Enable VLAN filtering (VFE) and restore the full filter array from
+ * the software shadow copy.
+ */
+static void sxe_vlan_filter_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u32 vlan_ctl;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	vlan_ctl = sxe_hw_vlan_type_get(hw);
+	vlan_ctl &= ~SXE_VLNCTRL_CFI;
+	vlan_ctl |= SXE_VLNCTRL_VFE;
+	sxe_hw_vlan_type_set(hw, vlan_ctl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+		sxe_hw_vlan_filter_array_write(hw, i, vlan_ctxt->vlan_hash_table[i]);
+	}
+
+	return;
+}
+
+/* Disable double VLAN (QinQ): clear GDV in TX ctrl and the extended
+ * VLAN bit in the RX path.
+ */
+static void sxe_vlan_extend_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ctrl = sxe_hw_txctl_vlan_type_get(hw);
+	ctrl &= ~SXE_DMATXCTL_GDV;
+	sxe_hw_txctl_vlan_type_set(hw, ctrl);
+
+	ctrl = sxe_hw_ext_vlan_get(hw);
+	ctrl &= ~SXE_EXTENDED_VLAN;
+	sxe_hw_ext_vlan_set(hw, ctrl);
+
+	return;
+}
+
+/* Enable double VLAN (QinQ): mirror image of the disable path. */
+static void sxe_vlan_extend_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ctrl = sxe_hw_txctl_vlan_type_get(hw);
+	ctrl |= SXE_DMATXCTL_GDV;
+	sxe_hw_txctl_vlan_type_set(hw, ctrl);
+
+	ctrl = sxe_hw_ext_vlan_get(hw);
+	ctrl |= SXE_EXTENDED_VLAN;
+	sxe_hw_ext_vlan_set(hw, ctrl);
+
+	return;
+}
+
+/* Apply the VLAN offloads selected by `mask` (strip/filter/extend)
+ * according to the current rxmode offload flags.  Always returns 0.
+ */
+static s32 sxe_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		sxe_vlan_strip_switch_set(dev);
+	}
+
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+			sxe_vlan_filter_enable(dev);
+		} else {
+			sxe_vlan_filter_disable(dev);
+		}
+	}
+
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
+			sxe_vlan_extend_enable(dev);
+		} else {
+			sxe_vlan_extend_disable(dev);
+		}
+	}
+
+	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
+		     " vlan offload set done", mask, rxmode->offloads);
+
+	return 0;
+}
+
+/* eth_dev_ops .vlan_offload_set callback.  Runtime changes of the
+ * strip offload are rejected (-1); filter/extend bits are applied.
+ */
+s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask)
+{
+	s32 mask;
+	s32 ret = 0;
+
+	if (vlan_mask & RTE_ETH_VLAN_STRIP_MASK) {
+		PMD_LOG_WARN(DRV, "vlan strip has been on, not support to set.");
+		ret = -1;
+		goto l_out;
+	}
+	mask = vlan_mask & ~RTE_ETH_VLAN_STRIP_MASK;
+
+	sxe_vlan_offload_configure(dev, mask);
+
+	/* Fixed format: "0x%d" printed decimal behind a hex prefix. */
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%x set done.", vlan_mask);
+
+l_out:
+	return ret;
+}
+
+/* eth_dev_ops .vlan_strip_queue_set callback: per-queue strip changes
+ * at runtime are not supported; log a warning and return.
+ */
+void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
+{
+	UNUSED(dev);
+	UNUSED(on);
+	PMD_LOG_WARN(DRV, "queue:%u vlan strip has been on, not support to set.", queue);
+
+	return;
+}
+
+/* Apply all VLAN offload categories at start-up; additionally force
+ * VLAN filtering on when the port runs in VMDq-only RX mode.
+ */
+void sxe_vlan_filter_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 vlan_mask;
+	u32 vlan_ctl;
+
+	vlan_mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		    RTE_ETH_VLAN_EXTEND_MASK;
+	sxe_vlan_offload_configure(dev, vlan_mask);
+
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+		vlan_ctl = sxe_hw_vlan_type_get(hw);
+		vlan_ctl |= SXE_VLNCTRL_VFE;
+		sxe_hw_vlan_type_set(hw, vlan_ctl);
+		LOG_DEBUG_BDF("vmdq mode enable vlan filter done.");
+	}
+
+	return;
+}
+
diff --git a/drivers/net/sxe/pf/sxe_filter.h b/drivers/net/sxe/pf/sxe_filter.h
new file mode 100644
index 0000000000..a541dce586
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_filter.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_FILTER_H__
+#define __SXE_FILTER_H__
+
+#include <rte_ether.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_hw.h"
+
+struct sxe_adapter;
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8*)(x))[0],((u8*)(x))[1], \
+			   ((u8*)(x))[2],((u8*)(x))[3], \
+			   ((u8*)(x))[4],((u8*)(x))[5]
+
+#define BYTE_BIT_NUM   8
+
+#define SXE_VLAN_STRIP_BITMAP_SIZE    \
+        RTE_ALIGN((SXE_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM)), \
+		sizeof(u32))
+
+/* Software vlan state kept per adapter. */
+struct sxe_vlan_context {
+	u32 vlan_hash_table[SXE_VFT_TBL_SIZE]; /* shadow of the hw vlan filter table */
+	u32 strip_bitmap[SXE_VLAN_STRIP_BITMAP_SIZE]; /* one strip flag bit per ring */
+	u32  vlan_table_size; /* entries in use -- TODO confirm against callers */
+};
+
+/* Owner of a software unicast address table entry. */
+enum sxe_uc_addr_src_type {
+	SXE_PF = 0,      /* entry belongs to the PF */
+	SXE_VF,          /* entry belongs to a VF */
+	SXE_VF_MACVLAN   /* extra macvlan entry added for a VF */
+};
+
+/* One slot of the software unicast (RAR) address table. */
+struct sxe_uc_addr_table {
+	u8 rar_idx;         /* hw receive address register index */
+	u8 vf_idx;          /* owning VF when type != SXE_PF */
+	u8 type;            /* enum sxe_uc_addr_src_type */
+	u8 original_index;  /* caller-supplied index -- TODO confirm semantics */
+	bool used;          /* slot occupied */
+	u8 addr[SXE_MAC_ADDR_LEN];  /* MAC address bytes */
+};
+
+/* Software MAC filtering state kept per adapter. */
+struct sxe_mac_filter_context {
+	struct rte_ether_addr def_mac_addr; /* default (permanent) MAC address */
+	struct rte_ether_addr cur_mac_addr; /* currently programmed MAC address */
+
+	struct rte_ether_addr fc_mac_addr; /* MAC used for flow control frames */
+
+	u32 uta_used_count;            /* number of unicast hash entries in use */
+	u32 uta_hash_table[SXE_UTA_ENTRY_NUM_MAX]; /* shadow of hw unicast hash table */
+
+	u32 mta_hash_table[SXE_MTA_ENTRY_NUM_MAX]; /* shadow of hw multicast hash table */
+	struct sxe_uc_addr_table *uc_addr_table; /* sw unicast address slots */
+};
+
+s32 sxe_mac_addr_init(struct rte_eth_dev *eth_dev);
+
+s32 sxe_promiscuous_enable(struct rte_eth_dev *dev);
+
+s32 sxe_promiscuous_disable(struct rte_eth_dev *dev);
+
+s32 sxe_allmulticast_enable(struct rte_eth_dev *dev);
+
+s32 sxe_allmulticast_disable(struct rte_eth_dev *dev);
+
+s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     u32 rar_idx, u32 pool);
+
+void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 rar_idx);
+
+s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr);
+
+s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr, u8 on);
+
+s32 sxe_uc_all_hash_table_set(struct rte_eth_dev *dev, u8 on);
+
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr);
+
+s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on);
+
+s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+		    enum rte_vlan_type vlan_type, u16 tpid);
+
+s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask);
+
+void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on);
+
+void sxe_vlan_filter_configure(struct rte_eth_dev *dev);
+
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr);
+
+void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev);
+
+void sxe_fc_mac_addr_set(struct sxe_adapter *adapter);
+
+u8 sxe_sw_uc_entry_vf_add(struct sxe_adapter *adapter,
+				u8 vf_idx, u8 *mac_addr, bool macvlan);
+
+s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
+					bool macvlan);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_flow_ctrl.c b/drivers/net/sxe/pf/sxe_flow_ctrl.c
new file mode 100644
index 0000000000..33c4ffeb9d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_flow_ctrl.c
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_flow_ctrl.h"
+#include "sxe_phy.h"
+#include "sxe_compat_version.h"
+
+/* Push the currently configured flow control settings to hardware.
+ * Returns the result of sxe_fc_enable().
+ */
+s32 sxe_flow_ctrl_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	s32 ret = sxe_fc_enable(adapter);
+
+	PMD_LOG_DEBUG(INIT, "fc enable");
+
+	return ret;
+}
+
+/* Ethdev flow_ctrl_get ops: report the flow control parameters and
+ * the mode currently programmed in hardware. Always returns 0.
+ */
+s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev,
+					struct rte_eth_fc_conf *fc_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	bool rx_pause;
+	bool tx_pause;
+
+	fc_conf->pause_time = sxe_hw_fc_pause_time_get(hw);
+	fc_conf->high_water = sxe_hw_fc_tc_high_water_mark_get(hw, 0);
+	fc_conf->low_water = sxe_hw_fc_tc_low_water_mark_get(hw, 0);
+	fc_conf->send_xon = sxe_hw_fc_send_xon_get(hw);
+	fc_conf->autoneg = !sxe_hw_is_fc_autoneg_disabled(hw);
+	fc_conf->mac_ctrl_frame_fwd = 1;
+
+	/* Derive the reported mode from the pause directions in effect. */
+	sxe_hw_fc_status_get(hw, &rx_pause, &tx_pause);
+
+	if (rx_pause)
+		fc_conf->mode = tx_pause ? RTE_ETH_FC_FULL : RTE_ETH_FC_RX_PAUSE;
+	else
+		fc_conf->mode = tx_pause ? RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
+
+	return 0;
+}
+
+/* Ethdev flow_ctrl_set ops: validate and program new flow control
+ * parameters, then re-enable flow control.
+ *
+ * Returns 0 on success, -EINVAL for inconsistent watermarks,
+ * -EIO when enabling flow control in hardware fails.
+ */
+s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev, 
+					struct rte_eth_fc_conf *fc_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u32 rx_buf_size;
+	u32 max_high_water;
+	/* Indexed by rte_eth_fc_mode: NONE/RX_PAUSE/TX_PAUSE/FULL. */
+	enum sxe_fc_mode rte_2_sxe_fcmode[] = {
+		SXE_FC_NONE,
+		SXE_FC_RX_PAUSE,
+		SXE_FC_TX_PAUSE,
+		SXE_FC_FULL,
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	rx_buf_size = sxe_hw_rx_pkt_buf_size_get(hw, 0);
+	PMD_LOG_DEBUG(INIT, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	/* The high watermark must leave room for one max-size frame in
+	 * the rx packet buffer and must not sit below the low watermark.
+	 */
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> SXE_RX_PKT_BUF_SIZE_SHIFT;
+	if ((fc_conf->high_water > max_high_water) ||
+		(fc_conf->high_water < fc_conf->low_water)) {
+		PMD_LOG_ERR(INIT, "Invalid high/low water setup value in KB");
+		PMD_LOG_ERR(INIT, "High_water must <= 0x%x", max_high_water);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sxe_hw_fc_requested_mode_set(hw, rte_2_sxe_fcmode[fc_conf->mode]);
+	sxe_hw_fc_pause_time_set(hw, fc_conf->pause_time);
+	sxe_hw_fc_tc_high_water_mark_set(hw, 0, fc_conf->high_water);
+	sxe_hw_fc_tc_low_water_mark_set(hw, 0, fc_conf->low_water);
+	sxe_hw_fc_send_xon_set(hw, fc_conf->send_xon);
+	sxe_hw_fc_autoneg_disable_set(hw, !fc_conf->autoneg);
+
+	ret = sxe_flow_ctrl_enable(dev);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "sxe_flow_ctrl_enable = 0x%x", ret);
+		ret = -EIO;
+	}
+
+l_end:
+	return ret;
+}
+
diff --git a/drivers/net/sxe/pf/sxe_flow_ctrl.h b/drivers/net/sxe/pf/sxe_flow_ctrl.h
new file mode 100644
index 0000000000..0be5d1aaaf
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_flow_ctrl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+ 
+#ifndef __SXE_FLOW_CTRL_H__
+#define __SXE_FLOW_CTRL_H__
+
+s32 sxe_flow_ctrl_enable(struct rte_eth_dev *dev);
+
+s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev, 
+					struct rte_eth_fc_conf *fc_conf);
+
+s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev, 
+					struct rte_eth_fc_conf *fc_conf);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_irq.c b/drivers/net/sxe/pf/sxe_irq.c
new file mode 100644
index 0000000000..90c1e168f8
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_irq.c
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_alarm.h>
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_interrupts.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <eal_interrupts.h>
+#else
+#include <rte_pci.h>
+#include <bus_pci_driver.h>
+#include <eal_interrupts.h>
+#endif
+
+#include <rte_malloc.h>
+
+#include "sxe_irq.h"
+#include "sxe_logs.h"
+#include "sxe_regs.h"
+#include "sxe_hw.h"
+#include "sxe.h"
+#include "sxe_phy.h"
+#include "sxe_queue.h"
+#include "sxe_errno.h"
+#include "sxe_compat_version.h"
+#include "sxe_vf.h"
+
+#define SXE_LINK_DOWN_TIMEOUT 4000 
+#define SXE_LINK_UP_TIMEOUT   1000 
+
+#define SXE_IRQ_MAILBOX          (u32)(1 << 1)
+#define SXE_IRQ_MACSEC           (u32)(1 << 2)
+
+#define SXE_LINK_UP_TIME         90 
+
+#define SXE_MISC_VEC_ID          RTE_INTR_VEC_ZERO_OFFSET
+
+#define SXE_RX_VEC_BASE          RTE_INTR_VEC_RXTX_OFFSET
+
+/* Log the cached link state and PCI address of @dev at debug level. */
+static void sxe_link_info_output(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_eth_link link_info;
+
+	rte_eth_linkstatus_get(dev, &link_info);
+
+	PMD_LOG_DEBUG(DRV, "port:%d link status:%s speed %u Mbps %s",
+				(u16)(dev->data->port_id),
+				link_info.link_status ? "up" : "down",
+				link_info.link_speed,
+				(link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
+				"full-duplex" : "half-duplex");
+
+	PMD_LOG_DEBUG(DRV, "pci dev: " PCI_PRI_FMT,
+				pci_dev->addr.domain,
+				pci_dev->addr.bus,
+				pci_dev->addr.devid,
+				pci_dev->addr.function);
+}
+
+/* Alarm callback scheduled by the LSC handler: finishes link state
+ * processing outside interrupt context, services a pending mailbox
+ * cause, then re-enables the LSC interrupt and acks the irq.
+ */
+void sxe_event_irq_delayed_handler(void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 eicr;
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+
+	/* Mask all irqs while the cause register is read and cleared. */
+	sxe_hw_all_irq_disable(hw);
+
+	eicr = sxe_hw_irq_cause_get(hw);
+	PMD_LOG_DEBUG(DRV, "delay handler eicr:0x%x action:0x%x",
+			   eicr, irq->action);
+
+	/* Only the event causes in the upper 16 bits are handled here. */
+	eicr &= 0xFFFF0000;
+	/* Keep LSC pending while the link thread still runs so the
+	 * event is not lost. */
+	if (rte_atomic32_read(&adapter->link_thread_running) && (eicr & SXE_EICR_LSC)) {
+		eicr &= ~SXE_EICR_LSC;
+		PMD_LOG_DEBUG(DRV, "delay handler keep lsc irq");
+	}
+	sxe_hw_pending_irq_write_clear(hw, eicr);
+
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	if (eicr & SXE_EICR_MAILBOX) {
+		sxe_mbx_irq_handler(eth_dev);
+	}
+#endif
+
+	if (irq->action & SXE_IRQ_LINK_UPDATE) {
+		sxe_link_update(eth_dev, 0);
+		sxe_link_info_output(eth_dev);
+		/* Notify the application of the link state change. */
+		sxe_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+		irq->action &= ~SXE_IRQ_LINK_UPDATE;
+	}
+
+	/* The LSC handler masked LSC when scheduling us; allow it again. */
+	irq->enable_mask |= SXE_EIMS_LSC;
+	PMD_LOG_DEBUG(DRV, "irq enable mask:0x%x", irq->enable_mask);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	rte_intr_ack(intr_handle);
+
+	return;
+}
+
+/* Handle a link status change interrupt: refresh the cached link
+ * state and schedule the delayed handler that completes processing
+ * and notifies the application.
+ */
+static void sxe_lsc_irq_handler(struct rte_eth_dev *eth_dev)
+{
+	struct rte_eth_link link;
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u64 timeout;
+	bool link_up;
+
+	rte_eth_linkstatus_get(eth_dev, &link);
+
+	link_up = sxe_hw_is_link_state_up(hw);
+
+	/* Spurious irq: link was down and still is. */
+	if (!link.link_status && !link_up) {
+		PMD_LOG_DEBUG(DRV, "link change irq, down->down, do nothing.");
+		goto l_out;
+	}
+
+	if (irq->to_pcs_init) {
+		PMD_LOG_DEBUG(DRV, "to set pcs init, do nothing.");
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "link change irq handler start");
+	sxe_link_update(eth_dev, 0);
+	sxe_link_info_output(eth_dev);
+
+	/* Wait longer when the link was up (it is now going down). */
+	timeout = link.link_status ? SXE_LINK_DOWN_TIMEOUT :
+					SXE_LINK_UP_TIMEOUT;
+
+	/* timeout is in ms; rte_eal_alarm_set() takes microseconds. */
+	if (rte_eal_alarm_set(timeout * 1000,
+			      sxe_event_irq_delayed_handler,
+			      (void *)eth_dev) < 0) {
+		PMD_LOG_ERR(DRV, "submit event irq delay handle fail.");
+	} else {
+		/* Mask LSC until the delayed handler re-enables it. */
+		irq->enable_mask &= ~SXE_EIMS_LSC;
+	}
+
+	PMD_LOG_INFO(DRV, "link change irq handler end");
+
+l_out:
+	return;
+}
+
+/* Dispatch the pending event-irq work recorded in irq->action.
+ * Always returns 0.
+ */
+static s32 sxe_event_irq_action(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	PMD_LOG_DEBUG(DRV, "event irq action type %d", irq->action);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	/* VF mailbox request pending: service it and clear the flag. */
+	if (irq->action & SXE_IRQ_MAILBOX) {
+		sxe_mbx_irq_handler(eth_dev);
+		irq->action &= ~SXE_IRQ_MAILBOX;
+	}
+#endif
+
+	/* Link status change pending; flag cleared by the delayed handler. */
+	if (irq->action & SXE_IRQ_LINK_UPDATE) {
+		sxe_lsc_irq_handler(eth_dev);
+		PMD_LOG_INFO(DRV, "link change irq");
+	}
+
+	return 0;
+}
+
+/* Top-level event (misc) interrupt handler: reads and clears the
+ * pending causes, records them in irq->action, dispatches the work
+ * and re-enables the event interrupts.
+ */
+STATIC void sxe_event_irq_handler(void *data)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)data;
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 eicr;
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+
+	/* Mask all irqs while the cause register is read and cleared. */
+	sxe_hw_all_irq_disable(hw);
+
+	eicr = sxe_hw_irq_cause_get(hw);
+	PMD_LOG_DEBUG(DRV, "event irq triggered eicr:0x%x", eicr);
+
+	/* Event causes live in the upper 16 bits; queue bits are ignored. */
+	eicr &= 0xFFFF0000;
+
+	sxe_hw_pending_irq_write_clear(hw, eicr);
+
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	if (eicr & SXE_EICR_LSC) {
+		irq->action |= SXE_IRQ_LINK_UPDATE;
+	}
+
+	if (eicr & SXE_EICR_MAILBOX) {
+		irq->action |= SXE_IRQ_MAILBOX;
+	}
+
+	if (eicr & SXE_EICR_LINKSEC) {
+		irq->action |= SXE_IRQ_MACSEC;
+	}
+
+	sxe_event_irq_action(eth_dev);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	/* Re-arm whatever the current enable mask allows. */
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	return;
+}
+
+/* Register the event irq handler and, in SR-IOV builds, switch the
+ * controller to MSI-X mode and unmask the event interrupts.
+ */
+void sxe_irq_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *irq_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+
+
+	rte_intr_callback_register(irq_handle,
+				   sxe_event_irq_handler, eth_dev);
+
+	rte_spinlock_init(&adapter->irq_ctxt.event_irq_lock);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 gpie = 0;
+
+	/* UIO / VFIO-MSIX handles: turn on MSI-X mode in GPIE. */
+	if ((irq_handle->type == RTE_INTR_HANDLE_UIO) ||
+	    (irq_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)) {
+		gpie = sxe_hw_irq_general_reg_get(hw);
+
+		gpie |= SXE_GPIE_MSIX_MODE | SXE_GPIE_OCD;
+		sxe_hw_irq_general_reg_set(hw, gpie);
+	}
+	rte_intr_enable(irq_handle);
+
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+#endif
+	return;
+}
+
+/* Program the GPIE register for MSI-X operation.
+ *
+ * Returns 0 when multi-vector MSI-X mode is usable, -SXE_ERR_CONFIG
+ * when the handle has no per-queue vectors and the hw is not already
+ * in MSI-X mode (the caller then skips per-queue irq mapping).
+ */
+static s32 sxe_irq_general_config(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 gpie;
+	s32 ret = 0;
+
+	gpie = sxe_hw_irq_general_reg_get(hw);
+	if (!rte_intr_dp_is_en(handle) &&
+	    !(gpie & (SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT))) {
+		/* NOTE(review): the error path still sets MSIX_MODE and
+		 * writes GPIE below - confirm this is intentional. */
+		ret = -SXE_ERR_CONFIG;
+		gpie |= SXE_GPIE_MSIX_MODE;
+		PMD_LOG_INFO(DRV, "rx queue irq num:%d gpie:0x%x.",
+				  handle->nb_efd, gpie);
+	} else {
+		gpie |= SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT |
+			SXE_GPIE_OCD | SXE_GPIE_EIAME |
+			SXE_GPIE_SPP1_EN | SXE_GPIE_SPP2_EN;
+	}
+
+	sxe_hw_irq_general_reg_set(hw, gpie);
+
+	return ret;
+}
+
+/* Map rx queues and the event cause to MSI-X vectors and program the
+ * irq throttling interval. Does nothing when MSI-X multi-vector mode
+ * is unavailable.
+ */
+static void sxe_msix_configure(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_rx_queue *rx_queue;
+	s32 ret;
+	u16 queue_id;
+	u16 vector = SXE_MISC_VEC_ID;
+	u16 base = SXE_MISC_VEC_ID;
+	u32 irq_interval;
+	u32 value;
+
+	ret = sxe_irq_general_config(dev);
+	if (ret) {
+		PMD_LOG_INFO(DRV, "unsupport msi-x, no need config irq");
+		goto l_out;
+	}
+
+	/* With a dedicated misc vector, queue vectors start at the
+	 * rx/tx offset instead of vector 0. */
+	if (rte_intr_allow_others(handle)) {
+		vector = base = SXE_RX_VEC_BASE;
+	}
+
+	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT);
+
+	if (rte_intr_dp_is_en(handle)) {
+		/* One vector per rx queue until nb_efd is exhausted; the
+		 * remaining queues share the last vector. */
+		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+			queue_id++) {
+			rx_queue = dev->data->rx_queues[queue_id];
+			if (dev->data->lro == 1) {
+				sxe_hw_ring_irq_interval_set(hw, vector,
+										irq_interval);
+			}
+
+			sxe_hw_ring_irq_map(hw, false,
+						rx_queue->reg_idx,
+						vector);
+			handle->intr_vec[queue_id] = vector;
+			PMD_LOG_INFO(DRV,
+					"queue id:%u reg_idx:%u vector:%u ",
+					queue_id,
+					rx_queue->reg_idx,
+					vector);
+			if (vector < base + handle->nb_efd - 1) {
+				vector++;
+			}
+		}
+		/* Event causes always go to the misc vector. */
+		sxe_hw_event_irq_map(hw, 1, SXE_MISC_VEC_ID);
+	}
+
+	sxe_hw_ring_irq_interval_set(hw, 0, irq_interval);
+
+	sxe_hw_ring_irq_auto_disable(hw, true);
+
+	/* Auto-clear all queue irqs, but not the event causes. */
+	value = SXE_EIMS_ENABLE_MASK;
+	value &= ~(SXE_EIMS_OTHER | SXE_EIMS_MAILBOX | SXE_EIMS_LSC);
+	sxe_hw_event_irq_auto_clear_set(hw, value);
+
+l_out:
+	return;
+}
+
+/* Set up rx queue interrupts: allocate event fds and the
+ * queue-to-vector map, program the MSI-X tables and enable irqs.
+ *
+ * Returns 0 on success, -ENOTSUP when more rx queues than supported
+ * irq vectors are configured, -SXE_ERR_CONFIG when event fds cannot
+ * be enabled, -ENOMEM when the vector map allocation fails.
+ */
+s32 sxe_irq_configure(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	u16 irq_num;
+	s32 ret = 0;
+
+	/* One event fd per rx queue when rxq interrupts are requested. */
+	if ((rte_intr_cap_multiple(handle) ||
+	     !RTE_ETH_DEV_SRIOV(eth_dev).active) &&
+	    eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+		irq_num = eth_dev->data->nb_rx_queues;
+		if (irq_num > SXE_QUEUE_IRQ_NUM_MAX) {
+			PMD_LOG_ERR(DRV, "irq_num:%u exceed limit:%u ",
+				      irq_num, SXE_QUEUE_IRQ_NUM_MAX);
+			ret = -ENOTSUP;
+			goto l_out;
+		}
+
+		if (rte_intr_efd_enable(handle, irq_num)) {
+			ret = -SXE_ERR_CONFIG;
+			PMD_LOG_ERR(DRV,
+				      "intr_handle type:%d irq num:%d invalid",
+				      handle->type, irq_num);
+			goto l_out;
+		}
+	}
+
+	/* Map each rx queue to its interrupt vector (freed in
+	 * sxe_irq_vec_free()). */
+	if (rte_intr_dp_is_en(handle) && !handle->intr_vec) {
+		handle->intr_vec = rte_zmalloc("intr_vec",
+				    eth_dev->data->nb_rx_queues * sizeof(u32), 0);
+		if (handle->intr_vec == NULL) {
+			PMD_LOG_ERR(DRV, "rx queue irq vector "
+					 "allocate %zuB memory fail.",
+					 eth_dev->data->nb_rx_queues * sizeof(u32));
+			ret = -ENOMEM;
+			goto l_out;
+		}
+	}
+
+	sxe_msix_configure(eth_dev);
+
+	sxe_irq_enable(eth_dev);
+
+	PMD_LOG_INFO(DRV,
+		      "intr_conf rxq:%u intr_handle type:%d rx queue num:%d "
+		      "queue irq num:%u total irq num:%u "
+		      "config done",
+		      eth_dev->data->dev_conf.intr_conf.rxq,
+		      handle->type,
+		      eth_dev->data->nb_rx_queues,
+		      handle->nb_efd,
+		      handle->max_intr);
+
+l_out:
+	return ret;
+}
+
+/* Enable event (link) and rx queue interrupts according to the
+ * configured intr_conf flags.
+ */
+void sxe_irq_enable(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (rte_intr_allow_others(handle)) {
+		/* Dedicated misc vector available: lsc irq can be used. */
+		sxe_link_info_output(eth_dev);
+
+		if (eth_dev->data->dev_conf.intr_conf.lsc != 0) {
+			irq->enable_mask |= SXE_EIMS_LSC;
+		} else {
+			irq->enable_mask &= ~SXE_EIMS_LSC;
+		}
+	} else {
+		/* Single shared vector: event irqs cannot be serviced. */
+		rte_intr_callback_unregister(handle,
+					     sxe_event_irq_handler, eth_dev);
+		if (eth_dev->data->dev_conf.intr_conf.lsc != 0) {
+			PMD_LOG_ERR(DRV, "event irq not support.");
+		}
+	}
+
+	/* check if rxq interrupt is enabled */
+	if (eth_dev->data->dev_conf.intr_conf.rxq != 0 &&
+	    rte_intr_dp_is_en(handle)) {
+		irq->enable_mask |= SXE_EIMS_RTX_QUEUE;
+	}
+
+	rte_intr_enable(handle);
+
+	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+
+	PMD_LOG_INFO(DRV,
+		      "intr_handle type:%d enable irq mask:0x%x",
+		      handle->type,
+		      irq->enable_mask);
+
+	return;
+}
+
+/* Release the rx-queue interrupt vector map, if one was allocated. */
+void sxe_irq_vec_free(struct rte_intr_handle *handle)
+{
+	if (handle->intr_vec == NULL)
+		return;
+
+	rte_free(handle->intr_vec);
+	handle->intr_vec = NULL;
+}
+
+/* Tear down rx queue interrupts and free the vector map. On handles
+ * without a dedicated misc vector, re-register the event handler that
+ * sxe_irq_enable() removed.
+ */
+void sxe_irq_disable(struct rte_eth_dev *eth_dev)
+{
+	struct rte_intr_handle *handle =
+		SXE_PCI_INTR_HANDLE(RTE_ETH_DEV_TO_PCI(eth_dev));
+
+	if (!rte_intr_allow_others(handle)) {
+		rte_intr_callback_register(handle,
+					   sxe_event_irq_handler,
+					   (void *)eth_dev);
+	}
+
+	rte_intr_efd_disable(handle);
+	sxe_irq_vec_free(handle);
+}
+
+/* Disable the interrupt and unregister the event handler, retrying
+ * while a callback is still executing, then cancel any pending
+ * delayed-handler alarm.
+ */
+void sxe_irq_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	u8 retry = 0;
+	s32 ret;
+
+	rte_intr_disable(handle);
+
+	do {
+		ret = rte_intr_callback_unregister(handle,
+				sxe_event_irq_handler, eth_dev);
+		if (ret >= 0 || ret == -ENOENT) {
+			break;
+		} else if (ret != -EAGAIN) {
+			PMD_LOG_ERR(DRV,
+				    "irq handler unregister fail, next to retry");
+		}
+		/* -EAGAIN: a callback is in flight; wait and retry. */
+		rte_delay_ms(100);
+	} while (retry++ < (10 + SXE_LINK_UP_TIME));
+
+	rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, eth_dev);
+
+	return;
+}
+
+/* Ethdev rx_queue_intr_enable ops: unmask the rx interrupt of
+ * @queue_id. Queues 0-15 live in the base enable mask cached in
+ * irq->enable_mask; queues 16-63 go through the two extended
+ * ring-irq switch registers. Always returns 0.
+ */
+s32 sxe_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, u16 queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 mask;
+
+	if (queue_id < 16) {
+		sxe_hw_all_irq_disable(hw);
+		irq->enable_mask |= (1u << queue_id);
+		sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	} else if (queue_id < 32) {
+		/* Fix: set the queue bit with |=. The previous "&=" cleared
+		 * every other enabled queue and set nothing when the target
+		 * bit was clear (the disable path below uses the same RMW
+		 * pattern with "&= ~"). 1u avoids UB on a 31-bit shift. */
+		mask = sxe_hw_ring_irq_switch_get(hw, 0);
+		mask |= (1u << queue_id);
+		sxe_hw_ring_irq_switch_set(hw, 0, mask);
+	} else if (queue_id < 64) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 1);
+		mask |= (1u << (queue_id - 32));
+		sxe_hw_ring_irq_switch_set(hw, 1, mask);
+	}
+
+	rte_intr_ack(intr_handle);
+
+	PMD_LOG_INFO(DRV, "queue_id:%u irq enabled enable_mask:0x%x.",
+		    queue_id, irq->enable_mask);
+
+	return 0;
+}
+
+/* Ethdev rx_queue_intr_disable ops: mask the rx interrupt of
+ * @queue_id (layout mirrors sxe_rx_queue_intr_enable()).
+ * Always returns 0.
+ */
+s32 sxe_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, u16 queue_id)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 mask;
+
+	/* Fix: shift 1u, not 1 - "1 << 31" on a signed int is UB. */
+	if (queue_id < 16) {
+		sxe_hw_all_irq_disable(hw);
+		irq->enable_mask &= ~(1u << queue_id);
+		sxe_hw_specific_irq_enable(hw, irq->enable_mask);
+	} else if (queue_id < 32) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 0);
+		mask &= ~(1u << queue_id);
+		sxe_hw_ring_irq_switch_set(hw, 0, mask);
+	} else if (queue_id < 64) {
+		mask = sxe_hw_ring_irq_switch_get(hw, 1);
+		mask &= ~(1u << (queue_id - 32));
+		sxe_hw_ring_irq_switch_set(hw, 1, mask);
+	}
+
+	PMD_LOG_INFO(DRV, "queue_id:%u irq disabled enable_mask:0x%x.",
+		    queue_id, irq->enable_mask);
+
+	return 0;
+}
+
diff --git a/drivers/net/sxe/pf/sxe_irq.h b/drivers/net/sxe/pf/sxe_irq.h
new file mode 100644
index 0000000000..322d7023c9
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_irq.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_IRQ_H__
+#define __SXE_IRQ_H__
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+
+#define SXE_QUEUE_IRQ_NUM_MAX    15
+
+#define SXE_QUEUE_ITR_INTERVAL_DEFAULT   500 
+
+#define SXE_EITR_INTERVAL_UNIT_NS	2048
+#define SXE_EITR_ITR_INT_SHIFT          3
+#define SXE_IRQ_ITR_MASK                (0x00000FF8)
+#define SXE_EITR_INTERVAL_US(us) \
+	(((us) * 1000 / SXE_EITR_INTERVAL_UNIT_NS << SXE_EITR_ITR_INT_SHIFT) & \
+		SXE_IRQ_ITR_MASK)
+
+/* Per-adapter interrupt bookkeeping. */
+struct sxe_irq_context {
+	u32 action;          /* pending event work flags (SXE_IRQ_*) */
+	u32 enable_mask;    /* event/queue irq bits currently enabled */
+	u32 enable_mask_original; /* saved mask -- TODO confirm save/restore users */
+	rte_spinlock_t event_irq_lock; /* guards irq cause read/clear/re-enable */
+	bool to_pcs_init;   /* set while pcs init pending; lsc handler skips work */
+};
+
+void sxe_event_irq_delayed_handler(void *param);
+
+void sxe_irq_init(struct rte_eth_dev *eth_dev);
+
+s32 sxe_irq_configure(struct rte_eth_dev *dev);
+
+void sxe_irq_enable(struct rte_eth_dev *eth_dev);
+
+void sxe_irq_disable(struct rte_eth_dev *eth_dev);
+
+void sxe_irq_uninit(struct rte_eth_dev *eth_dev);
+
+s32 sxe_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, u16 queue_id);
+
+s32 sxe_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, u16 queue_id);
+
+void sxe_irq_vec_free(struct rte_intr_handle *handle);
+
+#endif
+
diff --git a/drivers/net/sxe/pf/sxe_main.c b/drivers/net/sxe/pf/sxe_main.c
new file mode 100644
index 0000000000..3f30f26508
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_main.c
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include <string.h>
+#include <sys/time.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+
+#include "sxe_version.h"
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_dev.h>
+#else
+#include <bus_pci_driver.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <dev_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe_types.h"
+#include "sxe_ethdev.h"
+#include "sxe.h"
+#include "drv_msg.h"
+#include "sxe_cli.h"
+#include "sxe_queue.h"
+#include "sxe_errno.h"
+#include "sxe_compat_platform.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_vf.h"
+#include "sxe_queue_common.h"
+
+static const struct rte_pci_id sxe_pci_tbl[] = {
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_STARS, SXE_DEV_ID_ASIC) },
+	{.vendor_id = 0,}
+};
+
+s8 g_log_filename[LOG_FILE_NAME_LEN] = {0};
+
+bool is_log_created = false;
+
+#ifdef SXE_DPDK_DEBUG
+/* Create a timestamped log file and redirect the DPDK log stream to
+ * it. Only the first successful call has any effect.
+ */
+void sxe_log_stream_init(void)
+{
+	FILE *fp;
+	struct timeval	tv;
+	struct tm *td;
+	u8 len;
+	s8 time[40];
+
+	if (is_log_created) {
+		goto l_out;
+	}
+
+	memset(g_log_filename, 0, LOG_FILE_NAME_LEN);
+
+	/* NOTE(review): snprintf() may truncate and its return can exceed
+	 * u8/LOG_FILE_NAME_LEN - confirm the configured path prefix keeps
+	 * this impossible. */
+	len = snprintf(g_log_filename, LOG_FILE_NAME_LEN, "%s%s.",
+		      LOG_FILE_PATH, LOG_FILE_PREFIX);
+
+	gettimeofday(&tv, NULL);
+	/* NOTE(review): localtime() can return NULL - unchecked here. */
+	td = localtime(&tv.tv_sec);
+	strftime(time, sizeof(time), "%Y-%m-%d-%H:%M:%S", td);
+
+	snprintf(g_log_filename + len, LOG_FILE_NAME_LEN - len,
+		"%s", time);
+
+	fp = fopen(g_log_filename, "w+");
+	if (fp == NULL) {
+		PMD_LOG_ERR(INIT, "open log file:%s fail, errno:%d %s.",
+			    g_log_filename, errno, strerror(errno));
+		goto l_out;
+	}
+
+	PMD_LOG_NOTICE(INIT, "log stream file:%s.", g_log_filename);
+
+	/* fp is handed to the logging subsystem and stays open on purpose. */
+	rte_openlog_stream(fp);
+
+	is_log_created = true;
+
+l_out:
+	return;
+}
+#endif
+
+/* PCI probe hook: bring up the HDC channel and create the ethdev for
+ * @pci_dev. Returns 0 on success or the ethdev creation error.
+ */
+static s32 sxe_probe(struct rte_pci_driver *pci_drv __rte_unused,
+					struct rte_pci_device *pci_dev)
+{
+	s32 ret;
+
+	printf("sxe_version[%s], sxe_commit_id[%s], sxe_branch[%s], sxe_build_time[%s]\n",
+		SXE_VERSION, SXE_COMMIT_ID, SXE_BRANCH, SXE_BUILD_TIME);
+
+#ifdef SXE_DPDK_DEBUG
+	sxe_log_stream_init();
+#endif
+
+	/* HDC channel must be up before ethdev init talks to firmware. */
+	sxe_hdc_channel_init();
+
+	ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+				sizeof(struct sxe_adapter),
+				eth_dev_pci_specific_init,
+				pci_dev,
+				sxe_ethdev_init, NULL);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sxe pmd eth dev create fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(INIT, "%s sxe pmd probe done.", pci_dev->device.name);
+
+l_out:
+	return ret;
+}
+
+/* PCI remove hook: destroy the ethdev bound to @pci_dev and shut the
+ * HDC channel down. Returns 0 when the device is already gone.
+ */
+static s32 sxe_remove(struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev =
+		rte_eth_dev_allocated(pci_dev->device.name);
+	s32 ret;
+
+	if (eth_dev == NULL) {
+		PMD_LOG_ERR(INIT, "sxe pmd dev has removed.");
+		return 0;
+	}
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev,
+					sxe_ethdev_uninit);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "sxe eth dev remove fail.(err:%d)", ret);
+		return ret;
+	}
+
+	sxe_hdc_channel_uninit();
+
+	PMD_LOG_DEBUG(INIT, "sxe pmd remove done.");
+
+	return ret;
+}
+
+STATIC struct rte_pci_driver rte_sxe_pmd = {
+	.id_table  = sxe_pci_tbl,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe     = sxe_probe,
+	.remove    = sxe_remove,
+};
+
+/* Ask the firmware to enable or disable management reset handling.
+ * Returns the command transport result.
+ */
+STATIC s32 sxe_mng_reset(struct sxe_hw *hw, bool enable)
+{
+	sxe_mng_rst_s mng_rst;
+	s32 ret;
+
+	mng_rst.enable = enable;
+	PMD_LOG_INFO(INIT, "mng reset, enable=%x\n", enable);
+
+	/* Send reset command */
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_MNG_RST,
+				(void *)&mng_rst, sizeof(mng_rst),
+				NULL, 0);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "mng reset failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	PMD_LOG_INFO(INIT, "mng reset success, enable=%x\n", enable);
+
+	return ret;
+}
+
+/* Full controller reset: quiesce rx, irqs and rings, cycle the
+ * management reset around the nic reset, then clear the unicast
+ * address table and virtualization setting.
+ *
+ * Returns 0 on success or the first failing step's error code.
+ */
+s32 sxe_hw_reset(struct sxe_hw *hw)
+{
+	s32 ret;
+
+	/* Rx DBU off */
+	sxe_hw_rx_cap_switch_off(hw);
+
+	sxe_hw_all_irq_disable(hw);
+
+	sxe_hw_pending_irq_read_clear(hw);
+
+	sxe_hw_all_ring_disable(hw, SXE_HW_TXRX_RING_NUM_MAX);
+
+	/* Stop management handling while the nic resets. */
+	ret = sxe_mng_reset(hw, false);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mng reset disable failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	ret = sxe_hw_nic_reset(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "nic reset failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	/* Give the hardware time to settle before re-enabling. */
+	msleep(50);
+
+	ret = sxe_mng_reset(hw, true);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mng reset enable failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	sxe_hw_uc_addr_clear(hw);
+
+	sxe_hw_vt_disable(hw);
+
+l_end:
+	return ret;
+}
+
+/* Post-reset hardware bring-up: clear filter and statistics state,
+ * reset DCB rate limiting, advertise local flow-control capability
+ * and arm MAC auto restart.
+ */
+void sxe_hw_start(struct sxe_hw *hw)
+{
+	sxe_hw_vlan_filter_array_clear(hw);
+
+	sxe_hw_stats_regs_clean(hw);
+
+	sxe_hw_no_snoop_disable(hw);
+
+	sxe_hw_dcb_rate_limiter_clear(hw, SXE_TXRX_RING_NUM_MAX);
+
+	sxe_fc_autoneg_localcap_set(hw);
+
+	/* presumably lets link setup restart the MAC automatically --
+	 * confirm against sxe_hw users of mac.auto_restart. */
+	hw->mac.auto_restart = true;
+	PMD_LOG_INFO(INIT, "auto_restart:%u.\n", hw->mac.auto_restart);
+
+	return;
+}
+
+/* True when @dev was probed by driver @drv (compared by name). */
+static bool is_device_supported(struct rte_eth_dev *dev,
+					struct rte_pci_driver *drv)
+{
+	return strcmp(dev->device->driver->name, drv->driver.name) == 0;
+}
+
+/* True when @dev is bound to this sxe PMD. */
+bool is_sxe_supported(struct rte_eth_dev *dev)
+{
+	return is_device_supported(dev, &rte_sxe_pmd);
+}
+
+RTE_PMD_REGISTER_PCI(net_sxe, rte_sxe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sxe, sxe_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_sxe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+#ifdef SXE_DPDK_DEBUG
+#ifdef DPDK_19_11_6
+s32 sxe_log_init;
+s32 sxe_log_drv;
+s32 sxe_log_rx;
+s32 sxe_log_tx;
+s32 sxe_log_hw;
+RTE_INIT(sxe_init_log)
+{
+	sxe_log_init = rte_log_register("pmd.net.sxe.init");
+	if (sxe_log_init >= 0)
+		rte_log_set_level(sxe_log_init, RTE_LOG_DEBUG);
+
+	sxe_log_drv = rte_log_register("pmd.net.sxe.drv");
+	if (sxe_log_drv >= 0)
+		rte_log_set_level(sxe_log_drv, RTE_LOG_DEBUG);
+
+	sxe_log_rx = rte_log_register("pmd.net.sxe.rx");
+	if (sxe_log_rx >= 0)
+		rte_log_set_level(sxe_log_rx, RTE_LOG_DEBUG);
+
+	sxe_log_tx = rte_log_register("pmd.net.sxe.tx");
+	if (sxe_log_tx >= 0)
+		rte_log_set_level(sxe_log_tx, RTE_LOG_DEBUG);
+
+	sxe_log_hw = rte_log_register("pmd.net.sxe.tx_hw");
+	if (sxe_log_hw >= 0)
+		rte_log_set_level(sxe_log_hw, RTE_LOG_DEBUG);
+}
+#else
+RTE_LOG_REGISTER(sxe_log_init, pmd.net.sxe.init, DEBUG);
+RTE_LOG_REGISTER(sxe_log_drv, pmd.net.sxe.drv, DEBUG);
+RTE_LOG_REGISTER(sxe_log_rx, pmd.net.sxe.rx, DEBUG);
+RTE_LOG_REGISTER(sxe_log_tx, pmd.net.sxe.tx, DEBUG);
+RTE_LOG_REGISTER(sxe_log_hw, pmd.net.sxe.tx_hw, DEBUG);
+#endif
+#else
+#ifdef DPDK_19_11_6
+s32 sxe_log_init;
+s32 sxe_log_drv;
+RTE_INIT(sxe_init_log)
+{
+	sxe_log_init = rte_log_register("pmd.net.sxe.init");
+	if (sxe_log_init >= 0)
+		rte_log_set_level(sxe_log_init, RTE_LOG_NOTICE);
+
+	sxe_log_drv = rte_log_register("pmd.net.sxe.drv");
+	if (sxe_log_drv >= 0)
+		rte_log_set_level(sxe_log_drv, RTE_LOG_NOTICE);
+}
+#else
+RTE_LOG_REGISTER(sxe_log_init, pmd.net.sxe.init, NOTICE);
+RTE_LOG_REGISTER(sxe_log_drv, pmd.net.sxe.drv, NOTICE);
+#endif
+#endif
+
+/* Version-compatible wrapper for the ethdev event callback API
+ * (the underscore-prefixed name is the DPDK 19.11 spelling).
+ */
+int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
+	enum rte_eth_event_type event, void *ret_param)
+{
+#ifdef DPDK_19_11_6
+	return _rte_eth_dev_callback_process(dev, event, ret_param);
+#else
+	return rte_eth_dev_callback_process(dev, event, ret_param);
+#endif
+}
+
diff --git a/drivers/net/sxe/pf/sxe_offload.c b/drivers/net/sxe/pf/sxe_offload.c
new file mode 100644
index 0000000000..deea11451a
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_offload.c
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe.h"
+#include "sxe_offload.h"
+#include "sxe_logs.h"
+#include "sxe_compat_version.h"
+#include "sxe_queue_common.h"
+#include "sxe_offload_common.h"
+
+/* Built-in default RSS hash key, used when the application supplies
+ * none (see sxe_rss_configure()).  NOTE(review): 40 bytes should equal
+ * SXE_MAX_RSS_KEY_ENTRIES 32-bit key registers — confirm against
+ * sxe_hw_rss_key_set_all().
+ */
+STATIC u8 rss_sxe_key[40] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+/* Widths/masks for walking the RETA in 4-entry (4-bit mask) chunks. */
+#define SXE_4_BIT_WIDTH  (CHAR_BIT / 2)
+#define SXE_4_BIT_MASK   RTE_LEN2MASK(SXE_4_BIT_WIDTH, u8)
+#define SXE_8_BIT_WIDTH  CHAR_BIT
+#define SXE_8_BIT_MASK   UINT8_MAX
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+/* Return the driver's default 40-byte RSS key (the array itself,
+ * not a copy).  Pointer declarator bound to the name per C style.
+ */
+u8 *sxe_rss_hash_key_get(void)
+{
+	return rss_sxe_key;
+}
+#endif
+
+/* Per-queue Rx offload capabilities; delegates to the common layer. */
+u64 sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_queue_offload_capa_get(dev);
+}
+
+/* Per-port Rx offload capabilities; delegates to the common layer. */
+u64 sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_port_offload_capa_get(dev);
+}
+
+/* No per-queue Tx offloads are supported; always returns 0. */
+u64 sxe_tx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+/* Per-port Tx offload capabilities; delegates to the common layer. */
+u64 sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	return __sxe_tx_port_offload_capa_get(dev);
+}
+
+/* Turn off RSS multi-queue distribution in hardware. */
+void sxe_rss_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_rss_cap_switch(hw, false);
+}
+
+/* Program the RSS hash key and the enabled hash-field selection,
+ * then turn RSS on.
+ *
+ * @hw:       hardware context
+ * @rss_conf: rss_key may be NULL to keep the current key; rss_hf
+ *            selects which packet types participate in hashing.
+ */
+void sxe_rss_hash_set(struct sxe_hw *hw,
+				struct rte_eth_rss_conf *rss_conf)
+{
+	u8  *hash_key;
+	u32 rss_key[SXE_MAX_RSS_KEY_ENTRIES];
+	u16 i;
+	u64 rss_hf;
+	u32 rss_field = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		/* Pack the byte key into little-endian 32-bit words.
+		 * Cast to u32 before shifting: a u8 promotes to signed
+		 * int, and shifting a byte >= 0x80 left by 24 into the
+		 * sign bit is undefined behavior.
+		 */
+		for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) {
+			rss_key[i]  = (u32)hash_key[(i * 4)];
+			rss_key[i] |= (u32)hash_key[(i * 4) + 1] << 8;
+			rss_key[i] |= (u32)hash_key[(i * 4) + 2] << 16;
+			rss_key[i] |= (u32)hash_key[(i * 4) + 3] << 24;
+		}
+		sxe_hw_rss_key_set_all(hw, rss_key);
+	}
+
+	/* Translate DPDK hash flags into hardware field-select bits. */
+	rss_hf = rss_conf->rss_hf;
+	if (rss_hf & RTE_ETH_RSS_IPV4)
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4;
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_TCP;
+
+	if (rss_hf & RTE_ETH_RSS_IPV6)
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6;
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_TCP;
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+
+	sxe_hw_rss_field_set(hw, rss_field);
+
+	sxe_hw_rss_cap_switch(hw, true);
+}
+
+/* Apply the device RSS configuration at start time: fill the
+ * redirection table round-robin over the Rx queues (unless the user
+ * already programmed it via the reta API) and set key/hash fields.
+ * RSS is disabled when no supported hash type is requested.
+ */
+void sxe_rss_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	u16 j;
+	u8  rss_indir_tbl[SXE_MAX_RETA_ENTRIES];
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (adapter->rss_reta_updated == false) {
+		/* Spread RETA entries evenly across all Rx queues. */
+		for (i = 0, j = 0; i < SXE_MAX_RETA_ENTRIES; i++, j++) {
+			if (j == dev->data->nb_rx_queues)
+				j = 0;
+
+			rss_indir_tbl[i] = j;
+		}
+
+		sxe_hw_rss_redir_tbl_set_all(hw, rss_indir_tbl);
+	}
+
+	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+	if ((rss_conf->rss_hf & SXE_RSS_OFFLOAD_ALL) == 0) {
+		PMD_LOG_INFO(INIT, "user rss config match hw supports is 0");
+		sxe_rss_disable(dev);
+		return;
+	}
+
+	/* Fall back to the built-in default key. */
+	if (rss_conf->rss_key == NULL)
+		rss_conf->rss_key = rss_sxe_key;
+
+	sxe_rss_hash_set(hw, rss_conf);
+}
+
+/* dev_ops reta_update: rewrite the entries of the 128-entry RSS
+ * redirection table selected by reta_conf[].mask.
+ *
+ * Returns 0 on success, -EIO if the port is not started, -EINVAL if
+ * reta_size is not exactly 128.
+ */
+s32 sxe_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size)
+{
+	u16 i;
+	u8 j, mask;
+	u32 reta, r;
+	u16 idx, shift;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (!dev_data->dev_started) {
+		PMD_LOG_ERR(DRV,
+			"port %d must be started before rss reta update",
+			 dev_data->port_id);
+		ret = -EIO;
+		goto l_end;
+	}
+
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+		PMD_LOG_ERR(DRV, "The size of hash lookup table configured "
+			"(%d) doesn't match the number hardware can supported "
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Hardware accesses the table one 32-bit word (4 entries) at a
+	 * time, so walk the request in 4-entry chunks.
+	 */
+	for (i = 0; i < reta_size; i += SXE_4_BIT_WIDTH) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		mask = (u8)((reta_conf[idx].mask >> shift) &
+						SXE_4_BIT_MASK);
+		if (!mask)
+			continue;
+
+		/* Read-modify-write unless all 4 entries are replaced. */
+		if (mask == SXE_4_BIT_MASK)
+			r = 0;
+		else
+			r = sxe_hw_rss_redir_tbl_get_by_idx(hw, i);
+
+		for (j = 0, reta = 0; j < SXE_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j)) {
+				reta |= reta_conf[idx].reta[shift + j] <<
+						(CHAR_BIT * j);
+			} else {
+				reta |= r & (SXE_8_BIT_MASK <<
+					(CHAR_BIT * j));
+			}
+		}
+
+		sxe_hw_rss_redir_tbl_set_by_idx(hw, i, reta);
+	}
+	/* Stop sxe_rss_configure() from overwriting the user's table. */
+	adapter->rss_reta_updated = true;
+
+l_end:
+	return ret;
+}
+
+/* dev_ops reta_query: read the masked entries of the 128-entry RSS
+ * redirection table into reta_conf.
+ *
+ * Returns 0 on success, -EINVAL if reta_size is not exactly 128.
+ */
+s32 sxe_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size)
+{
+	u16 i;
+	u8 j, mask;
+	u32 reta;
+	u16 idx, shift;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
+		PMD_LOG_ERR(DRV, "the size of hash lookup table configured "
+			"(%d) doesn't match the number hardware can supported "
+			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Hardware reads back 4 byte-wide entries per 32-bit word. */
+	for (i = 0; i < reta_size; i += SXE_4_BIT_WIDTH) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		mask = (u8)((reta_conf[idx].mask >> shift) &
+						SXE_4_BIT_MASK);
+		if (!mask)
+			continue;
+
+		reta = sxe_hw_rss_redir_tbl_get_by_idx(hw, i);
+		for (j = 0; j < SXE_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j)) {
+				reta_conf[idx].reta[shift + j] =
+					((reta >> (CHAR_BIT * j)) &
+						SXE_8_BIT_MASK);
+			}
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+/* dev_ops rss_hash_update: change the RSS key/hash types at runtime.
+ *
+ * Returns -EINVAL when the request conflicts with the current RSS
+ * enable state: hashing cannot be enabled here if RSS was not set up
+ * at configure time, nor fully disabled once it was.
+ */
+s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 rss_hf = rss_conf->rss_hf & SXE_RSS_OFFLOAD_ALL;
+
+	if (!sxe_hw_is_rss_enabled(hw)) {
+		if (rss_hf != 0) {
+			PMD_LOG_ERR(DRV, "rss not init but want set");
+			return -EINVAL;
+		}
+
+		/* RSS off and nothing requested: nothing to do. */
+		return 0;
+	}
+
+	if (rss_hf == 0) {
+		PMD_LOG_ERR(DRV, "rss init but want disable it");
+		return -EINVAL;
+	}
+
+	sxe_rss_hash_set(hw, rss_conf);
+
+	return 0;
+}
+
+/* dev_ops rss_hash_conf_get: report the active RSS key (when the
+ * caller supplied a buffer) and the set of packet types currently
+ * hashed.  rss_hf is 0 when RSS is disabled.  Always returns 0.
+ */
+s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 *hash_key = rss_conf->rss_key;
+	u32 rss_field;
+	u32 rss_key;
+	u64 rss_hf;
+	u16 i;
+
+	if (hash_key != NULL) {
+		/* Unpack each 32-bit key register into 4 bytes (LE). */
+		for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) {
+			rss_key = sxe_hw_rss_key_get_by_idx(hw, i);
+			hash_key[(i * 4)] = rss_key & 0x000000FF;
+			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+		}
+	}
+
+	if (!sxe_hw_is_rss_enabled(hw)) {
+		rss_conf->rss_hf = 0;
+		PMD_LOG_INFO(DRV, "rss not enabled,return 0");
+		return 0;
+	}
+
+	/* Translate hardware field-select bits back to DPDK flags. */
+	rss_hf = 0;
+	rss_field = sxe_hw_rss_field_get(hw);
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4)
+		rss_hf |= RTE_ETH_RSS_IPV4;
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_TCP)
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_UDP)
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6)
+		rss_hf |= RTE_ETH_RSS_IPV6;
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_TCP)
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
+
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_UDP)
+		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
+
+	PMD_LOG_DEBUG(DRV, "got rss hash func=0x%"SXE_PRIX64, rss_hf);
+	rss_conf->rss_hf = rss_hf;
+
+	return 0;
+}
+
diff --git a/drivers/net/sxe/pf/sxe_offload.h b/drivers/net/sxe/pf/sxe_offload.h
new file mode 100644
index 0000000000..d1f651feb6
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_offload.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+/* RSS and Rx/Tx offload-capability helpers for the sxe PF driver.
+ * Guard renamed from __SXE_OFFLOAD_H__: identifiers beginning with a
+ * double underscore are reserved for the implementation.
+ */
+#ifndef SXE_OFFLOAD_H
+#define SXE_OFFLOAD_H
+
+#include "sxe_hw.h"
+
+/* Hash types this hardware can compute (see sxe_rss_hash_set()). */
+#define SXE_RSS_OFFLOAD_ALL ( \
+		RTE_ETH_RSS_IPV4 | \
+		RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+		RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+		RTE_ETH_RSS_IPV6 | \
+		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+		RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+/* Returns the driver's default 40-byte RSS key (not a copy). */
+u8 *sxe_rss_hash_key_get(void);
+#endif
+
+void sxe_rss_hash_set(struct sxe_hw *hw,
+				struct rte_eth_rss_conf *rss_conf);
+
+u64 sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 sxe_tx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+void sxe_rss_disable(struct rte_eth_dev *dev);
+
+void sxe_rss_configure(struct rte_eth_dev *dev);
+
+s32 sxe_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size);
+
+s32 sxe_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size);
+
+s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf);
+
+s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_phy.c b/drivers/net/sxe/pf/sxe_phy.c
new file mode 100644
index 0000000000..595bbcbc25
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_phy.c
@@ -0,0 +1,993 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#endif
+
+#include <rte_cycles.h>
+#include <rte_net.h>
+
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_phy.h"
+#include "drv_msg.h"
+#include "sxe_phy.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxe_ethdev.h"
+#include "sxe_filter.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_filter.h"
+#include "sxe_compat_version.h"
+
+#define SXE_WAIT_LINK_UP_FAILED	1
+#define SXE_WARNING_TIMEOUT	9000	/* ms between "still waiting" warnings */
+#define SXE_CHG_SFP_RATE_MS     40	/* settle delay after SFP rate select */
+#define SXE_1G_WAIT_PCS_MS      100	/* PCS settle time at 1G */
+#define SXE_10G_WAIT_PCS_MS     100	/* per-poll delay at 10G */
+#define SXE_HZ_TRANSTO_MS       1000	/* ms -> us multiplier */
+#define SXE_AN_COMPLETE_TIME    5	/* autoneg poll attempts (100 ms each) */
+#define SXE_10G_WAIT_13_TIME    13	/* 10G link polls when run from thread */
+#define SXE_10G_WAIT_5_TIME     5	/* 10G link polls in direct call */
+
+/* Detached worker thread that (re)configures the link once.
+ *
+ * Intersects the user-configured speeds with what the SFP supports,
+ * then runs the multispeed or plain SFP link setup.  Clears
+ * SXE_IRQ_LINK_CONFIG and the running flag on exit so
+ * sxe_link_update() may spawn a new attempt.
+ */
+STATIC void *sxe_setup_link_thread_handler(void *param)
+{
+	s32 ret;
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 allowed_speeds = 0;
+	u32 conf_speeds = 0;
+	u32 speed = 0;
+	bool autoneg = false;
+
+	pthread_detach(pthread_self());
+
+	sxe_sfp_link_capabilities_get(adapter, &allowed_speeds, &autoneg);
+
+	sxe_conf_speed_get(dev, &conf_speeds);
+
+	/* No overlap: fall back to everything the SFP allows. */
+	speed = (conf_speeds & allowed_speeds) ? (conf_speeds & allowed_speeds) :
+		allowed_speeds;
+
+	if (adapter->phy_ctxt.sfp_info.multispeed_fiber)
+		ret = sxe_multispeed_sfp_link_configure(dev, speed, true);
+	else
+		ret = sxe_sfp_link_configure(dev);
+
+	if (ret)
+		PMD_LOG_ERR(INIT, "link setup failed, ret=%d", ret);
+
+	irq->action &= ~SXE_IRQ_LINK_CONFIG;
+	rte_atomic32_clear(&adapter->link_thread_running);
+	return NULL;
+}
+
+/* Sleep in 1 ms steps until the link setup thread clears its running
+ * flag.  timeout_ms == 0 means wait forever, logging a warning every
+ * SXE_WARNING_TIMEOUT ms.
+ */
+void sxe_wait_setup_link_complete(struct rte_eth_dev *dev,
+						uint32_t timeout_ms)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	uint32_t timeout = timeout_ms ? timeout_ms : SXE_WARNING_TIMEOUT;
+
+	while (rte_atomic32_read(&adapter->link_thread_running)) {
+		rte_delay_us_sleep(1000);
+		timeout--;
+
+		if (timeout_ms) {
+			if (!timeout)
+				return;
+		} else if (!timeout) {
+			/* Periodic warning only; keep waiting. */
+			timeout = SXE_WARNING_TIMEOUT;
+			PMD_LOG_ERR(INIT, "link thread not complete too long time!");
+		}
+	}
+}
+
+/* Fetch local/peer autonegotiation capabilities from firmware into
+ * @an_cap.  Returns 0 on success or the HDC transport error.
+ */
+static s32 sxe_an_cap_get(struct sxe_adapter *adapter, sxe_an_cap_s *an_cap)
+{
+	s32 ret;
+	struct sxe_hw *hw = &adapter->hw;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_AN_CAP_GET,
+				NULL, 0,
+				(void *)an_cap, sizeof(*an_cap));
+	if (ret)
+		PMD_LOG_ERR(INIT, "hdc trans failed ret=%d, cmd:negotiaton cap get", ret);
+
+	return ret;
+}
+
+/* dev_ops link_update: refresh @dev's reported link status.
+ *
+ * @wait_to_complete: poll up to SXE_LINK_UP_TIME iterations (forced
+ * to 0 when LSC interrupts are enabled).  If the link is down and no
+ * setup is already in progress, a detached thread is spawned to
+ * reconfigure it.  Returns the rte_eth_linkstatus_set() result.
+ */
+s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	u32 i;
+	bool link_up, orig_link_up;
+	struct rte_eth_link link;
+	sxe_an_cap_s an_cap;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	sxe_link_speed link_speed = SXE_LINK_SPEED_UNKNOWN;
+
+	PMD_LOG_INFO(INIT, "link update start...");
+
+	memset(&link, 0, sizeof(link));
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed  = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+						RTE_ETH_LINK_SPEED_FIXED);
+
+	/* A setup thread is already reconfiguring the link; report the
+	 * down state prepared above instead of interfering.
+	 */
+	if (irq->action & SXE_IRQ_LINK_CONFIG) {
+		PMD_LOG_INFO(INIT, "other link config thread exsit");
+		goto l_end;
+	}
+
+	if (dev->data->dev_conf.intr_conf.lsc) {
+		wait_to_complete = 0;
+	}
+
+	/* Read twice to detect (and log) a recent state transition. */
+	sxe_link_info_get(adapter, &link_speed, &orig_link_up);
+	sxe_link_info_get(adapter, &link_speed, &link_up);
+
+	if (orig_link_up != link_up) {
+		PMD_LOG_INFO(INIT, "link status %s to %s",
+			(orig_link_up?"up":"down"),
+			(link_up?"up":"down"));
+	}
+
+	if (wait_to_complete) {
+		for (i = 0; i < SXE_LINK_UP_TIME; i++) {
+			if (link_up == true) {
+				break;
+			}
+
+			rte_delay_us_sleep(100000);
+
+			sxe_link_info_get(adapter, &link_speed, &link_up);
+		}
+	}
+
+	if (link_up == false) {
+		sxe_wait_setup_link_complete(dev, 0);
+		/* test_and_set wins the right to spawn the setup thread. */
+		if (rte_atomic32_test_and_set(&adapter->link_thread_running)) {
+			if (adapter->phy_ctxt.sfp_tx_laser_disabled) {
+				PMD_LOG_INFO(INIT, "tx laser is disabled");
+				rte_atomic32_clear(&adapter->link_thread_running);
+			} else {
+				irq->action |= SXE_IRQ_LINK_CONFIG;
+				irq->to_pcs_init = true;
+				if (rte_ctrl_thread_create(&adapter->link_thread_tid,
+					"sxe-link-handler",
+					NULL,
+					sxe_setup_link_thread_handler,
+					dev) < 0) {
+					PMD_LOG_ERR(INIT,
+						"Create link thread failed!");
+					rte_atomic32_clear(&adapter->link_thread_running);
+				}
+			}
+		} else {
+			PMD_LOG_ERR(INIT, "other link thread is running now!");
+		}
+
+		goto l_end;
+	}
+
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	switch (link_speed) {
+	case SXE_LINK_SPEED_1GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
+		if (adapter->phy_ctxt.sfp_tx_laser_disabled) {
+			PMD_LOG_INFO(INIT, "tx laser disabled, link state is down.\n");
+			link.link_status = RTE_ETH_LINK_DOWN;
+			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+		} else {
+			/* Wait for the peer autoneg state to settle. */
+			for (i = 0; i < SXE_AN_COMPLETE_TIME; i++) {
+				sxe_an_cap_get(adapter, &an_cap);
+				if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN) {
+					break;
+				}
+				rte_delay_us_sleep(100000);
+			}
+		}
+		break;
+
+	case SXE_LINK_SPEED_10GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
+		break;
+	default:
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+	}
+
+l_end:
+	PMD_LOG_INFO(INIT, "link update end, up=%x, speed=%x",
+						link.link_status, link_speed);
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+/* Refresh @dev's link status without spawning a setup thread (used
+ * from link-configuration paths, e.g. after a 1G PCS init).
+ * Returns the rte_eth_linkstatus_set() result.
+ */
+s32 sxe_link_status_update(struct rte_eth_dev *dev)
+{
+	u32 i;
+	bool link_up;
+	struct rte_eth_link link;
+	sxe_an_cap_s an_cap;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	sxe_link_speed link_speed = SXE_LINK_SPEED_UNKNOWN;
+
+	PMD_LOG_INFO(INIT, "link status update start...");
+
+	memset(&link, 0, sizeof(link));
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed  = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+						RTE_ETH_LINK_SPEED_FIXED);
+
+	sxe_link_info_get(adapter, &link_speed, &link_up);
+	if (link_up == false) {
+		PMD_LOG_INFO(INIT, "link status is down.");
+		goto l_end;
+	}
+
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	switch (link_speed) {
+	case SXE_LINK_SPEED_1GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
+		/* Wait for the peer autoneg state to settle. */
+		for (i = 0; i < SXE_AN_COMPLETE_TIME; i++) {
+			sxe_an_cap_get(adapter, &an_cap);
+			if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN) {
+				break;
+			}
+			rte_delay_us_sleep(100000);
+		}
+		break;
+
+	case SXE_LINK_SPEED_10GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
+		break;
+	default:
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+	}
+
+l_end:
+	PMD_LOG_INFO(INIT, "link status update end, up=%x, speed=%x",
+						link.link_status, link_speed);
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+/* dev_ops set_link_up: re-enable the SFP Tx laser, unmask the LSC
+ * interrupt and refresh the reported link status.  Always returns 0.
+ */
+int sxe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	/* dev_private is void *; no cast needed in C. */
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_sfp_tx_laser_enable(adapter);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_enable(hw, SXE_EIMS_LSC);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	sxe_link_update(dev, 0);
+
+	return 0;
+}
+
+/* dev_ops set_link_down: disable the SFP Tx laser, mask the LSC
+ * interrupt and refresh the reported link status.  Always returns 0.
+ */
+int sxe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	/* dev_private is void *; no cast needed in C. */
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_sfp_tx_laser_disable(adapter);
+
+	rte_spinlock_lock(&adapter->irq_ctxt.event_irq_lock);
+	sxe_hw_specific_irq_disable(hw, SXE_EIMS_LSC);
+	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
+
+	sxe_link_update(dev, 0);
+
+	return 0;
+}
+
+
+/* Read @len bytes from the SFP module EEPROM at @offset via a
+ * firmware HDC command.
+ *
+ * Returns 0 on success; -EINVAL on bad arguments, -ENOMEM when the
+ * response buffer cannot be allocated, -EIO on transport failure or
+ * a short read.
+ */
+STATIC s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset,
+					u16 len, u8 *data)
+{
+	s32 ret;
+	struct sxe_sfp_rw_req req;
+	struct sxe_sfp_read_resp *resp;
+	/* Response carries a header plus the payload bytes. */
+	u16 resp_len = sizeof(struct sxe_sfp_read_resp) + len;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (!data) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(INIT, "sfp read buff == NULL");
+		goto l_end;
+	}
+
+	if (len > SXE_SFP_EEPROM_SIZE_MAX) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(INIT, "sfp read size[%u] > eeprom max size[%d], ret=%d",
+					len, SXE_SFP_EEPROM_SIZE_MAX, ret);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sfp read, offset=%u, len=%u", offset, len);
+
+	req.len = len;
+	req.offset = offset;
+
+	resp = malloc(resp_len);
+	if (!resp) {
+		ret = -ENOMEM;
+		PMD_LOG_ERR(INIT, "sfp read, alloc resp mem failed");
+		goto l_end;
+	}
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_SFP_READ,
+				(void *)&req, sizeof(struct sxe_sfp_rw_req),
+				(void *)resp, resp_len);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sfp read, hdc failed, offset=%u, len=%u, ret=%d",
+					offset, len, ret);
+		ret = -EIO;
+		goto l_free;
+	}
+
+	/* Short read from firmware: treat as an I/O error. */
+	if (resp->len != len) {
+		ret = -EIO;
+		PMD_LOG_ERR(INIT, "sfp read failed, offset=%u, len=%u", offset, len);
+		goto l_free;
+	}
+
+	memcpy(data, resp->resp, len);
+
+l_free:
+	free(resp);
+
+l_end:
+	return ret;
+}
+
+/* Ask firmware to disable (true) or enable (false) the SFP Tx laser;
+ * mirrors the requested state into phy_ctxt before sending the
+ * command.  Returns 0 on success or the HDC transport error.
+ */
+static s32 sxe_sfp_tx_laser_ctrl(struct sxe_adapter *adapter, bool is_disable)
+{
+	s32 ret;
+	sxe_spp_tx_able_s laser_disable;
+	struct sxe_hw *hw = &adapter->hw;
+
+	laser_disable.isDisable = is_disable;
+	adapter->phy_ctxt.sfp_tx_laser_disabled = is_disable;
+	PMD_LOG_INFO(INIT, "sfp tx laser ctrl start, is_disable=%x", is_disable);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_TX_DIS_CTRL,
+				&laser_disable, sizeof(laser_disable),
+				NULL, 0);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sfp tx laser ctrl failed, ret=%d", ret);
+		return ret;
+	}
+
+	PMD_LOG_INFO(INIT, "sfp tx laser ctrl success, is_disable=%x", is_disable);
+
+	return ret;
+}
+
+/* Turn the SFP Tx laser on (best effort; result is logged inside). */
+void sxe_sfp_tx_laser_enable(struct sxe_adapter *adapter)
+{
+	sxe_sfp_tx_laser_ctrl(adapter, false);
+}
+
+/* Turn the SFP Tx laser off (best effort; result is logged inside). */
+void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter)
+{
+	sxe_sfp_tx_laser_ctrl(adapter, true);
+}
+
+/* Pulse the SFP Tx laser (off then on) when the MAC requested an
+ * auto-restart, clearing the flag afterwards.  Always returns 0.
+ */
+s32 sxe_sfp_reset(struct sxe_adapter *adapter)
+{
+	PMD_LOG_INFO(INIT, "auto_restart:%u.\n", adapter->hw.mac.auto_restart);
+
+	if (adapter->hw.mac.auto_restart) {
+		sxe_sfp_tx_laser_disable(adapter);
+		sxe_sfp_tx_laser_enable(adapter);
+		adapter->hw.mac.auto_restart = false;
+	}
+
+	return 0;
+}
+
+/* Report the speed set the installed SFP supports in @speed and
+ * whether autonegotiation applies in @autoneg.
+ */
+void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
+							bool *autoneg)
+{
+	struct sxe_sfp_info *sfp = &adapter->phy_ctxt.sfp_info;
+
+	*speed = 0;
+
+	if (sfp->type == SXE_SFP_TYPE_1G_CU ||
+	    sfp->type == SXE_SFP_TYPE_1G_SXLX) {
+		*speed = SXE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+	} else {
+		*speed = SXE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+
+		/* Multispeed modules can run either rate. */
+		if (sfp->multispeed_fiber) {
+			*speed |= SXE_LINK_SPEED_10GB_FULL | SXE_LINK_SPEED_1GB_FULL;
+			*autoneg = true;
+		}
+	}
+
+	PMD_LOG_INFO(INIT, "sfp link speed cap=%d", *speed);
+}
+
+/* Select the SFP module data rate (1G/10G) via firmware.
+ * Returns 0 on success or the HDC transport error.
+ */
+s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate)
+{
+	s32 ret;
+	sxe_sfp_rate_able_s rate_able;
+	struct sxe_hw *hw = &adapter->hw;
+
+	rate_able.rate = rate;
+	PMD_LOG_INFO(INIT, "sfp tx rate select start, rate=%d", rate);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_RATE_SELECT,
+				&rate_able, sizeof(rate_able),
+				NULL, 0);
+	if (ret)
+		PMD_LOG_ERR(INIT, "sfp rate select failed, ret=%d", ret);
+
+	PMD_LOG_INFO(INIT, "sfp tx rate select end, rate=%d", rate);
+
+	return ret;
+}
+
+/* Ask firmware to (re)initialize the PCS/SerDes in @mode with the
+ * given max frame size.  The Tx laser is blanked for the duration of
+ * the command and to_pcs_init is cleared once it has been issued; on
+ * success the flow-control MAC address is reprogrammed.
+ */
+s32 sxe_pcs_sds_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame)
+{
+	s32 ret;
+	sxe_pcs_cfg_s pcs_cfg;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+
+	pcs_cfg.mode = mode;
+	pcs_cfg.mtu  = max_frame;
+	sxe_sfp_tx_laser_disable(adapter);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_PCS_SDS_INIT,
+				(void *)&pcs_cfg, sizeof(pcs_cfg),
+				NULL, 0);
+	irq->to_pcs_init = false;
+	sxe_sfp_tx_laser_enable(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret);
+		goto l_end;
+	}
+
+	sxe_fc_mac_addr_set(adapter);
+
+	LOG_INFO_BDF("mode:%u max_frame:0x%x pcs sds init done.\n",
+		     mode, max_frame);
+l_end:
+	return ret;
+}
+
+/* Translate dev_conf.link_speeds into the driver's speed bitmask in
+ * @conf_speeds.  AUTONEG means "both 1G and 10G".
+ *
+ * Returns 0 on success, -EINVAL if a speed outside 1G/10G was asked.
+ */
+s32 sxe_conf_speed_get(struct rte_eth_dev *dev, u32 *conf_speeds)
+{
+	u32 *link_speeds = &dev->data->dev_conf.link_speeds;
+	u32 allowed_speeds = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G;
+
+	/* Bit 0 is the fixed-speed flag; validate only the speed bits. */
+	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
+		PMD_LOG_ERR(INIT, "invalid link setting, link_speed=%x",
+						*link_speeds);
+		return -EINVAL;
+	}
+
+	*conf_speeds = SXE_LINK_SPEED_UNKNOWN;
+	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+		*conf_speeds = SXE_LINK_SPEED_1GB_FULL |
+				 SXE_LINK_SPEED_10GB_FULL;
+	} else {
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
+			*conf_speeds |= SXE_LINK_SPEED_10GB_FULL;
+
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
+			*conf_speeds |= SXE_LINK_SPEED_1GB_FULL;
+	}
+
+	return 0;
+}
+
+/* Bring up a multispeed-fiber link, preferring 10G then 1G.
+ *
+ * For each rate present in @speed (masked by SFP capability) the SFP
+ * rate is selected, the PCS re-initialized and the link polled.  If
+ * both rates were tried without success, retry once at the highest
+ * rate.  @is_in_thread selects the longer 10G poll budget.
+ * On exit, autoneg_advertised records the rates that were attempted.
+ */
+s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool is_in_thread)
+{
+	/* Initialized: with no configurable speed bit set, the original
+	 * code returned an indeterminate value (undefined behavior).
+	 */
+	s32 ret = 0;
+	bool autoneg, link_up;
+	u32 i, speed_cap, link_speed, speedcnt = 0;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u32 highest_link_speed = SXE_LINK_SPEED_UNKNOWN;
+	u32 frame_size = adapter->mtu + SXE_ETH_DEAD_LOAD;
+	u8 wait_time = is_in_thread ? SXE_10G_WAIT_13_TIME : SXE_10G_WAIT_5_TIME;
+
+	sxe_sfp_link_capabilities_get(adapter, &speed_cap, &autoneg);
+
+	speed &= speed_cap;
+
+	if (speed & SXE_LINK_SPEED_10GB_FULL) {
+		PMD_LOG_DEBUG(INIT, "10G link cfg start\n");
+		irq->to_pcs_init = true;
+
+		speedcnt++;
+		highest_link_speed = SXE_LINK_SPEED_10GB_FULL;
+
+		ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_10G);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "set sfp rate failed, ret=%d", ret);
+			goto l_end;
+		}
+
+		/* Give the module time to switch rate. */
+		rte_delay_us_sleep((SXE_CHG_SFP_RATE_MS * SXE_HZ_TRANSTO_MS));
+
+		ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO,
+						frame_size);
+		if (ret)
+			goto l_end;
+
+		for (i = 0; i < wait_time; i++) {
+			rte_delay_us_sleep((SXE_10G_WAIT_PCS_MS * SXE_HZ_TRANSTO_MS));
+
+			sxe_link_info_get(adapter, &link_speed, &link_up);
+			if (link_up) {
+				PMD_LOG_INFO(INIT, "link cfg end, link up, speed is 10G");
+				goto l_out;
+			}
+		}
+
+		PMD_LOG_WARN(INIT, "10G link cfg failed, retry...");
+	}
+
+	if (speed & SXE_LINK_SPEED_1GB_FULL) {
+		PMD_LOG_DEBUG(INIT, "1G link cfg start\n");
+		irq->to_pcs_init = true;
+
+		speedcnt++;
+		if (highest_link_speed == SXE_LINK_SPEED_UNKNOWN)
+			highest_link_speed = SXE_LINK_SPEED_1GB_FULL;
+
+		ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_1G);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "set sfp rate failed, ret=%d", ret);
+			goto l_end;
+		}
+
+		rte_delay_us_sleep((SXE_CHG_SFP_RATE_MS * SXE_HZ_TRANSTO_MS));
+
+		ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_1000BASE_KX_W,
+						frame_size);
+		if (ret)
+			goto l_end;
+
+		rte_delay_us_sleep(SXE_1G_WAIT_PCS_MS * SXE_HZ_TRANSTO_MS);
+
+		/* 1G state is latched through the status-update helper. */
+		sxe_link_status_update(dev);
+
+		link_up = sxe_hw_is_link_state_up(hw);
+		if (link_up) {
+			PMD_LOG_INFO(INIT, "link cfg end, link up, speed is 1G");
+			goto l_out;
+		}
+
+		PMD_LOG_WARN(INIT, "1G link cfg failed, retry...");
+	}
+
+	/* Both rates failed once: retry at the highest supported rate. */
+	if (speedcnt > 1)
+		ret = sxe_multispeed_sfp_link_configure(dev, highest_link_speed, is_in_thread);
+l_out:
+
+	/* Record what was advertised for later link queries. */
+	adapter->phy_ctxt.autoneg_advertised = 0;
+
+	if (speed & SXE_LINK_SPEED_10GB_FULL)
+		adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_10GB_FULL;
+
+	if (speed & SXE_LINK_SPEED_1GB_FULL)
+		adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_1GB_FULL;
+
+l_end:
+	return ret;
+}
+
+/* Fetch the current link state into @link_up and, when the link is
+ * up, its speed into @link_speed (SXE_LINK_SPEED_UNKNOWN when down).
+ */
+void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_up)
+{
+	struct sxe_hw *hw = &adapter->hw;
+
+	*link_up = sxe_hw_is_link_state_up(hw);
+	if (!(*link_up)) {
+		PMD_LOG_INFO(INIT, "link state =%d, (1=link_up, 0=link_down)\n",
+								*link_up);
+		*link_speed = SXE_LINK_SPEED_UNKNOWN;
+	} else {
+		*link_speed = sxe_hw_link_speed_get(hw);
+	}
+}
+
+/* Resolve fc.current_mode from the local/peer autoneg pause bits
+ * relative to the requested mode (symmetric -> FULL or RX_PAUSE,
+ * asymmetric local+both peer -> TX_PAUSE, both local+asymmetric
+ * peer -> RX_PAUSE, otherwise NONE).
+ * Returns non-zero when the capabilities could not be fetched; the
+ * caller then falls back to the requested mode.
+ */
+static s32 sxe_sfp_fc_autoneg(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	sxe_an_cap_s an_cap;
+	struct sxe_hw *hw = &adapter->hw;
+
+	ret = sxe_an_cap_get(adapter, &an_cap);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "get auto negotiate capacity failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	if ((an_cap.local.pause_cap & SXE_PAUSE_CAP_SYMMETRIC_PAUSE) &&
+		(an_cap.peer.pause_cap & SXE_PAUSE_CAP_SYMMETRIC_PAUSE)) {
+		if (hw->fc.requested_mode == SXE_FC_FULL) {
+			hw->fc.current_mode = SXE_FC_FULL;
+			PMD_LOG_DEBUG(INIT, "Flow Control = FULL.");
+		} else {
+			hw->fc.current_mode = SXE_FC_RX_PAUSE;
+			PMD_LOG_DEBUG(INIT, "Flow Control=RX PAUSE frames only");
+		}
+	} else if ((an_cap.local.pause_cap == SXE_PAUSE_CAP_ASYMMETRIC_PAUSE) &&
+		(an_cap.peer.pause_cap == SXE_PAUSE_CAP_BOTH_PAUSE)) {
+		hw->fc.current_mode = SXE_FC_TX_PAUSE;
+		PMD_LOG_DEBUG(INIT, "Flow Control = TX PAUSE frames only.");
+	} else if ((an_cap.local.pause_cap == SXE_PAUSE_CAP_BOTH_PAUSE) &&
+		(an_cap.peer.pause_cap == SXE_PAUSE_CAP_ASYMMETRIC_PAUSE)) {
+		hw->fc.current_mode = SXE_FC_RX_PAUSE;
+		PMD_LOG_DEBUG(INIT, "Flow Control = RX PAUSE frames only.");
+	} else {
+		hw->fc.current_mode = SXE_FC_NONE;
+		PMD_LOG_DEBUG(INIT, "Flow Control = NONE.");
+	}
+
+l_end:
+	return ret;
+}
+
+/* Resolve the effective flow-control mode.
+ *
+ * Pause autonegotiation applies only when enabled, the link is up and
+ * running at 1G; otherwise (or on negotiation failure) the requested
+ * mode is used as-is.
+ */
+static void sxe_fc_autoneg(struct sxe_adapter *adapter)
+{
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = -SXE_ERR_FC_NOT_NEGOTIATED;
+	bool link_up;
+	u32 link_speed;
+
+	if (hw->fc.disable_fc_autoneg) {
+		PMD_LOG_INFO(INIT, "disable fc autoneg");
+		goto l_end;
+	}
+
+	sxe_link_info_get(adapter, &link_speed, &link_up);
+	if (!link_up) {
+		PMD_LOG_INFO(INIT, "link down, dont fc autoneg");
+		goto l_end;
+	}
+
+	if (link_speed != SXE_LINK_SPEED_1GB_FULL) {
+		PMD_LOG_INFO(INIT, "link speed=%x, (0x80=10G, 0x20=1G), dont fc autoneg", link_speed);
+		goto l_end;
+	}
+
+	ret = sxe_sfp_fc_autoneg(adapter);
+l_end:
+	/* Negotiation skipped or failed: honor the requested mode. */
+	if (ret)
+		hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/* Validate the per-TC water marks, program the local autoneg fc
+ * capability, resolve the negotiated mode and enable link-level flow
+ * control in hardware.
+ * Returns 0 on success or a negative SXE error code.
+ */
+s32 sxe_fc_enable(struct sxe_adapter *adapter)
+{
+	s32 ret = 0;
+	u32 i;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (!hw->fc.pause_time) {
+		PMD_LOG_ERR(INIT, "link fc disabled since pause time is 0");
+		ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+		goto l_end;
+	}
+
+	/* Tx pause needs sane low/high water marks on every TC that
+	 * has a high mark configured.
+	 */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				PMD_LOG_DEBUG(INIT, "invalid water mark configuration, "
+					"tc[%u] low_water=%u, high_water=%u",
+					i, hw->fc.low_water[i],
+					hw->fc.high_water[i]);
+				ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+				goto l_end;
+			}
+		}
+	}
+
+	/* auto negotiation flow control local capability configuration */
+	sxe_fc_autoneg_localcap_set(hw);
+
+	sxe_fc_autoneg(adapter);
+
+	ret = sxe_hw_fc_enable(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link fc enable failed, ret=%d", ret);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Enable priority flow control for traffic class @tc_idx after
+ * validating its water marks and refreshing fc autonegotiation.
+ *
+ * Returns 0 on success or a negative error code.  (The original
+ * returned INVALID_LINK_SETTINGS with inconsistent sign in two spots;
+ * negative is used throughout to match sxe_fc_enable().)
+ */
+s32 sxe_pfc_enable(struct sxe_adapter *adapter, u8 tc_idx)
+{
+	s32 ret;
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (!hw->fc.pause_time) {
+		LOG_ERROR_BDF("link fc disabled since pause time is 0");
+		ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+		goto l_ret;
+	}
+
+	if (hw->fc.current_mode & SXE_FC_TX_PAUSE) {
+		/* Both marks must be set and low must stay below high. */
+		if (!hw->fc.high_water[tc_idx] || !hw->fc.low_water[tc_idx] ||
+		    hw->fc.low_water[tc_idx] >= hw->fc.high_water[tc_idx]) {
+			LOG_ERROR_BDF("Invalid water mark configuration");
+			ret = -SXE_ERR_INVALID_LINK_SETTINGS;
+			goto l_ret;
+		}
+	}
+
+	sxe_fc_autoneg(adapter);
+
+	ret = sxe_hw_pfc_enable(hw, tc_idx);
+	if (ret)
+		PMD_LOG_ERR(INIT, "link fc enable failed, ret=%d", ret);
+
+l_ret:
+	return ret;
+}
+/* Probe the SFP module EEPROM identification area and classify the
+ * module type and multispeed capability into phy_ctxt.sfp_info.
+ *
+ * Returns 0 on success, negative on EEPROM read failure or when the
+ * identifier byte is not an SFP.
+ */
+s32 sxe_sfp_identify(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	enum sxe_sfp_type sfp_type;
+	u8 sfp_comp_code[SXE_SFP_COMP_CODE_SIZE];
+	struct sxe_sfp_info *sfp = &adapter->phy_ctxt.sfp_info;
+
+	PMD_LOG_INFO(INIT, "sfp identify start");
+
+	ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_BASE_ADDR,
+				SXE_SFP_COMP_CODE_SIZE, sfp_comp_code);
+	if (ret) {
+		sfp_type = SXE_SFP_TYPE_UNKNOWN;
+		PMD_LOG_ERR(INIT, "get sfp identifier failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sfp identifier=%x, cable_technology=%x, "
+			"10GB_code=%x, 1GB_code=%x",
+		sfp_comp_code[SXE_SFF_IDENTIFIER],
+		sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY],
+		sfp_comp_code[SXE_SFF_10GBE_COMP_CODES],
+		sfp_comp_code[SXE_SFF_1GBE_COMP_CODES]);
+
+	if (sfp_comp_code[SXE_SFF_IDENTIFIER] != SXE_SFF_IDENTIFIER_SFP) {
+		LOG_WARN("sfp type get failed, offset=%d, type=%x",
+			SXE_SFF_IDENTIFIER, sfp_comp_code[SXE_SFF_IDENTIFIER]);
+		sfp_type = SXE_SFP_TYPE_UNKNOWN;
+		ret = -SXE_ERR_SFF_NOT_SUPPORTED;
+		goto l_end;
+	}
+
+	/* Classify by compliance codes: DA copper, then 10G SR/LR,
+	 * then 1G copper, then 1G SX/LX fiber.
+	 */
+	if (sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY] & SXE_SFF_DA_PASSIVE_CABLE) {
+		sfp_type = SXE_SFP_TYPE_DA_CU;
+	}  else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+		(SXE_SFF_10GBASESR_CAPABLE | SXE_SFF_10GBASELR_CAPABLE)) {
+		sfp_type = SXE_SFP_TYPE_SRLR;
+	} else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		SXE_SFF_1GBASET_CAPABLE) {
+		sfp_type = SXE_SFP_TYPE_1G_CU;
+	} else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		SXE_SFF_1GBASESX_CAPABLE) || \
+		(sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		SXE_SFF_1GBASELX_CAPABLE)) {
+		sfp_type = SXE_SFP_TYPE_1G_SXLX;
+	} else {
+		sfp_type = SXE_SFP_TYPE_UNKNOWN;
+	}
+
+	/* Multispeed: module advertises both a 1G and a matching 10G
+	 * fiber compliance code (SX+SR or LX+LR).
+	 */
+	sfp->multispeed_fiber = false;
+	if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+			SXE_SFF_1GBASESX_CAPABLE) &&
+		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+			SXE_SFF_10GBASESR_CAPABLE)) ||
+		((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+			SXE_SFF_1GBASELX_CAPABLE) &&
+		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+			SXE_SFF_10GBASELR_CAPABLE))) {
+		sfp->multispeed_fiber = true;
+	}
+
+	PMD_LOG_INFO(INIT, "identify sfp, sfp_type=%d, is_multispeed=%x",
+			sfp_type, sfp->multispeed_fiber);
+
+l_end:
+	adapter->phy_ctxt.sfp_info.type = sfp_type;
+	return ret;
+}
+
+/* Configure the PCS/SerDes for the speed reported by the SFP link
+ * capabilities and record the advertised speed in the PHY context.
+ * For 1G links the link status is refreshed after PCS init.
+ * Return: 0 on success, error code from sxe_pcs_sds_init() otherwise.
+ */
+s32 sxe_sfp_link_configure(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	bool an;
+	u32 pcs_mode = SXE_PCS_MODE_BUTT;
+	u32 speed;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	u32 frame_size = adapter->mtu + SXE_ETH_DEAD_LOAD;
+
+	sxe_sfp_link_capabilities_get(adapter, &speed, &an);
+
+	/* NOTE(review): if speed is neither 1G nor 10G, pcs_mode stays
+	 * SXE_PCS_MODE_BUTT and is still passed to sxe_pcs_sds_init() --
+	 * confirm the hardware layer rejects it.
+	 */
+	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+		pcs_mode = SXE_PCS_MODE_1000BASE_KX_W;
+		adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_1GB_FULL;
+	} else if (SXE_LINK_SPEED_10GB_FULL == speed) {
+		pcs_mode = SXE_PCS_MODE_10GBASE_KR_WO;
+		adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_10GB_FULL;
+	}
+
+	ret = sxe_pcs_sds_init(adapter, pcs_mode, frame_size);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "pcs sds init failed, ret=%d", ret);
+	}
+
+	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+		sxe_link_status_update(dev);
+	}
+
+	PMD_LOG_INFO(INIT, "link :cfg speed=%x, pcs_mode=%x, atuoreg=%d",
+					speed, pcs_mode, an);
+
+	return ret;
+}
+
+/* ethdev .get_module_info callback: report the SFP EEPROM layout.
+ * Reads the SFF-8472 compliance and diagnostic-monitoring bytes to
+ * decide whether the module exposes only the SFF-8079 page (256 B) or
+ * the full SFF-8472 map (512 B).
+ * Return: 0 on success, -EIO when the module EEPROM cannot be read.
+ */
+int sxe_get_module_info(struct rte_eth_dev *dev,
+			struct rte_eth_dev_module_info *info)
+{
+	s32 ret;
+	bool page_swap = false;
+	u8 sff8472_rev, addr_mode;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_8472_COMPLIANCE,
+					sizeof(sff8472_rev), &sff8472_rev);
+	if (ret) {
+		ret = -EIO;
+		goto l_end;
+	}
+
+	ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_8472_DIAG_MONITOR_TYPE,
+					sizeof(addr_mode), &addr_mode);
+	if (ret) {
+		ret = -EIO;
+		goto l_end;
+	}
+
+	/* Address-change modules are unsupported; fall back to SFF-8079. */
+	if (addr_mode & SXE_SFF_ADDRESSING_MODE) {
+		PMD_LOG_ERR(DRV, "address change required to access page 0xA2, "
+			"but not supported. Please report the module "
+			"type to the driver maintainers.");
+		page_swap = true;
+	}
+
+	if ((sff8472_rev == SXE_SFF_8472_UNSUP) || page_swap || \
+			!(addr_mode & SXE_SFF_DDM_IMPLEMENTED)) {
+		info->type = RTE_ETH_MODULE_SFF_8079;
+		info->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+	} else {
+		info->type = RTE_ETH_MODULE_SFF_8472;
+		info->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+	}
+
+	LOG_INFO("sfp support management is %x, eeprom addr mode=%x "
+			"eeprom type=%x, eeprom len=%d",
+		sff8472_rev, addr_mode, info->type, info->eeprom_len);
+
+l_end:
+	return ret;
+}
+
+/* ethdev .get_module_eeprom callback: copy info->length bytes of module
+ * EEPROM starting at info->offset into info->data.
+ * Return: 0 on success, -EINVAL for a zero-length request, or the
+ * EEPROM read error code.
+ */
+int sxe_get_module_eeprom(struct rte_eth_dev *dev,
+				struct rte_dev_eeprom_info *info)
+{
+	s32 ret;
+	u8 *data = info->data;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	if (info->length == 0) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe_sfp_eeprom_read(adapter, info->offset, info->length, data);
+	if (ret) {
+		LOG_ERROR("read sfp failed");
+	}
+
+l_end:
+	return ret;
+}
+
+
+/* Report the adapter media type.
+ * This device only supports fiber (SFP) media, so the is_sfp flag is
+ * latched and SXE_MEDIA_TYPE_FIBER is returned unconditionally.
+ */
+static enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter)
+{
+	adapter->phy_ctxt.is_sfp = true;
+
+	return SXE_MEDIA_TYPE_FIBER;
+}
+
+/* Initialize the PHY layer: determine the media type and, for fiber,
+ * identify the plugged SFP module.
+ * Return: 0 on success or for non-fiber media (only logged),
+ * otherwise the error from sxe_sfp_identify().
+ */
+s32 sxe_phy_init(struct sxe_adapter *adapter)
+{
+	s32 ret = 0;
+	enum sxe_media_type media_type = sxe_media_type_get(adapter);
+
+	if (SXE_MEDIA_TYPE_FIBER == media_type) {
+		ret = sxe_sfp_identify(adapter);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "phy identify failed, ret=%d", ret);
+		}
+	} else {
+		PMD_LOG_ERR(INIT, "phy init failed, only support SFP.");
+	}
+
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_phy.h b/drivers/net/sxe/pf/sxe_phy.h
new file mode 100644
index 0000000000..b0ec2388b9
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_phy.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef  __SXE_PHY_H__
+#define  __SXE_PHY_H__
+
+#include <rte_ethdev.h>
+#include "drv_msg.h"
+#include "sxe_cli.h"
+#include "sxe_msg.h"
+
+/* SFF EEPROM byte offsets (page 0xA0). */
+#define SXE_SFF_BASE_ADDR			0x0
+#define SXE_SFF_IDENTIFIER			0x0
+#define SXE_SFF_10GBE_COMP_CODES		0x3
+#define SXE_SFF_1GBE_COMP_CODES			0x6
+#define SXE_SFF_CABLE_TECHNOLOGY		0x8
+#define SXE_SFF_8472_DIAG_MONITOR_TYPE		0x5C
+#define SXE_SFF_8472_COMPLIANCE			0x5E
+
+/* SFF field values and capability bits. */
+#define SXE_SFF_IDENTIFIER_SFP			0x3
+#define SXE_SFF_ADDRESSING_MODE			0x4
+#define SXE_SFF_8472_UNSUP			0x0
+#define SXE_SFF_DDM_IMPLEMENTED			0x40
+#define SXE_SFF_DA_PASSIVE_CABLE		0x4
+#define SXE_SFF_DA_ACTIVE_CABLE			0x8
+#define SXE_SFF_DA_SPEC_ACTIVE_LIMITING		0x4
+#define SXE_SFF_1GBASESX_CAPABLE		0x1
+#define SXE_SFF_1GBASELX_CAPABLE		0x2
+#define SXE_SFF_1GBASET_CAPABLE			0x8
+#define SXE_SFF_10GBASESR_CAPABLE		0x10
+#define SXE_SFF_10GBASELR_CAPABLE		0x20
+
+#define SXE_SFP_COMP_CODE_SIZE			10
+#define SXE_SFP_EEPROM_SIZE_MAX			512
+
+#define SXE_IRQ_LINK_UPDATE      (u32)(1 << 0)
+#define SXE_IRQ_LINK_CONFIG      (u32)(1 << 3)
+
+struct sxe_adapter;
+
+enum sxe_media_type {
+	SXE_MEDIA_TYPE_UNKWON = 0,
+	SXE_MEDIA_TYPE_FIBER  = 1,
+};
+
+enum sxe_phy_idx {
+	SXE_SFP_IDX = 0,
+	SXE_PHY_MAX,
+};
+
+/* SFP module classification derived from the EEPROM compliance codes. */
+enum sxe_sfp_type {
+	SXE_SFP_TYPE_DA_CU       = 0,
+	SXE_SFP_TYPE_SRLR        = 1,
+	SXE_SFP_TYPE_1G_CU       = 2,
+	SXE_SFP_TYPE_1G_SXLX     = 4,
+	SXE_SFP_TYPE_UNKNOWN     = 0xFFFF,
+};
+
+struct sxe_sfp_info {
+	enum sxe_sfp_type	type;
+	bool			multispeed_fiber;
+};
+
+struct sxe_phy_context {
+	bool is_sfp;
+	bool sfp_tx_laser_disabled;
+	u32  speed;
+	u32  autoneg_advertised;
+	struct sxe_sfp_info sfp_info;
+};
+
+s32 sxe_phy_init(struct sxe_adapter *adapter);
+
+s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+s32 sxe_link_status_update(struct rte_eth_dev *dev);
+
+void sxe_sfp_tx_laser_enable(struct sxe_adapter *adapter);
+
+void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter);
+
+int sxe_dev_set_link_up(struct rte_eth_dev *dev);
+
+int sxe_dev_set_link_down(struct rte_eth_dev *dev);
+
+void sxe_wait_setup_link_complete(struct rte_eth_dev *dev,
+						uint32_t timeout_ms);
+
+int sxe_get_module_info(struct rte_eth_dev *dev,
+			struct rte_eth_dev_module_info *info);
+
+int sxe_get_module_eeprom(struct rte_eth_dev *dev,
+				struct rte_dev_eeprom_info *info);
+s32 sxe_sfp_identify(struct sxe_adapter *adapter);
+s32 sxe_sfp_reset(struct sxe_adapter *adapter);
+
+s32 sxe_pcs_sds_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame);
+
+s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate);
+
+s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool is_in_thread);
+
+s32 sxe_conf_speed_get(struct rte_eth_dev *dev, u32 *conf_speeds);
+
+s32 sxe_fc_enable(struct sxe_adapter *adapter);
+
+void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_up);
+
+s32 sxe_pfc_enable(struct sxe_adapter *adapter, u8 tc_idx);
+
+void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
+							bool *autoneg);
+
+s32 sxe_sfp_link_configure(struct rte_eth_dev *dev);
+
+void sxe_mac_configure(struct sxe_adapter *adapter);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_pmd_hdc.c b/drivers/net/sxe/pf/sxe_pmd_hdc.c
new file mode 100644
index 0000000000..9137776a01
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_pmd_hdc.c
@@ -0,0 +1,717 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include "sxe_compat_version.h"
+#include <semaphore.h>
+#include <pthread.h>
+#include <signal.h>
+#include "sxe_pmd_hdc.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe.h"
+#include "sxe_msg.h"
+#include "drv_msg.h"
+#include "sxe_errno.h"
+#include "sxe_common.h"
+
+/* Process-wide semaphore serializing HDC (host-driver channel)
+ * transactions across all adapters.
+ */
+static sem_t g_hdc_sem;
+
+#define SXE_SUCCESS			(0)
+
+#define SXE_HDC_TRYLOCK_MAX		200
+
+#define SXE_HDC_RELEASELOCK_MAX		20
+#define SXE_HDC_WAIT_TIME		1000
+#define SXE_HDC_BIT_1			0x1
+#define ONE_DWORD_LEN			(4)
+
+/* Accessor for the global HDC semaphore. */
+static sem_t *sxe_hdc_sema_get(void)
+{
+	return &g_hdc_sem;
+}
+
+/* One-time init of the HDC channel: create the global semaphore
+ * (count 1, i.e. one transaction at a time) and seed the trace id
+ * generator used to tag commands in logs.
+ */
+void sxe_hdc_channel_init(void)
+{
+	s32 ret;
+	ret = sem_init(sxe_hdc_sema_get(), 0, 1);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hdc sem init failed,ret=%d",ret);
+	}
+
+	sxe_trace_id_gen();
+
+	return;
+}
+
+/* Tear down the HDC channel: destroy the global semaphore and clear
+ * the trace id state. Counterpart of sxe_hdc_channel_init().
+ */
+void sxe_hdc_channel_uninit(void)
+{
+	sem_destroy(sxe_hdc_sema_get());
+	sxe_trace_id_clean();
+
+	return;
+}
+
+/* Send the current host wall-clock time (in ms) to the firmware via
+ * the HDC time-sync command. Returns the HDC transfer result.
+ */
+static s32 sxe_fw_time_sync_process(struct sxe_hw *hw)
+{
+	s32 ret;
+	u64 timestamp = sxe_time_get_real_ms();
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("sync time= %"SXE_PRIU64"ms\n", timestamp);
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_TINE_SYNC,
+				(void *)&timestamp, sizeof(timestamp),
+				NULL, 0);
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:time sync\n",ret);
+	}
+
+	return ret;
+}
+
+/* Synchronize firmware time with the host, if the firmware has
+ * finished starting.
+ * Note: a failed sync is deliberately best-effort -- it is only
+ * logged as a warning and the function still returns 0. Only a bad
+ * firmware start state yields -SXE_FW_STATUS_ERR.
+ */
+s32 sxe_fw_time_sync(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	s32 ret_v;
+	u32 status;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	status = sxe_hw_hdc_fw_status_get(hw);
+	if (status != SXE_FW_START_STATE_FINISHED) {
+		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good",hw, status);
+		ret = -SXE_FW_STATUS_ERR;
+		goto l_ret;
+	}
+
+	ret_v = sxe_fw_time_sync_process(hw);
+	if (ret_v) {
+		LOG_WARN_BDF("fw time sync failed, ret_v=%d\n",ret_v);
+		goto l_ret;
+	}
+
+l_ret:
+	return ret;
+}
+
+/* Acquire the hardware HDC lock, retrying up to SXE_HDC_TRYLOCK_MAX times. */
+static inline s32 sxe_hdc_lock_get(struct sxe_hw *hw)
+{
+	return sxe_hw_hdc_lock_get(hw, SXE_HDC_TRYLOCK_MAX);
+}
+
+/* Release the hardware HDC lock, retrying up to SXE_HDC_RELEASELOCK_MAX times. */
+static inline void sxe_hdc_lock_release(struct sxe_hw *hw)
+{
+	sxe_hw_hdc_lock_release(hw, SXE_HDC_RELEASELOCK_MAX);
+	return;
+}
+
+/* Poll the firmware-over (fw_ov) flag up to 'timeout' times, sleeping
+ * 10 ms between polls, and clear the flag once it is seen.
+ * Return: 0 when the flag was observed, -SXE_ERR_HDC_FW_OV_TIMEOUT
+ * when the firmware did not respond in time.
+ */
+static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout)
+{
+	s32 ret = 0;
+	u32 i;
+	bool fw_ov = false;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	for (i = 0; i < timeout; i++) {
+		fw_ov = sxe_hw_hdc_is_fw_over_set(hw);
+		if (fw_ov) {
+			break;
+		}
+
+		msleep(10);
+	}
+
+	if (i >= timeout) {
+		LOG_ERROR_BDF("poll fw_ov timeout...\n");
+		ret = -SXE_ERR_HDC_FW_OV_TIMEOUT;
+		goto l_ret;
+	}
+
+	sxe_hw_hdc_fw_ov_clear(hw);
+l_ret:
+	return ret;
+
+}
+
+/* Clear the firmware-over flag, resetting the channel handshake state. */
+static inline void hdc_channel_clear(struct sxe_hw *hw)
+{
+	sxe_hw_hdc_fw_ov_clear(hw);
+	return;
+}
+
+/* Wait for the firmware ack of the last HDC packet and validate the
+ * ack header error code.
+ *
+ * @pkt_header: filled with the header read back from hardware; it is
+ * pre-set to PKG_ERR_OTHER so callers see a defined value on timeout.
+ * Return: 0 on success, -SXE_HDC_PKG_SKIP_ERR when firmware skipped
+ * the packet, -SXE_HDC_PKG_OTHER_ERR for any other firmware error, or
+ * the polling error from sxe_poll_fw_ack().
+ */
+static s32 hdc_packet_ack_get(struct sxe_hw *hw, u64 trace_id,
+				HdcHeader_u *pkt_header)
+{
+	s32 ret     = 0;
+	u32 timeout = SXE_HDC_WAIT_TIME;
+	struct sxe_adapter *adapter = hw->adapter;
+	UNUSED(trace_id);
+
+	pkt_header->dw0 = 0;
+	pkt_header->head.errCode = PKG_ERR_OTHER;
+
+	LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" hdc cmd ack get start\n", trace_id);
+	ret = sxe_poll_fw_ack(hw, timeout);
+	if (ret) {
+		LOG_ERROR_BDF("get fw ack failed, ret=%d\n", ret);
+		goto l_out;
+	}
+
+	/* Fixed stray double semicolon after this call. */
+	pkt_header->dw0 = sxe_hw_hdc_fw_ack_header_get(hw);
+	if (pkt_header->head.errCode == PKG_ERR_PKG_SKIP) {
+		ret = -SXE_HDC_PKG_SKIP_ERR;
+		goto l_out;
+	} else if (pkt_header->head.errCode != PKG_OK) {
+		ret = -SXE_HDC_PKG_OTHER_ERR;
+		goto l_out;
+	}
+
+l_out:
+	LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" hdc cmd ack get end ret=%d\n", trace_id, ret);
+	return ret;
+}
+
+/* Build the header for packet 'pkt_index' of a 'pkt_num'-packet HDC
+ * transfer of 'total_len' dwords. For reads, the firmware expects the
+ * previous packet id. Start/end flags and the per-packet length (full
+ * DWORD_NUM except for the final, possibly short, packet) are derived
+ * here.
+ */
+static void hdc_packet_header_fill(HdcHeader_u *pkt_header,
+			u8 pkt_index, u16 total_len,
+			u16 pkt_num, u8 is_read)
+{
+	u16 pkt_len = 0;	/* was 'U16', inconsistent with file-wide 'u16' */
+
+	pkt_header->dw0 = 0;
+
+	pkt_header->head.pid = (is_read == 0) ? pkt_index : (pkt_index - 1);
+
+	pkt_header->head.totalLen = SXE_HDC_LEN_TO_REG(total_len);
+
+	if (pkt_index == 0 && is_read == 0) {
+		pkt_header->head.startPkg = SXE_HDC_BIT_1;
+	}
+
+	if (pkt_index == (pkt_num - 1)) {
+		pkt_header->head.endPkg = SXE_HDC_BIT_1;
+		pkt_len = total_len - (DWORD_NUM * (pkt_num - 1));
+	} else {
+		pkt_len = DWORD_NUM;
+	}
+
+	pkt_header->head.len  = SXE_HDC_LEN_TO_REG(pkt_len);
+	pkt_header->head.isRd = is_read;
+	pkt_header->head.msi = 0;
+
+	return;
+}
+
+/* Signal the hardware that the current packet has been fully written. */
+static inline void hdc_packet_send_done(struct sxe_hw *hw)
+{
+	sxe_hw_hdc_packet_send_done(hw);
+	return;
+}
+
+/* Write the packet header dword to the HDC header register. */
+static inline void hdc_packet_header_send(struct sxe_hw *hw,
+							u32 header)
+{
+	sxe_hw_hdc_packet_header_send(hw, header);
+	return;
+}
+
+/* Write one payload dword at 'dword_index' of the HDC data window. */
+static inline void hdc_packet_data_dword_send(struct sxe_hw *hw,
+						u16 dword_index, u32 value)
+{
+	sxe_hw_hdc_packet_data_dword_send(hw, dword_index, value);
+	return;
+}
+
+/* Write one HDC packet (header plus up to DWORD_NUM payload dwords) to
+ * the hardware and complete the send handshake. A NULL/empty payload
+ * sends a header-only packet (used when polling for response packets).
+ * The final dword of the last packet is copied bytewise when data_len
+ * is not dword-aligned, to avoid reading past the payload buffer.
+ */
+static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id,
+			HdcHeader_u *pkt_header, u8 *data,
+			u16 data_len)
+{
+	u16          dw_idx   = 0;
+	u16          pkt_len       = 0;
+	u16          offset        = 0;
+	u32          pkg_data      = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+	UNUSED(trace_id);
+
+	LOG_DEBUG_BDF("hw_addr[%p] trace_id=0x%"SXE_PRIX64" send pkt pkg_header[0x%x], "
+		"data_addr[%p], data_len[%u]\n",
+		hw, trace_id, pkt_header->dw0, data, data_len);
+
+	hdc_packet_header_send(hw, pkt_header->dw0);
+
+	if (data == NULL || data_len == 0) {
+		goto l_send_done;
+	}
+
+	pkt_len = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
+	for (dw_idx = 0; dw_idx < pkt_len; dw_idx++) {
+		pkg_data = 0;
+
+		offset = dw_idx * ONE_DWORD_LEN;
+
+		/* Partial trailing dword: copy only the remaining bytes. */
+		if ((pkt_header->head.endPkg == SXE_HDC_BIT_1)
+			&& (dw_idx == (pkt_len - 1))
+			&& (data_len % ONE_DWORD_LEN != 0)) {
+			memcpy((u8 *)&pkg_data, data + offset,
+					data_len % ONE_DWORD_LEN);
+		} else {
+			pkg_data = *(u32 *)(data + offset);
+		}
+
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" send data to reg[%u] dword[0x%x]\n",
+				trace_id, dw_idx, pkg_data);
+		hdc_packet_data_dword_send(hw, dw_idx, pkg_data);
+	}
+
+l_send_done:
+	hdc_channel_clear(hw);
+
+	hdc_packet_send_done(hw);
+
+	return;
+}
+
+/* Read one payload dword at 'dword_index' from the HDC data window. */
+static inline u32 hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+						u16 dword_index)
+{
+	return sxe_hw_hdc_packet_data_dword_rcv(hw, dword_index);
+}
+
+/* Copy the payload of one acked HDC response packet from the hardware
+ * data window into 'out_data'. The dword count comes from the ack
+ * header; the final dword of the last packet is copied bytewise when
+ * out_len is not dword-aligned, so the output buffer is never overrun.
+ */
+static void hdc_resp_data_rcv(struct sxe_hw *hw, u64 trace_id,
+				HdcHeader_u *pkt_header, u8 *out_data,
+				u16 out_len)
+{
+	u16          dw_idx      = 0;
+	u16          dw_num      = 0;
+	u16          offset = 0;
+	u32          pkt_data;
+	struct sxe_adapter *adapter = hw->adapter;
+	UNUSED(trace_id);
+
+	dw_num = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
+	for (dw_idx = 0; dw_idx < dw_num; dw_idx++) {
+		pkt_data = hdc_packet_data_dword_rcv(hw, dw_idx);
+		offset = dw_idx * ONE_DWORD_LEN;
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" get data from reg[%u] dword=0x%x\n",
+				trace_id, dw_idx, pkt_data);
+
+		/* Partial trailing dword: copy only the remaining bytes. */
+		if ((pkt_header->head.endPkg == SXE_HDC_BIT_1)
+			&& (dw_idx == (dw_num - 1))
+			&& (out_len % ONE_DWORD_LEN != 0)) {
+			memcpy(out_data + offset, (u8 *)&pkt_data,
+					out_len % ONE_DWORD_LEN);
+		} else {
+			*(u32 *)(out_data + offset) = pkt_data;
+		}
+	}
+
+	return;
+}
+
+/* Send a request of 'in_len' bytes to the firmware, split into
+ * ONE_PACKET_LEN_MAX-sized packets. After every packet except the last
+ * the firmware ack is awaited; a PKG_SKIP ack retries that packet once
+ * before giving up with -SXE_HDC_RETRY_ERR. The ack of the final
+ * packet is consumed later by hdc_resp_process().
+ */
+STATIC s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id,
+			u8 *in_data, u16 in_len)
+{
+	s32 ret 	= 0;
+	u32 total_len	= 0;
+	u16 pkt_num     = 0;
+	u16 index       = 0;
+	u16 offset      = 0;
+	HdcHeader_u     pkt_header;
+	bool is_retry   = false;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* Total request length in dwords, rounded up. */
+	total_len  = (in_len + ONE_DWORD_LEN - 1) / ONE_DWORD_LEN;
+
+	pkt_num = (in_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX;
+	LOG_DEBUG_BDF("hw[%p] trace_id=0x%"SXE_PRIX64" req in_data[%p] in_len=%u, "
+			"total_len=%uDWORD, pkt_num = %u\n",
+			hw, trace_id, in_data, in_len, total_len,
+			pkt_num);
+
+	for (index = 0; index < pkt_num; index++) {
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" fill pkg header[%p], pkg_index[%u], "
+			"total_Len[%u], pkg_num[%u], is_read[no]\n",
+			trace_id, &pkt_header, index, total_len, pkt_num);
+		hdc_packet_header_fill(&pkt_header, index, total_len,
+						pkt_num, 0);
+
+		/* Per-packet copy size is bounded by the header len field,
+		 * so passing the full in_len here is safe.
+		 */
+		offset = index * DWORD_NUM * 4;
+		hdc_packet_send(hw, trace_id, &pkt_header,
+				in_data + offset, in_len);
+
+		/* The last packet's ack is handled by the response phase. */
+		if (index == pkt_num - 1) {
+			break;
+		}
+
+		ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
+		if (ret == -EINTR) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
+			goto l_out;
+		} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack"
+					"failed, retry\n", trace_id);
+			if (is_retry) {
+				ret = -SXE_HDC_RETRY_ERR;
+				goto l_out;
+			}
+
+			index --;
+			is_retry = true;
+			continue;
+		} else if (ret != SXE_HDC_SUCCESS) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack"
+					"failed, ret=%d\n", trace_id, ret);
+			ret = -SXE_HDC_RETRY_ERR;
+			goto l_out;
+		}
+
+		LOG_DEBUG_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" get req packet_index[%u]"
+			" ack succeed header[0x%x]\n",
+			trace_id, index, pkt_header.dw0);
+		is_retry = false;
+	}
+
+l_out:
+	return ret;
+}
+
+/* Receive the firmware response into 'out_data' (at most 'out_len'
+ * bytes). The ack of the request's final packet carries the first
+ * response packet; additional packets are pulled by sending
+ * header-only read packets and awaiting each ack. A PKG_SKIP ack
+ * retries that packet once. The response length advertised by the
+ * firmware is validated against the caller's buffer and clamped.
+ */
+static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id,
+			u8 *out_data, u16 out_len)
+{
+	s32          ret;
+	u32          req_dwords;
+	u32          resp_len;
+	u32          resp_dwords;
+	u16          pkt_num;
+	u16          index;
+	u16          offset;
+	HdcHeader_u  pkt_header;
+	bool     retry          = false;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get\n",trace_id);
+	ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
+	if (ret == -EINTR) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
+		goto l_out;
+	} else if(ret) {
+		LOG_ERROR_BDF("hdc trace_id=0x%"SXE_PRIX64" ack get failed, ret=%d\n",
+				trace_id, ret);
+		ret = -SXE_HDC_RETRY_ERR;
+		goto l_out;
+	}
+
+	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get"
+		"succeed header[0x%x]\n",trace_id, pkt_header.dw0);
+
+	/* The first response packet must carry the start flag. */
+	if (!pkt_header.head.startPkg) {
+		ret = -SXE_HDC_RETRY_ERR;
+		LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" ack header has error:"
+				"not set start bit\n",trace_id);
+		goto l_out;
+	}
+
+	/* Firmware must not claim more data than the caller's buffer holds. */
+	req_dwords = (out_len + ONE_DWORD_LEN - 1) / ONE_DWORD_LEN;
+	resp_dwords  = SXE_HDC_LEN_FROM_REG(pkt_header.head.totalLen);
+	if (resp_dwords > req_dwords) {
+		ret = -SXE_HDC_RETRY_ERR;
+		LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" rsv len check failed:"
+				"resp_dwords=%u, req_dwords=%u\n",trace_id,
+				resp_dwords, req_dwords);
+		goto l_out;
+	}
+
+	resp_len = resp_dwords << 2;
+	LOG_DEBUG_BDF("outlen = %u bytes, resp_len = %u bytes\n", out_len, resp_len);
+	if (resp_len > out_len) {
+		resp_len = out_len;
+	}
+
+	hdc_resp_data_rcv(hw, trace_id, &pkt_header, out_data, resp_len);
+
+	/* Pull remaining response packets with header-only read requests. */
+	pkt_num = (resp_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX;
+	for (index = 1; index < pkt_num; index++) {
+		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" fill pkg header[%p], pkg_index[%u], "
+			"total_Len[%u], pkg_num[%u], is_read[yes]\n",
+			trace_id, &pkt_header, index, resp_dwords,
+			pkt_num);
+		hdc_packet_header_fill(&pkt_header, index, resp_dwords,
+					pkt_num, 1);
+
+		hdc_packet_send(hw, trace_id, &pkt_header, NULL, 0);
+
+		ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
+		if (ret == -EINTR) {
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
+			goto l_out;
+		} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
+			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling"
+					"failed, ret=%d\n", trace_id, ret);
+			if (retry) {
+				ret = -SXE_HDC_RETRY_ERR;
+				goto l_out;
+			}
+
+			index --;
+			retry = true;
+			continue;
+		} else if (ret != SXE_HDC_SUCCESS) {
+			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling"
+					"failed, ret=%d\n",trace_id, ret);
+			ret = -SXE_HDC_RETRY_ERR;
+			goto l_out;
+		}
+
+		LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" resp pkt[%u] get "
+			"succeed header[0x%x]\n",
+			trace_id, index, pkt_header.dw0);
+
+		retry = false;
+
+		offset = index * DWORD_NUM * 4;
+		hdc_resp_data_rcv(hw, trace_id, &pkt_header,
+					out_data + offset, resp_len);
+	}
+
+l_out:
+	return ret;
+}
+
+/* Execute one full HDC transaction (request then response) under the
+ * hardware HDC lock. Pre-checks that the firmware has finished starting
+ * and the channel is idle; both conditions and lock failure map to
+ * retryable errors (-SXE_HDC_RETRY_ERR) for the caller's retry loop.
+ */
+static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id,
+					struct sxe_hdc_trans_info *trans_info)
+{
+	s32 ret = SXE_SUCCESS;
+	u32 status;
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 channel_state;
+
+	status = sxe_hw_hdc_fw_status_get(hw);
+	if (status != SXE_FW_START_STATE_FINISHED) {
+		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good\n",hw, status);
+		ret = -SXE_FW_STATUS_ERR;
+		goto l_ret;
+	}
+
+	channel_state = sxe_hw_hdc_channel_state_get(hw);
+	if (channel_state != SXE_FW_HDC_TRANSACTION_IDLE) {
+		LOG_ERROR_BDF("hdc channel state is busy\n");
+		ret = -SXE_HDC_RETRY_ERR;
+		goto l_ret;
+	}
+
+	ret = sxe_hdc_lock_get(hw);
+	if (ret) {
+		LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%"SXE_PRIX64" get hdc lock fail, ret=%d\n",
+				hw, trace_id, ret);
+		ret = -SXE_HDC_RETRY_ERR;
+		goto l_ret;
+	}
+
+	ret = hdc_req_process(hw, trace_id, trans_info->in.data,
+				trans_info->in.len);
+	if (ret) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req process"
+				"failed, ret=%d\n",trace_id, ret);
+		goto l_hdc_lock_release;
+	}
+
+	ret = hdc_resp_process(hw, trace_id, trans_info->out.data,
+				trans_info->out.len);
+	if (ret) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" resp process"
+				"failed, ret=%d\n",trace_id, ret);
+	}
+
+l_hdc_lock_release:
+	sxe_hdc_lock_release(hw);
+l_ret:
+	return ret;
+}
+
+/* Run one HDC transaction with SIGINT/SIGTERM blocked (so a signal
+ * cannot tear the multi-packet exchange), serialized on the global HDC
+ * semaphore, and retried up to 250 times on retryable errors.
+ * Return: 0 on success, -EFAULT on a non-retryable failure or when all
+ * retries are exhausted, or the sigmask/sem_wait error.
+ */
+STATIC s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
+				struct sxe_hdc_trans_info *trans_info)
+{
+	s32 ret;
+	s32 sig_ret;
+	u8 retry_idx;
+	struct sxe_adapter *adapter = hw->adapter;
+	sigset_t old_mask, new_mask;
+
+	sigemptyset(&new_mask);
+	sigaddset(&new_mask, SIGINT);
+	sigaddset(&new_mask, SIGTERM);
+	ret = pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask);
+	if (ret) {
+		LOG_ERROR_BDF("hdc set signal mask failed, ret=%d\n", ret);
+		/* old_mask was not filled in; return without restoring. */
+		goto l_ret;
+	}
+
+	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64" \n",hw, trace_id);
+
+	ret = sem_wait(sxe_hdc_sema_get());
+	if (ret) {
+		LOG_WARN_BDF("hw[%p] hdc concurrency full\n", hw);
+		goto l_restore;
+	}
+
+	for (retry_idx = 0; retry_idx < 250; retry_idx++ ) {
+		ret = sxe_hdc_packet_trans(hw, trace_id, trans_info);
+		if (ret == SXE_SUCCESS) {
+			goto l_up;
+		} else if (ret == -SXE_HDC_RETRY_ERR) {
+			rte_delay_ms(10);
+			continue;
+		} else {
+			LOG_ERROR_BDF("sxe hdc packet trace_id=0x%"SXE_PRIX64
+					" trans error, ret=%d\n", trace_id, ret);
+			ret = -EFAULT;
+			goto l_up;
+		}
+	}
+
+l_up:
+	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64"\n",hw, trace_id);
+	sem_post(sxe_hdc_sema_get());
+l_restore:
+	/* Restore the mask via a separate variable: the previous code
+	 * assigned pthread_sigmask()'s result to ret, clobbering the
+	 * transaction status and reporting success for failed commands.
+	 */
+	sig_ret = pthread_sigmask(SIG_SETMASK, &old_mask, NULL);
+	if (sig_ret) {
+		LOG_ERROR_BDF("hdc restore old signal mask failed, ret=%d\n", sig_ret);
+	}
+	/* Retry exhaustion surfaces as a generic fault to callers. */
+	if (ret == -SXE_HDC_RETRY_ERR) {
+		ret = -EFAULT;
+	}
+l_ret:
+	return ret;
+}
+
+/* Initialize an HDC command header for the given command type; the
+ * sub-type is always cleared for driver-originated commands.
+ */
+static void sxe_cmd_hdr_init(struct sxe_hdc_cmd_hdr *cmd_hdr,
+					u8 cmd_type)
+{
+	cmd_hdr->cmd_sub_type = 0;
+	cmd_hdr->cmd_type = cmd_type;
+}
+
+/* Fill the driver command message header (opcode, total request length
+ * including the message header, trace id) and copy the request payload,
+ * if any, into the message body.
+ */
+static void sxe_driver_cmd_msg_init(struct sxe_hdc_drv_cmd_msg *msg,
+						u16 opcode, u64 trace_id,
+						void *req_data, u16 req_len)
+{
+	LOG_DEBUG("cmd[opcode=0x%x], trace=0x%"SXE_PRIX64", req_data_len=%u start init\n",
+			opcode, trace_id, req_len);
+	msg->opcode = opcode;
+	msg->length.req_len = SXE_HDC_MSG_HDR_SIZE + req_len;
+	msg->traceid = trace_id;
+
+	if (req_data && req_len != 0) {
+		memcpy(msg->body, (u8 *)req_data, req_len);
+	}
+
+	return;
+}
+
+/* Populate the transfer descriptor with the request (in) and response
+ * (out) buffers used by a single HDC transaction.
+ */
+static void sxe_hdc_trans_info_init(
+					struct sxe_hdc_trans_info *trans_info,
+					u8 *in_data_buf, u16 in_len,
+					u8 *out_data_buf, u16 out_len)
+{
+	struct sxe_hdc_data_info *req = &trans_info->in;
+	struct sxe_hdc_data_info *resp = &trans_info->out;
+
+	req->data = in_data_buf;
+	req->len = in_len;
+	resp->data = out_data_buf;
+	resp->len = out_len;
+}
+
+/* Public entry point: send driver command 'opcode' with 'req_data'
+ * (req_len bytes) to the firmware and copy the acked response into
+ * 'resp_data' (resp_len bytes, may be NULL/0).
+ * Builds the command header + message in a zeroed bounce buffer, runs
+ * the HDC transaction, then validates the ack error code and that the
+ * ack payload length matches resp_len exactly.
+ * Return: 0 on success, -ENOMEM, -EFAULT, or an HDC error code.
+ */
+s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
+					void *req_data, u16 req_len,
+					void *resp_data, u16 resp_len)
+{
+	s32 ret = SXE_SUCCESS;
+	struct sxe_hdc_cmd_hdr *cmd_hdr;
+	struct sxe_hdc_drv_cmd_msg *msg;
+	struct sxe_hdc_drv_cmd_msg *ack;
+	struct sxe_hdc_trans_info trans_info;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u8 *in_data_buf;
+	u8 *out_data_buf;
+	u16 in_len;
+	u16 out_len;
+	u64 trace_id = 0;
+	u16 ack_data_len;
+
+	/* Wire sizes include the command header and the message header. */
+	in_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + req_len;
+	out_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + resp_len;
+
+	trace_id = sxe_trace_id_get();
+
+	in_data_buf = rte_zmalloc("pmd hdc in buffer", in_len, RTE_CACHE_LINE_SIZE);
+	if (in_data_buf == NULL) {
+		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc indata"
+				"mem len[%u] failed\n",trace_id, in_len);
+		ret = -ENOMEM;
+		goto l_ret;
+	}
+
+	out_data_buf = rte_zmalloc("pmd hdc out buffer", out_len, RTE_CACHE_LINE_SIZE);
+	if (out_data_buf == NULL) {
+		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc out_data"
+				"mem len[%u] failed\n",trace_id, out_len);
+		ret = -ENOMEM;
+		goto l_in_buf_free;
+	}
+
+	cmd_hdr = (struct sxe_hdc_cmd_hdr *)in_data_buf;
+	sxe_cmd_hdr_init(cmd_hdr, SXE_CMD_TYPE_DRV);
+
+	msg = (struct sxe_hdc_drv_cmd_msg *)((u8 *)in_data_buf + SXE_HDC_CMD_HDR_SIZE);
+	sxe_driver_cmd_msg_init(msg, opcode, trace_id, req_data, req_len);
+
+	LOG_DEBUG_BDF("trans drv cmd:trace_id=0x%"SXE_PRIX64", opcode[0x%x], "
+			"inlen=%u, out_len=%u\n",
+			trace_id, opcode, in_len, out_len);
+
+	sxe_hdc_trans_info_init(&trans_info,
+				in_data_buf, in_len,
+				out_data_buf, out_len);
+
+	ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info);
+	if (ret) {
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" hdc cmd process"
+				" failed, ret=%d\n",trace_id, ret);
+		goto l_out_buf_free;
+	}
+
+	ack = (struct sxe_hdc_drv_cmd_msg *)((u8 *)out_data_buf + SXE_HDC_CMD_HDR_SIZE);
+
+	if (ack->errcode) {
+		LOG_ERROR_BDF("driver get hdc ack failed trace_id=0x%"SXE_PRIX64", err=%d\n",
+				trace_id, ack->errcode);
+		ret = -EFAULT;
+		goto l_out_buf_free;
+	}
+
+	/* The ack payload must be exactly what the caller expects. */
+	ack_data_len = ack->length.ack_len - SXE_HDC_MSG_HDR_SIZE;
+	if (resp_len != ack_data_len) {
+		LOG_ERROR("ack trace_id=0x%"SXE_PRIX64" data len[%u]"
+			" and resp_len[%u] dont match\n",
+			trace_id, ack_data_len, resp_len);
+		ret = -EFAULT;
+		goto l_out_buf_free;
+	}
+
+	if (resp_len != 0) {
+		memcpy(resp_data, ack->body, resp_len);
+	}
+
+	LOG_DEBUG_BDF("driver get hdc ack trace_id=0x%"SXE_PRIX64","
+			" ack_len=%u, ack_data_len=%u\n",
+			trace_id, ack->length.ack_len, ack_data_len);
+
+l_out_buf_free:
+	rte_free(out_data_buf);
+l_in_buf_free:
+	rte_free(in_data_buf);
+l_ret:
+	return ret;
+}
diff --git a/drivers/net/sxe/pf/sxe_pmd_hdc.h b/drivers/net/sxe/pf/sxe_pmd_hdc.h
new file mode 100644
index 0000000000..13671f3a83
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_pmd_hdc.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_HOST_HDC_H__
+#define __SXE_HOST_HDC_H__
+
+#include "sxe_hdc.h"
+#include "sxe_hw.h"
+#include "sxe_errno.h"
+
+/* HDC (host-driver channel) status/error codes. */
+#define SXE_HDC_SUCCESS              0
+#define SXE_HDC_FALSE                SXE_ERR_HDC(1)
+#define SXE_HDC_INVAL_PARAM          SXE_ERR_HDC(2)
+#define SXE_HDC_BUSY                 SXE_ERR_HDC(3)
+#define SXE_HDC_FW_OPS_FAILED        SXE_ERR_HDC(4)
+#define SXE_HDC_FW_OV_TIMEOUT        SXE_ERR_HDC(5)
+#define SXE_HDC_REQ_ACK_HEAD_ERR     SXE_ERR_HDC(6)
+#define SXE_HDC_REQ_ACK_TLEN_ERR     SXE_ERR_HDC(7)
+#define SXE_HDC_PKG_SKIP_ERR         SXE_ERR_HDC(8)
+#define SXE_HDC_PKG_OTHER_ERR        SXE_ERR_HDC(9)
+#define SXE_HDC_RETRY_ERR            SXE_ERR_HDC(10)
+#define SXE_FW_STATUS_ERR            SXE_ERR_HDC(11)
+
+/* One direction (request or response) of an HDC transfer. */
+struct sxe_hdc_data_info {
+	u8 *data;
+	u16 len;
+};
+
+/* Request/response buffer pair for one HDC transaction. */
+struct sxe_hdc_trans_info {
+	struct sxe_hdc_data_info in;
+	struct sxe_hdc_data_info out;
+};
+
+/* Send a driver command and receive its ack; see sxe_pmd_hdc.c. */
+s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode, 
+					void *req_data, u16 req_len, 
+					void *resp_data, u16 resp_len);
+
+void sxe_hdc_channel_init(void);
+
+void sxe_hdc_channel_uninit(void);
+
+s32 sxe_fw_time_sync(struct sxe_hw *hw);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_ptp.c b/drivers/net/sxe/pf/sxe_ptp.c
new file mode 100644
index 0000000000..166665ad11
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ptp.c
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_ptp.h"
+
+#define SXE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
+
+/* Reset the three PTP timecounters (system time, RX timestamp, TX
+ * timestamp) to zero with a full 64-bit cycle mask and no shift, and
+ * clear the cached TX hardware timestamp.
+ */
+static void sxe_timecounters_start(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	u32 shift = 0;
+
+	memset(&adapter->ptp_ctxt.systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&adapter->ptp_ctxt.rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&adapter->ptp_ctxt.tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	adapter->ptp_ctxt.systime_tc.cc_mask = SXE_CYCLECOUNTER_MASK;
+	adapter->ptp_ctxt.systime_tc.cc_shift = shift;
+	adapter->ptp_ctxt.systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->ptp_ctxt.rx_tstamp_tc.cc_mask = SXE_CYCLECOUNTER_MASK;
+	adapter->ptp_ctxt.rx_tstamp_tc.cc_shift = shift;
+	adapter->ptp_ctxt.rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->ptp_ctxt.tx_tstamp_tc.cc_mask = SXE_CYCLECOUNTER_MASK;
+	adapter->ptp_ctxt.tx_tstamp_tc.cc_shift = shift;
+	adapter->ptp_ctxt.tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	adapter->ptp_ctxt.tx_hwtstamp_nsec = 0;
+	adapter->ptp_ctxt.tx_hwtstamp_sec = 0;
+
+	return;
+}
+
+/* ethdev .timesync_enable callback: initialize PTP hardware, enable
+ * V2 timestamping for all TX and RX events, clear stale RX timestamps,
+ * start the hardware system time and reset the software timecounters.
+ * Always returns 0.
+ */
+s32 sxe_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 tses = SXE_TSES_TXES_V2_ALL | SXE_TSES_RXES_V2_ALL;
+
+	sxe_hw_ptp_init(hw);
+
+	 
+	sxe_hw_ptp_timestamp_mode_set(hw, true, 0, tses);
+
+	sxe_hw_ptp_timestamp_enable(hw);
+
+	sxe_hw_ptp_rx_timestamp_clear(hw);
+
+	sxe_hw_ptp_systime_init(hw);
+
+	sxe_timecounters_start(dev);
+
+	return 0;
+}
+
+/* ethdev .timesync_disable callback: disable timestamp capture, clear
+ * the timestamping mode and stop the system-time increment.
+ * Always returns 0.
+ */
+s32 sxe_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_hw_ptp_timestamp_disable(hw);
+
+	sxe_hw_ptp_timestamp_mode_set(hw, false, 0, 0);
+
+	sxe_hw_ptp_time_inc_stop(hw);
+
+	return 0;
+}
+
+/* ethdev .timesync_read_rx_timestamp callback: read the latched RX
+ * timestamp, convert it through the RX timecounter and return it as a
+ * timespec.
+ * Return: 0 on success, -EINVAL when no valid timestamp is latched.
+ */
+s32 sxe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp,
+				 u32 flags __rte_unused)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 ns;
+	s32 ret = 0;
+	bool is_valid;
+	u64 rx_tstamp_cycles;
+
+	is_valid = sxe_hw_ptp_is_rx_timestamp_valid(hw);
+	if (!is_valid) {
+		PMD_LOG_ERR(DRV, "no valid ptp timestamp in rx register");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	rx_tstamp_cycles = sxe_hw_ptp_rx_timestamp_get(hw);
+	ns = rte_timecounter_update(&adapter->ptp_ctxt.rx_tstamp_tc, rx_tstamp_cycles);
+	PMD_LOG_DEBUG(DRV, "got rx_tstamp_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64, 
+			rx_tstamp_cycles, ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+l_end:
+	return ret;
+}
+
+/* Combine the cached TX timestamp seconds/nanoseconds into a single
+ * nanosecond cycle count.
+ */
+static u64 sxe_timesync_tx_tstamp_cycles_get(
+						struct sxe_adapter *adapter)
+{
+	return SXE_TIME_TO_NS(adapter->ptp_ctxt.tx_hwtstamp_nsec,
+				adapter->ptp_ctxt.tx_hwtstamp_sec);
+}
+
+/* ethdev .timesync_read_tx_timestamp callback: read the TX timestamp,
+ * poll to confirm it is stable, convert through the TX timecounter and
+ * return it as a timespec. The previously returned timestamp is cached
+ * so the same value is not reported twice.
+ * NOTE(review): the first poll loop (SXE_TXTS_POLL_CHECK reads)
+ * discards results and only the last read is compared in the second
+ * loop -- presumably a hardware settling workaround; confirm intent.
+ * Return: 0 on success, -EINVAL when no new valid timestamp exists.
+ */
+s32 sxe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 ns;
+	s32 ret = 0;
+	u64 tx_tstamp_cycles;
+	u32 ts_sec;
+	u32 ts_ns;
+	u32 last_sec;
+	u32 last_ns;
+	bool tx_tstamp_valid = true;
+	u8 i;
+
+	sxe_hw_ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
+	if (ts_ns  != adapter->ptp_ctxt.tx_hwtstamp_nsec ||
+		ts_sec != adapter->ptp_ctxt.tx_hwtstamp_sec) {
+		for (i = 0; i < SXE_TXTS_POLL_CHECK; i++) {
+			sxe_hw_ptp_tx_timestamp_get(hw, &last_sec, &last_ns);
+		}
+
+		for (; i < SXE_TXTS_POLL; i++) {
+			sxe_hw_ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
+			if ((last_ns != ts_ns) || (last_sec != ts_sec)) {
+				tx_tstamp_valid = false;
+				break;
+			}
+		}
+	}
+
+	if (!tx_tstamp_valid || ((ts_ns == adapter->ptp_ctxt.tx_hwtstamp_nsec)
+			&& (ts_sec == adapter->ptp_ctxt.tx_hwtstamp_sec))) {
+		PMD_LOG_DEBUG(DRV, "no valid ptp timestamp in tx register");
+		ret = -EINVAL;
+		goto l_end;
+	} else {
+		adapter->ptp_ctxt.tx_hwtstamp_nsec = ts_ns;
+		adapter->ptp_ctxt.tx_hwtstamp_sec  = ts_sec;
+		tx_tstamp_cycles = 
+			sxe_timesync_tx_tstamp_cycles_get(adapter);
+		ns = rte_timecounter_update(&adapter->ptp_ctxt.tx_tstamp_tc,
+						tx_tstamp_cycles);
+		PMD_LOG_DEBUG(DRV, "got tx_tstamp_cycles = %"
+			SXE_PRIU64"ns=%"SXE_PRIU64, tx_tstamp_cycles, ns);
+		*timestamp = rte_ns_to_timespec(ns);
+	}
+
+l_end:
+	return ret;
+}
+
+/* ethdev .timesync_adjust_time callback: apply a signed nanosecond
+ * offset to all three software timecounters (hardware time is not
+ * touched). Always returns 0.
+ */
+s32 sxe_timesync_adjust_time(struct rte_eth_dev *dev, s64 delta)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	PMD_LOG_DEBUG(DRV, "got delta = %"SXE_PRID64, delta);
+
+	adapter->ptp_ctxt.systime_tc.nsec += delta;
+	adapter->ptp_ctxt.rx_tstamp_tc.nsec += delta;
+	adapter->ptp_ctxt.tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+/* ethdev .timesync_read_time callback: read the hardware system time,
+ * convert through the systime timecounter and return it as a timespec.
+ * Always returns 0.
+ */
+s32 sxe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 ns, systime_cycles;
+
+	systime_cycles = sxe_hw_ptp_systime_get(hw);
+	ns = rte_timecounter_update(&adapter->ptp_ctxt.systime_tc, systime_cycles);
+	PMD_LOG_DEBUG(DRV, "got systime_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64, 
+			systime_cycles, ns);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+/* ethdev .timesync_write_time callback: set all three software
+ * timecounter offsets to the given wall time (hardware counters are
+ * not rewritten). Always returns 0.
+ */
+s32 sxe_timesync_write_time(struct rte_eth_dev *dev, 
+					const struct timespec *ts)
+{
+	u64 ns;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	ns = rte_timespec_to_ns(ts);
+	PMD_LOG_DEBUG(DRV, "set systime ns = %"SXE_PRIU64, ns);
+	adapter->ptp_ctxt.systime_tc.nsec = ns;
+	adapter->ptp_ctxt.rx_tstamp_tc.nsec = ns;
+	adapter->ptp_ctxt.tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
diff --git a/drivers/net/sxe/pf/sxe_ptp.h b/drivers/net/sxe/pf/sxe_ptp.h
new file mode 100644
index 0000000000..367c1a34a0
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_ptp.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_PTP_H__
+#define __SXE_PTP_H__
+
+/* IEEE 1588 (PTP) timesync operations exposed by the sxe PF datapath. */
+
+s32 sxe_timesync_enable(struct rte_eth_dev *dev);
+
+s32 sxe_timesync_disable(struct rte_eth_dev *dev);
+
+s32 sxe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp,
+				 u32 flags __rte_unused);
+
+s32 sxe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+				 struct timespec *timestamp);
+
+/* Shift the software PTP clock by a signed nanosecond delta. */
+s32 sxe_timesync_adjust_time(struct rte_eth_dev *dev, s64 delta);
+
+s32 sxe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);
+
+s32 sxe_timesync_write_time(struct rte_eth_dev *dev,
+					const struct timespec *ts);
+
+#endif /* __SXE_PTP_H__ */
diff --git a/drivers/net/sxe/pf/sxe_queue.c b/drivers/net/sxe/pf/sxe_queue.c
new file mode 100644
index 0000000000..8a0042022b
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_queue.c
@@ -0,0 +1,856 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#include "sxe_ethdev.h"
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#include "sxe_ethdev.h"
+#endif
+
+#include "rte_malloc.h"
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue.h"
+#include "sxe_offload.h"
+#include "sxe_queue_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#endif
+#include "sxe_compat_version.h"
+
+#define SXE_RXQ_SCAN_INTERVAL 				4
+
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH   32
+#endif
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+#define RTE_SXE_WAIT_100_US	100
+
+#define SXE_MMW_SIZE_DEFAULT        0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600 
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+/* Validate the per-pool RX queue number for VMDq RSS under SRIOV and
+ * derive the pool layout from it (active pool count, queues per pool,
+ * index of the first PF/default-pool queue).
+ *
+ * Returns 0 on success, -EINVAL when rxq_num is not 1, 2 or 4.
+ */
+static s32 sxe_vf_rss_rxq_num_validate(struct rte_eth_dev *dev, u16 rxq_num)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	switch (rxq_num) {
+	case SXE_1_RING_PER_POOL:
+	case SXE_2_RING_PER_POOL:
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
+		break;
+	case SXE_4_RING_PER_POOL:
+		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
+		break;
+	default:
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Split the hardware rings evenly among the active pools. */
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		SXE_HW_TXRX_RING_NUM_MAX / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	PMD_LOG_INFO(INIT, "enable sriov, vfs num:%u, %u pool mode, %u queue per pool, "
+				"vm total queue num are %u",
+				pci_dev->max_vfs,
+				RTE_ETH_DEV_SRIOV(dev).active,
+				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool,
+				RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx);
+l_end:
+	return ret;
+}
+
+/* Validate and fix up the RX/TX multi-queue modes when SRIOV is active.
+ *
+ * RSS and "none" modes are remapped to their VMDq equivalents, DCB+RSS
+ * combinations are rejected, and the requested RX/TX queue counts are
+ * checked against the per-pool queue budget.
+ *
+ * Returns 0 on success, -EINVAL on an unsupported mode or queue count.
+ */
+s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	u16 rx_q_num = dev->data->nb_rx_queues;
+	u16 tx_q_num = dev->data->nb_tx_queues;
+
+	switch (dev_conf->rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		PMD_LOG_INFO(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in sriov");
+		break;
+
+	case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		PMD_LOG_ERR(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB_RSS mode unsupported in sriov");
+		ret = -EINVAL;
+		goto l_end;
+
+	case RTE_ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_VMDQ_RSS:
+		dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		if ((rx_q_num <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) &&
+				sxe_vf_rss_rxq_num_validate(dev, rx_q_num)) {
+			PMD_LOG_ERR(INIT, "sriov is active, invalid queue number[%d], "
+				" for vmdq rss, allowed value are 1, 2 or 4",
+					rx_q_num);
+			ret = -EINVAL;
+			goto l_end;
+		}
+		break;
+
+	case RTE_ETH_MQ_RX_VMDQ_ONLY:
+	case RTE_ETH_MQ_RX_NONE:
+		dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
+		break;
+
+	default:
+		PMD_LOG_ERR(INIT, "sriov is active, wrong mq_mode rx %d",
+				dev_conf->rxmode.mq_mode);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	switch (dev_conf->txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		PMD_LOG_INFO(INIT, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in sriov");
+		break;
+
+	case RTE_ETH_MQ_TX_DCB:
+		PMD_LOG_ERR(INIT, "RTE_ETH_MQ_TX_DCB mode unsupported in sriov");
+		ret = -EINVAL;
+		goto l_end;
+
+	default:
+		/* Every other TX mode falls back to plain VMDq. */
+		dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
+		break;
+	}
+
+	if ((rx_q_num > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+		(tx_q_num > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+		PMD_LOG_ERR(INIT, "SRIOV is active,"
+				" rx_q_num=%d tx_q_num=%d queue number"
+				" must be less than or equal to %d.",
+				rx_q_num, tx_q_num,
+				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "sriov enable, rx_mq_mode=%d, tx_mq_mode=%d, "
+			"rx_q_num=%d, tx_q_num=%d, q_per_pool=%d",
+			dev_conf->rxmode.mq_mode, dev_conf->txmode.mq_mode,
+			rx_q_num, tx_q_num, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+
+l_end:
+	return ret;
+}
+
+#endif
+
+/* Validate the RX/TX multi-queue modes when SRIOV is not active.
+ *
+ * VMDq+DCB requires the full hardware ring count and 16 or 32 pools;
+ * DCB requires 4 or 8 TCs; TX "none" mode is limited to the hardware's
+ * non-DCB queue count. Other modes are accepted as-is.
+ *
+ * Returns 0 on success, -EINVAL on an illegal configuration.
+ */
+static inline s32 sxe_non_sriov_mq_mode_check(struct rte_eth_dev *dev)
+{
+	s32 ret = -EINVAL;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	u16 rx_q_num = dev->data->nb_rx_queues;
+	u16 tx_q_num = dev->data->nb_tx_queues;
+
+	switch (dev_conf->rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		PMD_LOG_ERR(INIT, "VMDQ+DCB+RSS mq_mode is not supported");
+		goto l_end;
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		if (rx_q_num != SXE_HW_TXRX_RING_NUM_MAX) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected, nb_rx_q != %d",
+					SXE_HW_TXRX_RING_NUM_MAX);
+			goto l_end;
+		}
+
+		if (!((dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools ==
+			RTE_ETH_16_POOLS) ||
+			(dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools ==
+			RTE_ETH_32_POOLS))) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected,"
+					" nb_queue_pools must be %d or %d",
+					RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
+			goto l_end;
+		}
+		break;
+	case RTE_ETH_MQ_RX_DCB:
+		if (!(dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs == RTE_ETH_4_TCS ||
+			dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs == RTE_ETH_8_TCS)) {
+			PMD_LOG_ERR(INIT, "DCB selected, nb_tcs != %d"
+					" and nb_tcs != %d",
+					RTE_ETH_4_TCS, RTE_ETH_8_TCS);
+			goto l_end;
+		}
+		break;
+	default:
+		PMD_LOG_INFO(INIT, "%d rx mq_mode supported",
+					dev_conf->rxmode.mq_mode);
+		break;
+	}
+
+	switch (dev_conf->txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_NONE:
+		if (tx_q_num > SXE_HW_TX_NONE_MODE_Q_NUM) {
+			PMD_LOG_ERR(INIT, "Neither VT nor DCB are enabled, "
+					"nb_tx_q > %d.",
+					SXE_HW_TX_NONE_MODE_Q_NUM);
+			goto l_end;
+		}
+		break;
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		if (tx_q_num != SXE_HW_TXRX_RING_NUM_MAX) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected, nb_tx_q != %d",
+					SXE_HW_TXRX_RING_NUM_MAX);
+			goto l_end;
+		}
+
+		if (!((dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_16_POOLS) ||
+			(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_32_POOLS))) {
+			PMD_LOG_ERR(INIT, "VMDQ+DCB selected,"
+					" nb_queue_pools must be %d or %d",
+					RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
+			goto l_end;
+		}
+		break;
+	case RTE_ETH_MQ_TX_DCB:
+		if (!(dev_conf->tx_adv_conf.dcb_tx_conf.nb_tcs == RTE_ETH_4_TCS ||
+			dev_conf->tx_adv_conf.dcb_tx_conf.nb_tcs == RTE_ETH_8_TCS)) {
+			PMD_LOG_ERR(INIT, "DCB selected, nb_tcs != %d"
+					" and nb_tcs != %d",
+					RTE_ETH_4_TCS, RTE_ETH_8_TCS);
+			goto l_end;
+		}
+		break;
+	default:
+		PMD_LOG_INFO(INIT, "%d tx mq_mode supported",
+					dev_conf->txmode.mq_mode);
+		break;
+	}
+
+	ret = 0;
+
+	PMD_LOG_INFO(INIT, "sriov disable, rx_mq_mode=%d, tx_mq_mode=%d, "
+		"rx_q_num=%d, tx_q_num=%d",
+		dev_conf->rxmode.mq_mode, dev_conf->txmode.mq_mode,
+		rx_q_num, tx_q_num);
+
+l_end:
+	return ret;
+}
+
+/* Validate the configured multi-queue mode for the port.
+ *
+ * With SRIOV active the SRIOV-specific checks run (or -ENOTSUP is
+ * returned when SRIOV support is compiled out); otherwise the
+ * non-SRIOV checks run.
+ *
+ * Restructured so braces balance inside every preprocessor branch:
+ * the original duplicated the "if (...active) {" line on both sides
+ * of the #if, which is fragile and hard to read.
+ */
+s32 sxe_mq_mode_check(struct rte_eth_dev *dev)
+{
+	s32 ret;
+
+	if (RTE_ETH_DEV_SRIOV(dev).active) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+		ret = sxe_sriov_mq_mode_check(dev);
+#else
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "sriov not supported");
+#endif
+	} else {
+		ret = sxe_non_sriov_mq_mode_check(dev);
+	}
+
+	return ret;
+}
+
+/* Fill @q_info for TX queue @queue_id; delegates to the common layer. */
+void sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info)
+{
+	__sxe_tx_queue_info_get(dev, queue_id, q_info);
+}
+
+/* Validate TX queue setup arguments and derive the RS/free thresholds.
+ *
+ * @ring_depth must be a multiple of SXE_TX_DESC_RING_ALIGN inside
+ * [SXE_MIN_RING_DESC, SXE_MAX_RING_DESC]. *free_thresh and *rs_thresh
+ * are taken from @tx_conf (falling back to defaults) and cross-checked
+ * against the ring depth and each other.
+ *
+ * Returns 0 on success, -EINVAL on any violated constraint.
+ */
+s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
+				u16 *rs_thresh, u16 *free_thresh,
+				const struct rte_eth_txconf *tx_conf)
+{
+	s32 ret = -EINVAL;
+
+	if (ring_depth % SXE_TX_DESC_RING_ALIGN != 0 ||
+		(ring_depth > SXE_MAX_RING_DESC) ||
+		(ring_depth < SXE_MIN_RING_DESC)) {
+		goto l_end;
+	}
+
+	*free_thresh = (u16)((tx_conf->tx_free_thresh) ?
+			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	/* Clamp the default RS threshold so rs + free never exceeds depth. */
+	*rs_thresh = (DEFAULT_TX_RS_THRESH + *free_thresh > ring_depth) ?
+			ring_depth - *free_thresh : DEFAULT_TX_RS_THRESH;
+
+	if (tx_conf->tx_rs_thresh > 0) {
+		*rs_thresh = tx_conf->tx_rs_thresh;
+	}
+
+	if (*rs_thresh + *free_thresh > ring_depth) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh + tx_free_thresh must not "
+			     "exceed nb_desc. (tx_rs_thresh=%u "
+			     "tx_free_thresh=%u nb_desc=%u port = %d)",
+			     *rs_thresh, *free_thresh,
+			     ring_depth, dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*rs_thresh >= (ring_depth - 2)) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than the number "
+				"of TX descriptors minus 2. (tx_rs_thresh=%u "
+				"port=%d)",
+				*rs_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*rs_thresh > DEFAULT_TX_RS_THRESH) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less or equal than %u. "
+			"(tx_rs_thresh=%u port=%d)",
+			DEFAULT_TX_RS_THRESH, *rs_thresh,
+			dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*free_thresh >= (ring_depth - 3)) {
+		PMD_LOG_ERR(INIT, "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d)",
+			     *free_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	if (*rs_thresh > *free_thresh) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d)",
+			     *free_thresh, *rs_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	if ((ring_depth % *rs_thresh) != 0) {
+		PMD_LOG_ERR(INIT, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u "
+			     "port=%d, ring_depth=%d)",
+			     *rs_thresh, dev->data->port_id, ring_depth);
+		goto l_end;
+	}
+
+	if ((*rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+		PMD_LOG_ERR(INIT, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. "
+			     "(tx_rs_thresh=%u port=%d)",
+			     *rs_thresh, dev->data->port_id);
+		goto l_end;
+	}
+
+	ret = 0;
+
+l_end:
+	return ret;
+}
+
+/* Release the software TX buffer ring.
+ *
+ * rte_free(NULL) is a no-op, so no separate buffer_ring check is
+ * needed; the pointer is cleared afterwards to guard against a
+ * dangling reference / double free on a later release pass.
+ */
+static void __rte_cold sxe_tx_buffer_ring_free(sxe_tx_queue_s *txq)
+{
+	if (txq != NULL) {
+		rte_free(txq->buffer_ring);
+		txq->buffer_ring = NULL;
+	}
+}
+
+/* Free every mbuf still attached to the TX software ring; each slot is
+ * NULLed so a subsequent release pass does not free it again.
+ */
+static void __rte_cold sxe_tx_queue_mbufs_release(sxe_tx_queue_s *txq)
+{
+	u32 i;
+
+	if (txq->buffer_ring != NULL) {
+		for (i = 0; i < txq->ring_depth; i++) {
+			if (txq->buffer_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->buffer_ring[i].mbuf);
+				txq->buffer_ring[i].mbuf = NULL;
+			}
+		}
+	}
+
+	return;
+}
+
+/* Free a TX queue and all resources it owns (common-layer helper). */
+void __rte_cold sxe_tx_queue_free(sxe_tx_queue_s *txq)
+{
+	__sxe_tx_queue_free(txq);
+}
+
+/* rte_eth_dev tx_queue_release hook. The ethdev callback signature
+ * changed between DPDK releases (void *queue vs. dev + queue index),
+ * hence the two variants.
+ */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxe_tx_queue_release(void *txq)
+{
+	sxe_tx_queue_free(txq);
+	return;
+}
+#else
+void __rte_cold sxe_tx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx)
+{
+	sxe_tx_queue_free(dev->data->tx_queues[queue_idx]);
+	return;
+}
+#endif
+
+/* Reset a TX queue to its initial software state: zero the descriptor
+ * ring, mark every descriptor done (DD) so it appears free, and rebuild
+ * the circular buffer linkage.
+ */
+static void __rte_cold sxe_tx_queue_init(sxe_tx_queue_s *txq)
+{
+	u16 prev, i;
+	volatile sxe_tx_data_desc_u *txd;
+	static const sxe_tx_data_desc_u zeroed_desc = {{0}};
+	struct sxe_tx_buffer *tx_buffer = txq->buffer_ring;
+
+	for (i = 0; i < txq->ring_depth; i++) {
+		txq->desc_ring[i] = zeroed_desc;
+	}
+
+	/* Link buffers in a ring: entry i's predecessor points at i. */
+	prev = txq->ring_depth - 1;
+	for (i = 0; i < txq->ring_depth; i++) {
+		txd = &txq->desc_ring[i];
+		txd->wb.status = rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD);
+		tx_buffer[i].mbuf       = NULL;
+		tx_buffer[i].last_id    = i;
+		tx_buffer[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->ctx_curr      = 0;
+	txq->desc_used_num = 0;
+	txq->desc_free_num = txq->ring_depth - 1;
+	txq->next_to_use   = 0;
+	txq->next_to_clean = txq->ring_depth - 1;
+	txq->next_dd       = txq->rs_thresh  - 1;
+	txq->next_rs       = txq->rs_thresh  - 1;
+	memset((void *)&txq->ctx_cache, 0,
+			SXE_CTXT_DESC_NUM * sizeof(struct sxe_ctxt_info));
+
+	return;
+}
+
+/* Allocate a TX queue structure, its DMA descriptor memzone and the
+ * software buffer ring. Any queue already present at @queue_idx is
+ * freed first. Returns the queue, or NULL on allocation failure
+ * (partially allocated resources are released).
+ */
+sxe_tx_queue_s * __rte_cold sxe_tx_queue_alloc(
+					struct rte_eth_dev *dev,
+					u16 queue_idx,
+					u16 ring_depth,
+					u32 socket_id)
+{
+	sxe_tx_queue_s *txq;
+	const struct rte_memzone *tz;
+
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		sxe_tx_queue_free(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	txq = rte_zmalloc_socket("tx queue", sizeof(sxe_tx_queue_s),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] alloc failed", queue_idx);
+		goto l_end;
+	}
+
+	/* Memzone sized for SXE_MAX_RING_DESC so the ring can later be
+	 * reconfigured without reallocating the DMA area.
+	 */
+	tz = rte_eth_dma_zone_reserve(dev, "tx_desc_ring", queue_idx,
+			sizeof(sxe_tx_data_desc_u) * SXE_MAX_RING_DESC,
+			SXE_ALIGN, socket_id);
+	if (tz == NULL) {
+		PMD_LOG_ERR(INIT, "tx desc ring alloc failed, queue_id=%d", queue_idx);
+		rte_free(txq);
+		txq = NULL;
+		goto l_end;
+	}
+
+	txq->buffer_ring = rte_zmalloc_socket("tx_buffer_ring",
+				sizeof(struct sxe_tx_buffer) * ring_depth,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->buffer_ring == NULL) {
+		PMD_LOG_ERR(INIT, "tx buffer alloc failed, queue_id=%d", queue_idx);
+		rte_memzone_free(tz);
+		rte_free(txq);
+		txq = NULL;
+		goto l_end;
+	}
+
+	txq->mz = tz;
+	txq->base_addr = tz->iova;
+	txq->desc_ring = (sxe_tx_data_desc_u *)tz->addr;
+
+l_end:
+	return txq;
+}
+
+/* Start TX queue @queue_id: reset the hardware head/tail registers,
+ * enable the ring and mark the queue started. Always returns 0.
+ */
+s32 __rte_cold sxe_tx_queue_start(struct rte_eth_dev *dev, u16 queue_id)
+{
+	sxe_tx_queue_s *txq = dev->data->tx_queues[queue_id];
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_tx_ring_head_init(hw, txq->reg_idx);
+	sxe_hw_tx_ring_tail_init(hw, txq->reg_idx);
+	sxe_hw_tx_ring_switch(hw, txq->reg_idx, true);
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/* Stop TX queue @queue_id: poll until the hardware ring drains
+ * (head == tail, roughly 10 x 100 us attempts), disable the ring,
+ * release in-flight mbufs and re-initialise the software state.
+ * Always returns 0, even if the drain times out (only logged).
+ */
+s32 __rte_cold sxe_tx_queue_stop(struct rte_eth_dev *dev, u16 queue_id)
+{
+	s32 poll_ms = RTE_SXE_REGISTER_POLL_WAIT_10_MS;
+	u32 head, tail;
+	sxe_tx_queue_s *txq = dev->data->tx_queues[queue_id];
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	do {
+		rte_delay_us(RTE_SXE_WAIT_100_US);
+		sxe_hw_tx_ring_info_get(hw, txq->reg_idx, &head, &tail);
+
+	} while (--poll_ms && (head != tail));
+
+	if (!poll_ms) {
+		PMD_LOG_ERR(INIT, "Tx Queue %d is not empty when stopping.",
+				queue_id);
+	}
+
+	sxe_hw_tx_ring_switch(hw, txq->reg_idx, false);
+
+	if (txq->ops != NULL) {
+		txq->ops->mbufs_release(txq);
+		txq->ops->init(txq);
+	}
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return 0;
+}
+
+/* Fill @qinfo for RX queue @queue_id; delegates to the common layer. */
+void sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	__sxe_rx_queue_info_get(dev, queue_id, qinfo);
+}
+
+/* Populate the RX ring of @rxq with fresh mbufs (common-layer helper). */
+s32 __rte_cold sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
+{
+	return __sxe_rx_queue_mbufs_alloc(rxq);
+}
+
+/* Start RX queue @queue_id: fill the descriptor ring with mbufs, enable
+ * the hardware ring and program the head/tail registers. Returns 0 on
+ * success or the mbuf-allocation error code.
+ */
+s32 __rte_cold sxe_rx_queue_start(struct rte_eth_dev *dev,
+						u16 queue_id)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw      *hw = &adapter->hw;
+	struct sxe_rx_queue *rxq;
+	u16 reg_idx;
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[queue_id];
+	reg_idx = rxq->reg_idx;
+
+	ret = sxe_rx_queue_mbufs_alloc(rxq);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "could not alloc mbuf for queue:%d",
+			     queue_id);
+		goto l_end;
+	}
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, true);
+
+	/* Tail set to ring_depth - 1: all descriptors are available. */
+	sxe_hw_rx_queue_desc_reg_configure(hw, reg_idx, 0, rxq->ring_depth - 1);
+	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+l_end:
+	return ret;
+}
+
+/* Free a scattered (multi-segment) mbuf chain segment by segment.
+ * NOTE(review): walks exactly mbuf->nb_segs links - assumes nb_segs
+ * matches the actual chain length; confirm for partially built chains.
+ */
+STATIC void __rte_cold sxe_rx_queue_sc_mbufs_free(struct rte_mbuf *mbuf)
+{
+	u16 i;
+	u16 num_segs = mbuf->nb_segs;
+	struct rte_mbuf *next_seg;
+
+	for (i = 0; i < num_segs; i++) {
+		next_seg = mbuf->next;
+		rte_pktmbuf_free_seg(mbuf);
+		mbuf = next_seg;
+	}
+
+	return;
+}
+
+/* Free every mbuf an RX queue still holds: the vector path's own
+ * release when SSE is in use, otherwise the normal buffer ring, any
+ * completed-but-unreturned packets, and the scatter buffer ring.
+ */
+void __rte_cold sxe_rx_queue_mbufs_free(struct sxe_rx_queue *rxq)
+{
+	u16 i;
+	
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	if (rxq->is_using_sse) {
+		sxe_rx_queue_vec_mbufs_release(rxq);
+		goto l_out;
+	}
+#endif
+
+	if (rxq->buffer_ring != NULL) {
+		for (i = 0; i < rxq->ring_depth; i++) {
+			if (rxq->buffer_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(rxq->buffer_ring[i].mbuf);
+				rxq->buffer_ring[i].mbuf = NULL;
+			}
+		}
+		/* Packets already completed but not yet handed to the app. */
+		if (rxq->completed_pkts_num) {
+			for (i = 0; i < rxq->completed_pkts_num; ++i) {
+				struct rte_mbuf *mbuf;
+
+				mbuf = rxq->completed_ring[rxq->next_ret_pkg + i];
+				rte_pktmbuf_free_seg(mbuf);
+			}
+			rxq->completed_pkts_num = 0;
+		}
+	}
+
+	if (rxq->sc_buffer_ring) {
+		for (i = 0; i < rxq->ring_depth; i++) {
+			if (rxq->sc_buffer_ring[i].mbuf) {
+				sxe_rx_queue_sc_mbufs_free(rxq->sc_buffer_ring[i].mbuf);
+				rxq->sc_buffer_ring[i].mbuf = NULL;
+			}
+		}
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+l_out:
+#endif
+
+	return;
+}
+
+/* Reset an RX queue's software state: zero the descriptor ring (plus
+ * the burst-alloc overflow area when batch alloc is enabled), point the
+ * overflow slots at the fake mbuf, and clear all bookkeeping indices.
+ */
+void __rte_cold sxe_rx_queue_init(bool rx_batch_alloc_allowed,
+						struct sxe_rx_queue *rxq)
+{
+	static const sxe_rx_data_desc_u zeroed_desc = {{0}};
+	u16 i;
+	u16 len = rxq->ring_depth;
+
+	/* Extra descriptors back the bulk-alloc read-ahead. */
+	if (rx_batch_alloc_allowed) {
+		len += RTE_PMD_SXE_MAX_RX_BURST;
+	}
+
+	for (i = 0; i < len; i++) {
+		rxq->desc_ring[i] = zeroed_desc;
+	}
+
+	/* Overflow slots reference the fake mbuf so they are never NULL. */
+	memset(&rxq->fake_mbuf, 0, sizeof(rxq->fake_mbuf));
+	for (i = rxq->ring_depth; i < len; ++i) {
+		rxq->buffer_ring[i].mbuf = &rxq->fake_mbuf;
+	}
+
+	rxq->completed_pkts_num = 0;
+	rxq->next_ret_pkg = 0;
+	rxq->batch_alloc_trigger = rxq->batch_alloc_size - 1;
+	rxq->processing_idx = 0;
+	rxq->hold_num = 0;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	if (rxq->pkt_first_seg != NULL) {
+		rte_pktmbuf_free(rxq->pkt_first_seg);
+	}
+
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+
+#if defined(RTE_ARCH_X86)
+	rxq->realloc_start = 0;
+	rxq->realloc_num = 0;
+#endif
+#endif
+
+	return;
+}
+
+/* Free an RX queue and all resources it owns (common-layer helper). */
+void __rte_cold sxe_rx_queue_free(struct sxe_rx_queue *rxq)
+{
+	__sxe_rx_queue_free(rxq);
+}
+
+/* rte_eth_dev rx_queue_release hook. The ethdev callback signature
+ * changed between DPDK releases (void *queue vs. dev + queue index),
+ * hence the two variants.
+ */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxe_rx_queue_release(void *rxq)
+{
+	sxe_rx_queue_free(rxq);
+	return;
+}
+#else
+void __rte_cold sxe_rx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx)
+{
+	sxe_rx_queue_free(dev->data->rx_queues[queue_idx]);
+	return;
+}
+#endif
+
+/* Stop RX queue @queue_id: disable the hardware ring, wait 100 us for
+ * it to quiesce, then free all held mbufs and reset software state.
+ * Always returns 0.
+ */
+s32 __rte_cold sxe_rx_queue_stop(struct rte_eth_dev *dev, u16 queue_id)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_rx_queue *rxq;
+	u16 reg_idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[queue_id];
+	reg_idx = rxq->reg_idx;
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, false);
+
+	rte_delay_us(RTE_SXE_WAIT_100_US);
+
+	sxe_rx_queue_mbufs_free(rxq);
+	sxe_rx_queue_init(adapter->rx_batch_alloc_allowed, rxq);
+	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/* Return an estimate of the number of used descriptors on an RX queue.
+ * Scans from the current processing index in steps of
+ * SXE_RXQ_SCAN_INTERVAL descriptors while the DD bit is set, wrapping
+ * at the ring end; the count is therefore a multiple of the interval.
+ */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+u32 sxe_rx_queue_count(struct rte_eth_dev *dev, u16 queue_id)
+#else
+u32 sxe_rx_queue_count(void *rx_queue)
+#endif
+{
+	volatile sxe_rx_data_desc_u *desc;
+	struct sxe_rx_queue *rxq;
+	u32 count = 0;
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	rxq = dev->data->rx_queues[queue_id];
+#else
+	rxq = rx_queue;
+#endif
+	
+	desc = &(rxq->desc_ring[rxq->processing_idx]);
+
+	while ((count < rxq->ring_depth) &&
+		(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+		count += SXE_RXQ_SCAN_INTERVAL;
+		desc  += SXE_RXQ_SCAN_INTERVAL;
+		/* Wrap the descriptor pointer at the end of the ring. */
+		if (rxq->processing_idx + count >= rxq->ring_depth) {
+			desc = &(rxq->desc_ring[rxq->processing_idx +
+				count - rxq->ring_depth]);
+		}
+	}
+
+	return count;
+}
+
+/* Clear all RX/TX queues of the port (delegates to the common layer). */
+void __rte_cold sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	__sxe_txrx_queues_clear(dev, rx_batch_alloc_allowed);
+}
+
+/* Free every RX/TX queue of the port (delegates to the common layer). */
+void sxe_queues_free(struct rte_eth_dev *dev)
+{
+	__sxe_queues_free(dev);
+}
+
+/* Default TX queue operations used by the PF datapath. */
+const struct sxe_txq_ops def_txq_ops = {
+	.init             = sxe_tx_queue_init,
+	.mbufs_release    = sxe_tx_queue_mbufs_release,
+	.buffer_ring_free = sxe_tx_buffer_ring_free,
+};
+
+/* Return the default TX queue ops table. */
+const struct sxe_txq_ops *sxe_tx_default_ops_get(void)
+{
+	return &def_txq_ops;
+}
+
+/* Program the hardware TX multi-queue mode from the current VMDq/SRIOV
+ * configuration (pool count taken from the SRIOV state).
+ */
+void sxe_multi_queue_tx_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+	u16 pools_num = RTE_ETH_DEV_SRIOV(dev).active;
+	bool sriov_active = !!pools_num;
+	bool vmdq_active = (dev->data->dev_conf.txmode.mq_mode == 
+				RTE_ETH_MQ_TX_VMDQ_ONLY);
+
+	sxe_hw_tx_multi_queue_configure(hw, vmdq_active, sriov_active, pools_num);
+
+	return;
+}
+
+/* Program the per-queue TX rate limiter.
+ *
+ * The rate factor written to hardware is the ratio link_speed/tx_rate
+ * in fixed point (integer part plus SXE_RTTBCNRC_RF_INT_SHIFT
+ * fractional bits); tx_rate == 0 disables limiting.
+ * NOTE(review): assumes tx_rate uses the same unit as
+ * dev_link.link_speed - confirm against callers.
+ * The DCB max memory window is widened when jumbo frames are enabled.
+ * Returns -EINVAL for an out-of-range queue index.
+ */
+#if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev, 
+					u16 queue_idx, u16 tx_rate)
+#else
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev, 
+					u16 queue_idx, u32 tx_rate)
+#endif
+{
+	int ret = 0;
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	struct rte_eth_rxmode *rxmode;
+#endif
+	u32 rf_dec, rf_int, bcnrc_val;
+	u16 link_speed = dev->data->dev_link.link_speed;
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+
+	if (queue_idx >= SXE_HW_TXRX_RING_NUM_MAX) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (tx_rate != 0) {
+		/* Fixed-point link_speed / tx_rate: integer + fraction. */
+		rf_int = (u32)link_speed / (u32)tx_rate;
+		rf_dec = (u32)link_speed % (u32)tx_rate;
+		rf_dec = (rf_dec << SXE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+		bcnrc_val = SXE_RTTBCNRC_RS_ENA;
+		bcnrc_val |= ((rf_int << SXE_RTTBCNRC_RF_INT_SHIFT) &
+				SXE_RTTBCNRC_RF_INT_MASK);
+		bcnrc_val |= (rf_dec & SXE_RTTBCNRC_RF_DEC_MASK);
+	} else {
+		bcnrc_val = 0;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	rxmode = &dev->data->dev_conf.rxmode;
+
+	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len >= SXE_MAX_JUMBO_FRAME_SIZE)) {
+#else
+	if (dev->data->mtu + SXE_ETH_OVERHEAD >= SXE_MAX_JUMBO_FRAME_SIZE) {
+#endif
+		sxe_hw_dcb_max_mem_window_set(hw, 
+						SXE_MMW_SIZE_JUMBO_FRAME);
+	} else {
+		sxe_hw_dcb_max_mem_window_set(hw, SXE_MMW_SIZE_DEFAULT);
+	}
+
+	sxe_hw_dcb_tx_ring_rate_factor_set(hw, queue_idx, bcnrc_val);
+
+l_end:
+	return ret;
+}
+
diff --git a/drivers/net/sxe/pf/sxe_queue.h b/drivers/net/sxe/pf/sxe_queue.h
new file mode 100644
index 0000000000..ef3036a07d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_queue.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_QUEUE_H__
+#define __SXE_QUEUE_H__
+
+#include "sxe_dpdk_version.h"
+#include "sxe_queue_common.h"
+
+#define SXE_TXRX_RING_NUM_MAX     64
+
+/* Maximum number of segments per transmitted packet. */
+#define SXE_TX_MAX_SEG          40
+
+#define	SXE_MIN_RING_DESC	32
+#define	SXE_MAX_RING_DESC	4096
+
+/* NOTE(review): also defined in sxe_queue.c - keep the two in sync. */
+#define SXE_MMW_SIZE_DEFAULT        0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600
+
+#define SXE_DEFAULT_RX_FREE_THRESH  32
+#define SXE_DEFAULT_RX_PTHRESH      8
+#define SXE_DEFAULT_RX_HTHRESH      8
+#define SXE_DEFAULT_RX_WTHRESH      0
+
+#define SXE_DEFAULT_TX_FREE_THRESH  32
+#define SXE_DEFAULT_TX_PTHRESH      32
+#define SXE_DEFAULT_TX_HTHRESH      0
+#define SXE_DEFAULT_TX_WTHRESH      0
+#define SXE_DEFAULT_TX_RSBIT_THRESH 32
+
+#define SXE_ALIGN               128
+#define SXE_RX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxe_rx_data_desc_u))
+#define SXE_TX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxe_tx_data_desc_u))
+
+#define RTE_SXE_REGISTER_POLL_WAIT_10_MS  10
+
+typedef union sxe_tx_data_desc sxe_tx_data_desc_u;
+typedef struct sxe_rx_buffer   sxe_rx_buffer_s;
+typedef union sxe_rx_data_desc sxe_rx_data_desc_u;
+typedef struct sxe_tx_queue    sxe_tx_queue_s;
+typedef struct sxe_rx_queue    sxe_rx_queue_s;
+
+/* Hardware TX context descriptor layout. */
+struct sxe_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
+				u16 *rs_thresh, u16 *free_thresh,
+				const struct rte_eth_txconf *tx_conf);
+
+sxe_tx_queue_s * __rte_cold sxe_tx_queue_alloc(
+					struct rte_eth_dev *dev,
+					u16 queue_idx,
+					u16 ring_depth,
+					u32 socket_id);
+
+s32 __rte_cold sxe_tx_queue_start(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 __rte_cold sxe_tx_queue_stop(struct rte_eth_dev *dev, u16 queue_id);
+
+void sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+/* Queue release hooks: the ethdev callback signature differs per release. */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void sxe_rx_queue_release(void *rxq);
+
+#else
+void __rte_cold sxe_rx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx);
+#endif
+
+s32 sxe_rx_queue_start(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 sxe_rx_queue_stop(struct rte_eth_dev *dev, u16 queue_id);
+
+void sxe_rx_queue_init(bool rx_batch_alloc_allowed,
+				sxe_rx_queue_s *rxq);
+
+void sxe_rx_queue_free(sxe_rx_queue_s *rxq);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+u32 sxe_rx_queue_count(struct rte_eth_dev *dev, u16 queue_id);
+
+#else
+u32 sxe_rx_queue_count(void *rx_queue);
+#endif
+
+s32 sxe_mq_mode_check(struct rte_eth_dev *dev);
+
+void sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void sxe_queues_free(struct rte_eth_dev *dev);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxe_tx_queue_release(void *txq);
+
+#else
+void __rte_cold sxe_tx_queue_release(struct rte_eth_dev *dev,
+					u16 queue_idx);
+#endif
+
+void sxe_multi_queue_tx_configure(struct rte_eth_dev *dev);
+
+void sxe_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_txq_info *q_info);
+
+u16 sxe_pkts_simple_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+u16 sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   u16 pkts_num);
+#endif
+
+u16 sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+u16 sxe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+int sxe_tx_descriptor_status(void *tx_queue, u16 offset);
+
+/* Rate-limit setter: tx_rate width differs per DPDK release. */
+#if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
+					u16 queue_idx, u16 tx_rate);
+
+#else
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
+					u16 queue_idx, u32 tx_rate);
+#endif
+
+const struct sxe_txq_ops *sxe_tx_default_ops_get(void);
+
+s32 __rte_cold sxe_rx_queue_mbufs_alloc(sxe_rx_queue_s *rxq);
+
+void __rte_cold sxe_tx_queue_free(sxe_tx_queue_s *txq);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev);
+
+#endif
+
+void __rte_cold sxe_rx_queue_mbufs_free(sxe_rx_queue_s *rxq);
+
+#endif /* __SXE_QUEUE_H__ */
diff --git a/drivers/net/sxe/pf/sxe_rx.c b/drivers/net/sxe/pf/sxe_rx.c
new file mode 100644
index 0000000000..febd9fc634
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_rx.c
@@ -0,0 +1,1567 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#include "sxe_ethdev.h"
+#endif
+
+#include "sxe.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_queue.h"
+#include "sxe_offload.h"
+#include "sxe_dcb.h"
+#include "sxe_queue_common.h"
+#include "sxe_vf.h"
+#include "sxe_errno.h"
+#include "sxe_irq.h"
+#include "sxe_ethdev.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#endif
+#include "sxe_rx_common.h"
+
+#define SXE_LRO_HDR_SIZE				128
+
+#define SXE_PACKET_TYPE_ETHER				0x00
+#define SXE_PACKET_TYPE_IPV4				0x01
+#define SXE_PACKET_TYPE_IPV4_TCP			0x11
+#define SXE_PACKET_TYPE_IPV4_UDP			0x21
+#define SXE_PACKET_TYPE_IPV4_SCTP			0x41
+#define SXE_PACKET_TYPE_IPV4_EXT			0x03
+#define SXE_PACKET_TYPE_IPV4_EXT_TCP			0x13
+#define SXE_PACKET_TYPE_IPV4_EXT_UDP			0x23
+#define SXE_PACKET_TYPE_IPV4_EXT_SCTP			0x43
+#define SXE_PACKET_TYPE_IPV6				0x04
+#define SXE_PACKET_TYPE_IPV6_TCP			0x14
+#define SXE_PACKET_TYPE_IPV6_UDP			0x24
+#define SXE_PACKET_TYPE_IPV6_SCTP			0x44
+#define SXE_PACKET_TYPE_IPV6_EXT			0x0C
+#define SXE_PACKET_TYPE_IPV6_EXT_TCP			0x1C
+#define SXE_PACKET_TYPE_IPV6_EXT_UDP			0x2C
+#define SXE_PACKET_TYPE_IPV6_EXT_SCTP			0x4C
+#define SXE_PACKET_TYPE_IPV4_IPV6			0x05
+#define SXE_PACKET_TYPE_IPV4_IPV6_TCP			0x15
+#define SXE_PACKET_TYPE_IPV4_IPV6_UDP			0x25
+#define SXE_PACKET_TYPE_IPV4_IPV6_SCTP			0x45
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6			0x07
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_TCP		0x17
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_UDP		0x27
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP		0x47
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT			0x0D
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT_TCP		0x1D
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT_UDP		0x2D
+#define SXE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP		0x4D
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT		0x0F
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP		0x1F
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP		0x2F
+#define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP		0x4F
+
+#define SXE_PACKET_TYPE_NVGRE                   0x00
+#define SXE_PACKET_TYPE_NVGRE_IPV4              0x01
+#define SXE_PACKET_TYPE_NVGRE_IPV4_TCP          0x11
+#define SXE_PACKET_TYPE_NVGRE_IPV4_UDP          0x21
+#define SXE_PACKET_TYPE_NVGRE_IPV4_SCTP         0x41
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT          0x03
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP      0x13
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP      0x23
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP     0x43
+#define SXE_PACKET_TYPE_NVGRE_IPV6              0x04
+#define SXE_PACKET_TYPE_NVGRE_IPV6_TCP          0x14
+#define SXE_PACKET_TYPE_NVGRE_IPV6_UDP          0x24
+#define SXE_PACKET_TYPE_NVGRE_IPV6_SCTP         0x44
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT          0x0C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP      0x1C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP      0x2C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP     0x4C
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6         0x05
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP     0x15
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP     0x25
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT     0x0D
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0x1D
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0x2D
+
+#define SXE_PACKET_TYPE_VXLAN                   0x80
+#define SXE_PACKET_TYPE_VXLAN_IPV4              0x81
+#define SXE_PACKET_TYPE_VXLAN_IPV4_TCP          0x91
+#define SXE_PACKET_TYPE_VXLAN_IPV4_UDP          0xA1
+#define SXE_PACKET_TYPE_VXLAN_IPV4_SCTP         0xC1
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT          0x83
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP      0x93
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP      0xA3
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP     0xC3
+#define SXE_PACKET_TYPE_VXLAN_IPV6              0x84
+#define SXE_PACKET_TYPE_VXLAN_IPV6_TCP          0x94
+#define SXE_PACKET_TYPE_VXLAN_IPV6_UDP          0xA4
+#define SXE_PACKET_TYPE_VXLAN_IPV6_SCTP         0xC4
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT          0x8C
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP      0x9C
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP      0xAC
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP     0xCC
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6         0x85
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP     0x95
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP     0xA5
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT     0x8D
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0x9D
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0xAD
+
+/* SXE current supported message types */
+/* Lookup table translating the hw descriptor's ptype bits
+ * (SXE_PACKET_TYPE_* indices defined above) into rte_mbuf
+ * packet_type flag combinations for non-tunnelled packets.
+ * Cache-aligned because it is read on the hot rx path.
+ */
+const u32 sxe_ptype_table[SXE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+	[SXE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
+	[SXE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4,
+	[SXE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT,
+	[SXE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6,
+	[SXE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[SXE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[SXE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+	RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+		RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+/* Lookup table for tunnelled (NVGRE / VXLAN) packets: translates the
+ * hw descriptor ptype bits (SXE_PACKET_TYPE_NVGRE_* / _VXLAN_*
+ * indices above) into rte_mbuf packet_type flag combinations.
+ * Cache-aligned because it is read on the hot rx path.
+ */
+const u32 sxe_ptype_table_tn[SXE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
+	[SXE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER,
+	[SXE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+	[SXE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+		RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+		RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+		RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+		RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+		RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+		RTE_PTYPE_INNER_L4_UDP,
+
+	[SXE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+	[SXE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT,
+	[SXE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+	[SXE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
+		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+	[SXE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+		RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
+
+/* Fill the rte_mbuf header fields common to all rx paths from a
+ * completed write-back descriptor: port, vlan tci, offload flags,
+ * packet_type and the RSS / flow-director hash.
+ *
+ * @rxq:      receive queue the descriptor belongs to
+ * @mbuf:     mbuf being completed (head segment for multi-seg packets)
+ * @desc:     by-value copy of the rx descriptor
+ * @pkt_info: lower dword of the descriptor (packet type bits)
+ * @staterr:  status/error dword of the descriptor
+ */
+void sxe_rx_mbuf_common_header_fill(
+					sxe_rx_queue_s *rxq,
+					struct rte_mbuf *mbuf,
+					volatile union sxe_rx_data_desc desc,
+					u32 pkt_info, u32 staterr)
+{
+	u64 pkt_flags;
+	u64 vlan_flags = rxq->vlan_flags;
+
+	LOG_DEBUG("port_id=%u, rxq=%u, desc.lower=0x%"SXE_PRIX64", upper=0x%"SXE_PRIX64","
+			"pkt_info=0x%x, staterr=0x%x",
+			rxq->port_id, rxq->queue_id,
+			rte_le_to_cpu_64(desc.read.pkt_addr),
+			rte_le_to_cpu_64(desc.read.hdr_addr),
+			pkt_info, staterr);
+
+	mbuf->port = rxq->port_id;
+
+	mbuf->vlan_tci = rte_le_to_cpu_16(desc.wb.upper.vlan);
+
+	/* Derive mbuf offload flags from descriptor status, error and
+	 * packet-type information.
+	 */
+	pkt_flags = sxe_rx_desc_status_to_pkt_flags(staterr, vlan_flags);
+	pkt_flags |= sxe_rx_desc_error_to_pkt_flags(staterr);
+	pkt_flags |= sxe_rx_desc_pkt_info_to_pkt_flags((u16)pkt_info);
+
+	/* Track hw-reported checksum failures in the queue statistics. */
+	if (pkt_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)) {
+		rxq->rx_stats.csum_err++;
+		LOG_WARN("pkt_flags:0x%"SXE_PRIX64" rx checksum error",
+				pkt_flags);
+	}
+
+	mbuf->ol_flags = pkt_flags;
+	mbuf->packet_type =
+		sxe_rxd_pkt_info_to_pkt_type(pkt_info,
+						rxq->pkt_type_mask);
+
+	/* RSS hash and flow-director result share the same descriptor
+	 * dword; which one is valid depends on the offload flags.
+	 */
+	if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) {
+		mbuf->hash.rss = rte_le_to_cpu_32(
+				desc.wb.lower.hi_dword.rss);
+	} else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
+		mbuf->hash.fdir.hash = rte_le_to_cpu_16(
+				desc.wb.lower.hi_dword.csum_ip.csum) &
+				SXE_SAMPLE_HASH_MASK;
+		mbuf->hash.fdir.id = rte_le_to_cpu_16(
+				desc.wb.lower.hi_dword.csum_ip.ip_id);
+	}
+
+	return;
+}
+
+/* Prefetch the rx resources that will be touched next: the upcoming
+ * mbuf always, and every fourth index also the descriptor and buffer
+ * ring entries themselves.
+ */
+static inline void sxe_rx_resource_prefetch(u16 next_idx,
+				sxe_rx_buffer_s *buf_ring,
+				volatile union sxe_rx_data_desc *desc_ring)
+{
+	sxe_rx_buffer_s *next_buf = &buf_ring[next_idx];
+
+	/* Warm the cache with the mbuf processed on the next iteration. */
+	rte_sxe_prefetch(next_buf->mbuf);
+
+	if (!(next_idx & 0x3)) {
+		rte_sxe_prefetch(&desc_ring[next_idx]);
+		rte_sxe_prefetch(next_buf);
+	}
+}
+
+/* Scalar receive burst entry point; thin wrapper around the
+ * version-shared __sxe_pkts_recv() helper in sxe_rx_common.
+ * Returns the number of packets stored in @rx_pkts.
+ */
+u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num)
+{
+	return __sxe_pkts_recv(rx_queue, rx_pkts, pkts_num);
+}
+
+/* Hand previously completed mbufs over to the caller's array.
+ * Returns the number of packets actually delivered (bounded by both
+ * @pkts_num and the number of mbufs staged in the completed ring).
+ */
+static inline u16 sxe_ret_pkts_to_user(sxe_rx_queue_s *rxq,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	struct rte_mbuf **done_ring = &rxq->completed_ring[rxq->next_ret_pkg];
+	u16 ret_num = (u16)RTE_MIN(pkts_num, rxq->completed_pkts_num);
+	u16 idx;
+
+	for (idx = 0; idx < ret_num; idx++) {
+		rx_pkts[idx] = done_ring[idx];
+	}
+
+	/* Consume the delivered entries from the completed ring. */
+	rxq->completed_pkts_num = (u16)(rxq->completed_pkts_num - ret_num);
+	rxq->next_ret_pkg = (u16)(rxq->next_ret_pkg + ret_num);
+
+	return ret_num;
+}
+
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD SXE: LOOK_AHEAD must be 8\n"
+#endif
+
+/* Scan up to RTE_PMD_SXE_MAX_RX_BURST descriptors from
+ * rxq->processing_idx in groups of LOOK_AHEAD, and stage every
+ * completed mbuf in rxq->completed_ring.
+ *
+ * Returns the number of descriptors whose DD (descriptor done) bit
+ * was set, i.e. the number of mbufs staged; the caller advances
+ * processing_idx by this amount.
+ */
+static inline u16 sxe_rx_hw_ring_scan(sxe_rx_queue_s *rxq)
+{
+	volatile union sxe_rx_data_desc *rx_desc;
+	sxe_rx_buffer_s *rx_buf;
+	struct rte_mbuf *cur_mb;
+	u16 num_dd_set;
+	u32 status_arr[LOOK_AHEAD];
+	u32 pkt_info[LOOK_AHEAD];
+	u16 i, j;
+	u32 status;
+	u16 done_num = 0;
+	u16 pkt_len;
+
+	/* Obtain the desc and rx buff to be processed  */
+	rx_desc = &rxq->desc_ring[rxq->processing_idx];
+	rx_buf = &rxq->buffer_ring[rxq->processing_idx];
+
+	status = rx_desc->wb.upper.status_error;
+
+	/* Fast exit: nothing completed at the current position. */
+	if (!(status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+		goto l_end;
+	}
+
+	for (i = 0; i < RTE_PMD_SXE_MAX_RX_BURST;
+		i += LOOK_AHEAD, rx_desc += LOOK_AHEAD, rx_buf += LOOK_AHEAD) {
+		/* Snapshot the status words of the whole look-ahead group. */
+		for (j = 0; j < LOOK_AHEAD; j++) {
+			status_arr[j] = rte_le_to_cpu_32(
+				rx_desc[j].wb.upper.status_error);
+		}
+
+		/* Order the status reads before reading descriptor payload. */
+		rte_smp_rmb();
+
+		/* Count the leading run of descriptors with DD set. */
+		for (num_dd_set = 0; num_dd_set < LOOK_AHEAD &&
+			(status_arr[num_dd_set] & SXE_RXDADV_STAT_DD);
+			num_dd_set++) {
+			;
+		}
+
+		for (j = 0; j < num_dd_set; j++) {
+			pkt_info[j] = rte_le_to_cpu_32(
+				rx_desc[j].wb.lower.lo_dword.data);
+		}
+
+		done_num += num_dd_set;
+
+		/* Populate the mbuf headers of the completed descriptors. */
+		for (j = 0; j < num_dd_set; ++j) {
+			cur_mb = rx_buf[j].mbuf;
+
+			pkt_len = (u16)(rte_le_to_cpu_16(rx_desc[j].wb.upper.length) -
+							rxq->crc_len);
+			cur_mb->pkt_len = pkt_len;
+			cur_mb->data_len = pkt_len;
+			sxe_rx_mbuf_common_header_fill(rxq, cur_mb, rx_desc[j],
+						pkt_info[j], status_arr[j]);
+		}
+
+		/* Stage the whole group; entries beyond num_dd_set are
+		 * harmless since the caller only consumes done_num mbufs.
+		 */
+		for (j = 0; j < LOOK_AHEAD; ++j) {
+			rxq->completed_ring[i + j] = rx_buf[j].mbuf;
+		}
+
+		/* A partial group means hw has no more completed descs. */
+		if (num_dd_set != LOOK_AHEAD) {
+			break;
+		}
+	}
+
+	/* Detach the staged mbufs from the buffer ring. */
+	for (i = 0; i < done_num; ++i) {
+		rxq->buffer_ring[rxq->processing_idx + i].mbuf = NULL;
+	}
+
+l_end:
+	return done_num;
+}
+
+/* Bulk-allocate rxq->batch_alloc_size mbufs from the queue mempool
+ * and re-arm the descriptors of the batch ending at
+ * rxq->batch_alloc_trigger.
+ *
+ * @reset_mbuf: when true, also reinitialize mbuf->port on each buffer
+ *
+ * Returns 0 on success, -ENOMEM when the mempool cannot supply a
+ * full batch (in which case nothing is modified).
+ */
+STATIC inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
+							bool reset_mbuf)
+{
+	volatile union sxe_rx_data_desc *desc_ring;
+	sxe_rx_buffer_s *buf_ring;
+	struct rte_mbuf *mbuf;
+	u16 alloc_idx;
+	__le64 dma_addr;
+	s32 diag, i;
+	s32 ret = 0;
+
+	/* The batch spans [trigger - size + 1, trigger]. */
+	alloc_idx = rxq->batch_alloc_trigger - (rxq->batch_alloc_size - 1);
+	buf_ring = &rxq->buffer_ring[alloc_idx];
+
+	LOG_DEBUG("port_id=%u, rxq=%u, alloc_idx=%u, "
+			"batch_alloc_trigger=%u, batch_alloc_size=%u\n",
+			rxq->port_id, rxq->queue_id, alloc_idx,
+			rxq->batch_alloc_trigger, rxq->batch_alloc_size);
+
+	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)buf_ring,
+				    rxq->batch_alloc_size);
+	if (unlikely(diag != 0)) {
+		LOG_DEBUG("port_id=%u, rxq=%u buffer alloc failed\n",
+				rxq->port_id, rxq->queue_id);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	/* Re-arm each descriptor of the batch with its buffer's DMA
+	 * address.
+	 */
+	desc_ring = &rxq->desc_ring[alloc_idx];
+	for (i = 0; i < rxq->batch_alloc_size; ++i) {
+		mbuf = buf_ring[i].mbuf;
+		if (reset_mbuf) {
+			mbuf->port = rxq->port_id;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		desc_ring[i].read.hdr_addr = 0;
+		desc_ring[i].read.pkt_addr = dma_addr;
+	}
+
+	/* Advance the trigger; wrap back to the first batch boundary
+	 * once the end of the ring is reached.
+	 */
+	rxq->batch_alloc_trigger = rxq->batch_alloc_trigger + rxq->batch_alloc_size;
+	if (rxq->batch_alloc_trigger >= rxq->ring_depth) {
+		rxq->batch_alloc_trigger = rxq->batch_alloc_size - 1;
+	}
+
+l_end:
+	return ret;
+}
+
+/* Burst receive fast path (pkts_num <= RTE_PMD_SXE_MAX_RX_BURST):
+ * first drain mbufs already staged in the completed ring; otherwise
+ * scan the hw ring, refill buffers in batches, and deliver the newly
+ * completed packets to the caller.
+ *
+ * Returns the number of packets stored in @rx_pkts.
+ */
+static inline u16 sxe_burst_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	sxe_rx_queue_s *rxq = (sxe_rx_queue_s *)rx_queue;
+	u16 done_num;
+
+	/* Leftovers from a previous scan satisfy the request directly. */
+	if (rxq->completed_pkts_num) {
+		done_num = sxe_ret_pkts_to_user(rxq, rx_pkts, pkts_num);
+		LOG_DEBUG("there are %u mbuf in completed ring "
+				"of queue[%u] return to user, done_num=%u",
+				rxq->completed_pkts_num,
+				rxq->queue_id, done_num);
+		goto l_end;
+	}
+
+	done_num = (u16)sxe_rx_hw_ring_scan(rxq);
+
+	rxq->next_ret_pkg = 0;
+	rxq->completed_pkts_num = done_num;
+	rxq->processing_idx = (u16)(rxq->processing_idx + done_num);
+
+	/* Refill once the scan has moved past the allocation trigger. */
+	if (rxq->processing_idx > rxq->batch_alloc_trigger) {
+		u16 alloced_idx = rxq->batch_alloc_trigger;
+
+		if (sxe_rx_bufs_batch_alloc(rxq, true) != 0) {
+			u32 i, j;
+
+			LOG_ERROR("rx mbuf alloc failed port_id=%u "
+					"queue_id=%u", (unsigned) rxq->port_id,
+					(u16)rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+				rxq->batch_alloc_size;
+
+			/* Roll the scan back: restore processing_idx and
+			 * put the staged mbufs back into the buffer ring
+			 * so nothing is lost or leaked.
+			 */
+			rxq->completed_pkts_num = 0;
+			rxq->processing_idx = (u16)(rxq->processing_idx - done_num);
+			for (i = 0, j = rxq->processing_idx; i < done_num; ++i, ++j) {
+				rxq->buffer_ring[j].mbuf = rxq->completed_ring[i];
+			}
+
+			done_num = 0;
+			goto l_end;
+		}
+
+		/* Publish the refilled descriptors to hw via the tail reg. */
+		rte_wmb();
+		SXE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, alloced_idx);
+	}
+
+	if (rxq->processing_idx >= rxq->ring_depth) {
+		rxq->processing_idx = 0;
+	}
+
+	if (rxq->completed_pkts_num) {
+		done_num = sxe_ret_pkts_to_user(rxq, rx_pkts, pkts_num);
+		LOG_DEBUG("there are %u mbuf in completed ring "
+				"of queue[%u] return to user, done_num=%u",
+				rxq->completed_pkts_num,
+				rxq->queue_id, done_num);
+	}
+
+l_end:
+	return done_num;
+}
+
+/* Receive callback for the batch-alloc path. Requests that fit into
+ * one burst go straight to the fast path; larger requests are split
+ * into RTE_PMD_SXE_MAX_RX_BURST-sized chunks.
+ *
+ * Returns the total number of packets stored in @rx_pkts.
+ */
+u16 sxe_batch_alloc_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	u16 total = 0;
+
+	if (unlikely(pkts_num == 0)) {
+		LOG_DEBUG("user need pkts = 0");
+		return 0;
+	}
+
+	if (likely(pkts_num <= RTE_PMD_SXE_MAX_RX_BURST)) {
+		return sxe_burst_pkts_recv(rx_queue, rx_pkts, pkts_num);
+	}
+
+	while (pkts_num) {
+		u16 burst = (u16)RTE_MIN(pkts_num, RTE_PMD_SXE_MAX_RX_BURST);
+		u16 recvd = sxe_burst_pkts_recv(rx_queue, &rx_pkts[total], burst);
+
+		total = (u16)(total + recvd);
+		pkts_num = (u16)(pkts_num - recvd);
+
+		/* A short burst means the ring is drained; stop early. */
+		if (recvd < burst) {
+			break;
+		}
+	}
+
+	return total;
+}
+
+/* Allocate replacement rx buffer(s) for the LRO receive path.
+ *
+ * Single mode (!batch_alloc): allocate one raw mbuf into *new_mbuf.
+ * Batch mode: once more than batch_alloc_size descriptors are held,
+ * refill a whole batch, advance the tail register and reduce
+ * *hold_num accordingly (*new_mbuf is left untouched).
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure (the failure
+ * is also counted in the port's rx_mbuf_alloc_failed statistic).
+ */
+static inline s32 sxe_lro_new_mbufs_alloc(sxe_rx_queue_s *rxq,
+					struct rte_mbuf **new_mbuf,
+					u16 *hold_num, bool batch_alloc)
+{
+	s32 ret = 0;
+
+	LOG_DEBUG("rxq[%u] %s alloc mem, current num_hold=%u",
+			rxq->queue_id, batch_alloc ? "batch" : "single", *hold_num);
+	if (!batch_alloc) {
+		*new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+		if (*new_mbuf == NULL) {
+			LOG_DEBUG("RX mbuf alloc failed "
+				"port_id=%u queue_id=%u",
+				rxq->port_id, rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->
+						rx_mbuf_alloc_failed++;
+			ret = -ENOMEM;
+			goto l_end;
+		}
+
+		(*new_mbuf)->data_off = RTE_PKTMBUF_HEADROOM;
+	} else if (*hold_num > rxq->batch_alloc_size) {
+		u16 next_rdt = rxq->batch_alloc_trigger;
+
+		if (!sxe_rx_bufs_batch_alloc(rxq, false)) {
+			/* Publish the refilled batch to hw. */
+			rte_wmb();
+			SXE_PCI_REG_WC_WRITE_RELAXED(
+						rxq->rdt_reg_addr,
+						next_rdt);
+
+			*hold_num -= rxq->batch_alloc_size;
+		} else {
+			LOG_DEBUG("RX bulk alloc failed "
+					"port_id=%u queue_id=%u",
+					rxq->port_id, rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->
+						rx_mbuf_alloc_failed++;
+			ret = -ENOMEM;
+			goto l_end;
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+/* Update one rx buffer slot after its mbuf has been consumed.
+ *
+ * Single mode (!batch_alloc): attach @new_mbuf to the slot and re-arm
+ * the descriptor with its DMA address.
+ * Batch mode: just detach the consumed mbuf; the slot is refilled
+ * later by the bulk allocator.
+ */
+static inline void sxe_rx_resource_update(sxe_rx_buffer_s *rx_buf,
+				volatile union sxe_rx_data_desc *cur_desc,
+				struct rte_mbuf *new_mbuf, bool batch_alloc)
+{
+	/* Fix: log the new mbuf pointer, not the descriptor address,
+	 * to match the "new_mbuf=%p" format string.
+	 */
+	LOG_DEBUG("%s update resource, new_mbuf=%p",
+				batch_alloc ? "batch" : "single", new_mbuf);
+
+	if (!batch_alloc) {
+		__le64 dma =
+		  rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mbuf));
+		rx_buf->mbuf = new_mbuf;
+		cur_desc->read.hdr_addr = 0;
+		cur_desc->read.pkt_addr = dma;
+	} else {
+		rx_buf->mbuf = NULL;
+	}
+}
+
+/* Determine the index of the next descriptor in a segment chain.
+ * When the descriptor carries an LRO aggregation count, hw encodes
+ * the next index in the NEXTP field; otherwise use the caller's
+ * sequential ring index.
+ */
+static inline u16 sxe_rx_next_idx_get(union sxe_rx_data_desc *desc,
+						u16 next_idx)
+{
+	u32 staterr = rte_le_to_cpu_32(desc->wb.upper.status_error);
+	u16 idx;
+
+	idx = sxe_lro_count(desc) ?
+		(u16)((staterr & SXE_RXDADV_NEXTP_MASK) >>
+			SXE_RXDADV_NEXTP_SHIFT) :
+		next_idx;
+
+	LOG_DEBUG("next idx = %u", idx);
+	return idx;
+}
+
+/* Account the current segment against the packet's head segment:
+ * start a new chain when there is no head yet, otherwise extend the
+ * existing chain's length and segment count.
+ */
+static inline void sxe_lro_first_seg_update(struct rte_mbuf **first_seg,
+						struct rte_mbuf *cur_mbuf,
+						u16 data_len)
+{
+	struct rte_mbuf *head = *first_seg;
+
+	if (head == NULL) {
+		*first_seg = cur_mbuf;
+		cur_mbuf->pkt_len = data_len;
+		cur_mbuf->nb_segs = 1;
+		return;
+	}
+
+	head->pkt_len += data_len;
+	head->nb_segs++;
+}
+
+/* Finalize a completed (possibly multi-segment) packet: fill the
+ * common header fields on the head segment and strip the CRC bytes
+ * that hw appended at the tail.
+ */
+static inline void sxe_mbuf_fields_process(struct rte_mbuf *first_seg,
+					sxe_rx_queue_s *rxq,
+					union sxe_rx_data_desc desc,
+					struct rte_mbuf *cur_mbuf,
+					u32 staterr)
+{
+	u32 pkt_info;
+
+	pkt_info = rte_le_to_cpu_32(desc.wb.lower.lo_dword.data);
+	sxe_rx_mbuf_common_header_fill(rxq, first_seg, desc,
+					pkt_info, staterr);
+
+	first_seg->pkt_len -= rxq->crc_len;
+	/* If the whole last segment is CRC (or less), drop that segment
+	 * and trim the remaining CRC bytes from the previous one.
+	 */
+	if (unlikely(cur_mbuf->data_len <= rxq->crc_len)) {
+		struct rte_mbuf *lp;
+
+		/* Walk to the segment preceding the last one. */
+		for (lp = first_seg; lp->next != cur_mbuf; lp = lp->next) {
+			;
+		}
+
+		first_seg->nb_segs--;
+		lp->data_len -= rxq->crc_len - cur_mbuf->data_len;
+		lp->next = NULL;
+		rte_pktmbuf_free_seg(cur_mbuf);
+	} else {
+		cur_mbuf->data_len -= rxq->crc_len;
+	}
+
+	/* Warm the cache with the packet data the app reads first. */
+	rte_packet_prefetch((u8 *)first_seg->buf_addr + first_seg->data_off);
+	return;
+}
+
+/* Receive loop for the LRO / scattered rx path. Walks descriptors
+ * from processing_idx, chaining segments through sc_buffer_ring until
+ * an EOP descriptor completes a packet, which is then delivered to
+ * the caller. Buffer refill is per-descriptor (single) or deferred to
+ * the bulk allocator (batch), selected by @batch_alloc.
+ *
+ * Returns the number of complete packets stored in @rx_pkts.
+ */
+static inline u16 sxe_lro_pkts_recv(void *rx_queue,
+			struct rte_mbuf **rx_pkts, u16 pkts_num,
+			bool batch_alloc)
+{
+	sxe_rx_queue_s *rxq = rx_queue;
+	volatile union sxe_rx_data_desc *desc_ring = rxq->desc_ring;
+	sxe_rx_buffer_s *buf_ring = rxq->buffer_ring;
+	sxe_rx_buffer_s *sc_buf_ring = rxq->sc_buffer_ring;
+	u16 cur_idx = rxq->processing_idx;
+	u16 done_num = 0;
+	u16 hold_num = rxq->hold_num;
+	u16 prev_idx = rxq->processing_idx; 
+	s32 err;
+
+	while (done_num < pkts_num) {
+		bool is_eop;
+		sxe_rx_buffer_s *rx_buf;
+		sxe_rx_buffer_s *sc_rx_buf;
+		sxe_rx_buffer_s *next_sc_rx_buf = NULL;
+		sxe_rx_buffer_s *next_rx_buf = NULL;
+		struct rte_mbuf *first_seg;
+		struct rte_mbuf *cur_mbuf;
+		struct rte_mbuf *new_mbuf = NULL;
+		union sxe_rx_data_desc desc_copy;
+		u16 data_len;
+		u16 next_idx;
+		volatile union sxe_rx_data_desc *cur_desc;
+		u32 staterr;
+
+next_desc:
+		cur_desc = &desc_ring[cur_idx];
+		staterr = rte_le_to_cpu_32(cur_desc->wb.upper.status_error);
+
+		/* Stop at the first descriptor hw has not completed. */
+		if (!(staterr & SXE_RXDADV_STAT_DD)) {
+			break;
+		}
+
+		/* Order the DD check before reading the descriptor body. */
+		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+		desc_copy = *cur_desc;
+
+		LOG_DEBUG("port_id=%u queue_id=%u cur_idx=%u "
+				"staterr=0x%x data_len=%u",
+				rxq->port_id, rxq->queue_id, cur_idx, staterr,
+				rte_le_to_cpu_16(desc_copy.wb.upper.length));
+
+		err = sxe_lro_new_mbufs_alloc(rxq, &new_mbuf, &hold_num, batch_alloc);
+		if (err) {
+			LOG_ERROR("mbuf %s alloc failed",
+					batch_alloc ? "batch" : "single");
+			break;
+		}
+
+		/* One more consumed descriptor now awaits refill. */
+		hold_num++;
+		rx_buf = &buf_ring[cur_idx];
+		is_eop = !!(staterr & SXE_RXDADV_STAT_EOP);
+
+		next_idx = cur_idx + 1;
+		if (next_idx == rxq->ring_depth) {
+			next_idx = 0;
+		}
+
+		sxe_rx_resource_prefetch(next_idx, buf_ring, desc_ring);
+
+		cur_mbuf = rx_buf->mbuf;
+
+		sxe_rx_resource_update(rx_buf, cur_desc, new_mbuf, batch_alloc);
+
+		data_len = rte_le_to_cpu_16(desc_copy.wb.upper.length);
+		cur_mbuf->data_len = data_len;
+
+		/* Not the last segment: find where the chain continues. */
+		if (!is_eop) {
+			u16 nextp_id = sxe_rx_next_idx_get(&desc_copy, next_idx);
+
+			next_sc_rx_buf = &sc_buf_ring[nextp_id];
+			next_rx_buf = &buf_ring[nextp_id];
+			rte_sxe_prefetch(next_rx_buf);
+		}
+
+		/* Pick up any partially assembled chain parked at this idx. */
+		sc_rx_buf = &sc_buf_ring[cur_idx];
+		first_seg = sc_rx_buf->mbuf;
+		sc_rx_buf->mbuf = NULL;
+
+		sxe_lro_first_seg_update(&first_seg, cur_mbuf, data_len);
+
+		prev_idx = cur_idx;
+		cur_idx = next_idx;
+
+		/* More segments follow: park the chain head and continue. */
+		if (!is_eop && next_rx_buf) {
+			cur_mbuf->next = next_rx_buf->mbuf;
+			next_sc_rx_buf->mbuf = first_seg;
+			goto next_desc;
+		}
+
+		sxe_mbuf_fields_process(first_seg, rxq, desc_copy, cur_mbuf, staterr);
+
+		rx_pkts[done_num++] = first_seg;
+	}
+
+	rxq->processing_idx = cur_idx;
+
+	/* Single mode: once enough descriptors are held, return them to
+	 * hw by advancing the tail register to the last processed index.
+	 */
+	if (!batch_alloc && hold_num > rxq->batch_alloc_size) {
+		LOG_DEBUG("port_id=%u queue_id=%u rx_tail=%u "
+			   "num_hold=%u done_num=%u",
+			   rxq->port_id, rxq->queue_id,
+			   cur_idx, hold_num, done_num);
+
+		rte_wmb();
+		SXE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_idx);
+		hold_num = 0;
+	}
+
+	rxq->hold_num = hold_num;
+	return done_num;
+}
+
+u16 sxe_batch_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	/* LRO receive entry point with bulk mbuf allocation enabled. */
+	u16 done_num = sxe_lro_pkts_recv(rx_queue, rx_pkts, pkts_num, true);
+
+	return done_num;
+}
+
+u16 sxe_single_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num)
+{
+	/* LRO receive entry point using one-at-a-time mbuf allocation. */
+	u16 done_num = sxe_lro_pkts_recv(rx_queue, rx_pkts, pkts_num, false);
+
+	return done_num;
+}
+
+void __rte_cold sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	/* Rx burst function selection is delegated to the common layer. */
+	__sxe_rx_function_set(dev, rx_batch_alloc_allowed, rx_vec_allowed);
+}
+
+#ifdef ETH_DEV_RX_DESC_DONE
+/*
+ * Legacy rx_descriptor_done callback: return non-zero when the
+ * descriptor at processing_idx + offset has its DD (descriptor done)
+ * bit set, 0 otherwise.  An offset beyond the ring depth returns 0.
+ */
+s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	volatile union sxe_rx_data_desc *desc;
+	sxe_rx_queue_s *rxq = rx_queue;
+	u32 index;
+	s32 is_done = 0;
+
+	LOG_DEBUG("check rx queue[%u], offset desc[%u]\n",
+			rxq->queue_id, offset);
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("offset=%u >= ring depth=%u\n",
+				offset, rxq->ring_depth);
+		goto l_end;
+	}
+
+	/* Wrap the index around the ring boundary. */
+	index = rxq->processing_idx + offset;
+	if (index >= rxq->ring_depth) {
+		index -= rxq->ring_depth;
+	}
+
+	desc = &rxq->desc_ring[index];
+	is_done = !!(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD));
+
+l_end:
+	return is_done;
+}
+#endif
+
+/*
+ * Report the state of the rx descriptor at the given offset:
+ * RTE_ETH_RX_DESC_AVAIL, RTE_ETH_RX_DESC_DONE, RTE_ETH_RX_DESC_UNAVAIL,
+ * or -EINVAL when offset lies outside the ring.
+ */
+s32 sxe_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	int ret = RTE_ETH_RX_DESC_AVAIL;
+	sxe_rx_queue_s *rxq = rx_queue;
+	volatile u32 *status;
+	u32 hold_num, desc;
+
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("rx queue[%u] get desc status err,"
+			"offset=%u >= ring_depth=%u\n",
+			rxq->queue_id, offset, rxq->ring_depth);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Descriptors held back for refill are unavailable.  The x86
+	 * vector path tracks the held count in realloc_num instead of
+	 * hold_num; the dangling else below binds to the assignment that
+	 * follows the #endif pair.
+	 */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	if (rxq->is_using_sse)
+		hold_num = rxq->realloc_num;
+	else
+#endif
+#endif
+
+		hold_num = rxq->hold_num;
+	if (offset >= rxq->ring_depth - hold_num) {
+		ret = RTE_ETH_RX_DESC_UNAVAIL;
+		goto l_end;
+	}
+
+	/* Wrap the ring index. */
+	desc = rxq->processing_idx + offset;
+	if (desc >= rxq->ring_depth) {
+		desc -= rxq->ring_depth;
+	}
+
+	status = &rxq->desc_ring[desc].wb.upper.status_error;
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)) {
+		ret =  RTE_ETH_RX_DESC_DONE;
+	}
+
+l_end:
+	LOG_DEBUG("rx queue[%u] get desc status=%d\n",rxq->queue_id, ret);
+	return ret;
+}
+
+s32 __rte_cold sxe_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 desc_num,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	s32 ret;
+	/* Bundle the arguments for the queue-common setup helper. */
+	struct rx_setup setup = {
+		.dev = dev,
+		.queue_idx = queue_idx,
+		.desc_num = desc_num,
+		.socket_id = socket_id,
+		.mp = mp,
+		.rx_conf = rx_conf,
+		.reg_base_addr = hw->reg_base_addr,
+		.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed,
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = __sxe_rx_queue_setup(&setup, false);
+	if (ret)
+		LOG_ERROR_BDF("rx queue setup fail.(err:%d)", ret);
+
+	return ret;
+}
+
+/*
+ * Program the rx filter control register: read-modify-write so the
+ * existing mode bits are kept while forcing broadcast accept (BAM),
+ * pause-frame discard (DPF) and MAC control frame pass-through (PMCF).
+ */
+static void sxe_rx_mode_configure(struct sxe_hw *hw)
+{
+	u32 flt_ctrl;
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	/* Log in hex, matching the write trace below (was %u). */
+	LOG_DEBUG("read flt_ctrl=0x%x", flt_ctrl);
+	flt_ctrl |= SXE_FCTRL_BAM;
+	flt_ctrl |= SXE_FCTRL_DPF;
+	flt_ctrl |= SXE_FCTRL_PMCF;
+	LOG_DEBUG("write flt_ctrl=0x%x", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+	return;
+}
+
+static inline void
+	sxe_rx_queue_offload_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	bool keep_crc = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC);
+	sxe_rx_queue_s *queue;
+	u16 idx;
+
+	/* Propagate device-level rx offload settings to every queue. */
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		queue = dev->data->rx_queues[idx];
+
+		/* With KEEP_CRC the 4-byte FCS stays in the packet. */
+		queue->crc_len = keep_crc ? RTE_ETHER_CRC_LEN : 0;
+
+		/* vlan strip is forced on, mirroring sxe_rx_offload_configure. */
+		queue->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+}
+
+/*
+ * Apply device-level rx offload configuration: CRC stripping, mtu,
+ * forced vlan strip, scattered rx and L3/L4 checksum offload.
+ */
+static inline void
+	sxe_rx_offload_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+	bool crc_strp_on;
+	bool ip_csum_offload;
+
+	/* KEEP_CRC requested means hardware must not strip the CRC. */
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		crc_strp_on = false;
+	} else {
+		crc_strp_on = true;
+	}
+	sxe_hw_rx_dma_ctrl_init(hw, crc_strp_on);
+
+	/* Older DPDK releases carry the frame limit in max_rx_pkt_len
+	 * when the jumbo offload is set; newer ones expose mtu directly.
+	 */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		adapter->mtu = rx_conf->max_rx_pkt_len - SXE_ETH_OVERHEAD;
+	}
+#else
+	if (dev->data->mtu > RTE_ETHER_MTU) {
+		adapter->mtu = dev->data->mtu;
+	}
+#endif
+
+	/* NOTE(review): vlan strip is forced on unconditionally here --
+	 * confirm this matches the advertised offload capabilities.
+	 */
+	rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+		dev->data->scattered_rx = 1;
+	}
+
+	sxe_hw_rx_udp_frag_checksum_disable(hw);
+
+	/* DEV_RX_OFFLOAD_CHECKSUM is the legacy composite checksum mask;
+	 * presumably supplied by the compat header on newer DPDK -- verify.
+	 */
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+		ip_csum_offload = true;
+	} else {
+		ip_csum_offload = false;
+	}
+
+	sxe_hw_rx_ip_checksum_offload_switch(hw, ip_csum_offload);
+
+	sxe_rx_queue_offload_configure(dev);
+
+	return;
+}
+
+/*
+ * Program the hardware attributes of one rx ring: the ring is disabled,
+ * its descriptor base/length, buffer sizes, drop policy and thresholds
+ * are written, then it is re-enabled.  Scattered rx is forced when a
+ * max-size frame plus two vlan tags cannot fit the rounded buffer size.
+ */
+static inline void sxe_rx_queue_attr_configure(
+					struct rte_eth_dev *dev,
+					sxe_rx_queue_s *queue)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	u32 srrctl_size;
+	u64 desc_dma_addr;
+	u32 desc_mem_len;
+	u8 reg_idx;
+	u16 buf_size;
+	u32 frame_size = SXE_GET_FRAME_SIZE(dev);
+	reg_idx = queue->reg_idx;
+
+	/* Quiesce the ring before reprogramming it. */
+	sxe_hw_rx_ring_switch(hw, reg_idx, false);
+
+	desc_mem_len = queue->ring_depth * sizeof(union sxe_rx_data_desc);
+	desc_dma_addr = queue->base_addr;
+	sxe_hw_rx_ring_desc_configure(hw, desc_mem_len,
+						desc_dma_addr, reg_idx);
+
+	/* Usable data room of one mbuf from this queue's pool. */
+	buf_size = (u16)(rte_pktmbuf_data_room_size(queue->mb_pool) -
+		RTE_PKTMBUF_HEADROOM);
+
+	sxe_hw_rx_rcv_ctl_configure(hw, reg_idx,
+			SXE_LRO_HDR_SIZE, buf_size);
+
+	if (queue->drop_en) {
+		sxe_hw_rx_drop_switch(hw, reg_idx, true);
+	}
+
+	sxe_hw_rx_desc_thresh_set(hw, reg_idx);
+
+	/* Round buf_size down to the SRRCTL BSIZEPKT granularity so the
+	 * software check below matches what the hardware will use.
+	 */
+	srrctl_size = ((buf_size >> SXE_SRRCTL_BSIZEPKT_SHIFT) &
+				SXE_SRRCTL_BSIZEPKT_MASK);
+
+	buf_size = (u16) ((srrctl_size & SXE_SRRCTL_BSIZEPKT_MASK) <<
+				SXE_SRRCTL_BSIZEPKT_SHIFT);
+
+	if (frame_size + 2 * SXE_VLAN_TAG_SIZE > buf_size) {
+		dev->data->scattered_rx = 1;
+	}
+
+	sxe_hw_rx_ring_switch(hw, reg_idx, true);
+	return;
+}
+
+static inline void sxe_rx_queue_configure(struct rte_eth_dev *dev)
+{
+	sxe_rx_queue_s **rxq_list = (sxe_rx_queue_s **)dev->data->rx_queues;
+	u16 nb_queues = dev->data->nb_rx_queues;
+	u16 idx;
+
+	/* Program hardware ring attributes for every configured rx queue. */
+	for (idx = 0; idx < nb_queues; idx++)
+		sxe_rx_queue_attr_configure(dev, rxq_list[idx]);
+}
+
+static u32 sxe_lro_max_desc_get(struct rte_mempool *pool)
+{
+	struct rte_pktmbuf_pool_private *priv = rte_mempool_get_priv(pool);
+	u16 seg_count = RTE_IPV4_MAX_PKT_LEN /
+			(priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+	u8 desc_code;
+
+	/* Map the number of pool buffers a max-size IPv4 packet spans to
+	 * the hardware LROCTL max-descriptor encoding.
+	 */
+	if (seg_count >= 16)
+		desc_code = SXE_LROCTL_MAXDESC_16;
+	else if (seg_count >= 8)
+		desc_code = SXE_LROCTL_MAXDESC_8;
+	else if (seg_count >= 4)
+		desc_code = SXE_LROCTL_MAXDESC_4;
+	else
+		desc_code = SXE_LROCTL_MAXDESC_1;
+
+	return desc_code;
+}
+
+/*
+ * Validate the requested LRO configuration: LRO cannot coexist with
+ * KEEP_CRC and must be advertised in the device rx offload caps.
+ * Sets *lro_capable when the capability is present.
+ * NOTE(review): the return value of dev_infos_get is ignored -- on
+ * failure dev_info stays zeroed and LRO is treated as unsupported.
+ */
+static s32 sxe_lro_sanity_check(struct rte_eth_dev *dev, bool *lro_capable)
+{
+	s32 ret = 0;
+	struct rte_eth_dev_info dev_info = { 0 };
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+
+	if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
+		(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		PMD_LOG_CRIT(INIT, "lro can't be enabled when HW CRC "
+				"is disabled");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
+		*lro_capable = true;
+	}
+
+	if (!(*lro_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		PMD_LOG_CRIT(INIT, "lro is requested on HW that doesn't "
+				   "support it");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+static void sxe_lro_hw_configure(struct sxe_hw *hw, bool lro_capable,
+					struct rte_eth_rxmode *rx_conf)
+{
+	/* LRO goes on only when both capable and requested. */
+	bool lro_on = lro_capable &&
+		((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) != 0);
+
+	sxe_hw_rx_lro_ack_switch(hw, false);
+	sxe_hw_rx_dma_lro_ctrl_set(hw);
+
+	/* NFS filtering is turned off before enabling LRO -- presumably
+	 * the two features conflict in hardware; verify.
+	 */
+	if (lro_on)
+		sxe_hw_rx_nfs_filter_disable(hw);
+
+	sxe_hw_rx_lro_enable(hw, lro_on);
+}
+
+static void sxe_lro_irq_configure(struct sxe_hw *hw, u16 reg_idx,
+						u16 irq_idx)
+{
+	/* Default per-queue interrupt throttling interval, then map the
+	 * ring to its interrupt vector.
+	 */
+	sxe_hw_ring_irq_interval_set(hw, reg_idx,
+		SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT));
+	sxe_hw_ring_irq_map(hw, false, reg_idx, irq_idx);
+}
+
+static void sxe_lro_hw_queue_configure(struct rte_eth_dev *dev,
+						struct sxe_hw *hw)
+{
+	u16 queue_id;
+
+	/* Per-queue LRO setup: maximum descriptors per aggregation and
+	 * the interrupt interval/mapping for the ring.
+	 */
+	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
+		sxe_rx_queue_s *rxq = dev->data->rx_queues[queue_id];
+
+		sxe_hw_rx_lro_ctl_configure(hw, rxq->reg_idx,
+				sxe_lro_max_desc_get(rxq->mb_pool));
+		sxe_lro_irq_configure(hw, rxq->reg_idx, queue_id);
+	}
+}
+
+/*
+ * Configure LRO for the whole port: validate the request, program the
+ * global LRO hardware state, and -- only when the user asked for LRO --
+ * configure every queue and flag dev->data->lro.
+ */
+static s32 sxe_lro_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+	bool lro_capable = false;
+
+	s32 ret;
+
+	ret = sxe_lro_sanity_check(dev, &lro_capable);
+	if (ret) {
+		PMD_LOG_CRIT(INIT, "lro sanity check failed, err=%d", ret);
+		goto l_end;
+	}
+
+	/* Global LRO state is programmed even when LRO stays off. */
+	sxe_lro_hw_configure(hw, lro_capable, rx_conf);
+
+	if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+		PMD_LOG_DEBUG(INIT, "user app do not turn lro on");
+		goto l_end;
+	}
+
+	sxe_lro_hw_queue_configure(dev, hw);
+
+	dev->data->lro = 1;
+
+	PMD_LOG_DEBUG(INIT, "enabling lro mode");
+
+l_end:
+	return ret;
+}
+
+static s32 __rte_cold sxe_rx_start(struct rte_eth_dev *dev)
+{
+	u16 queue_id;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Start every rx queue that is not marked for deferred start;
+	 * bail out on the first failure.
+	 */
+	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
+		sxe_rx_queue_s *rxq = dev->data->rx_queues[queue_id];
+
+		if (rxq->deferred_start)
+			continue;
+
+		ret = sxe_rx_queue_start(dev, queue_id);
+		if (ret < 0) {
+			PMD_LOG_ERR(INIT, "rx queue[%u] start failed", queue_id);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Top-level rx path configuration: disable the rx unit, program the
+ * packet buffer, filter mode, offloads, per-queue rings, multi-queue
+ * features and LRO, pick the rx burst function, then start the queues.
+ */
+s32 __rte_cold sxe_rx_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Keep rx disabled while reprogramming. */
+	sxe_hw_rx_cap_switch_off(hw);
+
+	sxe_hw_rx_pkt_buf_size_set(hw, 0, SXE_RX_PKT_BUF_SIZE);
+
+	sxe_rx_mode_configure(hw);
+
+	sxe_rx_offload_configure(dev);
+
+	sxe_rx_queue_configure(dev);
+
+	sxe_rx_features_configure(dev);
+
+	ret = sxe_lro_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "lro config failed, err = %d", ret);
+		goto l_end;
+	}
+
+	/* The vector-allowed flag only exists with SIMD support built in. */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+#else
+	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, NULL);
+#endif
+
+	ret = sxe_rx_start(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "rx start failed, err = %d", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+static void sxe_vmdq_rx_mode_get(u32 rx_mask, u32 *orig_val)
+{
+	/* Translate RTE_ETH_VMDQ_ACCEPT_* request bits into the
+	 * corresponding SXE_VMOLR_* register bits.
+	 */
+	static const struct {
+		u32 rte_bit;
+		u32 vmolr_bit;
+	} mode_map[] = {
+		{ RTE_ETH_VMDQ_ACCEPT_UNTAG,     SXE_VMOLR_AUPE },
+		{ RTE_ETH_VMDQ_ACCEPT_HASH_MC,   SXE_VMOLR_ROMPE },
+		{ RTE_ETH_VMDQ_ACCEPT_HASH_UC,   SXE_VMOLR_ROPE },
+		{ RTE_ETH_VMDQ_ACCEPT_BROADCAST, SXE_VMOLR_BAM },
+		{ RTE_ETH_VMDQ_ACCEPT_MULTICAST, SXE_VMOLR_MPE },
+	};
+	size_t i;
+
+	for (i = 0; i < RTE_DIM(mode_map); i++) {
+		if (rx_mask & mode_map[i].rte_bit)
+			*orig_val |= mode_map[i].vmolr_bit;
+	}
+}
+
+/*
+ * Configure pure VMDq rx mode: RSS is disabled, the default pool and
+ * per-pool rx accept mode are programmed, and each vlan-to-pool map
+ * entry from the user configuration is written to hardware.
+ */
+static void sxe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw     *hw = &adapter->hw;
+	enum rte_eth_nb_pools pools_num;
+	u32 rx_mode = 0;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+	pools_num = cfg->nb_queue_pools;
+
+	/* VMDq-only mode excludes RSS. */
+	sxe_rss_disable(dev);
+
+	sxe_hw_vmdq_mq_configure(hw);
+
+	sxe_hw_vmdq_default_pool_configure(hw,
+						cfg->enable_default_pool,
+						cfg->default_pool);
+
+	sxe_vmdq_rx_mode_get(cfg->rx_mode, &rx_mode);
+	sxe_hw_vmdq_vlan_configure(hw, pools_num, rx_mode);
+
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		sxe_hw_vmdq_pool_configure(hw, i,
+						cfg->pool_map[i].vlan_id,
+						cfg->pool_map[i].pools);
+	}
+
+	if (cfg->enable_loop_back) {
+		sxe_hw_vmdq_loopback_configure(hw);
+	}
+
+	return;
+}
+
+/*
+ * Select and configure the rx multi-queue feature (RSS / VMDq / DCB)
+ * according to the configured mq_mode, with separate dispatch for the
+ * SR-IOV-active case.  Returns 0 or a negative error for unsupported
+ * combinations.
+ */
+s32 sxe_rx_features_configure(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			sxe_rss_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+			sxe_dcb_vmdq_rx_hw_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_ONLY:
+			sxe_vmdq_rx_hw_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_NONE:
+		default:
+			/* No multi-queue feature requested. */
+			sxe_rss_disable(dev);
+			break;
+		}
+	} else {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case RTE_ETH_MQ_RX_RSS:
+		case RTE_ETH_MQ_RX_VMDQ_RSS:
+			sxe_vf_rss_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_DCB:
+		case RTE_ETH_MQ_RX_DCB:
+			sxe_dcb_vmdq_rx_hw_configure(dev);
+			break;
+		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		case RTE_ETH_MQ_RX_DCB_RSS:
+			/* DCB+RSS cannot be combined with SR-IOV/VMDq. */
+			ret = -SXE_ERR_CONFIG;
+			PMD_LOG_ERR(DRV,
+				"DCB and RSS with vmdq or sriov not "
+				"support.(err:%d)", ret);
+			break;
+		default:
+			sxe_vf_default_mode_configure(dev);
+			break;
+		}
+#else
+		PMD_LOG_ERR(INIT, "unsupport sriov");
+		ret = -EINVAL;
+#endif
+	}
+
+	/* NOTE(review): first value logged is SRIOV(dev).active, labelled
+	 * "pool num" -- confirm the label matches the intended meaning.
+	 */
+	LOG_INFO("pool num:%u rx mq_mode:0x%x configure result:%d.",
+		     RTE_ETH_DEV_SRIOV(dev).active,
+		     dev->data->dev_conf.rxmode.mq_mode, ret);
+
+	return ret;
+}
+
+const u32 *sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	/* Supported packet-type list comes from the common layer. */
+	const u32 *ptypes = __sxe_dev_supported_ptypes_get(dev);
+
+	return ptypes;
+}
+
+#ifdef ETH_DEV_OPS_MONITOR
+/*
+ * rte_power monitor callback: checks whether the watched status word
+ * has the DD bit set; returns -1 when set, 0 otherwise (per the
+ * rte_power_monitor condition-callback convention -- verify against
+ * the DPDK power API documentation).
+ */
+static s32
+sxe_monitor_callback(const u64 value,
+		const u64 arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+	const u64 dd_state = rte_cpu_to_le_32(SXE_RXDADV_STAT_DD);
+	return (value & dd_state) == dd_state ? -1 : 0;
+}
+
+/*
+ * Provide the address/condition pair for power-optimized rx polling:
+ * monitor the status word of the next descriptor to be processed.
+ * NOTE(review): declares "struct sxe_rx_queue" where the rest of the
+ * file uses the sxe_rx_queue_s typedef -- presumably the same type;
+ * confirm and unify.
+ */
+s32
+sxe_monitor_addr_get(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+	volatile union sxe_rx_data_desc *rxdp;
+	struct sxe_rx_queue *rxq = rx_queue;
+
+	rxdp = &rxq->desc_ring[rxq->processing_idx];
+
+	pmc->addr = &rxdp->wb.upper.status_error;
+	pmc->fn = sxe_monitor_callback;
+	pmc->size = sizeof(u32);
+
+	return 0;
+}
+#endif
diff --git a/drivers/net/sxe/pf/sxe_rx.h b/drivers/net/sxe/pf/sxe_rx.h
new file mode 100644
index 0000000000..7322a54a2c
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_rx.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DPDK_RX_H__
+#define __SXE_DPDK_RX_H__
+
+#include "sxe_types.h"
+#include "sxe_queue.h"
+#include "sxe_hw.h"
+#include "sxe_compat_version.h"
+#include "sxe_logs.h"
+
+#define SXE_RXDADV_ERR_CKSUM_BIT  30
+#define SXE_RXDADV_ERR_CKSUM_MSK  3
+
+#define SXE_PACKET_TYPE_MAX               0X80
+#define SXE_PACKET_TYPE_TN_MAX            0X100
+#define SXE_PACKET_TYPE_MASK              0X7F
+#define SXE_RXD_STAT_TMST                 0x10000   
+
+#define SXE_DESCS_PER_LOOP 4
+
+#define SXE_PCI_REG_WC_WRITE(reg, value)			\
+	rte_write32_wc((rte_cpu_to_le_32(value)), reg)
+#define SXE_PCI_REG_WC_WRITE_RELAXED(reg, value)		\
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define SXE_RX_RING_SIZE ((SXE_MAX_RING_DESC + RTE_PMD_SXE_MAX_RX_BURST) * \
+			sizeof(sxe_rx_data_desc_u))
+
+extern const u32 sxe_ptype_table[SXE_PACKET_TYPE_MAX];
+extern const u32 sxe_ptype_table_tn[SXE_PACKET_TYPE_TN_MAX];
+
+static inline u64 sxe_rx_desc_status_to_pkt_flags(u32 rx_status,
+							u64 vlan_flags)
+{
+	/* vlan flags only when the descriptor reports a vlan tag (VP). */
+	u64 pkt_flags = (rx_status & SXE_RXD_STAT_VP) ? vlan_flags : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+	/* Mark time-stamped packets for IEEE 1588. */
+	if (rx_status & SXE_RXD_STAT_TMST)
+		pkt_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+#endif
+	return pkt_flags;
+}
+
+/*
+ * Convert descriptor checksum error bits into mbuf ol_flags, adding
+ * the outer-IP checksum error flag when reported.
+ */
+static inline u64 sxe_rx_desc_error_to_pkt_flags(u32 rx_status)
+{
+	u64 pkt_flags;
+
+	/* Indexed by the two checksum error bits (IP, L4); const so the
+	 * table lives in read-only storage instead of writable data.
+	 */
+	static const u64 error_to_pkt_flags_map[4] = {
+		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
+	};
+
+	pkt_flags = error_to_pkt_flags_map[(rx_status >>
+		SXE_RXDADV_ERR_CKSUM_BIT) & SXE_RXDADV_ERR_CKSUM_MSK];
+
+	if ((rx_status & SXE_RXD_STAT_OUTERIPCS) &&
+	    (rx_status & SXE_RXDADV_ERR_OUTERIPER)) {
+		pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+	}
+
+	return pkt_flags;
+}
+
+/*
+ * Convert the descriptor pkt_info field into mbuf RSS/FDIR (and,
+ * with IEEE 1588, PTP) ol_flags.
+ */
+static inline u64 sxe_rx_desc_pkt_info_to_pkt_flags(u16 pkt_info)
+{
+	u64 flags = 0;
+	/* RSS-type nibble -> mbuf flag lookup; const read-only table. */
+	static const u64 ip_rss_types_map[16] __rte_cache_aligned = {
+		0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+		0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+		RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+		0, 0, 0,  RTE_MBUF_F_RX_FDIR,
+	};
+
+	/* Body re-indented to one level (was double-indented). */
+#ifdef RTE_LIBRTE_IEEE1588
+	static const u64 ip_pkt_etqf_map[8] = {
+		0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
+		0, 0, 0, 0,
+	};
+
+	if (likely(pkt_info & SXE_RXDADV_PKTTYPE_ETQF)) {
+		flags = ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
+			ip_rss_types_map[pkt_info & 0XF];
+	} else {
+		flags = ip_rss_types_map[pkt_info & 0XF];
+	}
+#else
+	flags = ip_rss_types_map[pkt_info & 0XF];
+#endif
+	return flags;
+}
+
+static inline u32 sxe_rxd_pkt_info_to_pkt_type(u32 pkt_info,
+							u16 ptype_mask)
+{
+	u32 table_idx;
+
+	/* ETQF-matched packets carry no parsed packet type. */
+	if (unlikely(pkt_info & SXE_RXDADV_PKTTYPE_ETQF))
+		return RTE_PTYPE_UNKNOWN;
+
+	table_idx = (pkt_info >> SXE_RXDADV_PKTTYPE_ETQF_SHIFT) & ptype_mask;
+	table_idx &= SXE_PACKET_TYPE_MASK;
+
+	return sxe_ptype_table[table_idx];
+}
+
+static inline u32 sxe_lro_count(sxe_rx_data_desc_u *rx)
+{
+	/* Coalesced-segment count recorded in the descriptor low dword. */
+	u32 lo_dword = rte_le_to_cpu_32(rx->wb.lower.lo_dword.data);
+
+	return (lo_dword & SXE_RXDADV_LROCNT_MASK) >> SXE_RXDADV_LROCNT_SHIFT;
+}
+
+/*
+ * Batch allocation preconditions: the batch size must cover the max
+ * burst, be smaller than the ring, and evenly divide the ring depth.
+ */
+static inline bool __rte_cold
+	sxe_check_is_rx_batch_alloc_support(
+						sxe_rx_queue_s *rxq)
+{
+	bool support = true;
+
+	/* Conditions rewritten without double negation (!(a >= b)). */
+	if (rxq->batch_alloc_size < RTE_PMD_SXE_MAX_RX_BURST) {
+		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
+			     "rxq->batch_alloc_size=%d, "
+			     "RTE_PMD_SXE_MAX_RX_BURST=%d",
+			     rxq->batch_alloc_size, RTE_PMD_SXE_MAX_RX_BURST);
+		support = false;
+	} else if (rxq->batch_alloc_size >= rxq->ring_depth) {
+		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
+			     "rxq->batch_alloc_size=%d, "
+			     "rxq->ring_depth=%d",
+			     rxq->batch_alloc_size, rxq->ring_depth);
+		support = false;
+	} else if ((rxq->ring_depth % rxq->batch_alloc_size) != 0) {
+		/* Message fixed: the struct field is ring_depth, there is
+		 * no rxq->nb_rx_desc.
+		 */
+		PMD_LOG_DEBUG(INIT, "rx burst batch alloc preconditions: "
+			     "rxq->ring_depth=%d, "
+			     "rxq->batch_alloc_size=%d",
+			     rxq->ring_depth, rxq->batch_alloc_size);
+		support = false;
+	}
+
+	return support;
+}
+
+s32 sxe_rx_configure(struct rte_eth_dev *dev);
+
+void sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#ifdef ETH_DEV_RX_DESC_DONE
+s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 sxe_rx_descriptor_status(void *rx_queue, u16 offset);
+
+u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,u16 num_pkts);
+
+s32 sxe_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx,u16 num_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp);
+
+s32 sxe_rx_features_configure(struct rte_eth_dev *dev);
+
+const u32 *sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+#ifdef ETH_DEV_OPS_MONITOR
+s32
+sxe_monitor_addr_get(void *rx_queue, struct rte_power_monitor_cond *pmc);
+#endif
+
+void sxe_rx_mbuf_common_header_fill(
+					sxe_rx_queue_s *rxq,
+					struct rte_mbuf *mbuf,
+					volatile sxe_rx_data_desc_u desc,
+					u32 pkt_info, u32 staterr);
+
+u16 sxe_batch_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num);
+
+u16 sxe_single_alloc_lro_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num);
+
+u16 sxe_batch_alloc_pkts_recv(void *rx_queue,
+					struct rte_mbuf **rx_pkts,
+					u16 pkts_num);
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_stats.c b/drivers/net/sxe/pf/sxe_stats.c
new file mode 100644
index 0000000000..5d9de2991c
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_stats.c
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#include "sxe_stats.h"
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxe_queue.h"
+#include "sxe_compat_platform.h"
+#include <rte_string_fns.h>
+
+#define SXE_STAT_MAP_WIDTH 8
+#define SXE_STAT_MAP_CNT 4
+#define SXE_STAT_MAP_MASK 0x0F
+
+#define SXE_QUEUE_STAT_COUNT   \
+    (sizeof(stats_info->hw_stats.qprc) / sizeof(stats_info->hw_stats.qprc[0]))
+
+/* Software-maintained xstats: name -> offset into struct sxe_sw_stats. */
+static const struct sxe_stats_field sxe_xstats_sw_field[] = {
+	{"rx_l3_l4_xsum_error", offsetof(struct sxe_sw_stats,
+		hw_csum_rx_error)},
+};
+
+/* MAC hardware xstats: name -> offset into struct sxe_mac_stats. */
+static const struct sxe_stats_field sxe_xstats_mac_field[] = {
+	{"rx_crc_errors", offsetof(struct sxe_mac_stats, crcerrs)},
+	{"rx_error_bytes", offsetof(struct sxe_mac_stats, errbc)},
+	{"rx_length_errors", offsetof(struct sxe_mac_stats, rlec)},
+	{"rx_size_64_packets", offsetof(struct sxe_mac_stats, prc64)},
+	{"rx_size_65_to_127_packets", offsetof(struct sxe_mac_stats, prc127)},
+	{"rx_size_128_to_255_packets", offsetof(struct sxe_mac_stats, prc255)},
+	{"rx_size_256_to_511_packets", offsetof(struct sxe_mac_stats, prc511)},
+	{"rx_size_512_to_1023_packets", offsetof(struct sxe_mac_stats,
+		prc1023)},
+	{"rx_size_1024_to_max_packets", offsetof(struct sxe_mac_stats,
+		prc1522)},
+	{"rx_broadcast_packets", offsetof(struct sxe_mac_stats, bprc)},
+	{"rx_multicast_packets", offsetof(struct sxe_mac_stats, mprc)},
+	{"rx_fragment_errors", offsetof(struct sxe_mac_stats, rfc)},
+	{"rx_undersize_errors", offsetof(struct sxe_mac_stats, ruc)},
+	{"rx_oversize_errors", offsetof(struct sxe_mac_stats, roc)},
+	{"rx_jabber_errors", offsetof(struct sxe_mac_stats, rjc)},
+	{"rx_size_packets", offsetof(struct sxe_mac_stats, tpr)},
+	{"rx_size_bytes", offsetof(struct sxe_mac_stats, tor)},
+	{"tx_size_packets", offsetof(struct sxe_mac_stats, tpt)},
+	{"tx_size_64_packets", offsetof(struct sxe_mac_stats, ptc64)},
+	{"tx_size_65_to_127_packets", offsetof(struct sxe_mac_stats, ptc127)},
+	{"tx_size_128_to_255_packets", offsetof(struct sxe_mac_stats, ptc255)},
+	{"tx_size_256_to_511_packets", offsetof(struct sxe_mac_stats, ptc511)},
+	{"tx_size_512_to_1023_packets", offsetof(struct sxe_mac_stats,
+		ptc1023)},
+	{"tx_size_1024_to_max_packets", offsetof(struct sxe_mac_stats,
+		ptc1522)},
+	{"tx_multicast_packets", offsetof(struct sxe_mac_stats, mptc)},
+	{"tx_broadcast_packets", offsetof(struct sxe_mac_stats, bptc)},
+
+	{"flow_navigator_add_filters", offsetof(struct sxe_mac_stats,
+		fnavadd)},
+	{"flow_navigator_remove_filters", offsetof(struct sxe_mac_stats,
+		fnavrmv)},
+	{"flow_navigator_filters_add_errs", offsetof(struct sxe_mac_stats,
+		fnavadderr)},
+	{"flow_navigator_filters_remove_errs", offsetof(struct sxe_mac_stats,
+		fnavrmverr)},
+	{"flow_navigator_matched_filters", offsetof(struct sxe_mac_stats,
+		fnavmatch)},
+	{"flow_navigator_missed_filters", offsetof(struct sxe_mac_stats,
+		fnavmiss)},
+};
+
+/* Flow-control xstats, reported once per priority (SXE_FC_PRIO_VALUES). */
+static const struct sxe_stats_field sxe_xstats_fc_field[] = {
+	{"dropped", offsetof(struct sxe_mac_stats, mpc)},
+	{"rx_xon_xoff_packets", offsetof(struct sxe_mac_stats, prcpf)},
+	{"tx_xon_xoff_packets", offsetof(struct sxe_mac_stats, pfct)},
+};
+
+#define SXE_XSTAT_SW_CNT  (sizeof(sxe_xstats_sw_field) / \
+		      sizeof(sxe_xstats_sw_field[0]))
+
+#define SXE_XSTAT_MAC_CNT (sizeof(sxe_xstats_mac_field) / \
+		      sizeof(sxe_xstats_mac_field[0]))
+
+#define SXE_XSTAT_FC_CNT (sizeof(sxe_xstats_fc_field) / \
+			   sizeof(sxe_xstats_fc_field[0]))
+
+#define SXE_FC_PRIO_VALUES 8
+
+#define SXE_XSTAT_CNT  (SXE_XSTAT_MAC_CNT + SXE_XSTAT_SW_CNT + \
+			SXE_XSTAT_FC_CNT * SXE_FC_PRIO_VALUES)
+
+#ifdef SXE_TEST
+/* Test-only helper exposing the total number of xstats entries. */
+u32 sxe_xstats_cnt_get(void)
+{
+	return SXE_XSTAT_CNT;
+}
+#endif
+
+s32 sxe_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	u64 pkts_total = 0;
+	u64 bytes_total = 0;
+	u32 i;
+
+	/* Latch hardware counters before checking stats: the registers
+	 * are read even for a NULL stats pointer, which the reset path
+	 * (sxe_stats_reset) relies on.
+	 */
+	sxe_hw_stats_get(hw, &stats_info->hw_stats);
+
+	if (stats == NULL) {
+		PMD_LOG_ERR(DRV, "input param stats is null.");
+		return -EINVAL;
+	}
+
+	/* Per-queue counters; rx totals are summed from the queues. */
+	for (i = 0; i < SXE_QUEUE_STAT_COUNT; i++) {
+		pkts_total += stats_info->hw_stats.qprc[i];
+		bytes_total += stats_info->hw_stats.qbrc[i];
+
+		stats->q_ipackets[i] = stats_info->hw_stats.qprc[i];
+		stats->q_opackets[i] = stats_info->hw_stats.qptc[i];
+		stats->q_ibytes[i] = stats_info->hw_stats.qbrc[i];
+		stats->q_obytes[i] = stats_info->hw_stats.qbtc[i];
+		stats->q_errors[i] = stats_info->hw_stats.qprdc[i];
+	}
+
+	stats->ipackets = pkts_total;
+	stats->ibytes = bytes_total;
+	stats->opackets = stats_info->hw_stats.gptc;
+	stats->obytes = stats_info->hw_stats.gotc;
+
+	stats->imissed  = 0;
+	stats->ierrors  = stats_info->hw_stats.crcerrs +
+			  stats_info->hw_stats.rlec +
+			  stats_info->hw_stats.ruc +
+			  stats_info->hw_stats.roc +
+			  stats_info->hw_stats.rfc;
+	stats->oerrors  = 0;
+
+	return 0;
+}
+
+static s32 sxe_hw_xstat_offset_get(u32 id, u32 *offset)
+{
+	u32 size = SXE_XSTAT_MAC_CNT;
+
+	/* Bounds-checked lookup of a mac xstat field offset. */
+	if (id >= size) {
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+		return -SXE_ERR_PARAM;
+	}
+
+	*offset = sxe_xstats_mac_field[id].offset;
+
+	return 0;
+}
+
+static s32 sxe_sw_xstat_offset_get(u32 id, u32 *offset)
+{
+	u32 size = SXE_XSTAT_SW_CNT;
+
+	/* Bounds-checked lookup of a software xstat field offset. */
+	if (id >= size) {
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+		return -SXE_ERR_PARAM;
+	}
+
+	*offset = sxe_xstats_sw_field[id].offset;
+
+	return 0;
+}
+
+static s32 sxe_fc_xstat_field_offset_get(u32 id, u8 priority, u32 *offset)
+{
+	u32 size = SXE_XSTAT_FC_CNT;
+
+	/* Flow-control fields are u64 arrays indexed by priority. */
+	if (id >= size) {
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, size);
+		return -SXE_ERR_PARAM;
+	}
+
+	*offset = sxe_xstats_fc_field[id].offset + (sizeof(u64) * priority);
+
+	return 0;
+}
+
+static void sxe_sw_stats_get(struct rte_eth_dev *eth_dev,
+				struct sxe_sw_stats *stats)
+{
+	u64 csum_err_total = 0;
+	u32 queue_id;
+
+	/* Aggregate per-queue rx checksum error counters. */
+	for (queue_id = 0; queue_id < eth_dev->data->nb_rx_queues; queue_id++) {
+		sxe_rx_queue_s *rxq = eth_dev->data->rx_queues[queue_id];
+
+		csum_err_total += rxq->rx_stats.csum_err;
+	}
+
+	stats->hw_csum_rx_error = csum_err_total;
+}
+
+/*
+ * Fill the extended statistics array: mac register stats first, then
+ * software stats, then flow-control stats expanded per priority.
+ * Returns the number of entries written, or the required count when
+ * usr_cnt is too small (per the ethdev xstats contract).
+ */
+s32 sxe_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 usr_cnt)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 i;
+	u32 cnt;
+	s32 ret;
+	u32 offset;
+	u8 prio;
+
+	cnt = SXE_XSTAT_CNT;
+	/* %zu: the field counts are sizeof-derived size_t values, so %lu
+	 * would mismatch on 32-bit targets.
+	 */
+	PMD_LOG_INFO(DRV, "xstat size:%u. hw xstat field cnt:%zu "
+		    "fc xstat field cnt:%zu ", cnt,
+		    SXE_XSTAT_MAC_CNT,
+		    SXE_XSTAT_FC_CNT);
+
+	if (usr_cnt < cnt) {
+		ret = cnt;
+		PMD_LOG_ERR(DRV, "user usr_cnt:%u less than stats cnt:%u.",
+			    usr_cnt, cnt);
+		goto l_out;
+	}
+
+	sxe_hw_stats_get(hw, &stats_info->hw_stats);
+	sxe_sw_stats_get(eth_dev, &stats_info->sw_stats);
+
+	if (xstats == NULL) {
+		/* Counters were still latched above (reset path wants this). */
+		ret = 0;
+		PMD_LOG_ERR(DRV, "usr_cnt:%u, input param xstats is null.", usr_cnt);
+		goto l_out;
+	}
+
+	cnt = 0;
+	for (i = 0; i < SXE_XSTAT_MAC_CNT; i++) {
+		sxe_hw_xstat_offset_get(i, &offset);
+		xstats[cnt].value = *(u64 *)(((s8 *)(&stats_info->hw_stats)) + offset);
+		xstats[cnt].id = cnt;
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_SW_CNT; i++) {
+		sxe_sw_xstat_offset_get(i, &offset);
+		xstats[cnt].value = *(u64 *)(((s8 *)(&stats_info->sw_stats)) + offset);
+		xstats[cnt].id = cnt;
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_FC_CNT; i++) {
+		for (prio = 0; prio < SXE_FC_PRIO_VALUES; prio++) {
+			sxe_fc_xstat_field_offset_get(i, prio, &offset);
+			xstats[cnt].value = *(u64 *)(((s8 *)(&stats_info->hw_stats))
+					  + offset);
+			xstats[cnt].id = cnt;
+			cnt++;
+		}
+	}
+
+	ret = cnt;
+	PMD_LOG_INFO(DRV, "usr_cnt:%u stats cnt:%u stats done.", usr_cnt, cnt);
+
+l_out:
+	return ret;
+}
+
+/*
+ * Reset basic statistics.  sxe_eth_stats_get(dev, NULL) is called
+ * deliberately: it latches the hardware counters (and returns -EINVAL,
+ * which is ignored) so the subsequent clean/zeroing starts fresh.
+ */
+s32 sxe_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	sxe_rx_queue_s *rxq;
+	u32 i;
+
+	sxe_eth_stats_get(eth_dev, NULL);
+	sxe_hw_stats_seq_clean(hw, &stats_info->hw_stats);
+
+	/* Clear the per-queue software counters. */
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		memset(&rxq->rx_stats, 0, sizeof(rxq->rx_stats));
+	}
+
+	/* Zero the cached copies so deltas restart from zero. */
+	memset(&stats_info->hw_stats, 0, sizeof(stats_info->hw_stats));
+	memset(&stats_info->sw_stats, 0, sizeof(stats_info->sw_stats));
+
+	return 0;
+}
+
+/*
+ * Reset extended statistics.  The NULL-array sxe_xstats_get call
+ * latches the hardware counters before they are cleaned and the
+ * cached copies are zeroed (same pattern as sxe_stats_reset).
+ */
+s32 sxe_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	sxe_rx_queue_s *rxq;
+	u32 size = SXE_XSTAT_CNT;
+	u32 i;
+
+	sxe_xstats_get(eth_dev, NULL, size);
+	sxe_hw_stats_seq_clean(hw, &stats_info->hw_stats);
+
+	/* Clear the per-queue software counters. */
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		memset(&rxq->rx_stats, 0, sizeof(rxq->rx_stats));
+	}
+
+	memset(&stats_info->hw_stats, 0, sizeof(stats_info->hw_stats));
+	memset(&stats_info->sw_stats, 0, sizeof(stats_info->sw_stats));
+
+	return 0;
+}
+
+/*
+ * Report xstat names in the same order sxe_xstats_get fills values:
+ * mac fields, software fields, then flow-control fields per priority.
+ * A NULL names array returns the total count.
+ */
+s32 sxe_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int usr_cnt)
+{
+	u32 i = 0;
+	u32 cnt = 0;
+	s32 ret;
+	u8 prio;
+
+	if (xstats_names == NULL) {
+		ret = SXE_XSTAT_CNT;
+		PMD_LOG_INFO(DRV, "xstats field size:%u.", ret);
+		goto l_out;
+	}
+
+	/* usr_cnt is read here, so it must not carry __rte_unused; also
+	 * %zu because SXE_XSTAT_CNT is a sizeof-derived size_t value.
+	 */
+	if (usr_cnt < SXE_XSTAT_CNT) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "max:%zu usr_cnt:%u invalid.(err:%d)",
+			    SXE_XSTAT_CNT, usr_cnt, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_XSTAT_MAC_CNT; i++) {
+		strlcpy(xstats_names[cnt].name,
+			sxe_xstats_mac_field[i].name,
+			sizeof(xstats_names[cnt].name));
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_SW_CNT; i++) {
+		strlcpy(xstats_names[cnt].name,
+			sxe_xstats_sw_field[i].name,
+			sizeof(xstats_names[cnt].name));
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_FC_CNT; i++) {
+		for (prio = 0; prio < SXE_FC_PRIO_VALUES; prio++) {
+			snprintf(xstats_names[cnt].name,
+				sizeof(xstats_names[cnt].name),
+				"priority%u_%s", prio,
+				sxe_xstats_fc_field[i].name);
+			cnt++;
+		}
+	}
+
+	ret = cnt;
+
+l_out:
+	return ret;
+}
+
+/* Read every extended statistic value into @values, in the same order
+ * sxe_xstats_names_get() emits the names (MAC, then SW, then per-prio
+ * flow control). Returns the number of values written, SXE_XSTAT_CNT
+ * when @usr_cnt is too small, or 0 when @values is NULL (counters are
+ * still read, which drains the clear-on-read registers).
+ */
+static s32 sxe_all_xstats_value_get(struct rte_eth_dev *eth_dev,
+						u64 *values, u32 usr_cnt)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_stats_info *stats_info = &adapter->stats_info;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 size = SXE_XSTAT_CNT;
+	s32 ret;
+	u32 offset;
+	u32 cnt = 0;
+	u32 i;
+	u8 prio;
+
+	if (usr_cnt < size) {
+		PMD_LOG_WARN(DRV, "ids null usr_cnt:%u less than xstats"
+			     " cnt:%u, return xstat cnt.",
+			      usr_cnt, size);
+		ret = size;
+		goto l_out;
+	}
+
+	/* Refresh the cached copies from HW and the SW counters. */
+	sxe_hw_stats_get(hw, &stats_info->hw_stats);
+	sxe_sw_stats_get(eth_dev, &stats_info->sw_stats);
+
+	if (values == NULL) {
+		PMD_LOG_WARN(DRV, "ids and values null, "
+			     "read clean stats regs");
+		ret = 0;
+		goto l_out;
+	}
+
+	/* Each field table entry gives a byte offset into the stats struct. */
+	for (i = 0; i < SXE_XSTAT_MAC_CNT; i++) {
+		sxe_hw_xstat_offset_get(i, &offset);
+		values[cnt] = *(u64 *)(((s8 *)(&stats_info->hw_stats)) + offset);
+		cnt++;
+	}
+
+	for (i = 0; i < SXE_XSTAT_SW_CNT; i++) {
+		sxe_sw_xstat_offset_get(i, &offset);
+		values[cnt] = *(u64 *)(((s8 *)(&stats_info->sw_stats)) + offset);
+		cnt++;
+	}
+
+	/* Flow-control counters: one value per (field, priority) pair. */
+	for (i = 0; i < SXE_XSTAT_FC_CNT; i++) {
+		for (prio = 0; prio < SXE_FC_PRIO_VALUES; prio++) {
+			sxe_fc_xstat_field_offset_get(i, prio, &offset);
+			values[cnt] = *(u64 *)(((s8 *)(&stats_info->hw_stats))
+					  + offset);
+			cnt++;
+		}
+	}
+
+	ret = cnt;
+
+l_out:
+	return ret;
+}
+
+/* Fetch extended statistic values selected by @ids.
+ *
+ * @ids:     indices into the full xstats table; NULL means "all".
+ * @values:  output array, one entry per id.
+ * @usr_cnt: number of entries in @ids/@values.
+ *
+ * Returns the number of values written, the full table size when the
+ * buffer is too small (ids == NULL path), or -EINVAL on a bad id or
+ * NULL @values.
+ *
+ * Fixes: ids[] is u64 but was printed with %lu, which truncates on a
+ * 32-bit ABI; print via a portable unsigned long long cast instead.
+ */
+s32 sxe_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+					const u64 *ids,
+					u64 *values, u32 usr_cnt)
+{
+	s32 ret;
+	u32 size = SXE_XSTAT_CNT;
+	u32 i;
+	u64 value_all[size];
+
+	if (ids == NULL) {
+		ret = sxe_all_xstats_value_get(eth_dev, values, usr_cnt);
+		goto l_out;
+	}
+
+	if (values == NULL) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "invalid param values.");
+		goto l_out;
+	}
+
+	/* Snapshot the whole table once, then scatter by id. */
+	sxe_all_xstats_value_get(eth_dev, value_all, size);
+
+	for (i = 0; i < usr_cnt; i++) {
+		if (ids[i] >= size) {
+			PMD_LOG_ERR(DRV, "index:%u invalid ids:%llu.", i,
+				    (unsigned long long)ids[i]);
+			ret = -EINVAL;
+			goto l_out;
+		}
+		values[i] = value_all[ids[i]];
+	}
+
+	ret = usr_cnt;
+
+l_out:
+	return ret;
+}
+
+/* Fetch xstat names selected by @ids (NULL ids means all names).
+ * The parameter order differs between DPDK releases, hence the two
+ * signatures. Returns @usr_cnt on success or -EINVAL on a bad id.
+ *
+ * Fixes: unbounded strcpy replaced with strlcpy (consistent with
+ * sxe_xstats_names_get), and u64 ids printed portably.
+ */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const u64 *ids,
+	u32 usr_cnt)
+#else
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	const u64 *ids,
+	struct rte_eth_xstat_name *xstats_names,
+	u32 usr_cnt)
+#endif
+{
+	s32 ret;
+	u32 i;
+	u32 size = SXE_XSTAT_CNT;
+	struct rte_eth_xstat_name xstat_names_all[size];
+
+	if (ids == NULL) {
+		ret = sxe_xstats_names_get(eth_dev, xstats_names, usr_cnt);
+		goto l_out;
+	}
+
+	/* Build the full name table once, then scatter by id. */
+	sxe_xstats_names_get(eth_dev, xstat_names_all, size);
+	for (i = 0; i < usr_cnt; i++) {
+		if (ids[i] >= size) {
+			PMD_LOG_ERR(DRV, "index:%u invalid ids:%llu.", i,
+				    (unsigned long long)ids[i]);
+			ret = -EINVAL;
+			goto l_out;
+		}
+		strlcpy(xstats_names[ids[i]].name,
+			xstat_names_all[ids[i]].name,
+			sizeof(xstats_names[ids[i]].name));
+	}
+
+	ret = usr_cnt;
+
+l_out:
+	return ret;
+}
+
+/* Map a queue to a statistics counter register slot.
+ *
+ * Each mapping register holds SXE_STAT_MAP_CNT fields of
+ * SXE_STAT_MAP_WIDTH bits; @queue_id selects the register and the
+ * field within it, and @stat_reg_idx is written into that field.
+ * The SW shadow in stats_map is updated alongside the HW register so
+ * the mapping can be restored after reset.
+ *
+ * Returns 0 on success or -EIO when @queue_id maps past the last
+ * mapping register.
+ */
+s32 sxe_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  u16 queue_id,
+				  u8 stat_reg_idx,
+				  u8 is_rx)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_stats_map *stats_map = &(adapter->stats_info.stats_map);
+	u32 qsmr_mask = 0;
+	u32 map_mask = SXE_STAT_MAP_MASK;
+	u8 reg_idx;
+	u8 map_idx;
+	s32 ret = 0;
+
+	reg_idx = queue_id / SXE_STAT_MAP_CNT;
+	if (reg_idx >= SXE_QUEUE_STATS_MAP_REG_NUM) {
+		ret = -EIO;
+		PMD_LOG_ERR(DRV, "invalid queue_id:%u reg_idx exceeded "
+			    "max map cnt:%u.(err:%d)",
+			    queue_id, SXE_QUEUE_STATS_MAP_REG_NUM, ret);
+		goto l_out;
+	}
+
+	/* Position of this queue's field within the mapping register. */
+	map_idx = (u8)(queue_id % SXE_STAT_MAP_CNT);
+	map_mask <<= (SXE_STAT_MAP_WIDTH * map_idx);
+
+	/* Clear the old field value in the SW shadow... */
+	if (!is_rx) {
+		stats_map->txq_stats_map[reg_idx] &= ~map_mask;
+	} else {
+		stats_map->rxq_stats_map[reg_idx] &= ~map_mask;
+	}
+
+	/* ...then merge the new value and push the shadow to HW. */
+	qsmr_mask = (stat_reg_idx & SXE_STAT_MAP_MASK) << (SXE_STAT_MAP_WIDTH * map_idx);
+	if (!is_rx) {
+		stats_map->txq_stats_map[reg_idx] |= qsmr_mask;
+		sxe_hw_txq_stat_map_set(hw, reg_idx, stats_map->txq_stats_map[reg_idx]);
+	} else {
+		stats_map->rxq_stats_map[reg_idx] |= qsmr_mask;
+		sxe_hw_rxq_stat_map_set(hw, reg_idx, stats_map->rxq_stats_map[reg_idx]);
+	}
+
+	PMD_LOG_INFO(DRV, "port %u %s queue_id %d stat map to stat reg[%u] "
+		     "%s[%u] 0x%08x ",
+		     (u16)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_reg_idx,
+		     is_rx ? "RQSMR" : "TQSM", reg_idx,
+		     is_rx ? stats_map->rxq_stats_map[reg_idx] :
+		     stats_map->txq_stats_map[reg_idx]);
+
+l_out:
+	return ret;
+}
+
+/* Re-program every RX/TX queue-to-counter mapping register from the
+ * software shadow, e.g. after a device reset wiped the HW state.
+ * (Also drops the redundant trailing return in this void function.)
+ */
+void sxe_queue_stats_map_restore(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_stats_map *stats_map = &(adapter->stats_info.stats_map);
+	u8 reg_idx;
+
+	for (reg_idx = 0; reg_idx < SXE_QUEUE_STATS_MAP_REG_NUM; reg_idx++) {
+		sxe_hw_txq_stat_map_set(hw, reg_idx, stats_map->txq_stats_map[reg_idx]);
+		sxe_hw_rxq_stat_map_set(hw, reg_idx, stats_map->rxq_stats_map[reg_idx]);
+	}
+}
+
+/* Clear every RX/TX queue-to-counter mapping register and, on DPDK
+ * versions that support it, ask ethdev to auto-fill per-queue xstats.
+ * (Also drops the redundant trailing return in this void function.)
+ */
+void sxe_queue_stats_map_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 reg_idx;
+
+#ifdef SET_AUTOFILL_QUEUE_XSTATS
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+	for (reg_idx = 0; reg_idx < SXE_QUEUE_STATS_MAP_REG_NUM; reg_idx++) {
+		sxe_hw_txq_stat_map_set(hw, reg_idx, 0);
+		sxe_hw_rxq_stat_map_set(hw, reg_idx, 0);
+	}
+}
+
diff --git a/drivers/net/sxe/pf/sxe_stats.h b/drivers/net/sxe/pf/sxe_stats.h
new file mode 100644
index 0000000000..792a160753
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_stats.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_STATS_H__
+#define __SXE_STATS_H__
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_core.h>
+
+#include "sxe_dpdk_version.h"
+#include "sxe_hw.h"
+
+#define SXE_STATS_FIELD_NAME_SIZE  50
+
+/* Software-maintained statistics (not read from HW registers). */
+struct sxe_sw_stats {
+	u64 hw_csum_rx_error;  /* RX packets whose HW checksum check failed */
+};
+
+/* Shadow of the queue-to-counter mapping registers (TQSM/RQSMR),
+ * kept so the mapping can be restored after a device reset.
+ */
+struct sxe_stats_map {
+	u32 txq_stats_map[SXE_QUEUE_STATS_MAP_REG_NUM];
+	u32 rxq_stats_map[SXE_QUEUE_STATS_MAP_REG_NUM];
+};
+
+/* Per-adapter statistics state. */
+struct sxe_stats_info {
+	struct sxe_sw_stats  sw_stats;  /* software-counted statistics */
+	struct sxe_mac_stats hw_stats;  /* accumulated MAC HW counters */
+	struct sxe_stats_map stats_map; /* queue stat mapping shadow */
+};
+
+/* Maps an xstat display name to its byte offset inside a stats struct. */
+struct sxe_stats_field {
+	s8  name[SXE_STATS_FIELD_NAME_SIZE];
+	u32 offset;
+};
+
+s32 sxe_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats);
+
+s32 sxe_stats_reset(struct rte_eth_dev *eth_dev);
+
+s32 sxe_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 cnt);
+
+s32 sxe_xstats_reset(struct rte_eth_dev *eth_dev);
+
+
+s32 sxe_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size);
+
+s32 sxe_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+					const ulong *ids,
+					ulong *values, u32 usr_cnt);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const ulong *ids,
+	u32 usr_cnt);
+#else
+s32 sxe_xstats_names_get_by_id(
+	struct rte_eth_dev *eth_dev,
+	const ulong *ids,
+	struct rte_eth_xstat_name *xstats_names,
+	u32 usr_cnt);
+#endif
+
+s32 sxe_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  u16 queue_id,
+				  u8 stat_reg_idx,
+				  u8 is_rx);
+
+void sxe_queue_stats_map_restore(struct rte_eth_dev *eth_dev);
+
+void sxe_queue_stats_map_reset(struct rte_eth_dev *eth_dev);
+
+#endif
+
diff --git a/drivers/net/sxe/pf/sxe_tx.c b/drivers/net/sxe/pf/sxe_tx.c
new file mode 100644
index 0000000000..6b92e6faed
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_tx.c
@@ -0,0 +1,1069 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#endif
+
+#include <rte_net.h>
+
+#include "sxe.h"
+#include "sxe_tx.h"
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue_common.h"
+#include "sxe_tx_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include <rte_vect.h>
+#endif
+#include "sxe_compat_version.h"
+
+#define SXE_TX_DESC_NO_WB 1
+
+#ifdef RTE_LIBRTE_IEEE1588
+#define SXE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
+#else
+#define SXE_TX_IEEE1588_TMST 0
+#endif
+
+#define SXE_TX_OFFLOAD_MASK (			 \
+		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
+		RTE_MBUF_F_TX_OUTER_IPV4 |		 \
+		RTE_MBUF_F_TX_IPV6 |			 \
+		RTE_MBUF_F_TX_IPV4 |			 \
+		RTE_MBUF_F_TX_VLAN |		 \
+		RTE_MBUF_F_TX_IP_CKSUM |		 \
+		RTE_MBUF_F_TX_L4_MASK |		 \
+		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_MACSEC  |      \
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |		 \
+		SXE_TX_IEEE1588_TMST)
+
+#define SXE_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ SXE_TX_OFFLOAD_MASK)
+#define RTE_SXE_MAX_TX_FREE_BUF_SZ 64
+#define SXE_TXD_IDX_SHIFT	4 
+#define SXE_TX_MIN_PKT_LEN	14
+
+extern const struct sxe_txq_ops def_txq_ops;
+
+/* Select the TX burst/prepare callbacks for a queue.
+ *
+ * The simple (and, when built, vector) path is only usable when the
+ * queue has no offloads and rs_thresh allows full 32-packet bursts;
+ * otherwise the full-featured offload path plus prepare hook is used.
+ *
+ * Fixes: stray double semicolon after the offload-path assignment,
+ * missing brace spacing, redundant trailing return.
+ */
+void __rte_cold sxe_tx_function_set(struct rte_eth_dev *dev,
+					sxe_tx_queue_s *txq)
+{
+	/* Offload off and single simple tx code path < 32 use simple tx code path */
+	if ((txq->offloads == 0) &&
+	    (txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST)) {
+		dev->tx_pkt_prepare = NULL;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
+#ifndef DPDK_19_11_6
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+#endif
+		    (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+		    sxe_txq_vec_setup(txq) == 0)) {
+			dev->tx_pkt_burst   = sxe_pkts_vector_xmit;
+			PMD_LOG_INFO(INIT, "using vector tx code path");
+		} else {
+			dev->tx_pkt_burst   = sxe_pkts_simple_xmit;
+			PMD_LOG_INFO(INIT, "using simple tx code path");
+		}
+#else
+		dev->tx_pkt_burst	= sxe_pkts_simple_xmit;
+		PMD_LOG_INFO(INIT, "using simple tx code path");
+#endif
+	} else {
+		dev->tx_pkt_burst   = sxe_pkts_xmit_with_offload;
+		dev->tx_pkt_prepare = sxe_prep_pkts;
+
+		PMD_LOG_INFO(INIT, "using full-featured tx code path");
+		PMD_LOG_INFO(INIT, " - offloads = 0x%" PRIx64,
+					(long unsigned int)txq->offloads);
+		PMD_LOG_INFO(INIT, " - tx_rs_thresh = %d "
+				   "[RTE_PMD_SXE_MAX_TX_BURST=%d]",
+				txq->rs_thresh,
+				RTE_PMD_SXE_MAX_TX_BURST);
+	}
+}
+
+/* Set up TX queue @tx_queue_id with @ring_depth descriptors on
+ * @socket_id; the real work is delegated to __sxe_tx_queue_setup().
+ * Returns the delegate's result.
+ */
+int __rte_cold sxe_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct tx_setup setup;
+
+	setup.dev = dev;
+	setup.desc_num = ring_depth;
+	setup.queue_idx = tx_queue_id;
+	setup.socket_id = socket_id;
+	setup.reg_base_addr = adapter->hw.reg_base_addr;
+	setup.tx_conf = tx_conf;
+
+	return __sxe_tx_queue_setup(&setup, false);
+}
+
+/* Enable the TX unit and start every queue whose start was not
+ * deferred, programming each queue's descriptor thresholds first.
+ */
+static void __rte_cold sxe_tx_start(struct rte_eth_dev *dev)
+{
+	u32 i;
+	sxe_tx_queue_s *txq;
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_tx_enable(hw);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		sxe_hw_tx_desc_thresh_set(hw, txq->reg_idx,
+				txq->wthresh, txq->hthresh, txq->pthresh);
+		/* Queues marked deferred are started later by the app. */
+		if (!txq->tx_deferred_start) {
+			sxe_tx_queue_start(dev, i);
+		}
+	}
+
+	return;
+}
+
+/* Program the TX packet buffer to its default geometry and enable MAC
+ * padding of short frames. The buffer is switched off around the size
+ * and threshold writes — presumably the HW requires the buffer idle
+ * while its geometry changes (TODO confirm against the HW spec).
+ * (Also drops the redundant trailing return in this void function.)
+ */
+static void sxe_tx_buf_configure(struct sxe_hw *hw)
+{
+	sxe_hw_tx_pkt_buf_switch(hw, false);
+
+	sxe_hw_tx_pkt_buf_size_configure(hw, 0);
+
+	sxe_hw_tx_pkt_buf_thresh_configure(hw, 0, false);
+
+	sxe_hw_tx_pkt_buf_switch(hw, true);
+
+	sxe_hw_mac_pad_enable(hw);
+}
+
+/* Device-start TX configuration: multi-queue mode, packet buffer,
+ * per-queue descriptor rings, then enable and start the queues.
+ */
+void __rte_cold sxe_tx_configure(struct rte_eth_dev *dev)
+{
+	u16 i;
+	u64 queue_dma_addr;
+	u32 ring_size;
+	sxe_tx_queue_s *txq;
+	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_multi_queue_tx_configure(dev);
+
+	sxe_tx_buf_configure(hw);
+
+	/* Point each HW ring at its queue's descriptor memory. */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		queue_dma_addr = txq->base_addr;
+		ring_size = txq->ring_depth * sizeof(sxe_tx_data_desc_u);
+
+		sxe_hw_tx_ring_desc_configure(hw, ring_size, queue_dma_addr,
+						txq->reg_idx);
+	}
+
+	sxe_tx_start(dev);
+
+	return;
+}
+
+/* Fill one data descriptor from the first segment of *pkts
+ * (simple path: single-segment packets, fixed command flags).
+ */
+static inline void sxe_single_desc_fill(volatile sxe_tx_data_desc_u *desc,
+				struct rte_mbuf **pkts)
+{
+	struct rte_mbuf *mbuf = *pkts;
+	u32 len = mbuf->data_len;
+
+	desc->read.buffer_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(mbuf));
+	desc->read.cmd_type_len =
+			rte_cpu_to_le_32((u32)SXE_TX_DESC_FLAGS | len);
+	desc->read.olinfo_status =
+			rte_cpu_to_le_32(len << SXE_TX_DESC_PAYLEN_SHIFT);
+
+	/* Warm the pool pointer for the upcoming free. */
+	rte_sxe_prefetch(&mbuf->pool);
+}
+
+#define TX4_PER_LOOP 4
+#define TX4_PER_LOOP_MASK (TX4_PER_LOOP - 1)
+
+/* Fill four consecutive data descriptors from four mbufs
+ * (unrolled helper for the simple TX path).
+ */
+static inline void sxe_four_desc_fill(volatile sxe_tx_data_desc_u *desc,
+			struct rte_mbuf **pkts)
+{
+	s32 i;
+	u64 buf_dma_addr;
+	u32 pkt_len;
+
+	for (i = 0; i < TX4_PER_LOOP; ++i, ++desc, ++pkts) {
+		buf_dma_addr = rte_mbuf_data_iova(*pkts);
+		pkt_len = (*pkts)->data_len;
+
+		desc->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+		desc->read.cmd_type_len =
+			rte_cpu_to_le_32((u32)SXE_TX_DESC_FLAGS | pkt_len);
+
+		desc->read.olinfo_status =
+			rte_cpu_to_le_32(pkt_len << SXE_TX_DESC_PAYLEN_SHIFT);
+
+		/* Warm the pool pointer for the upcoming free. */
+		rte_sxe_prefetch(&(*pkts)->pool);
+	}
+
+	return;
+}
+
+/* Fill the descriptor ring starting at next_to_use with @pkts_num
+ * packets: a 4-at-a-time unrolled main part plus a scalar remainder.
+ * Caller guarantees the ring has room (no wrap within this call).
+ */
+static inline void sxe_tx_ring_fill(sxe_tx_queue_s *txq,
+				struct rte_mbuf **pkts, u16 pkts_num)
+{
+	u32 i, j, mainpart, leftover;
+	volatile sxe_tx_data_desc_u *desc =
+					&(txq->desc_ring[txq->next_to_use]);
+	struct sxe_tx_buffer *buffer = &(txq->buffer_ring[txq->next_to_use]);
+
+	/* Split into a multiple-of-4 part and a 0..3 packet tail. */
+	mainpart = (pkts_num & ((u32) ~TX4_PER_LOOP_MASK));
+	leftover = (pkts_num & ((u32)  TX4_PER_LOOP_MASK));
+
+	for (i = 0; i < mainpart; i += TX4_PER_LOOP) {
+		for (j = 0; j < TX4_PER_LOOP; ++j) {
+			(buffer + i + j)->mbuf = *(pkts + i + j);
+		}
+		sxe_four_desc_fill(desc + i, pkts + i);
+	}
+
+	if (unlikely(leftover > 0)) {
+		for (i = 0; i < leftover; ++i) {
+			(buffer + mainpart + i)->mbuf = *(pkts + mainpart + i);
+			sxe_single_desc_fill(desc + mainpart + i,
+						pkts + mainpart + i);
+		}
+	}
+
+	return;
+}
+
+/* Simple-path TX completion: if the descriptor at next_dd reports DD,
+ * release one rs_thresh-sized batch of mbufs (bulk-returned to their
+ * mempool when they share one) and advance next_dd/desc_free_num.
+ * Returns rs_thresh when a batch was freed, 0 when HW is not done.
+ */
+s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq)
+{
+	s32 ret = 0;
+	u32 status;
+	s32 i, mbuf_free_num = 0;
+	struct sxe_tx_buffer *buffer;
+	struct rte_mbuf *mbuf, *free_mbuf[RTE_SXE_MAX_TX_FREE_BUF_SZ];
+
+	status = txq->desc_ring[txq->next_dd].wb.status;
+	if (!(status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD))) {
+		ret = 0;
+		goto l_end;
+	}
+
+	/* First buffer of the completed batch ending at next_dd. */
+	buffer = &(txq->buffer_ring[txq->next_dd - (txq->rs_thresh - 1)]);
+
+	for (i = 0; i < txq->rs_thresh; ++i, ++buffer) {
+		mbuf = rte_pktmbuf_prefree_seg(buffer->mbuf);
+		buffer->mbuf = NULL;
+
+		/* NULL means the mbuf is still referenced elsewhere. */
+		if (unlikely(mbuf == NULL)) {
+			continue;
+		}
+
+		/* Flush the batch when it is full or the pool changes,
+		 * since rte_mempool_put_bulk needs a single pool.
+		 */
+		if (mbuf_free_num >= RTE_SXE_MAX_TX_FREE_BUF_SZ ||
+		    (mbuf_free_num > 0 && mbuf->pool != free_mbuf[0]->pool)) {
+			rte_mempool_put_bulk(free_mbuf[0]->pool,
+					     (void **)free_mbuf, mbuf_free_num);
+			mbuf_free_num = 0;
+		}
+
+		free_mbuf[mbuf_free_num++] = mbuf;
+	}
+
+	if (mbuf_free_num > 0) {
+		rte_mempool_put_bulk(free_mbuf[0]->pool,
+					(void **)free_mbuf, mbuf_free_num);
+	}
+
+	/* Advance the completion cursor, wrapping to the first RS slot. */
+	txq->next_dd       += txq->rs_thresh;
+	txq->desc_free_num += txq->rs_thresh;
+	if (txq->next_dd >= txq->ring_depth) {
+		txq->next_dd = txq->rs_thresh - 1;
+	}
+
+	ret = txq->rs_thresh;
+
+l_end:
+	return ret;
+}
+
+/* Simple-path burst transmit of up to RTE_PMD_SXE_MAX_TX_BURST packets:
+ * reclaim completed descriptors if below free_thresh, clamp to the
+ * available descriptors, fill the ring (splitting at the wrap point),
+ * set RS bits at rs_thresh boundaries, and ring the tail doorbell.
+ * Returns the number of packets actually queued.
+ */
+static inline u16 sxe_pkts_xmit(void *tx_queue,
+			struct rte_mbuf **tx_pkts, u16 xmit_pkts_num)
+{
+	u16 n = 0;
+	sxe_tx_queue_s *txq = (sxe_tx_queue_s *)tx_queue;
+	volatile sxe_tx_data_desc_u *desc_ring = txq->desc_ring;
+
+	if (txq->desc_free_num < txq->free_thresh) {
+		sxe_tx_bufs_free(txq);
+	}
+
+	xmit_pkts_num = (u16)RTE_MIN(txq->desc_free_num, xmit_pkts_num);
+	if (unlikely(xmit_pkts_num == 0)) {
+		LOG_DEBUG("simple xmit: not enough free desc, "
+			"free_desc=%u, need_xmit_pkts=%u",
+			txq->desc_free_num, xmit_pkts_num);
+		goto l_end;
+	}
+
+	txq->desc_free_num -= xmit_pkts_num;
+
+	/* Burst crosses the ring end: fill up to the end first. */
+	if ((txq->next_to_use + xmit_pkts_num) > txq->ring_depth) {
+		n = txq->ring_depth - txq->next_to_use;
+
+		sxe_tx_ring_fill(txq, tx_pkts, n);
+
+		desc_ring[txq->next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		txq->next_rs = (u16)(txq->rs_thresh - 1);
+
+		txq->next_to_use = 0;
+	}
+
+	/* Remaining packets (the whole burst when no wrap occurred). */
+	sxe_tx_ring_fill(txq, tx_pkts + n, (u16)(xmit_pkts_num - n));
+	txq->next_to_use = (u16)(txq->next_to_use + (xmit_pkts_num - n));
+
+	/* Request writeback at the next rs_thresh boundary we crossed. */
+	if (txq->next_to_use > txq->next_rs) {
+		desc_ring[txq->next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		txq->next_rs = (u16)(txq->next_rs + txq->rs_thresh);
+		if (txq->next_rs >= txq->ring_depth) {
+			txq->next_rs = (u16)(txq->rs_thresh - 1);
+		}
+	}
+
+	if (txq->next_to_use >= txq->ring_depth) {
+		txq->next_to_use = 0;
+	}
+
+	/* Descriptors must be visible before the tail write. */
+	rte_wmb();
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(txq->next_to_use)),
+							txq->tdt_reg_addr);
+
+l_end:
+	return xmit_pkts_num;
+}
+
+/* Simple-path TX burst entry: transmit in chunks of at most
+ * RTE_PMD_SXE_MAX_TX_BURST packets via sxe_pkts_xmit(), stopping early
+ * when descriptors run out. Returns the number of packets queued.
+ *
+ * Fixes: the debug log passed queue->port_id for both the port and the
+ * queue specifier; the second argument is now the queue index.
+ */
+u16 sxe_pkts_simple_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	sxe_tx_queue_s *queue = tx_queue;
+	u16 ret, xmit_pkts_num, need_xmit_pkts;
+	UNUSED(queue);
+
+	if (likely(pkts_num <= RTE_PMD_SXE_MAX_TX_BURST)) {
+		xmit_pkts_num = sxe_pkts_xmit(tx_queue, tx_pkts, pkts_num);
+		goto l_end;
+	}
+
+	/* When pkts_num > 32, it needs to be sent in a loop */
+	xmit_pkts_num = 0;
+	while (pkts_num) {
+		need_xmit_pkts = (u16)RTE_MIN(pkts_num, RTE_PMD_SXE_MAX_TX_BURST);
+
+		/* Single chunk transmit */
+		ret = sxe_pkts_xmit(tx_queue, &(tx_pkts[xmit_pkts_num]),
+							need_xmit_pkts);
+
+		pkts_num      -= ret;
+		xmit_pkts_num += ret;
+
+		/* Don't have enough desc */
+		if (ret < need_xmit_pkts) {
+			break;
+		}
+	}
+
+	LOG_DEBUG("simple xmit:port_id=%u, queue_id=%u, "
+		"remain_pkts_num=%d, xmit_pkts_num=%d",
+		queue->port_id, queue->queue_idx,
+		pkts_num, xmit_pkts_num);
+
+l_end:
+	return xmit_pkts_num;
+}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+/* Vector-path TX burst entry: transmit in chunks of at most rs_thresh
+ * packets via __sxe_pkts_vector_xmit(), stopping early when a chunk is
+ * only partially accepted. Returns the number of packets queued.
+ */
+u16 sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   u16 pkts_num)
+{
+	u16 xmit_pkts_num = 0;
+	sxe_tx_queue_s *queue = (sxe_tx_queue_s *)tx_queue;
+
+	while (pkts_num) {
+		u16 ret, need_xmit_pkts;
+
+		need_xmit_pkts = (u16)RTE_MIN(pkts_num, queue->rs_thresh);
+		ret = __sxe_pkts_vector_xmit(tx_queue, &tx_pkts[xmit_pkts_num],
+				need_xmit_pkts);
+
+		xmit_pkts_num += ret;
+		pkts_num -= ret;
+		/* Partial chunk means the ring is out of descriptors. */
+		if (ret < need_xmit_pkts) {
+			break;
+		}
+	}
+
+	return xmit_pkts_num;
+}
+#endif
+
+/* tx_pkt_prepare callback: validate each mbuf (segment count against
+ * the HW limit, supported offload flags, minimum frame length) and run
+ * the Intel checksum pre-processing. On the first invalid packet,
+ * rte_errno is set and its index is returned; otherwise returns
+ * @pkts_num.
+ */
+u16 sxe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	s32 i, ret;
+	u64 ol_flags;
+	struct rte_mbuf *mbuf;
+	sxe_tx_queue_s *txq = (sxe_tx_queue_s *)tx_queue;
+
+	/* Check if the pkts is legal */
+	for (i = 0; i < pkts_num; i++) {
+		mbuf = tx_pkts[i];
+		ol_flags = mbuf->ol_flags;
+
+		/* Segment budget is reduced by the write-back threshold —
+		 * presumably a HW descriptor-chain limit; confirm with spec.
+		 */
+		if (mbuf->nb_segs > SXE_TX_MAX_SEG - txq->wthresh) {
+			rte_errno = EINVAL;
+			goto l_end;
+		}
+
+		/* Check offload */
+		if (ol_flags & SXE_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			goto l_end;
+		}
+
+		if (mbuf->pkt_len < SXE_TX_MIN_PKT_LEN) {
+			rte_errno = EINVAL;
+			goto l_end;
+		}
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+		ret = rte_validate_tx_offload(mbuf);
+		if (ret != 0) {
+			rte_errno = -ret;
+			goto l_end;
+		}
+#endif
+		/* Fix up pseudo-header checksums for HW csum/TSO offload. */
+		ret = rte_net_intel_cksum_prepare(mbuf);
+		if (ret != 0) {
+			rte_errno = -ret;
+			goto l_end;
+		}
+	}
+
+l_end:
+	return i;
+}
+
+/* Check whether a new context descriptor is needed for @pkt's offloads.
+ *
+ * Copies the packet's offload fields into @ol_info, then probes the
+ * two-entry context cache: first the current slot, then the other one
+ * (flipping ctx_curr as a side effect). Returns false when a cached
+ * context matches (no descriptor needed), true when a new context
+ * descriptor must be written into the slot now selected by ctx_curr.
+ */
+static inline bool sxe_cache_ctxt_desc_match(
+				sxe_tx_queue_s *txq,
+				struct rte_mbuf *pkt,
+				u64 flags,
+				union sxe_tx_offload *ol_info)
+{
+	bool ret;
+
+	ol_info->l2_len       = pkt->l2_len;
+	ol_info->l3_len       = pkt->l3_len;
+	ol_info->l4_len       = pkt->l4_len;
+	ol_info->vlan_tci     = pkt->vlan_tci;
+	ol_info->tso_segsz    = pkt->tso_segsz;
+	ol_info->outer_l2_len = pkt->outer_l2_len;
+	ol_info->outer_l3_len = pkt->outer_l3_len;
+
+	/* Probe the current cache slot: flags equal and all offload
+	 * fields equal under the slot's relevance mask.
+	 */
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & ol_info->data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & ol_info->data[1])))) {
+
+		ret = false;
+		goto l_end;
+	}
+
+	/* Miss: switch to the other slot and probe it the same way. */
+	txq->ctx_curr ^= 1;
+
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & ol_info->data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & ol_info->data[1])))) {
+
+		ret = false;
+		goto l_end;
+	}
+
+	/* Both slots missed: caller must emit a context descriptor. */
+	ret = true;
+
+l_end:
+	return ret;
+}
+
+/* Write a TX context descriptor for the offloads in @ol_flags /
+ * @tx_offload and record them in the context cache slot ctx_curr,
+ * together with the mask of fields that are relevant for future
+ * cache-hit comparisons in sxe_cache_ctxt_desc_match().
+ */
+static inline void sxe_ctxt_desc_fill(sxe_tx_queue_s *txq,
+			volatile struct sxe_tx_context_desc *ctx_txd,
+			u64 ol_flags,
+			union sxe_tx_offload tx_offload,
+			__rte_unused u64 *mdata)
+{
+	u32 type_tucmd_mlhl;
+	u32 mss_l4len_idx = 0;
+	u32 ctx_idx;
+	u32 vlan_macip_lens;
+	union sxe_tx_offload tx_offload_mask;
+	u32 seqnum_seed = 0;
+
+	ctx_idx = txq->ctx_curr;
+	tx_offload_mask.data[0] = 0;
+	tx_offload_mask.data[1] = 0;
+	type_tucmd_mlhl = 0;
+
+	/* Encode which of the two context slots this descriptor fills. */
+	mss_l4len_idx |= (ctx_idx << SXE_TXD_IDX_SHIFT);
+
+	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+		tx_offload_mask.vlan_tci |= ~0;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		/* TSO: IPv4 vs IPv6 variant plus MSS and L4 header length. */
+		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+			type_tucmd_mlhl = SXE_TX_CTXTD_TUCMD_IPV4 |
+				SXE_TX_CTXTD_TUCMD_L4T_TCP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+		} else {
+			type_tucmd_mlhl = SXE_TX_CTXTD_TUCMD_IPV6 |
+				SXE_TX_CTXTD_TUCMD_L4T_TCP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+		}
+		mss_l4len_idx |= tx_offload.tso_segsz << SXE_TX_CTXTD_MSS_SHIFT;
+		mss_l4len_idx |= tx_offload.l4_len << SXE_TX_CTXTD_L4LEN_SHIFT;
+
+		tx_offload_mask.l2_len |= ~0;
+		tx_offload_mask.l3_len |= ~0;
+		tx_offload_mask.l4_len |= ~0;
+		tx_offload_mask.tso_segsz |= ~0;
+
+	} else {
+		/* Checksum-only offloads. */
+		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+			type_tucmd_mlhl = SXE_TX_CTXTD_TUCMD_IPV4;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+		}
+
+		switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+		case RTE_MBUF_F_TX_UDP_CKSUM:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_UDP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+				<< SXE_TX_CTXTD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case RTE_MBUF_F_TX_TCP_CKSUM:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_TCP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+				<< SXE_TX_CTXTD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case RTE_MBUF_F_TX_SCTP_CKSUM:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_SCTP |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
+				<< SXE_TX_CTXTD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		default:
+			type_tucmd_mlhl |= SXE_TX_CTXTD_TUCMD_L4T_RSV |
+				SXE_TX_CTXTD_DTYP_CTXT;
+			break;
+		}
+	}
+
+	vlan_macip_lens = tx_offload.l3_len;
+	vlan_macip_lens |= ((u32)tx_offload.vlan_tci << SXE_TX_CTXTD_VLAN_SHIFT);
+
+	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
+		/* Tunnel case: outer lengths go into the seed field and
+		 * the outer L2 length takes the MACLEN slot.
+		 */
+		tx_offload_mask.outer_l2_len |= ~0;
+		tx_offload_mask.outer_l3_len |= ~0;
+		tx_offload_mask.l2_len |= ~0;
+		seqnum_seed |= tx_offload.outer_l3_len
+			       << SXE_TX_CTXTD_OUTER_IPLEN_SHIFT;
+		seqnum_seed |= tx_offload.l2_len
+			       << SXE_TX_CTXTD_TUNNEL_LEN_SHIFT;
+		vlan_macip_lens |= (tx_offload.outer_l2_len <<
+				    	SXE_TX_CTXTD_MACLEN_SHIFT);
+	} else {
+		vlan_macip_lens |= (tx_offload.l2_len <<
+				    	SXE_TX_CTXTD_MACLEN_SHIFT);
+	}
+
+	/* Record the context (values masked to the relevant fields) for
+	 * future cache-hit comparisons.
+	 */
+	txq->ctx_cache[ctx_idx].flags = ol_flags;
+	txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
+		tx_offload_mask.data[0] & tx_offload.data[0];
+	txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
+		tx_offload_mask.data[1] & tx_offload.data[1];
+	txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
+
+	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+	ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
+	ctx_txd->seqnum_seed     = seqnum_seed;
+
+	return;
+}
+
+/* Translate mbuf TX offload flags into descriptor checksum option bits. */
+static inline u32 sxe_tx_desc_csum_info_setup(u64 ol_flags)
+{
+	u32 csum_bits = 0;
+
+	/* An L4 checksum request — or TSO, which implies one — sets TXSM. */
+	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM ||
+	    (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+		csum_bits |= SXE_TXD_POPTS_TXSM;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+		csum_bits |= SXE_TXD_POPTS_IXSM;
+	}
+
+	return csum_bits;
+}
+
+/* Translate mbuf TX offload flags into descriptor command-type bits. */
+static inline u32 sxe_tx_desc_cmdtype_setup(u64 ol_flags)
+{
+	u32 cmd = 0;
+
+	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+		cmd |= SXE_TX_DESC_VLE;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		cmd |= SXE_TXD_DCMD_TSE;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
+		cmd |= (1 << SXE_TX_OUTERIPCS_SHIFT);
+	}
+
+#ifdef SXE_DPDK_MACSEC
+	if (ol_flags & RTE_MBUF_F_TX_MACSEC) {
+		cmd |= SXE_TXD_MAC_LINKSEC;
+	}
+#endif
+
+	return cmd;
+}
+
+/* Offload-path TX completion: check the DD bit on the last descriptor
+ * of the packet that ends the next rs_thresh window; when set, advance
+ * next_to_clean past it and credit the freed descriptors. Returns 0 on
+ * success or -SXE_TX_DESC_NO_WB when HW has not written it back yet.
+ */
+static inline s32 sxe_xmit_cleanup(sxe_tx_queue_s *txq)
+{
+	s32 ret = 0;
+	u32 wb_status;
+	u16 ntc = txq->next_to_clean;
+	u16 ring_depth = txq->ring_depth;
+	u16 desc_to_clean_to, nb_tx_to_clean;
+	struct sxe_tx_buffer *buffer_ring = txq->buffer_ring;
+	volatile sxe_tx_data_desc_u *desc_ring = txq->desc_ring;
+
+	PMD_INIT_FUNC_TRACE();
+
+	desc_to_clean_to = (u16)(ntc + txq->rs_thresh);
+
+	if (desc_to_clean_to >= ring_depth) {
+		desc_to_clean_to = (u16)(desc_to_clean_to - ring_depth);
+	}
+
+	/* Snap to the last descriptor of the packet covering that slot. */
+	desc_to_clean_to = buffer_ring[desc_to_clean_to].last_id;
+
+	wb_status = desc_ring[desc_to_clean_to].wb.status;
+	if (!(wb_status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD))) {
+		LOG_DEBUG("TX descriptor %4u is not done"
+				"(port=%d queue=%d)",
+				desc_to_clean_to,
+				txq->port_id, txq->queue_idx);
+
+		ret = -SXE_TX_DESC_NO_WB;
+		goto l_end;
+	}
+
+	/* Count cleaned descriptors, accounting for ring wrap. */
+	if (ntc > desc_to_clean_to) {
+		nb_tx_to_clean = (u16)((ring_depth - ntc) +
+						desc_to_clean_to);
+	} else {
+		nb_tx_to_clean = (u16)(desc_to_clean_to - ntc);
+	}
+
+	LOG_DEBUG("Cleaning %4u TX descriptors: %4u to %4u "
+			"(port=%d queue=%d)",
+			nb_tx_to_clean, ntc, desc_to_clean_to,
+			txq->port_id, txq->queue_idx);
+
+	/* Clear the DD bit so this slot is not treated as done again. */
+	desc_ring[desc_to_clean_to].wb.status = 0;
+
+	txq->next_to_clean = desc_to_clean_to;
+
+	txq->desc_free_num = (u16)(txq->desc_free_num + nb_tx_to_clean);
+
+l_end:
+	return ret;
+}
+
+/* Reclaim descriptors until at least @need_desc_num are free.
+ * Runs one cleanup pass, and keeps cleaning in a loop only in the
+ * (slow) case where a single packet needs more than rs_thresh
+ * descriptors. Returns 0 on success or the cleanup error when HW has
+ * no completed writeback available.
+ */
+static inline s32 sxe_tx_pkt_desc_clean(
+			sxe_tx_queue_s *txq,
+			u32 need_desc_num)
+{
+	s32 ret = 0;
+
+	LOG_DEBUG("Not enough free TX descriptors "
+			"need_desc_num=%4u nb_free=%4u "
+			"(port=%d queue=%d)",
+			need_desc_num, txq->desc_free_num,
+			txq->port_id, txq->queue_idx);
+
+	ret = sxe_xmit_cleanup(txq);
+	if (ret) {
+		goto l_end;
+	}
+
+	if (unlikely(need_desc_num > txq->rs_thresh)) {
+		LOG_DEBUG(
+			"The number of descriptors needed to "
+			"transmit the packet exceeds the "
+			"RS bit threshold. This will impact "
+			"performance."
+			"need_desc_num=%4u nb_free=%4u "
+			"rs_thresh=%4u. "
+			"(port=%d queue=%d)",
+			need_desc_num, txq->desc_free_num,
+			txq->rs_thresh,
+			txq->port_id, txq->queue_idx);
+
+		/* Clean up enough desc */
+		while (need_desc_num > txq->desc_free_num) {
+			ret = sxe_xmit_cleanup(txq);
+			if (ret) {
+				goto l_end;
+			}
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+/* Full-featured TX burst: per-packet offload handling with context
+ * descriptors, multi-segment support and RS-bit batching.
+ *
+ * For each packet: resolve whether a new context descriptor is needed,
+ * reserve (and if necessary reclaim) descriptors, emit the optional
+ * context descriptor plus one data descriptor per segment, then set
+ * EOP/RS flags. The tail doorbell is rung once at the end. Returns the
+ * number of packets fully queued.
+ */
+u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	s32 ret;
+	u64 ol_req;
+	bool new_ctx;
+	u64 buf_dma_addr;
+	struct rte_mbuf *pkt;
+	struct rte_mbuf *m_seg;
+	union sxe_tx_offload ol_info;
+	sxe_tx_queue_s  *txq = tx_queue;
+	u32 pkt_len, cmd_type_len, olinfo_status;
+	u16 need_desc_num, last_desc_idx, xmit_num, ntu, seg_len;
+	volatile sxe_tx_data_desc_u *tail_desc = NULL;
+	volatile sxe_tx_data_desc_u *desc_ring, *desc;
+	struct sxe_tx_buffer *buffer_ring, *buffer, *next_buffer;
+
+	ol_info.data[SXE_CTXT_DESC_0] = 0;
+	ol_info.data[SXE_CTXT_DESC_1] = 0;
+	ntu         = txq->next_to_use;
+	desc_ring   = txq->desc_ring;
+	buffer_ring = txq->buffer_ring;
+	buffer      = &buffer_ring[ntu];
+
+	if (txq->desc_free_num < txq->free_thresh) {
+		sxe_xmit_cleanup(txq);
+	}
+
+	/* Refresh cache, pre fetch data to cache */
+	rte_sxe_prefetch(&buffer->mbuf->pool);
+
+	for (xmit_num = 0; xmit_num < pkts_num; xmit_num++) {
+		new_ctx = false;
+		pkt = *tx_pkts++;
+		pkt_len = pkt->pkt_len;
+
+		/* Does this packet's offload set require a (new) context? */
+		ol_req = pkt->ol_flags & SXE_TX_OFFLOAD_MASK;
+		if (ol_req) {
+			new_ctx = sxe_cache_ctxt_desc_match(txq, pkt, ol_req, &ol_info);
+		}
+
+		/* Descriptors needed: one per segment, +1 for the context. */
+		need_desc_num = (u16)(pkt->nb_segs + new_ctx);
+
+		/* Close the previous RS window before it grows past
+		 * rs_thresh outstanding descriptors.
+		 */
+		if (tail_desc != NULL &&
+		    need_desc_num + txq->desc_used_num >= txq->rs_thresh) {
+			tail_desc->read.cmd_type_len |=
+				rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		}
+
+		last_desc_idx = (u16) (ntu + need_desc_num - 1);
+
+		if (last_desc_idx >= txq->ring_depth) {
+			last_desc_idx = (u16) (last_desc_idx - txq->ring_depth);
+		}
+
+		LOG_DEBUG("port_id=%u queue_id=%u pktlen=%u"
+			   " next_to_ues=%u last_desc_idx=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_idx,
+			   (unsigned) pkt_len,
+			   (unsigned) ntu,
+			   (unsigned) last_desc_idx);
+
+		if (need_desc_num > txq->desc_free_num) {
+			ret = sxe_tx_pkt_desc_clean(txq, need_desc_num);
+			if(ret) {
+				/* Nothing sent yet: report zero without
+				 * touching the tail register.
+				 */
+				if (0 == xmit_num) {
+					goto l_end;
+				}
+
+				goto l_end_of_tx;
+			}
+		}
+
+		cmd_type_len = SXE_TX_DESC_TYPE_DATA | SXE_TX_DESC_IFCS;
+#ifdef RTE_LIBRTE_IEEE1588
+		if (pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+			cmd_type_len |= SXE_TXD_MAC_1588;
+		}
+#endif
+
+		olinfo_status = 0;
+		if (ol_req) {
+
+			/* TSO: PAYLEN excludes the headers. */
+			if (pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+				pkt_len -= (ol_info.l2_len +
+					ol_info.l3_len + ol_info.l4_len);
+			}
+
+			if (new_ctx) {
+				volatile struct sxe_tx_context_desc *ctx_desc;
+
+				ctx_desc = (volatile struct
+					sxe_tx_context_desc *) &desc_ring[ntu];
+
+				next_buffer = &buffer_ring[buffer->next_id];
+				rte_prefetch0(&next_buffer->mbuf->pool);
+
+				/* The slot may still hold an mbuf from a
+				 * previous ring traversal; release it.
+				 */
+				if (buffer->mbuf != NULL) {
+					rte_pktmbuf_free_seg(buffer->mbuf);
+					buffer->mbuf = NULL;
+				}
+
+				sxe_ctxt_desc_fill(txq, ctx_desc, ol_req,
+						ol_info, NULL);
+
+				buffer->last_id = last_desc_idx;
+				ntu = buffer->next_id;
+				buffer = next_buffer;
+			}
+
+			LOG_DEBUG("tx need offload, port_id=%u "
+			"queue_id=%u pktlen=%u, ctxt_id=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_idx,
+			   (unsigned) pkt_len,
+			   (unsigned) txq->ctx_curr);
+
+			cmd_type_len  |= sxe_tx_desc_cmdtype_setup(pkt->ol_flags);
+			olinfo_status |= sxe_tx_desc_csum_info_setup(pkt->ol_flags);
+			olinfo_status |= txq->ctx_curr << SXE_TXD_IDX_SHIFT;
+		}
+		olinfo_status |= (pkt_len << SXE_TX_DESC_PAYLEN_SHIFT);
+
+		/* One data descriptor per segment of the chain. */
+		m_seg = pkt;
+		do {
+			desc = &desc_ring[ntu];
+			next_buffer = &buffer_ring[buffer->next_id];
+
+			rte_prefetch0(&next_buffer->mbuf->pool);
+			if (buffer->mbuf != NULL) {
+				rte_pktmbuf_free_seg(buffer->mbuf);
+			}
+
+			buffer->mbuf = m_seg;
+
+			seg_len = m_seg->data_len;
+
+			buf_dma_addr = rte_mbuf_data_iova(m_seg);
+			desc->read.buffer_addr =
+				rte_cpu_to_le_64(buf_dma_addr);
+			desc->read.cmd_type_len =
+				rte_cpu_to_le_32(cmd_type_len | seg_len);
+			desc->read.olinfo_status =
+				rte_cpu_to_le_32(olinfo_status);
+			buffer->last_id = last_desc_idx;
+			ntu = buffer->next_id;
+			buffer = next_buffer;
+			m_seg = m_seg->next;
+		} while (m_seg != NULL);
+
+		/* Close the packet (EOP) and account its descriptors. */
+		cmd_type_len |= SXE_TX_DESC_EOP_MASK;
+		txq->desc_used_num += need_desc_num;
+		txq->desc_free_num -= need_desc_num;
+
+		if (txq->desc_used_num >= txq->rs_thresh) {
+			LOG_DEBUG("Setting RS bit on TXD id="
+					"%4u (port=%d queue=%d)",
+					last_desc_idx, txq->port_id, txq->queue_idx);
+
+			cmd_type_len |= SXE_TX_DESC_RS_MASK;
+
+			txq->desc_used_num = 0;
+			tail_desc = NULL;
+		} else {
+			/* Remember the last descriptor so RS can be set
+			 * retroactively when the window fills.
+			 */
+			tail_desc = desc;
+		}
+
+		desc->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+	}
+
+l_end_of_tx:
+	if (tail_desc != NULL)
+		tail_desc->read.cmd_type_len |= rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+
+	/* Descriptors must be globally visible before the tail write. */
+	rte_wmb();
+
+	LOG_DEBUG("port_id=%u queue_idx=%u next_to_use=%u xmit_num=%u",
+		   (unsigned) txq->port_id, (unsigned) txq->queue_idx,
+		   (unsigned) ntu, (unsigned) xmit_num);
+
+	rte_write32_wc_relaxed(ntu, txq->tdt_reg_addr);
+
+	txq->next_to_use = ntu;
+
+l_end:
+	return xmit_num;
+}
+
+/* Public full-featured TX burst entry; thin wrapper over the shared
+ * implementation so PF and VF paths can reuse it.
+ */
+u16 sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	return __sxe_pkts_xmit_with_offload(tx_queue, tx_pkts, pkts_num);
+}
+
+/* Full-path TX done cleanup: walk the buffer ring from the slot after
+ * next_to_use, freeing mbufs of completed packets until @free_cnt
+ * packets have been released (0 means up to a whole ring's worth),
+ * running HW writeback cleanup between passes. Returns the number of
+ * packets freed.
+ *
+ * Fixes: the inner loop condition was parenthesized as
+ * "i < (a && b && c)", comparing i against a 0/1 boolean and thus
+ * limiting every pass to a single descriptor.
+ */
+u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt)
+{
+	u32 pkt_cnt;
+	u16 i, ntu, tx_id;
+	u16 nb_tx_free_last;
+	u16 nb_tx_to_clean;
+	struct sxe_tx_buffer *buffer_ring = txq->buffer_ring;
+
+	ntu    = txq->next_to_use;
+	tx_id  = buffer_ring[ntu].next_id;
+
+	if (txq->desc_free_num == 0 && sxe_xmit_cleanup(txq)) {
+		pkt_cnt = 0;
+		goto l_end;
+	}
+
+	nb_tx_to_clean  = txq->desc_free_num;
+	nb_tx_free_last = txq->desc_free_num;
+
+	if (!free_cnt) {
+		free_cnt = txq->ring_depth;
+	}
+
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean && pkt_cnt < free_cnt &&
+			tx_id != ntu; i++) {
+			if (buffer_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(buffer_ring[tx_id].mbuf);
+				buffer_ring[tx_id].mbuf = NULL;
+
+				/* Count a packet only at its last segment. */
+				pkt_cnt += (buffer_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = buffer_ring[tx_id].next_id;
+		}
+
+		if (txq->rs_thresh > txq->ring_depth - txq->desc_free_num ||
+				tx_id == ntu) {
+			break;
+		}
+
+		if (pkt_cnt < free_cnt) {
+			if (sxe_xmit_cleanup(txq)) {
+				break;
+			}
+
+			/* Descriptors newly credited by the cleanup pass. */
+			nb_tx_to_clean = txq->desc_free_num - nb_tx_free_last;
+			nb_tx_free_last = txq->desc_free_num;
+		}
+	}
+
+l_end:
+	return pkt_cnt;
+}
+
+/* Simple-path TX done cleanup: release completed mbufs in
+ * rs_thresh-sized batches, up to @free_cnt descriptors (0 selects the
+ * whole ring). Returns the number of descriptors processed.
+ */
+int sxe_tx_done_cleanup_simple(sxe_tx_queue_s *txq, u32 free_cnt)
+{
+	int freed;
+	int total = 0;
+	int target;
+
+	if (free_cnt == 0 || free_cnt > txq->ring_depth) {
+		free_cnt = txq->ring_depth;
+	}
+
+	/* Round the budget down to whole rs_thresh batches. */
+	target = free_cnt - free_cnt % txq->rs_thresh;
+
+	while (total < target) {
+		/* Fewer than a batch in flight: nothing more to reclaim. */
+		if (txq->ring_depth - txq->desc_free_num < txq->rs_thresh) {
+			break;
+		}
+
+		freed = sxe_tx_bufs_free(txq);
+		if (freed == 0) {
+			break;
+		}
+
+		total += freed;
+	}
+
+	return total;
+}
+
+/* Public TX done-cleanup entry: delegate to the common helper and log
+ * any failure before passing the result through.
+ */
+int sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	s32 ret = __sxe_tx_done_cleanup(tx_queue, free_cnt);
+
+	if (ret) {
+		PMD_LOG_ERR(INIT, "tx cleanup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+/* ethdev tx_descriptor_status callback; delegates to the common
+ * implementation shared with the VF path.
+ */
+int sxe_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	return __sxe_tx_descriptor_status(tx_queue, offset);
+}
diff --git a/drivers/net/sxe/pf/sxe_tx.h b/drivers/net/sxe/pf/sxe_tx.h
new file mode 100644
index 0000000000..78249c3340
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_tx.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_TX_H__
+#define __SXE_TX_H__
+
+#include <rte_mbuf_core.h>
+
+#include "sxe_queue.h"
+
+/* Upper bound on a single TX burst. */
+#define RTE_PMD_SXE_MAX_TX_BURST 32
+
+/* Configure the TX path for @dev. */
+void __rte_cold sxe_tx_configure(struct rte_eth_dev *dev);
+
+/* Set up TX queue @tx_queue_id with @ring_depth descriptors on @socket_id. */
+int __rte_cold sxe_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf);
+/* Free up to @free_cnt transmitted mbufs on @tx_queue. */
+int sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+/* Select the TX burst function used for @txq. */
+void __rte_cold sxe_tx_function_set(struct rte_eth_dev *dev,
+					sxe_tx_queue_s *txq);
+
+/* Simple-path cleanup: reclaim in whole rs_thresh batches. */
+int sxe_tx_done_cleanup_simple(sxe_tx_queue_s *txq, u32 free_cnt);
+
+/* Full-path cleanup: walks the buffer ring segment by segment. */
+u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt);
+
+/* Free one batch of transmitted buffers; returns the count freed. */
+s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq);
+
+#endif /* __SXE_TX_H__ */
diff --git a/drivers/net/sxe/pf/sxe_vf.c b/drivers/net/sxe/pf/sxe_vf.c
new file mode 100644
index 0000000000..74a0bbb370
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vf.c
@@ -0,0 +1,1275 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_memcpy.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe_vf.h"
+#include "sxe_hw.h"
+#include "sxe.h"
+#include "sxe_errno.h"
+#include "sxe_filter.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+
+#define SXE_MR_VLAN_MASK  0xFFFFFFFF
+#define SXE_MR_VLAN_MSB_BIT_OFFSET 32
+
+#define SXE_MR_VIRTUAL_POOL_MASK         0xFFFFFFFF
+#define SXE_MR_VIRTUAL_POOL_MSB_BIT_MASK 32
+
+/* Give every configured VF a freshly generated random unicast MAC address. */
+static inline s32 sxe_vf_mac_addr_generate(struct rte_eth_dev *eth_dev, u16 vf_num)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	u8 rand_addr[RTE_ETHER_ADDR_LEN];
+	u16 vf;
+
+	for (vf = 0; vf < vf_num; vf++) {
+		rte_eth_random_addr(rand_addr);
+		memcpy(vf_info[vf].mac_addr, rand_addr, RTE_ETHER_ADDR_LEN);
+	}
+
+	return 0;
+}
+
+/* Program the PCIe VT mode and the GPIE register to match the number of
+ * active SR-IOV pools (64/32/16).
+ */
+static void sxe_vt_mode_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 pcie_ext;
+	u32 gpie;
+
+	pcie_ext = sxe_hw_pcie_vt_mode_get(hw) & ~SXE_GCR_EXT_VT_MODE_MASK;
+
+	gpie = sxe_hw_irq_general_reg_get(hw);
+	gpie = (gpie & ~SXE_GPIE_VTMODE_MASK) | SXE_GPIE_MSIX_MODE;
+
+	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+	case RTE_ETH_64_POOLS:
+		pcie_ext |= SXE_GCR_EXT_VT_MODE_64;
+		gpie |= SXE_GPIE_VTMODE_64;
+		break;
+	case RTE_ETH_32_POOLS:
+		pcie_ext |= SXE_GCR_EXT_VT_MODE_32;
+		gpie |= SXE_GPIE_VTMODE_32;
+		break;
+	case RTE_ETH_16_POOLS:
+		pcie_ext |= SXE_GCR_EXT_VT_MODE_16;
+		gpie |= SXE_GPIE_VTMODE_16;
+		break;
+	}
+
+	sxe_hw_pcie_vt_mode_set(hw, pcie_ext);
+	sxe_hw_irq_general_reg_set(hw, gpie);
+}
+
+/* Initialize the SR-IOV virtualization context of the port.
+ *
+ * Allocates per-VF bookkeeping and a switch domain, derives the pool
+ * layout from the VF count, generates random VF MAC addresses,
+ * initializes the mailbox and arms the mailbox interrupt.
+ *
+ * Returns 0 on success (including the no-VF case), -ENOMEM or the
+ * switch-domain error code on failure.
+ */
+s32 sxe_vt_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_vf_info **vf_info = &adapter->vt_ctxt.vf_info;
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	struct sxe_mirror_info *mirror_info = &adapter->vt_ctxt.mr_info;
+#endif
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	u16 vf_num;
+	s32 ret = 0;
+	u8 nb_queue;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+	/* get vf num from max_vfs or sriov_numvfs */
+	vf_num = sxe_vf_num_get(eth_dev);
+	if (vf_num == 0) {
+		LOG_WARN_BDF("no vf, no need init vt");
+		goto l_out;
+	}
+
+	*vf_info = rte_zmalloc("vf_info", sizeof(struct sxe_vf_info) * vf_num, 0);
+	if (*vf_info == NULL) {
+		LOG_WARN_BDF("vf_info allocate memory fail.");
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	ret = rte_eth_switch_domain_alloc(&(*vf_info)->domain_id);
+	if (ret) {
+		LOG_ERROR_BDF("failed to allocate switch domain for device %d", ret);
+		goto l_free_vf_info;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	memset(mirror_info, 0, sizeof(struct sxe_mirror_info));
+#endif
+
+	/* Pick queues-per-pool from the VF count:
+	 * 64/32/16 pool layouts give 2/4/8 queues per pool.
+	 */
+	if (vf_num >= RTE_ETH_32_POOLS) {
+		nb_queue = 2;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
+		nb_queue = 4;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
+	} else {
+		nb_queue = 8;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
+	}
+
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	/* The PF uses the pool index right after the last VF pool. */
+	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (u16)(vf_num * nb_queue);
+
+	sxe_vf_mac_addr_generate(eth_dev, vf_num);
+
+	sxe_hw_mbx_init(hw);
+
+	/* Enable the mailbox interrupt so VF requests reach the PF. */
+	irq->enable_mask |= SXE_EIMS_MAILBOX;
+
+	sxe_vt_mode_configure(eth_dev);
+
+	LOG_INFO_BDF("vf_num:%d domain id:%u init done.",
+		      vf_num, (*vf_info)->domain_id);
+
+l_out:
+	return ret;
+
+l_free_vf_info:
+	rte_free(*vf_info);
+	*vf_info = NULL;
+	return ret;
+}
+
+/* Enable the PF-owned pools in the RX/TX pool-enable bitmap registers.
+ *
+ * @vf_num is the number of VFs, so the PF range starts at pool vf_num.
+ * The first write sets all bits from vf_bit_index upward in the register
+ * holding pool vf_num; the second write covers the other 32-pool register:
+ * when vf_reg_idx is 1, (vf_reg_idx - 1) == 0 leaves register 0 clear,
+ * when vf_reg_idx is 0, (vf_reg_idx - 1) wraps to 0xFFFFFFFF and sets all
+ * of register 1.
+ */
+static void sxe_pf_pool_enable(struct rte_eth_dev *eth_dev, u16 vf_num)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 enable_mask = ~0;
+	u8 vf_reg_idx = ((vf_num >> 5) > 0) ? 1: 0;
+	u8 vf_bit_index = vf_num & ((1 << 5) - 1);
+
+	sxe_hw_rx_pool_bitmap_set(hw, vf_reg_idx, enable_mask << vf_bit_index);
+	sxe_hw_rx_pool_bitmap_set(hw, (vf_reg_idx ^ 1), (vf_reg_idx - 1));
+
+	sxe_hw_tx_pool_bitmap_set(hw, vf_reg_idx, enable_mask << vf_bit_index);
+	sxe_hw_tx_pool_bitmap_set(hw, (vf_reg_idx ^ 1), (vf_reg_idx - 1));
+
+	return;
+}
+
+/* Turn on VLAN filtering and open the whole VLAN filter table. */
+static void sxe_vf_vlan_filter_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 all_ones = ~0;
+	u8 tbl_idx;
+
+	/* Set the VLAN filter enable bit in the VLAN control register. */
+	sxe_hw_vlan_type_set(hw, sxe_hw_vlan_type_get(hw) | SXE_VLNCTRL_VFE);
+
+	/* Set every bit of every filter-table entry. */
+	for (tbl_idx = 0; tbl_idx < SXE_VFT_TBL_SIZE; tbl_idx++) {
+		sxe_hw_vlan_filter_array_write(hw, tbl_idx, all_ones);
+	}
+}
+
+/* Program the virtualization datapath: VT control, PF pool enables,
+ * pool loopback, default unicast entry, VLAN filtering, MAC anti-spoof
+ * and the RX flow-control threshold. No-op when no VF is configured.
+ */
+void sxe_vt_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 vf_num;
+	u16 pf_pool_idx = RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx;
+
+	vf_num = sxe_vf_num_get(eth_dev);
+	if (vf_num == 0) {
+		LOG_WARN_BDF("no vf, no need configure vt");
+		goto l_out;
+	}
+
+	sxe_hw_vt_ctrl_cfg(hw, pf_pool_idx);
+
+	sxe_pf_pool_enable(eth_dev, vf_num);
+
+	sxe_hw_vt_pool_loopback_switch(hw, true);
+
+	/* Clear the first and last MAC pool entries before re-enabling. */
+	sxe_hw_mac_pool_clear(hw, 0);
+	sxe_hw_mac_pool_clear(hw, SXE_UC_ENTRY_NUM_MAX - 1);
+
+	/* Bind unicast entry 0 to the PF pool. */
+	sxe_hw_uc_addr_pool_enable(hw, 0, pf_pool_idx);
+
+	sxe_vt_mode_configure(eth_dev);
+
+	sxe_vf_vlan_filter_enable(eth_dev);
+
+	sxe_hw_pool_mac_anti_spoof_set(hw, vf_num, 0);
+
+	sxe_rx_fc_threshold_set(hw);
+
+l_out:
+	return;
+}
+
+/* Tear down the SR-IOV context: clear the ethdev SR-IOV bookkeeping,
+ * release the switch domain and free the per-VF state.
+ */
+void sxe_vt_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_vf_info **vf_info = &adapter->vt_ctxt.vf_info;
+	u16 vf_num;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+
+	vf_num = sxe_vf_num_get(eth_dev);
+	if (vf_num == 0 || *vf_info == NULL) {
+		LOG_INFO_BDF("vf_num:%u vf_info:%p, no need free vf_info.",
+			     vf_num, *vf_info);
+		return;
+	}
+
+	ret = rte_eth_switch_domain_free((*vf_info)->domain_id);
+	if (ret) {
+		LOG_ERROR_BDF("failed to free switch domain: %d", ret);
+	}
+
+	rte_free(*vf_info);
+	*vf_info = NULL;
+}
+
+/* Configure RSS while SR-IOV is active; only 64- and 32-pool layouts
+ * support RSS. Returns -EINVAL for any other pool count.
+ */
+s32 sxe_vf_rss_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	bool four_q_per_pool;
+	s32 ret = 0;
+
+	sxe_rss_configure(dev);
+
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case RTE_ETH_64_POOLS:
+		four_q_per_pool = false;
+		break;
+	case RTE_ETH_32_POOLS:
+		four_q_per_pool = true;
+		break;
+	default:
+		ret = -EINVAL;
+		LOG_ERROR_BDF("invalid pool number:%u in iov mode with rss.(err:%d)",
+			      RTE_ETH_DEV_SRIOV(dev).active, ret);
+		goto l_out;
+	}
+
+	sxe_hw_rx_multi_ring_configure(hw, 0, four_q_per_pool, true);
+
+	LOG_INFO_BDF("pool num:%u is_4q_per_pool:%u configure done.",
+		    RTE_ETH_DEV_SRIOV(dev).active, four_q_per_pool);
+
+l_out:
+	return ret;
+}
+
+/* Configure the default RX multi-ring layout for the active pool count.
+ * 16-pool mode uses 8 TCs; 32-pool mode uses 4 queues per pool.
+ */
+s32 sxe_vf_default_mode_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	bool four_q_per_pool = false;
+	u8 tc_cnt = 0;
+	s32 ret = 0;
+
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case RTE_ETH_64_POOLS:
+		four_q_per_pool = false;
+		break;
+	case RTE_ETH_32_POOLS:
+		four_q_per_pool = true;
+		break;
+	case RTE_ETH_16_POOLS:
+		tc_cnt = 8;
+		break;
+	default:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("invalid pool number:%u (err:%d)",
+			      RTE_ETH_DEV_SRIOV(dev).active, ret);
+		goto l_out;
+	}
+
+	sxe_hw_rx_multi_ring_configure(hw, tc_cnt, four_q_per_pool, true);
+
+l_out:
+	return ret;
+}
+
+/* Rebuild the global RX filter mode and the PF pool's L2 filter bits
+ * from the ethdev promiscuous/all-multicast state.
+ */
+static void sxe_filter_mode_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 vf_num = sxe_vf_num_get(dev);
+	u32 vm_l2_ctrl = SXE_VMOLR_AUPE | SXE_VMOLR_BAM;
+	u32 filter_ctrl;
+
+	/* Start from the current RX mode; drop SBP/UPE/MPE, keep broadcast. */
+	filter_ctrl = sxe_hw_rx_mode_get(hw);
+	filter_ctrl &= ~(SXE_FCTRL_SBP | SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+	filter_ctrl |= SXE_FCTRL_BAM;
+
+	if (dev->data->promiscuous) {
+		filter_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+		vm_l2_ctrl |= (SXE_VMOLR_ROPE | SXE_VMOLR_MPE);
+	} else if (dev->data->all_multicast) {
+		filter_ctrl |= SXE_FCTRL_MPE;
+		vm_l2_ctrl |= SXE_VMOLR_MPE;
+	} else {
+		vm_l2_ctrl |= SXE_VMOLR_ROMPE;
+	}
+
+	/* Keep the pool bits not managed here untouched. */
+	vm_l2_ctrl |= sxe_hw_pool_rx_mode_get(hw, vf_num) &
+			~(SXE_VMOLR_MPE | SXE_VMOLR_ROMPE | SXE_VMOLR_ROPE);
+
+	sxe_hw_pool_rx_mode_set(hw, vm_l2_ctrl, vf_num);
+
+	sxe_hw_rx_mode_set(hw, filter_ctrl);
+
+	sxe_vlan_strip_switch_set(dev);
+}
+
+/* Reset per-VF state after a function-level reset: drop the VF's
+ * software unicast entries, restore the pool RX mode defaults, clear
+ * its TX VLAN tag and multicast hashes, then refresh the filter mode.
+ */
+static inline void sxe_vf_flr_handle(struct rte_eth_dev *dev, u16 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	u32 pool_rx_mode;
+
+	sxe_sw_uc_entry_vf_del(adapter, vf, false);
+
+	pool_rx_mode = sxe_hw_pool_rx_mode_get(hw, vf);
+	pool_rx_mode |= (SXE_VMOLR_AUPE | SXE_VMOLR_ROPE | SXE_VMOLR_BAM);
+	sxe_hw_pool_rx_mode_set(hw, pool_rx_mode, vf);
+
+	sxe_hw_tx_vlan_tag_clear(hw, vf);
+
+	vf_info[vf].mc_hash_used = 0;
+
+	sxe_filter_mode_configure(dev);
+}
+
+/* Handle a VF request to set its primary MAC address.
+ *
+ * The address is validated BEFORE a software unicast entry is allocated,
+ * so an invalid request no longer consumes a rar slot; invalid addresses
+ * are now also logged instead of failing silently.
+ * Returns 0 on success, -SXE_ERR_PARAM or the hw error code on failure.
+ */
+static s32 sxe_vf_dev_mac_addr_set_handler(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_uc_addr_msg mac_msg = *(struct sxe_uc_addr_msg *)msgbuf;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	s32 ret = -SXE_ERR_PARAM;
+	u32 rar_idx;
+
+	if (rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)mac_msg.uc_addr)) {
+		rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf, mac_msg.uc_addr, false);
+		rte_memcpy(vf_info[vf].mac_addr, mac_msg.uc_addr, RTE_ETHER_ADDR_LEN);
+		ret = sxe_hw_uc_addr_add(&adapter->hw, rar_idx, mac_msg.uc_addr, vf);
+		if (ret) {
+			LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" set fail.(err:%d)",
+				      vf, MAC_ADDR(mac_msg.uc_addr), ret);
+		}
+	} else {
+		LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" invalid.(err:%d)",
+			      vf, MAC_ADDR(mac_msg.uc_addr), ret);
+	}
+
+	return ret;
+}
+
+/* Negotiate the mailbox API version requested by a VF; versions 1.0
+ * through 1.3 are accepted, anything else is rejected.
+ */
+STATIC s32 sxe_mbx_api_set_handler(struct rte_eth_dev *dev,
+						    u32 *msg, u32 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_mbx_api_msg *api_msg = (struct sxe_mbx_api_msg *)msg;
+	struct sxe_vf_info *vf_info = &(adapter->vt_ctxt.vf_info[vf_idx]);
+	s32 ret;
+
+	switch (api_msg->api_version) {
+	case SXE_MBX_API_10:
+	case SXE_MBX_API_11:
+	case SXE_MBX_API_12:
+	case SXE_MBX_API_13:
+		vf_info->mbx_version = api_msg->api_version;
+		ret = 0;
+		break;
+	default:
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("invalid mailbox api version:%u.\n",
+			  api_msg->api_version);
+		break;
+	}
+
+	LOG_INFO_BDF("mailbox api version:0x%x.(err:%d)",
+	             vf_info->mbx_version,
+	             ret);
+
+	return ret;
+}
+
+/* Answer a VF's ring-info query: queues per pool, default queue and the
+ * TC count implied by the PF's TX multi-queue mode.
+ *
+ * Fixes log-message typos ("mailbod" -> "mailbox", "eanble" -> "enable").
+ * Returns 0 on success, -SXE_ERR_CONFIG for unsupported api versions or
+ * TX queue modes.
+ */
+static s32 sxe_pf_ring_info_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &(adapter->vt_ctxt.vf_info[vf]);
+	struct sxe_ring_info_msg *ring_msg = (struct sxe_ring_info_msg *)msgbuf;
+	u32 default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+	u8 num_tcs;
+	u32 vmvir;
+	u32 vlan_action;
+	u32 vlan_id;
+	u32 user_priority;
+	s32 ret = 0;
+
+	/* Ring info is only defined from mailbox api 1.1 upward. */
+	switch (vf_info->mbx_version) {
+	case SXE_MBX_API_11:
+	case SXE_MBX_API_12:
+	case SXE_MBX_API_13:
+		break;
+	default:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("mailbox version:0x%x not support get ring"
+			      " info.(err:%d)",
+			      vf_info->mbx_version, ret);
+		goto l_out;
+	}
+
+	ring_msg->max_rx_num = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	ring_msg->max_tx_num  = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	ring_msg->default_tc = default_q;
+
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_NONE:
+	case RTE_ETH_MQ_TX_DCB:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("vf_idx:%u sriov enable, not support tx queue mode:0x%x.",
+			vf,
+			dev->data->dev_conf.txmode.mq_mode);
+		goto l_out;
+
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		/* TC count follows the configured pool count (16 -> 8 TCs). */
+		vmdq_dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+		case RTE_ETH_16_POOLS:
+			num_tcs = RTE_ETH_8_TCS;
+			break;
+		case RTE_ETH_32_POOLS:
+			num_tcs = RTE_ETH_4_TCS;
+			break;
+		default:
+			ret = -SXE_ERR_CONFIG;
+			LOG_ERROR_BDF("vf:%u sriov enable, tx queue mode:0x%x "
+				      "invalid pool num:%u.(err:%d)",
+					vf,
+					dev->data->dev_conf.txmode.mq_mode,
+					vmdq_dcb_tx_conf->nb_queue_pools,
+					ret);
+			goto l_out;
+		}
+		break;
+
+	case RTE_ETH_MQ_TX_VMDQ_ONLY:
+		/* Report one TC when a default VLAN tag is inserted for the VF. */
+		vmvir = sxe_hw_tx_vlan_insert_get(hw, vf);
+		vlan_action = vmvir & SXE_VMVIR_VLANA_MASK;
+		vlan_id = vmvir & SXE_VMVIR_VLAN_VID_MASK;
+		user_priority = (vmvir & SXE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
+		if ((vlan_action == SXE_VMVIR_VLANA_DEFAULT) &&
+			((vlan_id !=  0) || (user_priority != 0))) {
+			num_tcs = 1;
+		} else {
+			num_tcs = 0;
+		}
+		break;
+
+	default:
+		ret = -SXE_ERR_CONFIG;
+		LOG_ERROR_BDF("vf_idx:%u sriov enable, invalid tx queue mode:0x%x.",
+			vf,
+			dev->data->dev_conf.txmode.mq_mode);
+		goto l_out;
+	}
+
+	ring_msg->tc_num = num_tcs;
+
+	LOG_INFO_BDF("max_rx_num:%u max_tx_num:%u default queue:%u tc_num:%u.",
+		    ring_msg->max_rx_num, ring_msg->max_tx_num,
+		    ring_msg->default_tc, ring_msg->tc_num);
+
+l_out:
+	return ret;
+}
+
+/* Answer a VF's RSS configuration query with the PF's key and hash types.
+ *
+ * Fixes: unchecked malloc (would NULL-deref in the conf-get/memcpy path),
+ * the binary, non-NUL-terminated key being logged with "%s", and the
+ * "%ld" specifier not matching rss_hf; the contradictory UNUSED(vf) is
+ * dropped since vf is used in the log. Returns 0 or -ENOMEM.
+ */
+static s32 sxe_vf_rss_hash_conf_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_rss_conf rss_conf;
+	struct sxe_rss_hash_msg *rss_msg = (struct sxe_rss_hash_msg *)msgbuf;
+
+	rss_conf.rss_key = malloc(SXE_RSS_KEY_SIZE);
+	if (rss_conf.rss_key == NULL) {
+		LOG_ERROR_BDF("vf[%u] rss key buffer allocate fail.", vf);
+		return -ENOMEM;
+	}
+
+	sxe_rss_hash_conf_get(dev, &rss_conf);
+
+	memcpy(rss_msg->hash_key, rss_conf.rss_key, SXE_RSS_KEY_SIZE);
+	rss_msg->rss_hf = rss_conf.rss_hf;
+
+	free(rss_conf.rss_key);
+
+	LOG_INFO_BDF("vf[%u] rss hash conf get, rss_hf:0x%"SXE_PRIX64"\n",
+			vf, (u64)rss_msg->rss_hf);
+
+	return 0;
+}
+
+/* Handle a VF request to add or delete a VLAN id filter and keep the
+ * per-VF vlan_cnt bookkeeping in sync (misindented increment fixed).
+ * Returns the hw configure result.
+ */
+static s32 sxe_vf_vlan_id_set_handler(struct rte_eth_dev *dev,
+						u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info;
+	struct sxe_vlan_msg *vlan_msg = (struct sxe_vlan_msg *)msgbuf;
+	u32 vlan_id = (vlan_msg->vlan_id & SXE_VLVF_VLANID_MASK);
+	s32 ret;
+
+	ret = sxe_hw_vlan_filter_configure(hw, vlan_id, vf, vlan_msg->add, false);
+	if (ret == 0) {
+		if (vlan_msg->add) {
+			vf_info[vf].vlan_cnt++;
+		} else if (vf_info[vf].vlan_cnt) {
+			vf_info[vf].vlan_cnt--;
+		}
+	}
+
+	LOG_INFO_BDF("vf[%u] %s vid[%u] done vlan_cnt:%u ret = %d",
+			vf, vlan_msg->add ? "add" : "delete",
+			vlan_id,
+			vf_info[vf].vlan_cnt, ret);
+
+	return ret;
+}
+
+/* Validate a VF request to change its max frame size.
+ *
+ * The request is checked against the mailbox api version (only 1.1+
+ * may rely on PF jumbo being enabled), the ethernet min/jumbo bounds
+ * and the PF's current max frame register.
+ * Returns 0 when acceptable, -SXE_ERR_PARAM otherwise.
+ * (Whitespace normalized: space-indented lines -> tabs; stray blank
+ * line after the signature removed; C-style fallthrough comment.)
+ */
+static s32 sxe_vf_max_frame_set_handler(struct rte_eth_dev *dev,
+						u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &(adapter->vt_ctxt.vf_info[vf]);
+	struct sxe_max_frame_msg *msg = (struct sxe_max_frame_msg *)msgbuf;
+	u32 vf_max_frame = msg->max_frame + SXE_ETH_OVERHEAD;
+	s32 ret = 0;
+	u32 cur_max_frs;
+	u32 frame_size = SXE_GET_FRAME_SIZE(dev);
+
+	switch (vf_info->mbx_version) {
+	case SXE_MBX_API_11:
+	case SXE_MBX_API_12:
+	case SXE_MBX_API_13:
+		if (frame_size > SXE_ETH_MAX_LEN) {
+			LOG_WARN_BDF("pf jumbo frame enabled.");
+			break;
+		}
+		/* fallthrough */
+	default:
+		if ((vf_max_frame > SXE_ETH_MAX_LEN) ||
+		    (frame_size > SXE_ETH_MAX_LEN)) {
+			ret = -SXE_ERR_PARAM;
+			LOG_ERROR_BDF("mbx version:0x%x pf max pkt len:0x%x vf:%u"
+				      " max_frames:0x%x max_len:0x%x.(err:%d)",
+				      vf_info->mbx_version,
+				      frame_size,
+				      vf, vf_max_frame,
+				      SXE_ETH_MAX_LEN, ret);
+			goto l_out;
+		}
+		break;
+	}
+
+	if ((vf_max_frame < RTE_ETHER_MIN_LEN) ||
+		(vf_max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("mbx version:0x%x vf:%u invalid max_frame:%u (err:%d)",
+			      vf_info->mbx_version,
+			      vf,
+			      vf_max_frame,
+			      ret);
+		goto l_out;
+	}
+
+	/* The VF may not exceed the PF's currently programmed max frame. */
+	cur_max_frs = sxe_hw_mac_max_frame_get(hw);
+	if (vf_max_frame > cur_max_frs) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("mbx version:0x%x vf:%u invalid max_frame:%u >= cur_max_frs:%u",
+			      vf_info->mbx_version,
+			      vf,
+			      vf_max_frame,
+			      cur_max_frs);
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+/* Clear the multicast-promiscuous bit in the VF's pool RX mode. */
+static void sxe_vf_mc_promisc_disable(struct rte_eth_dev *dev, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 pool_rx_mode;
+
+	pool_rx_mode = sxe_hw_pool_rx_mode_get(hw, vf);
+	pool_rx_mode &= ~SXE_VMOLR_MPE;
+	sxe_hw_pool_rx_mode_set(hw, pool_rx_mode, vf);
+}
+
+/* Sync a VF's multicast address list into the MTA hash table.
+ *
+ * The VF sends pre-extracted hash values; they are recorded per VF and
+ * programmed into the hardware hash table. An empty list disables the
+ * VF's multicast filter bit instead. Always returns 0.
+ */
+static s32 sxe_vf_mc_addr_sync(struct rte_eth_dev *dev,
+					u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf];
+	struct sxe_mc_sync_msg *mc_msg = (struct sxe_mc_sync_msg *)msgbuf;
+	u8 mc_cnt = min(mc_msg->mc_cnt, SXE_VF_MC_ENTRY_NUM_MAX);
+	u32 mta_idx;
+	u32 mta_shift;
+	u32 vm_l2_filter = sxe_hw_pool_rx_mode_get(hw, vf);
+	int i;
+
+	/* Leaving mc promiscuous mode; rely on the hash filter instead. */
+	sxe_vf_mc_promisc_disable(dev, vf);
+
+	/* Record the extracted hash values reported by the VF. */
+	vf_info->mc_hash_used = mc_cnt;
+	for (i = 0; i < mc_cnt; i++) {
+		vf_info->mc_hash[i] = mc_msg->mc_addr_extract[i];
+		LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x\n",
+			     vf, mc_cnt, i, vf_info->mc_hash[i]);
+	}
+
+	if (mc_cnt == 0) {
+		vm_l2_filter &= ~SXE_VMOLR_ROMPE;
+		sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
+		LOG_WARN_BDF("vf:%u request disable mta filter.", vf);
+	} else {
+		/* Split each hash into MTA register index and bit position. */
+		for (i = 0; i < mc_cnt; i++) {
+			mta_idx = (vf_info->mc_hash[i] >> SXE_MC_ADDR_SHIFT) &
+				  SXE_MC_ADDR_REG_MASK;
+			mta_shift = vf_info->mc_hash[i] & SXE_MC_ADDR_BIT_MASK;
+			sxe_hw_mta_hash_table_update(hw, mta_idx, mta_shift);
+
+			LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x"
+				"reg_idx=%u, bit_idx=%u.\n",
+				vf, mc_cnt, i, vf_info->mc_hash[i],
+				mta_idx, mta_shift);
+		}
+
+		vm_l2_filter |= SXE_VMOLR_ROMPE;
+		sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
+	}
+
+	return 0;
+}
+
+/* Handle a VF request to change its multicast/broadcast receive mode.
+ *
+ * Promiscuous mode is rejected (on api 1.2 explicitly, and again in the
+ * mode switch for api 1.3). The requested mode is mapped to VMOLR bit
+ * enable/disable sets and written to the VF's pool RX mode register.
+ * Returns 0, -EOPNOTSUPP or -SXE_ERR_PARAM.
+ */
+static s32 sxe_vf_cast_mode_handler(struct rte_eth_dev *dev,
+					u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf];
+	struct sxe_cast_mode_msg *cast_msg = (struct sxe_cast_mode_msg *)msgbuf;
+	u32 enable;
+	u32 disable;
+	u32 vm_l2_filter;
+	s32 ret = 0;
+
+	switch (vf_info->mbx_version) {
+	case SXE_MBX_API_12:
+		/* api 1.2 never supports promiscuous requests. */
+		if (cast_msg->cast_mode == SXE_CAST_MODE_PROMISC) {
+			ret = -EOPNOTSUPP;
+			LOG_ERROR_BDF("mbx api:12 vf:%u cast_mode:0x%x "
+				     "unsupport.(err:%d)",
+				      vf, cast_msg->cast_mode, ret);
+			goto l_out;
+		}
+		break;
+	case SXE_MBX_API_13:
+		break;
+	default:
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("vf:%u invalid mbx api version:0x%x.\n",
+			     vf, vf_info->mbx_version);
+		goto l_out;
+	}
+
+	/* No hardware write needed when the mode is unchanged. */
+	if (vf_info->cast_mode == cast_msg->cast_mode) {
+		LOG_INFO_BDF("vf:%d currut mode equal set mode:0x%x, skip set.",
+			     vf, cast_msg->cast_mode);
+		goto l_out;
+	}
+
+	/* Map the requested mode to VMOLR bits to set and to clear. */
+	switch (cast_msg->cast_mode) {
+	case SXE_CAST_MODE_NONE:
+		disable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE | SXE_VMOLR_MPE;
+		enable = 0;
+		break;
+
+	case SXE_CAST_MODE_MULTI:
+		disable = SXE_VMOLR_MPE;
+		enable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE;
+		break;
+
+	case SXE_CAST_MODE_ALLMULTI:
+		disable = 0;
+		enable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE |
+			SXE_VMOLR_MPE;
+		break;
+
+	case SXE_CAST_MODE_PROMISC:
+		ret = -EOPNOTSUPP;
+		LOG_ERROR_BDF("vf:%d promisc mode not support.(ret:%d)\n",
+		              vf, ret);
+		goto l_out;
+
+	default:
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("vf:%u invalid cast mode:0x%x.\n",
+			     vf, cast_msg->cast_mode);
+		goto l_out;
+	}
+
+	vm_l2_filter = sxe_hw_pool_rx_mode_get(hw, vf);
+	vm_l2_filter &= ~disable;
+	vm_l2_filter |= enable;
+	sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
+
+	LOG_INFO_BDF("vf:%d filter reg:0x%x mode:%d.\n",
+		     vf, vm_l2_filter, cast_msg->cast_mode);
+
+	vf_info->cast_mode = cast_msg->cast_mode;
+
+l_out:
+	return ret;
+}
+
+/* Sync an additional VF unicast address, or flush them all.
+ *
+ * A non-zero index adds the carried address (after validation); index 0
+ * removes every previously synced address for the VF.
+ * Fixes a stray carriage-return artifact in the sxe_hw_uc_addr_add call
+ * ("sxe_hw_uc_addr_add\r(") that broke compilation.
+ * Returns 0 or -SXE_ERR_PARAM for an invalid address.
+ */
+static s32 sxe_vf_uc_addr_sync_handler(struct rte_eth_dev *dev,
+					u32 *msgbuf, u32 vf)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf];
+	struct sxe_uc_sync_msg *uc_msg = (struct sxe_uc_sync_msg *)msgbuf;
+	s32 ret = 0;
+	u8 rar_idx;
+
+	if (uc_msg->index) {
+		if (!rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)uc_msg->addr)) {
+			ret = -SXE_ERR_PARAM;
+			LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" invalid.(err:%d).",
+				      vf, MAC_ADDR(uc_msg->addr), ret);
+			goto l_out;
+		}
+
+		vf_info->uc_mac_cnt++;
+		rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf, (u8 *)uc_msg->addr, true);
+		sxe_hw_uc_addr_add(hw, rar_idx, (u8 *)uc_msg->addr, vf);
+	} else {
+		/* Index 0: drop all addresses synced so far. */
+		if (vf_info->uc_mac_cnt) {
+			sxe_sw_uc_entry_vf_del(adapter, vf, true);
+			vf_info->uc_mac_cnt = 0;
+		}
+	}
+
+l_out:
+	return ret;
+}
+
+/* Mailbox request dispatch table, indexed by SXE_VFREQ_* command id.
+ * Unlisted ids hold a NULL msg_func and are rejected by the caller.
+ */
+STATIC struct sxe_msg_table msg_table[] = {
+	[SXE_VFREQ_MAC_ADDR_SET] = {SXE_VFREQ_MAC_ADDR_SET, sxe_vf_dev_mac_addr_set_handler},
+	[SXE_VFREQ_MC_ADDR_SYNC] = {SXE_VFREQ_MC_ADDR_SYNC, sxe_vf_mc_addr_sync},
+	[SXE_VFREQ_VLAN_SET] = {SXE_VFREQ_VLAN_SET, sxe_vf_vlan_id_set_handler},
+	[SXE_VFREQ_LPE_SET] = {SXE_VFREQ_LPE_SET, sxe_vf_max_frame_set_handler},
+	[SXE_VFREQ_UC_ADDR_SYNC] = {SXE_VFREQ_UC_ADDR_SYNC, sxe_vf_uc_addr_sync_handler},
+	[SXE_VFREQ_API_NEGOTIATE] = {SXE_VFREQ_API_NEGOTIATE, sxe_mbx_api_set_handler},
+	[SXE_VFREQ_RING_INFO_GET] = {SXE_VFREQ_RING_INFO_GET, sxe_pf_ring_info_get},
+	[SXE_VFREQ_CAST_MODE_SET] = {SXE_VFREQ_CAST_MODE_SET, sxe_vf_cast_mode_handler},
+	[SXE_VFREQ_RSS_CONF_GET] = {SXE_VFREQ_RSS_CONF_GET, sxe_vf_rss_hash_conf_get},
+};
+
+/* Enable one VF's pool: set its bit in the TX and RX pool bitmaps,
+ * enable queue drop for its queues, mark it ready and start the
+ * spoof counter for its register/bit slot.
+ */
+static void sxe_vf_pool_enable(struct rte_eth_dev *dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx];
+	u8 reg_idx = vf_idx / 32;
+	u8 bit_idx = vf_idx % 32;
+	u32 pool_bitmap;
+
+	pool_bitmap = sxe_hw_tx_pool_bitmap_get(hw, reg_idx);
+	pool_bitmap |= BIT(bit_idx);
+	sxe_hw_tx_pool_bitmap_set(hw, reg_idx, pool_bitmap);
+
+	sxe_hw_vf_queue_drop_enable(hw, vf_idx,
+				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+
+	pool_bitmap = sxe_hw_rx_pool_bitmap_get(hw, reg_idx);
+	pool_bitmap |= BIT(bit_idx);
+	sxe_hw_rx_pool_bitmap_set(hw, reg_idx, pool_bitmap);
+
+	vf_info->is_ready = true;
+
+	sxe_hw_spoof_count_enable(hw, reg_idx, bit_idx);
+}
+
+/* Handle a VF reset request: re-enable its pools, run the FLR cleanup,
+ * reprogram its MAC entry, disable mc promiscuous and reply with ACK,
+ * the VF's MAC address and the multicast filter type.
+ *
+ * Fix: the ready flag is set on vf_info[vf_idx]; the original indexed
+ * vf_info[0] for every VF (vt_ctxt.vf_info->is_ready).
+ */
+static void sxe_vf_reset_msg_handle(struct rte_eth_dev *dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_rst_reply reply = {};
+	u8 *mac_addr = adapter->vt_ctxt.vf_info[vf_idx].mac_addr;
+	u8 *addr_bytes = (u8 *)(((struct rte_ether_addr *)mac_addr)->addr_bytes);
+	u32 rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf_idx, addr_bytes, false);
+
+	LOG_INFO_BDF("receive vf_idx:%d reset msg.\n", vf_idx);
+
+	sxe_vf_pool_enable(dev, vf_idx);
+
+	sxe_vf_flr_handle(dev, vf_idx);
+
+	sxe_hw_uc_addr_add(&adapter->hw, rar_idx, addr_bytes, vf_idx);
+
+	sxe_vf_mc_promisc_disable(dev, vf_idx);
+
+	reply.msg_type = SXE_VFREQ_RESET | SXE_MSGTYPE_ACK;
+	reply.mc_filter_type = SXE_MC_FILTER_TYPE0;
+	rte_memcpy(reply.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
+
+	sxe_hw_send_msg_to_vf(hw, (u32 *)&reply,
+				SXE_MSG_NUM(sizeof(reply)), vf_idx);
+
+	adapter->vt_ctxt.vf_info[vf_idx].is_ready = true;
+
+	LOG_INFO_BDF("vf_idx:%d reset msg:0x%x handle done.send mac addr:"MAC_FMT
+		    " mc type:%d to vf.",
+		    vf_idx, reply.msg_type,
+		    MAC_ADDR(mac_addr), SXE_MC_FILTER_TYPE0);
+}
+
+/* Dispatch one VF mailbox request through msg_table and reply with
+ * ACK/NACK.
+ *
+ * Fix: cmd_id is masked with 0xFF, so values above SXE_VFREQ_RSS_CONF_GET
+ * previously passed the (CAST_MODE_SET, IPSEC_DEL] range check and
+ * indexed msg_table out of bounds; ids beyond the table are now rejected.
+ */
+STATIC s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
+					u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u16 cmd_id = msg[0] & SXE_VFREQ_MASK;
+	struct rte_pmd_sxe_mb_event_param user_param;
+
+	if ((cmd_id > SXE_VFREQ_CAST_MODE_SET &&
+		cmd_id <= SXE_VFREQ_IPSEC_DEL) ||
+		cmd_id >= (u16)(sizeof(msg_table) / sizeof(msg_table[0]))) {
+		ret = -SXE_ERR_PARAM;
+		LOG_ERROR_BDF("vf_idx:%u msg:0x%x invalid cmd_id:0x%x.\n",
+			   vf_idx, msg[0], cmd_id);
+		goto l_out;
+	}
+
+	user_param.ret = RTE_PMD_SXE_MB_EVENT_PROCEED;
+	user_param.vf_idx = vf_idx;
+	user_param.msg_type = msg[0] & 0xFFFF;
+	user_param.msg = (void *)msg;
+
+	/* Reset is handled directly; the user callback is still notified. */
+	if (cmd_id == SXE_VFREQ_RESET) {
+		ret = 0;
+		sxe_vf_reset_msg_handle(dev, vf_idx);
+
+		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+					      &user_param);
+		goto l_out;
+	}
+
+	sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+					      &user_param);
+
+	LOG_INFO_BDF("vf_idx:%u cmd_id:0x%x user configure:0x%x.",
+			vf_idx, cmd_id, user_param.ret);
+
+	/* A VF that has not completed reset negotiation only gets a NACK. */
+	if (!adapter->vt_ctxt.vf_info[vf_idx].is_ready) {
+		msg[0] |= SXE_MSGTYPE_NACK;
+		ret = sxe_hw_send_msg_to_vf(hw, msg,
+					SXE_MSG_NUM(sizeof(msg[0])), vf_idx);
+		LOG_WARN_BDF("vf_idx:%d not ready now, send nack to vf.ret:%d.\n",
+			  vf_idx, ret);
+		goto l_out;
+	}
+
+	if (msg_table[cmd_id].msg_func) {
+		/* API negotiation and ring-info bypass the user veto. */
+		if ((user_param.ret == RTE_PMD_SXE_MB_EVENT_PROCEED) ||
+		    (cmd_id == SXE_VFREQ_API_NEGOTIATE) ||
+		    (cmd_id == SXE_VFREQ_RING_INFO_GET)) {
+			ret = msg_table[cmd_id].msg_func(dev, msg, vf_idx);
+		}
+		LOG_INFO_BDF("msg:0x%x cmd_id:0x%x handle done.ret:%d\n",
+			 msg[0], cmd_id, ret);
+	} else {
+		ret = -SXE_ERR_PARAM;
+	}
+
+	if (!ret) {
+		msg[0] |= SXE_MSGTYPE_ACK;
+	} else {
+		msg[0] |= SXE_MSGTYPE_NACK;
+		LOG_ERROR_BDF("vf_idx:%u msg_type:0x%x cmdId:0x%x invalid.(err:%d)\n",
+			      vf_idx, msg[0], cmd_id, ret);
+	}
+
+	ret = sxe_hw_send_msg_to_vf(hw, msg, SXE_MBX_MSG_NUM, vf_idx);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%d msg:0x%x reply fail.(err:%d).\n",
+			   vf_idx, msg[0], ret);
+	}
+
+	LOG_INFO_BDF("pf reply vf:%d msg:0x%x done.ret:%d\n", vf_idx, msg[0], ret);
+
+l_out:
+	return ret;
+}
+
+/* Fetch one pending request from a VF's mailbox and dispatch it. */
+static s32 sxe_vf_req_msg_handle(struct rte_eth_dev *dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 msg[SXE_MBX_MSG_NUM] = {0};
+	s32 ret;
+
+	ret = sxe_hw_rcv_msg_from_vf(hw, msg, SXE_MBX_MSG_NUM, vf_idx);
+	if (ret) {
+		LOG_ERROR_BDF("rcv vf:0x%x req msg:0x%x fail.(err:%d)\n",
+			   vf_idx, msg[0], ret);
+		return ret;
+	}
+
+	LOG_INFO_BDF("rcv vf_idx:%d req msg:0x%x.\n", vf_idx, msg[0]);
+
+	/* Messages already carrying ACK/NACK were answered earlier. */
+	if (msg[0] & (SXE_MSGTYPE_ACK | SXE_MSGTYPE_NACK)) {
+		LOG_WARN_BDF("msg:0x%x has handled, no need dup handle.\n",
+			  msg[0]);
+		return ret;
+	}
+
+	ret = sxe_req_msg_handle(dev, msg, vf_idx);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%d request msg handle fail.(err:%d)\n",
+			  vf_idx, ret);
+	}
+
+	return ret;
+}
+
+/* On a VF ack event, reply NACK if the VF has not finished its reset. */
+static void sxe_vf_ack_msg_handle(struct rte_eth_dev *eth_dev, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	u32 nack = SXE_MSGTYPE_NACK;
+
+	if (!adapter->vt_ctxt.vf_info[vf_idx].is_ready) {
+		sxe_hw_send_msg_to_vf(&adapter->hw, &nack,
+					SXE_MSG_NUM(sizeof(nack)), vf_idx);
+	}
+}
+
+/* Mailbox interrupt handler: poll every VF for pending reset (FLR),
+ * request and ack events and service each in turn.
+ */
+void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 vf_num = sxe_vf_num_get(eth_dev);
+	u8 vf;
+
+	LOG_DEBUG_BDF("mailbox irq triggered vf_num:%u.\n", vf_num);
+
+	for (vf = 0; vf < vf_num; vf++) {
+		if (sxe_hw_vf_rst_check(hw, vf)) {
+			LOG_WARN_BDF("vf_idx:%d flr triggered.\n", vf);
+			sxe_vf_flr_handle(eth_dev, vf);
+		}
+
+		if (sxe_hw_vf_req_check(hw, vf)) {
+			sxe_vf_req_msg_handle(eth_dev, vf);
+		}
+
+		if (sxe_hw_vf_ack_check(hw, vf)) {
+			sxe_vf_ack_msg_handle(eth_dev, vf);
+		}
+	}
+}
+
+#ifdef ETH_DEV_MIRROR_RULE
+/* Validate mirror-rule parameters: virtualization must be on, the rule
+ * id must be in range and the rule type must be a supported combination.
+ */
+static s32 sxe_mirror_conf_check(struct sxe_hw *hw, u8 rule_id,
+					  u8 rule_type)
+{
+	s32 ret = 0;
+
+	if (sxe_hw_vt_status(hw) == 0) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "virtual disabled, mirror rule not support.(err:%d)",
+		              ret);
+	} else if (rule_id >= SXE_MIRROR_RULES_MAX) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "invalid rule_id:%u rule id max:%u.(err:%d)",
+		              rule_id, SXE_MIRROR_RULES_MAX, ret);
+	} else if (SXE_MIRROR_TYPE_INVALID(rule_type)) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "unsupported mirror type 0x%x.(err:%d)",
+			      rule_type, ret);
+	}
+
+	return ret;
+}
+
+/* Configure (on) or clear (off) the VLAN part of a mirror rule.
+ *
+ * Each VLAN id in the mask must already have an active VLVF slot whose
+ * id matches; the matching slot indices form the 64-bit hardware mask
+ * written as two 32-bit halves. Software rule state is kept in
+ * mirror_info->mr_conf. Returns 0 or -EINVAL when a VLAN has no
+ * matching VLVF entry.
+ */
+static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	u32 mv_msb = 0;
+	u32 mv_lsb = 0;
+	u64 vlan_mask = 0;
+	u32 vlvf;
+	u8 i;
+	u8 reg_idx;
+	s32 ret = 0;
+
+	/* Resolve every requested VLAN id to its VLVF slot. */
+	for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+		if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+			ret = sxe_hw_vlvf_slot_find(
+					hw,
+					mirror_conf->vlan.vlan_id[i],
+					false);
+			if (ret < 0) {
+				ret = -EINVAL;
+				LOG_ERROR_BDF("vlan_id[%u]:0x%x no matched vlvf."
+					      "(err:%d)",
+					      i,
+					      mirror_conf->vlan.vlan_id[i],
+					      ret);
+				goto l_out;
+			}
+
+			/* The slot must be enabled and hold the same id. */
+			reg_idx = ret;
+			vlvf = sxe_hw_vlan_pool_filter_read(hw, reg_idx);
+			if ((vlvf & SXE_VLVF_VIEN) &&
+			    ((vlvf & SXE_VLVF_VLANID_MASK) ==
+			      mirror_conf->vlan.vlan_id[i])) {
+				vlan_mask |= (1ULL << reg_idx);
+			} else{
+				ret = -EINVAL;
+				LOG_ERROR_BDF("i:%u vlan_id:0x%x "
+					      "vlvf[%u]:0x%x not meet request."
+					      "(err:%d)",
+					      i,
+					      mirror_conf->vlan.vlan_id[i],
+					      reg_idx,
+					      vlvf,
+					      ret);
+				goto l_out;
+			}
+		}
+	}
+
+	if (on) {
+		/* Split the slot mask into the two 32-bit register halves. */
+		mv_lsb = vlan_mask & SXE_MR_VLAN_MASK;
+		mv_msb = vlan_mask >> SXE_MR_VLAN_MSB_BIT_OFFSET;
+
+		mirror_info->mr_conf[rule_id].vlan.vlan_mask =
+					mirror_conf->vlan.vlan_mask;
+
+		for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+				mirror_info->mr_conf[rule_id].vlan.vlan_id[i] =
+					mirror_conf->vlan.vlan_id[i];
+				LOG_INFO_BDF("rule_id:%u vlan id:0x%x add mirror"
+					     " to dst_pool:%u",
+					     rule_id,
+					     mirror_conf->vlan.vlan_id[i],
+					     mirror_conf->dst_pool);
+			}
+		}
+	} else {
+		/* Rule removal: clear both hardware halves and sw state. */
+		mv_lsb = 0;
+		mv_msb = 0;
+		mirror_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+
+		for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+			mirror_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+			LOG_INFO_BDF("rule_id:%u vlan id:0x%x del mirror"
+				     " from dst_pool:%u",
+				     rule_id,
+				     mirror_conf->vlan.vlan_id[i],
+				     mirror_conf->dst_pool);
+		}
+	}
+
+	sxe_hw_mirror_vlan_set(hw, rule_id, mv_lsb, mv_msb);
+
+l_out:
+	return ret;
+}
+
+/* Configure (on) or clear (off) the virtual-pool part of a mirror rule:
+ * the 64-bit pool mask is written as two 32-bit register halves.
+ */
+static void sxe_virtual_pool_mirror_configure(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	u32 lsb = 0;
+	u32 msb = 0;
+
+	if (on) {
+		lsb = mirror_conf->pool_mask & SXE_MR_VIRTUAL_POOL_MASK;
+		msb = mirror_conf->pool_mask >> SXE_MR_VIRTUAL_POOL_MSB_BIT_MASK;
+		mirror_info->mr_conf[rule_id].pool_mask = mirror_conf->pool_mask;
+	} else {
+		mirror_info->mr_conf[rule_id].pool_mask = 0;
+	}
+
+	sxe_hw_mirror_virtual_pool_set(hw, rule_id, lsb, msb);
+}
+
+/* Add (on) or remove (off) mirror rule @rule_id composed of any mix of
+ * VLAN, virtual-pool, uplink and downlink mirroring.
+ *
+ * Fixes log-message typos ("mirrror" -> "mirror", "virtaul" -> "virtual").
+ * Returns 0 on success or the validation/configure error code.
+ */
+s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	u8 mirror_type = 0;
+	s32 ret;
+
+	ret = sxe_mirror_conf_check(hw, rule_id, mirror_conf->rule_type);
+	if (ret) {
+		LOG_ERROR_BDF("rule_id:%u mirror config param invalid.(err:%d)",
+			      rule_id, ret);
+		goto l_out;
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+		mirror_type |= SXE_MRCTL_VLME;
+		ret = sxe_vlan_mirror_configure(dev, mirror_conf, rule_id, on);
+		if (ret) {
+			LOG_ERROR_BDF("vlan mirror configure fail.(err:%d)", ret);
+			goto l_out;
+		}
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+		mirror_type |= SXE_MRCTL_VPME;
+		sxe_virtual_pool_mirror_configure(dev, mirror_conf, rule_id, on);
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) {
+		mirror_type |= SXE_MRCTL_UPME;
+	}
+
+	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) {
+		mirror_type |= SXE_MRCTL_DPME;
+	}
+
+	sxe_hw_mirror_ctl_set(hw, rule_id, mirror_type, mirror_conf->dst_pool, on);
+
+	/* Record the rule in software so it can be reported and reset. */
+	mirror_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+	mirror_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+	LOG_INFO_BDF("rule_id:%u mirror type:0x%x %s success. "
+		     "vlan id mask:0x%"SXE_PRIX64" virtual pool mask:0x%"SXE_PRIX64
+		     " dst_pool:%u.",
+		     rule_id,
+		     mirror_conf->rule_type,
+		     on ? "add" : "delete",
+		     mirror_conf->vlan.vlan_mask,
+		     mirror_conf->pool_mask,
+		     mirror_conf->dst_pool);
+
+l_out:
+	return ret;
+}
+
+/* Clear mirror rule @rule_id in both software state and hardware.
+ * Fixes the log typo "susccess" -> "success".
+ * Returns 0 or the validation error code.
+ */
+s32 sxe_mirror_rule_reset(struct rte_eth_dev *dev, u8 rule_id)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mirror_info *mirror_info = &(adapter->vt_ctxt.mr_info);
+	s32 ret;
+
+	ret = sxe_mirror_conf_check(hw, rule_id, SXE_ETH_MIRROR_TYPE_MASK);
+	if (ret) {
+		LOG_ERROR_BDF("rule_id:%u mirror config param invalid.(err:%d)",
+			      rule_id, ret);
+		goto l_out;
+	}
+
+	memset(&mirror_info->mr_conf[rule_id], 0,
+	       sizeof(struct rte_eth_mirror_conf));
+
+	sxe_hw_mirror_rule_clear(hw, rule_id);
+
+	LOG_INFO_BDF("rule_id:%u reset success.", rule_id);
+
+l_out:
+	return ret;
+}
+
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vf.h b/drivers/net/sxe/pf/sxe_vf.h
new file mode 100644
index 0000000000..8690b9e7fd
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vf.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_VF_H__
+#define __SXE_VF_H__
+
+#include "sxe_dpdk_version.h"
+#include <rte_ethdev.h>
+#if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#else
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_hw.h"
+
+#define SXE_MIRROR_RULES_MAX   4
+
+#define SXE_MSG_NUM(size)         DIV_ROUND_UP(size, 4)
+
+#define SXE_MSGTYPE_ACK    0x80000000
+#define SXE_MSGTYPE_NACK   0x40000000
+
+#define SXE_VFREQ_RESET               0x01 
+#define SXE_VFREQ_MAC_ADDR_SET        0x02 
+#define SXE_VFREQ_MC_ADDR_SYNC        0x03 
+#define SXE_VFREQ_VLAN_SET            0x04 
+#define SXE_VFREQ_LPE_SET             0x05  
+
+#define SXE_VFREQ_UC_ADDR_SYNC        0x06  
+
+#define SXE_VFREQ_API_NEGOTIATE       0x08  
+
+#define SXE_VFREQ_RING_INFO_GET       0x09  
+#define SXE_VFREQ_REDIR_TBL_GET       0x0a
+#define SXE_VFREQ_RSS_KEY_GET         0x0b
+#define SXE_VFREQ_CAST_MODE_SET       0x0c  
+#define SXE_VFREQ_LINK_ENABLE_GET     0X0d  
+#define SXE_VFREQ_IPSEC_ADD           0x0e
+#define SXE_VFREQ_IPSEC_DEL           0x0f
+#define SXE_VFREQ_RSS_CONF_GET        0x10
+
+#define SXE_VFREQ_MASK                0xFF
+
+#define SXE_MIRROR_TYPE_INVALID(mirror_type) \
+	((mirror_type) & ~(u8)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+#define SXE_ETH_MIRROR_TYPE_MASK \
+			(ETH_MIRROR_VIRTUAL_POOL_UP | ETH_MIRROR_UPLINK_PORT \
+			| ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)
+
+static inline u16 sxe_vf_num_get(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+	return pci_dev->max_vfs;
+}
+
+enum sxe_mbx_api_version {
+	SXE_MBX_API_10 = 0,
+	SXE_MBX_API_11,
+	SXE_MBX_API_12,
+	SXE_MBX_API_13, 
+	SXE_MBX_API_14, 
+
+	SXE_MBX_API_NR, 
+};
+
+enum sxe_cast_mode {
+	SXE_CAST_MODE_NONE = 0, 
+	SXE_CAST_MODE_MULTI,    
+	SXE_CAST_MODE_ALLMULTI, 
+	SXE_CAST_MODE_PROMISC,  
+};
+
+struct sxe_vf_info {
+	u8 mac_addr[RTE_ETHER_ADDR_LEN]; 
+	u16 mc_hash[SXE_VF_MC_ENTRY_NUM_MAX]; 
+	u8  mc_hash_used; 
+	u8 cast_mode; 
+	u8  trusted :1;  
+	u8  is_ready :1; 
+	u8  spoof_chk_enabled :1; 
+	u8  rss_query_enabled :1; 
+	u8  mac_from_pf :1; 
+	u8  reserved :3;  
+	u16 domain_id;
+	u16 tx_rate;    
+	u32 mbx_version; 
+	u32 vlan_cnt;     
+	u32 uc_mac_cnt;  
+};
+
+#ifdef ETH_DEV_MIRROR_RULE
+struct sxe_mirror_info {
+	struct rte_eth_mirror_conf mr_conf[SXE_MIRROR_RULES_MAX];
+
+};
+#endif
+
+struct sxe_virtual_context {
+	u8   pflink_fullchk;
+	u32 mbx_version; 
+	struct sxe_vf_info *vf_info;    
+#ifdef ETH_DEV_MIRROR_RULE
+	struct sxe_mirror_info mr_info; 
+#endif
+};
+
+struct sxe_msg_table {
+	u32 msg_type;
+	s32 (*msg_func)(struct rte_eth_dev *dev, u32 *msg, u32 vf_idx);
+};
+
+enum RTE_PMD_SXE_MB_event_rsp {
+	RTE_PMD_SXE_MB_EVENT_NOOP_ACK,  
+	RTE_PMD_SXE_MB_EVENT_NOOP_NACK, 
+	RTE_PMD_SXE_MB_EVENT_PROCEED,   
+	RTE_PMD_SXE_MB_EVENT_MAX        
+};
+
+struct rte_pmd_sxe_mb_event_param {
+	u16 vf_idx;     
+	u16 msg_type;   
+	u16 ret;        
+	void *msg;      
+};
+
+struct sxe_mbx_api_msg {
+	u32 msg_type;
+	u32 api_version;
+};
+
+struct sxe_uc_addr_msg {
+	u32 msg_type;
+	u8 uc_addr[RTE_ETHER_ADDR_LEN];
+	u16 pad;
+};
+
+struct sxe_rst_rcv {
+	u32 msg_type;
+};
+
+struct sxe_rst_reply {
+	u32 msg_type;
+	u32 mac_addr[2];
+	u32 mc_filter_type;
+};
+
+struct sxe_rst_msg {
+	union {
+		struct sxe_rst_rcv rcv;
+		struct sxe_rst_reply reply;
+	};
+};
+
+struct sxe_ring_info_msg {
+	u32 msg_type;
+	u8  max_rx_num; 
+	u8  max_tx_num; 
+	u8  tc_num;     
+	u8  default_tc; 
+};
+
+struct sxe_rss_hash_msg {
+	u32 msg_type;
+	u8  hash_key[SXE_RSS_KEY_SIZE];
+	u64 rss_hf;
+};
+
+struct sxe_vlan_msg {
+	u16 msg_type;
+	u16 add;
+	u32 vlan_id;
+};
+
+struct sxe_mc_sync_msg {
+	u16 msg_type;
+	u16 mc_cnt;  
+	u16 mc_addr_extract[SXE_VF_MC_ENTRY_NUM_MAX];
+};
+
+struct sxe_cast_mode_msg {
+	u32 msg_type;
+	u32 cast_mode;
+};
+
+struct sxe_uc_sync_msg {
+	u16 msg_type;
+	u16 index;
+	u32 addr[2];
+};
+
+struct sxe_max_frame_msg {
+	u32 msg_type;
+	u32 max_frame;
+};
+
+s32 sxe_vt_init(struct rte_eth_dev *eth_dev);
+
+void sxe_vt_configure(struct rte_eth_dev *eth_dev);
+
+void sxe_vt_uninit(struct rte_eth_dev *eth_dev);
+
+s32 sxe_vf_rss_configure(struct rte_eth_dev *dev);
+
+s32 sxe_vf_default_mode_configure(struct rte_eth_dev *dev);
+
+void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev);
+
+#ifdef ETH_DEV_MIRROR_RULE
+s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
+		      struct rte_eth_mirror_conf *mirror_conf,
+		      u8 rule_id, u8 on);
+
+s32 sxe_mirror_rule_reset(struct rte_eth_dev *dev, u8 rule_id);
+
+#endif
+#endif
diff --git a/drivers/net/sxe/rte_pmd_sxe_version.map b/drivers/net/sxe/rte_pmd_sxe_version.map
new file mode 100644
index 0000000000..e85eb752b4
--- /dev/null
+++ b/drivers/net/sxe/rte_pmd_sxe_version.map
@@ -0,0 +1,10 @@
+DPDK_20.0 {
+	global: 
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+#EXPERIMENTAL {
+#	global: *;
+#};
diff --git a/drivers/net/sxe/sxe_drv_type.h b/drivers/net/sxe/sxe_drv_type.h
new file mode 100644
index 0000000000..c7bda4f558
--- /dev/null
+++ b/drivers/net/sxe/sxe_drv_type.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DRV_TYPEDEF_H__
+#define __SXE_DRV_TYPEDEF_H__
+
+#ifdef SXE_DPDK
+#include "sxe_types.h"
+#ifndef bool
+typedef _Bool bool;
+#endif
+#else
+#include <linux/types.h>
+#endif
+
+typedef u8 U8;
+typedef u16 U16;
+typedef u32 U32;
+typedef u64 U64;
+typedef bool BOOL;
+
+#endif
diff --git a/drivers/net/sxe/version.map b/drivers/net/sxe/version.map
new file mode 100644
index 0000000000..2064d17939
--- /dev/null
+++ b/drivers/net/sxe/version.map
@@ -0,0 +1,24 @@
+DPDK_21 {
+	global: 
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+DPDK_22 {
+	global: 
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+DPDK_23 {
+	global: 
+	rte_pmd_sxe_tx_loopback_set;
+	rte_pmd_sxe_tc_bw_set;
+	local: *;
+};
+
+#EXPERIMENTAL {
+#	global: *;
+#};
diff --git a/drivers/net/sxe/vf/sxevf.h b/drivers/net/sxe/vf/sxevf.h
new file mode 100644
index 0000000000..52d294d869
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXEVF_H__
+#define __SXEVF_H__
+
+#include <rte_pci.h>
+
+#include "sxevf_irq.h"
+#include "sxevf_hw.h"
+#include "sxevf_filter.h"
+#include "sxevf_stats.h"
+
+#define SXEVF_DEVARG_LINK_CHECK           "link_check"
+
+struct sxevf_adapter {
+	s8 name[PCI_PRI_STR_SIZE+1]; /* PCI address string copied from rte_pci_device */
+	u8 max_rx_queue; /* rx queue count exposed via dev_info */
+	u8 max_tx_queue; /* tx queue count exposed via dev_info */
+
+	struct sxevf_hw hw;
+	struct sxevf_irq_context irq_ctxt;
+	struct sxevf_vlan_context vlan_ctxt;
+	struct sxevf_mac_filter_context mac_filter_ctxt;
+	struct sxevf_stats_info stats_info;
+
+	rte_atomic32_t link_thread_running;
+	pthread_t link_thread_tid;
+	u8 link_check; /* set from the "link_check" devarg */
+	bool stop; /* true while the port is stopped or under reset */
+	bool rx_batch_alloc_allowed;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	bool rx_vec_allowed;
+#endif
+	u8 rss_reta_updated;
+};
+
+struct sxevf_thread_param {
+	struct rte_eth_dev *dev;
+	pthread_barrier_t barrier;
+};
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_ethdev.c b/drivers/net/sxe/vf/sxevf_ethdev.c
new file mode 100644
index 0000000000..d656dc83fc
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_ethdev.c
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+
+#include "sxevf.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxevf_msg.h"
+#include "sxe_errno.h"
+#include "sxevf_tx.h"
+#include "sxevf_rx.h"
+#include "sxevf_ethdev.h"
+#include "sxevf_queue.h"
+#include "sxevf_offload.h"
+#include "sxe_compat_version.h"
+
+#define SXEVF_ETH_OVERHEAD     (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)  
+#define SXEVF_HKEY_MAX_INDEX   (10)
+#define SXEVF_RSS_OFFLOAD_ALL ( \
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_IPV6_EX | \
+	RTE_ETH_RSS_IPV6_TCP_EX | \
+	RTE_ETH_RSS_IPV6_UDP_EX)
+
+#define SXEVF_DEFAULT_RX_FREE_THRESH  32
+#define SXEVF_DEFAULT_RX_PTHRESH      8
+#define SXEVF_DEFAULT_RX_HTHRESH      8
+#define SXEVF_DEFAULT_RX_WTHRESH      0
+
+#define SXEVF_DEFAULT_TX_FREE_THRESH  32
+#define SXEVF_DEFAULT_TX_PTHRESH      32
+#define SXEVF_DEFAULT_TX_HTHRESH      0
+#define SXEVF_DEFAULT_TX_WTHRESH      0
+#define SXEVF_DEFAULT_TX_RSBIT_THRESH 32
+
+#define	SXEVF_MIN_RING_DESC     32
+#define	SXEVF_MAX_RING_DESC     4096
+
+#define	SXEVF_ALIGN             128
+#define SXEVF_RXD_ALIGN        (SXEVF_ALIGN / sizeof(sxevf_rx_data_desc_u))
+#define SXEVF_TXD_ALIGN        (SXEVF_ALIGN / sizeof(sxevf_tx_data_desc_u))
+
+#define SXEVF_TX_MAX_SEG            40
+#define SXEVF_DEFAULT_TX_QUEUE_NUM  1
+#define SXEVF_DEFAULT_RX_QUEUE_NUM  1
+#define SXEVF_RX_BUF_MIN      1024
+#define SXEVF_RX_BUF_LEN_MAX  9728
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = SXEVF_MAX_RING_DESC,
+	.nb_min = SXEVF_MIN_RING_DESC,
+	.nb_align = SXEVF_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = SXEVF_MAX_RING_DESC,
+	.nb_min = SXEVF_MIN_RING_DESC,
+	.nb_align = SXEVF_TXD_ALIGN,
+	.nb_seg_max = SXEVF_TX_MAX_SEG,
+	.nb_mtu_seg_max = SXEVF_TX_MAX_SEG,
+};
+
+static const char * const sxevf_valid_arguments[] = {
+	SXEVF_DEVARG_LINK_CHECK,
+	NULL
+};
+
+STATIC s32 sxevf_devargs_handle(__rte_unused const char *key, const char *value,
+		  void *extra_args)
+{
+	u16 *n = extra_args;
+	s32 ret;
+
+	if (value == NULL || extra_args == NULL) {
+		ret = -EINVAL;
+		LOG_ERROR("invalid args.(err:%d)", ret);
+		goto l_out;
+	}
+	errno = 0;	/* clear stale errno so the ERANGE check below is reliable */
+	*n = (u16)strtoul(value, NULL, 0);
+	if (*n == USHRT_MAX && errno == ERANGE) {
+		ret = -ERANGE;
+		LOG_ERROR("invalid args.(err:%d)", ret);
+		goto l_out;
+	}
+
+	ret = 0;
+
+l_out:
+	return ret;
+}
+
+STATIC void sxevf_devargs_parse(struct sxevf_adapter *adapter,
+		      struct rte_devargs *devargs)
+{
+	struct rte_kvargs *kvlist;
+	u16 check;
+
+	if (devargs == NULL) {
+		LOG_INFO_BDF("no dev args.");
+		goto l_out;
+	}
+
+	kvlist = rte_kvargs_parse(devargs->args, sxevf_valid_arguments);
+	if (kvlist == NULL)
+		return;
+
+	if (rte_kvargs_count(kvlist, SXEVF_DEVARG_LINK_CHECK) == 1 &&
+	    rte_kvargs_process(kvlist, SXEVF_DEVARG_LINK_CHECK,
+			       sxevf_devargs_handle, &check) == 0 &&
+	    check == 1) {
+		adapter->link_check = 1;
+	}
+
+	LOG_INFO_BDF("dev args link_check:%u", adapter->link_check);
+
+	rte_kvargs_free(kvlist);
+l_out:
+	return;
+}
+
+static s32 sxevf_hw_dev_reset(struct sxevf_hw *hw)
+{
+	u32 retry = SXEVF_RST_CHECK_NUM;
+	s32 ret;
+	struct sxevf_rst_msg msg = {};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	adapter->stop = true;	/* block datapath while the VF resets */
+
+	sxevf_hw_stop(hw);
+
+	/* Mail box init */
+	sxevf_mbx_init(hw);
+
+
+	sxevf_hw_reset(hw);
+
+	while (!sxevf_pf_rst_check(hw) && retry) {	/* poll for PF reset done */
+		retry--;
+		udelay(5);
+	}
+
+	if (!retry) {
+		ret = -SXEVF_ERR_RESET_FAILED;
+		LOG_ERROR_BDF("retry:%u use up, pf has not reset done.(err:%d)\n",
+		               SXEVF_RST_CHECK_NUM, ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("pf reset done.");
+
+	hw->mbx.retry = SXEVF_MBX_RETRY_COUNT;
+
+	sxevf_rxtx_reg_init(hw);
+
+	/* Send reset message to pf */
+	msg.msg_type = SXEVF_RESET;
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+				    SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret) {
+		LOG_ERROR_BDF("vf reset msg:%u len:%zu mailbox fail.(err:%d)\n",
+			  msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)), ret);
+		goto l_out;
+	}
+
+	if (msg.msg_type == (SXEVF_RESET | SXEVF_MSGTYPE_ACK)) {
+		memcpy(&adapter->mac_filter_ctxt.def_mac_addr,
+			(u8 *)(msg.mac_addr), SXEVF_MAC_ADDR_LEN);	/* PF-assigned MAC */
+	}
+
+	adapter->mac_filter_ctxt.mc_filter_type = msg.mc_fiter_type;
+
+	LOG_INFO_BDF("vf get mc filter type:%d default mac addr:"MAC_FMT" from pf.\n",
+		  adapter->mac_filter_ctxt.mc_filter_type,
+		  MAC_ADDR(&adapter->mac_filter_ctxt.def_mac_addr));
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_hw_base_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret;
+
+	hw->reg_base_addr = (void *)pci_dev->mem_resource[0].addr;
+	PMD_LOG_INFO(INIT, "eth_dev[%u] got reg_base_addr=%p",
+			eth_dev->data->port_id, hw->reg_base_addr);
+	hw->adapter = adapter;
+
+	strlcpy(adapter->name, pci_dev->device.name, sizeof(adapter->name));	/* strlcpy takes the full dst size and NUL-terminates */
+	adapter->stop = true;
+
+	adapter->max_rx_queue = SXEVF_DEFAULT_RX_QUEUE_NUM;
+	adapter->max_tx_queue = SXEVF_DEFAULT_TX_QUEUE_NUM;
+
+	ret = sxevf_hw_dev_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw dev reset failed, ret=%d", ret);
+		goto l_out;
+	} else {
+		adapter->stop = false;
+	}
+
+	ret = sxevf_mac_addr_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mac addr init fail, ret=%d", ret);
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+static void sxevf_txrx_start(struct rte_eth_dev *eth_dev) /* enable every configured Tx/Rx ring */
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_tx_queue_s *txq;
+	sxevf_rx_queue_s *rxq;
+	u16 i;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		sxevf_tx_ring_switch(hw, txq->reg_idx, true);
+	}
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		sxevf_rx_ring_switch(hw, rxq->reg_idx, true);
+
+		rte_wmb(); /* order descriptor/ring writes before the tail update below */
+
+		sxevf_rx_desc_tail_set(hw, rxq->reg_idx, rxq->ring_depth - 1); /* hand all descriptors to hw */
+	}
+
+	return;
+}
+
+static s32 sxevf_dev_start(struct rte_eth_dev *dev) /* dev_ops.dev_start: reset hw, program queues and irq, enable traffic */
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = sxevf_hw_dev_reset(hw); /* clean hw state before re-programming */
+	if (ret) {
+		LOG_ERROR_BDF("dev reset fail.");
+		goto l_out;
+	}
+
+	sxevf_mbx_api_version_init(adapter);
+
+	sxevf_tx_configure(dev);
+
+	ret = sxevf_rx_configure(dev);
+	if (ret) {
+		LOG_ERROR_BDF("rx configure fail.(err:%d)", ret);
+		goto l_clear_queue;
+	}
+
+	sxevf_vlan_filter_configure(dev);
+
+	sxevf_txrx_start(dev);
+
+	sxevf_irq_configure(dev);
+
+	sxevf_stats_init_value_get(hw, &stats_info->hw_stats); /* baseline for stats deltas */
+
+	adapter->stop = false; /* datapath is live again */
+
+l_out:
+	return ret;
+
+l_clear_queue:
+	sxevf_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed); /* undo tx/rx setup on failure */
+	return ret;
+}
+
+#ifdef DPDK_19_11_6
+static void sxevf_dev_stop(struct rte_eth_dev *dev)
+#else
+static s32 sxevf_dev_stop(struct rte_eth_dev *dev)
+#endif
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (adapter->stop) {
+		LOG_INFO_BDF("eth dev has been stopped.");
+		goto l_out;
+	}
+
+	adapter->stop = true;	/* was false: must mark stopped so a repeat stop/close is a no-op */
+	dev->data->dev_started = false;
+	dev->data->scattered_rx = false;
+
+	sxevf_hw_stop(hw);
+
+	sxevf_vfta_sync(dev, false);
+
+	sxevf_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+
+	sxevf_irq_free(dev);
+
+l_out:
+#ifdef DPDK_19_11_6
+	return;
+#else
+	return 0;
+#endif
+}
+
+#ifdef DPDK_19_11_6
+static void sxevf_dev_close(struct rte_eth_dev *dev)
+#else
+static s32 sxevf_dev_close(struct rte_eth_dev *dev)
+#endif
+{
+	s32 ret = 0;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		LOG_INFO_BDF("secondary process can't close dev.");
+		goto l_out;
+	}
+
+	ret = sxevf_hw_dev_reset(hw);
+	if (ret) {
+		LOG_ERROR_BDF("dev reset fail.");	/* best effort: keep closing */
+	}
+
+	sxevf_dev_stop(dev);
+
+	sxevf_stats_init_value_get(hw, &stats_info->hw_stats);
+
+	sxevf_queues_free(dev);
+
+	sxevf_irq_unregister(dev);
+
+l_out:
+#ifdef DPDK_19_11_6
+	return;
+#else
+	return ret;
+#endif
+}
+
+STATIC s32 sxevf_dev_reset(struct rte_eth_dev *dev)
+{
+	s32 ret;
+
+	ret = sxevf_ethdev_uninit(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "dev uninit fail.");
+		goto l_out;
+	}
+
+	ret = sxevf_ethdev_init(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "dev init fail.");
+	}
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_dev_info_get(struct rte_eth_dev *dev,
+		     struct rte_eth_dev_info *dev_info)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	dev_info->max_rx_queues = adapter->max_rx_queue;
+	dev_info->max_tx_queues = adapter->max_tx_queue;
+	dev_info->min_rx_bufsize = SXEVF_RX_BUF_MIN; 
+	dev_info->max_rx_pktlen = SXEVF_RX_BUF_LEN_MAX; 
+	dev_info->max_mtu = dev_info->max_rx_pktlen - SXEVF_ETH_OVERHEAD;
+	dev_info->max_mac_addrs = adapter->mac_filter_ctxt.uc_table_size;
+	dev_info->max_hash_mac_addrs = SXEVF_UTA_HASH_BIT_MAX;
+	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+
+	dev_info->rx_queue_offload_capa = sxevf_rx_queue_offloads_get(dev);
+	dev_info->rx_offload_capa = (sxevf_rx_port_offloads_get(dev) |
+				     dev_info->rx_queue_offload_capa);
+	dev_info->tx_queue_offload_capa = sxevf_tx_queue_offloads_get(dev);
+	dev_info->tx_offload_capa = sxevf_tx_port_offloads_get(dev);
+
+	dev_info->hash_key_size = SXEVF_HKEY_MAX_INDEX * sizeof(u32);
+	dev_info->reta_size = 0;
+	dev_info->flow_type_rss_offloads = SXEVF_RSS_OFFLOAD_ALL;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = SXEVF_DEFAULT_RX_PTHRESH,
+			.hthresh = SXEVF_DEFAULT_RX_HTHRESH,
+			.wthresh = SXEVF_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = SXEVF_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = SXEVF_DEFAULT_TX_PTHRESH,
+			.hthresh = SXEVF_DEFAULT_TX_HTHRESH,
+			.wthresh = SXEVF_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = SXEVF_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = SXEVF_DEFAULT_TX_RSBIT_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
+
+#ifdef DPDK_22_11_3
+	dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE;
+#endif
+
+	return 0;
+}
+
+static s32 sxevf_mtu_set(struct rte_eth_dev *dev, u16 mtu)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 max_frame = mtu + SXEVF_ETH_OVERHEAD;
+	s32 ret;
+
+	if (mtu < RTE_ETHER_MIN_MTU ||
+			max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("invalid mtu:%u.", mtu);
+		goto l_out;
+	}
+
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
+	    ((max_frame + 2 * SXEVF_VLAN_TAG_SIZE) >
+	     (dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("max_frame:%u stop port first.(err:%d)",
+			      max_frame, ret);
+		goto l_out;
+	}
+
+	ret = sxevf_rx_max_frame_set(hw, mtu);
+	if (ret) {
+		LOG_ERROR_BDF("max_frame:%u set fail.(err:%d)", max_frame, ret);
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+#endif
+
+	LOG_INFO_BDF("change max frame size to %u success.", max_frame);
+
+l_out:
+	return ret;
+}
+
+static s32 sxevf_dev_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *conf = &dev->data->dev_conf;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+	LOG_INFO_BDF("Configured Virtual Function port id: %d",
+		     dev->data->port_id);
+
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
+
+#ifndef RTE_LIBRTE_SXEVF_PF_DISABLE_STRIP_CRC
+	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		LOG_INFO_BDF("VF can't disable HW CRC Strip");
+		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+	}
+#else
+	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
+		LOG_INFO_BDF("VF can't enable HW CRC Strip");
+		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+	}
+#endif
+
+	adapter->rx_batch_alloc_allowed = true;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	adapter->rx_vec_allowed = true;
+#endif
+
+	return 0;
+}
+
+static const struct sxevf_reg_info sxevf_regs_general[] = {
+	{SXE_VFCTRL, 1, 1, "SXE_VFCTRL"},
+	{SXE_VFSTATUS, 1, 1, "SXE_VFSTATUS"},
+	{SXE_VFLINKS, 1, 1, "SXE_VFLINKS"},
+	{SXE_VFFRTIMER, 1, 1, "SXE_VFFRTIMER"},
+	{SXE_VFMAILBOX, 1, 1, "SXE_VFMAILBOX"},
+	{SXE_VFMBMEM, 16, 4, "SXE_VFMBMEM"},
+	{SXE_VFRXMEMWRAP, 1, 1, "SXE_VFRXMEMWRAP"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info sxevf_regs_interrupt[] = {
+	{SXE_VFEICR, 1, 1, "SXE_VFEICR"},
+	{SXE_VFEICS, 1, 1, "SXE_VFEICS"},
+	{SXE_VFEIMS, 1, 1, "SXE_VFEIMS"},
+	{SXE_VFEIMC, 1, 1, "SXE_VFEIMC"},
+	{SXE_VFEIAM, 1, 1, "SXE_VFEIAM"},
+	{SXE_VFEITR(0), 2, 4, "SXE_VFEITR"},
+	{SXE_VFIVAR(0), 4, 4, "SXE_VFIVAR"},
+	{SXE_VFIVAR_MISC, 1, 1, "SXE_VFIVAR_MISC"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info sxevf_regs_rxdma[] = {
+	{SXE_VFRDBAL(0), 8, 0x40, "SXE_VFRDBAL"},
+	{SXE_VFRDBAH(0), 8, 0x40, "SXE_VFRDBAH"},
+	{SXE_VFRDLEN(0), 8, 0x40, "SXE_VFRDLEN"},
+	{SXE_VFRDH(0), 8, 0x40, "SXE_VFRDH"},
+	{SXE_VFRDT(0), 8, 0x40, "SXE_VFRDT"},
+	{SXE_VFRXDCTL(0), 8, 0x40, "SXE_VFRXDCTL"},
+	{SXE_VFSRRCTL(0), 8, 0x40, "SXE_VFSRRCTL"},
+	{SXE_VFPSRTYPE, 1, 1,	"SXE_VFPSRTYPE"},
+	{SXE_VFLROCTL(0), 8, 0x40, "SXE_VFRSCCTL"},
+	{SXE_VFDCA_RXCTRL(0), 8, 0x40, "SXE_VFDCA_RXCTRL"},
+	{SXE_VFDCA_TXCTRL(0), 8, 0x40, "SXE_VFDCA_TXCTRL"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info sxevf_regs_tx[] = {
+	{SXE_VFTDBAL(0), 4, 0x40, "SXE_VFTDBAL"},
+	{SXE_VFTDBAH(0), 4, 0x40, "SXE_VFTDBAH"},
+	{SXE_VFTDLEN(0), 4, 0x40, "SXE_VFTDLEN"},
+	{SXE_VFTDH(0), 4, 0x40, "SXE_VFTDH"},
+	{SXE_VFTDT(0), 4, 0x40, "SXE_VFTDT"},
+	{SXE_VFTXDCTL(0), 4, 0x40, "SXE_VFTXDCTL"},
+	{SXE_VFTDWBAL(0), 4, 0x40, "SXE_VFTDWBAL"},
+	{SXE_VFTDWBAH(0), 4, 0x40, "SXE_VFTDWBAH"},
+	{0, 0, 0, ""}
+};
+
+static const struct sxevf_reg_info *sxevf_regs_group[] = {
+				sxevf_regs_general,
+				sxevf_regs_interrupt,
+				sxevf_regs_rxdma,
+				sxevf_regs_tx,
+				NULL};
+
+static u32 sxevf_regs_group_count(const struct sxevf_reg_info *regs)
+{
+	u32 i = 0;
+	u32 count = 0;
+
+	while (regs[i].count) {	/* table ends with a zero-count sentinel entry */
+		count += regs[i++].count;
+	}
+
+	return count;
+}
+
+u32 sxevf_regs_group_num_get(void)
+{
+	u32 i = 0;
+	u32 count = 0;
+	const struct sxevf_reg_info *reg_group;
+	const struct sxevf_reg_info **reg_set = sxevf_regs_group;
+
+	while ((reg_group = reg_set[i++])) {
+		count += sxevf_regs_group_count(reg_group);
+	}
+
+	PMD_LOG_INFO(INIT, "read regs cnt=%u\n", count);
+
+	return count;
+}
+
+void sxevf_regs_group_read(struct sxevf_hw *hw, u32 *data)
+{
+	u32 cnt = 0, i = 0;
+	const struct sxevf_reg_info *reg_group;
+	const struct sxevf_reg_info **reg_set = sxevf_regs_group;
+
+	while ((reg_group = reg_set[i++])) {
+		cnt += sxevf_hw_regs_group_read(hw, reg_group, &data[cnt]);
+	}
+
+	PMD_LOG_INFO(INIT, "read regs cnt=%u, regs num=%u\n",
+	             cnt, sxevf_regs_group_num_get());
+
+	return;
+}
+
+static int sxevf_get_regs(struct rte_eth_dev *dev,
+	      struct rte_dev_reg_info *regs)
+{
+	s32 ret = 0;
+	u32 *data = regs->data;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 length = sxevf_regs_group_num_get();
+
+	if (data == NULL) {
+		regs->length = length;
+		regs->width = sizeof(u32);
+		goto l_end;
+	}
+
+	if ((regs->length == 0) || (regs->length == length)) {
+		sxevf_regs_group_read(hw, data);
+
+		goto l_end;
+	}
+
+	ret = -ENOTSUP;
+	PMD_LOG_ERR(INIT, "get regs: inval param: regs_len=%u, regs->data=%p, "
+			  "regs_offset=%u,  regs_width=%u, regs_version=%u",
+			  regs->length, regs->data,
+			  regs->offset, regs->width,
+			  regs->version);
+
+l_end:
+	return ret;
+}
+
+static const struct eth_dev_ops sxevf_eth_dev_ops = {
+	.dev_configure        = sxevf_dev_configure,
+	.dev_start            = sxevf_dev_start,
+	.dev_stop             = sxevf_dev_stop,
+	.link_update          = sxevf_link_update,
+	.stats_get            = sxevf_eth_stats_get,
+	.xstats_get           = sxevf_xstats_get,
+	.stats_reset          = sxevf_dev_stats_reset,
+	.xstats_reset         = sxevf_dev_stats_reset,
+	.xstats_get_names     = sxevf_xstats_names_get,
+	.dev_close            = sxevf_dev_close,
+	.dev_reset	      = sxevf_dev_reset,
+	.promiscuous_enable   = sxevf_promiscuous_enable,
+	.promiscuous_disable  = sxevf_promiscuous_disable,
+	.allmulticast_enable  = sxevf_allmulticast_enable,
+	.allmulticast_disable = sxevf_allmulticast_disable,
+	.dev_infos_get        = sxevf_dev_info_get,
+	.dev_supported_ptypes_get = sxevf_dev_supported_ptypes_get,
+	.mtu_set              = sxevf_mtu_set,
+	.vlan_filter_set      = sxevf_vlan_filter_set,
+	.vlan_strip_queue_set = sxevf_vlan_strip_queue_set,
+	.vlan_offload_set     = sxevf_vlan_offload_set,
+	.rx_queue_setup       = sxevf_rx_queue_setup,
+	.rx_queue_release     = sxevf_rx_queue_release,
+	.tx_queue_setup       = sxevf_tx_queue_setup,
+	.tx_queue_release     = sxevf_tx_queue_release,
+	.rx_queue_intr_enable = sxevf_rx_queue_intr_enable,
+	.rx_queue_intr_disable = sxevf_rx_queue_intr_disable,
+	.mac_addr_add         = sxevf_mac_addr_add,
+	.mac_addr_remove      = sxevf_mac_addr_remove,
+	.set_mc_addr_list     = sxevf_set_mc_addr_list,
+	.rxq_info_get         = sxevf_rx_queue_info_get,
+	.txq_info_get         = sxevf_tx_queue_info_get,
+	.mac_addr_set         = sxevf_default_mac_addr_set,
+	.get_reg              = sxevf_get_regs,
+	.reta_update          = sxevf_rss_reta_update,
+	.reta_query           = sxevf_rss_reta_query,
+	.rss_hash_update      = sxevf_rss_hash_update,
+	.rss_hash_conf_get    = sxevf_rss_hash_conf_get,
+	.tx_done_cleanup      = sxevf_tx_done_cleanup,
+#ifdef ETH_DEV_OPS_MONITOR
+	.get_monitor_addr     = sxe_monitor_addr_get,
+#endif
+#ifdef ETH_DEV_OPS_HAS_DESC_RELATE
+	.rx_descriptor_status = sxevf_rx_descriptor_status,
+	.tx_descriptor_status = sxevf_tx_descriptor_status,
+#ifdef ETH_DEV_RX_DESC_DONE
+	.rx_descriptor_done   = sxevf_rx_descriptor_done,
+#endif
+#endif
+};
+
+s32 sxevf_ethdev_init(struct rte_eth_dev *eth_dev)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+	u8 default_tc;
+	u8 tc_num;
+
+	PMD_INIT_FUNC_TRACE();
+
+	eth_dev->dev_ops = &sxevf_eth_dev_ops;
+
+#ifndef ETH_DEV_OPS_HAS_DESC_RELATE
+	eth_dev->rx_descriptor_status = sxevf_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = sxevf_tx_descriptor_status;
+#ifdef ETH_DEV_RX_DESC_DONE
+	eth_dev->rx_descriptor_done   = sxevf_rx_descriptor_done;
+#endif
+#endif
+
+	eth_dev->rx_pkt_burst         = &sxevf_pkts_recv;
+	eth_dev->tx_pkt_burst = &sxevf_pkts_xmit_with_offload;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		sxevf_secondary_proc_init(eth_dev);
+		goto l_out;
+	}
+
+	sxevf_devargs_parse(adapter,
+			      pci_dev->device.devargs);	/* adapter == dev_private */
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+#ifdef DPDK_19_11_6
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+#else
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+
+	ret = sxevf_hw_base_init(eth_dev);
+	if (ret) {
+		ret = -EIO;
+		LOG_ERROR_BDF("hw base init fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	sxevf_dev_stats_reset(eth_dev);
+
+	sxevf_stats_init_value_get(hw, &stats_info->hw_stats);
+
+	sxevf_mbx_api_version_init(adapter);
+
+	sxevf_ring_info_get(adapter, &default_tc, &tc_num);
+
+	sxevf_irq_init(eth_dev);
+
+	LOG_INFO_BDF("sxevf eth dev init done.");
+
+l_out:
+	return ret;
+}
+
+s32 sxevf_ethdev_uninit(struct rte_eth_dev *eth_dev)
+{
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_WARN(INIT, "secondary process can't uninit.");
+		goto l_out;
+	}
+
+	sxevf_dev_close(eth_dev);
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_ethdev.h b/drivers/net/sxe/vf/sxevf_ethdev.h
new file mode 100644
index 0000000000..4eb33321a3
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_ethdev.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_ETHDEV_H__
+#define __SXEVF_ETHDEV_H__
+
+s32 sxevf_ethdev_init(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_ethdev_uninit(struct rte_eth_dev *eth_dev);
+
+u32 sxevf_regs_group_num_get(void);
+
+void sxevf_regs_group_read(struct sxevf_hw *hw, u32 *data);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_filter.c b/drivers/net/sxe/vf/sxevf_filter.c
new file mode 100644
index 0000000000..4f788ee4a1
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_filter.c
@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <string.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "sxevf.h"
+#include "sxe_logs.h"
+#include "sxevf_msg.h"
+#include "sxe_errno.h"
+#include "sxevf_filter.h"
+#include "sxevf_rx.h"
+#include "sxevf_queue.h"
+#include "sxe_compat_version.h"
+
+/* Multicast hash extraction: a 12-bit key split into a register index
+ * (upper bits) and a bit index (lower 5 bits).
+ */
+#define  SXEVF_MAC_ADDR_EXTRACT_MASK  (0xFFF)
+#define  SXEVF_MAC_ADDR_SHIFT         (5)
+#define  SXEVF_MAC_ADDR_REG_MASK      (0x7F)
+#define  SXEVF_MAC_ADDR_BIT_MASK      (0x1F)
+
+/* Set queue q's bit in context h's vlan strip bitmap (one bit per queue). */
+#define SXEVF_STRIP_BITMAP_SET(h, q) \
+	do { \
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] |= 1 << bit;\
+	} while (0)
+
+/* Clear queue q's bit in context h's vlan strip bitmap. */
+#define SXEVF_STRIP_BITMAP_CLEAR(h, q) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] &= ~(1 << bit);\
+	} while (0)
+
+/* Read queue q's strip bit from context h into r (0 or 1). */
+#define SXEVF_STRIP_BITMAP_GET(h, q, r) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(r) = (h)->strip_bitmap[idx] >> bit & 1;\
+	} while (0)
+
+/* Fill mac_addr with a random locally-administered unicast address:
+ * fixed 3-byte prefix e4:b6:33 (with the local-admin bit forced on)
+ * followed by 3 random bytes from the EAL random source.
+ */
+static void sxevf_random_mac_addr_generate(struct rte_ether_addr *mac_addr)
+{
+	u64 rand_val;
+
+	mac_addr->addr_bytes[0] = 0xe4 | RTE_ETHER_LOCAL_ADMIN_ADDR;
+	mac_addr->addr_bytes[1] = 0xb6;
+	mac_addr->addr_bytes[2] = 0x33;
+
+	rand_val = rte_rand();
+	memcpy(&mac_addr->addr_bytes[3], &rand_val, 3);
+
+	return;
+}
+
+/* Allocate the ethdev MAC address table and install the default VF MAC.
+ * When no default address is known yet, a random locally-administered one
+ * is generated and pushed to hardware via sxevf_mac_addr_set().
+ * Returns 0 on success, -ENOMEM when the table allocation fails, or the
+ * mailbox error from sxevf_mac_addr_set() (the table is freed then).
+ */
+s32 sxevf_mac_addr_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	s32 ret = 0;
+
+	/* One 6-byte slot per supported unicast entry. */
+	eth_dev->data->mac_addrs = rte_zmalloc("sxe",
+				RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		LOG_ERROR_BDF("mac addr allocate %u B fail.",
+			     RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	/* All-zero default address means none assigned yet: generate one. */
+	if (rte_is_zero_ether_addr(&mac_filter->def_mac_addr)) {
+		sxevf_random_mac_addr_generate(&mac_filter->def_mac_addr);
+		ret = sxevf_mac_addr_set(hw, mac_filter->def_mac_addr.addr_bytes);
+		if (ret) {
+			LOG_ERROR_BDF("vf uc mac addr set fail.(err:%d)", ret);
+			goto l_free;
+		}
+		LOG_INFO_BDF("generate random mac_addr:"MAC_FMT,
+			MAC_ADDR(mac_filter->def_mac_addr.addr_bytes));
+	}
+
+	/* Slot 0 of the ethdev table always holds the default address. */
+	rte_ether_addr_copy(&mac_filter->def_mac_addr, &eth_dev->data->mac_addrs[0]);
+
+	mac_filter->uc_table_size = SXEVF_HW_UC_ENTRY_NUM_MAX;
+
+l_out:
+	return ret;
+
+l_free:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	return ret;
+}
+
+/* Replay the software shadow VLAN filter table to hardware: every VLAN id
+ * whose bit is set in vlan_table is programmed with the requested state.
+ */
+void sxevf_vfta_sync(struct rte_eth_dev *eth_dev, bool on)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u32 reg_idx;
+	u32 bit_idx;
+	u32 vfta;
+
+	for (reg_idx = 0; reg_idx < SXEVF_VFT_TBL_SIZE; reg_idx++) {
+		vfta = vlan_ctxt->vlan_table[reg_idx];
+		if (!vfta) {
+			continue;
+		}
+
+		/* 32 VLAN ids per table word; vlan id = reg_idx * 32 + bit. */
+		for (bit_idx = 0; bit_idx < 32; bit_idx++) {
+			if (vfta & (1u << bit_idx)) {
+				sxevf_vlan_id_set(hw, (reg_idx << 5) + bit_idx, on);
+			}
+		}
+	}
+
+	return;
+}
+
+/* Record queue_idx's vlan-strip state in the software bitmap and mirror
+ * it into the queue's mbuf flags / offload bits.
+ * Two independent range checks: the bitmap is sized by the hardware
+ * maximum (max_rx_queue) while rx_queues[] only holds the queues the
+ * application configured (nb_rx_queues).
+ */
+static void sxevf_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bool on)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	sxevf_rx_queue_s *rxq;
+
+	if (queue_idx >= adapter->max_rx_queue) {
+		LOG_ERROR_BDF("invalid queue idx:%u exceed max"
+			   " queue  number:%u.",
+			   queue_idx, adapter->max_rx_queue);
+		goto l_out;
+	}
+
+	if (on) {
+		SXEVF_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
+	} else {
+		SXEVF_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
+	}
+
+	/* Bitmap is updated above even for queues not currently set up. */
+	if (queue_idx >= dev->data->nb_rx_queues) {
+		LOG_ERROR_BDF("invalid queue_idx id:%u exceed rx "
+			   " queue number:%u.",
+			   queue_idx, dev->data->nb_rx_queues);
+		goto l_out;
+	}
+
+	rxq = dev->data->rx_queues[queue_idx];
+
+	/* vlan_flags tells the rx path which mbuf flags to set on tagged
+	 * packets; offloads keeps the per-queue capability bit in sync.
+	 */
+	if (on) {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	} else {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+
+	LOG_INFO_BDF("queue idx:%u vlan strip on:%d set bitmap and offload done.",
+		     queue_idx, on);
+
+l_out:
+	return;
+}
+
+/* Push each rx queue's VLAN-strip offload flag down to hardware and
+ * mirror it in the per-queue strip bitmap.
+ */
+static void sxevf_vlan_strip_switch_set(struct rte_eth_dev *dev)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_rx_queue_s *rxq;
+	bool strip_on;
+	u16 queue_idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (queue_idx = 0; queue_idx < dev->data->nb_rx_queues; queue_idx++) {
+		rxq = dev->data->rx_queues[queue_idx];
+		strip_on = (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0;
+
+		sxevf_hw_vlan_tag_strip_switch(hw, queue_idx, strip_on);
+		sxevf_vlan_strip_bitmap_set(dev, queue_idx, strip_on);
+	}
+
+	return;
+}
+
+/* Apply VLAN offload settings selected by mask; only the strip offload
+ * is configurable on the VF.
+ */
+static void sxevf_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		sxevf_vlan_strip_switch_set(dev);
+	}
+
+	return;
+}
+
+/* Restore the VF VLAN configuration: replay the shadow VLAN filter table
+ * to hardware, then re-apply the VLAN offload settings.
+ */
+void sxevf_vlan_filter_configure(struct rte_eth_dev *eth_dev)
+{
+	u32 vlan_mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+		    RTE_ETH_VLAN_EXTEND_MASK;
+
+	sxevf_vfta_sync(eth_dev, true);
+
+	sxevf_vlan_offload_configure(eth_dev, vlan_mask);
+
+	return;
+}
+
+/* Request promiscuous receive mode for this VF via the PF mailbox.
+ * Returns 0 on success or the mailbox error code.
+ */
+s32 sxevf_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret;
+
+	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_PROMISC);
+	if (ret != 0) {
+		LOG_ERROR_BDF("cast mode:0x%x set fail.(err:%d)",
+			      SXEVF_CAST_MODE_PROMISC, ret);
+	}
+
+	return ret;
+}
+
+/* Leave promiscuous mode, falling back to allmulti if the application
+ * still has all-multicast enabled, otherwise to no special cast mode.
+ * Returns 0 on success or the mailbox error code.
+ */
+s32 sxevf_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 mode;
+	s32 ret;
+
+	mode = eth_dev->data->all_multicast ?
+	       SXEVF_CAST_MODE_ALLMULTI : SXEVF_CAST_MODE_NONE;
+
+	ret = sxevf_cast_mode_set(hw, mode);
+	if (ret != 0) {
+		LOG_ERROR_BDF("disable mc promiscuous fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+/* Enable all-multicast receive mode. Skipped while promiscuous mode is
+ * active, since promiscuous already covers all multicast traffic.
+ * Returns 0 on success or the mailbox error code.
+ */
+s32 sxevf_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	if (eth_dev->data->promiscuous) {
+		goto l_out;
+	}
+
+	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_ALLMULTI);
+	if (ret != 0) {
+		LOG_ERROR_BDF("cast mode:0x%x set fail.(err:%d)",
+			      SXEVF_CAST_MODE_ALLMULTI, ret);
+	}
+
+l_out:
+	return ret;
+}
+
+/* Disable all-multicast mode, returning to plain multicast filtering.
+ * Skipped while promiscuous mode is active (it supersedes allmulti).
+ * Returns 0 on success or the mailbox error code.
+ */
+s32 sxevf_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	if (eth_dev->data->promiscuous) {
+		goto l_out;
+	}
+
+	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_MULTI);
+	if (ret != 0) {
+		LOG_ERROR_BDF("disable mc promiscuous fail.(err:%d)", ret);
+	}
+
+l_out:
+	return ret;
+}
+
+/* Add (on != 0) or remove a VLAN id filter: program the hardware via the
+ * PF mailbox first, then mirror the change in the software shadow table
+ * so it can be replayed by sxevf_vfta_sync().
+ * Returns 0 on success or the mailbox error code (shadow table untouched
+ * on failure).
+ */
+s32 sxevf_vlan_filter_set(struct rte_eth_dev *eth_dev,  u16 vlan_id, s32 on)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	s32 ret;
+	u8 reg_idx;
+	u8 bit_idx;
+
+	ret = sxevf_vlan_id_set(hw, vlan_id, on);
+	if (ret) {
+		LOG_ERROR_BDF("vlan_id:0x%x status:%u set fail.(err:%d)",
+			      vlan_id, on, ret);
+		goto l_out;
+	}
+
+	/* 32 ids per table word: word index from the upper bits, bit index
+	 * from the low 5 bits of the VLAN id.
+	 */
+	reg_idx = (vlan_id >> SXEVF_VLAN_ID_SHIFT) & SXEVF_VLAN_ID_REG_MASK;
+	bit_idx = (vlan_id & SXEVF_VLAN_ID_BIT_MASK);
+
+	if (on) {
+		vlan_ctxt->vlan_table[reg_idx] |= (1 << bit_idx);
+	} else {
+		vlan_ctxt->vlan_table[reg_idx] &= ~(1 << bit_idx);
+	}
+
+	LOG_INFO_BDF("vlan_id:0x%x status:%u set success.", vlan_id, on);
+
+l_out:
+	return ret;
+}
+
+/* Switch VLAN tag stripping on or off for a single rx queue and record
+ * the new state in the strip bitmap.
+ */
+void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	/* Valid indices are [0, max_rx_queue); use >= so the out-of-range
+	 * index max_rx_queue itself is rejected (was '>', an off-by-one,
+	 * inconsistent with the check in sxevf_vlan_strip_bitmap_set()).
+	 */
+	if (queue >= adapter->max_rx_queue) {
+		LOG_ERROR_BDF("queue id:%u invalid exceed max rx queue num:%u",
+			      queue, adapter->max_rx_queue);
+		goto l_out;
+	}
+
+	sxevf_hw_vlan_tag_strip_switch(hw, queue, on);
+
+	sxevf_vlan_strip_bitmap_set(dev, queue, on);
+
+	LOG_INFO_BDF("queue:%u vlan tag strip on:%u done", queue, on);
+
+l_out:
+	return;
+}
+
+/* Propagate the port-level VLAN-strip offload flag to every rx queue's
+ * offload field (software bookkeeping only; hardware is programmed by
+ * sxevf_vlan_offload_configure()).
+ */
+static void sxevf_vlan_strip_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	sxevf_rx_queue_s *rxq;
+	bool strip_en;
+	u16 i;
+
+	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+		strip_en = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			if (strip_en) {
+				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+			} else {
+				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+			}
+		}
+	}
+
+	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
+		     " all queue vlan strip offload flag configure done",
+		     mask, rxmode->offloads);
+
+	return;
+}
+
+/* ethdev vlan_offload_set callback: refresh per-queue offload flags,
+ * then program the hardware strip switches. Always returns 0.
+ */
+s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask)
+{
+	sxevf_vlan_strip_offload_configure(dev, mask);
+	sxevf_vlan_offload_configure(dev, mask);
+
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%d set done.", mask);
+
+	return 0;
+}
+
+/* Replace the VF default MAC address via the PF mailbox.
+ * Returns 0 on success or the mailbox error code.
+ */
+s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	ret = sxevf_mac_addr_set(hw, mac_addr->addr_bytes);
+	if (ret) {
+		LOG_ERROR_BDF("modify default mac addr to "MAC_FMT" fail.(err:%d)",
+			      MAC_ADDR(mac_addr->addr_bytes), ret);
+		/* Bug fix: do not fall through to the success log on error. */
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("modify default mac addr to "MAC_FMT" success.",
+		      MAC_ADDR(mac_addr->addr_bytes));
+
+l_out:
+	return ret;
+}
+
+/* Add a unicast MAC filter entry for the VF.
+ * The default MAC address is managed separately and is rejected here.
+ * rar_idx and pool are unused on the VF datapath.
+ * Returns 0 on success, -EINVAL for the default address, or the mailbox
+ * error code.
+ */
+s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     __rte_unused u32 rar_idx, __rte_unused u32 pool)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_mac_filter_context *mac_ctxt = &adapter->mac_filter_ctxt;
+
+	if (memcmp(mac_ctxt->def_mac_addr.addr_bytes, mac_addr->addr_bytes,
+		    sizeof(*mac_addr)) == 0) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("mac_addr:"MAC_FMT" equal to default mac addr"
+			     " skip mac addr add.(err:%d)",
+			     MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	ret = sxevf_uc_addr_add(hw, 2, mac_addr->addr_bytes);
+	if (ret) {
+		LOG_ERROR_BDF("mac_addr:"MAC_FMT" add fail.(err:%d)",
+			      MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("mac_addr:"MAC_FMT" add success.",
+		      MAC_ADDR(mac_addr->addr_bytes));
+
+l_out:
+	return ret;
+}
+
+/* Remove the unicast filter at 'index'.
+ * The mailbox interface has no per-entry delete: the whole hardware UC
+ * list is cleared (sxevf_uc_addr_add with a NULL address), then every
+ * remaining non-zero, non-default address is re-added.
+ */
+void sxevf_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_mac_filter_context *mac_ctxt = &adapter->mac_filter_ctxt;
+	struct rte_ether_addr *mac_addr;
+	u8 i;
+
+	/* Clear the hardware unicast list. */
+	sxevf_uc_addr_add(hw, 0, NULL);
+
+	/* Re-add everything except the removed slot, empty slots, and the
+	 * default address (kept in its own register).
+	 */
+	for (i = 0, mac_addr = dev->data->mac_addrs; i < mac_ctxt->uc_table_size;
+	     i++, mac_addr++) {
+		if ((i == index) || rte_is_zero_ether_addr(mac_addr) ||
+		(memcmp(mac_ctxt->def_mac_addr.addr_bytes, mac_addr->addr_bytes,
+		        sizeof(*mac_addr)) == 0)) {
+			continue;
+		}
+		sxevf_uc_addr_add(hw, 2, mac_addr->addr_bytes);
+	}
+
+	LOG_INFO_BDF("index:%u mac addr"MAC_FMT" remove success.",
+		      index, MAC_ADDR(dev->data->mac_addrs[index].addr_bytes));
+	return;
+}
+
+/* Extract the 12-bit multicast hash key from a MAC address: the low
+ * nibble of byte 4 concatenated with byte 5, masked to 12 bits.
+ */
+static u16 sxevf_hash_mac_addr_parse(u8 *mac_addr)
+{
+	u16 extracted = ((mac_addr[4] >> 4) |
+			(((u16)mac_addr[5]) << 4));
+
+	extracted &= SXEVF_MAC_ADDR_EXTRACT_MASK;
+
+	PMD_LOG_DEBUG(DRV, "mac_addr:"MAC_FMT" parse result:0x%x",
+			 MAC_ADDR(mac_addr), extracted);
+
+	return extracted;
+}
+
+/* Replace the VF multicast filter list via the PF mailbox.
+ * Each address is reduced to its 12-bit hash key; at most
+ * SXEVF_MC_ENTRY_NUM_MAX entries are sent (extra entries are silently
+ * dropped). Returns 0 on success, the mailbox transport error, or
+ * -SXEVF_ERR_MSG_HANDLE_ERR when the PF NACKs the request.
+ */
+s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr)
+{
+	s32 ret;
+	u32 result;
+	struct sxevf_mc_sync_msg msg;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 i;
+
+	msg.msg_type = SXEVF_MC_ADDR_SYNC;
+	msg.mc_cnt = min(nb_mc_addr, (u32)SXEVF_MC_ENTRY_NUM_MAX);
+
+	for (i = 0; i < msg.mc_cnt; i++) {
+		msg.mc_addr_extract[i] = sxevf_hash_mac_addr_parse(mc_addr_list->addr_bytes);
+		mc_addr_list++;
+	}
+
+	/* The PF echoes the message with status bits folded into msg_type;
+	 * 'result' repacks count + type for the NACK check below.
+	 */
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	result = (msg.mc_cnt << 16) | msg.msg_type;
+
+	if (ret || ((result & SXEVF_MC_ADDR_SYNC) &&
+		    (result & SXEVF_MSGTYPE_NACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "msg_type:0x%x len:%zu mc_cnt:%d msg "
+		  "result:0x%x.(ret:%d)\n",
+		  msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)),
+		  msg.mc_cnt, result, ret);
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_filter.h b/drivers/net/sxe/vf/sxevf_filter.h
new file mode 100644
index 0000000000..9e74718b95
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_filter.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_FILTER_H__
+#define __SXEVF_FILTER_H__
+
+#include <rte_ether.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#define SXEVF_MTA_ENTRY_NUM_MAX        128
+#define SXEVF_UTA_HASH_BIT_MAX         4096 
+#define VLAN_N_VID     4096
+#define BYTE_BIT_NUM   8
+
+#define  SXEVF_VLAN_ID_SHIFT         (5)     
+#define  SXEVF_VLAN_ID_REG_MASK      (0x7F)  
+#define  SXEVF_VLAN_ID_BIT_MASK      (0x1F)  
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8*)(x))[0],((u8*)(x))[1], \
+			   ((u8*)(x))[2],((u8*)(x))[3], \
+			   ((u8*)(x))[4],((u8*)(x))[5]
+
+#define SXEVF_VLAN_STRIP_BITMAP_SIZE    \
+        (SXEVF_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM))
+
+/* Software VLAN filter state for one VF port. */
+struct sxevf_vlan_context {
+	u32 vlan_table[SXEVF_VFT_TBL_SIZE];   /* shadow of the hw VLAN filter table */
+	u32 strip_bitmap[SXEVF_VLAN_STRIP_BITMAP_SIZE]; /* per-queue strip flag, one bit per queue */
+	u32 vlan_table_size;
+};
+
+/* MAC filter state for one VF port. */
+struct sxevf_mac_filter_context {
+	struct rte_ether_addr def_mac_addr;  /* default (primary) MAC address */
+	u8  mc_filter_type;                  /* multicast filter type - TODO confirm who sets this */
+	u32 uc_table_size;                   /* number of unicast table entries */
+};
+
+void sxevf_vlan_filter_init(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_mac_addr_init(struct rte_eth_dev *eth_dev);
+
+void sxevf_vlan_filter_configure(struct rte_eth_dev *eth_dev);
+
+void sxevf_vfta_sync(struct rte_eth_dev *eth_dev, bool on);
+
+s32 sxevf_promiscuous_disable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_promiscuous_enable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_allmulticast_disable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_allmulticast_enable(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_vlan_filter_set(struct rte_eth_dev *eth_dev,  u16 vlan_id, s32 on);
+
+void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on);
+
+s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask);
+
+s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr);
+
+void sxevf_mac_addr_remove(struct rte_eth_dev *dev, u32 index);
+
+s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *mac_addr,
+			     __rte_unused u32 rar_idx ,__rte_unused u32 pool);
+
+s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_list,
+			  u32 nb_mc_addr);
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_irq.c b/drivers/net/sxe/vf/sxevf_irq.c
new file mode 100644
index 0000000000..646a10d6dc
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_irq.c
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_alarm.h>
+#include <rte_interrupts.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_eal_interrupts.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <eal_interrupts.h>
+#else
+#include <bus_pci_driver.h>
+#include <eal_interrupts.h>
+#endif
+
+#include "sxevf.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxevf_rx.h"
+#include "sxevf_irq.h"
+#include "sxevf_msg.h"
+#include "sxevf_queue.h"
+#include "sxe_compat_version.h"
+
+#define SXEVF_IRQ_LINK_CONFIG      (u32)(1 << 3)
+
+#define SXEVF_RX_OTHER_IRQ_MASK     (3)
+
+#define SXEVF_MISC_VEC_ID        RTE_INTR_VEC_ZERO_OFFSET
+
+#define SXEVF_RX_VEC_BASE          RTE_INTR_VEC_RXTX_OFFSET
+
+#define SXEVF_EITR_INTERVAL_UNIT_NS	2048
+#define SXEVF_EITR_ITR_INT_SHIFT        3
+#define SXEVF_IRQ_ITR_MASK              (0x00000FF8)
+#define SXEVF_EITR_INTERVAL_US(us) \
+	(((us) * 1000 / SXEVF_EITR_INTERVAL_UNIT_NS << SXEVF_EITR_ITR_INT_SHIFT) & \
+		SXEVF_IRQ_ITR_MASK)
+
+#define SXEVF_QUEUE_ITR_INTERVAL_DEFAULT   500 
+
+/* Mask all VF interrupts and clear the cached enable mask. */
+void sxevf_intr_disable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxevf_irq_disable(hw);
+	irq_ctxt->enable_mask = 0;
+
+	return;
+}
+
+/* Enable the rx/other interrupt sources and cache the enabled mask. */
+void sxevf_intr_enable(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxevf_irq_enable(hw, SXEVF_RX_OTHER_IRQ_MASK);
+	irq_ctxt->enable_mask = SXEVF_RX_OTHER_IRQ_MASK;
+
+	return;
+}
+
+/* Poll the PF->VF control mailbox and handle a pending message.
+ * A REINIT request is forwarded to the application as an
+ * RTE_ETH_EVENT_INTR_RESET event. Returns 0 on success or the mailbox
+ * receive error (e.g. mailbox lock contention).
+ */
+static s32 sxevf_ctrl_msg_check(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 ctrl_msg;
+	s32 ret;
+
+	ret = sxevf_ctrl_msg_rcv_and_clear(hw, (u32 *)&ctrl_msg,
+				SXEVF_MSG_NUM(sizeof(ctrl_msg)));
+	if (ret) {
+		PMD_LOG_INFO(DRV, "ctrl msg rcv fail due to lock fail.(err:%d)\n", ret);
+		goto l_end;
+	}
+
+	if (ctrl_msg & SXEVF_PF_CTRL_MSG_REINIT) {
+		sxe_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_RESET,
+					     NULL);
+		PMD_LOG_INFO(DRV, "rcv reinit msg.\n");
+	}
+
+l_end:
+	return ret;
+}
+
+/* Check the PF control mailbox for a link-state message and update
+ * *link_up accordingly; *link_up is left untouched when no relevant
+ * message is pending. NETDEV_DOWN takes precedence over LINK_UPDATE.
+ * Returns 0 on success or the mailbox receive error.
+ * (Fix: the if/else-if ladder was mis-indented one level deep, which
+ * misrepresented the control flow.)
+ */
+STATIC s32 sxevf_link_msg_check(struct rte_eth_dev *eth_dev, bool *link_up)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 ctrl_msg;
+	s32 ret;
+
+	ret = sxevf_ctrl_msg_rcv_and_clear(hw, (u32 *)&ctrl_msg,
+				SXEVF_MSG_NUM(sizeof(ctrl_msg)));
+	if (ret) {
+		PMD_LOG_INFO(DRV, "ctrl msg rcv fail due to lock fail.(err:%d)\n", ret);
+		goto l_end;
+	}
+
+	if (ctrl_msg & SXEVF_PF_CTRL_MSG_NETDEV_DOWN) {
+		*link_up = false;
+		PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x need link down.\n", ctrl_msg);
+	} else if (ctrl_msg & SXEVF_PF_CTRL_MSG_LINK_UPDATE) {
+		*link_up = true;
+		PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x physical link up.\n", ctrl_msg);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Mailbox interrupt service routine: interrupts are masked while the
+ * pending PF control message is processed, then re-enabled.
+ */
+STATIC void sxevf_mbx_irq_handler(void *data)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)data;
+
+	sxevf_intr_disable(eth_dev);
+	sxevf_ctrl_msg_check(eth_dev);
+	sxevf_intr_enable(eth_dev);
+
+	return;
+}
+
+/* Initial interrupt setup: start from a quiesced state, register the
+ * mailbox handler, then enable interrupts at the EAL and device level.
+ */
+void sxevf_irq_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *irq_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	sxevf_intr_disable(eth_dev);
+
+	rte_intr_callback_register(irq_handle, sxevf_mbx_irq_handler, eth_dev);
+	rte_intr_enable(irq_handle);
+
+	sxevf_intr_enable(eth_dev);
+
+	return;
+}
+
+/* Map the misc (mailbox) event and every rx queue to MSI-X vectors and
+ * set the default rx interrupt throttle interval.
+ * Returns 0 on success or -SXE_ERR_PARAM when per-queue interrupts are
+ * not enabled on the handle.
+ * NOTE(review): accesses handle->intr_vec/nb_efd directly - only valid
+ * on DPDK versions where rte_intr_handle is not opaque; confirm against
+ * the compat macros.
+ */
+static s32 sxevf_msix_configure(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_rx_queue_s *rx_queue;
+	u16 queue_id;
+	u16 vector = SXEVF_MISC_VEC_ID;
+	u16 base = SXEVF_MISC_VEC_ID;
+	u32 irq_interval;
+	s32 ret = 0;
+
+	/* Mailbox/other events go to the misc vector. */
+	sxevf_event_irq_map(hw, vector);
+
+	if (!rte_intr_dp_is_en(handle)) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(DRV, "intr type:%u nb_efd:%u irq unsupported.(err:%d)\n",
+				  handle->type, handle->nb_efd, ret);
+		goto l_out;
+	}
+
+	/* With a dedicated misc vector, queue vectors start at the rx base. */
+	if (rte_intr_allow_others(handle)) {
+		vector = base = SXEVF_RX_VEC_BASE;
+	}
+
+	/* Assign vectors round-robin style; the last vector is reused once
+	 * nb_efd vectors have been handed out.
+	 */
+	for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+		queue_id++) {
+		rx_queue = dev->data->rx_queues[queue_id];
+		sxevf_hw_ring_irq_map(hw, false,
+					rx_queue->reg_idx,
+					vector);
+		handle->intr_vec[queue_id] = vector;
+		PMD_LOG_INFO(DRV,
+				"queue id:%u reg_idx:%u vector:%u ",
+				queue_id,
+				rx_queue->reg_idx,
+				vector);
+		if (vector < base + handle->nb_efd - 1) {
+			vector++;
+		}
+	}
+
+	/* Default interrupt moderation for ring 0. */
+	irq_interval = SXEVF_EITR_INTERVAL_US(SXEVF_QUEUE_ITR_INTERVAL_DEFAULT);
+	sxevf_ring_irq_interval_set(hw, 0, irq_interval);
+
+l_out:
+	return ret;
+}
+
+/* Configure rx queue interrupts: enable event fds, allocate the
+ * queue->vector map, program the MSI-X mappings and re-enable
+ * interrupts. Returns 0 on success, -SXE_ERR_CONFIG when efd setup
+ * fails, -ENOMEM on map allocation failure, or the error from
+ * sxevf_msix_configure().
+ */
+s32 sxevf_irq_configure(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	u16 irq_num;
+	s32 ret = 0;
+
+	/* One event fd is enough: all rx queues share a single vector. */
+	if (rte_intr_cap_multiple(handle) &&
+	     eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+		irq_num = 1;
+		if (rte_intr_efd_enable(handle, irq_num)) {
+			ret = -SXE_ERR_CONFIG;
+			PMD_LOG_ERR(DRV,
+				      "intr_handle type:%d irq num:%d invalid",
+				      handle->type, irq_num);
+			goto l_out;
+		}
+	}
+
+	/* Lazily allocate the per-queue vector map consumed by
+	 * sxevf_msix_configure(); freed in sxevf_irq_free().
+	 */
+	if (rte_intr_dp_is_en(handle) && !handle->intr_vec) {
+		handle->intr_vec = rte_zmalloc("intr_vec",
+				    eth_dev->data->nb_rx_queues * sizeof(u32), 0);
+		if (handle->intr_vec == NULL) {
+			PMD_LOG_ERR(DRV, "rx queue irq vector "
+					 "allocate %zuB memory fail.",
+					 eth_dev->data->nb_rx_queues * sizeof(u32));
+			ret = -ENOMEM;
+			goto l_out;
+		}
+	}
+
+	ret = sxevf_msix_configure(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "intr type:%u nb_efd:%u irq unsupported.(err:%d)\n",
+				  handle->type, handle->nb_efd, ret);
+		goto l_out;
+	}
+
+	/* Bounce the EAL interrupt to pick up the new configuration. */
+	rte_intr_disable(handle);
+
+	rte_intr_enable(handle);
+
+	sxevf_intr_enable(eth_dev);
+
+	PMD_LOG_INFO(DRV,
+		      "intr_handle type:%d rx queue num:%d "
+		      "queue irq num:%u total irq num:%u "
+		      "config done",
+		      handle->type,
+		      eth_dev->data->nb_rx_queues,
+		      handle->nb_efd,
+		      handle->max_intr);
+
+l_out:
+	return ret;
+}
+
+/* Disable the EAL interrupt and release the queue->vector map that
+ * sxevf_irq_configure() allocated.
+ */
+void sxevf_irq_free(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	rte_intr_disable(handle);
+
+	if (handle->intr_vec != NULL) {
+		rte_free(handle->intr_vec);
+		handle->intr_vec = NULL;
+	}
+
+	return;
+}
+
+/* Detach the mailbox ISR registered in sxevf_irq_init(). */
+void sxevf_irq_unregister(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+
+	rte_intr_callback_unregister(handle, sxevf_mbx_irq_handler, eth_dev);
+
+	return;
+}
+
+/* Unmask the rx interrupt vector (shared by all rx queues; queue_id is
+ * therefore unused) and acknowledge any pending interrupt.
+ * Always returns 0.
+ */
+s32 sxevf_rx_queue_intr_enable(struct rte_eth_dev *dev, u16 queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+	u32 vector;
+
+	RTE_SET_USED(queue_id);
+
+	/* Rx uses the rx base vector when a dedicated misc vector exists. */
+	vector = rte_intr_allow_others(intr_handle) ?
+		 SXEVF_RX_VEC_BASE : SXEVF_MISC_VEC_ID;
+
+	irq_ctxt->enable_mask |= (1 << vector);
+	sxevf_specific_irq_enable(hw, irq_ctxt->enable_mask);
+
+	rte_intr_ack(intr_handle);
+
+	return 0;
+}
+
+/* Mask the rx interrupt vector (shared by all rx queues; queue_id is
+ * therefore unused). Always returns 0.
+ */
+s32 sxevf_rx_queue_intr_disable(struct rte_eth_dev *dev, u16 queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt;
+	u32 vector;
+
+	RTE_SET_USED(queue_id);
+
+	/* Rx uses the rx base vector when a dedicated misc vector exists. */
+	vector = rte_intr_allow_others(intr_handle) ?
+		 SXEVF_RX_VEC_BASE : SXEVF_MISC_VEC_ID;
+
+	irq_ctxt->enable_mask &= ~(1 << vector);
+	sxevf_specific_irq_enable(hw, irq_ctxt->enable_mask);
+
+	return 0;
+}
+
+/* Read the physical link state from the VFLINKS register.
+ * The link is only reported up after it stays up across five re-reads
+ * spaced 100us apart (debounce). On link up the speed field is decoded
+ * into *link_speed; otherwise *link_speed is left unchanged.
+ */
+static void sxevf_physical_link_check(struct rte_eth_dev *dev,  u32 *link_speed, bool *link_up)
+{
+	u32 link_reg, i;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+
+	link_reg = sxevf_link_state_get(hw);
+	if (!(link_reg & SXE_VFLINKS_UP)) {
+		*link_up = false;
+		goto l_end;
+	}
+
+	/* Debounce: require the link to stay up for 5 consecutive reads. */
+	for (i = 0; i < 5; i++) {
+		udelay(100);
+		link_reg = sxevf_link_state_get(hw);
+		if (!(link_reg & SXE_VFLINKS_UP)) {
+			*link_up = false;
+			goto l_end;
+		}
+	}
+
+	switch (link_reg & SXE_VFLINKS_SPEED) {
+	case SXE_VFLINKS_SPEED_10G:
+		*link_speed = SXEVF_LINK_SPEED_10GB_FULL;
+		break;
+	case SXE_VFLINKS_SPEED_1G:
+		*link_speed = SXEVF_LINK_SPEED_1GB_FULL;
+		break;
+	case SXE_VFLINKS_SPEED_100:
+		*link_speed = SXEVF_LINK_SPEED_100_FULL;
+		break;
+	default:
+		*link_speed = SXEVF_LINK_SPEED_UNKNOWN;
+	}
+
+	*link_up = true;
+
+l_end:
+	PMD_LOG_INFO(DRV, "link up status:%d.\n", *link_up);
+	return;
+}
+
+/* Determine link state and speed: check the physical link first, then
+ * (unless in the fast no-wait path) consult the PF control mailbox for
+ * a pending link-down/link-up message that overrides the register view.
+ * assumes adapter->link_check reflects the link-check devarg - TODO
+ * confirm against sxevf_devargs_parse().
+ */
+static void sxevf_link_info_get(struct rte_eth_dev *dev, int wait_to_complete,
+				   u32 *link_speed, bool *link_up)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+	sxevf_physical_link_check(dev, link_speed, link_up);
+
+	/* Fast path: trust the register speed field only. */
+	if ((wait_to_complete == 0) && (adapter->link_check == 0)) {
+		if (*link_speed == SXEVF_LINK_SPEED_UNKNOWN) {
+			*link_up = false;
+		} else {
+			*link_up = true;
+		}
+		goto l_end;
+	}
+
+	if (*link_up) {
+		ret = sxevf_link_msg_check(dev, link_up);
+		if (ret) {
+			PMD_LOG_ERR(DRV, "ctrl msg rcv fail, try to next workqueue.\n");
+			goto l_end;
+		}
+	}
+
+l_end:
+	return;
+}
+
+/* ethdev link_update callback: query link state/speed and publish it
+ * atomically via rte_eth_linkstatus_set().
+ * Returns the rte_eth_linkstatus_set() result (0 or -1 when unchanged).
+ */
+s32 sxevf_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	bool link_up;
+	struct rte_eth_link link;
+	u32 link_speed = SXEVF_LINK_SPEED_UNKNOWN;
+
+	PMD_LOG_INFO(INIT, "link update start...");
+
+	/* Default report: link down, speed unknown. */
+	memset(&link, 0, sizeof(link));
+	link.link_status = RTE_ETH_LINK_DOWN;
+	link.link_speed  = RTE_ETH_SPEED_NUM_NONE;
+	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+						RTE_ETH_LINK_SPEED_FIXED);
+
+	/* With link-state-change interrupts enabled, never busy-wait. */
+	if ((wait_to_complete == 0) || dev->data->dev_conf.intr_conf.lsc) {
+		wait_to_complete = 0;
+	}
+
+	sxevf_link_info_get(dev, wait_to_complete, &link_speed, &link_up);
+
+	if (link_up == false) {
+		/* Message fixed: the old text wrongly claimed another link
+		 * thread was running.
+		 */
+		PMD_LOG_ERR(DRV, "link is down.");
+
+		goto l_end;
+	}
+
+	link.link_status = RTE_ETH_LINK_UP;
+	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	switch (link_speed) {
+	case SXEVF_LINK_SPEED_1GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_1G;
+		break;
+
+	case SXEVF_LINK_SPEED_10GB_FULL:
+		link.link_speed = RTE_ETH_SPEED_NUM_10G;
+		break;
+	default:
+		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+	}
+
+l_end:
+	PMD_LOG_INFO(DRV, "link update end, up=%x, speed=%x",
+			  link_up, link_speed);
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_irq.h b/drivers/net/sxe/vf/sxevf_irq.h
new file mode 100644
index 0000000000..169eb1f0fd
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_irq.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_IRQ_H__
+#define __SXEVF_IRQ_H__
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include "sxe_compat_platform.h"
+
+/* Cached VF interrupt enable state. */
+struct sxevf_irq_context {
+	u32 enable_mask;           /* currently enabled interrupt mask */
+	u32 enable_mask_original;  /* saved mask - TODO confirm restore path */
+};
+
+void sxevf_intr_disable(struct rte_eth_dev *eth_dev);
+
+void sxevf_intr_enable(struct rte_eth_dev *eth_dev);
+
+void sxevf_irq_init(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_irq_configure(struct rte_eth_dev *eth_dev);
+
+void sxevf_irq_free(struct rte_eth_dev *eth_dev);
+
+void sxevf_irq_unregister(struct rte_eth_dev *eth_dev);
+
+s32 sxevf_rx_queue_intr_disable(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 sxevf_rx_queue_intr_enable(struct rte_eth_dev *dev, u16 queue_id);
+
+s32 sxevf_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_main.c b/drivers/net/sxe/vf/sxevf_main.c
new file mode 100644
index 0000000000..72d600c0b1
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_main.c
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <string.h>
+#include <sys/time.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+
+#include "sxe_version.h"
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxevf.h"
+#include "sxe_logs.h"
+#include "sxevf_ethdev.h"
+#include "sxe_queue_common.h"
+
+#define PCI_VENDOR_ID_STARS      0x1FF2
+#define SXEVF_DEV_ID_ASIC        0x10A2
+
+/* PCI probe callback: print the driver version banner, optionally set up
+ * debug logging, and create the VF ethdev with sxevf_ethdev_init() as
+ * the per-device init hook. Returns 0 on success or the probe error.
+ */
+static s32 sxevf_probe(struct rte_pci_driver *pci_drv __rte_unused,
+					struct rte_pci_device *pci_dev)
+{
+	s32 ret;
+
+	printf("sxe_version[%s], sxe_commit_id[%s], sxe_branch[%s], sxe_build_time[%s]\n", 
+		SXE_VERSION, SXE_COMMIT_ID, SXE_BRANCH, SXE_BUILD_TIME);
+
+#ifdef SXE_DPDK_DEBUG
+	sxe_log_stream_init();
+#endif
+
+	ret = rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct sxevf_adapter), sxevf_ethdev_init);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "sxe pmd eth dev create fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(INIT, "%s sxevf pmd probe done.", pci_dev->device.name);
+
+l_out:
+	return ret;
+}
+
+/* PCI remove callback: tear down the ethdev created at probe time.
+ * Returns 0 on success or the removal error.
+ */
+static s32 sxevf_remove(struct rte_pci_device *pci_dev)
+{
+	s32 ret;
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev, sxevf_ethdev_uninit);
+	if (ret != 0) {
+		LOG_ERROR("vf remove fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+static const struct rte_pci_id sxevf_pci_tbl[] = {
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_STARS, SXEVF_DEV_ID_ASIC) },
+	{.vendor_id = 0,}
+};
+
+STATIC struct rte_pci_driver rte_sxevf_pmd = {
+	.id_table  = sxevf_pci_tbl,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe     = sxevf_probe,
+	.remove    = sxevf_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_sxevf, rte_sxevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sxevf, sxevf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_sxevf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_sxevf,
+			      SXEVF_DEVARG_LINK_CHECK "=<0|1>");
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_msg.c b/drivers/net/sxe/vf/sxevf_msg.c
new file mode 100644
index 0000000000..6cd64fc1b3
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_msg.c
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include "sxevf.h"
+#include "sxevf_msg.h"
+#include "sxevf_hw.h"
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+
+#define SXEVF_PFMSG_MASK    0xFF00
+#define SXEVF_DEFAULT_TC_NUM        1
+
+/* Reset the VF mailbox bookkeeping to its initial state. */
+void sxevf_mbx_init(struct sxevf_hw *hw)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+
+	/* Message area length in 32-bit words. */
+	mbx->msg_len = SXEVF_MBX_MSG_NUM;
+
+	/* Zero every statistic counter. */
+	mbx->stats.rcv_msgs  = 0;
+	mbx->stats.send_msgs = 0;
+	mbx->stats.acks      = 0;
+	mbx->stats.reqs      = 0;
+	mbx->stats.rsts      = 0;
+
+	/* Retry budget starts at zero; interval is the poll delay in us. */
+	mbx->retry = 0;
+	mbx->interval = SXEVF_MBX_RETRY_INTERVAL;
+
+	/* Lowest mailbox API until negotiation raises it. */
+	mbx->api_version = SXEVF_MBX_API_10;
+}
+
+/* Read the VF mailbox register merged with previously latched bits.
+ *
+ * NOTE(review): hw->mbx.reg_value appears to cache SXE_VFMAILBOX_RC_BIT
+ * bits across reads so an event observed once is not lost before it is
+ * consumed -- inferred from the read/merge/latch pattern below; confirm
+ * against the register specification.
+ */
+static u32 sxevf_mbx_reg_read(struct sxevf_hw *hw)
+{
+	u32 value = sxevf_mailbox_read(hw);
+
+	value |= hw->mbx.reg_value;
+
+	hw->mbx.reg_value |= value & SXE_VFMAILBOX_RC_BIT;
+
+	return value;
+}
+
+/* Test @mask against the (latched) mailbox value and consume those bits. */
+static bool sxevf_mbx_bit_check(struct sxevf_hw *hw, u32 mask)
+{
+	u32 reg = sxevf_mbx_reg_read(hw);
+	bool hit = (reg & mask) != 0;
+
+	/* Clear the tested bits from the latch regardless of the outcome. */
+	hw->mbx.reg_value &= ~mask;
+
+	return hit;
+}
+
+/* Return true when the PF has posted a message (PFSTS bit set). */
+STATIC bool sxevf_pf_msg_check(struct sxevf_hw *hw)
+{
+	bool pending = sxevf_mbx_bit_check(hw, SXE_VFMAILBOX_PFSTS);
+
+	if (pending)
+		hw->mbx.stats.reqs++;
+
+	return pending;
+}
+
+/* Return true when the PF has acknowledged the last message (PFACK set). */
+STATIC bool sxevf_pf_ack_check(struct sxevf_hw *hw)
+{
+	bool acked = sxevf_mbx_bit_check(hw, SXE_VFMAILBOX_PFACK);
+
+	if (acked)
+		hw->mbx.stats.acks++;
+
+	return acked;
+}
+
+/* Check the PF reset indication bits (RSTI/RSTD).
+ *
+ * NOTE(review): the condition is negated relative to the pf_msg/pf_ack
+ * helpers above -- rsts is counted and true returned when the reset bits
+ * are NOT set.  Confirm this is intentional (e.g. "reset no longer in
+ * progress") rather than an inverted test.
+ */
+bool sxevf_pf_rst_check(struct sxevf_hw *hw)
+{
+	bool ret = false;
+
+	if (!sxevf_mbx_bit_check(hw, (SXE_VFMAILBOX_RSTI |
+				      SXE_VFMAILBOX_RSTD))) {
+		hw->mbx.stats.rsts++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+/* Acquire the VF side of the mailbox by setting and re-reading VFU.
+ *
+ * Return 0 when the VFU bit reads back set within the retry budget,
+ * -SXEVF_ERR_MBX_LOCK_FAIL otherwise.
+ *
+ * Fix: the readback test used the logical "&&" operator, which made the
+ * condition true whenever the register was non-zero; the bitwise "&"
+ * actually tests the VFU bit.
+ */
+STATIC s32 sxevf_mailbox_lock(struct sxevf_hw *hw)
+{
+	u32 mailbox;
+	u32 retry = SXEVF_MBX_RETRY_COUNT;
+	s32 ret = -SXEVF_ERR_MBX_LOCK_FAIL;
+
+	while (retry--) {
+		mailbox = sxevf_mbx_reg_read(hw);
+		mailbox |= SXE_VFMAILBOX_VFU;
+		sxevf_mailbox_write(hw, mailbox);
+
+		if (sxevf_mbx_reg_read(hw) & SXE_VFMAILBOX_VFU) {
+			ret = 0;
+			break;
+		}
+
+		udelay(hw->mbx.interval);
+	}
+
+	return ret;
+}
+
+/* Release the mailbox by clearing the VFU ownership bit. */
+static void sxevf_mailbox_unlock(struct sxevf_hw *hw)
+{
+	u32 reg = sxevf_mbx_reg_read(hw);
+
+	sxevf_mailbox_write(hw, reg & ~SXE_VFMAILBOX_VFU);
+}
+
+/* Poll for a posted PF message within the mailbox retry budget.
+ *
+ * Return true when a PF message was detected; on timeout the retry
+ * budget is zeroed so subsequent sends fail fast.
+ *
+ * Fix: the original evaluated the check once more after retry reached 0
+ * and discarded a success on that final evaluation, misreporting it as
+ * a timeout; every check result is now honored.  Also logs the u32
+ * retry count with %u instead of %d.
+ */
+STATIC bool sxevf_msg_poll(struct sxevf_hw *hw)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	u32 retry = mbx->retry;
+	bool ret = false;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (retry) {
+		if (sxevf_pf_msg_check(hw)) {
+			ret = true;
+			break;
+		}
+		retry--;
+		udelay(mbx->interval);
+	}
+
+	if (!ret) {
+		LOG_ERROR_BDF("retry:%u send msg to pf done, but don't check pf reply.\n",
+			  mbx->retry);
+		mbx->retry = 0;
+	}
+
+	return ret;
+}
+
+/* Poll for a PF ack within the mailbox retry budget.
+ *
+ * Return true when PFACK was observed; on timeout the retry budget is
+ * zeroed so subsequent sends fail fast.
+ *
+ * Fix: same off-by-one as sxevf_msg_poll -- an ack arriving on the very
+ * last check evaluation was discarded; every check result is now
+ * honored, and the u32 retry count is logged with %u.
+ */
+STATIC bool sxevf_ack_poll(struct sxevf_hw *hw)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	u32 retry = mbx->retry;
+	bool ret = false;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (retry) {
+		if (sxevf_pf_ack_check(hw)) {
+			ret = true;
+			break;
+		}
+		retry--;
+		udelay(mbx->interval);
+	}
+
+	if (!ret) {
+		LOG_ERROR_BDF("send msg to pf, retry:%u but don't check pf ack, "
+			  "init mbx retry to 0.\n",
+			  mbx->retry);
+		mbx->retry = 0;
+	}
+
+	return ret;
+}
+
+/* Consume any stale PF message/ack indications before a fresh exchange. */
+STATIC void sxevf_pf_msg_and_ack_clear(struct sxevf_hw *hw)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	LOG_INFO_BDF("clear pending pf msg and ack.\n");
+
+	/* The check helpers clear the respective latch bits as a side effect. */
+	(void)sxevf_pf_msg_check(hw);
+	(void)sxevf_pf_ack_check(hw);
+}
+
+/* Post a @msg_len-word message to the PF and wait for its ack.
+ *
+ * @msg:     message words; msg[0] carries the type and flag bits
+ * @msg_len: number of 32-bit words, must not exceed mbx->msg_len
+ *
+ * Return 0 on success; negative error when the previous exchange timed
+ * out (retry == 0), the message is oversized, the mailbox lock cannot
+ * be taken, or no ack arrives.
+ *
+ * NOTE(review): the VFU lock taken via sxevf_mailbox_lock() is never
+ * explicitly released here -- presumably the PF/hardware clears it when
+ * the request is consumed; confirm against the mailbox specification.
+ */
+static s32 sxevf_send_msg_to_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	s32 ret = 0;
+	u16 i;
+	u32 old;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	/* retry == 0 means an earlier exchange timed out: fail fast. */
+	if (!mbx->retry) {
+		ret = -SXEVF_ERR_NOT_READY;
+		LOG_ERROR_BDF("msg:0x%x len:%d send fail due to timeout.(err:%d)\n",
+			  msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	if (msg_len > mbx->msg_len) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("vf msg:0x%x len:%d exceed limit:%d "
+			  "send fail.(err:%d)\n",
+			  msg[0], msg_len, mbx->msg_len, ret);
+		goto l_out;
+	}
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x len:%d send lock mailbox fail.(err:%d)\n",
+			   msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	sxevf_pf_msg_and_ack_clear(hw);
+
+	/* Preserve the PF-owned high bits of word 0. */
+	old = sxevf_msg_read(hw, 0);
+	msg[0] |= (old & SXEVF_PFMSG_MASK);
+
+	for (i = 0; i < msg_len; i++) {
+		sxevf_msg_write(hw, i, msg[i]);
+	}
+
+	/* Ring the PF's doorbell, then wait for its ack. */
+	sxevf_pf_req_irq_trigger(hw);
+
+	hw->mbx.stats.send_msgs++;
+
+	if (!sxevf_ack_poll(hw)) {
+		ret = -SXEVF_ERR_POLL_ACK_FAIL;
+		LOG_ERROR_BDF("msg:0x%x len:%d send done, but don't poll ack.\n",
+			   msg[0], msg_len);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("vf send msg:0x%x len:%d to pf and polled pf ack done."
+		 "stats send_msg:%d ack:%d.\n",
+		 msg[0], msg_len,
+		 mbx->stats.send_msgs, mbx->stats.acks);
+
+l_out:
+	return ret;
+}
+
+/* Copy a PF message out of the mailbox and ack it.
+ *
+ * Reads at most mbx->msg_len words, strips the PF flag bits from word 0
+ * and triggers the ack interrupt towards the PF.
+ *
+ * NOTE(review): unlike sxevf_ctrl_msg_rcv(), the lock taken here is not
+ * explicitly released -- presumably the ack trigger clears VFU; confirm
+ * against the mailbox specification.
+ */
+s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	u32 i;
+	u16 msg_entry;
+	s32 ret = 0;
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	/* Clamp the read to the mailbox message area. */
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n",
+			   msg_entry, ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = sxevf_msg_read(hw, i);
+	}
+
+	/* Strip the PF flag bits from the message type word. */
+	msg[0] &= ~SXEVF_PFMSG_MASK;
+
+	sxevf_pf_ack_irq_trigger(hw);
+
+	mbx->stats.rcv_msgs++;
+l_end:
+	return ret;
+
+}
+
+/* Read a control message from the mailbox without acking the PF.
+ *
+ * Unlike sxevf_mbx_msg_rcv(), this variant releases the mailbox lock
+ * explicitly and does not trigger the ack interrupt or strip PF flag
+ * bits -- used for PF-initiated control notifications.
+ */
+s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	u16 i;
+	u16 msg_entry;
+	s32 ret = 0;
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	/* Clamp the read to the mailbox message area. */
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n",
+			   msg_entry, ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = sxevf_msg_read(hw, i);
+	}
+
+	sxevf_mailbox_unlock(hw);
+
+	LOG_INFO_BDF("rcv pf mailbox msg:0x%x.\n", *msg);
+
+	mbx->stats.rcv_msgs++;
+l_end:
+	return ret;
+}
+
+/* Read a control message and clear its command bits in the mailbox.
+ *
+ * Same as sxevf_ctrl_msg_rcv() but additionally writes word 0 back with
+ * the non-PF bits cleared, so the same notification is not processed
+ * twice.
+ */
+s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	u16 i;
+	u16 msg_entry;
+	s32 ret = 0;
+	u32 clear;
+	struct sxevf_mbx_info *mbx = &hw->mbx;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	/* Clamp the read to the mailbox message area. */
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	ret = sxevf_mailbox_lock(hw);
+	if (ret) {
+		LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n",
+			   msg_entry, ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = sxevf_msg_read(hw, i);
+	}
+
+	/* Write back word 0 with only the PF-owned bits preserved. */
+	clear = msg[0] & (~SXEVF_PFMSG_MASK);
+	sxevf_msg_write(hw, 0, clear);
+
+	sxevf_mailbox_unlock(hw);
+
+	LOG_INFO_BDF("rcv pf mailbox msg:0x%x.\n", *msg);
+
+	mbx->stats.rcv_msgs++;
+l_end:
+	return ret;
+}
+
+/* Wait for the PF to post a reply, then copy it out of the mailbox. */
+static s32 sxevf_rcv_msg_from_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
+{
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	/* Poll until the PF signals a pending message. */
+	if (!sxevf_msg_poll(hw)) {
+		ret = -SXEVF_ERR_POLL_MSG_FAIL;
+		LOG_ERROR_BDF("retry:%d don't poll pf msg.\n", hw->mbx.retry);
+		return ret;
+	}
+
+	/* Pull the message words and ack the PF. */
+	ret = sxevf_mbx_msg_rcv(hw, msg, msg_len);
+	if (ret < 0) {
+		LOG_ERROR_BDF("retry:%d read msg fail.\n", hw->mbx.retry);
+		return ret;
+	}
+
+	LOG_INFO_BDF("vf polled pf msg:0x%x and rcv pf msg done. "
+		"stats req:%d rcv_msg:%d\n",
+		 msg[0], hw->mbx.stats.reqs, hw->mbx.stats.rcv_msgs);
+
+	return ret;
+}
+
+/* Send a message to the PF and wait for the matching reply in @msg.
+ *
+ * @msg is used for both request and reply.  A 10 ms settle delay is
+ * inserted after a RESET request before polling for the reply.
+ * Return 0 on success, negative error from send or receive.
+ */
+s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len)
+{
+	s32 ret;
+	u16 msg_type = msg[0] & 0xFF;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	ret = sxevf_send_msg_to_pf(hw, msg, msg_len);
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x len:%u msg send fail.(err:%d).\n",
+			   msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	/* Give the PF time to complete a VF reset before polling. */
+	if (msg_type == SXEVF_RESET) {
+		mdelay(10);
+	}
+
+	ret = sxevf_rcv_msg_from_pf(hw, msg, msg_len);
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x len:%u rcv fail.(err:%d).\n",
+			   msg[0], msg_len, ret);
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("send and rcv msg:0x%x len:%u success.\n", msg[0], msg_len);
+
+l_out:
+	return ret;
+}
+
+/* Negotiate the mailbox API with the PF, newest revision first.
+ *
+ * Walks the candidate list until the PF acks one; on total failure the
+ * version set by sxevf_mbx_init() remains in effect.
+ */
+void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter)
+{
+	struct sxevf_hw *hw = &adapter->hw;
+	static const int api[] = {
+		SXEVF_MBX_API_13,
+		SXEVF_MBX_API_12,
+		SXEVF_MBX_API_11,
+		SXEVF_MBX_API_10,
+		SXEVF_MBX_API_NR
+	};
+	struct sxevf_mbx_api_msg msg;
+	u32 i;
+	s32 ret;
+
+	for (i = 0; api[i] != SXEVF_MBX_API_NR; i++) {
+		msg.msg_type = SXEVF_API_NEGOTIATE;
+		msg.api_version = api[i];
+
+		ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+		if (!ret && (msg.msg_type == (SXEVF_API_NEGOTIATE | SXEVF_MSGTYPE_ACK))) {
+			hw->mbx.api_version = api[i];
+			break;
+		}
+	}
+
+	LOG_INFO_BDF("mailbox api version:%u", hw->mbx.api_version);
+}
+
+/* Query ring limits and TC layout from the PF, sanitizing the reply.
+ *
+ * @tc_num:     out, traffic-class count from the (clamped) reply
+ * @default_tc: out, default traffic class from the (clamped) reply
+ *
+ * Also stores max_rx_queue/max_tx_queue on @adapter.  Out-of-range
+ * values in the PF reply are clamped to driver defaults.
+ * Return 0 on success, negative error otherwise.
+ */
+s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
+			u8 *tc_num, u8 *default_tc)
+{
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_ring_info_msg req = {};
+	s32 ret;
+
+	req.msg_type = SXEVF_RING_INFO_GET;
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&req,
+				     SXEVF_MSG_NUM(sizeof(req)));
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n",
+			   req.msg_type, ret);
+		goto l_out;
+	}
+
+	/* The reply must echo the request type with the ack flag set. */
+	if (req.msg_type != (SXEVF_MSGTYPE_ACK | SXEVF_RING_INFO_GET)) {
+		ret = -SXEVF_ERR_REPLY_INVALID;
+		LOG_WARN_BDF("msg:0x%x not expected.(err:%d)\n", req.msg_type, ret);
+		goto l_out;
+	}
+
+	LOG_DEBUG_BDF("original ring info from pf, max_tx_num:%u max_rx_num:%u "
+		 "tc_num:%u default_tc:%u.\n",
+		 req.max_tx_num, req.max_rx_num, req.tc_num, req.default_tc);
+
+	/* Clamp zero or oversized ring counts to the driver maximum. */
+	if ((req.max_tx_num == 0) ||
+	    (req.max_tx_num > SXEVF_TXRX_RING_NUM_MAX)) {
+		req.max_tx_num = SXEVF_TXRX_RING_NUM_MAX;
+	}
+
+	if ((req.max_rx_num == 0) ||
+	    (req.max_rx_num > SXEVF_TXRX_RING_NUM_MAX)) {
+		req.max_rx_num = SXEVF_TXRX_RING_NUM_MAX;
+	}
+
+	if (req.tc_num > req.max_rx_num) {
+		req.tc_num = SXEVF_DEFAULT_TC_NUM;
+	}
+	*tc_num = req.tc_num;
+
+	if (req.default_tc > req.max_tx_num) {
+		req.default_tc = 0;
+	}
+
+	*default_tc = req.default_tc;
+
+	adapter->max_rx_queue = req.max_rx_num;
+	adapter->max_tx_queue = req.max_tx_num;
+
+	LOG_INFO_BDF("ring info max_tx_num:%u max_rx_num:%u "
+		 "tc_num:%u default_tc:%u.\n",
+		 req.max_tx_num, req.max_rx_num, req.tc_num, req.default_tc);
+
+l_out:
+	return ret;
+}
+
+/* Query the RSS hash configuration (key bytes and hash-function mask)
+ * from the PF.
+ *
+ * The key is copied into rss_conf->rss_key when the caller supplied a
+ * buffer; rss_conf->rss_hf receives the hash-function mask.
+ * Return 0 on success, negative error otherwise.
+ *
+ * Fixes: the original stored a pointer to the on-stack reply in
+ * rss_conf->rss_key, which dangled as soon as this function returned;
+ * it also logged a u64 with %ld and the binary (non NUL-terminated)
+ * key with %s.
+ */
+s32 sxevf_rss_hash_config_get(struct sxevf_adapter *adapter,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxevf_hw *hw = &adapter->hw;
+	struct sxevf_rss_hash_msg msg = {};
+	s32 ret;
+
+	msg.msg_type = SXEVF_RSS_CONF_GET;
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+				     SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n",
+			   msg.msg_type, ret);
+		goto l_out;
+	}
+
+	if (msg.msg_type != (SXEVF_MSGTYPE_ACK | SXEVF_RSS_CONF_GET)) {
+		ret = -SXEVF_ERR_REPLY_INVALID;
+		LOG_WARN_BDF("msg:0x%x not expected.(err:%d)\n", msg.msg_type, ret);
+		goto l_out;
+	}
+
+	/* Copy the key: the reply lives on this stack frame and must not
+	 * be aliased past return.
+	 */
+	if (rss_conf->rss_key != NULL)
+		memcpy(rss_conf->rss_key, msg.hash_key, SXEVF_RSS_HASH_KEY_SIZE);
+	rss_conf->rss_hf = msg.rss_hf;
+
+	LOG_INFO_BDF("rss hash conf get success, msg:0x%x rss_func:0x%llx.\n",
+		 msg.msg_type, (unsigned long long)msg.rss_hf);
+
+l_out:
+	return ret;
+}
+
+/* Ask the PF to set the VF's primary unicast MAC address.
+ *
+ * @uc_addr: 6-byte MAC address.
+ * Return 0 on success, -EPERM when the PF nacks or the exchange fails.
+ *
+ * Fix: "%pM" is a Linux-kernel printk extension with no meaning to the
+ * userspace printf family behind the log macros; the address bytes are
+ * now formatted explicitly.
+ */
+s32 sxevf_mac_addr_set(struct sxevf_hw *hw, u8 *uc_addr)
+{
+	s32 ret;
+	struct sxevf_uc_addr_msg msg = {};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_DEV_MAC_ADDR_SET;
+	memcpy(msg.uc_addr, uc_addr, SXEVF_MAC_ADDR_LEN);
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	if (!ret && (msg.msg_type ==
+		   (SXEVF_DEV_MAC_ADDR_SET | SXEVF_MSGTYPE_NACK))) {
+		ret = -EPERM;
+		LOG_ERROR_BDF("msg:0x%x uc addr:%02x:%02x:%02x:%02x:%02x:%02x replyed nack.\n",
+			   msg.msg_type, uc_addr[0], uc_addr[1], uc_addr[2],
+			   uc_addr[3], uc_addr[4], uc_addr[5]);
+		goto l_out;
+	}
+
+	if (ret) {
+		LOG_ERROR_BDF("msg:0x%x uc addr:%02x:%02x:%02x:%02x:%02x:%02x set fail.(err:%d)\n",
+			   msg.msg_type, uc_addr[0], uc_addr[1], uc_addr[2],
+			   uc_addr[3], uc_addr[4], uc_addr[5], ret);
+		ret = -EPERM;
+		goto l_out;
+	}
+
+	LOG_INFO_BDF("msg:0x%x uc addr:%02x:%02x:%02x:%02x:%02x:%02x set success.\n",
+		 msg.msg_type, uc_addr[0], uc_addr[1], uc_addr[2],
+		 uc_addr[3], uc_addr[4], uc_addr[5]);
+
+l_out:
+	return ret;
+}
+
+/* Report the VF's maximum Rx frame size (LPE) to the PF.
+ *
+ * Return 0 on success, negative error on send failure or PF nack.
+ *
+ * NOTE(review): SXEVF_LPE_SET (0x05) is a value code, not a bit mask,
+ * yet it is tested with bitwise "&" below -- any reply type sharing
+ * bits 0x01/0x04 would also match.  Confirm the intended reply check.
+ */
+s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 mtu)
+{
+	struct sxevf_max_frame_msg msg = {};
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_LPE_SET;
+	msg.max_frame = mtu;
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+					     SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret || ((msg.msg_type & SXEVF_LPE_SET) &&
+		    (msg.msg_type & SXEVF_MSGTYPE_NACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	LOG_INFO_BDF("msg_type:0x%x max_frame:0x%x (ret:%d)\n",
+		   msg.msg_type, msg.max_frame, ret);
+
+	return ret;
+}
+
+/* Add or remove a VLAN filter for this VF via the PF mailbox.
+ *
+ * The on/off flag travels in the info field of the message type word.
+ * Return 0 on success, negative error on send failure or missing ack.
+ */
+s32 sxevf_vlan_id_set(struct sxevf_hw *hw, u32 vlan_id,
+						 bool vlan_on)
+{
+	struct sxevf_vlan_filter_msg msg = {};
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_VLAN_SET | (vlan_on << SXEVF_MSGINFO_SHIFT);
+	msg.vlan_id = vlan_id;
+
+	LOG_INFO_BDF("update vlan[%u], vlan on = %s\n", vlan_id, vlan_on ? "yes" : "no");
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
+					     SXEVF_MSG_NUM(sizeof(msg)));
+	LOG_INFO_BDF("update vlan[%u] ret = %d\n", vlan_id, ret);
+
+	/* Drop the info bits before comparing the reply code. */
+	msg.msg_type &= ~(0xFF << SXEVF_MSGINFO_SHIFT);
+
+	if (!ret && msg.msg_type != (SXEVF_VLAN_SET | SXEVF_MSGTYPE_ACK))
+		ret = -SXEVF_ERR_MSG_HANDLE_ERR;
+
+	return ret;
+}
+
+/* Request a receive filtering mode (none/multi/allmulti/promisc).
+ *
+ * Return 0 when the PF acks the request, negative error otherwise.
+ */
+s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode)
+{
+	struct sxevf_cast_mode_msg msg = {};
+	s32 ret;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	msg.msg_type = SXEVF_CAST_MODE_SET;
+	msg.cast_mode = mode;
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	if (ret || (msg.msg_type != (SXEVF_CAST_MODE_SET | SXEVF_MSGTYPE_ACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	LOG_INFO_BDF("msg_type:0x%x mode:0x%x msg result:0x%x.(ret:%d)\n",
+		   msg.msg_type, mode, msg.msg_type, ret);
+
+	return ret;
+}
+
+/* Sync one unicast address (by table @index) to the PF.
+ *
+ * @mac_addr may be NULL, in which case only type/index are sent.
+ * Return 0 when the PF echoes the request with the ack flag set.
+ *
+ * NOTE(review): the reply check reads the u16 msg_type/index pair back
+ * through a u32 -- this depends on struct layout and host endianness;
+ * confirm it matches the PF's reply encoding.
+ */
+s32 sxevf_uc_addr_add(struct sxevf_hw *hw, u32 index, u8 *mac_addr)
+{
+	s32 ret = 0;
+	struct sxevf_adapter *adapter = hw->adapter;
+	struct sxevf_uc_sync_msg msg = {};
+	u32 check;
+	u32 result;
+
+	msg.msg_type = SXEVF_UC_ADDR_SYNC;
+	msg.index = index;
+	/* Snapshot word 0 of the request for the echo comparison below. */
+	check = *(u32 *)&msg;
+
+	if (mac_addr) {
+		memcpy((u8 *)&msg.addr, mac_addr, SXEVF_MAC_ADDR_LEN);
+	}
+
+	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
+	result = *(u32 *)&msg;
+
+	if (ret || (result != (check | SXEVF_MSGTYPE_ACK))) {
+		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
+	}
+
+	LOG_INFO_BDF("msg_type:0x%x index:%d addr:%pM sync done "
+		 " result:0x%x msg.(ret:%d)\n",
+		 msg.msg_type, index, mac_addr, result, ret);
+
+	return ret;
+
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_msg.h b/drivers/net/sxe/vf/sxevf_msg.h
new file mode 100644
index 0000000000..c3e22d7785
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_msg.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXEVF_MSG_H__
+#define __SXEVF_MSG_H__
+
+struct sxevf_adapter;
+
+#define SXEVF_MAC_ADDR_LEN 6
+
+#define SXEVF_UC_ENTRY_NUM_MAX 10
+#define SXEVF_MC_ENTRY_NUM_MAX 30
+
+#define SXEVF_MBX_MSG_NUM        16
+#define SXEVF_MBX_RETRY_INTERVAL 500
+#define SXEVF_MBX_RETRY_COUNT    2000
+
+#define SXEVF_RST_CHECK_NUM          200
+
+#define SXEVF_DEFAULT_ADDR_LEN       4
+#define SXEVF_MC_FILTER_TYPE_WORD    3
+
+#define SXEVF_RESET               0x01 
+#define SXEVF_DEV_MAC_ADDR_SET    0x02 
+#define SXEVF_MC_ADDR_SYNC        0x03 
+#define SXEVF_VLAN_SET            0x04 
+#define SXEVF_LPE_SET             0x05  
+
+#define SXEVF_UC_ADDR_SYNC        0x06  
+
+#define SXEVF_API_NEGOTIATE       0x08  
+
+#define SXEVF_RING_INFO_GET       0x09  
+
+#define SXEVF_REDIR_TBL_GET       0x0a 
+#define SXEVF_RSS_KEY_GET         0x0b 
+#define SXEVF_CAST_MODE_SET       0x0c 
+#define SXEVF_LINK_ENABLE_GET 	  0X0d  
+#define SXEVF_IPSEC_ADD           0x0e 
+#define SXEVF_IPSEC_DEL           0x0f 
+#define SXEVF_RSS_CONF_GET    	  0x10 
+
+#define SXEVF_PF_CTRL_MSG_LINK_UPDATE  0x100
+#define SXEVF_PF_CTRL_MSG_NETDEV_DOWN   0x200
+
+#define SXEVF_PF_CTRL_MSG_REINIT        0x400
+
+#define SXEVF_PF_CTRL_MSG_MASK          0x700
+#define SXEVF_PFREQ_MASK                0xFF00 
+
+#define SXEVF_RSS_HASH_KEY_SIZE   (40)  
+#define SXEVF_MAX_RETA_ENTRIES    (128) 
+#define SXEVF_RETA_ENTRIES_DWORDS (SXEVF_MAX_RETA_ENTRIES / 16)
+
+#define SXEVF_TX_QUEUES      1 
+#define SXEVF_RX_QUEUES      2 
+#define SXEVF_TRANS_VLAN     3 
+#define SXEVF_DEF_QUEUE      4 
+
+#define SXEVF_MSGTYPE_ACK    0x80000000
+#define SXEVF_MSGTYPE_NACK   0x40000000
+
+#define SXEVF_MSGINFO_SHIFT  16
+#define SXEVF_MSGINFO_MASK   (0xFF << SXEVF_MSGINFO_SHIFT)
+
+#define SXEVF_MSG_NUM(size)   DIV_ROUND_UP(size, 4)
+
+/* Mailbox API revisions negotiated with the PF (newest preferred).
+ * Fix: removed trailing whitespace present in the original enumerators.
+ */
+enum sxevf_mbx_api_version {
+	SXEVF_MBX_API_10 = 0,
+	SXEVF_MBX_API_11,
+	SXEVF_MBX_API_12,
+	SXEVF_MBX_API_13,
+	SXEVF_MBX_API_14,
+
+	SXEVF_MBX_API_NR, /* list terminator / revision count */
+};
+
+/* Receive filtering modes requested via SXEVF_CAST_MODE_SET. */
+enum sxevf_cast_mode {
+	SXEVF_CAST_MODE_NONE = 0,
+	SXEVF_CAST_MODE_MULTI,
+	SXEVF_CAST_MODE_ALLMULTI,
+	SXEVF_CAST_MODE_PROMISC,
+};
+
+/* Reply payload of a SXEVF_RESET exchange. */
+struct sxevf_rst_msg {
+	u32 msg_type;
+	u32 mac_addr[2];
+	u32 mc_fiter_type;
+};
+
+/* SXEVF_API_NEGOTIATE request/reply. */
+struct sxevf_mbx_api_msg {
+	u32 msg_type;
+	u32 api_version;
+};
+
+/* SXEVF_RING_INFO_GET reply: ring limits and TC layout. */
+struct sxevf_ring_info_msg {
+	u32 msg_type;
+	u8  max_rx_num;
+	u8  max_tx_num;
+	u8  tc_num;
+	u8  default_tc;
+};
+
+/* SXEVF_DEV_MAC_ADDR_SET request. */
+struct sxevf_uc_addr_msg {
+	u32 msg_type;
+	u8 uc_addr[SXEVF_MAC_ADDR_LEN];
+	u16 pad;
+};
+
+/* SXEVF_CAST_MODE_SET request. */
+struct sxevf_cast_mode_msg {
+	u32 msg_type;
+	u32 cast_mode;
+};
+
+/* SXEVF_MC_ADDR_SYNC request: hashed multicast address extracts. */
+struct sxevf_mc_sync_msg {
+	u16 msg_type;
+	u16 mc_cnt;
+	u16 mc_addr_extract[SXEVF_MC_ENTRY_NUM_MAX];
+};
+
+/* SXEVF_UC_ADDR_SYNC request: one unicast table entry. */
+struct sxevf_uc_sync_msg {
+	u16 msg_type;
+	u16 index;
+	u32 addr[2];
+};
+
+/* SXEVF_LPE_SET request: maximum Rx frame size. */
+struct sxevf_max_frame_msg {
+	u32 msg_type;
+	u32 max_frame;
+};
+
+/* SXEVF_VLAN_SET request: one VLAN filter entry. */
+struct sxevf_vlan_filter_msg {
+	u32 msg_type;
+	u32 vlan_id;
+};
+
+/* SXEVF_REDIR_TBL_GET reply: RSS redirection table. */
+struct sxevf_redir_tbl_msg {
+	u32 type;
+	u32 entries[SXEVF_RETA_ENTRIES_DWORDS];
+};
+
+/* SXEVF_RSS_KEY_GET reply.  NOTE(review): "hsah" looks like a typo for
+ * "hash" in the struct tag; kept as-is since it is part of the API.
+ */
+struct sxevf_rss_hsah_key_msg {
+	u32 type;
+	u8  hash_key[SXEVF_RSS_HASH_KEY_SIZE];
+};
+
+/* SXEVF_RSS_CONF_GET reply: key bytes plus hash-function mask. */
+struct sxevf_rss_hash_msg {
+	u32 msg_type;
+	u8  hash_key[SXEVF_RSS_HASH_KEY_SIZE];
+	u64 rss_hf;
+};
+
+/* SXEVF_IPSEC_ADD request. */
+struct sxevf_ipsec_add_msg {
+	u32 msg_type;
+	u32 pf_sa_idx;
+	__be32 spi;
+	u8 flags;
+	u8 proto;
+	u16 family;
+	__be32 addr[4];
+	u32 key[5];
+};
+
+/* SXEVF_IPSEC_DEL request. */
+struct sxevf_ipsec_del_msg {
+	u32 msg_type;
+	u32 sa_idx;
+};
+
+/* VF mailbox API.
+ * Fix: removed the duplicate declaration of sxevf_mbx_api_version_init()
+ * (it appeared twice in the original list).
+ */
+void sxevf_mbx_init(struct sxevf_hw *hw);
+
+void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter);
+
+bool sxevf_pf_rst_check(struct sxevf_hw *hw);
+
+s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
+
+s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len);
+
+s32 sxevf_mac_addr_set(struct sxevf_hw *hw, u8 *uc_addr);
+
+s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
+			u8 *tc_num, u8 *default_tc);
+
+s32 sxevf_rss_hash_config_get(struct sxevf_adapter *adapter,
+			struct rte_eth_rss_conf *rss_conf);
+
+s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
+
+s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 mtu);
+
+s32 sxevf_vlan_id_set(struct sxevf_hw *hw, u32 vlan,
+						 bool vlan_on);
+s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode);
+
+s32 sxevf_uc_addr_add(struct sxevf_hw *hw, u32 index, u8 *mac_addr);
+
+s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
+
+#endif 
diff --git a/drivers/net/sxe/vf/sxevf_offload.c b/drivers/net/sxe/vf/sxevf_offload.c
new file mode 100644
index 0000000000..91f8d6d2e6
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_offload.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_offload_common.h"
+#include "sxevf_offload.h"
+
+/* Per-queue Rx offload capabilities; delegates to the common layer. */
+u64 sxevf_rx_queue_offloads_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_queue_offload_capa_get(dev);
+}
+
+/* Port-wide Rx offload capabilities; delegates to the common layer. */
+u64 sxevf_rx_port_offloads_get(struct rte_eth_dev *dev)
+{
+	return __sxe_rx_port_offload_capa_get(dev);
+}
+
+/* Per-queue Tx offload capabilities: none are queue-specific on the VF. */
+u64 sxevf_tx_queue_offloads_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+/* Port-wide Tx offload capabilities; delegates to the common layer. */
+u64 sxevf_tx_port_offloads_get(struct rte_eth_dev *dev)
+{
+	return __sxe_tx_port_offload_capa_get(dev);
+}
+
diff --git a/drivers/net/sxe/vf/sxevf_offload.h b/drivers/net/sxe/vf/sxevf_offload.h
new file mode 100644
index 0000000000..9c5ab4cb8d
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_offload.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_OFFLOAD_H__
+#define __SXEVF_OFFLOAD_H__
+
+/* Offload-capability getters for the sxe VF PMD.
+ * NOTE(review): relies on the includer providing u64 and the
+ * struct rte_eth_dev declaration before this header -- confirm every
+ * include site does so.
+ */
+u64 sxevf_rx_queue_offloads_get(struct rte_eth_dev *dev);
+
+u64 sxevf_rx_port_offloads_get(struct rte_eth_dev *dev);
+
+u64 sxevf_tx_queue_offloads_get(struct rte_eth_dev *dev);
+
+u64 sxevf_tx_port_offloads_get(struct rte_eth_dev *dev);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_queue.c b/drivers/net/sxe/vf/sxevf_queue.c
new file mode 100644
index 0000000000..5e7d9ec17d
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_queue.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_core.h>
+#include <rte_ethdev.h>
+
+#include "sxe_dpdk_version.h"
+#include "sxevf_rx.h"
+#include "sxevf_tx.h"
+#include "sxe_logs.h"
+#include "sxevf.h"
+#include "sxe_queue_common.h"
+#include "sxevf_hw.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+#include "sxevf_queue.h"
+#include "sxevf_msg.h"
+
+/* Populate every descriptor of @rxq with a freshly allocated mbuf.
+ *
+ * Return 0 on success, negative error from the common allocator.
+ *
+ * Fix: dropped the no-op cast of @rxq to its own type and the
+ * pass-through temporary.
+ */
+s32 __rte_cold sxevf_rx_queue_mbufs_alloc(sxevf_rx_queue_s *rxq)
+{
+	return __sxe_rx_queue_mbufs_alloc(rxq);
+}
+
+/* rte_eth_dev rx_queue_setup hook: marshal arguments into a rx_setup
+ * descriptor and hand off to the common queue layer (VF variant).
+ *
+ * Return 0 on success, negative error from the common layer.
+ */
+s32 __rte_cold sxevf_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 desc_num,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	struct sxevf_hw     *hw = &adapter->hw;
+	struct rx_setup rx_setup = {};
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rx_setup.desc_num = desc_num;
+	rx_setup.queue_idx = queue_idx;
+	rx_setup.socket_id = socket_id;
+	rx_setup.mp = mp;
+	rx_setup.dev = dev;
+	rx_setup.reg_base_addr = hw->reg_base_addr;
+	rx_setup.rx_conf = rx_conf;
+	rx_setup.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed;
+
+	/* "true" selects the VF register layout in the common layer. */
+	ret = __sxe_rx_queue_setup(&rx_setup, true);
+	if (ret) {
+		LOG_ERROR_BDF("rx queue setup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+/* rte_eth_dev tx_queue_setup hook: marshal arguments into a tx_setup
+ * descriptor and hand off to the common queue layer (VF variant).
+ *
+ * Return 0 on success, negative error from the common layer.
+ *
+ * Fixes: the failure log wrongly said "rx queue setup fail"; tx_setup
+ * is now zero-initialized like its Rx counterpart so no field is left
+ * indeterminate.
+ */
+s32 __rte_cold sxevf_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf)
+{
+	s32 ret;
+	struct sxevf_hw *hw = (&((struct sxevf_adapter *)(dev->data->dev_private))->hw);
+	struct tx_setup tx_setup = {};
+
+	tx_setup.dev = dev;
+	tx_setup.desc_num = ring_depth;
+	tx_setup.queue_idx = tx_queue_id;
+	tx_setup.socket_id = socket_id;
+	tx_setup.reg_base_addr = hw->reg_base_addr;
+	tx_setup.tx_conf = tx_conf;
+
+	ret = __sxe_tx_queue_setup(&tx_setup, true);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "tx queue setup fail.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+/* Queue release hooks.  Older DPDK releases (19.11/20.11) pass the queue
+ * pointer directly; newer ones pass (dev, queue_id).  Both variants free
+ * through the common queue layer.
+ */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxevf_rx_queue_release(void *rxq)
+{
+	__sxe_rx_queue_free(rxq);
+}
+
+void __rte_cold sxevf_tx_queue_release(void *txq)
+{
+	__sxe_tx_queue_free(txq);
+	return;
+}
+
+#else
+void __rte_cold
+sxevf_rx_queue_release(struct rte_eth_dev *dev, u16 queue_id)
+{
+	__sxe_rx_queue_free(dev->data->rx_queues[queue_id]);
+}
+
+void __rte_cold
+sxevf_tx_queue_release(struct rte_eth_dev *dev, u16 queue_id)
+{
+	__sxe_tx_queue_free(dev->data->tx_queues[queue_id]);
+	return;
+}
+#endif
+
+/* rte_eth_dev rxq_info_get hook: delegate to the common layer. */
+void sxevf_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	__sxe_rx_queue_info_get(dev, queue_id, qinfo);
+}
+
+/* rte_eth_dev txq_info_get hook: delegate to the common layer. */
+void sxevf_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info)
+{
+	__sxe_tx_queue_info_get(dev, queue_id, q_info);
+}
+
+/* Reclaim up to @free_cnt completed Tx descriptors on @tx_queue. */
+s32 sxevf_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	s32 ret = __sxe_tx_done_cleanup(tx_queue, free_cnt);
+
+	if (ret)
+		PMD_LOG_ERR(DRV, "tx cleanup fail.(err:%d)", ret);
+
+	return ret;
+}
+
+/* rte_eth_dev reta_update hook: unsupported on the VF.
+ *
+ * Returns -EIO when the port is stopped, -ENOTSUP otherwise; the
+ * arguments are accepted but never used.
+ */
+s32 sxevf_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size)
+{
+	s32 ret = -ENOTSUP;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(reta_conf);
+	RTE_SET_USED(reta_size);
+
+	if (!dev->data->dev_started) {
+		PMD_LOG_ERR(DRV,
+			"port %d must be started before rss reta update",
+			 dev->data->port_id);
+		ret = -EIO;
+		goto l_out;
+	}
+
+	PMD_LOG_ERR(DRV, "rss reta update is not supported on vf.(err:%d)", ret);
+
+l_out:
+	return ret;
+}
+
+/* rte_eth_dev reta_query hook: the VF exposes a zero-sized table.
+ *
+ * Succeeds only for reta_size == 0; any non-zero size is rejected
+ * with -EINVAL.
+ */
+s32 sxevf_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size)
+{
+	s32 ret = 0;
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(reta_conf);
+
+	if (reta_size != 0) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "vf rss reta size:0, not support query.(err:%d)", ret);
+	}
+
+	return ret;
+}
+
+/* rte_eth_dev rss_hash_conf_get hook: fetch the RSS config from the PF. */
+s32 sxevf_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+	s32 ret = sxevf_rss_hash_config_get(adapter, rss_conf);
+
+	if (ret)
+		LOG_ERROR_BDF("rss hash config get failed.(err:%d)\n", ret);
+
+	return ret;
+}
+
+/* rte_eth_dev rss_hash_update hook: not supported on the VF. */
+s32 sxevf_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(rss_conf);
+
+	PMD_LOG_ERR(DRV, "rss hash update is not supported on vf.(err:%d)", -ENOTSUP);
+
+	return -ENOTSUP;
+}
+
+/* Re-attach burst functions for a secondary process.
+ *
+ * NOTE(review): rx_vec_allowed is passed by address but its resulting
+ * value is discarded here -- confirm the VF intentionally ignores the
+ * vector-Rx decision made by the common layer.
+ */
+void sxevf_secondary_proc_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	bool rx_vec_allowed = 0;
+
+	__sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &rx_vec_allowed);
+	return;
+}
+
+/* Reset every Rx/Tx queue to its post-setup state via the common layer. */
+void __rte_cold sxevf_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	__sxe_txrx_queues_clear(dev, rx_batch_alloc_allowed);
+}
+
+/* Free all queue resources of @dev via the common layer. */
+void sxevf_queues_free(struct rte_eth_dev *dev)
+{
+	__sxe_queues_free(dev);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_queue.h b/drivers/net/sxe/vf/sxevf_queue.h
new file mode 100644
index 0000000000..1a061231a5
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_queue.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_QUEUE_H__
+#define __SXEVF_QUEUE_H__
+
+/* VF queue API: thin typedefs over the common queue types plus the
+ * rte_eth_dev queue/RSS hooks implemented in sxevf_queue.c.
+ *
+ * Fixes: guard macro typo ("QUEUEU" -> "QUEUE", both #ifndef and
+ * #define updated together); missing space after comma in the
+ * sxevf_pkts_recv() prototype.
+ */
+
+#include "sxe_dpdk_version.h"
+#include "sxe_types.h"
+#include "sxe_queue_common.h"
+
+/* The VF reuses the PF queue/descriptor layouts under VF-local names. */
+typedef union sxe_tx_data_desc sxevf_tx_data_desc_u;
+typedef struct sxe_rx_buffer   sxevf_rx_buffer_s;
+typedef union sxe_rx_data_desc sxevf_rx_data_desc_u;
+typedef struct sxe_tx_queue    sxevf_tx_queue_s;
+typedef struct sxe_rx_queue    sxevf_rx_queue_s;
+
+s32 __rte_cold sxevf_rx_queue_mbufs_alloc(sxevf_rx_queue_s *rxq);
+
+s32 __rte_cold sxevf_rx_queue_setup(struct rte_eth_dev *dev,
+			 u16 queue_idx, u16 desc_num,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp);
+
+s32 __rte_cold sxevf_tx_queue_setup(struct rte_eth_dev *dev,
+				u16 tx_queue_id,
+				u16 ring_depth,
+				u32 socket_id,
+				const struct rte_eth_txconf *tx_conf);
+
+/* Release-hook signature changed between DPDK LTS releases. */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+void __rte_cold sxevf_tx_queue_release(void *txq);
+
+void __rte_cold sxevf_rx_queue_release(void *rxq);
+
+#else
+void __rte_cold sxevf_tx_queue_release(struct rte_eth_dev *dev, u16 queue_id);
+
+void __rte_cold sxevf_rx_queue_release(struct rte_eth_dev *dev, u16 queue_id);
+#endif
+
+void sxevf_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void sxevf_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+		struct rte_eth_txq_info *q_info);
+
+s32 sxevf_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+s32 sxevf_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			u16 reta_size);
+
+s32 sxevf_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 u16 reta_size);
+
+s32 sxevf_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf);
+
+s32 sxevf_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf);
+
+void sxevf_secondary_proc_init(struct rte_eth_dev *eth_dev);
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 sxevf_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 sxevf_rx_descriptor_status(void *rx_queue, u16 offset);
+
+u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts);
+
+u16 sxevf_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+s32 sxevf_tx_descriptor_status(void *tx_queue, u16 offset);
+
+void __rte_cold sxevf_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void sxevf_queues_free(struct rte_eth_dev *dev);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_rx.c b/drivers/net/sxe/vf/sxevf_rx.c
new file mode 100644
index 0000000000..53b9168345
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_rx.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_common.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxevf.h"
+#include "sxevf_msg.h"
+#include "sxevf_rx.h"
+#include "sxe_rx_common.h"
+#include "sxevf_queue.h"
+#include "sxevf_rx.h"
+#include "sxe_ethdev.h"
+
+#define SXEVF_RX_HDR_SIZE  256
+
+/*
+ * Program the PSRTYPE register with the Rx queue-pair count encoded
+ * in the RQPL field, as consumed by the hardware RSS logic.
+ */
+static void sxevf_rss_bit_num_configure(struct sxevf_hw *hw, u16 rx_queues_num)
+{
+	u32 psrtype;
+
+	/* Half the queue count (queue pairs) shifted into the RQPL field. */
+	psrtype = (rx_queues_num >> 1) << SXEVF_PSRTYPE_RQPL_SHIFT;
+
+	sxevf_rss_bit_num_set(hw, psrtype);
+}
+
+/*
+ * Fold per-queue Rx offload requirements back into the port Rx mode:
+ * enable scattered Rx when a full frame cannot fit one mbuf data
+ * buffer, and propagate per-queue VLAN stripping to the port level.
+ */
+static void sxevf_rxmode_offload_configure(struct rte_eth_dev *eth_dev,
+						u64 queue_offload, u32 buf_size)
+{
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	u32 frame_size = SXE_GET_FRAME_SIZE(eth_dev);
+
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
+	    ((frame_size + 2 * SXEVF_VLAN_TAG_SIZE) > buf_size)) {
+		/* Warn only on the transition into scattered mode. */
+		if (!eth_dev->data->scattered_rx) {
+			PMD_LOG_WARN(DRV, "rxmode offload:0x%"SXE_PRIX64" max_rx_pkt_len:%u "
+				    "buf_size:%u enable rx scatter",
+				    rxmode->offloads,
+				    frame_size,
+				    buf_size);
+		}
+		eth_dev->data->scattered_rx = 1;
+	}
+
+	/* One queue requesting VLAN strip turns it on port-wide. */
+	if (queue_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+}
+
+/*
+ * Program every Rx queue: allocate ring mbufs, write the descriptor
+ * ring address/length and receive control registers, then fold the
+ * per-queue offloads back into the port Rx mode and set the burst
+ * function.
+ *
+ * Returns 0 on success or a negative error from mbuf allocation.
+ */
+static s32 sxevf_rx_queue_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_rx_queue_s *rxq;
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	s32 ret = 0;	/* must be initialized: loop is skipped when nb_rx_queues == 0 */
+	u16 i;
+	u32 len;
+	u32 buf_size;
+
+	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		len = rxq->ring_depth * sizeof(sxevf_rx_data_desc_u);
+
+		ret = sxevf_rx_queue_mbufs_alloc(rxq);
+		if (ret) {
+			LOG_ERROR_BDF("rx queue num:%u queue id:%u alloc "
+				      "rx buffer fail.(err:%d)",
+				      eth_dev->data->nb_rx_queues, i, ret);
+			goto l_out;
+		}
+
+		/* Usable mbuf data space once the headroom is reserved. */
+		buf_size = (u16)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+
+		sxevf_rx_ring_desc_configure(hw, len, rxq->base_addr, rxq->reg_idx);
+
+		sxevf_rx_rcv_ctl_configure(hw, rxq->reg_idx, SXEVF_RX_HDR_SIZE,
+					   buf_size, rxq->drop_en);
+
+		sxevf_rxmode_offload_configure(eth_dev, rxq->offloads, buf_size);
+	}
+
+	sxevf_rss_bit_num_configure(hw, eth_dev->data->nb_rx_queues);
+
+	sxevf_rx_function_set(eth_dev);
+
+l_out:
+	return ret;
+}
+
+/*
+ * Device-start Rx configuration: validate the Rx queue count
+ * (power of two and within the VF maximum), push the max frame size
+ * to the PF, then program all Rx queues.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+s32 sxevf_rx_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 frame_size = SXE_GET_FRAME_SIZE(eth_dev);
+	u32 mtu = frame_size - SXE_ETH_OVERHEAD;
+	s32 ret;
+
+	/* Reject Rx queue counts that are not a power of two. */
+	if (rte_is_power_of_2(eth_dev->data->nb_rx_queues) == 0) {
+		ret = -SXEVF_ERR_PARAM;
+		LOG_ERROR_BDF("invalid rx queue num:%u.",
+			 eth_dev->data->nb_rx_queues);
+		goto l_out;
+	}
+
+	if (eth_dev->data->nb_rx_queues > adapter->max_rx_queue) {
+		ret = -SXEVF_ERR_PARAM;
+		LOG_ERROR_BDF("invalid rx queue num:%u exceed max rx queue:%u ",
+			eth_dev->data->nb_rx_queues,
+			adapter->max_rx_queue);
+		goto l_out;
+	}
+
+	ret = sxevf_rx_max_frame_set(hw, mtu);
+	if (ret) {
+		LOG_ERROR_BDF("max frame size:%u set fail.(err:%d)",
+			      frame_size, ret);
+		goto l_out;
+	}
+
+	ret = sxevf_rx_queue_configure(eth_dev);
+	if (ret) {
+		/* ret is signed and negative on failure: log with %d, not %u. */
+		LOG_ERROR_BDF("rx queue num:%u configure fail.(err:%d)",
+			      eth_dev->data->nb_rx_queues, ret);
+	}
+
+l_out:
+	return ret;
+}
+
+/*
+ * Select the Rx burst function for this device via the shared PF/VF
+ * helper; the vector-allowed flag is only consulted when SIMD support
+ * is compiled in.
+ */
+void __rte_cold sxevf_rx_function_set(struct rte_eth_dev *dev)
+{
+	struct sxevf_adapter *adapter = dev->data->dev_private;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	__sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+#else
+	__sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, NULL);
+#endif
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+/* Legacy rx_descriptor_done callback: delegate to the shared helper. */
+s32 sxevf_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	return __sxe_rx_descriptor_done(rx_queue, offset);
+}
+#endif
+
+/* rx_descriptor_status callback: delegate to the shared PF/VF helper. */
+s32 sxevf_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	return __sxe_rx_descriptor_status(rx_queue, offset);
+}
+
+/* Rx burst entry point: delegate to the shared PF/VF receive path. */
+u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts)
+{
+	return __sxe_pkts_recv(rx_queue, rx_pkts, num_pkts);
+}
+
+/* Report the packet types the Rx path can parse (shared PF/VF list). */
+const u32 *sxevf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	return __sxe_dev_supported_ptypes_get(dev);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_rx.h b/drivers/net/sxe/vf/sxevf_rx.h
new file mode 100644
index 0000000000..8e862b7e01
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_RX_H__
+#define __SXEVF_RX_H__
+
+#include "sxe_queue_common.h"
+
+/* Rx descriptor ring alignment expressed in descriptors. */
+#define SXEVF_RX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxevf_rx_data_desc_t))
+
+/* Program all Rx queues and related hw settings at device start. */
+s32 sxevf_rx_configure(struct rte_eth_dev *eth_dev);
+
+const u32 *sxevf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+/* Select the Rx burst function for this device. */
+void __rte_cold sxevf_rx_function_set(struct rte_eth_dev *dev);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_stats.c b/drivers/net/sxe/vf/sxevf_stats.c
new file mode 100644
index 0000000000..f82ccf1fd7
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_stats.c
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+
+#include "sxevf_stats.h"
+#include "sxe_logs.h"
+#include "sxe_errno.h"
+#include "sxevf.h"
+
+#if defined DPDK_19_11_6
+#include <rte_string_fns.h>
+#endif
+
+/* Number of entries in the extended-stats descriptor table below. */
+#define SXE_HW_XSTATS_CNT (sizeof(sxevf_xstats_field) / \
+		      sizeof(sxevf_xstats_field[0]))
+
+/* Map of exported xstat name -> byte offset in struct sxevf_hw_stats. */
+static const struct sxevf_stats_field sxevf_xstats_field[] = {
+	{"rx_multicast_packets", offsetof(struct sxevf_hw_stats, vfmprc)},
+};
+
+#ifdef SXE_TEST
+/* Test-only hook exposing the xstats table size. */
+STATIC u32 sxevf_xstats_cnt_get(void)
+{
+	return SXE_HW_XSTATS_CNT;
+}
+#endif
+
+/*
+ * Basic statistics callback.
+ *
+ * NOTE: the hardware counters are refreshed BEFORE the NULL check on
+ * purpose: sxevf_dev_stats_reset() calls this with stats == NULL just
+ * to update the cached counters before zeroing them. Do not reorder.
+ *
+ * Returns 0 on success, -EINVAL when stats is NULL.
+ */
+s32 sxevf_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	/* Refresh cached counters even when the caller passes NULL. */
+	sxevf_packet_stats_get(hw, &stats_info->hw_stats);
+
+	if (stats == NULL) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "input param stats is null.");
+		goto l_out;
+	}
+
+	stats->ipackets = stats_info->hw_stats.vfgprc;
+	stats->ibytes   = stats_info->hw_stats.vfgorc;
+	stats->opackets = stats_info->hw_stats.vfgptc;
+	/* Tx byte counter includes CRC: strip it per transmitted packet. */
+	stats->obytes   = stats_info->hw_stats.vfgotc - stats->opackets * RTE_ETHER_CRC_LEN;
+
+l_out:
+	return ret;
+}
+
+/*
+ * Reset basic statistics: refresh the cached hardware counters first
+ * (sxevf_eth_stats_get() with a NULL destination), then zero the
+ * software copies so subsequent reads start from a clean baseline.
+ * Always returns 0.
+ */
+s32 sxevf_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw_stats *hw_stats = &adapter->stats_info.hw_stats;
+
+	/* Pull the latest counters from hardware; result is discarded. */
+	sxevf_eth_stats_get(eth_dev, NULL);
+
+	/* Clear only the counters exported through the stats API. */
+	hw_stats->vfgprc = 0;
+	hw_stats->vfgorc = 0;
+	hw_stats->vfgptc = 0;
+	hw_stats->vfgotc = 0;
+	hw_stats->vfmprc = 0;
+
+	return 0;
+}
+
+/*
+ * Translate an xstat index into its byte offset within
+ * struct sxevf_hw_stats.
+ * Returns 0 on success, -SXE_ERR_PARAM when id is out of range.
+ */
+static s32 sxevf_hw_xstat_offset_get(u32 id, u32 *offset)
+{
+	const u32 field_cnt = SXE_HW_XSTATS_CNT;
+
+	if (id >= field_cnt) {
+		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
+			    id, field_cnt);
+		return -SXE_ERR_PARAM;
+	}
+
+	*offset = sxevf_xstats_field[id].offset;
+
+	return 0;
+}
+
+/*
+ * Extended statistics callback.
+ *
+ * When usr_cnt is smaller than the number of available xstats the
+ * required count is returned and nothing is written; when xstats is
+ * NULL 0 is returned; otherwise xstats is filled and the number of
+ * entries written is returned.
+ */
+s32 sxevf_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 usr_cnt)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_stats_info *stats_info = &adapter->stats_info;
+	struct sxevf_hw *hw = &adapter->hw;
+	u32 i;
+	u32 cnt;
+	s32 ret;
+	u32 offset;
+
+	cnt = SXE_HW_XSTATS_CNT;
+	/* SXE_HW_XSTATS_CNT is sizeof-derived (size_t): %zu, not %lu,
+	 * which is wrong on ILP32 targets.
+	 */
+	PMD_LOG_INFO(DRV, "xstat size:%u. hw xstat field cnt:%zu ",
+		    cnt,
+		    SXE_HW_XSTATS_CNT);
+
+	if (usr_cnt < cnt) {
+		ret = cnt;
+		PMD_LOG_ERR(DRV, "user usr_cnt:%u less than stats cnt:%u.",
+			    usr_cnt, cnt);
+		goto l_out;
+	}
+
+	sxevf_packet_stats_get(hw, &stats_info->hw_stats);
+
+	if (xstats == NULL) {
+		ret = 0;
+		PMD_LOG_ERR(DRV, "usr_cnt:%u, input param xstats is null.",
+			    usr_cnt);
+		goto l_out;
+	}
+
+	cnt = 0;
+	for (i = 0; i < SXE_HW_XSTATS_CNT; i++) {
+		/* Cannot fail: i is always < SXE_HW_XSTATS_CNT. */
+		sxevf_hw_xstat_offset_get(i, &offset);
+		xstats[cnt].value = *(ulong *)(((s8 *)(&stats_info->hw_stats)) + offset);
+		xstats[cnt].id = cnt;
+		cnt++;
+	}
+
+	ret = SXE_HW_XSTATS_CNT;
+
+l_out:
+	return ret;
+}
+
+/*
+ * Report xstat names. With xstats_names == NULL the available xstat
+ * count is returned; with a buffer smaller than that count -ENOMEM is
+ * returned; otherwise the names are copied and their count returned.
+ *
+ * usr_cnt IS read below, so it must not carry __rte_unused.
+ */
+s32 sxevf_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int usr_cnt)
+{
+	u32 i = 0;
+	u32 cnt = 0;
+	s32 ret;
+
+	if (xstats_names == NULL) {
+		ret = SXE_HW_XSTATS_CNT;
+		/* ret is signed: log with %d, not %u. */
+		PMD_LOG_INFO(DRV, "xstats field size:%d.", ret);
+		goto l_out;
+	} else if (usr_cnt < SXE_HW_XSTATS_CNT) {
+		ret = -ENOMEM;
+		PMD_LOG_ERR(DRV, "usr_cnt:%u invalid.(err:%d).", usr_cnt, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < SXE_HW_XSTATS_CNT; i++) {
+		strlcpy(xstats_names[cnt].name,
+			sxevf_xstats_field[i].name,
+			sizeof(xstats_names[cnt].name));
+		cnt++;
+	}
+
+	ret = cnt;
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_stats.h b/drivers/net/sxe/vf/sxevf_stats.h
new file mode 100644
index 0000000000..bdfd5178fd
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_stats.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_STATS_H__
+#define __SXEVF_STATS_H__
+
+#include "sxevf_hw.h"
+
+/* One extended-stat descriptor: exported name plus byte offset into
+ * struct sxevf_hw_stats.
+ */
+struct sxevf_stats_field {
+	s8  name[RTE_ETH_XSTATS_NAME_SIZE];
+	u32 offset;
+};
+
+/* Per-adapter cached hardware counters. */
+struct sxevf_stats_info {
+	struct sxevf_hw_stats hw_stats;
+};
+
+/* Basic stats callback; returns -EINVAL when stats is NULL. */
+s32 sxevf_eth_stats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_stats *stats);
+
+/* Refresh hw counters, then zero the cached copies. */
+s32 sxevf_dev_stats_reset(struct rte_eth_dev *eth_dev);
+
+/* Extended stats values/names; see definitions for return contract. */
+s32 sxevf_xstats_get(struct rte_eth_dev *eth_dev,
+				struct rte_eth_xstat *xstats,
+				u32 usr_cnt);
+
+s32 sxevf_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int usr_cnt);
+
+#endif
+
diff --git a/drivers/net/sxe/vf/sxevf_tx.c b/drivers/net/sxe/vf/sxevf_tx.c
new file mode 100644
index 0000000000..667a165c64
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_tx.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+
+#include <rte_ethdev.h>
+
+#include "sxe_logs.h"
+#include "sxevf.h"
+#include "sxevf_tx.h"
+#include "sxevf_queue.h"
+#include "sxe_tx_common.h"
+
+/*
+ * Program every Tx queue at device start: descriptor ring base/length
+ * plus the prefetch/host/write-back thresholds.
+ */
+void sxevf_tx_configure(struct rte_eth_dev *eth_dev)
+{
+	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
+	struct sxevf_hw *hw = &adapter->hw;
+	sxevf_tx_queue_s *txq;
+	u16 i;
+	u32 len;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		/* Ring length in bytes = depth * descriptor size. */
+		len = txq->ring_depth * sizeof(sxevf_tx_data_desc_u);
+		sxevf_tx_desc_configure(hw, len, txq->base_addr, txq->reg_idx);
+
+		sxevf_tx_queue_thresh_set(hw, txq->reg_idx,
+			txq->pthresh, txq->hthresh, txq->wthresh);
+	}
+
+	LOG_DEBUG_BDF("tx queue num:%u tx configure done.",
+			eth_dev->data->nb_tx_queues);
+}
+
+/* tx_descriptor_status callback: delegate to the shared PF/VF helper. */
+s32 sxevf_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	return __sxe_tx_descriptor_status(tx_queue, offset);
+}
+
+/* Tx burst entry with offloads: shared PF/VF transmit implementation. */
+u16 sxevf_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	return __sxe_pkts_xmit_with_offload(tx_queue, tx_pkts, pkts_num);
+}
+
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_tx.h b/drivers/net/sxe/vf/sxevf_tx.h
new file mode 100644
index 0000000000..858341db97
--- /dev/null
+++ b/drivers/net/sxe/vf/sxevf_tx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_TX_H__
+#define __SXEVF_TX_H__
+
+#include "sxe_queue_common.h"
+
+/* Tx descriptor ring alignment expressed in descriptors. */
+#define SXEVF_TX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxevf_tx_data_desc_u))
+
+/* Program all Tx queues at device start. */
+void sxevf_tx_configure(struct rte_eth_dev *eth_dev);
+
+#endif
+
-- 
2.45.2.windows.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH v2] net/sxe: add net driver sxe
  2024-08-26  8:10 [PATCH] net/sxe: add net driver sxe Jie Liu
@ 2024-09-06 23:39 ` Jie Liu
  2024-09-09 11:35   ` Thomas Monjalon
  0 siblings, 1 reply; 3+ messages in thread
From: Jie Liu @ 2024-09-06 23:39 UTC (permalink / raw)
  To: anatoly.burakov; +Cc: dev, Jie Liu

Adjust code implementation according to code style requirements.

Signed-off-by: Jie Liu <liujie5@linkdatatechnology.com>
---
 MAINTAINERS                                |    2 +-
 doc/guides/nics/features/sxe.ini           |    1 +
 drivers/net/sxe-dpdk-0.0.0.0-src.tar.gz    |  Bin 0 -> 196888 bytes
 drivers/net/sxe.zip                        |  Bin 0 -> 193908 bytes
 drivers/net/sxe/Makefile                   |   14 +
 drivers/net/sxe/base/docker_version        |    4 -
 drivers/net/sxe/base/sxe_common.c          |   18 +-
 drivers/net/sxe/base/sxe_compat_platform.h |   23 +-
 drivers/net/sxe/base/sxe_compat_version.h  |  303 +-
 drivers/net/sxe/base/sxe_errno.h           |   78 +-
 drivers/net/sxe/base/sxe_hw.c              | 1263 +++------
 drivers/net/sxe/base/sxe_hw.h              |  928 +++---
 drivers/net/sxe/base/sxe_logs.h            |  132 +-
 drivers/net/sxe/base/sxe_offload_common.c  |    9 +-
 drivers/net/sxe/base/sxe_queue_common.c    |   97 +-
 drivers/net/sxe/base/sxe_queue_common.h    |  177 +-
 drivers/net/sxe/base/sxe_rx_common.c       |   79 +-
 drivers/net/sxe/base/sxe_rx_common.h       |    3 +-
 drivers/net/sxe/base/sxe_tx_common.c       |    6 +-
 drivers/net/sxe/base/sxe_types.h           |    2 +-
 drivers/net/sxe/base/sxevf_hw.c            |  238 +-
 drivers/net/sxe/base/sxevf_hw.h            |  139 +-
 drivers/net/sxe/base/sxevf_regs.h          |  182 +-
 drivers/net/sxe/include/drv_msg.h          |    6 +-
 drivers/net/sxe/include/sxe/mgl/sxe_port.h |   37 +-
 drivers/net/sxe/include/sxe/sxe_cli.h      |  247 +-
 drivers/net/sxe/include/sxe/sxe_hdc.h      |   46 +-
 drivers/net/sxe/include/sxe/sxe_ioctl.h    |   10 +-
 drivers/net/sxe/include/sxe/sxe_msg.h      |  124 +-
 drivers/net/sxe/include/sxe/sxe_regs.h     |  880 +++---
 drivers/net/sxe/include/sxe_type.h         |  820 +++---
 drivers/net/sxe/include/sxe_version.h      |   31 +-
 drivers/net/sxe/meson.build                |   14 +
 drivers/net/sxe/pf/sxe.h                   |   30 +-
 drivers/net/sxe/pf/sxe_dcb.c               |  147 +-
 drivers/net/sxe/pf/sxe_dcb.h               |   48 +-
 drivers/net/sxe/pf/sxe_ethdev.c            |  117 +-
 drivers/net/sxe/pf/sxe_ethdev.h            |   11 +-
 drivers/net/sxe/pf/sxe_filter.c            |  125 +-
 drivers/net/sxe/pf/sxe_filter.h            |   40 +-
 drivers/net/sxe/pf/sxe_filter_ctrl.c       | 2951 ++++++++++++++++++++
 drivers/net/sxe/pf/sxe_filter_ctrl.h       |  153 +
 drivers/net/sxe/pf/sxe_flow_ctrl.c         |   13 +-
 drivers/net/sxe/pf/sxe_flow_ctrl.h         |    6 +-
 drivers/net/sxe/pf/sxe_fnav.c              |  507 ++++
 drivers/net/sxe/pf/sxe_fnav.h              |   80 +
 drivers/net/sxe/pf/sxe_irq.c               |  116 +-
 drivers/net/sxe/pf/sxe_irq.h               |   15 +-
 drivers/net/sxe/pf/sxe_macsec.c            |  260 ++
 drivers/net/sxe/pf/sxe_macsec.h            |   20 +
 drivers/net/sxe/pf/sxe_main.c              |   27 +-
 drivers/net/sxe/pf/sxe_offload.c           |   74 +-
 drivers/net/sxe/pf/sxe_offload.h           |    4 +-
 drivers/net/sxe/pf/sxe_phy.c               |  166 +-
 drivers/net/sxe/pf/sxe_phy.h               |   57 +-
 drivers/net/sxe/pf/sxe_pmd_hdc.c           |  168 +-
 drivers/net/sxe/pf/sxe_pmd_hdc.h           |   28 +-
 drivers/net/sxe/pf/sxe_ptp.c               |   14 +-
 drivers/net/sxe/pf/sxe_ptp.h               |    4 +-
 drivers/net/sxe/pf/sxe_queue.c             |  196 +-
 drivers/net/sxe/pf/sxe_queue.h             |   34 +-
 drivers/net/sxe/pf/sxe_rx.c                |  274 +-
 drivers/net/sxe/pf/sxe_rx.h                |   41 +-
 drivers/net/sxe/pf/sxe_stats.c             |   63 +-
 drivers/net/sxe/pf/sxe_stats.h             |    8 +-
 drivers/net/sxe/pf/sxe_tm.c                | 1115 ++++++++
 drivers/net/sxe/pf/sxe_tm.h                |   59 +
 drivers/net/sxe/pf/sxe_tx.c                |  208 +-
 drivers/net/sxe/pf/sxe_tx.h                |    2 +-
 drivers/net/sxe/pf/sxe_vec_common.h        |  328 +++
 drivers/net/sxe/pf/sxe_vec_neon.c          |  606 ++++
 drivers/net/sxe/pf/sxe_vec_sse.c           |  634 +++++
 drivers/net/sxe/pf/sxe_vf.c                |  247 +-
 drivers/net/sxe/pf/sxe_vf.h                |  114 +-
 drivers/net/sxe/rte_pmd_sxe_version.map    |    2 +-
 drivers/net/sxe/version.map                |    6 +-
 drivers/net/sxe/vf/sxevf.h                 |    8 +-
 drivers/net/sxe/vf/sxevf_ethdev.c          |  169 +-
 drivers/net/sxe/vf/sxevf_filter.c          |  119 +-
 drivers/net/sxe/vf/sxevf_filter.h          |   35 +-
 drivers/net/sxe/vf/sxevf_irq.c             |   90 +-
 drivers/net/sxe/vf/sxevf_irq.h             |    4 +-
 drivers/net/sxe/vf/sxevf_main.c            |   14 +-
 drivers/net/sxe/vf/sxevf_msg.c             |   78 +-
 drivers/net/sxe/vf/sxevf_msg.h             |   76 +-
 drivers/net/sxe/vf/sxevf_queue.c           |   20 +-
 drivers/net/sxe/vf/sxevf_queue.h           |    8 +-
 drivers/net/sxe/vf/sxevf_rx.c              |   28 +-
 drivers/net/sxe/vf/sxevf_stats.c           |   14 +-
 drivers/net/sxe/vf/sxevf_tx.c              |    1 -
 90 files changed, 10798 insertions(+), 4897 deletions(-)
 create mode 100644 drivers/net/sxe-dpdk-0.0.0.0-src.tar.gz
 create mode 100644 drivers/net/sxe.zip
 delete mode 100644 drivers/net/sxe/base/docker_version
 create mode 100644 drivers/net/sxe/pf/sxe_filter_ctrl.c
 create mode 100644 drivers/net/sxe/pf/sxe_filter_ctrl.h
 create mode 100644 drivers/net/sxe/pf/sxe_fnav.c
 create mode 100644 drivers/net/sxe/pf/sxe_fnav.h
 create mode 100644 drivers/net/sxe/pf/sxe_macsec.c
 create mode 100644 drivers/net/sxe/pf/sxe_macsec.h
 create mode 100644 drivers/net/sxe/pf/sxe_tm.c
 create mode 100644 drivers/net/sxe/pf/sxe_tm.h
 create mode 100644 drivers/net/sxe/pf/sxe_vec_common.h
 create mode 100644 drivers/net/sxe/pf/sxe_vec_neon.c
 create mode 100644 drivers/net/sxe/pf/sxe_vec_sse.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 03adb4036f..e3d5c35093 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -982,7 +982,7 @@ F: doc/guides/nics/sfc_efx.rst
 F: doc/guides/nics/features/sfc.ini
 
 Linkdata sxe
-M: Jie Liu <liujie5@linkdatatechnology.com>
+M: Jie Li <lijie@linkdatatechnology.com>
 F: drivers/net/sxe/
 F: doc/guides/nics/sxe.rst
 F: doc/guides/nics/features/sxe*.ini
diff --git a/doc/guides/nics/features/sxe.ini b/doc/guides/nics/features/sxe.ini
index 5a18808ccf..b61ad7c699 100644
--- a/doc/guides/nics/features/sxe.ini
+++ b/doc/guides/nics/features/sxe.ini
@@ -35,6 +35,7 @@ VLAN offload         = P
 QinQ offload         = P
 L3 checksum offload  = P
 L4 checksum offload  = P
+MACsec offload       = P
 Inner L3 checksum    = P
 Inner L4 checksum    = P
 Packet type parsing  = Y
diff --git a/drivers/net/sxe-dpdk-0.0.0.0-src.tar.gz b/drivers/net/sxe-dpdk-0.0.0.0-src.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..44a4c97fb4812e5dfaccd076888a8c016231f80e
GIT binary patch
literal 196888
zcmV(^K-Iq=iwFSVJ=tae1MEF(bK5qy{VM+oPCBzAdEG>$WLd4-%+Mrd^IETvl;hmq
z9gHGTwytGKk?6$jw*UR&;6Z`_NIGrm-8&o3ZY-QP4h{|;1ksnT_K)$aZ~oG!iJy~`
zWB6;H95=<k{Q1UgcRC%b)j4TnxM`l4oo|fezt$}JJnWO{&M?0D-*|nuT<;6%s__3$
zpO>Wl-%s~{VGw+N`&*q>^K1J*$0M{q{()wj`}@uM<?R>M0S(;gv<ut6-L|CmKQ=Mi
zXns*E3ZDP)_J4I~gk$&myZ&+(ulMnH{ybhM%f&L@{a~Dh?su*4oYnMUA0HTphO^oJ
zvRmHVCPv+992r*AvW|@Ya(y?SCQ~DdXSeIkYIE}oCVYtz$^0eedUf#aa<QJr3j?9>
z+C%Qxy+qDvFc=Nd1wsekV$^aS7hn$_I&A|2GucgNF<Q>iO}wsu+$`sf(}M%}o5#*p
z@ifD0m=hJbypI8acJXwD?)NFdxAA(uTpawXTj;NSr2hZmA%6JcS)jcCn`R5+^8LTl
z>X={q|8qRg(Ep_b@Mhvoyy6U?pfUS;wtASy#tYJ2f7!<Sm$xq@Fb>)6G(p?dG+AtR
z_k{!><K2D<u?_eISOrQiMqz~9z`ybWiEY07NK_IAnh;WhgUCbUflJID*w-kyMrV^C
zjEt6ba1gH_?n!=`C65V$!aomcK<Px+SeF+%s9F2TXdcN>6GKe}I-Cp)<0r#9I`|!@
zrhT$|m?g&U5$)sTVGE);D2Y*=+@g8>(Ky8aoF3F3%#K0(^>Y5m%JX<X1EZkWvW)#^
zc84?Ia*oTh&3d00lDOR?1}}_5tPy^k=Jegi_uI{8WgOmbL0%WP<Z^?7m^{8gkXz7n
zKHp*7dqReVyq_ksTQps*HZ$_?rx>diYL+}(%@S6#B&}IV1;E0GHI5%Tay%l4d;}^5
zL-DT~LaP{$FYg}~i+IOtPoUVQ+5Mcv6fUg=@Mg7KFE$A0qvT<a;lCSrM4F%K<nHz#
z=K|;YG1<o-u*YH%DhT@YKIdI99vXpIC%O0@Lvc&ue|0R>+aP%ld@zR2_7C?c-YMSM
zReEQ8m*iZ7LAUeNTe|8CKWkJI`N?Yx`c4TzB&28@8lVqce42xT$_)Cual1#m{hpM5
zH&z=o|Fqc=x9T)<O1;A6mIj^x0&j6m#B<9nQ+2z{RDt48F>#D2IM=Y?^9>>4vT-=w
zB)1~u<7&F5@*cL^cvln)$rECMbgp&$$odkLR?|H#fjFE^4Vo9{Mmm%0w^7ftqe<X}
z#^3&?;zNJnY7N18dR88oJQ?S@<7Tl~ZKiV`nh*~^TmQ(%5Fl0-^t~TWPI<XS%HOBu
zHYs;`IoacW8Q<)G{lV;T82A<?vn2sB*ybU@wlirdH-lO!^mnBN$RGhx$W33YrZ*5;
z+UjLLt0~a^bbqI7anhAR-Ny|cG?%N`D9CJwB&F__>zh+z8xAi+$ccZv;e`&R4aP$j
z3rw|^hy;pA2mtEhZ{ro7w>(8OizUGmxR)7tOqj!};D%=;9P_yDM9DnflAcCEq+28#
zgy%^qY_W^6!rNWEzlHE%yn6*ioQoOfe9+2CB<q>e94!&7(9Y-i@@}7&r4bU$z;zPK
z+s%%5>n^nJmK({+w}R0G@F^o8@xWqjPX}ELn89%R0E0qJ8|4H)o!!PiE7P2R$Rjmv
zxesWw-5ZDaA8HnaI1VeE1hMO24xi{ocsz*zy1`Sm!~0(ghch|Pu-Odc8U(*iwOwRa
z)<q1lgD7FO%ZHVAP?lm}PlI*l<M@wvMJ3U~V7XZCaE8Z;@3eq`i~tl5fdB~HzDqpK
zo~|BZ)=<kmj&JBRd*~j5jdhqn+kta|uDhLv@!M}I!VU%v8l|v0huLBl`5eL;pLJs1
zmkEX3Rkyp%EZ!4iviQH<ti;X-3x!pj;Hbhi+zSZWukd7Ydw0V?;xsnFOQRbYV^!3C
z`72KhyfdbC5G?_hl8GVB*%T)BO=Gs3u~{RO=DW=nWhJ>cK$DPBDjiHPK<KJ+lP;!r
z^p~!=Gq-?Oe=c1692%C=gO#3GE1p%tng)JohAf%_$cVL^8}&nov_@Sws^QuEhsJ42
z2sOa7x)>+Zef%U2aX^8DTgpTX#a$aAsOSub6aJa68mwiwh;-F<$bTIfWMPCm%Etxl
zJT2FWDlE|oOBYIqNr5m@Bgr1J(16Xbg0<C=Aug?s=nPg_5`Lh${O|+(kyIsrT$Qpw
zOZ6bGh<cmPRB%|)ffBogm~hCoACZ(xM$FkdX;iio4HVL8>$;Hwx7Y`vqD3tP3d;f}
z+qm2nu{=@bpXE;;IpLnTa97++B&zgFcd6}}JkcasoW!w@alV*OcfTOW&}0bFa<U&s
zY$ri)4Ko#b7oH1>HvUKK=ziWK?f=fV^Sf-1><j6rc>iNs#~o$=_qg?S|Mxkb=imP&
zlg=wI2>s$WKq5NZko^&IK^mN|>7E0bD>8cH^-qRSgw;gd{Qk&j(*HGJ{wtFIsHHY7
zMviBm+%{sB`8_gC)PaTN6BJv`&ruB2g^H~vC_a9wV)L^Un`Mg4&r)oCmSU?+u?31-
z*_Qj)%>CCq`TYNJ0SnXp^S=K-f#34`|IYE({Qn%!v*-V-9tynkP|f@zBE_U|{D0d;
zJ=^aaM)PsOo=Fj5;6=eZ<2WY>UG@6@@G?Y`ajn*TWYO2vl7wT=b2Fk&F%dGG=T9UQ
z=-Q<Mn;D@cSHX#bermpi3nM$4gf;L`Vbw5}qD>jy3nG8u1vQFp%jn>G;0@je_LvH2
zncvImJ>LnaX{KTt#)TXwSKjjnDNfdkaSq2h3eFxdoT<u;g8E~lX4C=SwVKA?jd~jg
zF$Ut%CQs~N*#fayO^gP9=upjVxu$`8X7@oF2*Zvb!kUlmW`R7QzYyr#mGpgYDA0E+
z>4|j1p!8<7JyDLnQvS8;L{MRavshIuVGu|xW>u~G!AREPRIrT0AbL0UXgs%+h>XN`
zvZ~uD65GkDZl_3WC#$-hBC(yU>UN66cCxD5DU#aBs&1#qeM^0mvb3JWa*<=HYA4sR
zQE&};O^E8JFhw~g?BZDk)A=}{$pi<SoJ`Kg5ye>=+$eBRKRlzXmK;e0JxI}VDmouU
zDYvELj#6$*<sM`<w-j9PhTvN2P!NP+aLHWLGHGx_s~j3_wFi5V&C_4Yl4+$OmZoIn
z2XD~84lir9TK%LU(U25r_xmFUUD<u#1tYpmMHJ*{<P0P9*7eD_M9AnPizKx0f1;EH
z7d*XQw~#ix@Ou&T5UWJoHLih1hw=KgR$N0KkZJxy-q1eldt_*|n!3R`dLkXfGz!9q
z4b*K$B+qn2X|)1r8Qs;ut3k)m3uBoQ1#iZE=Cm59x4xGtY34?5FdAdsHA>ceG@DxG
zoZ1T=BHy(m8;yqE*`!x1@<7IR7W)6~jW46rqPnSsg-Ckg2JnZItBo$prSZ$>L$DI*
zm5Y}-T0O~^&?oT}^=x#_iL9QSOa^Co9#x~Gl2yo$XRvQ-sbNjCfXa3*1uCn6>Z&&p
zsFW<isKz-ec_Nlkg;6N@%#`CdBgDoDgi7K|YZ^v=I^k8OQvy{sA@3GaIq1xfGNorq
ztkYp1Ewho6B|Hqm84o&1Lk=7|^yhfm)jTl#=Xko+JXA|foC1_HL^+0P4RjJf%n~O7
z<x1YBE)!<~l{^<Y9!KTT8i&HSy?%I+wT@;qkaB5`QW>x4s83=-9`_kO;&^%Nr+7a4
zGZB`YuY?%@CuwU)B0-8g56DOc0*NYRAl3!X2b5vW$wo*_b5gl;rhfmCJ0<%?_87>*
zxN!f+>R8J6Kb;fv>-Qhe@;v|kzcU()?Wp7pfQCSL2e41(@OlsS|D}`KxmQ;&vP06@
z3&c};Dw{*%kGQVnzy*8f4q_N7noU=$_tV)OVrNDQUyL5q3WOTu=aE5QNt1&k)7|vG
zK@UrUL-KDGm=nC{0?(MP&}bZ@3p=Feq5`jPN8W+pDd9!#G)Z>L_YW9=>UDsLsTIW0
zeWl@KfF{Gs;ppwqnE1n}Wx?#1+aeyh>12sV1Pi<IaI!ATJzj+DNi^#D{XRRte+^qv
zVM;M}e6lz9)<JIrpHzg4VzHr6a8{`2r`<A%vALgSVzGb(&Us89<9QCuV4~XeB$W7y
zML2|%Zym0z1`YwA;sqA+$5(LrttOVWgu~IW7ue^707cHYR)nbLp11|v7*S+oM>4Wg
zrKVG4WalYHc8ZMblp2Y=YgkW6pki`t31Klb&|)rZfrg7h57cJ9>f1v^7B8&pz{sja
zl(724p%*Zf610SCG>J4^1X{vH7PN|u(9zY(b<ub<>W4HoSV18&JB*46<z<Bsce5NJ
zFDrzdcAk)zGYbt)3Y{XQBL#(|E0U0cM$X_3oKVA|=z(+ckOE>H8>u5xNCB<uoJq78
zQcO!^%j6~EuLkZLyl(Els~H+#8532c0?T-yG73*|eK;D#>ez@nZHq)oi$ujO5-DDe
z3cVcZY7&*#B+|KA1ZwKgb9e;+!b&<S?2fTL3B8n%19ZfZOdORE_a^;*rXNa$(GSV4
z*SF7WQcM$INw6wB99R|*lt}~Bn1|7$Uavp0U7E4NDDcP7&@c_sBFjXO!O*!3Cj*U$
z3|S@uMguCLCh@13GIkh(^z137ioG9<Ak;PN7*odl#vi^ZbYO-o6G288!Besn8DA!A
zU^}7baDya4BC;|l@Z=JPLw!(CWTg!6I`W2YfeeZ)lMy<0M2BQq0K=5Fsz31Xq<An^
zgMwp9b$iYP&x_yH6fbg28TX~<;lLwX2NE~Ol-D~N7xt(OSshbp?@^>k_%czDvGBU+
zLahtOXi`ihHMTzty&z%@sYjN{P;-nHStdftF*-~cyOd*em@;;?Nwvr-3!*ShQ!1va
z_IhT9DPz_qs1kgcERqt7qCr5GM$|Q#XAk?NN^(Sr$TBJCftSiqED@09ap{jUAxd0k
z$WlYRp?8(4Q^O;VFO$R)JaexkCWkDQ5~2Rcx$Fg=r#ePJmdcp)BOj;A^H3A-8M0Kw
z*`$Yhb{HX0rje4vmv+8<F4tfxyG~J@2*~m{5k#nQ@)Qx;j9!c{_s0;^53_+#b&Eux
z$?$NviEVVjUewUzb5<2m#@q9+J(n(sWoQ|jd7iFJa?nzq=6OxE5ku9=WgB?nf+-Iz
zVFK%FGN_EcnN=qTE#Z;2M-)U+lU>;T9&#t+K5WY9pn5h~>8NZ1RL|yuiTS2-g}s|R
zF`N@*`!B4lTfE>&2OOY4<lVDrb-8c_p|S$@JQz*JgoZ8SB&e#9ET!O^J7MY@@*N)?
zPLTi$IMOsY^`dd$k8sZMz0fGZ>8;?zWYagS83l+!0~jBRXNgJ6HO8yla55f`V7%4<
zH7%4a8+k$zx5sh_Juh`*#$15VGt*^-hB=Q=n6tH-WVMPYtulKXsDYMNma^%frPY+!
z1ZY_W1sf%(UYYOmZB7ZKX?+N&z;itR3U)yqWV>#_JgGzJ<D7H|hb%6T9R&6}qZn1t
zVQt6p#u3hjQM#L=z)LxahJjNnfLHTQ3V0{wyk`LpF$d3URHIW=sQ@y)(hEpYd8d&U
zz36*nG->B?becuBlrR-Nfdq$n7@&YQTQp|m>2gue(sTVFCmsL=w0y-u=Q`-jc595u
z6M-cap)o&(xrbPt6kNcXN@pjN7;6q(#hQ)WGPm?*qeuW$SHoRg;gsZjG;%do1GtKn
zO@w4tWhRud_S;%t<-k>}imzl$m4LjjL~s>n-d8zr6>HvCIdGAiJ<l_byInMn#$wwF
z;402&5Jo}+iQtkRhZ8xSp9!4~!^${%{cE|K0<3@~8(?ysEe9_0Cv3`!#>soJ87Zu6
z+31Wp7`XtfW+UIac&>(*VM^+kOIrZYrqD=dW>je=rl3prTE;>F1=M+q7@&X_wvO;Z
zJo9n}E8x(@n+z^s#T{Wd;=5>>J^@@}C1b0JR+jMyJ8qe$^&4#&ldwscD$*~hbG2dz
zvvXA{%5G7XJjFKh%a!nx4+8yFyO_2k({>7I^J98cJf=tTpqn~153b-PwU8JKE0Pj)
zG)$eMK9=ow(X+#xQ2?t`#`MiRUw}$)=TpM;?L0r?A-|qaDbv^UoKk*2pHimp=Q(Be
zf?hZ*)c4OA1MJqg-q}Q%zorPBhl+90wfvdy=O54E!uctz#eBI#+xP5d9yp9;5S#+n
zbHYuKa8DNJfU@TGFN`$fGy0)Jrwxal0VRXW`An;Oso?`~Iiq#l(J=z}&mVIl$Ex8>
zmOqRFJWC$BeXqWqE#cT(!}y<rTK#YJwEW%c<vQ7++v$3~ilt+{<S1KjXkQ^`H0;$y
zcFGE82l2mf;to6cW$JF}E0uE(_EqhHM>a(=IrxVnWA}K!na3~hr;l(A3c05$UN;Wd
zSL}Iz@DJRdTJ3^L|BI@8pF9|UC&UmQqbo1C@NBoiPv|{Ut%Pf5rt|4Gfvd?HN?o_R
z<;Q6fW7uLnrB!d_BHVWEF(lIq8?VPxO<&L@e=@g_-6nQ0lkJDVw>l}Fy^Z{@$>3~6
ztyEYCeh`cT6ioUm4h~kpQBRl5%5&`}Y95S+el!Zi>dg63()8AZyhVJdaV|W%SFd@i
z%5hJUTegQ-vB&uJMu&f|M>_w1`{@hc{Qytt`G2!zHjm}|-`lOG_4WM!b3D&~{{P~w
z=FC3>=5B{X=+X;@ULT>{-H<@e5Rn%~4dL5>)pGst_=?>S|5`!Z#k+M(zZ{_H_22Ab
z`OIEC2xqlO6SBxouW5Pl2R$av3j%)v7nZ(QtsY*;m}`c4R4^{X%=4IFa8_^BjNSeF
zdc(l6g}t&|{6cQ>UaoJ9_c8WiY|L)s*`0BSpI6$c3Ns1+edx5cHsQaID#~T3xrE|Y
z;psfFEcK}{bGiL=Di#;Z6<peUKb_^lGj>;aPCkEmfcoCh$X@|MKZhV+LtwHo`Nje6
z@^48Od-U+Y@Ad3SKSCG&`2~7w)AudGWeq-xE~4#^-ioB;!@5i`HICWlT}OfK(63&}
zCv&}Cuc0SpU(g!$;{pPx1$$|O1)UVhIrAfd%<M@^gXkJNWjyNl>FlFcYw6K=C6=F|
z!#MyHjnFWxulGmBKAtu39JY>wrGWvB27GBq3JeAW@Y&%IwzkIDS@<ukQ_}xN18(8v
zEP*NNhOQR>c~FBn3)vzy=zpbwl*xNpUVxXv<nqr{&i#xB4wu5*!RCu_7Qbm^|J8>)
z#?(SBEwn{1T^HjYR_J8PHL$M@sRyuK0mIBkC))xrTKO-BO%(XUbK)t=mBU&RHqYCV
zu=zG^OW3xwSj3Y?6Ge`s5x{Wx-)JjfIPh<D^HRERQUxs9albgpf6(04(&WF@wzM=}
zUfETT9**eLYNmYq55d5F*m7@=*j(QmjhSS2SB!ie04EH>%z|dOgb*?fa$VuOYw6o@
zPUs9=a*cx4oZocH8(KN(hyDDTl_JYroPfxetCr@nfRxJbIbq@504dcK+<9Oa!i@%6
z)986y;)re`6&y;(D(m`6{RZnx{Kz(H5l}@PocmSwpo?6riCll4-UpzXiK%d7;o5k_
zF<pI?$$>hOl5le4&EGC0guQWoQB<*S#`Zfrr15(+q)V3g?x#O?LPlbqC~-a>jd2?H
zufqW~N{L2F5Tyi`VlSm28jNlMmMW;LN`L%OCr51w(Tuuk+UJS!$IuA;ijubcSvtsu
zM$ff>aR4ydXaZqCwx6@kT}9hYY1`$r{dNSe7K|r~2J*<s$h+0#@V0}In@^A)gjZa;
zSs|Ucn|507|JeJx<+hS#VGujF&{M3|4rv2wNDYt_#ocUa2Z$dLpFk1~EK=&Pk7fmk
z9}+DPfEE@g>TGF8OoV;m@Ps{f*uEHFjPD$_BkWtp#Q1II`1sDRFqK*VwW?|@kdk(v
z^ELKv5va<_s>;gB%F4>h>UV|g+sY&l6l)&Wd%tu;@|P8ZN#hu<&W@Z8WW-EnWKc)_
zzKB_3tO*jPR##E|#STSX0C;ug>aByZLmwI(&pmvsVtyJyyF6*S!1?)xY^<vh;ps8#
zFcf*QWvuS>I%4f5^>#DYesITl)%=CI!W9a|9X;#=4KvSxn#W<U4W46WOc)9UE_4Ef
zz-;ZA6R>^MJ|>d<jOdU|vY4aEAdc(Z28S?$Gck~UGc1z5*>Y44rq>ZDq~y<v=%Hl9
z(6~x-OG1XEFqi0RLI9zB3Q#r@0E-N}(xdJ{4bHB~KHw!G&CjQ`4vmmhvO~OXk{w3a
zY!Jd2d%;&PBRIEWN+dl(Eb8%<V5=U!0rweD<hZ)2x;Nrbr=0);XK9@ch}eA<VlxCM
zJgsgS<9Axe@%=?KAV@U<YeEG+Iyz~Pofz)Bb`7pOI(S3oD>lXCY{<eml3+J)_ReY@
zQp;0gqDY|n$>}kO@vW5jAc|nBy{WJ{px^5t+Xk9b#?-z}ALW2yPG1VdPUSDV=B}}}
z!M3rMxo@oH+&I><caDr+WcfMhaKG7rPH`QCxdl2K#Q`$f#KAH*vr!l0MB&UqvAVTq
zk-`|zih+Unt!V(qCopMDFjAdi(16)Pqu-G2e;e!oCtctv0YwN@iw@vw1gFuSlE-pL
zm*^em;`UZrio#$VQlkc0$F`oO0FImC>yzFq3G}pDHGrgDq);$U7x9GW;I#g#$@~zB
zT!&b8jas9+)7(nK_K7mzX$n}N%Cnuu_O3x22F72I<j|m%7hUxR51rK*!>s1gHI%Rn
zS#WJYM2n$!?<v@}2L=Kr5Rj?|1R^pJkSz}gXjoY_VYmZq64^cZs-w#}gEYhy18sr%
zN%oP*1}m}7j^rL1>D97ANhPLo(<D;HUSso3bnr{q?29S^GR-XQV=^n?D2-c?KoC~a
za2UxB7bxc11&l7hfXj*aYqi>B!`!JsL;yLB8qJoOS=C#jT<hS0SICrifmADae}Sv7
zTW&ZGNOx~exR(bC+?J<a4v3+0i4Xzf6@kUaQ^2e>o6g@wt0g`!sTjAxn4Cs>bkOCI
zjG2`(pz40g&g%oOq+VOKl_2^NNH5R<r4cS(t)l6Y8|1kWj)wYxs6x?B@I2Tlr;>L`
z@@`o-=;2W{coT3ms)zu!B9M=@(yVT?-Ug0<%z}8ZRazPc@QjjX9!yOp*tP}m+AA54
zvIB+)3;+U{stbll3=Xs9f&r0nwu3O+E*KCQtUWuv_Uuxar@r<um|bssb`j>8LOMpO
zMw{#&fbF6#nQ!2~Sy0~4mASaQHJK-=93cdtws=_d5wzKZ3*9(S5(aX+c%}PTLLsVc
z6%5VRX;eEZOrzb$Wf}se>ZqQoU~DnhpbE&3X;dfSA?(Q%P=f85Mg*o{g9AnT<!P_U
zYJDE;Y29>!{Y3?%jz#@f2eefPP}PVI>c>G1bPc%fA-hT7s#dsOvwo(-v5BH@1sdF*
zS+E(8M1J$6Td>%UM1HGtMDus-`Da~*f6vUXHOS_RsKDGzOGPfMITbf3+buc*Zi-sT
zl&SaAYpclb?0_j0?4|a6u)DDnA8UTM-C_nqdw#uFKP%ASt#$xm-3xqBXH9Co+;Rbc
zXrS$M05pC%COfQv88;ooAfi7kcmTpaM3`XKaTkaZt=10GZ`Qj7ZliXz<Ulb!+d(iY
zD>K!mAPB=p^(GDJY|ZS@%rq}UW|wBB7!@;5`c2+LYyK(Ex3CoC69G+a%odh{{C1a>
zPqP$oKY%C^a+w**2`cPb>DF)%#(~XVL5YOyfNj;AC#@z1*0q|*bx;)5I7OfNL19s6
zH<8%O23gRcQo7B#CUhOtQwqJ~g+4quDNqH?2UVxRplxSK3Y0r!v<k&y=i82()G_Ke
z3-$=mDipP~wvW%EZto;KA-#4UWm!v+A$S=c=B}uA3huIaPl8T}jEYXN6V{U{m25j&
z6m4e<ngurxD%Ta+xu3b!0ibTL&5aFYGh{;p5d2(bJC$2+Ddkhvb`HVIr$h~O1zub@
z4#kd)uzu6kYX-{0tq0~Y5Ch|DIXxUxR(B^bEbM^LT<x6&V5LtLAc@WbXI-;7XKw78
z(HtM3ae{{IMU#qku@PXn-Y{!XC1p5dklj}y>c)7DOFh}O;^olv@W&ab+X4-75gpOG
z)6lyr?(}+LLaVs#s|SY;>+*RXi_)QjGOGz|uoo0fBeKO7T|}dFt1l2+G!7LM<GYB1
zlq_g!RbgjxPS%iw!vm_mIA*iW<aB}Bq5BBTD8SB(3(Cot2f7*l+>%7JX*VXclydla
zmR*1<&pVoL*41FhUI+dgeO`MQ565xLo-_`~*YhF!9?!=7>oOkh?#S=et!MJb*3+l*
z$J1(60jq3nE5Dv@ZLwb$<R|-jF27#MuNTWid|k-Yg-9j+;om0W+d_Q%jekoPG5@xd
z-xA)cMCn$db}Lc4m8jiHv~DG8x2|NX&*j(mq+7pMci)PiiT3kc&pp?3m+?sbJvv{<
zH_321|8>tuTC5Wz?P3&<$HTdqJc<{SrJ0<hktQZmQuJauRDPT*KSpZKNXbc-L-~|D
z{v0bmXUp?<N=f*sVl}cyT0SAoNDQ;uwnS}Pa<lqCm|FQYO@^1^XFOhx2Z<~=UJVhN
zCV<ImFdFU4U-8xTzLqC(vur)#Y9MRF-_sSJYL_42O3i!`jjje!JYFr<WG1de=Z4FP
zr2TR-R7T=zIm92F#goAd-bG%5bMd~Ks7xc4fHUax{=M71Z?9g}kaoT4HN2HQ8$)>S
zWHDUKgn^HOb1fB}9HfcK%q1(3BL@~7M;5H(W%GG+*&^mah;LqlyaOX_JdCExefndx
zO4yI;F6O`!@$4t$v7f%a@-O&ud;o5b@ih5Jf6=<|3Hl$wcthkptx-$j^`_ubiC7m)
zNj>T=+csZ}#<I_(E7SRCe8akhFG}mr#@p$oe8;-D5qUiaQ?Rh;#_O4^j3Fi8NcRiD
z^%dy@k_LZHW{VFjMtq?VMdxGEoa@<O9!*C)MU2U>J8zXVN=PM5A1<Dx3>i<F%*Hp-
z`Nsrj7Ee5XmqeF?6->;}!5q6zzOuR|1A=8Rgh2JbR;zC*on@z))ftLMqYsOCl<K$M
z?TlW34!{+DkR)&*z?*<9so<17);o*kS%+=?<rIZx(te27(uwq0ELYm|;50dSbol||
z>Z`03k1w=2MC4w~W)N8;<_Y@_uD>Jb=5R4zk&=7Cqxs-!oZ{3Ka2u(991jvgR#Zjc
zG5MhQh{(ZlQnI$FfNd{6T27*vj1WFmp9xIF>uSDOEThqMH8`J*9nfoD`$Yu)^%kfy
zp5PDw1v!9YRD2&i{c=5+=>=EOXnnQxHy*(zv-RrI0Vjw?;K+zOXkk#Fj64niP@N5w
zXJ{S~E=i%7#_x^cV+34+jF?{_01z@7&c=f{V?32INKI!ULCW=5DG%i?#&gn|88WJ!
zCycVJy=K{qEPjf4O9yKriQP<94&Y^tV7iLPihrXZAgznW(<EMvXSsVQDxJ=WL?*fJ
zK7z0Zz|l<^TxS6rFQg!(4{O{SquD-*4yl5}mv0!%4qgpxQ$i9mp^bnKGz{euX_(qd
zYr9-{iFjU2CZwq@6ryG0xpSmZQHMp@V5}kNW7#YvTS>&{#l<*dx`-CJp=p!G)8R^G
zyybW}olJ*V*2lQE0n!TlNMR{En9VZk)GAuyb>`{CI?hqWN>bY)|7d|n(_zfGXmvS=
z$D_1Zq_cspaVfEA2t2D$gq^i66tTYIBRujaQnh*!;cYDk1ypF*p_5uyq?mujlI{fA
zjHbZ)CMQCu@|lQn=a4IdFd6#67`h=*+k7LeHkd5j_-+v0w7DgNF5Sqk?j~1LAgaM6
zCK^hXuu=#v%Vg4<5Cr7moCvAeVmO!$2g_`Qg%nDL(dG2wGWsx}*XH7P?)L2k1e%J#
zl$JqJf%f?YFyVVks4hC%FZW18>g9pOhU+*6@dha#irEXtW3pLU5zRQOxNN3T#MRqW
zePd+?l4++59TQtNozOnktbns_g=5WAdb~dqHBL^!HGyoxn<2bidD`yL2h^)6dE*pV
zx-cqGfd#vffb0+k7c1UdP)BCx<2nD03pi22lL2fhQaen3k<+66k(KEt=Qwdd->hSG
z4U<^RJ|c=p6C-~$Y$4Ycajlqls$VT;u?+($Jc}2;m5*yE3`8rs9xXv;F!8@yUuCR?
zOrWj_Pmzrl{NvtW5KW7#W~ddBJr@+Bx!7^m69ehUqkrms=OuEe@>j`#Zj2M}3?Pj}
zi-cWYFp#X5qk(IOwWnx+d%^iMF|;?B-2>KqPh%c&AlUbcW{CTzzNN$h?j2i|E#{L|
zt};SVOfJVU7!r6XPnVRdv#LNBxULhNT3Nc*UO)t8bUj%qFYSS#dVSK7o=yVk2f}Tc
za}0n$y;-ZusVE6dHusZ3B5<#!+*YM@H)B{<C}23R<HYglxqD*f!FOezIYp*bOxq=@
zRlQ`OXgRV;Uu**da@#QErpc6#tf<GHp>4Qy#Yo3NCL-Bz-K7-_!{&|Q6Q80P#PQ(c
zExTl5*|0ZS*&L#iPw5OzH?_R1=gEL<14j^>YZb-AYaa^i;)J0B6X!Bf6?nGfW^ccy
zBe}karH3jIlt1TO&6!<KqVtc-!D{848&JYGIZTfC%?`s3ZOp&`Lr_>Uu?g@`3h(HA
zZOk?)zabjh`FawqrvEa|CrFp)!iI85*ePBtt|<SIB4m-f_^9g*qJj*jAZo{AZs=F*
zSu%~#R*>1UWVpg+aa=p^`BpieF0L&a&1x0B8-E1p8<EYud(+JTjiO}tJtZsC`OJ@C
z58ioYkb2*6AB7}=Zu>P&QXlFPKuTOXz3LxzbN80_sUuDlFb-rdzslHo11Mn=$|Hm;
zaC#wsEGH4QQ>nXSqLAj<9Gl!hW+WkEhLiw-#w=sP>Nkm))+W$SCSvUb`N19hUE4&a
z*%L6(J>)MeqzWQP6DB7-U;^`+i*`%^InSjYaoe05y+g&$#5|oZ*NFgwiM*JA5{mqb
zCvE<E9a=a=7z9}r2TGNxij;Gz;g!ogB-*Z=Q_+FJf~cD-FxVw(UVOwctBaUYF*=!$
z$?h>3SV|#M=Ny4ka{`d+Dw$q0aWdy4tmv7l3onzU&#<9DP2%x;!YjY6J*GyIjc`WM
z)yGvby>d<s)H)#Kk`A4@l?LGCCIVnW@T;YJR}2C<0hM_uf6P}VDuKtV$8!NN+Y-_t
zSq&35rIXdz#2c$k^l3v14dU!>DMlL{#Fbu%aPKt3ZliZP*G@0ld|?)4N5gZl$$_BH
zSe8Mlgv(d1nm5onjBTg7d=dffKM*x)baEHj1D)%u^kh9BW-Zfd&rnzKo=eExJ~G{W
zHN61ul-Xi_5x^KO)`{MPFNIQ&gESKq!pV?tK%H!9Q6rtUI$g&sNc~pBa5Zo$qL7EX
z`D`hcmnwM3WSYP*)u%Wp_86C9s^Ey`3vkz%%}7$7;Ze64F3jM-m175kU{~WSh-OTO
z9-Q7F5opN_XnU4z9Z(y<lFNAPTd5pnVSh&CWcJL}*ph7*U|&ooA~c!qzTRE%gtV!0
zqgoIhoNPef^yyCt3e4r1`*u1JO7fOu9(w?MyjW&%m&RE(OKY;c%rY1<&c+|Z<=T)!
zd5(>e&9_$pSLacYmAfr<hed&46q1|wdUfA~wUbR*o7wZuSqaIji&etYD5J*X$(YP{
znr7+RU1UmfYnkH2$Co@tiM(5TiVv1}$(qW-al47cy9cX>ucR1{M<6P=iZ__fEF&z2
z%;#yNHb(9p!4{JYE-=cYf3|X~n7(C9O<K|>iPUqVLv2przqnX&LzV+ghWn6F?2aOX
zl+K|Aif*%lOcf*&>-<ARUr_LJnb;5#0z4wKX*!$fX+&(uguUR=`3GpSmLwu_T2hB<
z8i*k7$0}&Wl~=|>n>E|o?sUU#E!UfFYtA4U{G*y<OCD;8K3Xm(neu!OJijAc@XZRc
zhl*YF$m?yv7;?RYl2=Y&1-7R73VOuUBk;sBih`@lY<e|C<Fi*F!9*loHV(3IUCr$)
zq6r~!aE&<f31(U**{F=Nwm_l0Yb!klF^s~uLZDjz_#F10>LM;M`7ybNv?u!ytlvDf
z!SmBO(TG;$=fu}Kj*)6V<}n~5LplB>u+ie^pqzD<XPzR%qGfR)pn4eSseXK{UH61p
za5=$lSt+TxBf*3q%G)CT7=0kESbW&XkCKT*tO2LhWH3w?eqT&wbwhuGWSnM~+*?N8
zErkk7^S~1@8x?O8o#>_>1S*ZkzO2qVa}Q3`U-uqd+g=x}XH2WH>G#4tke84}_ybfT
zLT`J=m@__smRdOZK4a3IeATHXu&$QVY_tSOL1Xh_5%}=z#)%IVM*>pD1qNWnMgpoC
z%o4U;T#Ux<YLeSk6DV~!FvEYi<`dk0XJPPnTrX1y#`BBm9PQI_hIKQeeD3PGUl|r(
zOeV9%U<Ag>TwWin*8@Mag;kxrZt$_4*NHH-Gdt5MvQM-rplf|plH`?CQbw5D=xRw;
z^l^fxVjdGP1z*oe$JU?;ZBPuwE?1NsH!}EXCfu?h+rkN3TA5AM*yWwCBfO%;aON8=
zAhvtB^r5iJrmOQChir111i8=4)e3fAbGMHfK+yR}rzpQr#*bkrig<W^8w_H0z|o4!
zJ=$1KSP9VfHY)-5hVqq=i!vcBSW|N7N$G)w!t$~;h&Dp!h!A7Nm<H^D^e6j0e$TT=
z?*!B;v5&ah{ZnTL=Kg7I8UD*F8Yl5b_MP7hVLxGrXXCVk#Bw+t+d1z#$>DIx6c`&)
zXXnf+<%AG!y>Q0~T<G~t)Wg?NKRn@=NXajZj%Nm_8|vZd>gtmfECoX9)=v9PnG&W_
zApW2H#Uk;gFR2c-D|OQ2``1*Y#J6k=ge$iL1QboYa{@;foa<NgJ&x&kERwi~vUFNz
zrI9^v7T^t>qKfLtZOhj^uhR}X!-&+g5EQ0IrPtBw<JG?WHCXXqv!#+neqc`OD%;4^
z$f!Ib>h^3wq=WA;lyEx0s9HWN6_3eWU->Tek+RWno~Ud4Dw=$V7S~R%Ou0@GIFMY-
zqHh<QYO8Q9LkW#{9&<r6<X*9EZXQ@s)UEKwsI#~{jKO=-2y1ib<(C3eh@ESNj!vUl
z)`^?g1I5>V_fShC=?x-!5i7F-3F<dAB8=+h0Q})U{KlpXF9-Aac*gDkWjoWe$N@gr
zS<AFhWe;b3D(zG9R-a{d{bPCg(K-|TS7cXsIFN9AH2~w*2pNnXW?WB)WA2p#@{QC3
zj3jjr@15PvD|nQyVJhBn^9mHDo=4Ihhh4iG`pjZRVQ~SAchJYjSkSV9UVhYWZKoGe
zZNTh<O6vI=UK)v_>4GRhr~^mvz+pshfHNk1#GWeUObLcc43nXqrz1Zr(_yflCH!^N
zqAaJF<;2L+)?Y7Xq_wkgK!KfivBmT*+ly$5khl#U;2&K}5_baOW~iY+@!K~}r&1+|
zf}Ss&0U*qZk;QEk0r{q9i7g$x(Pd<b{R4b68pBHk5GN<417aTeq8F0-7ktz6gzyM1
zM@Bm|rXxRC=mbzK7{wr$ZOU-SOGZcnl_~6-7OK+#sp!mIt(Oo0VI{6#O@{V@YBm1I
z2dJo1K<?^F+8dpJ1SLQFtW30!ln0Nb6*oAKa#cs<%nZHt?9ed6(;?B$XK-mu)I}=d
z<g=8!N2W2+TGsJAfNQegUSHG{;o#xrhsQ5)hJq|Ht8opHgNP_z*}T*zc&QvHGaZ@*
z^}-LLW;3Y)DQYDohkgP5&(|^-gUohu(c%;<C4k~rO@u&6iIS3NJr`3D$`}6b%VaE1
zlJPHXzH*5MLh9&z33p@Z$tpdwahjLjjmJxXHW{1RF;=jZ25n#y!wVQI3~Vsfb#*FB
zo4U7>%sdAt*Ri!wNf8UcQo~5@TimR#aiLbk)iP~Eq@i%!&CQ&l_UpOUCYR-%S}PLc
zVs0)R8qN4_EjPtP0S~=6j47GqrOWK9l&h95hS9Hg>O7o|HIT`6&hl)lyAc9QK#|Py
z3aKnHMY2hzg{pAyfjv@%PI)DehR-%F<7jxoXxK5U5kuU2Lp5wjpqi819oJ)rY&2p<
zh>08=Y767y?2QQJvYMJxS&Q?LU9Th7%_i(6n$8@a+;YP=imb4U(275iO|&aFlbuEq
z?P9X?TCxr6XgqUa6;@+~AZz`;u$vx=j;7LNow~H_Vg&Pa-O7vUJJ6n4V!a5hqcg4{
zXY&}IpQkTCI$L3QYPt9>jTV%hOxG`8>nIcE^P4hTf|s#!%{d9asaV01;~RL8JaG~+
zqqylTmv<P_6i`toH{eyfS<e&-n#G-TRLUS7phUj)j6-iqb0fN2X0y<z08ZvE;>CLD
z-CP==PWA;G46^{K;z`7}$=I0EZJ#BANIo4+m^uB+5xN55!Xqx*K1yVg1#>zyid!(H
zto!VSo{7^Qs>Wk7Hp0w62|L<!*qS?IwFGGK;N%lf#D7UC#OBQymj~Z@ipSZ7vmE+@
z^-fl%qB7fJXB??ZSsv_p*B4%jbYX~I5c!IvuYhgZeXn+>PCw^eFo!hR-ElTFpH^qN
zgy&iGw_P{!%$6v(0CT9caHnkGS!RqYX=YjFMz`AqLR{!h6{lt^kF*y+w$iCP;f}ea
zT?scWXA((<xlRk%kkJ+=8teYawW^tyw6cd2Y%^MXG8bJ1kfly%;GtZ5crZipa@=bv
zLy0kNXPFq4iyD3%-OH@b3m<pKso$}2>uTG^J)0nX$wq8^T&!lf4VKY?d)%*lJM%&q
z2emPxyl3Ux`}q~n^gX!EHeKwNnsnISP++Oo%VLKu!cr(5aw%6a8T%{WX2nuGYn<hd
z&~+(KMQV7v->OymJ>YCQm^YC;yJX*GBrDFl$^`c%usg{F40k{o9SY<S494)qBRz%!
zO-mx%rVOm@nxBK?07x&v{amu$TvDce)!8MFuZyN*;W41*^ped%tcTnSB6DxY;1zF~
zT$&IRGreXuDy4YOY_xY(P#9h`e*MAeL1Plb>PeFuw*_I!*CcaH>=C}R$!q%lI~h*7
z<71oQFgC8^J<ocRx<0UR@x0rk^flTd4o9|4Q8=aOS87vtYDHq|Uai>>Jx;}DORO{4
zgoAQ2)`SC#d7IWJMBO)Pt)qHdwT&fbT#!!b<w}6zT<U`suqvPGFM}@UvNkC}qt}l8
zxH5<JpwYR^Qt0%PZ|5TX{_lAD9N)aBo-J3dvsuSMh0Y;g0Erd>HHX1D$*)}UC`lJ9
z*NMH;D19rFwjKlARg`<6rJNHyb1eH9A}9IGCp)rB*l&@#B)qb~D7w_rW8O7R;mxYA
zuSV~qtM~aNB$>A<P8Uc<W9s;V2W0tn3tAE8XXMKEwST>gw!rS1uhvHdk_@U78xDRx
zRM}M7fEv7;)X2kL*%A5hkQUSwu{*IT9=h?5Azv)X1YgYMVnu^^42H?{dQ2_8M9&yn
z`;%-`gBut2^M)|Hk7qT@6r&BHwq=3R7J9do6gGD7t2t+vpVECy-8=WK^vH7SdBOvR
z!hlm!Iz}jc)!&K!9}_RZHt3F+37(dJ$&d-w7TLole2W%s95o@;el2o>Um&=`Ct$9x
zo*Yu$TIAcy@LNk<w&S{qY^QtQPV{0;!lJbSy+t4<YNyF;r<5=};*ui>6K$$RR^s*i
z0lg6@@8@r%fqCxe`%XBwT{rc$odbGw=TXu{{P&5=LZ$?MH#c-J(kw!EmV1jM8il$3
zd|8Z+m>_Pnep@N-$8aL})Jp2_WN4L^c=G<v*@MdKYd<*WE0-%bas1HA<~!T8$M^6b
z#@pj^NW2gpl3Pd?u46+BpV-2#dFhF<*F;?FpSAHeqFdW-zga}Sir){-{rEL?VZpOA
z2e-(3#%4i&3wqOn;xcq*L4`fp_2Xy~FJQ7e&wb(gHnloIggSZ?*to{6VC9#b;UfIq
zkeaehT*S`oq{{QgqcJXB**V{g*JJ!A`^pnCE`+IAWzHEqu{fP5qB57OqvrOtd2t&h
z%rH3K(a0MGM|a^iBlqN767=vrM&GGUGGC43#CKvssjf}0gq_M{p2*;P2dmj4i6&Fe
z;Q<oC@<2k$%h7z)!@JM3&YEc)FJiwdo?`x*t<ITTTcu379Mv>V)`OXdL-hqvvIwCr
zX2huHT$Kc^ot9#EHuu=8zYuf?Nt!lgvjT|7yN1)6jDgTSF>)=M)+dZMV_}P$KjG(*
z8_vA+WHXGr>KjQe#~byDBtto(L2<H9$Ja}CAW0>uJjGz5mANnwu^6Y0GFT;^qa#_1
zqik}s8hTBh!x{hLsy^NT1zQ28&elLgpDcqR0g>lId%=Lad9c3_J}IT2i?)8j3hty^
zTa@cNzdf(<FTRXm*svV;&RF-XrYppQuV?eOCS1Z+09jE?m)E<QDB$gKj07C;m*zP>
z)!|sQh~E71Ar!BJ=nXO-c2_QJ#ANF^PA=gdZSaWvQ#Ma2ZSTpmH-a+jab4l?Emf)K
z&Fr^JbVmzzW=`;m;`Aqfe8XRbtDV{dYANmH<J{jw*#Nqcb6A)ZJl1xkHYFQ-Y4T^#
zQcUW<R>xmxvGJqw$i|8-Z}g3iOx?)-?26BLo9o$?Kz#%S=`DudU%7qk?@+l_9H@_y
z<g!@%9%9jY=k{9_SC9R!A$GUbwLvtvT~Fm1s6G&FkjXERL1PdyIVMuG2f!mF*(CRm
zfvp<#)G?n!fbB7}%$Lkk?%(S3F7}zg<|6y^Pk(&=!+oCoKt}ur@RWcr!YF-pwU|E{
zexW)l<g>fG1AnW}o>tVq<g>NA^Q`hu)t%kl-L382XFDXly1Ttw`KO@rh3fZy)(}%O
z2>$7}@%$axXFK7t^8cNFes~o0yNx%GJJaEKz8XJni?~5Q1_%Ad<L$@w8C=i07d#5;
zi{;07dU2Tq#d@h6Y*n_l%0Xv3e+TDsK{y^>&KI-A#YY18gk&V6C#2L5pgPZov-M~k
z{KWJ(qPab}d|{+t;i;aMejiwJv>d&Q*b{Y6F7F$;gjJV=M9-IsIx68?m8e>co(7*j
z>FL$KK>Dt=d1Svz)WbO4%JcOK4APh}o~5d)VpU3+jnEM?=PJocsd-Fk(uot_qzRT-
z(#W8#(y8F!lS)N%uO;22eC{n$zkSp&D!v{Mxd>9LL4uLIMCtJDs_eB-&dhFMRUE0>
z8cigoh=jS2%wxQWMK)pP_?2$HF}E-nk_8ROWW6m+7I9FVLioo0U>f`cp}ro!fa&%4
z#r#~NB$?lTUxGka>gI=M!==ji1cy>sl>Bx27V0Swx)lNHyXV32X{Q5pB0nF$SS(jO
zjp8rx<i{_r$X0bFoUhq~FDd{k5<LC$J@S7g_>Fpyl7}!%hXL=20eB(OLx1@9!+5j_
z)nY%@VnMaoq*`pFS}du?D5PSED3K=dcnpw~LK*-P$=<7S5T}Sol)NYc+6))75&5d&
z!th^$fHy2E*dzW;@y(ea#T=xDAV91L>H7HE45-K9;~P*<5kG!Wi?K8ol#_LJbfdwD
zR2D96wLsT!!WSx(NM*Rt4+ZY_>aC=S`&y^2##c+cReZJ7ps$v;E)nQ|S<G?ZqpN?>
z!-8pC!5V_0WYH~v-7H(Ncz5wQLFT)I1XSy0k?8^m?#aJvd<p9U1fHU?>14@k3IFIi
zPg(Kgc=xCgwU66j)H*$`LzLEfvj`i20`Dm$CyX$e>pbm~i5T*q#5Z6XeIjXe59f^-
z#s}E--kYe=?AN1S`}k#4>$G1U!>p>1RT^fDgnoh05IYtjp-d?S^b-YYA1gp~aze*C
zCoiLBuUCW)mV^7UA?siaM)vz<!DBf2=x{!6wvW$h9Z0xXB#Qvf!ht0}i~d;Tgy8VE
zx{T0fEeeN>;@FU^DrgO?O;maTU;hhLr6M|!iub7|5%k{BX-T>Zw#0>Y%E|GU*oD1b
z*o^A6`e8Hbw2zyFNB?uPEaxk<j?2R5YCkRH1{m6)0Kzd&2GiMibYCO%@ySv1$V%uK
z)|RVtG~W_XoX42PT<Dj{C9<~=2AHXeR_LnAyl{+9=+6%Fc?nKb0;viqS`{@pkgOt+
z0;OURXwwW237ef!T>=KP^n`i<0zE3F;IE`NQV9MSILZ)VKuyyabtw2=)?VV(qj3UT
zrmzD`+)uHhYZe9%MH9%(J888#C$&cOs@d#D^<KRMg8`L+zcdkZq%sB#*6Ut9>NJlb
zellpNDagt`A1HRnW00Gr^8S!6lUWD8-0+xz39DuDB8m`Fr3xhvf8-hrD+B-pLAO?a
z)eNKXmo7+-{woShWIUT7xb|HQB9Q)Qc8{-dLL#@~NimrtVkDS-g$XSbFmyUV3Q8{P
z%)sjM&kf+=)C5V34Xz;MojU(Q2@K2Iy#hfGrYrpY_yxT+QYi;!&q`cmGsS_8INSl`
z!DwMJh~R=~z%3OOT|kYv8hn3`QSYqPYlKC0PL)aqSKYY|TMa@gaA-ynqOdNm2a<d7
z$|M<loaw@h7c;+89H2)fg?OAy-{D3S{8(572TlIaSf4m>Iv+He2d6JNC*TnO`e5`n
z7%j%D`JahYrUgk;1H>0lKDm$Fc>i%Q9L$GfxGfqam*ZeE9wh6S{3WU|{QhvcAXEbc
zfb>4)VNKMbvMD49GSwqZowF#Ygff!>87{_Rbp9*S7pRem1Q-fReouuAkV7o-Z=!?K
zUO$v$ww_GDjew4dZE-`e)dx0fxkcK#z)@tS;87s&Q3W|yy6?>9<eAeH{L9a3@XJ;S
z10_AAVQhV0+{MpJonSNS=*`MQpw(BQk#&M8Eqnf88R%_=6fynb2b*R5a^3UF&4c^6
zvh><-c6X3EfyohULg|=UqCl<|<?`s($IEo*KnEbfc)Ck6!QyB!iME7tyqVOw8WZxK
zjlsJ&=-n_n6z(tMF<GNWQyMAVCwzDbrw|IVq>#4q6y!bz<q6n^(!z+ZA%$fQXu>1`
zz$=RRdxWIBX~t)t<>n|`4aYs=J<C2nQyB6s|2<^jA4SMx@&{uYPv=Q`|DRa*|B2ZD
zC(ivp-K_5WECER#OF+Ub0g12#$YP0R2@q{X6*kFb3rL){08Q;rJl24eJ&Yq*iC2EL
zZGdMBOa%@__&yrM7c?#tct@emDBDyH9!mUl9z0ZPDF=)+LP;DaPAQq(1afwgN&{Xg
z7n5a`w57BllswL{w~t#VR`jw5&mRzuWWrB$1rwEikl%XkOuJC%U?+2l3217b)HoR@
z1*NBoSR-zF*#TQ)E&}ogCzulDBw_aA!mXWjc@(|i_gx@LJDfQKwnX2k77EXm?(lpu
z4+y^JYI`^U6OF7o>mHE8Q5rM2gm0=K#usKhS&A($!e3^=U+VB5vfw}H@PY-fW8xRj
zR@j1K)OY0wlb9PqqvF(VF;O&s*3b?2?5eoRziT(xC{Z`9&<>^98OF23Zafp~A*D;k
zE-CTtlEh~AtQC^YQsUT-Su5HIzYY66t1$9uVwws@)UO+^ekCdOE73JA(%jvhQevuF
z1vX<R>-iAW8T960f(KV(`+<h02FT+SW#r%DFu!-N<#+;OSn$ojMCnry)tt^2m6TbY
zz4-CyJ^WEE{T5R;w-kD{8a(bmY+yX68jdi;?Tf6{aF7sPi_F0IMn|dUNcj0V-9|Hf
zssM37j=wY%FwA<TTIo2vY1i9N6;drw(jaAlOB<n5rYWP8W(im3_x?R&Y?IrMZA!}$
z0~^^!)=+Tx!fItyePknZJQ8y}1Pl{P#wx;#$VJ5gf2qS?X2E~Z;XgRxb^b8d6iWfI
z1c~mh9Kgy@C;CvQkXqI!+rqW49OiM20gR6rKZB_7$UcwwXfcmQ=nX`%f>DQkfTp?I
z4t$H&yb}M)v?i@fxb%_DC71Y;NH5annUI-hK<B!{hE=vg*7g&vO{uBXYc@%Bt*HN>
zp?##jIXpt5xo#Aw;RBokFn%Und00)aMsQo`!*rBf7E8PzXU%$u^+(mMJw!;icW{n`
z^9`<t#Eap0wL*I`Dfu0vK<}gu8~>wP?-zFRWUf8X0&(EOPKDKNgErUz4Baow5ym55
zRM<0$lFlt>V_RwFKXQx0VxOyQ8Qe%F>)9+_|L1$D)y{u%O^?Z`*6&)YEDIU47Ci!|
zd2SRm#=<<4rJ<6T7P0T~8Zeh6=`A4jD~DAW{@vH*H*_#~g#SJl2~j*?(F=;|f?5*J
z1r%QME>O2Njouj=&;o>0_cCG`dH!I{&7i3IYStF&j*!~q=(`*o%mUOvR}t{!X9q&v
zSK$#%$-`!?(K|W8!W0lq)z39!4t9zY9*vYZS%*t}ZFn}0c6W+`Fjx_o<M~bsNl{|Q
zptwXm)GcY~lR+~<#T<;DT+-JaVm9t-7Mv2XEQJ*coQ(*sCv2$T4`pb-7UyDYv{iaJ
z+Ui7-<wFA1@dVutoz6AvupEH7J1Brx|B@<AC$qDgiL0C1QY5;W%fl$k!ARz-5*Hhg
zDAduqm)N?O@b3Bg_r<)Eoc1J}a~NzZO$(xuS9VpDQa)Kpb$IL^XZ)E%P%Nfi``6~)
zRkkVNaI13rIZQSGhJvG3e*V0<<D8A59UgOLBa?iMOpHeABeg<{`XK&@YKP{8uZ<Y{
zQfANgy3EMpfd$3pov~lQDaz2Nx+dl(X}k;OS0h8~^W&iUmY?L9+il*bfKSP^vGVvN
zY!eP-7J}nN5Hk0bRUqz2jDnBj<O#D^2KP@_pb@b31K@d`PwsOah4H1mF>VXJMpRq{
z{?UHhU+QD{|1)>$CDC`tjJkSq`6cS``~O#-KHJ(d{QsX-o<9A<|NmQjzK#Ds-MU4%
zy{n%bb!%bN?bO27N$)5+jG}vgqB}(ogy9B0{7BZumBpVv9?Z!EUc$XHI`z{aACvJS
z9@)I{r;BtNcl>@~I@iM{5&hsr>*)bwP>~5l^pbz8+6$UtvmXkGepn-7j{E+zMCLF}
z|0u5gYy;dri+U%g$BpQ;D~hOz#d$dxl?p*nEEeab;QOFBDuFaE1wX*olEf&ejxQxE
zzscMmjwQg=bY2{gC8kt>O~6t6nEn#oiXd=6G)R(odcID^Xd5uniUcVUpk%kdSYKR5
z5orZ84+Kg&DJ4ooQ887h#3^MfJ6Z&P`y70@gpFFUQj(X~HZ7qLV)bEgzP(jsvCo5l
zzE>bbU2kudMAC~F!PZVG_!O8K4};20rP6AFh(oYFfFc+ikQBBQ{PYurG=NAlsi$PL
zt-5Qr4Ng0hwsBNyGS*O%-DEp5xDHudlmcawc0P_UEe%SDk^;rFG$`5Tr9d&w1f8X^
z&P=S(NXQ`h9F(A=2}WC?kxoz(>+X&dRB!7njdf;V6;pMdEG}1~B>sXjdLL2~9N=bJ
zsTnP$R-7tznJnhAF8l?h^s%*;nrWqGS}8^*Hj}tG7>#FxkJxqm7G24T*Jj>Y$y?3f
z31A|kFUCb8Q_2v_u%y94vZVDD7AW47&}~EoyLnWFX(f5M*6Y=NLHDF0#CQ#g%*P3T
z{J=e)@aM0Ux5_*^XopZiv3lBpi9x>0yv9*2BKhb;N<?Xa3e@M%gIzM?{~G+g0{hw@
z$@C(HJ3swIGE0xE>dZRsH|tTi*^9{ELbbv_9SH%ai`}UDbZ@Uvx!K!mJ(F2yo!T+j
z9Z#bEVY?L;3e|1fOVao7#UHYH&|j{f$Nbr9Jon-@<VSFgqFBVQCCq^ZVjhF6cnVrF
z0r>Phs3I*$^21=eTC%*ZO%U7@&vFAcsE@x*t<}oRCrIFjAp0nk(t=-E^>5){QE3Sc
zu!O{px4rMtXIH;y&mTSy{$AAk?(O_w&fXHp*9;wiIV>yHRc@-(HzZGUAqNZ>&j~2(
zfUy81JPiI0sP`r5X)4}4N1Cc#q9%~R?8D$Ayf2UaTNB(M5buHQ?0wOAMcf=&T?8_t
z9+V_s5w?_79}Xv8{(oQObq6eZfU|Of1&go>HM1gJ=*j^*@=XmOj~9TCK>(2bS#^I;
ziEQLOC12A$RbP9@+UVoCSe=MokfVbHc`>yl(jIDEywxpzegAhoi7r2Uv3uYA<KO>1
zC40W7#{O?-cW3vH{ol9ve1rX;a#BbX`d7`~akE2odF4KK5%xta1GYycWe-N?(E8>F
z41zNnUz1j*w_=oQ^WId3+s-!NY5d-ne6_MdgALk}ZUwzT3m?5;tlja(=8WTbzQ}tg
zd-KcinayLQlZxSOFI3y^A?~O4RJYZs)bda^IzBmW5}gU)l`S}F(^)I(HD8kdYK^FV
z92TC+Y`h`D^IElb2YkdTHxu?b)gS>6T#0#IBhBrcoOGE>e6b3erw+(2FW`;F?n$SE
zpahWUf+k8<eGvi|q3}i%DdkogFHz)JioWi(!zMxy@G^^l6G#L!gpQGK;6WTwt8>~v
zEDqGdSo?rp2s&$lZQ``AY-jj*i$F5SJrQ+$yd=7i7M%$*F_4UIg;p=A${VVeAK*?6
z;~92%#MMjM>ciJ$#7;Y5Id}-b_;5_AM9-?p;*6&!<IDm8M&01g@I)0RK`<w)HEAn$
z<e%sF#r>xulvBGW53a~Gz+AXWD^LDOYs?}7ik;OMx8YS1nxSw71^KgP7gUSnx&s_$
z?*-g^6FgEcI6<wNbKd@jV}MWMaY7JGFhzB~;CySc01X|KK9WgKP4$Q!I;6B4SyAyU
z)aw@K<+Rv$&rReoxAFSGIYIOe!RfBGxK-h20o)5QI%u?t_wmuV09+k{>5mUal)^t&
zZXVnajwlDyvWzyR)co98vyV2C4VWUT&cENu_|smprEw)X0zJ@2%+i0wahm!p0l70r
z82ZN!rPf4sem9<d^i!F(gZW0B^!PYl{0gH6KHq~8^NwJH_x!=1=x8dkaX&p)bRIGp
zTH?+Cq$koDfYIMNpKX!M(nG06FvC&Z%RP|3q0!_aFH2j)_8V!E)6D~DCU<2^K1(*_
zOvxO+9hTYf+c3*UGD)&lKDOfC&jxqhx^p>19Bro%WvO;9-Z|QsY9yDG1?K<5<>j^#
zkK}j4{W*8WHk#i1oQf|=XTI?!yso^^rBD^TuGKAk!!J@FpB{l$Xj$ScdLX+U{iab5
zJH=^9T!W)U9%}hf^SE}<X|kbb5YL0ZTVVFtt>~2QXw;`N0vqYHrbw4A54vXis9t8h
zqrakd?`Krj0kgB)&33(Kbu{?`Bn>l28s;ErxB*E+H%UKhEC+7~_@>ETpggTgfN!Hj
zyPJ7*aMU~kdm>hXXG^5TP>s}&4^Cgw6%}EAe6S9%y}{M_4cyj6)#wS;jc7|*@y{Y0
zC+ntX`Cz@&)f(S;+8SQkT(xoa5M`#bgHi!Z$w8&ZWMw#C+&Fu!+7sQ~B(nYB<V|Vc
z<Qj%NL%>S8>R|y|h3u7H(~>^x0Wanz@$}+i96LLcqYmvS0sUEZToV;SWbUt0VXgiu
zUE%n8JWsH`tASPYvqz!uE;L^TsL$;HJPX)@FufiiOUVqvZnRMFiH&gktk#Qaoz6);
zI;(Zs4OHPsIUlEEnWJ{Uj%p47gG8#_v|9Z4r{`L0d~^}W%GlmCF+`ACphqgu@-5hl
zi2uGaWAxhcRD!1!S#{Ko$^i?go7ol~csJdYbcp`E{6<?)$*i8x{qGfJ`An~?pG}1%
z<_}goxs%`RPFOH;QAoUJmV<T6!E&*hat_**iFsK(s#5AF>uVJvNmO_7)M}4>#fFyh
za=wP_#fzX?(xyQX<^f25-hh%aDT+!jbdanmlCIS?U&QE4=VVDCY;MGaHiQU5@~G9k
zh*<T;(*y{@)2VVon%QiJy$Jr}F^mxK({nYO>{z1LjdnjvH4A%1)(Fxkz~eMWNx687
zzU31QT4Pihk#3Fpbi4TsiffiS^h^78g(LSYI;3m6JA`P%Rc6oX{W1mA>eE6fNV&6E
zj^hE1sG%l;yD+KdiIvCropTotk`3My`v}T{_WZJ1B@sG!ZQCl2^g1Z04Lbu(4WSC1
z%G8>{1_f@;+lLFFEo+x-x+N3JD4_Iywz)8IAhtFX6ly5R<$Is^Q-XW!m@XO21CNBL
z-y^tax9?ps7<=(&pIH5?1aE9*DWBgJoihfiFNr;p(ilzCRo-vE@boxK*X^{AU-c#K
z(fVqM2Y0`|rSm|c4pJK*FA$~%1czrIp$RY$YMqKxkU*;)(d%|DfN?7%kMO~BxPro-
zab)kdNaGh<RuPWIE9EfK0||Wkte4MNA-cImD;}$j-Z8L{Pf`kRD2QH9sW$*S^6A#X
zu-QAtbJapso&+^RYDU8JEs;+7?D&MHZ;SL0Hox0jG<io)-rd3EsB=<yYR@@r7k2OI
z2ZXEfY!cxS4;(K1O^}Y64(aRbEaEEpjga7S%ZSW8T;MBihW3#+-11hhPW#0f?Qh&H
z(m1GfiWNBFE^Fi?njTR*;ImgzzXzYodg()d=~heqZj|mck6oqFMU<81-}Tbe`oM~7
zL&*dE0TZeA45RkYQ+GwFyRvcJI+eoBXl&FK-igLxHjQcB{5vcC1W&=BBPy@0G!e4I
zs0VZ8Ex!Tu9)HV&J*mVQS&h@gFR;QxAA<x{C8G9^iR_?pe-&oOSIU5)SqnH*7b-VT
zYxIB6CHD0xA4d2nglc~N;>SeB@r-`VD(<3E&TO=6FR8#dAlr;r&4zN2BqQ*k=p@~w
z!V|vyq)K{~3MI_v)<Q}-yD()ZA$Hpy8mu+ylD}`s0!c?~pIRb)h=uJD4EaSTD(<DH
zoIf1(>DAv+4B<&xA#4nw4jxo?-mue%A4TjnLNYR@4=Q_ag7FPefRgcu{T4?6-27k8
z(;BDI6R}XTYFCSu;e-O}B!kZEJMJ9ksKR|O*E%2Z*(q?+bRVtL$O;=zNOLa!zUNFf
z>${1s@xuPSOjbwF39S$^J>A(V?5(?DjFTU5kR(?lkL#?RgR`;8YGveCnw#@5bDGTc
z!)E;zmQp!6MeJ<koAB_;xtM60r_Q078qg!8?sHue+MJQu-5EfD2X*#SR+=qUI^~F!
zzQo8e2rZB04hJh;gZ+^SuSD*G%~e-ClqDL4=LNwd!YDhHl9oxQI{hwXxq23gEl`!;
zJGEfv_SH)z`%W#`yL$@`+ncnYs5W7zbhmbByB2#Zbcz2Jm_bu*X&Y=4-dkb8R|~tq
z{G&M8*S%Y)T)bM0;PC=d51B2`AB>c<eRC?=kN0`}Nz7D57(K8=EZ={hXT`P3nq<99
z+qZbKuyBE?L>hOv@dRuy>_}eCgWqT!;xe)D+?0|c8}`O(HYPx>ZnO{}dUBb!4Gk%p
z8H^cM%IX$CvHv9ep>c>(VtdQa*a;vUybv#9E@QMBNab-#w8zaoCL}m2$6XbNEweW_
zL<p9Lj6pOP-0iJW$#Z_eupYXxzIYLAZ)Ku@Etf>0b~~%2TD9+_1%X2Y*lzSq*a<4W
z1-zI{oTt-y$^KfWP-Zh<>*M5YDbk8Y$wEUs4x+8Wlzrs0+(M!KaIVFq#rHWEARUav
zWjvmYhnGB9E7vZPo6#_t@jx5w7o4*Qyl~9Ioi)(5@H#x~HT#G3;u;V`DWz1MDtD-r
z3ozD}-YU;Ihd`rHme!j-zv1&V^$@gm{N-8OVlK$?VK7bbMQNc7)4*k_8~El5l}2#8
z%{1Llu65G}vcMgj;2MFY6U(iLNEW9B)y%ZJoK>_UY{898Tw!k2fr7Y+{rIt@?Zbz`
zcSVNpp{*|^BYp^nrS3cbzI&dd<7_keohWD7TO~IbcNpF~V(K>e#=?e?-<Q)l-ChL&
zMr~g%W*FI>tlrbbi2KXZc|bXA-|@5?z3k#SPEQF{w1#0P_g%A!dw;=vLiev*?Y6&5
zc@pZbvax^Qq=8c%xLD&gi}#)Tw5MmXS}a+OaT>i!4VOr8H+q`}IvFcg7%SyjD16*)
zT%{<ZwA2%3NT!j69*lxH*`wo89<Ucy&p1joKJh1xQjT%)-6eAtLur?KHfN_>jf5ed
zV?#XaHu+C_>e-P`Ze=i2KDl+PK6xZ|tP8Fbe`&EMO^KcIVg2{h^Bf)bIoD>2^7yNX
za?2FF{U+x2yBKl)CWaIcnj4v5lP!!ml)L>FMx4w!w=lQznC|7?-0-)vQd<UN*oM~;
zVZY2lGd2l9fa7>M9*o8@96IAj(g)_>UC0EKwJfH#khZHhR<;ND936)s`xKr@`_=FD
zU^+B<C*25Ml)J|PKA{7dR=?aFuu=zbymQ#-!5_XucqmUM3@G7uuGXOR1_nJ{u=^^^
zKn)NKwQsa&5oN$=GkhC8>f2((47cv^guB%vAQ}6b5GjCzf{9+Ic*1sEl3_7FaL9rE
zHXoQxDT@zH<n|uBbws+~*z?P@q_$%(Xoa*a25xMd3(uW9>ax-@DpU&vvqdyrh2#TO
zNH?EnmG6Na=|=hxDZX1b(uD|N)Y>&Lqk6k+P8CQ~&?X)9ip*SU<5D7*;oV>TD)IIf
zNa6a)>2cWXMW@|Hjm!c(>zEBzNjVs<V~kAoH~0s|#caS%9$W`V2uaGp`*1}D{TR`K
zOVVlkIHZ?A01Q$IDhh>3pUwN$+q;$)&Q|AeU*X)*OtN^*#~&gBWp5|RVD03fS1!;i
z4ODc#SmdRMlp|{oQTbSn1_3HJO4y)I@O_Y?i1c!3bviz>XffvU3?E^IH8==DU?2Qa
zDriGV+8BecAIr@eQ&<|zB0p&r;TVv<vrN)|rTc2<lqwC3jCw5l;d6yNCL#{>$&Mr+
zCxL)RZh|j@qtcW^?kcd4;0L2#H^;GRO8f4zP8MV>^OOaKw4vj?SzzOmZtcG4PL9##
zppF5Cf#Dz-gDh&b4YUoPXRJ9c>&?LB0H2(KRd3~AmW>QrBzS~>6ouD1<NEB*+{<pS
zUdFhOtH&>p1lN;`Wjy3HU^-2bx?%>#(1I~ZV@y<xHbxg1PvAM={(-#dhbc!SMa9YM
zTBKXQi1XJKrs@hkb*1~YNb*wZ$U^(8H0`e_?ak{m$R7~GnZ{VWBder2z^9=B*Oc~C
zr4|{b*f=^$qPmO^D7oE)`M^V{HYzI8xCmqU+a6lmo>7YNg`;G?wuf15zsjKd5v4m}
z1<ye!$+4V+(p0V%n?gotpJoPnyIWXI6tHEwjw<0H<H1d|UcnnpsAT$Xc>pL=E~TCA
z{ZiZaDKG(Co&nnqfROQkD2rQv$=EY>`#pHFyNrml_Z#PH%YFkB#rd+Oi>k_-$}DGi
z5nr9(r0b-Kv`$i~dI3~arY4pZAnKW9LSZWI9>g_a3Byy$EL*RKC1Bz79Zq@eB9&F9
z1&lkGY5^n5cJ~2ZKU4$Rr@WOg5D$0(JOPTq{^OzH8n91+AQ~6EvM3?g<rtPvh;HPw
zVPN@W<7woxttiWfF-8E{0yZf*g7dYBfbtfLmtky+%ur4Je$;9=I}NlIfEO;rmjb24
z3>?+_xen0H;{rf2&82F?_o_)fT+yp$#~y-;fi;iAUc1>Z1T+Sf1+W>`1QbD6-P$V|
z#w6Iyg7v~Tob2%k>JT}hCJqeeF_BgwQ1Eita`u%{pP<+tDdxA)<dh~A>xGMFiWi#S
zLO12MxeVi6E;tz<APbaPkT8_ML|Bi!!3T0SHq6=BFn?p23q)>E1*^{>uH84nD`F6f
z@J`o_*loa+JHS`;*!ja{#|NBc)n2V2rW@Yza%(6#ubv8-;JBDhmayyDU>+p{q7IOC
zZs|LQWG5Z)s5Ht+j1j?)Bk3(d8Q5YrA0{!Cjr42Y6DN$1Ar6NW%msnTJVm7Eh3q*v
z4wO1xL#Vt*)YytL?}pW%hCZXCI4b2jdPw^n(a}^Llmg9(DeaeJr=!)^Li0q@>ovVB
zm@5)LMo`LWE=-<M8SYI6Qt3_QL7C-f^+lYcVs0WQ>S(T!%sv0~Gc0uyN->XKmhcML
zXc|Y!`OLYO4#d=BM~YdZACw+mJ-*veN-YGb%3a-Oa#?A@22@D3LyN+aBE`G$M-LG!
z4mGrl61<Vtr245Dnj-GiXfbw!fWIaw65V9&_j|AONyZ(8c|&ZynoP&Dk#{M_%Q*3S
z|0bO1&*+N7LY*VLIDOZ&a7tbMvNVyW&=x9F-SK+p%Uc4FGzg!1+25><xJOpoCzXFr
ze}Nxfjxi`n^Bf!tJc6$8vzhM-Glf(yj<~{ZS(rrJP4=)h@&f2~tSWXl+1lpez`MTM
zTzfVlH7i*tU?H(cN}@|Hr=ucPNEO5zZiv8@u}#iCQZDx@E&^=RUfwg?t@Hc-J<)Sf
zh1bI%)g67#f?xebyjU;Y5xr0<TI?oNhk%Ey0gGJuAE8IydLnxMTRE6A0R($Xw9#`D
z6Z5!XH{PFMo^s#lH_E}kvCH%Vgd~aEMy$GDDc<jc-|ScGsc`35SOme}f=2=|=!PS9
zvqFo#xOyHuSe5B*etfn5!43PxqA*r=Zd5lo#5NyXjmv>r9X+(sTT1<oFz_wj%P6p{
z=fQ9LT69L*YkGK_`6Phx{u_-t{)J=A`{VytpKeumQt|({p8XO3|66>%>HBXWX2^8u
zM~BLjZ)&0v_kWd)%;(rvA6Gvl)2p$biZ9T^>%Tc(XoI=hYImB^aqWoI+B_}<TTge>
z+1*-r2ro+ha6O2BAYJ<5z6GY&Y_;Ej?-W$Fyc#_rfTXVR4c^dZcQ|22p$%={mJ#1r
zlM{RLD89C)Uw+t^U4#cSwTeYf?@bUWHOojHYI-8m4__<kmmg#V{@z)jwn^4~dVJb%
zHt;zm48=meDWUnp_EEElb&xT{195QB#N%1ESU?7mlZE7ZUrW9U9wj51MD#^~T1-ZR
zkHv?{b(uz9r<n;D!Xft83}7JpNpcMjt(FA73Fh!tlzCVd(X4WC|G_VhA6z|tFnavp
z@W%&7KR)Q+CtIZvMWMbHCwev^DImk6WlR>W3F&nJt7AO8jEhCs==}MIKbQWRO8LI}
zBfd6e+LCExN*Ta~J&1~_&`@COaxl3{%E6N-PYgot1-vW2?o{5=ZtH}nX&8%maZ!W^
z{RH-MAkgDE{rz7btlrZ9W}~+cRzE%%{Yd@|?nCXtee@@#fLQ!~Igo^9(-N;87fTGz
zq*O$wQbh8jsM9_sUm0YCZ?wYCKm93+&T3JuM?^&A83j%!T#LUJnE?6isRX5eA4FoW
z>wYaVQS`Sb6fOVzAl2(y@A#_`3Xr#t?=bNI!S91k;Gh1r$V718o<yYZ?}I{hvfuh@
zBm(5OrxBF?eGsYEY4*Mzl@R;w$%Ms!A9QLSHG5xA9YE}Nm~v43A4Dh3%{7nml&K5d
zf3PZ+pqsSOqOYz-ZKRWAsTg;!+`HCiFWq-<fM*5Gu^`^Pe)r0hvC!PTYLCdzlHhl*
z+$TyiCF2($yGKH03Y=S1Z#*;y&4A5t)H*$`M`5o<=0M312#`<e3SPXppScf{rSzWY
z7|G_Cb=5^4ArI_mFe@JxoL6=%n3WF;ec;B924+q^Eb=u4A9W626KcZ>a-U*kw`w21
zpeE2Pl6mkW+3tWI2<AbL^t=Old28BD=uOdU3!Q~KJsjs@BOh;MjyN*_v6iP>umQMb
zKz!gf0;k~R8<&~${LD7jg<fS~raF{`D0!7SUT0b^WLK%vb<-NtG9<f3eP}kV5m<f0
z3XQ?tq(%szNk{T(buqi1xisakF``g6j_56`zWvBT%{a2RtoimM%)^!5vf@og+9Ctf
zGuJ0KEVC{P+?X<1;5xtipnbfWRnEa@S;ZVol2yz;!Y+BsPsQ=TV3m-4;1zW%#zTT!
z$CQ73zS74y|BY|p5$UVN{7XIp??3<Df^$UU{P*eZ)2%<wf4{}&+noRMvs&2oU5&36
z@kjj(m!<#9Vm`KHoUbQZs^cYe`EWUEbz7CFT8*A6&&<>G>R%vz*QjKiT#m-q(I}o0
zoLHOwRQpdB02Ep}8gwa5hL@IJ;HyOGQ|k09?(t3D*WfL$-ZM8`Hi=WYmM7p{575I6
z9823Y)VFynzJ1g%8#NwU8w_NFN2t{I>Dk?^>3jz3kyu&a+Sz!uSjR&-21RhroR4o3
z>1`~XlAr{<t^tvrAR-9d0HZM1iy5hcXc+4`ydcG3fyL1)Hh6@-vEm{eG!kG$tsL90
zyy~}4fP;k#caf*#-TmznJXl*{QK3v+4@F%P8;u5*f=pLy#P*3Id@6M@gRiVtU`v6k
zY^sAAn+>G1*U;DNGIxI@1RId5B4lLz_X9kEi<yr|E({|}#aj=zXAg9%g~x=at7yH#
zyLqdz5;r|!wG9`9DX{%C+AnEsiOhFGpYQCg6|N|nBq$V!jNr@$9;tXSa5cRe!OepY
z(@}C+ECoLWV5>T7)<bM$RNdO+Tv;g4@v|D2x{EmZ^}*<EFj|aP^FJrS)p(o)XY_Hk
z-i-*_Pli(TXLTPgTwFhX0c`j9#gd?iP*o}v3`#4msGph4;@Y(_B>Q3x(I7n%{VZP4
z_M^A3m2k{|&f39F?}Q9g5RZxIStaAqli(D_EEojm>)AWrKk$=L9%f^bn+CKmU}H$l
z%XbNxmGO$*($rc)S_%2!oISjzF}Oqsu}vEWf5;5KOVcO{XWK-CGlSNvAx6oDJEu25
z)H_TP94>R>dB{OK&ls=I+o7;v;r@`IfjBxr5m@f|gHh=T4=ZiwM=ym$SCKT(N=hEp
z5Od4U%i;I)ESnJ-oGm7XmvRGzXb@}7$KGF*kawE}wDG<1#d<~*-Qs*Pe@tI?EX#3c
z^Fel?B=a;*5&aU`UxZUK=h9u~iC`1!T(Q!OO_KN~U^fHG!E!c+=<ip9cjI6Mv6Fgl
zgy^Py!H2%)i(ocht%BroFo$56De}<PHT(5`)NS@K1SQLGk<CRlV^+xkCWRam!_jdv
z#0OZ#0q}MWN(EhYVG0JZkel~cgjZH%uRtWLtsvJf1pJ5TY!;jo&1ywPiKH{yaMEl_
z0JUo>$0$?c4xNNBrtH-b(a&;VicWQX;ozw;ztzZ!QN0WAs|r-`{2oS9Ox5WX*HoW!
zmeOFY7p;}f&(A?~!yU>>xqhu{<ur~gnXB&!4yCXt`Rnv8Qjb79exY~JP#Be%J_Z?c
z?GVAoR;bFLiO16=5exKnMbxPu@kY_hXp6~Y1y3rFPUYG~Xt8HgedoW~EhD^xxlbQ@
zA>Cvz6I8V4%!~Kg8#*^M9Ph6W)^E$KS$KmN62wb6KtH-&oQ^mYzdW-h8?1NZ*~cO)
z`~uoT;8}FJWpb&#;)8Wl(B}`<2o0+a0+IxA<$;2fQ@B9Rr{2+_Z@jaLm+@jq1S@}?
z_j}e}e%A*<Escb=AM+lk@-WmgjK%ZdA!f<V78ZHCkMoBS`n&J&67}O3AI_hwm&<XC
zkyzF%*gh}VMQz+*E!R=9h{$rby#?WC;JR(CaTfK%S{OANCClqStP;4{CAPT^)}wW1
zF&}->3Zw~aS(@Qt#Dmh;&T8#WZnl$540m;C-HaPA$vzV9WYde`d~bFeu2Fe4xL|x5
z(8}<M6O^iH=88<_vhuT?)!5Tc&GzwGtz+Z8yO=gaNpx<g_3DSwo4wr<yNqcHE#Od2
zOwQ}^m@aDY9w@t`#;r|!!)X@bzuA0n(m!}ChK|`BtbB5M%sC}JE!mvOF)I<$J}EKF
zJ}24Gr)MR6d;7#X|5$~LM}vn!uBj-xpXz_(<hZF!)ST?f-r)%hPK>EoHD1qwIZLOw
zSngBvS~1Ch4Jml^h_1nSg$@v4Q5Z`aHq1Gxi`Kl7b*^Wbz4+LRggm8(>o~G5d5RHk
zqM@f5yaIb6z)HT$d`T(Dx;nSzVB}56akMqQ0ZvEi>fCMRc>sj~AS|{96OZ_mP|SaU
z)&$od8CnqbZ0=TAk}{on5){JsZ^g|=QSK)>?l^}zabeEL>7ut2R<i<Y7LBMFdDgRi
zwrBpX$cyBhQuv+}dus}HG_X)9qp!<;rVWEs=hFqg>n(>A7Suer&zkcbAN{-GL?R$`
z4Im(75n4?zKroYKutZ0UR#P_ejCpD;uQw%yfi=fbH~6@XfnGqi>MjI_m*R^w`vTTO
zqzUCo;4sFfQ9SrSw>EUixH^xPiv^7U1OvkhJg4`c<a?piW#>NGog%jn=uV7Ah*!u>
zw<<tUh4A<76yoILMSN6bLG`5#c}mFltBU6B6QR#zN9j^WTSzaq%D2k!x+|X>?A9xt
z^1$jsWY}0;PoiakaORx0u1<y2%tt-D;5=$D*+evr1<t|a0z^UO5}Sbp42BjZSV2o5
z076a`MBtw1X)SYirzm&^W!Q9a4T6k5DN1qBHw)yHr}rgCF+++X$IOPA#j>Sr6YZy)
zG6=WLl!xxC@aW*Q6&*Hfjo!)05kI70Aq)kfio?qR5!@w?204IUur@J^MnfXmI<CeS
zpcPcL<naxx-!KPo(5X0p1&@f(QZd&I3?4IE7Ba#x9`X=Jq85o(2xWs1MlNd#UU{c;
zTDF1+PSy%A#*kJlqS1#%Ji=*FQrQg0lpc&lEH?gRI-7w&C^Kr|gBNA7z?8W_DlzGc
z%EGUF;rRahw~X<Sukg;eMT>hLm`mI}Ro_I?!aH6p<$GNN<vU1`|AMXOinRtzWvPvT
zV=*9|fIu|bDgeQt06!$ccj(~l)_^-N0b(O$Ez2oiN{b}Hq!y7>DFuH;rYxq__i8El
zF(t+zna5p=y7e}V?@(tU9w?F#zVXT;l<DQMsG)-h?oiT|T?W%;c?VIYl~0jBqM9w7
zn6JoWBeVy*5?;twL%x`2ji|;dSZE13!BbD3Gn9ik77w1IG=eB!QD+Nu`ev`Jr*22>
z?%9qtF>1aEZ5g}PL<{sz)USu#RAN|9r=2#8wD9!!xY;3?_2f25?b+L*Wu;rgvMtq%
zoKz!Dk6#_1ygnwwFJANTn*MT_2(4z?j56*Mfek9t%29D67)mnEH{Jf{v!iAa$8M?h
z_3*P{Krl^E6c{J*KiVVdkI%RK82*1m?V2qHBjJwoMe6YT|5YkapPK%E1bXKW|G#hX
z`KIw-FxJaStJOKFHKO{-(b36qbQmeIU|azAz{Dc#;Z$7{tU6c@ygJ3cQ5-eF+m%jk
zQJPEKfAmP(KmPguWt@OB=Kt0++x*}DWBz}O&o`L=zfZqiSF9*K*<26Je3HH#Bx--|
zB&uNh16pIh8Aj;C$7Beam>HH8w?>KIpS1HEE<VpXwd1HC_S#(~jwCNbPatkD6)|2P
zDNYKiW$5#$T2f$b9B#JiullD)0lxAb1U4wLJK;bcD6+fB0rjfcBw)RI;S<0A2bXgQ
zz<)YDIyi}1z1mTe7G~+Zpps9}ivu`&ElBiMyAw8hp0*$q*{(H@8y={By%zfMK)lDp
zTK`Zy9m-*#3;kaE<O~_;$zYfe?k`b`kTy7W)&YukdM9f9QFEL>-)ULo?30*i?cAT3
zEXB60(CpA4Yx}t0?1e})3y8^g77&x^EFdPwSwO%HkWmp}3Hv7nSN_pyCu|d9yzDzs
z9M$UmW}Wp}8Lkvvo%=dv{U0rc@5V8gWnXZO$<qI~suj8ZZ$E|mKTmi6(Eq>52bbgN
z)!<^himoe9sz2WQ*Z=MR{a^o^|K-2_xBrjPV)X9+`#=1*|Mq|Rugmn$zx|*8pa1&*
z`9J>u|HuFChxID{VHFR5fYSF26k7z~9>cKY82<Ht|KFUir?b)j^#A@}|J#4}zxdbx
z&42%I|A+tn-~Pw{)Bo@P^}qX{{)hkTzx}WNZ@*LR_&@%~(Ephl^kwVFnE%zSrxo-5
zA6yCiWBz}G&o|Nk)$Kp(@?dK2Tekj2rX%*N9wl}*JvYL{rX$C)D{cnVAimJk*7GSz
zHsRp(#i*9vq@r5_k^!>@dbpm>lf5W`H=6gg#Of|dT~~MY)a@-u-QLnucXuFlcV}PX
zg)>tK<|P3+&caGX%5j2^sFsR23o8xjYOhe}D<}yjlAxj^SV#(>Qif?ZhPzp$FIw96
z7Cs@ar4juuApKAapIZu_TMC~mh5xu4{xv_k{KrV{-#GsFR&}THG!_4Q`w#u^n|!{x
z{r9XD9llPBewnNtp+#ih-h)%osH7N#(DRICgw1|fQbhA?I$z)X!1S0GsSL8inU80-
zoaqGis8%DAYU)|e2FYX*UoqPi4<vMW(hs9XuYK0+nFg=;8r3T07PHj^MW0Dv@yc&Q
z0{O^a)a|uTL4w|&&DQtLoHs1zCY94*IivgO*TMx(@)rcN4JrA1lg?_etPx}xLE8As
zM7=jruYLS7IzByumq*axv1fU=5Fyb+w|mm*6zmTih8(2VY&FCBA%8&$)){y_%>j->
z$Uv*;%29QJqFcp5t^SI`?-A&|G<5par$WIA_u7D~?v~PoGGBy35>#%UwNkL0q(CS^
zQ>0dkuB7jc9)!bJ;I^Kb%{*>~uTOffG*|$ATCJvVcG}0UqJFp8Y;cc)Lcx6cQh}+y
zJcwGSWI*8s%q<^aMTOk+LRKVTWzTHjS&NMIL5Dr4Nqd|c7$T8UtSLZEgkXFMYcB=H
z;!X;N;5)73YdXIu=-?vxy)ii0{EffPg8c|jqVC}@_(*D{Z00lwNmGfvCf;zvr$2Or
zjW%f+$cd<1>-U?DLZP5ymDvZKllm)`U)A%$rKpQ0$zuxJ?DYU_O9!hTHp#pIU<rhs
z*ygY6YVXFRQ`mdh%tQ#P33~*4-$+T!ltl_=VZvM?9-5@Quf1=iBxcGY1+y?=RMZ+)
z4WWihL!)C@4X)vm_!u5_(y*Bsi6dj-DzR{sptW!Ks4s!m$B#ecD;`P4%Wwo=@ex*3
za-iWrIR1;J6KsW29N!U&lJUxoZ%qNo;{Faf3ZD(lf=0BX^_9JT{)!d=frl^s4PKh)
z(74YcWw4&2SE8#z%S@o83xo%ppRAAYDdH3w`bx5e_)A!M!P^gBQJswkan?Y@9GdJO
z$MN+5-@<c|g>35(ls|*V%P9E9*F>|tHA4PAUaECh1J_vb=fjd4iVxnf5>A{-1=cpY
z(b`Lj_OREnU4_Z48&8KTr6d$$Z+g0U?rN}lr<7SO$3vo|52IX#P1-@B(RAf3Wq^V!
zc3v)~!#i0xxY*!M2M-&stiuDk(2e3F^%eE3mTPaR=OB(O$RAjD71|5fTdz1Us|zkZ
zXi;a|WJ$HQ^h4}f9ZSE&o_lnFuyS#QYf3~Yz?<>>R%blCW<S~Q@q3;{>J(6`#5HW5
zu{TDQj=g%ZOKTwh%jyHNQm1>`rX>C-?nF~bBC;;%J8Qhn%PBw3vb2{z2yYRU@bWqJ
zxK^Kp_VCcAZ$j^@RUxlIObzEmbC;#hw2CKpGfT^t_rz<nj^}puw{i7akvR0>#_Sr^
zR~*xAC(W45)+-MleG%cc1D`q9^K_5tV}o#qf-T{2qd=A3H(r707=ZYTkRtwe+y+&V
z89q#A*%cWS3<An0^co7NJWxAjm3J310lPSg7t09m&{!v(M@SfR(y>5*aByfj^tusB
z4>y0@(?Y?in-07{tgg-!6R5vbviOl^VYT<fLl3j34uXu%*AA8Y24k5)z!k=;UX=@A
z89(9^ptgE^qo*~aARCLDQH>~Lxi=-d;(mxxaK!Ic?X(kGw~M%P?FCUZU63ij9uS4C
z9lLx>Z&0%kv|^oow2Y_Mc;lDd&6zFc7xb=_9BRBV7hMkKBM<|!oLt|bGP=Ig&Wl^m
z?B4eHLY-`5p+^WkqwFXGCoSiTK|DfBVQN_vdxIdrzxzhM(7FQ9OwC-eE(ZbmnnKQJ
zB-}DIi%RwkkXmLw=Z6CMVqr`1>?>0@lnc`Aky3V98eUV~7c8|GJc5w-6d|ys#0Fb4
zxNd~rMLc}4zFk#^(Y0mOqev=GLKET4Q8*ui<~}xL^9k_*iw4&Q4=+DFeu2D2U!1mw
ztV}o7xyeS+!nem7d&0xvl24&=P&fR*OMJ-lkfK%t*SPQtedE{HI-q!tw(-f~oERm5
zVjYlEA~hLGq6d7Uf!PTD?#pB>Pm=NPJ`e1p#`D)0LfPxMm_hF|7$5RM;wC=_sMt{C
z-%ltnFY`S`_Lf2^qwJING*I0t<=81HMbq1t9QCDlFgXgvH=z5CFTAEGW8mq|ICEF6
zP!6plaNgy7m|m|%GZ?tnRT^;5!a|A_<CaDYD{8ClW^>bxijr&WLUH_(3aUQPHhp-L
z+BUAv)QP{SO3Yym%~)&0Y6U9^E^OYX;%!rqaK!a&FprV}2G^f1*?z>Wq+)mxI+alF
za2{iZq|~@mCw95b%)iUjcae^*rr3(6YgV>b8}6=b9Q7p-(H%Dtn7Ihy95jmoZ$rUr
zY;<nDCDB@C+3egNv6QMHv{rBpcQlQo<b3AD8wk!!&C?VP7yVf<2L+Qs#Z<fAjX!!?
zqzR5x*~xS~8|7Rqv?AVlF2L*<h6C>;cQdfkVK|@|%x1o04hjh<6b+f6AebSKg+_IC
zT#`yZg2{%bnkQlFc(Go(l^+~v%Pv&xI`E)I3M0B#q><QlM~r{86V5;IWBC6vN69Z+
zM~46J&eNy6DgWQ<vp@X5zQyO;-~Vm)ddDXg-(Q)oIS`TL=;)+z+G#?NvExRq2hIr<
z7fiPW)4l4-KEv!SSN36}4*A>8CY-cd{bpED99VUL;?-ibo{h^8xN^7{jZ1~XKi?}9
zS#t2xPj-Z*;FBnk!rW~YB{rkFRPW1fJZJgwocZt&B1uGkJOorhl)_X$Ic~LI()F6g
z;!JnG)~g*+h`<5TYr=)GR;}GZzZIfuH@nT=Q3{T5SHD}UHwz5t_@v*hr=VKLwX>*>
zZ->@Tj=G&@Nd1GF2dDjC(xZOxCIXKcte!T~3U|`&)x!44F+8sealelc)Y?qy!xYxS
z(|&{<ZZ~KH?7df9o&>aun2qLHyWYe$^iR9plU~?F+|83NNrNQYVABplw>%vLxYq0F
z<Sc^crB6;n1cmPn@*_>RM*9S*tU!CfS<hH!n_<{Ke%W_(1dbtfhLU{IY@ehVu+vr;
z#|m;(>xWHXvY$_z{Sbn-A2&$tFe1tXt){0*ztshn!$$O*KZ7@7zj+)Q1-H@#o5!^S
zkalo?lk=5YWIJ7?-Gys_{TlKO9pL7hej6%N3huZXi7bhYD;UjtY7s|=zj%x6+Kb3p
z3Y1phpQYhjb*$mI`SK)e*N{my3hdcB(lYs4EAp3=Gaen>dQ@vPddk$XO*95=5i~l(
zFt}Rp<>?V2O+Kv9>Rz+f_@!W<UJMM~IBS}uu~tsqJ^L@S;BrVV(d};M7o{(@X~#g@
zy`RysQSG=J+ArJ15Vbo8CvRjk)%7gppo=9)=MRad&}mZVL&<Gt=6qu%Tyjw<w{0tz
zW7?gys2}L>b)#YE|Cb-W<imgL<JJFncDAc2{eNe>vipbr|1Cb>=Kdc)o`HpcDt)KD
zX6gpU6~DK~U)H<BJrKNlu^djVPwAx-N8|IgB^hm%m!n}iXBAHu*RFUV^r}bt3W?*T
zJ?rCs>>h~axj%M=LceCw0;vZV0`NLOt5q@l{6Q+V8!80-7*O@FM^_K^Zb1qc`oq7j
z5yb|cuOZtleLDue-^aD1CfpJL9L8>`aeCB67zj>65D_S{N<7=P0`*}%AxMmCC2$u+
zy9q*S(Hqg}9=wX6eprI=4glUYHn7xj_3xa#j&`DhUx=!2Rzm>}+b<8JErcN0^z9=A
z;b^s+od%;><z_-YP??UZ_j8j$6l8n!t1zfVH)>%`xr?#&bXSMji^4h?q5}*MTDhsR
zPe~l*GD8{~&cGO^#(vj~jOcs73W=8#@enYhm~K#YV%XjB;{cy`Zj@Dfh_`EyLu$QV
z?H7ocJ1EmK{80y54&H;-+f3nt@5KuUWXHW#;_wESLIf3eB?7ykqaC?9H<~XIbrJds
zXHTVqjzy%WiqT-LkdMt^ebhT><XY&~>YVlui-S^NwjDs|zS2=OrG%^^i`W2I1TF%S
z6gkxx#f!9*L7o2VS{FziF`QAf`Y<>r2|UF@P1?4DrZPgZMLO+Bw^@_QT6ObeaXI5H
zvNfr?Ycz=@xSO=Q!|EG&hKg{%+8jv_?hF-8m#Z-xCY{f?Hx$z-+ubOHJ)pe#!&S|0
zo91Y#ubRK`)X&V2el-}P4V_2DMcICG1mc|)?bekaAS{)iMDn!y>^t0RQnKJ@yTOZ*
z?Kp=!>=456)*iJw^<#moUN5VzJP~W0?#H8e*3bzF_o#}zAJmgZSQ68&9v;sh)ptip
zWj<oUSq%JQK0JP*xQ;<cCDqHv?&HH=5g`pn*Q#*_xO}gPJ`E2mdoU;Rle7{$@Xz@2
zBzr41<-GG95BUG^VNev#K0Fm4lQD`2D9sy*FA{=m^aWMVQgM$taZMS!@1rB0^u!Zh
zc#M#yXWqu7$0?%tW04^&$ymLMbz*yMOclZ8pRo6f^3WKzTWKC1Ey;F&uZl?iUH~ma
z2&`d--Kg=@#nos$8+;@iMHV|-7JhCkETW&|gALI^qgA|*E>EOk(-k4ZD!3xOo-WCt
zL*R=Qd&B{slX?E&PdE6|jJVoSu+0H_Z-73Bm#6zwBM|eD4KvbOh6+w~#l}{7bPvu@
z|2S^an8MO8!EE?u4~}Y18tSiMdDy_g2N%CqZXVpcMdvr`z$?qnzS0THlxE90V@vGI
zkLD1|0lc*yc+YUm%@{!M6qn*u^ZF|?A6g5`f)^li`CS<CmXb{}Mf#zKgtmZuH5<Jt
zlPH6nw;Dy732|mrXO(NLx?6Wkdy`gqlSZ;N5l=xjlQU0_0&<${jLP?$$bUq4R~l|k
z#XHcEk*|H+I!OtI#XCj^lUj^k3XPNpb?`OaFET35LBV`=F{E{VW-m(o!{rVT9V>xd
zN9s*0rlk{o-7+HT1C4jy^qLLlZ*$y9USc!eb3rLks&Z0OndggWHD4^279VL<ZAOp%
zB%Vd%8`Qe^cVpSawMLkcnr}iFegXUGxt13l_oGI;zi;m0OdMrVbBV#K`&{Zf^qIbx
z$vU+jVz2SRth95T=_j@q#Dkc*iR}eg2AsWcw?MBO@<-VeU1ekSgVhndz`mJLAk?QA
zA8SY7`{Iopwq4*2{PDqvHyplB=cH-V(GxyH1JRQ*2P-KGjq=DUyotp?iTvE&a%)9m
zXpXx58d}YGw@Qi<LWcRE9SWS{Rhsk&r_m^4OIbs5v;_=wIT{l@h7M-6NJFMJDfMgy
zf6I|c0~ZfECl_@uWHs@Lb~$CtHd}nyl+`Y$8`6o?vC$~!*_t79v53T^K@WrPNG-;5
zPgK#JFM|1aJPI%%0TALb(2J0DhF8eMUg!zk6g$`#uy4d<715E)X;w*t8L+6y29<q7
z0+IcE>UxM~hDZq(ka6W;y6JdBu|Scly?)3<E*G~}+05l~@bG!?cTxCd`y^8$Q)!I!
zvx?%vdNmu5mr*>JUyO^@N~KZ`s#_IGaZ_2VR+P=T(iLZ$*_)KK=~}PRv~IpWxu#l3
zRo#+1%i2V+dg7mw#5ll2%+6-;YAJhSk+3J60nNtAnDMNl^>A^auE21EW<_I#L~e#r
z)}DKQd8jVELI!H<Zns7H+{m%+3XRU9rg0x#yuIycw3R_5F4J{aI_|1Jz}K5uG4B=r
zCfTrx|Gy7E?td<`EUt^PO!`x1%dRP<Apt-bOtW3@iG28><n?>Kde|veh#aWy5?OGw
z1C?N+tfbdCM2OuTgdqHXpCqaIVLCrl@-fC&3ItPCEC;nT5G3j_mbQ>YrB9(i1UK>+
zP^cpc1tJ6HP=)C<kJB(DQH8N!(Qt(6e1$ZjB#jCpNJGGaCvbI?y%-mKW_MC;yc;d!
z>D)5k?0)6CS2=u9%?7`L+};Zwslg{EQ<&H6?tmic(JY@j;(SXnpjn)B;8*h;mF1tW
zCt6wM4E61%YX`kSfaep?AIKZUC<cdQ-w|jGPu4o?R$J=>73TNo=5Y1v^sP%qF|<?m
z_$}X^ifVg|m%ePR6sxDw!>>@~p|8U5D^wVID&*7W3)e}}diqHv%$M#dVD(n|HO3RL
zIx7voN>4+7Ps6X$(=e;2Ut@-b{uvs6g&7+9W@z{oW@zY}q2bq=p`m?-j_4!NKPrkX
zH8nnXTz$F<;DL2+P7rGZEKcQgE&3XQsya7Hp#scau-Df_OP6%x1A7>IHOHlTQsk?n
zYye*#v61&5tbTnkdi-GY_P)|0EP$qBweqsrnD2vUj66v9-zeRGBaPf6e;A!<!@hqW
ztXPYfq3u2ri`KgORYUy!_irg$ZSQ~-ez4+4IjmRoavBiKsc~Zj42Qm*t+R+t82b%x
z5PM3MOhp_<L#OOM|H-#;DFsVmk5uQBEi<I7S6B|t+;eZ7`I*p|^o#r*Oi;VQtYmR<
zf$=UNJ}&ueS6Z9uNEEG8j}b@3oMVeVII``eP2z^k^yZ6iU?d!ND5C)a@%}yi%o+<z
z%L_R60sD>OYr?Z^$4Ut=Of4AKB)C{4Kho#63->EGgt^}Y<D2Dpn2bk11~fsQ1z;`e
ziG#9>PDPfrT5<fupo0Lj$YFlt?m237TJ;WClX}?Gbm_fE_ol}uG+p{1qA$(%7ERvK
zlXrJ$Z#pN1r}mt~c41eIgF|-pvkCGvFgFi{L%y<O#z7npKJxf><TpanFhB(!q=J2A
zF;A2|O=|M$m1)1YFVNqRxUG7RZKYtR8)&qHt<_$oLMBbKBEPpP0q(>^@x$8}@3!g?
z@~L1g%Y9ST?^@Mic;=}(qxe~e`ZEQN;`~)A^}AABZoGpXckO!>^}A90FtfO`@jbE2
zwihRD6~*~?wYc0sV{vT;kTR-5&+Z{<mE-oj;HtTz)LhxPW|d0e#zfXC3-3ncFc*>c
zD(ZKwtH@uJw>8<LuvAt3t`?^KE`WW>lsI|#!&&^W&8A|N7boBAM)9MtMvHIll{GP#
zQS}oLHqT)QRpF$p?x+sntL1aY@-ELv#f5#4d@Mu=292^S00b?rJ$#+mY>6*uf)up|
z5SOg2Tx1q1H&1Kyf6#mO^{FW?7*U#^Go6kMhacVd?-mOvDs`mMRBMGVKdEk%YM)A&
z&($@zg2SD`EQ>RqWNUOHf8P=<gpLzCesd!go^_z36rG63UyRUHlAa{G2Ez(bl?7Pv
zL1pI+v&3`trEs4RD=>5zQ-$Fk6XiUuL6-&xQG}{eNDrbdem(+fqlv?rjee7}NSJ&W
z?qfc3yLl2-{Wi1j))Vg=PmnA=cTrp$daCGqkf6`5Rs=a53TeEYW@R!yKj7F%K1QzA
znK=h9W2@B~$*(m3X0xwJ;q<c(IW>75@kU&`@@)mt^uBNMWQIcg2#NY!SBy7hTw0rB
z-%3wG(d7efLpVm;w&$Wl?L6sbCKLcg?;wiXG!oHlxu=lltHRz-LBuwL<)RHr6;D4h
z$*A--wTcTj8qq4a3Nj5I;n_?{%cSyzewXWn>YIj5P_>S`HDTxW^-G=WZcW&`dlL@Z
z8#SS*wqd7qw}xoStD_nE!txaWc|eB07(&}A&_<+%jJLXiuMWisYV*UUQ}b#uf-oke
z9x`E`KNu-~kmhVM@#AtX<N;>tZZhZ{3FW{4KF_*t5pc%KAZp@@>aR~?{;ml;*kf1|
z%QPQ7QF%6Zo91~)JDw?g9m^auG+)O|(<lIGOhL&N&M~4mIuV%VF%duv4+#loWB{z`
z&#2UD<j^ZgRx2wHp6PTQL@&<wxSH~n2!4<~0Q{-KsN!!ze1sxPuiP^2)6A;KeDNZ{
zH#U(;Xt{@Ut*%npY`2az+eiJV`3Co?Sjywq#$l@rwyJp8iCQ<V%s`N<wR#KC7OY%F
zQXi>77DUG|hXP=^kD^pp@0HTNHVS&m#18zXgR!em;3~q+o_bGQfn+!6K)+;lkS54w
z)Y)|WT+;!^+U$OrTB_kIeTTk%)a(VHM1FJ<9yWU=kMoc5-i>RP;A1!HB;k_-jOCE2
z%?-C<92=!DU}@h#gR>~Rd{ve=m_zhvuuc{pmrW}7d5FcU)s%W};$^{@-r(Tr-uX~F
z;{DP4pHZNiDxi4Ex{E4Q=!I{hpHG{oO*sWsIsgeS#&fcq%=iL-+X?cr+ivP^WX23p
z>$%-LfhwHy1ay<}G>M6}fBQKe_WI3^-V6I&)MGfS9=`k{wfb}vbN>$pv)N*ZD>vIw
zPp=0=(O524Q<`|MAVCPXv+``Nw$Ez4sMaAH-v}e_05~rSQ-fVpe;5$qOte5;We9%P
zc@m;KYWM57hXFuv5XEp2aIc$V7P+5D6-w)L5En^BC1vcZg{F?M*tVe9ObgSay?%2t
z7x^hNBXo5aHSaz06`NVgLwJFNh_Y(wYmb0lTlP>mB8gj~?%Jb?^ECkGBhZ(nGgn-g
zW)b`cuDH-}1Y@47m{~s#qu1!2?3=1ZKJI0#$i05qU!&WgV|fIJ(wc1Xfy)l+BX<$G
zMAp~cwP|6eeYX;F)pV4%CXXVWucE%YBEtOs-eh)xuBKqa_e>c8<?Hp|D?UAg!c{q(
z@5+*8#ab?{Vdyo2F}P*C7)I#f9<5iNUiQ1)sM|ZKBP%-`6c|V;cnCm$SJR?Kvs3#e
zI_)zz`A-00ZNk38wFA{e&!Nww=`dzOeRVmA$0LWWd0OU&;hXundUzk2plefU=XS@v
zh6+T$s^q!Z!1PqOh%}YA7+6>3*#`JtQM&`$97r>Ngny_70-}@&>vsVt#t7rta1q}A
zyV!n0Q=f4a)4MZpm2pv~kU<;TUu-frivP`KZIQj^w@$~jn-A!fcfi2RE2eRptP2=3
z6XFkQM=P2kCJ&v-4v|y&GxyrE6+jx<M@JnTpG4!^81G;RASd-Rh)-|KsMJ?j&Psjk
zTGkMlBfx@|rUpRJ|8s6)iXo7_cFx@|L!MkyagkgS>GB_ILT1iH+38!)Nwg%~er6hE
z*XAcq(QhH%bDCm}=WS&YH^aKLJ!z(NL7=VFri-g)Q(JyntJYD{4gf`!EwpOGRg$%q
zkqukH)*_OwH^czcG62$Ig;2mPxwb+V-a;1!RaLu>tq*EG+NG_=N~NS7z$~g5BJD*L
z0Ij`LtG2b2=5UG#Gi1RahRD!|hFSEiqr>vFcBJ9WEMz3h{8SmrlAS7+>aisk3K@F>
zrGSw`Yfw~fjmOmtPE2~zq0^C`bl`D<V&9^#o-D^>^LZHa5F262w+StfaTAordlPq^
znOpEfHIqe-uU1}`c<7mQ4lJ+GMpls$ah~KJM6J^f#7Yup0ls>i)p%>sD{%v!9=|$1
zd3`KHtci=}gyb+Dl-AE5fcrl<z&$4&fE8kke0y2?=Q(;SkZgLorw)v|3}AZ7eyohQ
zXa9}>T1*Y&8h7R}vc5a1q}-<dWGA(LbmPQV$&0Y^(&_x6K+Busas}n@3IYA97nFqS
zrOQc4H3r-Fu^5oB#8?l1M5o<S@Yet>^RTfXy~dGKYo4yuCyLgt>lNoTGy!Ni3=fUc
zK#HROM>&uzu<0IEAPHP-K)YsPZHOI+yiH1@Z$8z`1V+I)vJR6ze-r*bKqpiYWmn{J
zzai0>vWZh(1^02%L$oiXnnEC0a~fp+mP&Gm!6==1jUEC;F3A$>GBqhvj?FmdgoA*U
zt(VX@0Aegp6QgdeLAE+h>kowjP8W=>#918rTW1(N_5yToQZ5U?rQ#U|!;w$kPlj<a
zE90NbO_@$2bCIIe;gJ^<STsmuE((FLRi}ZCnXR0r_GBsyCVL`6DM9`_P8Y+hHz&um
zY)QSQjBzc8PloF_25saDo|MrpE90vX$4&6$NL{QWdA&De^iKPjuk?nEuhQyHt4_z*
ziYCl~Cd@_y(_w>_Y*>@!W*<t7;{6E(d?8v*5))yz9K?ewcv3o;&W8&^SHia+EiRVw
zt)BgRmY9g9@e6`;hrL>>)vibNPOaZpY+*90sXmlU5GVxZ!h!)`PA@K_4@5x6zfRw>
zeJe6QoWC#_(l-GDM<IVH3FhjB9po#|W}DuKnU?5mn$c}7PAsJgtr@904w0>KbE<^E
z%c`zoRArrty*J$h^yQwGO2Ok`Yv<{+-4Y(&2wRoAo<q^C%Mu?nA4fI1E##<sn~YZ}
z#iyo}$UWyBJ?T(ALRP}GvJ02|qSuU|%edCCU^g_C)K31P=S`T+$RKh<M>_e3%sA&7
zz^*na!Bj7p=p`gv;+6~IxuTC6T>gOnPWRKZsF<2Y!4JV!NnHAMjW%44g3;#nySd`5
zY-twOk%|+VT5`!^2K(VT*+;|S;|!mq^lo$56%i46iPjvC->=E`iKrCFRnIvq5_yHO
zlSM@1G^i>{v-qIiufD7u*K7-lj@G%Pa0V)>_@e$VN0j>+M~jSFpJkNtEF>zMr7-L(
zwc9IqRiP|-wzbDrYj%e`>kNx1*~(mFp4`{S;@&&(>Nz{>8r9OHQlaBTGIYiA``X$S
z#udCFt%E4%|FM-V(M@gu-$Wnq&04;4(dCi4&|Zjzhp)EOr&crqy=c*-j0Hs7z{>$w
zS>L+XWtx=a&0Fe_3Zma9dA<~VZl@b}@2(qn^XbOvn!c@Wys^HU)-$OPV>+)}<z-+V
zz1OFQGD)fGz8=-rRhQ6zGkp?`S}S^uehrwFN~^){b%f3pt&^9f{jZ}2({a)jVL1C|
zwFA+>KkT7XNT-?FEWfp5DK8|spAx?tCA#}xyb?`Rm|;BhIs9mrVz+iH48+_@tE3ao
ztV?Z<3eZ#v(X;B{;OHKZddh~Y8NU~c<@sRvF8wTh%4w4-Z0@L*suwkb4gN|^iu6gZ
z;77WPus2jZW#jZ115;sggDc;iZH3Y}uNn)veLQ?aTKh>$>ZOyKjegTjc*S~dBRq8G
zYH$-x$ZF*XAW9D&1d!s5e=}LRji7H@ePKElh4DHoE%@Sv1dNWzfBnODD=cxBCjjBH
zLc(zIM)L`|Bxd$Dwt<hyG6Ao^^jd^&jFvPdZ22fjUp1oLPP?P)g!G@-<)oY-f|UlG
zie@(JwvZYKr=MAj#@Ev!8SV9QxrmdM*ojYuzM~5Ujuw4t=cnoH)J59)Q|BSfMS}D^
z$a^jzj~oaB78y#ZKjXkaoEQ8RwO9T0Mm-7;I9*o&CU7i3hBTVar>-H*WnU=RR(}VK
zvJy}TX3QXvYzr+w*+H~oPZ6VI%ya@lGytiD5pIp&`JI*vVhhY=K>SYau|=bMjzDQS
zLu(h><`<|ut+}do8|QP1xrtG1<Ub#DlGjF819e8f_(H}TqISwUoC>VUrs5qq0*u@w
z8>BK>_Qe<rXXdO&XZhSU>BVdmH4u9Vnra_f8+;Sf>|x3g3xv7hh((jQBNjhtu3pf?
zX4R#`JcurHcq$LcXs*?3w5GI_;9MfA$n_ipcIAX+4o>ofQ-#ydJ^@bCDpgP|18dh6
zH??*>2Y?l=$*f<=0!U3IF=4e#9s0rt%?Kupek!C$WDqvvG=p~JvVwLQt5swNk>%4;
z(kz~aV5Q|+WpqwvFi2BG=-_59E0mt^da-oAz2GXNAos_!3T}PwSOAP6RHU+AsRcm3
zfXQ9aZgPq?hl!0U_r@({c+<m+nBGCt-jeZEb9Qvn>B$#UEO%3Hd~!6P;l^jcPq;sG
z-vw#0&A$Siz9p@8&wnWj_HS2<S#0UAy?&#oWm??Y;aR-!8225!$#~+~to2CmajQn5
z`0fOgqfU=KenB<Y?-bkxR)D{#VZz(HieP2hB5Mcd6T<y;AhKUy;vK`CuV_wDw&%Fj
ze+>;{8zPa|gGBdzrOZ~YM@taDY@J-KuktkZX1JoK9`u`YK;G2*r$;y<6pm9E)4?d=
zex`!Llv!Qrrh_suV2|*)rp;dPh{Gs*J67njEWgj_c8>qTE1v|o#FKt~`j#HO7|A<m
zBHb%lGq>MzQ=lz(>n$_%(VOnPQCPJOS@q(nD0rMV#A@}E<3pvtmu`kZ*p5n@w5Jn)
z{D}PH4D`p|+`7lN3Xh-!&C7U$3yP8jl)h^P3-P9YPuhE2l;i9VavA3Xy;_gAJAfy*
zR>5MO1dB<4w+4QEuokaxi5JDJxGg6L;;pY#*e8Re>YHYlAIkFWGUdIkdj&=!!Ka)P
zfI)&lO{}UMs%LF!e5L1YYh1+#!a2`t`lC|<p51L0@j^dw@ThjC0e;eYLZ;Ox_4iZO
z3B55~m>@QfoUV10_njE!IRB^LH*z64G>||OZ0?%Mn{Dzbw>7>9|Ko$@5qyD1ga}vw
zfo&K^rG}ZkA@2#OmY$9ycNm1}d^GmWx7$uL#Ot4AzE|&*J4tI$@RRr>99Gf??+9j~
z9Dn()IF1PgM<w|Jo~_O@Er>5#maG#l4>MD8ZNkbv#-Z3Ww%L&RIsym1uVfP%>YXSG
z?Pea$!ki4E%fafBK2)Ef5bkmE=DFAn(PnwOl_)x%%Gb*g*){nea}-e^_JZ?iqP?u=
ze@KwDh83oQM0-JyqvA7~K9oS37tl6Q+&_Z*^%MPNZg<~#ZJwiwA=eNy&nncu9o3^&
zyAw8hQTR)@S>ZPFdOySihoe^0&5#@_^Z75?wrqvaPk2_NLUeI(aEB(VZRHce7fwpV
zlf1><a_2;=C`*9QBj{^~>ml2Qp(GcvAb+ep7V+f*y9^5+2N*uf_CSaUtN4<5goikR
z_XsHt^V!uQB8%5#KG-_NWiqxftfloKYgbIbj8ofF5GO(eX0eK8p18+~bnPlf*N*ab
zEd`j-wIj7_j({d`PZlB&@>!WBs2La($r*<u{JB+BO1Y)>b%LgaXgIOI;=C6yCmftG
z8{LLF3nRLT*I&7$$?%6>MBg{ww`BR<lamfVf8)i~rtm{Z6`UVKTB_?cyB+Eu;;?og
znDfw_kST}gyc)4%X!<RaFdR;F{iq>3<GH)S8%v9`=Kg6qA@srg*Q7XgbUN)dtt|L~
z2tO7M*X;^yulzN<^YM2Ndjsw)6PT=DP;bI{%2jP$e2Kb8gvXDmSCEedxlxcoX>LOY
zsJnvZD;vW+bzYyg>O|t8mwdY5QP;D;ajTv7^s22lEz%u@3j1P#GKR(%f<{mpMBjYF
zQLM8?!q-4H%En9TsWTiVX)H%MT8@ZtdZXC1m8-}%nkic`YucdrF^gf~Rqu2gSt;<9
z8+;|&AZ?pQD`W;hDs~hVPDxgwM*0<fC0T`Hm(<g6;)1zhIn!<KY7QRVq;?1D@l7hz
zQiSTy?ou=#9`}4=s>5m_Bde@|r!Ru1^4Q3vjk5XxrStLfyk+rK@6TJZp_o`F<x$;p
z%NnW<pJM;9jG>at`r|a;YQ1tol|4&9;fX4gRhClI%PJg-6e}d=a9Am+Ojwyy76Jt6
zC<Y65{?F4ZZ0ca>dN!C-da*Hq2o0{29Qp<tH<5Ucayn<Nq69Cw{8CcYW0VJf{5gR(
zpT1@^Mbp+;i+uDTJKn3o(nS{G<gqbz4sJyr)|#(Fr2`k4QcYs5q4D+ojk!%*THGp-
zZi+cnT1&nV@9Ne2NwB%2=WNcEG4|D|%F-9QN=%c|PU3h`u831m(sWSyS#$^vBQ#(Y
z$jt}rSuj@G&zkFbGPt-HkD}$fWEI83Yd1Z_OaU597<mT6;dq%SO8#Qzj15Z9Os^*;
zs@wx4?>*!wIw!|kuz}T1yDE(kK<*JF1l<C^uJw+K_pc{lVNIE~Ci2|Yd(KPRtex<#
zR35&bM61~%A^bFVo)Q7A8O5-``N!p8wNmZW>}^S$V)Wh3@zpZ<h^U$Unuw0oZEV+2
zIXEX)R6BLWYBM%#CV)zNgTG4i_VG#7@7C%~?!=6h2xdNOMQ3fgqm<8df(2ax0J}lE
z+20QyKjzu{>@6qKQTLppi-ELUV;^ae=SXOA;|Q8BP(zaCfg(I3Pj&m@!{9qWNpBLR
ziK1ZTmt#^$O3J|-<-++QSS<&`aWI_+(2plPJRA!VVb=3kLfjO=_j4SSsPfGhh4Y@A
z(9@ATvs6QaBdSFP^Qwm|gfd{YzMc^6m#FN?fPOCv_QmNy&@cij2TNq0PDeim57zW}
zh<;-YAU`04jjSIM&|-y6$7ajMs_m3vgNW>V5$sg{^0X9u`V^q|21(f6F8L2|%vplB
zIN<E`Igl;*0be~R$PTj*nUME@#d3#CCr7i@O4{osriagYp9nMj9kKj~<m5R2tLF-(
zh0llm-Vw8`ux-3)X~Jvo*i}SGcPn&e^TAT|QC;dm4LA%s-I??e#ed==NThRCD%h)L
zL!z>a?MbW*adDuw;Zd*^e2?sc14k*27}x;`X7O?3;*$)3Bo82yZN#$wRk|!23e3g}
zao=syz-sVVZKOgrG1?_}NCuUq==8$|`Sfeqf(#paCPlg7Iz3R@6oFMkA?1&Lz1CYF
zN^18Kp6Z^qMg^aMb)m%y^Zj_wLhMawo#Q+*t^H=5>d+wgVo_x=@zEg9zW>CV6om&J
zd@_+|`K<ET1qvocFQW!+0T(rRj%RQbv^x9@#;PNOy&*VE9_5I~<6O;XPv^JQGRLyG
zY0lhbt5y!%F1&1%(H<Xi?w!q2%Kp_;RDp6JptWaW5N$|r5zVHjAyVaRWJ&>~N|^yD
z9aBXq$GyJoWu)h~YrkWZbAc%}T5Vl6z031J(+%K{&exM1yHHqxm>d|LuZI)oHP=S(
z4GsGxcAa$bVx^dlPNAa4!D;UeJW$>X<(`T`@=cWu)Fo^wY?(FDZJbxTKK++*ww4Nq
zZS(b2v^?j(A%Hw#o_Mji;z`wAnBPQxUwov*A_>VN8O%gQ;!^gTsQW664o+JrCCO^b
zaq$~sq=mfDKE*Uu!@hh+8v}%;-$-UvjXWV5BEn4+y9YHwzg{hDzWgP6-E6-+44aJ-
z3q7H>uxK>;QScNcAzAQ>thPtgO{`EbKp#I>TTZ}u0eY2Juj;3txSy&<4RKVMzJ{zK
z<43(t$5s*OsjRk%DhHFnY?fTci#7UVS?JUJ`Lx!t5y$ATB+27oN+a$S*y*RgTeL^C
z?A`hzLTo4@8Pt$4N_Bv&`|2gODc_N~Ry99Z)Gr2TGs)%0zZU?vW?0TdWdzply-&I~
zrEzhyH<t9~+)B!@o%a!0XCq~g$!>JU)5W!EXS<5u51p<yy`Ss!YAOH;FPq-aJa7P=
zq#9nKLMQObR<MJ^jpcc0M1mAfnSbtudm#&dCZdbdLRDX(om3&c%V3{?8<8Fy{r>ir
z#NmV6-J#|cB|t@H-+Cb_xMA0@jMg(dSijHO<91mgaAh1-&$6&u(IYcpX*_81Js~rW
zS5swKWDFxL5i2<18qY@Jwt+}6S{}99%}yg~ch7cM;2}pYJTd2m*!AZ^d0~~Kt>?Ff
z0kGYT&YHalM=wufqtiwXbh|mx?Q(Q{a77un^WUw8G|Tz1;2K=fxLi`fO*iyN`{WyB
zFT}1tmk+WKO-0Y}F_(1Dh?H_NOh%tK;1xe6!6tWO30Oi_(R$^NG7XSqg;tNnsHG3T
zO(M3T<V><$;pw!hr!fpfz->d{I0Wh5)*4;^|FW-y{FcyF1G*}p#S7NZlE;tTD|ffw
z19!ZlXeTpUkae6Pw)j#(3*L=C(&vb0PCxfY3{Lg<bhzl%-rxYfYW^a3?0$`--|xMm
znhvc50+rfZ8jnZQI7-gxLj-vbHS<0_)@w@VVhGc*<v!HZ8{;G19-$ov)cj@PFG*8Y
zkm-Ch9gbI2>4Eec(C|HEZZdaBL1SE(0qLjb2DV3mXax(vN-)CHDBJ9TLl$Bd61Oqf
z{5F67+)sXVi!Vb@P~9)cF42cZ(mEa98^&D=*(v&O!?ANu9{2KUFvN#7X6Ive>VmUy
zGD>bPRv(;ly4z}mZ(jCa<LitC2`*wQv1~9-cMhoL;h16^v@B~yq-N`sSm&SVbw1Cz
zE!o4eV_OC%uk-QHH|{$OZ|LbOEoqxJI>9HCToljSU>XaLo*J8NzV!$JQd#7oeUKP;
zDsUpemSs3M9Z%bUve++Lh+@yUV=P5Z8-8V08_~Q&s@oNf!JVw<!wsjCe{hx1OgcN>
zJrt9ZNs~KSrf@6lw^4-_8x@YAVWkSW=_!;}Djy8&t81OsX(u9S?jfaR6f>2|M5g)6
z+J@YCy|m3TeMzgAT&jp{rx>VdD#IM5z_P)B0Drj2$14~=a0^I!Su;!{_8y0-c|xg7
z9wOY<@DbV!AC|lI`J%NcObUg_0J(Ux(>oz!ZR1T|kaGrwgg>?VE4)LmvJr=7Y-$5`
zYS9$9%$w>~rLQMI2EWe^JC={c>&@uE!~MAgfhH5_#xseXx17#ey=GCOD=FShfC9UY
z<$+3g4$cqWbn+k!J9E`Q+@4KF&1Gz-P8;=^iuvF=`Y?d|ZgcVvTtdydF{ODkY!W2d
zrV+M|YiCjWxE)3f2x!?o>2!?u1FZCgVkqzF6Q&{vGE0^i*@CM)OnA@oU><$_jT06J
zieF>3G|+lo9K*>HVoomx3F+r@F`ES#6Ks4l9FIri(RYq;vRFZ{iB+^}?M}0?pZWd?
zj++0ST>uf`LDYn35yLQYj%IJp5yl9V_gCz^1x-C9TO|35NUHj)W*8mT`iHQ2avv}Q
zbYCyquN`$eO|>lH5v7&RD;a$7*`z9Jg<yM7CEEaqUC1+dBFIs*&-@V_ceB}j3{KE}
z*CW3L&zw@i<Y^7iPlHq4JS9tMsGaMkKw8-#N42nim;uu6_x%tZye*?^b*3Oe=WcuK
zJ;jZRQ?e{Pl0{+aFSUv9o^j!x<9-efjAb49^9O76Xq5HqbqgOTEg2ZeWZ9#W29MzE
z1MW8KZ3sn+;8JQR_uQe(uMgI5gF!HkV<I;I6Gl9v%?w71V7^F#4~zKSlVC~MW;MPK
zl)N9Tc?fNzT>-&jZvwx(X!hFTgo5V+TF0I{J4PcQ^O^SPxh{z%EHELn{1yeOM(?B>
z{e0RyZSvcAq9n~?fkycuky1^ua<iK%)ao?fup%r#O68`Of&x1P;pdm(p$@x~#&_E3
zMB!nt**_F5(I>hRo<k84rf*BdbnC{l5X~}z_g?WpDvHj>a5x(eWkxw*siRdQQ<eBN
z_}KJ3SzNA)ItUG7L$dk#CG;t*caNe*_oNpdiGw}_`oxiFwU>A{9Jh*hX`iwW4Q^e+
z=o})m#y&Jmw;jPY`_Smt27)^Kl&CSwilEY>|3d^j1_G8>K?FOvBV@10DF`a?27HSm
z*fSBdXf`6)(_F(%fp>jHy+gzpI_2J8z+jna5{h40+(S3Jkj{?L8ER&AV8Y<+NZx>z
z=j){JaPms)S7$4Q-nXARS8krQ)B{^fSbZYZiub7yTg*vksq+~SCM%A<McHwX8<Z@1
zJd#cDIT-JS42#z@SeYh^;L&uxT(ij~*Q75y9<qwZFXqXb$n7Vh>G~jbcEI|be~dai
zU2bf$RKg`d@CoIa1%gj+<@xLM;1Qr72&w>4sUzrq3PFDh0j)@>#~#vM-iW18!$u0`
zWGY8McGpw}x1>zAV$v9axi|Qi<sB-9Sp_9yN_T3<X<i{87rQ`#cEdTTA$X0iDDs*%
zj+L9{?W=3D>T(Y`;QQhtWPB5ye@x(vA?IMrB41;?$+Ay2#=|k3)|F&c`oN=FbMQ;p
z>~A#uP|P_RtxZP*8aE}nP5=$O^{^P@As%%`35`SIi)E2Vtg9olFE_tDFd-#PqussU
zRdSQz@@m5&yyp_b=kF3c%`cgxJYNsr5mG^3u)18VXQN;+`!M*p3eL%hz>OD4Res^c
zt8U;qR=NQ~)p$r*m7jTx6XBpw^wXI8U)n3PEVg=e<={^Eb%E>Z2^X|?>q&afJL*&C
zdj2Xcn^1fJia*aQ5V?&EM-qx|bATa@e(2i;&+5((lWjfZIzq|Oa8|F@c1ZI#A2t@{
z;P3f>Pa7t%0UaGQx3_fDQM<cy#5`F4?&$e+`D^8Es^&DVMg}gR5XYr%;O?ozF7=9(
z*mEyF!>bV~enl!t#;$j-H3uIQ>ZDY*2LtX{A@~dV4~&$-qo7(*OsO1`*Qj>X5J#Ec
z6-9}MsS0on;_o(d>W~(X#<Ri4VpUjDO$y<b9}u*U>nE_Vg-z#E4mfeCm-y82+GM^p
zA?-V1Wv}bqVx`2m0?XZsw<w~j7g04oqAf3?t+#omPP^N$l|5a`fuNB>pv{r&5k$##
z*D>TvH6Y_s61OPN1JKi=WDyOQ>ted0Ox(!=s!0TlAw&lD!}f74Y@ZwpyNH1P<ijsE
zR9=Y(m6S!}^U(~KzAr^HK*niCuR1e(4=S0jYu%{2YxGPmg(XfrEU6L?Hzd;Lrh*BJ
zl8%*cY`o+Gv}MumeB|Ov*Ih`a1anv|CJ+NsBO;*ZL_{6#c_6j-?X8mDQ{JF`r2ruc
zL#$dF@dTvben|#`NXCXjP0u+DDN`1$@}KYV%QjaoH^PmGvlqNykJn@UVCibKuj<^J
zy-csIOzJvQYcvRVoxJ9c4zes3#_yEKrLd_uhw}VRtsfdi(8Nri_xsayGUqXF8r{Y#
zs@`FrG%L3>O_#WV`)Z`ww+UVV+Lys;6ex2tar4hl`g#rB!+?a^3}QGi=rL`XQLOid
zb;(uAm~~fL#5<1VEYzajc@M#k8f8Az%%*B+rxxV~5Ij^0DSEWFFES2K36`tDd=!kx
zG=b9(j`cfbXOw?WX^%@znK=mZF?|pyG65Y<<Q%vq*Sqx`u{BAZHJGbuCZt$HG9fkb
zDWJ<j4s!0W9D7<eT)Mzf&{49bSwT4y10P^K`(gn&fveF5@dU{HoW@IK5uG<=1$#{F
zJFtRfdH5nS@%OsMxL6=VZkTeNh_2o*q`(o1at9M_AWt-hj9W;Qg2qPT1k}C(Vyn+t
zt<!G!E^}KcSa8Jp&Y&Q&o=HVQzmu~jt{}#St77*USq-4ql(DlfrUfA8#JWXIpC}>F
zyW9|hI@7OrcSNVzH2D201gWL-Yp>OEuF_5x3Asuf6Df<4QdAIvT@qf-*xj8>j-&G=
z73Vm)p+ool!3|=5&TKuHSY}kvQ7B>L$l$uT3Y+OOssF)Lzf-3A5<J4ik2Xr~Z3$x>
zYy}AWOukVVN2KcfqXM&r0^2;|b=cxD?FGws7l?IJmg42WbIyLeD+iawq2{MPyr?|M
zv6c$!-Lst^eGsROEQo$R%mRUGb~CH-Ltr(#w>;5BttRU<Dea~t#???AtRe}dBqw`W
z6FOWebp{P{@b7ef0fBCn%6tlDf%B@AZKG*?r=4?_;=z@UFQc^?`o;zt8|~*t8rw1+
zUr!h7l~Z*qQ$M)DIoS-6fe0{uFd^b&@IRamX48McP&F)kT&|-?Igl$BjyN8BUJ_P)
z8T0t6emP@_dOIiW^ZfPI=`TGvy7!3NxgGsod(Ea2cYI^EMn^yI5r}d<U9f+rS?e8!
zC1<U7uM{2Gl_AYjYqkUc|A5<(DzDRXn$v8q&uQLLcKil+t){?4J{Ni?ySN>woe-@|
z2;n~MhG5>)P5|mWo#Bz>+OI+O$}fuko%G+{*JT3n$j9%A^!shSa$a*Mxd`UD3mvwP
zYK{NY@jgpHVGhd@pOd%2V^?l=pqZ<WE8s}a1GAf4h(UK?#5mJ_U9sKHe1*n33gKE|
z`oHu^mvDh$rTvij0_U8^f+Pv#pOfggUu5-^c_VBN0;Jyop#$j$gBtS=Jclr(tR#M)
zubw}+4^?z~?bF?z`@;L&s0JVvCtc{X`9!Tk_#AV<c)^mA$IB9oFgaij)dz=C3}b0`
zhWZeXFNhAx20vNJ7hl3YuIXVPZmrVlSQ12_*fP?(L<5g%osN;**@;e%+jX)}rGgsj
zaxn)jLI(ojxPHNK5xt@xHoMOJI)=94nPhW`y>ZXNKD%6t@Se4Z5WRg|7mqH{pHaUV
z;&Mv9cbTTzJ|j#lzV?s%rBeDfBn(M>69Fp`L3FkBMw7JCsrRNI9(51xqn)UzecM^;
z+>yZB21DjlNPf&$Dq}$2zWfCfgUX2UFxoqxCd$vP9p$$aA@Y}2fF-d)ZgWzIT_v>c
z@ZTZ0AHHnYD~wN~xih9LD|}Y#D06CqDyqB+(!YFl<d%LQQyC2d8m1a6dB~h8!TEom
zf*^^&n8IQ3LV&nI03Ru#uLL-6i4Ip#$09Jm;JyKT03BrofGCP~()U;Ko&ux$L*~M%
z-Bh4@)_Ojhz8lXz7K^eIkM(gc!Tm<jx{4AzS8F8!q9L!e)W>*jn(n|U56j+Ah)*h8
zrkBw;bDrF4EsbxI)lB(*sjZ`!>kbY9ti<96BrErOpkn%4*HEmZOe?popRjhq8XDi=
zGAf7=5DgetO?VO1yL_rO*q2TkEKXU1Ryw`$0aXXZu`EDeDX>a-2)aytoDU%=0)}Of
zzuYU!FUNHV4<RmWDrFuTAcVkSh_SfRjo>#mx%O;8*d?;)cIE^3{6_3-i%Mi%!1&kk
zDz#>d+nlp4-!TwAq{qK-qjL4>-d;)W3e_yS;IsWFW&kitYleqWmzGAOhP)}hapi$_
z-x)j0BfRI+6D(_e-o@Ut#U~mMS;djy?(;_PKzkogbyZ6EF9iPc?9?R?vk>TsQAqYU
zK1D|Gcvc-fTODk6&^p{?v23qK!}BN^Vkjhk6b*<xwUT^(wL0f9!dNUX$VC<*S>Jz;
zEU;RUrAICY_OADS4k6sx3Ieji@4YCjM@RjGl56)%^eBv;>3lE|4~lr0Eb1Y-p0B1C
z^YMs?QZP6Xc35y9I4uXQ6X>W~4)71r(a91*)o}8a0?ZbRC6{+M^i9Ui>~yr0TXSiU
z;Z$EB<^_aWjS%vIO9xZwc*?A?$cB37Kz>r11V02-s%VIwvSwm<t~cNeS*MX07zknt
zo~Otq7(*a>{Fud0QX?#=6j$(lA5ceOwT|!2N+6;yz-&rR;l<3!8l;G-Ixz3cmu`>x
zXqzgI;_Gs7pFjK>(4tRhQEKaitpnkz57XpwIuDXdYO(@z=W-lR7b9w|a>hXV{cw1E
zD-MsnL()rgc&u}HPv>xVtT2Pc<Nu&6z9~Hgi^nR9_ioSPy}Prxa>1R*@X`4iUXf(a
z___6bU$A8eaMY^zof9c_lI#kUN%jHa@XfUyO`V*19!7x984n`RB;}1#dk8D-F-AK`
z*(lEf_gf2!nEQ^%)B3}w762@(N7PiS(sMX)DvqR4B$dxvsKrs#d!%Jqz$X(o{A9u#
zmWqsQG%S^GFUTG$^%Uh#l`?v0p;}F1Xqiw%kI7X?zY8lf@iA0na}i!eh1YY+9M*oX
z(kNyJa^S#sX?^9{3$=E?OMg-JY^}>J#4MYJFiWdvCic=QgtcztHC#)!EN1ltFbi9O
zaCB3V1eJ<Yd1a%3L$)|Uutsj1Rym2Mm07-WCSyP;IUHCu&>PTdmI-dWx2)%IVAHH>
zlrXbJvDDt*ESLgF7frpsYs==v(xzpjU>d+$G~Gutn-{YhcZ?ymfglsuc7{s5wrqUJ
zSP7@6@{gKsE@14@1;-M`a#<lgIH+Dk&*PIvRi*W*qvkO54&c}Xrl?aojE+B*%^tEb
zbp6rQfIje?bsi)^or++C+tNUzmf%9Iruh&ZvDNE`_`H=<x2CXVwchl}2Frw13HxBx
zu)ahWcm!eDgIfJn{R^~yyc$NyWjyw%hhnp(bcF4SZoAR>1Qzz4N+2FuLCw6|YqpwU
z{SX6uLd;a|_NkWIG78~-rPXeoL^YxkA6m;)D{e};_fTnS;Vf{+3RSkHwz#2!YVgC{
zef8SM@YrlSj5;T9Z|W;=IE~1pl314Qr#r5(d;3`!Eoc}WPe94d_jM4fnQb~BKb91P
z*H*n-J>4Gc-)#ST=xt_a-PciYUo4PGpCe=AdlLY!2M^W(bIT>)CIiwIvExNIwjj7~
z-T9W~M4()@Z;NEC&!~uOt0RW#*0bw(GX=gT!+FLxO=0xlrE(rn<#^6lp)(S8q`Q#Q
zv?R)WeW7uvHlAZ~`;qh-p;jOf1^k9WczMkgTGL#;@-&sliuP2Qkg0IjDnm<^Ry!yR
zUA|YC5%g+&70t$TYS+OO&;ZB|G)@h$w|xgSh57JYMngt~PvfW-9gr0Z9Qcd)%&cc@
z4{9on4O}l?u(x!b#XHSorMTKr-{m_ervPG(Za)PlhgXNQ4bkwLXjik$DD1kuIuOqf
z!u?XB=hFd$rxo5^@B|^+R&mUZl<7B&_^21=N>RVp1KEOs&kq|t`D`8YqP4)#kR|&9
zIoz!&D$RhwF^j>ocs{g{&(J!(#t>W?Q(C>9%2dJjQ<=LD8RF||K($;od^>$Op}@X*
z0=b)O1}`CFvNLTQJ-n1$UaOO%4di9sEaO6%_D&y_1hCI=KhmepZ2wDr=7Q)xA|1r%
zQ+x*1UMpXWjraHBBpD5<dkKDXF3n-sKr1#oZjWiO0f~^Vp%jN$L$Od)>mATg_>TO8
z?m?@6gesatupnHSHP&3WDNe2um{sP+h$HGa%+2{AV!)n4yvKuZh+t=o{_0bO^uQWe
zFu8dz53LjW0-9rBp%V0ENsUDM6+}SAPmRK#>+BKu{d?*gf{@v$A4KpT8hsitQJw`0
z@o7LP#UDb0l)VuC`xy`!uM2W))eW%igz%q^O^}Vpv)K7L>Na~yI9Mz4?X9yG$72Q9
z-9Z5BYl<2r=sv+DpqCU3se>4K5h~PBAG`sqj8h2a-EH~+>PF}URZ_)v33aR81FX~g
zv}1^mz9sb0Hy?-M`<Lk@BKcyfU}#kZZFSvU2MjZHz~qiPAWKduf;rn~?m#>a-tInX
zdZelNZKOnocT*xmzY<w+@{(OyO!I+XSxnN_HXI(;t1E`ih(@DxR$3-qE>=k@aT$*%
zW3r>B>0$*Co+1^+4F}XcftRb1%p2sj5#C)bqf~5^I@C%Lp`ka&!z*r=9fOvG*(}>h
z1auI9A>vJa{Yc#EHe&^HU?nsl!e+SEOsTDgg1UT^{5_s34-;n9+Vj-9nPow()ooJn
z*tA^I>ed$0FKZOlSl%~+9;;BIS%o$mRjcb+K2zbw&Gr~ZeQsX5%97z{_wS&$nVt1m
zK*f#T<eL~jlP_uf{GGA+*JLXHi+F5t&1L1J@6$1Qg0g9BVD*>s=#O({5&Kf_=Baxv
zwHbwiS|b>JRHWd?2lrkc3*=yvVd(eoaiu&zisKX5Q=|~x;axwf2Z)#bcr!`@>0O@o
zQ;<8>BCoeQ3@|gQd1ek#PD6e@Lzy5vISSne$Q-5|NEOEJ3it)BI_a72^diS<h2rPB
z?Pov!CBH|5WWcUY;`a??k0^3Shi;C?>GiMIRIJ=QoBZ@sbyv%Ah`i04U@{=a7fc33
zz$fu^neoJ;eA`-oRM9l4u@mG?4t()lL}AlWa5|2>&W@T=de~2{5G2omI@WC*soS|x
zQp<e}DWLHMuQALd`SO0T;*U|vmnW~Tr=SWFAh;1oKwxu%v3R?^_KDn+KvU0-&e}bE
znWIKl|5oj^<B9Z%z(rD3nO2f^r%=1$o=&p8ne|jk=F?cbis^hcjuWTAkahTkg{aBe
z6;w_j#-s1HE)fpG(EbQE%>2eK(U!1&OmxbI29e)PC}0scV8{W@mcoJ=-$duQCtTz)
zl29&sME0apbiGy5$mUOtllAEQ159)+X-SkkBhHpc9ja+yRD<MrntbH!31Wx(e-wt`
z1)M~*6?z(MK_U27frhjvWjI~JUbo(ldc7mYz&xQ=*As+|+GU~!D3o3$Sp)dUm`lCd
z39%*vI9LE!r{(kWk2v<z?p9cjPP=TndCWr`ST@FN0$fW7d$krklvnT6`h8`4t`QOM
zE5jGkpnaK1x=_IL7?w;stCu8_LP87wB0{Bbryd>EJIecZqW2Kwtw<S!U@n=Z4qm^M
z@#p9Z=$vzaU_{kJdDLkizhXBLF62Q2MbYVZy(bq4g$fX*9Sh`?gIn@)hGnjC&0(Vn
zXPt5BDQBB=`lM4QQW?3Ej@wF|FeV38xAqjCadkXw{ET<)rzQW{PFSrg7eqJVR;9T?
znj_U*!AqpNErkR3CI4xxp2?FGPfnhE**iJyrkRrBc*&S@Lyo3}vp5=|d>oA@aABjX
z2t1b5JMu6!JCXA;HPPb!K}>B+9%_lUwIpC`$zFPu2w|FsU5!dK31q=Nz*<z2VYPts
zB!yi>vhoIKU}kz(nDDEvFySUEOn8SChBn+i+2|}*XIj@aV?DF8`vqoa_b#)un`?G<
zvu5X)=DWQ7Wtq~;NuF7ZAx;S-df%ibU%@r?rsd$baxfXj5CuWZLp(+u^4O_tpUkdG
zvailqh#eL>KthTjhBVqyt1beiatQw3f&}Qc>hvK84@{%mGGK6XY(SC!=yQrZjIYdk
z1CPCPpCP55A|)ige7nnEXSAVo76V&Us+g$GO`Kj9ae69oT#@}cA`(Ny<1S^48o?Nz
zbX79&cF`PCJNd`nFaZ&I^2k5ryR*p@PH%Fyd==gaigbO!n3sA#a6Xm*c5UZ5E2ZL{
zCjm7ujQBYtE%h5M^;@b*zwx5<fpK2vZz!{-CA8`@7I_#Pw8P?WB^HvWP>?lM@gPCO
z*QL*_e&(W(!LedgWEJ7GXfk{PVP&EZgJc}DW#%s>!SlWF5ZM+n;&fLELRe#GxeoFz
zmUFNh0HH8Nyd}*qLusYMzC$g^a0(baCiCX$vt9bCbgmldvCC(-tjC_4l3uac`c}+*
zm1osk(bpG7^5tN?8smK}q6uI~DmY52TGWbSs)@l5@MN~%u1w}rc&7B*DJw`j&&rlo
zVI96`BYVj(y1p8{hikd)o~37%O6z*|9nSVvN@XF`snY)uQW4l6=zHJJFeLq>o^`-o
zwV*LEfEU6gv+^Ke5MMy?xxzizt`5iAmV(===x|%~e`&ZK9d4WcFAZnZyF>q%hBNx{
zl>RRTw{5g<m;R5#X?f4+|2#TXNXTwb-OW1jO|)LZt;4H<>(M{zB}lmj_XV7JNDZsO
z_1M@bq&+dIF$PyOfTtngHr1I))$8RJt{_ku5lbH1kWg1?krx45grFq1+AFpdQJO0t
z{6rVvzk5!*>7L*zbCYSbPv|SMa58)@@0oj^qO@4*?Ov?THg#K#@XgEq>$>q1yec~f
z7#)`LRwe@5Nf>rWTqcMY?l4fuKi3oKb3{E&Cunn~CqaN?iSH~eW<ol>AMrEJpBqhi
zhQha|sB2IpKm~yQB*72c4f=Yw#%4v@48k^gl}x(x?C*nW$p#}>5iH@{-^b;+Cj4!t
zoYA0N?S^~@h(VB6W8(!3BJ<2|0;9Xw=+=C==L>^#K22aw-~IRxB7hr*e}*HyTAwfC
zTgr`|mhqiisCAb){Jz=e<jeOt^z}LUx_u6veKy56XuaTU_+dI95zWyhz&XSEuI7Z{
zLH)RQr1*^4ZCW@jVxpo(lff`qxE%PoC=(8I^6D2U>u(4%n(|`a9Xm7{BrhOMw|PZ|
zX+o5t^*nJ4DBD2o0o~bIqc{fidC;95HS0%uxF9jVVS^#j`A!2>9PW`|{^rAIFi)ne
zj~SyNL@;yg$NTeqO&7<iaK=exAh^9!X)78oGam$d`}cG)=%r1+dvemU9USv!6x_7p
ztCaLf%u7wPR-K-C^dYw#dBmr%P=!o;sZR~s%)Y-wQ=3&+q4%VU*$R=}#R+czz|!q?
zt<=234A%j1bId;M^fH#14^9<E-Vu|<=yRaBOJ*Fvptv=}1k1P*X1NY#3ylvm3lGD;
z_0%fLxm6+>h+tsUVua4{A8yW(Uh~60aJP55INVDS+pak7+4r&<WTsuAOP1iq>w{V+
zIxcD@%cAxyqXS2Am;;4#gGPyy;gYVbUV1%(R#H&01$AH9|1(<gEJ`x*D@8e;qw2zJ
z!IB(q=+1<lm70A*(&m-R_F;;d&E-(KfqxZs5$4;$FM{p!1~=$&r2!RcHod}|NM1*|
z)OCZL_YAW1tBgZwrdcr--6#YuM2K2mvWRWIZt<BsT&`1U7D<y*jz5APb`K-)fP@zi
zx~J`iJlqn}tZ2YtESFyIghzTd@ib~-&6^M9;WvF##l;t*fS5~)^mAVP|B7Vl<s?%t
zgG@bzOq2}v{M=;fZAd1dky}M^*h)kSXd8IJ&uuUxZKB6C>aXbNglxv^2V0q@5=Z$)
zyN_1{v{{l4Yk4(Dg|);aC9I_u%?eKM)gW40rW$gK2Mh~RA-A-UVUGI(u2L$D7T`;}
zOlXVqr2(*_#@#i&n32hAGh-tdb(z_$Sn9HWIs~MxnC3@qX&%oi%aP)!()}@ldhs$+
z1uqx{WDdrkbJ*OJ@N*6bR+AJqMny7%hhgJk__W|_5}D(Uxd!bDeQLjcY6~Nm;+XVp
zM<Y61nqwSJ>FHtyfZEdZ;3TP;<i2^MOh7k~5oFT5abgWKX#%O;wXm$2rM8;8<XXDO
zvlzIYhmtf838AgQiBtr#y0fbh4sH3YA*yq?-V!y(1U7?2rt(CfMyvm?9<0#{Qs#jk
zoj#35#78HbiplOsmd0d<<Crx66(jPbmM5wnn&*i|Dc{j!SyECvV=APmsAXG=UHFS#
zD8*8GwOKF)pb*+--Aqy021NPUe$t~bv>kv)?}RMusUEgdH*KrHD=)C*e;eks(Od;y
zd4WZTZQFM}L}@x-tYv8vrm>Wz0Yq7gk;W)&fxtFn6dkagkFOT-N5yomk8|3z4t1vu
z%1Uq3nF^8gFsOk+A~SBPc^AE)seNXyD#}IV04S1@<|3EHP5sk!G8#`%acy_|%{qJi
z`Lx%xs^ZfnyfXz+XNfAa7+9V)qPJ+2!@j*h^|1~-yMNMv-kWygjd1RK$nOw^kPP_|
zfAkJt!BJj!$AVFKz&s?GV_<+bjGuHY{0NT2sG02S&7tV?28vDpCVOH*I#|ev(D2xD
zf+#v4!;KdLDk5u;?=$96NWKcQj>F(Zp)-vEv`5`qqk)bZSbs5!2)5xE4r@wecNrxM
zf@^!LsAaIGfzPSWKu~Zo4zz&wqpWaWyBme+O_QAim(#p-B%li2D2I}E_~VoyY5LuE
zImky4Qi;!Fm{A(JhmBu$h&e0OXv<Zqkyhp=8$K=IAsgPbB0y#I5eH}yMQ$~iB`o~e
zVl+;X(Zz}kM?PMB)%?ZFh%lZ-g7McPLmNDX)B4AO;6wj-);ghmKoK$Y&=czgQ5>B#
z%18jW0N8N&NT2f!Uy?`e=9IMICX&`KMmLk7;T<Jt;^*N&Ft9X?+qn3P^KPi}?(JCD
zhkO&x&Em+!#*tZchyQSUK`6iU8RE!x_T5^Zj)L9bYMFe*u;KWP9Y3H7t{+8sSe8r_
zYF;f#w$#!g$bj$mhyXM%khrSrIjUp!&R%A9J)=5K32ztib#UgeE)wXssMq*qA0xPD
zq3M}udIC+4qruZyj_Tn<F*@|B41K|fLVYI()iVV;-Gf)s3d(<RYp_E*C|u#&OvP<3
zyOAkK&$htZlf`*qFzjikO{mC^aqNU7y5A<WRFGN7DT2p*XQwLsz|K8>T)Hh`D?U;>
zPZ8<#9nUYOb9{v*_Qu;ZW-ptV9!Y?ToG6!P79X{b+pU*;V$s)r@p;_|{Zzo`y>hed
zpUM<V-pL%kIXG>#YAsi-!wRdl`qW=7MU&;rt;~Mk8|P9a>7uM3p7hA*p7dVVdJVr)
z$(Gaf>Ybs+xJ%t;Q5M{_(b8}m=E@?3hupH?m0TR-U2Ui!p(}uTj_er3Z?-H^nY7``
zl!M<ZH(O8hkuB%+u5LEHH=RLzc)YW(r<XNJ=${PgyU&z&7n2DnRqz6k(_cf?iwIEm
zD#kUWEbyKHr;kdHVHsubNYmw}$f~p}7bmS2_Br?jc9CLiz-mh#hvj!epep)WG`n6y
zAB-L+DbHCp$p;w($}@p;i2~ozD-M}{^h%TX(QC?NquBaVd1bYLFizM?h)k`B*womH
zh)h`|@fAs|MKsx(Dx>AfLaO4BDx+pvidbq<@|1=REDuDPSrR-w&&*P32()+pX*?fI
zC%5H@pc16sn&Pk8U98ZyJ$Frb*qd8CKdZZzt)(`zf9{5L9+%Sr(8~Zg2<y>dHb^G}
zq?e7Qmx-m@Yw*^prW!NQ)XRhVp*qDa`5kY5x9-div$K7*huO7z@ne2Q_p)k$ErGud
zgz(qGK)gN4uBeftrbdpc!fw5N?9V?r4Ltyavl_lp?Z0mJdfxnY=pFW6v)in>TR`}t
z*{{<&93b@`<oBH@Nalfu)}5w1lL&#`9bayB>zOZm>*-Tp_S0(Bi@dV6?ahC>wdKrz
zNdTOg2foZBU*^kh=*c|kdAJdB-JJ09B=nZ`aZ&HoGrlcP-iasgKUpso?4_<Ho1@Zg
z6^$bcouN;9?rxGy&s^zxCR*1$g~*8cm^kz^ap-5_(9gtS1`~&VCO-0U+d<cU?;~r7
zTJ?~PkbUn>m5<f<z3Zlmk_1QT2p>GcKiYlX-5rdjIvGau^%V`Vpd1W0x{X&9=wbnZ
zi<MX;ABoZv4d<Z$!*hsSzjE~y)f}w;@rz|VTu$I>%lEE87?MM2#97h23~I?LCc}i)
zAj>QH$s!rd(6s?B+o!-lLD3*DmdViBTrO|)5(`?w9&v$65<G`46pMtwC67pf0~q=e
zL2782os^G`UoI8Ipgn%9+AV46@5qp$-BOG!q?DMwSi;B%*X`rhN%20S56Fg5JY)YI
zeXE{Ok#mK4H&1Y{Lvy|p|K{S?Z!uc8V4>gnc>af(-ByOYttchz7XLQf7qOgT`dBe|
z;j*i&@Ks^XWEs0et%X$?KgYw%azJ+VjH{gjSKC-Jg1leSxIC^QQMa0+NSskbZa&pM
zx_}#&3P!vJfk4TpPHpoAI#0*r)nXkF$14L`L3b{?=$WE&N6e<F*smTP6t&i!XHZPp
zH>;)MT#e}EZA8WxsN3$(Lf*tTql@!lMkDxEn>8W$o;qH5U^}$0N+nCTK$AOhJ}3R~
zp$(pQ!rhC|+6xb8suLrBCyZ7>fjp$$h_<$N&6elyye(7q=USm0trPopt$nWMi5<K_
zxXczdY)u^HY-7XTNH-DHX3FZ4<q&SPxEm>q4{DKdRuN{85G&qj<RgTzPYBjteHh2F
zXD$efg;D}v4)GP?MhsCV9xum(#8XDvHRPlluZI4%skV+B>Ht{@2~Ei;_yk$w;Iz>T
z>+Slnxzw}bi*=Ht{E4Id#V8(+$%2}RqS;h1D9gBOUBf~rt*)b9QgpJE^(qK;$Rrnm
zLKlIOd;|(L0x4UZ$$W4<7>(TeZ|k^rcFO%qDmN|iQJ4mb$5+>MN@taapmMuTG^7=b
z`P_(FSq<SBGEw;3ay9TxI6!k$(`Ou(nyyy<LhU|1U9|4xslExCr6O#HF#wA|biYhV
zH(JHG$Vw1!B4Yz*31Mzz@aII?a{_Zi9D}{`!5-<b@xmh)PwJWs9TF|qvt^*>AqidG
zdgcRX_96+t_4KI^o*9rN{L^aH2hS`@N_#3>+ujB-8<SGv>DHFF1hYDETF%BEVloS{
z7$eVK^GG*Wkj;%7?WqPi@j#*p^Im~$gq(3u{*#96H$+A^PCC0_+PwnZCD~V@uC_WZ
zal(^7YMN)^>uRfa;^n;1;Jn1od7;U9iJ$XAlk*Zk=Y=NcB^jI-rZ_Lj;Jnb}ymKEn
zF$<>vzVL~Mm%4a>O|8%B(#vOc>E*PM#Xg%#FUzJ9=dh{7PMb<E(?%wZ7oG(WE%TWq
zK@+`)A}I?4BN4MMxN&+2gk}Bo5g;ac0Cxs;QhiXU^%&=V@F?hoWN8Zzn>|<(qYnD9
z3%0}nl4*<FV?B)<jZ`MJlw7z=YV9~`A45A}yaYH_Yi;bGXt05dCTBNoUp2h4(*q-B
z-eCA{O!N+Td(CS0%JVcB-P>8KSkJ+G;_kKd`)BS-gf%W2t*@-!hj=3ZT{O<ulL?E$
zx1Qj0b!t!*?vuD;hDZ(*{1CvO#RP-xTZ6B#Lh4%sv^FJnm)a|6G{C_1seS1&HFfkc
zp?ij8A$xlZGB&~^*rRBKOuN94^pp<_P2+A+!cMAQ5EY4K0l_YVnlRE?ri`~S)!|d2
z^&EaTRc9vShN_+kMUVy85EMLB^tqQ!`DJ>8?8be1BbQ#PRBg&v=Qob$ym7c7*fRln
za<#g^8zGccjt<^LNBx)ZZsER!na-1OECa^_P_G&Gej(yBZ1&D-9R-%KLHscVSU)*E
z#vm{p3>*CfLS~Q#tnh|uW#7zR5h=p*ZP|outAe9wJbv%aTO~42;Yii365>h4@1x=6
zc=*mWv#$oLcl6r^(Hgt0ql0$6S2SXyv4CfY(;Fa6D_*AX`{Lq+#TSw}Vj6Cvq<@4A
zp8lmmc%@^>J!OLH9PdqtD016d`<sM|J8N}%O-T-o8Z7u*$wv_^Ul#-sMSv*jasU4N
zTM}#RV+NgAdlp(jh;ABpPbF1#SH#A%u$?jH7?Sp9P^$=*GUGW)nKbX;wx19g;2+=5
zTK!dye!~3E-A~IL2MIY_<mG?H%@!YLwt7>!r^!L+HPcV=_v6PIbGz2=9Gtvi5gAKH
zGWm8|(jq*&B#e#*r-C}I(pY(jJH?$^d3^$(R#?-!Q0AIY+P8XMuavORRLDn@cyUDr
z-l-=M81@QdbvIzk61!U}#H)H1itF2CK?<G&4?biq+ZTuPD>^Lv;FDn;&v9TjdwX3J
zgj)rC$$Sxz>PVgRXDRrr_B;5IcS2mcGWia=uCh06>|<sj!{xJcQhyb-$Vlz89Sg}g
z=|u;PR`LGz1g^*a_+Z2<g0F<y0U#$U!*q7OxWSjcpA^S&Oma)`oHx)I0u<1vSp}g<
z<;W;RfnNl(Xt7TA?e8W?A}X<BR~HU_v&d)mQS*oi>EL^OeBh^m><&x3UybI$=}X?P
z2>Z1rZ5oV#sKM3v>emPBw?9^H9^5?PohXP-OtBwSrS)9ImNSE|7WzyeVh0f5DC!Cd
zh?#pxlplSp&J`9=&HjO$PJS(BqfL1nSk%SZ3U@Z!$5i7p*>O1`ZCK7eLXY54ZQ#c7
z&2T&((d&oEoe1DyNkIv<ZVzLAjX4D)h#kP`v<^s>e44G1$10QwgZpe6-On}IL}%oz
zBbYpWBjcBIHsKU9X3!#@UQFjiVdveU6Gu!X)(L1J<woEau6)t9v1NkBBP2Tle4v8O
zbdn2U9HEUAiU715;tdNHvd{`i+Rl+#pLb#s(|7AVL!zjnz%5xmkd4{+8;X{G*f&Ir
z*xdMJi@BkLut2_ut-5*!)TE(rHxLALOjuDmGW^F+fw5~8S>LB$A&P3fUhNl6xq|@{
zstqg2uS5cjC3*li(wXCjMBw9x!^ZpWwJ`t<4HE-R5*M#9e9?1r6OGZ7#2!G4G9^+m
zcoJ8iH4u@C0krtGOUJ@fX`rap1+FEE0Yh6^$_$1qPf7?bUC*M3RG!hhC6d#ogXM5K
zhJaxZYve93JG>;yeDh6c4aJ=r8m!b98<xfgM`@B0am#2d?T)f2Uyg>jH29tuC)x~v
zsLKAnygQ{YuNy6@cETJs>QVo7)OjU;_L`k$t>2`{+@ISrAxw$MTCPY;&kKK|ULK%9
zBt&H2%(n^Ofm*L@0HpF?74MkQ2c%v+7nBnwb!9Onln1l=`Fiycuhae*h*gK}9L(hz
z+JnpU!D;^&>%y4PyKc+q9VW2eiOzX8VR?qe>?dKY0Q!om0H^?8thc~T1oM;Sgt9R0
zJ>^bgZM*#N>x1RnKd*x2B2HjM10UpoC}mI(TLE2U?<(e4Zp5^S$3#l3oDpwuV+&_3
zrrMf4IoPq!9R`B)rP$(rXFs>nSi?e8{sdztO&Kvvx}wft!;2ef`zTh-O}0GDDhi^4
z#o&eY9V{51$%klh?NSIlf*HusdL5md(bd;50|DmgiXtC7UfDt9SO~$)3*XK`z*^P$
zi3u=2LE>;+)C3`|IFM{*P-;4-tK(=f-|SL1<1nCt_7hv<l)&y*T$0@?(ra#yl9gk{
zL_Q_k&}JAN64F)NFYze#;??QVhebSc)qSrZo<E~-Ou0=riS`#7hU#0iy=7|~L}Vvz
z({L$~)YtmrONI|LAV!|s9mJW0u~w9(2hoXglOx@RTH_X!aFGD?p0&fte^b#(pTI$c
zuEmhdEATQq?L(GOr4`vW`M@yz3d_`6lw>qPJr|@Y*10!|uX(5Lpd&VVXG%kTY&5(a
z%;)17);az%bd-d2T2l!fQ817uHeIBIwV$<)A~Q%jHYqxGoVKFmjo(M(>+wACbTNDQ
zq0LAAw$1~ak;drc)oQvS!?6CDd?P#P-pP{wA;|Bhx4~dOVw=wMY4SWpf--9__e3IX
zg}6MJxbKtpdb3rEx+%zSqvQ$vkYfu{x$y)wNv}u@+}wz8V0^8kgKwk1(QcV;w6sil
zJ-#g?(8Aa;(j>lN<O-MJF`(`U%L*7%?re}O3nPJOs*xH{IjuMWyuSRwmOapmq}E4l
zlcCvYQJ8J2k5WicL<-hI3V>3~&?BrR4H$cAID^P4O<)*t<ic36P)6ortaNS7F=?;o
z;#jG|G=*Nv)xphh;ugWhn$Hz*lfjfMfK07kW@AzSXe`oZy4hbnml{44N44(ZFG^7D
zKau3=d`#`NjrIx2Iy$NyH=?k8)I2#2gKDKBrkY|tSzdkw$C(H%rq=t31a=uGgAv~k
zUeM6d_xK~6qw)2WG!lYl(^C#W39LYuYr2Qr7uOeui>oUlZNH4A<)~k8lmm31614&}
zs|1eoioC@<&@3rai;lYy#HMDi7zR9KcTwq#2H$9&Md2^qCNIF@kun-242z1ILka-Q
ztp8H)G+BgOOeJF$)sGs{$#K-}oxJo#d<J+yN2}Gy#RTeG!-~m={FDT<uE0>G*n}53
z@I!a=;c=vjZe`+jrCh~zKTe>2v)3!$Cxgsv`@^eIFq)3$e@=iB%h@;~+=r(&_xXT7
zf3PmA+(~}pk}g<S&9oVCC<%`w`J@eJj2!&=cg||aH~%GZ*?$Zs`wSL5!eN%fXs7&!
zv-B?`z8~4-w{rGOWH{vBT9W^sC(M7DviXb68g%IRLo(-JG#J4@SnS`MU{3QGNJ0<Q
zAjUF<dGX?TfEXb)>ftt;KfWv^F0d*Q!KETsWF5<Zs_X-+^_pkxe*5GYJueCcvbC;M
zS}CY@BRXuqJdB=#1)PFbZd!G+FK|I(0}F+!bLTJ}`Ho$cb?4D^WNru8PJy7foL-1v
zIx=;(_`rO)@3En=EqPEEs|U$Tn5EsQvL8oe^twm2?oopog@fRs#4fzE!A(a$;G~Cs
zz;Dx0uwE|7Iz}3HK227505~48N}oR%AyeUxl9fX?Z2Fd~=?@iRabKQ{hLb*$hVR_2
z-DJd&9m&?JSX2k<#S1&1;FU1Ky!*85r{{8CM(OS5XM?LCDt9!bPlx5<t<`cowy_tc
zM}za}Y?^%B7h?hX2&v}DVhNW*U8B+OHk%C(A8E)_147%xj{z;i1APogAH}$kqGVL`
z@nI(!&~EQoq3gBU9wJ?3C!%WQWl6S)MfUErk6+3BgQ#`d>FoP(5dsMKO2v<GV|*Fy
zt8jE1>BaaUWjyvAUr?H=SG9Bw0u1}ENlZ;9Cf|TdU6ncpO5QpA<xOABClb5g41Lp>
zs#B!t27|CQEU`0!sR?XXG%w!Sc)TnEwdHb`M=;4Q6$)mb9s-GkZU7AfIJIHo=pG>X
z;Hy+%hx1G&q4A3JgLB44E(9)R6sX5v9i~(Shfl6BuuGtvmBhMF7gU0mJCSB?$I{MD
zgqo(W6G<X;R-Rwx98|C~R++)Th@?Rv6-}C>@jRJMrqr^m!_mbVpj3gX!>BpTBB@<|
z)2ERZ>smdwAl=HkjYf*EmmqTq@`rf&4-%l{J_VS@@6~KD$I8sB!BEYFi*Yh@biP(I
z;qsi4c{R{7=^NPlyir5FNh(?xT`ia0R@|32&eSGRM?;ngZfj~TcXivBPCrU6JJ#5D
zn|BM!q6KE+hdNIw0owp@GM$ml_7j6>G}WLT1BEv&EwK31B?9XQw()9(ao`EpMCZV1
zH?CLa#~0c)8&*(iUexvU1-nsLdlU8geSEmAc^vlI%|2dwrG?mP%KVRIqE-1OXnL<W
z{*roxDVktDwXni&(f2hiwiM|TBf*SYLRay7XMBpI-p_U6gAH&GMKUzDdCYq%FdJ?%
zGS78knP5fXa62Ci*1+^Y6X`N@==ow?S<h`Pp|1$Bn$@nuNVnR}j&LpioW6*!ZB4>n
zt<`GRqk5-CiV|iCf;}DLs281eYGfbOKfn&2v|2=vlcjhk1=j9`r?pPhJvr$Z-~`x&
z#$7+%T;WEZ?66uUOrsxCY`oF}v`&?h!+6LZoTg71J72VUN7iY87UU)0C$oX`c@((f
z-zU?hD_j_W%LEo&A2|x9UcG8IyI2mx7<>+3R%vkj-5CSWX_nC1Q06TzrF=_b`OY|8
zes?;T>;=hCzN)!A*Dlq^H#C6|gnbdO2u9skVRUfXiu&#U+~iwq5f4eN9b=wh`*9At
z56OFPx(9%*NLy4Yi~7+I!G{^<u*JO#-T}tft*_q8XRRG%fP3Tc?(j;S`_sDj>HVN`
zFo)-~SBeON{?bIoFlR@NpQA?o04{z<Kc6;FoBiMii5TDoJ&WtTQqe+6D1bPvCMVVg
z?hxP^YrMfz4g~!aTmlI}*bEOvi%<JaqL%gACuCH}lCVK7)V#%nQ&Dsn%u_5tMpWI!
zS=%oKe`THwD3XC>y>KtAH;;oK10#6{lAY`VfMpyAj}@;M_PX`H`aSCPjxszVa5;mk
zV(bSToX?4xIg2jh#d`VcTdG_B4d)|1Kvc#5-{%SUKbKiHbxP3yXp#h>&+LJNzkvl3
zDH!;R;Q#O6|4be8bGICCn8xd6VrhG`UDuli39@0$_K}%=q-WR7?7Es=GqY=Y_K}%=
zq-KY`Vugr+t(|hPBS6BQ1Zkhu(jbs1L0)#-O|240xHCy7!^Fl35aRfxt5rvnWP4im
zUU;TKV2T6?TgHe$!krr&-+(%vZgH=1Q0pKGcXz;3Xs5zz#SGah_<87pJ5=F1&0{+p
z$xz`O*fd<B9VS#@-Zbi=y#tt|Ov7G}LsoYYl21m=PzIoT7}00M1TY7cS!ydGMHc`p
zO>HJy*8t<5M99-s$douyYrGQ+*v9`&y?=USR!5SQ7PV5{Yw4Y<NlNW;5RG{l>*xT=
zNO$`^(EBKmqz+3~Cyz;+mOdC~o#PY;OOrrH^?sOw;)x0vE-oqvNl>afYD6YsSQ-F%
z#>zWNiEWfZF-y0%G$s%ASsIF2YJodc;rJ{~!D+KJY&fuKxI#OOGD|}_OHD9imck4R
z-IFggCST~7SD`kq?l5US4Bm~`OUsbFKGm6n{FcpnecCc}TUvJC$i2IO2R&DB<z0|X
z2N(0jia={P`NU{->c{%bfRuC>%K55&By}vc0|lyfrj@U<o@b~xMy_0`jSOW)OyyXU
z6MM4CAx4%M6Eoc)FU>wa?Q|4+anpb#4B5Y|=Sk5+NPxw?3!zCcWmX*cjwRU{pA4je
z!ONatUI~w;0SMxDV$S#9vpmHz!hTzt3A+Q?QiqQW_A{gX(q9IC!d^q{MSm-A*ePTq
z-1PiLH&DN^NN)m!ByppCfmQb_qeJkUEe^*$c1%QZprZqU3?2!zV0IYMV{RofBIG^~
z9<0hV&>THBzhS@ds>bsNm7N=L6veU42Up{ApjJoE)%A9ksQDi3|1^$|7|P%FwWu=6
z_$dQ7n9bC|<5B)#8!b<46u%^e@EM~?G5Wy}ZY7RLWJ8o;Eec!#vM0bJhn6(Pwayn&
zv}9pq-B@4{!sY3O%qje~G>S+L{oN79GDD#=0zo-=2z{b#^;QcS0Q@@`AsYt6d4dif
z_=B(t^Y&m<n-nc)u$AXEtkHNfSkDqP9eAxZ{NmYOpcP>jWmmQqg(`{2%78)D@Ef<^
zgrLB|pXPDxKscwd<X*E&gOP&K&sH-Ko#BI1d9!wIJ7eh3hDlHKrL|oaS0Eehe$;5T
zx*fNfgF=>B$I{k7VF>$1MD`CUwAzX_jVxs-hgW0Z%!(TSoURLdfjZt0-;bMXF&+nV
zn}T$gu@{UTQS%K^F4br(ISN#@V-Q-kX`D5B;Wo}v%dB<-(CmbrBV_aCZ9v`&e~D_z
zeP34a5t-6kQ8LWQLXQaKAdba&te0CD0hb0|B7fmG6P>2X6Ar6X%j7S@E=fzlR4Y>a
z5IhUs0jNAfCmDj}QLydv@#}@X)9zMC)OEzgI;=*(O$O2BV09TKz@Q#(QxD*Lr$47*
zO+76$$ZJ$*XN!A`s0>ZM3!-Yh`tT~f#bZeD)4S!-7Tav6`y~<I;f|}=-Yb_|t-Oh~
z<zr04=nt(Rg{eD=;7*?8n)tHY5|m4DniZY3noc{m>=!HSbT&O}g;99W!E=Jt)01XA
zPcq=sQklJq!CuC2HlfwST{IBJYrPNt$`A%W=EHo{?bmuy=j1gCBCUwG1o*q9i&*(;
ze3RUnx0-LxnxX63baDgR$h%IC#!(Fn7?#;#;=jv1a8`+{D?lr(=yrYJrxNKt2*h`2
zygUeT%<%Z;J@pQ`8Z0+kGacgp=l)TzsEb6GP}x9X;>rs>QbnpvrN*?og8(b&gjd>~
zwTeOwV)5D_F*<8EkJ;k^3^#xLrsP}epzMLbNLI!zf>pL8J-L!EOzn_RJDb??jsK(;
z5ALTch*h<iGoq4Ktm5h7IvNa<>GhbOY7<E&PqN1s`wjt*sqd-ECXW8aQ6bv{@Pt}j
zPpI_{h<ffm(q_kx1arx#!RNS=u(vdy2Rqa}fjS@Sv@}@2VJrN3NL>i<<dpnFT`1TC
z6XsDXKT}nG5i&Lv)Q@rV3Q6bteOaG$FqGZf`_kRp`^MedGrH$k)9^HbGXc8lD*Q?{
zd1<bCPgni-JRHDlc;xL()N3DONO>9(;JvV1*{l4y(WJAZ$bRu#ikJwMu<a;hwHt5P
zR<u;&ossnWhc}An^BlT6mreZ-rlaK&LwQAxAz~*(ZwMVGP*KP}irHHzAZV%FeU!)~
zAzCLh?!HaGIb*<d_z1uRV6YGA!iFb#aI}bL<`FD#fsx+9bhK=#EfWrWd~<7LY5}!u
zm4=Q=L$lJvS7~CcG)?^T0fF(j@bWze`iu@CSmt`XO2#7$%*ZH9+H`z!+}syX!*v`(
zKm{KRZCx_G;t-8yt9IH631qiM_SX_}G+p8DleBiQP8M_IMnOi~_FQuqR6OAs!r9_O
z^kI;Uaepq`#~VO42hr<V2=DCFdaw3nIk*NR3#Q89l^CtyXGu5==y8LCQ8krd(lN3E
zN$f<yiQRE6xr3al9H$#dVcy!S5(Kz)O_GFKMR55>&TH1?Y~9|VQA!U}k+~E<sG7kD
z-*cVr8hX8{k{NS1Ru-`}S3&4$u!K^Ztj!EhHK3g)8)>I5J=CiQ_QJ4H?^9o1;YLam
zv$ait(iII=*Sc3wDn1HPhk?bE-;jkAqR^24epVd7vq}&RkNo$uu&;7Gj+OnS6sDGS
zkF_9F^`zWH;y$#uqLRgN7CeU`ituWE6)h%{*<vu_&Zf?NUx5$3vNFau!xg+$Pg;rJ
zoV`J(8@@6!90gv8vj74`?Jil}o67J|AVC5h%I#`PJ%4Cvi=F@u>2P1g+3mp6-@JbL
zqK8<3Gl=leSIq6`#r|AT#~DCApHQPuxc9xenkMYL!6hc7VU_LW`2=>VaZPDkC9URV
zHZq`D=ua;Mcnm56Xp<4Rr&CqG9k72-jEO*=(*f6Us7Hk64v|4<Lm_jCy$I$JqUM%;
zFQ6Jx>dxJkqG`}T$`69Q;6<>FSK~+x$#hO{b}&g&xv5m?|AI#V{sR$-`%dkg8HY+-
zP{2+jM;q{NuG~-J9dZ0XO}M4va9cY%9BW$|Zcm5XqW|M?THZGOA9mVjg;9HzYvks6
z2QF*4a?l(5u~nfLC~t#G^ijuTnPTyHG>xO=d<N6ZeZ`8~?lj?`nY?)1MjLAV601O<
z7{M&=G${;De+1d6O#5xGDUX5Rg85d-dJ{=joN@8YUbVjq3Jua?Z<#Eq7B`*4Ms;C%
z57K90Y#+6c>mnL+N_iV*GqJQNQpJg_b2tg8wMLx5Dw-9%ma2e28olcxZ%;r2_I!en
z-ro{4UoipZ#O@Wd8)3)WG%n<6NnnlW%tw*{Uo9iHr`&8b`@bC5>1oD2ZZb+gU@Cx0
z!9xK1yPg__uzhzc3<%lFQ%TQS38oTgtR!EoYq%InPZK;(ki>&nI#X=={tzo1&<GNz
zb-d*H3pdkQ9~odp;B}$toJgkW$npRW17b&JaEH6rs~st~yZ60DP;nH<JI8FVK5#q-
z)ZtTyfY}$kH2p}qs-zi@b6+*G)&K9(uO`)t+nk&;RrCg}Pk$VvTh6&*N?K3VcX!yG
zI8XfqV>MlW>RzC@W0=vOqmvF(YT1B&v!i>vaMYC#?G+lQWyr7Nu;+3N4kz6OTij(l
zUR^F+yDG&3i%bmmJr@(DE=hY|+oGn+Uf7|`gd&rLV{%vo*wuz<sT4e>kHLD-9wJ&E
zvgos2ME%ZBuxYm&^kO-G2TeZhTP%t?L37>SD)Ccq%3;bOn!7pX`8PH};?)o$CFLQQ
z52=PV_&%tX80w5Rmz4F=HZxK~I9n{1=lBj*_BL$loTlAYBYg9+|GIAc1oK?yfT%JS
zFFYzd!Z-cbfUw;Mt2=#;I!oX{8J7sn>U2Vc6j`>W(fjpyJ<eucsjJhKk7@7pu$3pF
zB^bv=HOyWmR5MU?-a&jmP1w^dm&)~Z^#qq9rF=(iWkFFQr#JoKr5jV^{|N1cI)jvX
zRJE<{?wqyobXvse(XefAZG>%m%ZrWMsB&WCmSA}*uG^25YMA<AMZCN7b5!jfq>r8P
ztz9{mJbck!K$(F)$>&+P=~8^!J`M?)K<~{0ubSWhPY=I&jqDIy{PY}#BUuEbI2g}5
z8Mhye7nCM=&h6<nkPY}s^^(XKm<*#mlrCjVAV-vj>j|6Rqj<6O*=G66jB=BXj?_z1
z1<bkBBhy^-6vB+Xn+O+nw1QN|6?ryDa2w3`@@+WraJ2MCVDuG3$Cqr&^K%RX(Qc>K
z;q=(=fKjAFH(lzr*T)lf!0(N$BO=9X_aD+KIl9OQk;i}^GRfYM_ZV1M9rfjFaeSn_
z@$G+^a~j8s7;5l%o5kzdSn4A1Iwvi~;wqDE(~9nLnCRC|C-|ig`ps=8hwG}WBB_9I
z&m>lujGa>TzRY;Yx0N!Q&AwKQw@^f~O~tg)H4lmR??t6b5!gu9qv{j3O|06lGNXsh
zKtU_Hp2o>~Fr#{7_K4s%aJD+9U#?c?KGpE3cXp&Sg%oq|Dm37LOitEqPiP^081>xK
zHJ_*I?Yl%Y*G)R5fUDg~2Ug1J4kGbdlUSqN%{Q4tJ|~-}s~$7XrX9_y_$Yt1n==hJ
zFa3#3cp7zTM15ttjaN};Cu%io;c2hg2b*aszt!%bRvh*^x_iR(8ba{RM6|cTNEq_t
zWE_o?%V;#d4jz$zm|RmvQI?*=rkGw1q=<4D2MNl@FW_Py`FDIBE#v7mzMEA@@d*UG
zW91&dxcsm$-)%)7ujKut9IQsm_Y;;f+ulj0g#Y->(7k}&jjG&CCMYvO>|;A0zrZKm
zP&oh$BHk^}W^{qRJ)zPuPPlaJH@ZK!KtOg+h{7rE%W*{X@29|uYZVVEu7?V$ey<Lw
zs2L^6Xw5QSB#YsKuX(MIAn5i^!jn3%4->hF<&y~FYsWKP#2A#7>!1u3z0$-SOj(!p
zEZ$}2!RF}ZbQw|lt2f)&QiS;WC#Su7lSWIYYmyrwj6o*CMw2XBxzO=p$k##~=R7UU
zffC!83n3141~xJ9^^>Enz{p1*!POCb8lHALbYT`D$T_gYdBPZp#(hiWV-zu_VDd}3
z_`q86J`p2<kPd9BmFa`)@W?8euE-bkk+pI_2bKPU5bpQ+uux!5QThwo!3&~p4$1c+
z)))8#J9)4ixY@~95-N6=#at+jLWo0QvpkDMqxN$P?GElHxtEW=uQ2Var!dGFERE91
z4G1GlYe^97UO`Az9-Vo6Cyd%h?Op_-a(id6sZe2b$%Q20JSdA&9ulI{-WCCl4dQj!
zJc!NZY&4#E4~NC8pq_D(fZkQTzX>dkLQY^DiJY*KQf7hFrKsd`9OIR)^vZ8Lau)a~
z^+a*{lQKNt&5Y^N#iKHipDP{>gJnTmn;=7&VnQ9O)U`@fKr>d#BBrqt!P18~g(@S-
zl}yVmDX2sy7h>+o+BE4uPJ9ywu$thj+y}6!VR3Qr9-IF3?F!HBL1?`S>g6A7yaPJ3
z?W>P-KQl>Iq5f-3@sLk|ew@EY{S3dzrw8;A@Bebl*~UKTCP0Z<Ka_NKOM|bvvLyvp
zH)9c{2>JSRt1FEM6pp*9Spc1rAM(qzv$vtGl_T6VwAvA{4J8SN8+aS-Jf!=Q-J8q@
z*Qp?${;>Tr8f>4&?avI|THyyh?jx-V9%(V$+F3)D+Tr%HJjLYUm|gB-PbahS4Jvd#
zk8lAbZ?llH=(3AnoOfA{YiGh?S0Dk^5e&%HT{Iwr%7REZ17j&NM9nvDN+L{|HY0r?
zf<2oquXi(h21*LR&|jeMq`C=sL|5?#hyxDFo*5y`$x*piPtxXgcb7E#?*bKf303Yw
zh*4QgCM$e&)!BLqp|zHI6NrZ+XtTW+j!3h(M}dG<#{zu{U^uvyPC#K$3EBuaxvRoh
za@}4lD0gRW(onYqj@vz7^`|0`TptG01g{I><Fw3+%k`GU1fKF4P2=$}Av0<<y_gS@
zbv(wm83MfDFuoa%$D{G+JN78UXVW0d!`t7=0CH5T_nY;UFX`1_2m$}Wn-5=Kaypa4
zBMD;2lPDg2EC(_C;U)*7k#f6r_B{&H6dEwlxJn_?qwr1SyC)z3bKOWF&^<?WG#yl=
zjOuz?buKPJ%6NMX5r*knttHWKQLpg}G%*89FO8)qu=F?<^d&@0gdq?m<x0gJjJ`^O
zW9vCc?{p7d(au+HswHv$k?KC#meco3wz;tzxq@_L3sm9Jf||^gXV(fF5zIrejY#I9
z+2??T@A@u9I`R#>^Z2n)n>M0cMfkI)35Q2T#zhx;?1tVp9yI{{-h8HGk8>QG{O&_N
zevJe|km0<ien~PAAahh=8p%9L;!E>3l~u|Ba>Scs=IVk%=$k@PP&S~m*8vfp^a#CA
zdarA}#wH@g19))SYSmgWu3Jwlxq5NbK5n;O@_vZh>|m0`PX(6q%FVXFffUQ_q@F^V
zo}gV%rCnMscJs?b-WbLu%q-iH<Ip_jYq?UFl3#20>usa0N_O`+JnZ!k4ID+vcEYlf
zhR-stIM<RQV{a9APjrRvx-41tUaBOuXEk~!-N@tW+HunSwv*@UF0*Nd+jO1H@3svB
z#_7qfin6aPpi-L5SCmh8QhC(5ztL!XYl&FhOd@7d&oMln9lfN3gM_)A<`|<TH>;s<
zRbLHf{L7_hb*G}MUndPCC9*_ZzkXOogc~+C-k`C}gKw%%GT1f^FWo~!rJB+UMYd0M
z*-N?*b=x-4%W|0H^;dLwJLy8!wbdZ=sTu+zyr9U>!JI2cEIRGG@yC@ef`luSXTGoz
zteKLPrxRA}^lQqhNSmRriX1FWKMjSKhiX|nAYbY3F>4n@x0>5rS$8tbOBBzm<}VBp
zk<tVg(Dc{oThEB*r7b#-QqoDEWh=pJ#}(l}#ECwbrw9&Uc5<5toI{#-K$u&S-i4~S
z3)OsGs5-llLU6YpY$}-ku@jQWk{V}d+r;c44Qgk~w5%BN4D+q>jFrrgWi>gW{36YZ
zCN(c_H!*v+pO{61@p_cK9F(aY^Qd1Iifu?RC&Mvw8~gK5e|&ytpC><%iT(q`Y=SQ|
z`pebTV*X_K<?4Xg0K2<8@VENxX+`}DpRLNXXa7{)+1=gU+TMM(L(;2H0X(RDxhA+j
z>s2y{gW#Wj8_(ZO=PM^%R{p=!&kv7+ez)=Fac4Rl&sXEeZSf57kHJB|@p$`jeFg!8
z?gfv6`eOMpo?cugL9q^xl2x|0%0Xv3e+QerARG@b=Zo3m;v)fkLNb!k6H@Ajdw-hF
zhqLu)9NY&E8!bogqU&+Ini3|vysz2e(Z;@2iK^A;Y4GWjo?iV6r0+^}KQY%raQhp@
zQ-br!<qLcEayZ32Jam;1p2B89D#bM1^Kk-sM*l89&|ax&gea?YD!=iA(=C-NXCleE
z2k(JI5lbJ9SHpO^OcpVBgvVtaBDC>ias7Y^O4E}Ueh~&XYt4S0J%)~pHeh8}P81b@
z;{@#QsaXnt6R7LOY><GN7Zm{75^mF7&u7zj<Jm`X!UQGXKYoE14o2f;a#<2)bb|G9
zvweJ4gO;RUBIoT27>YSYLtaFLDTD7>FZXr0Vp06~p4GczHzR|`pv?VXq{3YYJ%wqp
z0*|Ew*nR=<I0dlJZut0cfIj6%0VJTb1^-pd#{vNKD=+%?$%peNd<3vkHXdZn2UZ;}
z*Fdt_INILAvui&7Xu5ASO7d=l$srAlvFCcX{2wj8{`lPS<6QqQzg!&|>wjhE+0)ee
z|E&7Q`u{CH-(dZRC4LTXaYqpoJ@u2Lqm$$4Fp9*}*S5Snta|%29wRNhe3z_fRJ0G%
z<dVm`OBGixEL@$hC&42Oc}mJMa~xoq(@=8%akewY{Ez<u^S@fP>Hp9EnE&76^X=z<
z&olq^yo{X#HD*N-DmaVTh|PA0gR!0ulc^rs)p8GsUKG8F&Ij;9(O@=P4DsI&5Kl$~
zh`t_+RGj*o?pMH_OSDW8V(9KfWl(GvVcb>8g+UX84jiT!jK{<4REf0Yi<#uRM?=D?
zvX8##hB`mR3;U$idI@(J#-nHn4!>ILZIkkf0R4|3LH~V!4Eg`%jX(dCZU5Qb-ZJGs
z`T2+Z{}!KbBmcSCWC@$=!ALi?tUj)O0N+hZDu@z2c|D)5l99#k!eC7^ief|IOARWT
zy;>cj5zq)PXa^6JNT&4DYIQoN_F^Fm-&0V_K;V~3#Z%^}$!fx?c&r#&*jcRAsl6-|
zAhe9$Iw;`l6B_-4io5+3APDw6&1EoKJ9<_9W<3f|qN9FsH6E4%_?PfUm9*-#(<#|N
zkJF&^g9Wq_r%<E|G4J~jk`F-1GPb)z?NRYy2p69Bl_VBq9fEJpqN|niuq%PMVwZ-v
zQ46522Qz{=xh6S(3o5~9>2ys}2qu78Oh$u`#fQmtIe?do6|dyrCwhBQwN%|JU_Cl*
zlItf4SsJ0+-v{)^8oK|3+0C?l<!VI(hZ((whobfFCDRe<3T^gsIx0d{MK;Yy)w4+v
zFWqjT-y{^k`$PqPuL?1+5SGj_J4MbC_wV7Y01I1+MS&up;GIH&?x&wz48@P4(o;iX
z8Cl9VoW&bjY3yUFyfQ6P7^E1WN01`QtK>KJ^i+%ogd|W##Gq2eHHgaN!<M?>jpNVX
zNHwDCKN&J2B+u8=+2||Ok+J@7SE{?l`oB}zt^Be6e~ZtbaxVXWvaJ4<a`1Y6GY$HL
zf~(+YI*b=V|KlndUrA+PI$kO2z`fyQHn>>9lg+>W`EdiLAncX?{MM3jc+wA}Mz4L=
z>{;{83ma~rey@FU=FV24TDXDe%@22ec;u<SebgYi_x`k6pO2<-@z0=c|Cub4i}T-L
z)%X>k%z+;SXf$3j4Dd`UDYjay<KcKkV@-@##e0Q6FDE}xf6FOR)&E?UDfDiikp?fT
zz_lDbc^bdh6X=PTp2kdNdLnKb^`!Vl|D71WF`lxXw7eQcm!qMPc=^#tNS1oS^+f+Y
z8lKxoVCgX2HZodyrT?Z5F)ZyG)2=5fl|`CHXOSvNDhsbJ%p{G@B2}f3NEBohDM>0t
z9SxDFkDJ)tX(x_Gp0xZK8Hp={zZ+uDwWnTwFj7ckSsKv-6QVf)UzjZaXKV+}B(MaM
zsA7W{o|VQK1uj~DzS-OTGg*OI(eFiB&N$$iyc%Qi@yt?w5MO1LpO3Le1Oc^Pu$=}%
zv+ohVoi>HCY@%nvEhl-Pu5&@LL|oT5fd{hwqgSIpKHvI7^*_!7U#AXG@}50=>OB7=
zA4C7!-rlMHQ}FcbG)MpZ7q0*5<IfE#U-I-POaI@kZdI$M{cro}_8<EHxA^=6&VM16
zqvhzAC+pUzH)L;q)QFlVU1~LBh9fIbuMg(9JqHAHbqN$ZU)5R}*i(vT<)4h2$YOa9
zW(xFcJ)bV-EI!>eKdJ`rAZBs`KcUmzo#1>unb4>vG57|ZZf^xcqU~Uumf3jD(-t#`
z!4wI1p3eaobM{l{;??ztmzm!rtMU72H6Dv3rTmBU{d=Fy21e_BR~KSu7a+2RXg~RV
zITHP`HB<RopjF^kY#%Vq{yilYAk8YA((@(^M71xIpeC90Ik;TG^E>$Kb1+*(qYsN1
z`<|+ZgIVgCF443je$&V7Xa(>+nl4!lms42-)c0YGG*D4WqDNy0T8SkAD;e7h>JElK
zAv7mNvtBO8v1No}WrS1)?MXHUPOsWC#a!9CCyRIM?dYs|+&Jlhxr@N{z_?nuskT~M
z8k$D)3{867m$d?gsP0zo>Ft26azV@^-P}uBaN1{P@x4?jc(;HRtm?VVexu8AZs|Ci
zCk@D?S$ld@+FNx9(Yv?x^e=A&gazQu)16AeEMNzGS|Nz4Pg_sxuKeavbW<?WJg|ct
zutl;BDk0nf#7e9eF`*pLT*E@%X*NhAc6ZC5f2#+!)E<{|JBD$;-euV|SMOo{q}S|s
zYrQ7s?i!ezZ_b(_Fhu2P<=LLW5zThJ$3}0@EZ460MV2w<?IV%3XO=pui!8HLO=MNg
zQne$V^_NsL;MI&`G%bOA2awuc1+3+e@+jCTrCM!*|HTJ>lmf4L!RsdYfe*ZPWP&$z
za6?cu4r(3DEEe(4Pr>d^Dflavg(Vcqvci>{9f()-WAM))pb(<LArRy-mgRt@R7HoL
zigq$9+R0l{r+MtDXfLy(y}T72ayr{8s%Da|nwxZ?x1!7*RrB`fO#=mti*{R?HErds
zsbBZkRL`ubp1Y>T!D)d#f4FB5wozvSVyk5=CA~LC&7;@7S{JjotA9zYUOiZ*sQ4t9
z&2!IQ&)dPOmEYNLc{p8Pf$O!1UX2FWf>9Ari}AdZnvCP~(z1b1yun#3rCG(K84E@R
zt)b#)yRM0l9RnjEY`rWXU}^wRc|htK^+4Q)<}qe9Dm&IDu=D7uSrDp=UiQ1ue{S|p
z<eB`I4UF(j?+uQ0sO<#IX7*2y3b@#r<Y)o0=sIYuXN_L74l{t$(Vh!e-SIUAQ>{7*
zYl?_}y5*&Dj|q4RT+@N~Qox5g@Qw$VzlufK9N>4o$oZRCD!hqb?`XH*gY6z=Z&ILH
zOLbK*fRB`62M~WH2l!e0xKWILD+jWepwVx1Dv;kL4~1J)&BcK>v=jCc8=WvDS0Cu3
zdOxJum}|%w2qr|OQ?qWM#o(`^z0GttHFna0{oF!B#qt4a?s$PQRIJ3j>jiFVz)!2%
zYGMKIE13OK1Vca;D518EIrwuZX!y(^Z^+5JSKTs4?M!KEtLm+tzI(+f*7B40?C7lB
z6To}^f$O$foi1mrgGxH@kmu<_kUmvXpqbTF!J}Hcb8zxTc5vr_He(0gf+ne{W*i-m
z#gI?J>Nacjt_Pmz&|=b7o>jBZw~titEgv{O14tJb#z;*s!QnL{1$?hs^^Z@pex$5l
zd(Zyj1OK_vEP%@u5#h!V(6_!25pCt3zgl>UP*&UWGsY<ua&-M}`RC{nS422!|19Ni
zRPfPg?Pkym-j-0YZ~MSs|9sFROD4y@)jG(84h8gH)r(ukqLy-F&%ZLhX_1At+3Qur
zT+!uor_&A($yWD}3J^m+>vcV?c}Ez4n{PsmbJ@nvQ{oJb8O{2xA)n}bLu^&qtLnRR
zeA!yk;J`1HIW%2go8C_J`u*N3xi}o8fDJRL+*}gyUE6Ll1%If*r<G&_UTpO!e0qOk
zz^ls5u5F8Ez=t|~7Jg;_gas@cCZmGaI)%3L6i`XDomTDjwI5%81}1ve1n)>K@2Ey|
zk&Sk0d*(uM*6MdpPNX)l=U>~z<B2$#_O^^gm1G{h>Hp{6&osMgD)r%;u-<KTUSexW
zYxeBfhb&v?`OZP32YUiKCF-@5S-*QgXR_$b&R?FUxYtIVC~Q)!ReSbvpJs2_vtKsO
zkiL8N?4t%mprZ&X4v>TPaR@LlympW??!;T&y=Na1{+=cFBfX1c!MVWJ`4=fVc5ORH
zov!WfOUgd=0}IKG&0sxaGZ^yCV7+PVh>nh567FG}@p{A9)<G+xR{NmYiyF;V?X(jH
z+t#*FDizFKVaT_8A>ZzK_IqKk%gvNqJEpc7h7F#%^A~eFh?!BXcR;pv1yRFJ3QFLw
zH&Q5ir`;{0(Ndd3s-(!Qo3>SGcDPd)+`m=b@eM`zrqlUZk$rnxUL}iS?<?4M{oq8h
zZq=2k=+!YOcwYhc>H$DO6??t0l?~okz`c}##{ArGQX%yh)A|4{ty1|%GWRT*eV$oQ
zF&5d69M^jVR46NZ=6s_0;Y*gkQ~OIQ|Db-{Yw-NP7!s0}@AW+SjgY8AB0r5E^P}UF
zh*VS%9Bc}5))0wkFGVEGC)xxA_eVtRDAtZB>cQ(pb@dv>cB|EL!nVboh9IcA>ZV|b
zvgoReJBC}4r`Q5Oj%258qo?N^Hl11>q#vF5|1*16+uSyer0=WdSE%x3JzMdVNJ_Gt
z)P8{YBJoU-3`yC}rM6g(CC|7fj=i#z%v{~vZ@=yaK!N}$JF_|OT~)lbl@akY(0FTf
zHyS_)1FM@Xp7IvHH!xzf&7no<C-sC&@PUAM7RXeCj7`Xq2AP?VnFi?&rF{981w|L4
zi8+7-tkNK39U}MNT1c0de9rttD`=|i`}r|r&8{D$&-5KB00bR?J~Q*!XX`R-n_jo5
zV;aT-k*Muu25(>mZ+PSot)hO+chj0Iy3M(YOP_Ao;&~7x()O%iUeXFa;vmkbJ~JDX
z(?yL96yKaxQKPV%*PQV{=nhl1LF=z_(4kh@TFka4tRm9qyM}f~H}@^tc?6AbLZyKa
zN{%5*2+i{PG%Q@~DzU9eINLm{S7m$-;z@M6RX#~5-ozS$F!Pg;6$4U}_Mkx#TV+95
zJJZ2H(K7ZyJ$KOOs@9>?u>$q1PzrLVjTB3LhyTTQSs>Y-z2TwtOK0In@q(5$eUq6|
z6EszM>dcY~V@;&`W@<2BjCiVJ$r(}|k*ZHANM$J-id0LvEKPEeYAKhcsYrD#<sxm9
z-Yt2Vz)T&HjCVDeM@f!I()eY$80wIuslLo-1DVw2l;tdxN!oJ2ilE7G(k+X{Wt*gx
zRkmemK@F?2IOEf6>}uYKRl<TMsaw`+sU+$3I*)i9mXTCJJ4rUCM(31{$Z%%#YU#?P
zF4Z}vi6q_09wng6h@>v_Wmw?FMzXDrjee=i#VU?r{+Dn2Akr9ckF~B_SDDZgUDo9&
ziPICwEE}6iF14AbjXTPaC%J59CV3)BV*>`VY))-e!z9_9+Jc3uil~>T=@_g^SSdHQ
z#<~@<0y|FDIxve%b4NCIFt((+O=`D-CuovIO-E@f6{U)S7Iw3ZLcbYJ;mk;q)epOk
zR<2lpyT2&ZoN=THrd590G>#^BXt?;i`y73(^s^OhBXK2QJT@e$Oclx6$Ph1Ika~&9
zzA@R#fz)8eV~yvM`B}PbGdH$FSgL03naOd`QfoTpBh*pO7IhiG@|mR^aWpc*$avd;
zSsJEdeRHgUq6N5CKp3Fy)&xCSF=ZZxLG6$yLSN7{%xED40Cbjlf1zU~l%|qn0~89t
zwE=Rjgp7VO3@r)pK|N#%?Gt%ip)vcut|F|sl@ksqRDf#%tnmL_gTpiC{FJ#JCkC2T
z4{9^x&KUApX(M96<cOeq2kQV|k3A<9M8>k|*>;Fn3hE#_at5seIeUacJTy1ZR8GxE
zr)U!V2ZTTp#AWwv|54O~X8s8}ecDjSkzxOd;0pM6Rh6KjyRC~zw~GbRFZ{A9;#|p9
z>o|$b%Ur3KG4Lg9b-D@_!`^T(30SaVF<$nkma7NWa3otSsM<YBc~j-m2UVa#51nAr
z3-^JOByL=2v;Ows?e5EHl|3yP`59hH9K{qFI^Kg6&R8Kx_w^3<?9%3-gqg9i^&-w{
zs!Lzr5GqMepf**1gL6?*R$_$NY9JwE4j<^5i@<dW)AfAP<XRcfCjlB%<ABj;w+!yB
zbL{gr%6@ER7=dYtO(cQlFlvBBrUKo5U{IXaNyY-T5A_It;W2hcCv!O?j0m3wHC9l}
z7!eI>)}s7g)O6gSEK^3JW^IX%(R2;Uak0pr=4enLlI75v8K}yFa!Hu#uoJK?C2;f`
z)1`i$&j$nD`NGX5TqX;jrd5wgEO5m+7kYW0JuGnlzFy6<Zhkg14Y_84iqq9D1-C9k
z9&|Rccq1DAx;Ud}1P;};M$j57lK`y(zs!R{|G*bGSidU34tmGE#KG{7lByV}&JPPN
zFT$LBDTIqmQPSXIQQQ6#H{EtALO&p5f4UEvq>E}976t8ASx@OGxdyZZ@{KL~RHoR9
zdYy-I`5l`Xa?ELqu`DYb!ziPk+Hy#dt@3cssv}V%Bb%`1kU3lNL1JqI4q4^GYiO@K
zk4~)P9F)XRjyGfQ3KU@4(MFsbElFVq`0pEB+xjMJ2gff>P&Je$(}6T5qeTrpSvUK)
zGE?mYNAZUH*wiHm|1_L%UV_<Cqgm2s==Z~#^~$wjV3i&SEoNqEWird_lXKJ}!9d7+
zJ%${(&iN>_s>d8c=7rc-?njY6j3$$9d~ir1wC27^$5EAnax6oiqs|xSWz+4JNhDY&
zp9q(Lw$aZ~FxJ4rl3U+ajh+JmM7lY(zSD=|Z660o*t?E3*_CfozgGkb)CfvV;zio8
zOSC{3Koo-L3o(7v)5f4?;?{KE3L;t$3T4WXsmNkj#(4B9_T}g#9*@|55ME%Ef|*~%
zVC<lDkJCld^ad&NA`fWcSk?{GCUabRiY##fbgX;hL^fw}P%WuKUer?(L2C4j8cyC0
z!W|e<5*lJWFoGmB#B6|bv!}*l)`4W%1E^il1BZG%h=xH`ai|A{XqXwtcm`$}f>KiO
zgRZd9#bqT9VugF5D;QmIw5EY*siM|pwKKiB7vpSV3^4$sy5Q1<TbUHNSG_Q2T6^xz
zeGSPtz<Y6%5zLv<r~84W{o2fG$cikd$I1iDq!%-t)Q&eI(6kriPDk~`<*4I*91%ko
zy@+u=BgCdZkE7ZfV=-IHjD`lrtA#|sKX3JQHJF(zI$PElu`bLwGM0TqIyEQ}@G>(}
z*(t4P)@6z>vb1`mU#WMYr2`IiT>51!jMzSnpo2W~wQCf;a4%SdHd!zQK0DAj7)GYY
zf>3Exo^@e*%#DN<U6{UyFJZWg*m3-Lwv!(X(tdqn7voan?E!cQu(g2%j0cAHA{sJ3
zop$P_+c*mB9Q8oOhBgmrN-!AjdB<~M8li2r;d$N0JXwrdUXk!jqATJ-S$U1)_edZd
zc71BLK$T}vQu07~o2D&LXFNJk`Xo)aMf0Glp`=<+!D{|CS7N+$x7?HXFM9bm&U%wq
zj$UbrqpW7pI;kuEaBstc6WNFeit2ZWfkADxjI&sJ5n)q6<1IB$gIS5hY?#@wV0lJi
z<uB_Lh9R=m$_dAGHuVv;p^$%G%tqYHKhOL6kORQ{k^tR1&jITeU>v|E0N8kaz6TD-
zSANuIxN$^~OMt~@dlQ6mp!JMdL@Z&MqK%q9zY2ol;9Qk@M|y`#44*?YBaL2t>o|k;
znONspUX^uxUQ(ktJ?WK5ca3y$2}hrOE+KK`m;Ls6gYx%D4SA7BpM5SN0bOL!XUIv^
zDfN%lBCNW1Imj-woj0w?Q%8<Cv9~g^q4*X9hYVb|A40v&twP^qOhCme+I)M@Rso&2
zW)q2HwM`Qhy$p&j+SA9J<N<^X>A*?2VE$X>vgN&K7M#{u8qU{I-&oKokfSnB;;Prr
zZlfqdAn_R)UkNJ~AdF6`zJb$QBq(EN>@BT}><M&j!-aC)(4=QxV&*iq@ZWsUzgcFW
z6Y=i4>_!i@LyZEPw<t@Mn^v3}>k*=%SuAMWX?P3`T7!@*eQ01BI$H8A7w4lWw4(8Z
zx(pEIz6Z;eR0k?mhoN({zjzLu*g(|J!lr8-Y!-)|KraW47Ox2eNJqgDKm*kQtC-kV
zGlz`?wsKLIl)Z2++H_%OgXipRTXQ4y0dIva>mXa>nv8*b7|!=*9T%avy+*r2q=3c=
z^CF{Y+QHz^=}E5rW$2&a@od`tJai3QzI@B`z+}%z23t9b7owE0NzDo-co-f|jAV=u
z!tp5zIGh@Yb$XI!XQ^ZZnK&{)++z?{=r%+?W(I-<EJwAbtg<{01Ci6QfupDM=mu;b
zXo7;XrQeKF6w+0YW-zMPd7c#&=89Oesf9{~9Tt|bW(emJvEZ03E_sl(ssM`{<A;vf
z{1~{Dh6~~Y{i;b=g`5quFZi&DSe#G;5X$TlP^Qng1CV!`WA+L31vQT3RODs$OYHjX
z*wODO!m^@VZ$T{4>=p^kDTcL@gVpR74g|(vEhxh`4FSQ;o&jjdQeK(7DrCN9pF}~7
z4M_PmEqLgf9U`U`n44vc(K?7Sy8_G2ncs|i*X)N3vZ%sU-ayQLh<s`k=>M#WnQT7S
z><0ntG&2D09YXU+8&HRy+0^+QpVKCHX(JxY7M6n<mh|;XcHN<8wlG~$jgbRI#K;G?
zTCp_1qRmF8-XPhVsyv$YfuW4oFtr2q&@%^Dm7Rd#EyAjJOG2z$RcweD%Ruhs#6fp*
z<M8Ro>2(>Fqk2(f>%2LC>^RP3-${N!1S`*K`5%M9JgyEVRzwk=iskme`-Dd?rrL?T
zlVVk47uP;3>IKw_Za&HJ;*%UNJi&F)84X|2)`~Ow(IDnB$3YT>1`jn74c_u#r{O{K
z-J;x)FUfq940nRkN)V}3uB3AJP|b9z@k~&SXC@WjagmLok}+8`W~*5a#%GDK#|tz7
z7t3z_ys6+6{=*~=VBA&bQN;Hb!g|~#o_b?W4FB;s@kA${NdRs@k-uU~ZHx1|DpKkf
zn91f4n9J^*+qyGHB_%qq=j$Sk(=KHwDZ@U>DB%_`@a>K|#K)rvuki%_!z8BX3d1O}
zjilp=%gdd>e^~3wQil7f-QGQBg53#2b|2>#<X=o1RskCf{u^elC*J$`3evLRGx+PM
zl7b|K`xWKMO+;SW<S8^P^^RJS>C;GBf}<z#CBi;eyakV_MwUU^Pl=>d-q%nql=l0o
z(m0{_wS+7sy7AZ2x%GsX_|**~1pil2!urQ>Y2%Te*r=ZKlqCaniez4U?wt3;v860K
z2}P1~BF9#evR2d;K8+k@1@6GMH9R<?mCaJUy|gd@4%|OS2}9su{A5yS&+gQ7+^13b
z3C8!-d<GJs_cX#N#ItR0L}?b$X)TTANGOOb(xH#&<Xq5L#6vHQka^Bi8ZZpQPC@Qd
zYSa)F{lZJ=F5GrY745Z8nAoZ#L!61mGVX0OO(b>i1Z#91u&iM2@UxP=e6?s+YUtxV
zna_|pJYUDLmQ_SWu41$+1=c!Mb>&Cpors>IF{xFMQlVcv;|>9kDtv;%T2KeQlWY$e
zP;oL_d+EBBKjOYwB(MxC1D-q3icUNKEtAWj$+!oiC9-Mmrrw6`mDBnx13NaWGCxa*
z*fL=(q>`kbzs*@$^6}H71-1Gf>S2)CNl}9|=K`6V8di=qr44MdI9<aO%ub6UXyn|Z
z${w_7Vvt#S+#tg%3Y#{yh*O=QDZoBe%XuAVC7xgh0~1VJemS#nnmUelBGEvj6H!6W
z?3oiQ<Z%$&3!_J1(!r{o=kEZ?$|c9MHJqUbOWtXIm{saPkLCPhf4+3on7jzYg1e_)
z4YbQa6j@p5X1s@k3O5eW@snV82sZY#L>DhM?vDELc#Ki@n&#^;aD0gE$--U_!O{tL
zylNW~=^0-0QpZtE2a)7RJDT;Q>QxYf@bRmGZ7z19=+|r2cY+^16)y>9Pb640^7$!n
z(wc2nRX;-lzqi6tk)tKwW=3`4c(DZ*L4lw))y2~C!dEIrjrT<OpqUa*GbJ9&{KxMv
zLH~>Yy6?Z>3l2BipB{dD_?Bhh=G30|AMvk!|HYfo`!AmJZ{Od4d4@mp{h!a<-TgH_
znf%Slc%=WGv8%uTIUP;E_y4o}J;VDy-RFNWsL_1=M}F<BHOhw6i_uT?zZV?<Zn(r%
zEq3!>yoXUU+ibd+dBIYjHq?hNuC^BspLQ_JFMhs$c(^!pe)4`A9p>KQ=5F(AeRuJ2
z`Afa~^1GJ7I!^ct#gqrsdVDxKJ{(Pkj`zwLz4DwF`qg{IX`M2{<Gu^NL*B{qH0nZL
z&^w4Pj69Oq4^y(E2QL<RGW_hC7`CpKS<z%;E2n`7UTj5Scu5iGVq*+Z!_Dp*M1d{C
z4p`>d;I+-Av!<TN;qB!MJ((4}dm#B@cjLJ)+6SZZ(xcG70+d~_&>KqpE~t6$@FO#j
z!)E*b)6Y=E_5;3I`zU<3{jj~b$5s<si1_zkJu9E@uPgsIyU+Aw={J4;?a}pr;&}S|
zZ_dR1F8`n7?|I*UL+P<97uo`#enl{1alR@qK*MJ~L`mAzI8OOHh04@5SB^Rf)1ik)
z6P4H6#FJf$s#EW__jmY)*LpU*`Xax+(=z(Ie4$IFs_$R_{}vjC{`bG|1}FvOebxJ)
zV+a2E{{H7P{F(hvFJXVvHQ*8X4?S<l|D);AclrMuf6xE^rz!u{uir-g%TP#K0JO`q
zmw``lSQLOwchlTo|L6Ah>Off-e%V~sm-ImW?ni@|Sbc;T8N>`=BK`Qo58IEQZYW7U
z_lb)Q88E;H#X9>*{8IY_tD^o%1eU)1;g8qW^-N(~EN8O7*Uhi{Pw$m4u!e1SpSSO(
zxVmXs(Aw6PLzO6h+eT;Q!^J~W1&qAJokX!~v`&LCiqkL{{Eb4BUs_QVlZcycgmQXN
zrO#3kv^pIAjm}!wfly#vF7c2rqP!l|I$snlz$+XG<+P|`ou+ZR5I8nZvlUB-wQ)~g
z;s^yoB7pqSwoAP=`=(8eKVIF6t^r%9luxu?{sJ-x-wvIBy1IJjxuk1!sd#U6I%9Qz
z9e$?AM^|NBhj?9lCNFs%h-zXjWhxHd=!|X&rPkDv(8LwT=sE59<qs_auYVbcD$C=g
zP;tQoJymKBl`qT9r9N7yDCO8X9q*^XqV$Lk!Yv5orYAWpikG6rnj)zqnwclZ_3ho|
z?WQyTFu?dMOQ>p&GUJjkpzROk6NZ`=Aq%umbOK79t8^|>ypsC2;Y3Y}#jOI$w}Qqx
z?dPapM<m4-q53h&7Ik=zaV*M5Xb*Is01~^cnM|03_c7pIXgJ@L&lnFvTMpgaMPx+x
zBW*Upu!_?tQ$C|xAFr2!0w=O#=sP>-;>l?F?QW+wy&7ms5%;|L_eeHHra~q^&|A8^
zJ74o3q6GNGp8&>XDWSJ_`TNBH*=_&PJhcozFD`!-KmTZ7=~cwy?C<z{IvzgkE-tt9
zR4}Bzzuo-)Zq(QV8gJc+s?_2B>aN~>0kh#BBmt27tE>C%gW#U?0of0~?5<(>BKJxu
zE%p8awilb$p6O(ZU-b+nn)2*S-v*-0UK;;6TAc+6f|vT|(PF@Ru|rNXv6o>4T)h84
zg><Qg_|I!SGsN~c!QCCcZ!PniXIV15z7MYN@k^6c3+l}XeqMa~@KE1eT+##As*O0r
z5ASq6qiI+xX>UO{AAhcQ_YV!X+m9-%B2_grzpb5V*Z;3?FCRX9GYf$9|M4FG&(V?Y
z|M#5l{{LtBdrtp9&X_pE(~L_+{qKDJ^Xh-AYQ;Es%ilj<`ZpVzu^_*PiUj|Bd%YR{
z_4;GLAE3u)+lRyJj|mA%o_PD|p_2-~DftiWL3TRNLA>Ldjz5R+`>Z(l%jz#LK@bk@
zT>bce$7f`p|FEQeYx}={{yU!O|2=ZQ`+uM1@A>V2)ZI<{Uw-}P?SHcGnZFF{Dyvw6
zOY>lO0GiNWosk;+lBF2(>7Rgp%TEvEJ(vewOmLPJ0p38tch3&oi4ttt7$N)|R_$l&
z6z>GUpgz+U0<$FC?LOZ6LI=)I7Ik*QO|h5!CqMA`2HEL+DRb=;{#%~Jx$-H^Fp^Kj
zM0Ika{Lb+5MPVeVymahUt~W#S5){KNIT3^eBL#Xkyu04K8o%cM4eB;#(EL{PYMfIK
z`L92}a^x52q|Wc_`(o4ny1Bn@e>~hiTzmj=`^s@iG5-a45t4ZGWrUhh!yuWR-y?|r
zi9fsizx;6hZ2n(-sm3+rzc=|V|DWUU`Q^W##1{E4zkaj*Ke21aV=xXpEixMoH!jxk
z#T-XJ(Lg7KNVfYS7SD4sav|7hm<E|Z49t@JWt`XI5|dOcBRb|Z59(57S3k&t4j8=T
zTSTE?kma$*W?En=+TnQoy5_fDWl6vfC!exps79Kv7O-%I#R|Pi=+>cYoOul$7y7xo
z24c=dh&MT`A?pT;_2@z8MHa06iUL2BmNWeeAGVwNM)3*7L6}x?^p<>Yb2_$?rQ8|$
zvL6vew!|#8ezk+?ngQvykX&_qB8tV1>6Z_?4`f?-?%+mvpx{3@pLQ1yu!I8PA06yt
zdA05Z$mQbl*Vj6|-AR+*?jCRgmUr7N**Y{Lsaevj)h(Kj{Gj4ccsiBu3j(_ucroCv
zU=N$7Q<l}s{ri1+dnrBK<!ms#`f|G6z3Wa$^r&Il?cIYtaSe&8ZybN$U2NWYL<!9U
ziPiSzj(g??m%m)!#T$_+{-}VfcgD<<#ooQYc-NhBP0H?e1Jy_`ZaBF<in&j|siUn&
zAr)Jxaq<IuHy4-h6oDfvcuhd|URLk}HZ0Uw{fX_2P9s53Ufq$7#D6WeR7~?__j&W`
zf7tui^|X#<Q2<?k(Nhe2eeniaRsdmHi6XBLA!M_o3jt(1%gcLw7=|oj1O$*R&zY<z
zIFEGR>Ab?}s_Oe?jDaLyGG{zzY-x0Lb$4}jb#--h)wUw-XwV-YPltj|1_{|O>@Yl8
zu;cKDWG>0&lX(LE1Cz1Sxgw1z_e`kvBmncLPM-kzps2%_f=+8>3HWgrzbh<-8UK;v
z7YN+7!srL&=napB#Bs$XW<#qV)jP*Z>I9%();lMtPU~pB%R&HjlO-7t3y}~R_Pksy
zFW5rAn2rTQkF^hf8mPPkyiSg$@0!;@la8_h7Y0{z_Wg)kf`NUSlc8!~tgB=Jel2n_
zX$EK;v4^@E1h7;jWV*Vtzz3m9lxqcttrPcSeIKtm{&Y|WZrHRA{x0Y|TGWq@m`cLO
zI=CFlt4}E0Pu`6tsdNBwJm#dc$?&^(3Z{e3LUw`GUXCu&nkx%@wK~bW#lh9(tfTD$
zY|@4w)tqUwp+h4!oB>YnV~o1a=*OhZKCNQp<(&;x-em%pos2aIwjpe50G;NGmhgMR
z?V_d?o;Hu;Zm+}3SmCx_iY~LKb#`p7ce~Bw{i9#03v3sudcl~;fuKZZ$G(mjM)n4T
zV!>U?e@k?H{!Y7Jov|b6$<^gszR7s#Lc%y&B=b>!Y=Jr?dc%*Pm*?*e@LGcIi9U>^
zub53ZQDdHboX+2?O0eQ<;$n&I4|RE);2H26aplyX4|`z3Wc_@dK}5YyeX&R`-;O_d
z>ge;st0dQKXQ;gP%(M<r4FLS9O+vi%FVgXFrsjyT&zOC{>srp{QXc-PgZr?lRUp^e
z+x{W}Y6@-aBVV@~sQ;pCHdq*nDBPP;I!|^$#mB)vzlvX;YKo{I!Y;(eT5gSJRH4IA
z+Jz7t3!)?S;wM{n8Kf7%_fx%B)y}J6Y3Eh77btCX{iXY7J*oZ*S{PzhQfi)DPM1l{
zZ@4s)b(+Vg;EGW{I_p8KsAi`FPUnhZ1_0B;;o-Nh2z5`0S9R9$mJ14D{HT^x^Er%i
zk#W20**9>&7oXz6`8#p{sKy^D%|5zZUhQfGV(@-W>QCZ@P{yHK@~WMY1V*N@kSqeN
z-1+v*g>pVwgB5N2XAXurgQI*r=+7jhV|)j=^=JK~C+`}M%ffp1*W=@6uhVMKM+YFQ
zeE+ofJWyl}!suprB*UxOIJq{QskEGO+900Sj}FyW;ou=c^ihG{wuU&3VHmAB8C^Du
z;I3VL*yVg>4h$q6|3w2ZT95c|2=b@Zbm&BBc1(wl##44G*!2`JrkU?5CgRw?l`E>h
z|9r+juKoA&-T01uz;EFH{iO2bZ~yNv@y9>^C?VJ(=WZ?mGW@@{9#?Gp|Mruu+TZs7
zFY)J#zyHLuy=(uMp8-6gqG2(P2ea|j0{$zlv9m`qpG5Z?_u1|yt^V>};PTx7_ss=f
z3dqm1#g_VP`Om%^gCVJL3HB6mFMIs#h+HS3L*N^VZpv_eg_7f+DGX5&eyNYg^xH<H
z*HXWMQ8_A?Hau7<7y8YyR=ik`D81N?u~v1g;^N`Cw0zwh@s@-5uze?pYrY<1%G4X9
zeC(iPlx3$~xGK3h)@>iw)=9VZ?0N6zSfAKfPuq8Zb;sA&qoad1I=|n7zkVp8%ecMn
zTs2>TJad;O^ynF1LpK`aGiO~2l)}^bhKuN#vouYkX5r>oouw)7<ZdXOo_W{hvs?1|
zGw-657A<sAzj=3^rTOfZi|CnHnt!@1hg=!ljsE=Ccr)(z|ClH57I)_WlB)kzD&X#G
z>wlH4zy1Hd$e%Co|ABJLRRHB@!<7KjXEqz?cF=8b7rROrq|QOAw@O99`hxF#&<qI)
z68WDqKNN*wf2_N%^qO5WV@67vDhzOD1gyxau!E7Tx%e@?P2kNDPsU>UhR~}po(IOM
z1KHVlY&JWWw~?woGf};s&n%4|X5M-`@F$i&YhmJdDs<>$A+USu<h>7~*Hel6NUA3b
z4(1A$Ip`sSUQc9na+2psu#>K8+s&NkWV9~?Q$Zg#I=v%|+FxYIwyM=~TF`z5{9FUx
zP6uurK7$@^9r#;~U!9&bgV`prE%rCK|7aj?6LF6L#XIF{y!9%?Um9S$(>m?6dMX)I
z#qQj*MhDrMavre#<*^R}12p!I*?Q8l0a0G`G}ua+a+sFOo&-J?Eavez2u(5q*W2Lz
zQso%k#Kn9p@o~5HDz-@JgWEqlJZyHll<PB;dQ1Srt4_R-+E&Rit?X<z=vGw&t)_x*
z*EG<rR8T$XW(c;+=KV#17P>_hw$>^3V8P!HD`KjXMH%|}`7$jR@?=^DsPt6Jg&did
zbEI0%k!v|evgOQd%Y_`tmJ7aY!FIM~Z@yqV-7*r#0rKTrF62+Q>>r~q-E!eCo^AOr
zm~PpVZ@JlO;Pg7VZCOE=7g*VLTF1O#$F6bQ;01P#Ixn!(YqjdfRPcjc(Ca|pTPiby
zF+Xd!n!LblruCxUDe_&q{6iUcX}(80aPe`gOMR+)#?2(5L{$GFhzKZQfY#GM8wTip
z3TXY<06j<n?a_#fQ2f(gRNaDnC|GTW>oh`3iGCE~9vz{}mFgCe2tgi2)fxx1BW1VS
z;MB>9b+F?UqIHW(4<VVI!(MTx_bUG7ta&D~u3|DJ2kP+X6r$d}2<`j`bj*$x`=_sT
zmC}JZVlzuw3f)bd0O;&fpl!$x(`+<?IXn^I`9XuWg%Cim(MAy#R45P%9~hasp39Dk
zfG$m4Xh}$iv@Wcmr%hlN5~-@w*t2RHItcsC1wpFmG;~1}-CR<-(42lIJslbzuSCP+
z6=-;zn})~PG(66x;c+$%k5{7MafpU)*D!jvc5*c7%Fm8spr(3h_DToE#!AkFHCC8{
zMmAv^9;#WV8@8>0Ji`<bA-$f%R1BD^7p7{!xLX*h;TTg>JCiw3r;P)3AgFo(*x=}d
z!yEE~U@XzlxduQm#UCXN3?7yM?nTc&dNk7D*cmBsrq`j^@hFgKSBGLJBdtYq@^DM9
zA03@GVoLvqTByxbJ@gHYUy=>MJ4ghA|MbJv;i0Y1a3G#{AcWzSSSA|I=;>1rj*@TN
zWP?YB3?=Z^fhvVS&So8`s@s{I<t>|YIuL}dIIxu<Y}J9S+9o5eY0&EFbz0A!(b2<~
zsn-iEX6^8>ap))+vc^I4sQznw*45`Y8(KS-Ke4&5^9z{{RQn=hqyg7?)xi9_4AEnI
zJe|Wv??@EcW*m{5<Z;<nxnI3#_C%gaR&3>mcyV;1*EI`|8{M8(YHQ%VUk+tj%ktw&
zH7%_r1MVu|W}}>DA?yBn^2;GM+H6?KMvr@St<7pGnll~tu$Bt@;!p>MeBmj;o%)dm
zYj$GCfqibk)>2@P+g-*XYzvd7#QJtBa;kvID76}CO|-imIE`ylQBNOgQ>ROFKOMN+
z?fk4m@1#Qi(rfTZeqv2>=ND08adWGA+QO2<?Z@VZ*=AhI3rr?H(n{<d9axxNKNNOs
zHsjw56)S1XS-1$8h?m(#oq8u!izZQ3TXWYh6ETPNvm?9)*zpN^zKm@+hjuzZc^ix?
zmTuVAaDeeAL9Jukrl<i_-S(bKNS4K|cDvCP8jL43I|4Bfsl-%0s6h0^M;-HGq3V&3
z@yTKLr2$n7Ky?f#M;Abm8h7{GLAxwL9JiYi!lpPuoM{kMFT{TRSVGji5bfi}VbD5G
zAP4Qk6bcFk!ObABp$VAAVJmpB)nHJ0^&W0zV~1g#Qo<aYR~;VahyT!0P05znR%YiR
zf7NTA95fFY()tO1d5(E8w{5LS>}c^Z>srkt3=UgIa5LDfw@;4>?tUlAE01?}tOP!K
z@nNf7s0B;G;ivF8TwHIo3)^Abje?n#3hrk$fUYIVun7Abv@+-eKMCr+0M$M{Ju-46
zZ9CgM2a<We93sP08O_-9<j~iY0uo8g9Ez|r<WVbZU;zL^V2;VsD1SXsL#J$yQQ&B&
zfl}`akpw+#rh<wwgn&@tX=BrYelj&8e$hHPP#Wo0!x^&#^5%VmQg&7j%`d5^FOCm>
zfn!owMdca<9Mzv6bV4Un0d;m@K-pI$9IDlRQPaR|IjVteYhbo86<|kI1ypfK3EBmy
zDm*AeV73v8{Ut)u7Tl{BLe$&oHADc4z)rydYaYNYA0X?TKpHR)D8QNx_}B%6Nu|*S
zu7M>KL%3DtQ79yAJqWv9vtjoGu!2fDY$F6)b72p>Q^?j&^XMQ}Nf8fSVPe4os{!wZ
zY{(vPPPS?e*lo5K0rn_x?d-3?nb0*>d%%NsYaePcPLB!OM;5kN5xkZUy8z$G2hJ$N
zYDAi;!V+-@`|?~xXNI4m?V7l)fI8Y?0=6>!#sWYN1PH=B!pZ`&<TUj>t*KgeQ?<NJ
z9W_tVn%c>3YA0_~X=AEnldqDKeBDl`(My}EN;dr}x#`zSYbcv~mAuq@b&x)&YIa-I
zoNcj_Oh#Lc?6w+t3CPY^+;CJqbdO)czO!YoVb;lm{WD%>A7@{+_aRKb@_BMCIz|QP
z#33cSXRwgD(>^Q$^@FEiUaXy7YvrorphSGh+B$7>pC0cShJ)b%+CPhPg_FWs2V)8a
z{3AfCUPwsaMHvR_y?T6l(%e5g3>jVRJ{X=lPY%;MqQh<-dXC*lDz422n&&{>mxo8)
z=OLRf2YQimGa%=s+hC&p@HDP7#qc>Nlg%;++U_(Do4v+!jHv+5XHvai^DGhghb~Df
zzEXDd!KTr+?QpLSfj0xSG>9F5*hy^#uPSUC5?^2J-{W2vTrrfARZaoqS3jPeS->?q
z?6BHtwL|s?0oU%fgL<NXIeu}h-8I>U6ApB6TyGtK>tR4k6ma!t&r*CW1r#G5xNHK`
z(s`ws2Fi|d0;flv&;aX=pBq80=3s}7(`Nj#bJ%(nWNks0eF)@s5|R`W?4TWo%^eb|
zy{{3uQug!7aYF;Dm2GzjlE~~I{mRa9q$b(S>5*#3BEcI!>$YP^kRD<t4#tqOb2ldB
zh=PqXKIzFVrB=60v^`CY<3>hjFLNq9H!8T=gH=JPR8+y;nXL*2evc+(f%gpXEbO}C
zfHgdtf_YGVao9U}31pg;c$F=z0<f$K`y8n8T&vMYt<kf0=tlaT3&7K}L0O~r!{f9X
z$31SP->Fyv1#lfU=sv8`v{uVOk2C=)+p)@y+o%Fy;SaVJAQ;bU4iA9#nyx#fEPQqV
zzC=6z!WTWi=}8CBZu2~yAkGlaF$r6=;{$nVfLIOoj2%4+2)~}OrAKA`y?~GR;*;Wh
zG<g@#l6gFvPRAS3hx2$eyvBdAg#A)Nozvu5vl?(lk!qG0v*>mG9DKuepRdsFn!~4Y
zrw8<NSpf@Q$ik{yxLx}tuC(`oCEdcU>I<~G+qXwpkBVw;(M}Kk`-K%Y+`?K9{`&=y
zTI|HuRNUS#3$Cm9N-LtY>MN~^(weVyOO)>TN^7#+vak5DY}i}8EnD^$Kaov)i=~gR
z{fLC53G7=vzHjN9W)PBT=5hV#@C+_8o>8BzZMfHIa|u?hS$EJ-)~hv?#IIIrsTZBS
z&RM(4FFUs!?W@;2;4RgDur&)-_6vBq^3s}9+N(4~;f`Ba)jC=Gp_1Oo-hShxbIh^r
zI30mf(bkUB5jDVu?Iz4h1t}W`o-(7CjRWVQjc;P&PUl!tkE&jfdLyhC6G*+YZ(S9-
zjj@?#I-9uAsJKl5!2U~dkomz|{)}{V{~3zI91%?D0)R77eAec>#8$8z7mMwJLVV^K
zvInSl(B}TwLILyuy*M_2s$QVpfj&_cZ)>@_w13d%h&<Lw0mIae?y-J|@Ile@hQ{+}
zX~f3GlQ!=J5T17`PoH;!$(nasd2y#JHYxA*je<L5>QTcq5?B|#IxIW?h#@+Q+5kd?
zXn`t*+JigSAbxit_e_Q^Y`GAyNPBIa&#LaE`d6|Slg5npC6k6oTwXeJE&&c2`?!g^
zeKFW3a@nSBuubH!O<S-H-hPA_r!5!<p#rSaYd?>Ld9P9_KdCvTJv?lp^o=5ZmU1nR
zPPLAz=C^65ST7@P(@wEo`bQz}X9MOH3CCac2A|$ZK=c`H>{kk|^~i>y*Lk)(qM`*^
zz~FawY=?{S$04fWn<yP}I|zwpL=Eyu7!q3VbWR2259v5xD#*%KdPjB8@R%CyvzA;n
zHJ3=e+(hc-9*_>H_p%ZOg|T<i$KLY~$bjoM8$D$=-?8-_EQTj9qPXT3!*kO5p#a!%
z0ghWIEq+SgvEM{70L^Z{W9w8{tlB@Y&7NBK5uok`=r)e{?KlJ2@d7+o01Y2Ny}(^0
zcIws$2%vpZj3=c67d;FJ?LIX)*s<^Sl?hh)x%n$A+lb=JQbE)rNdm_bQB$G0Ecx(s
z^x^M&hTqYLzwa4-M<4#aXZQ?&`%UuE?UVLR-5U9O)jqMR>||DP39`=>=rn=^_?dn}
zzqpX_RIm<UHH5Tx+G)mzr=6Ge&Vd5=t}0+#-iyw`uj#$eU{4PZ&5#40z94wB!35B&
zW~Wm~?MO$GqmxMvX|T;AhX8CBbk8BjU@Mm|Z$5(#25dGRI*JY*mktimK+!Q?^$>LB
z(AKS$REaU?mwNbLww+u|P?E8r#sq=7x|@zyk|12Y)orwN(pH0@?UUZ~PWL$}tx$nH
zo$M<lw*o0`kxw4-h!`(u@S5++!H!F+PKZ=KN#c;I8UoU8K7-UULB)uCGLS|N)<k_c
zwIE6W6`kA;0TKIa+#iHf<9!kYGRRGnxV7cu%x*odpOi&<@l?*_pxoQk#}Wupagg~p
zK?LQ<+=)YoZu1B?%Tyj0aFQ(EU7I!6=o!fD1C!XDAlfw_K^i>-C?A1mnIc3NJPUP9
z-c1$Z!o)y`PP2#MgEVAbE>@r%Va+?v#`Cz{Qd8x15i>w}@8Ci@u*k&$!?p<PvgnK=
zOH|ST*z#+1er@;ASQ}iYDUAv3`+2kRv%fX5kwFCUo@?N?<bfORA+U=p?*ivJcdOm)
zy=oo2BIU|9Xkux%&L1G`*cMGJMwNzNK{1Fl9f-kqRAcr{6P30hKAu9@EZ4FScH1qE
zu;O$N5UK_(4_hoZy5A)2G_^HJjb^ps7PA8=FWqwPiVsifFXTC(X%8PtpR?ZZ((N6m
zw5ydioYI3<yLQZrx7^~cR$Oyi?ls!7wC0qa9jH>bU#<4GD1GAAl|^>YBUX0UX*NZd
zUA#wpYh;bO<2jQ@re^c8JDx+m*wd+cukl<IyDcBLx?NG)aJ$WD`C(gf4V%eEilv??
z)U9)T6(9W$WT`t)e8EelRd<~9gn~-lVLq>S#VD(8yN%-mQD}R~2v&^`kD9OKOAhOz
zQv&g1c+aFOQNhy}LI=4C^vhEQ>gkx&czzL30igCiy%FB7=~`JO9k^26(V?s9(ACG=
zVdYptv-5v1MsJ*;*S3W1wwsOCVGE3~f(w27riAY_DX_7OqvX|>{orx^=w<y^)8QmM
z(cE_)4x@*{I_zPEl8Xc6M|=PcABVPNTv+(#r*%+Boh^cD8y@iPEFI&^6$8k#(Bq?;
ziO93sacG6GMn^ygu<8pbs+w4>VC3qxPB1HrMhxH5&0Jrwg5z%MS=@cec<ev{`=|?%
zT;V@HjcGi~A=ze&K^W2(4+n<SGB27oyl44v_+QSNonN6AzMIMH4}V-k&;8TpR0BR^
zqO;dbUviL}p3U3=Uu~(&jlmw(#7me$`dU-4;J2?}J-e$0+imo8;B7xJJsr?jt(VB~
z2IzTD0Xi=lIUs3^0?K*SC>o^)uN=yG*(k<Q?`kdD)>PRwa1n$IU=q}Q2gyO@t1kh$
z6N1EWAw)%51dqL7)>Aa^wnbpQvy+qNk$l!g_G3SHzUb)><-8jlXZtoAIxJ@Y7>p(h
z#^tA}>%TfZJY>o~cdx2d>Q-1hS*{$wDo!9vqe`>gh@Z8&SHR<P)A0>{)^3PXVfFB_
zTf8r=tgN`^DSobs9dBZRor)W7F=5BzZKrL4or>+YA*&ank9g;|?dA~zTM!U-4>-hg
z3Gu|~gw_l}I5g0jAqa;IS~CP;>6e{Xy>_E@A}O_FV_?M){Fakq#~wzzb8w*O{DXzP
z$0r$1C{OB+N18UnthnYWey)mb&$#YOG*I&5eH(fAWlPFmR%|0@T1&Iulf{nPU9;Oj
z^fbc^!t%vBk(^yCTgaj8K$cnb{H1%`(OiSJ(!DMjv;yAQP6hsjbw~kM%3B$JtiSAc
z8u5`)Bg3;6Ydkk<cwAZ6>|Pz{-Ku4FtKDnpW8Sggoj&!9QS3ai9E$A_7g*uBQK6Pv
zq1$~SS=ibbVWx#&)1ejr<hZR3W9zV<{{ChE?4Z+Yv~=;h)!0ru=kY41W|f`HD*Fd*
zql)WM<y2`l%^IohY*^#;&}t+#<S|s~S)*^TV{~?vQ?tqwyNbn(FV7nIM2bhdv6YFJ
zXNRJkig&Eyu2H*U+mp8I+%uf}F+ARJBAnRPFjhqb{)p_GCf3`yeauyrkOC}##Az+0
z$O7EUpGBtofKCcP^)}_YH!e!-jvY22;5QO0^g<eej_c1%Tu$!~A*!XKs=w=isbK@z
zlf!`aZ6IfS45*HG4B?9)1fqw%+URP2plt(a#{<M<gwb3DRWECs?D89DjGSGw`Lfr=
zORfTySvy(x*OLZ+Z^3KZ4!8i(F(7Oo(^uWzahvbNP;BcNQ2bmKTMh}oveW0Ws%=q<
zijP%s%~Pz9TlPLyRzLW^3b~~vQn9LSX^E`3b<$9D-FAGSB=y*y-|O+2tOQ{&^>X>z
z6a8FK8FZ!;OdKW+6J4Npg=en^4~M{K@B~ryK|nfr5jou93WPX4X?A<fw(N@+s39A2
z4pkhY%}z-e7Xr~V{6I&w`0S+BVCOlmRJo@R;B>fw?`rGVK0YE$$As~%7{n95s6hCP
zhAtJtkQAy7@}-C9p#j&A`1v8+!+foG0<GI8Vgp#Uo-+1ozD`yxY~~*IBJQ4aX${#0
z{JVAG&6ZsIfX|Ilfa#Z6>5DpuBK!RUJMqd0b`?vVZy)lIsOs>^V~uHdCw7hfRzp*@
z%TH2WY+f85b(%$baXpG2L>2H?L>_3AqaX37?Q+>ur%_Zb@Q~5o0CF8Is@mg9Gb7rZ
z2E0>A_00d}pjqIDiNmrL-nrX8J=KEn?eJK10+B_qgWjuW-IrX|ty+7;i{lq9jhe7_
z(?)Q7dZa-gW<Vdk(0X}T$pqflpwol=9e0e!cZcams29f_u#$2mjw+-QA1WN{=(p0L
zn+^Ru_vD8(XyD-Erh>0*aVl=>Jnxd7>ywtz0>~CF*$>2}L-O&mnvYc8@OVfSZwWGF
zpbq1&KnK~{;U%_<E!BdK)P)05gR6%DxcEiuK=)NDJ7LZgsCX+ukWF{HBJ#Xo8Z%1l
zim&uclvaJE2cop*#eBp|{jw5n|KYJ?`v`#ivDOH%(`>`-X2@lnL(xOG{iCL)WmkNF
z$}(v=-{|G!JynGK8RBloscti{l=JL7(+!_36j;4s_}EkfU@s2Obg)_gtZjnr1i(6o
zPCH|Gw;tr%Us4Cb!L&iR59SSQsS#%lgc)y7*{&fy=8LL>&ejq?Zyhu#aGnCIx?rt?
zS0bepLP6LzKNOFVvE!D$?6i8aJ4guU08jD*7bo{j14K6Y`9=opc0_OP?KkSW+~BW;
zgKI+rJ!vwHI_*sb31Au0(eoCGAYKTFoe)G_Ao0fYM3By@h%V`S@GD?Ah%d|mfr#*u
zJ3$EEOdu5&?kq1_{D>e4<A5|zdYu;2+M~#~HPJmS7aE#qHrslB(1@R(c4c;8IM!|3
ztpQN?WqgFbYf;gu$M#TYpyX6)A7)jmxs_N;kRU0g(vDl{Ftbv_(>=DVn>~DZQ7#1P
zL6n+MMR?#4tmwwv0hr@%OI+-Cn$M8jPi%6tu_zEeCr@k{3mMf6C<5BCF(E}E9|(dG
zkaguyR%uN;bv$u0Ap7Z{0rXq|nSE+>8tD4gri``(V(+w)nd1FcPafkce%>))w*0J;
zf#)y>Os9F&grgMpOP(y<ezPq<?w!Kuitv!4IEfxb6_>97?2ri)0mztg!g!+M24RXJ
z77;v-)}kHRc7+g-g$lNU-oCuwWTU895a=feUT*qbtwRq9C%2Y@{q&VXK+Z^R3JL@(
zA~?Pcr9|8Ox!G(Z_waa)TG#CD;DFAw1u_`hZcn^i#7zwj(ty*ia=FrDxn>-3Ya&?;
zw;Y9{(ZlO`QEXi;H+sDnWmRn7D?_ocau-#FjP!>u3I$=L+ww#JkXCmnw(Z~)z!<X-
z3XfzunR}w#6WGxq%?-^=5Uj0(x#JXI2kk=zW_xCk3zXeAo8}sr-Zy_l=#JgDs=c_i
z&OxR};p7he^Y|}o(2zP(K@+UQ-ozkKO&HjmM`#k{w4LERtwXjC#a(vAtLQ4oBMB)~
zVzY~l(__KowBgijV_CalEP|Ttao*nlSz#au>xQ@f>aeT0wr0Pwt2a(ho-6!SPqEf~
z)mC}xCsy-Yw#vl));NMKtN)OJO6#5NbJ~7ZkGlt5evCqxCw7q7ZlkWPyT^8n&Tiwt
zD7H?h&yU!4A>>4v4bZC2q|k}zAS<?Vu(91)yK1X#ZP{^X8r{mXBc=%`4EFq_H-HH0
zCw0|^eRseij@l9jTpz4?*+fYCu&9^n1~G<shOt#mfjZKcqCc9lAOpOyU`)?W@jZ<3
z<5@mBXuJLlHnf7Y3xq)-pVC1LE*^Ye41<D09@EUkacL(Vy4mTRGVSYSR~{WaV}nyw
z_t#_A;j^wdWcZ=#T@A{47I*YB<8%P#Kb^IMn<*x-JflQXIH}gfef^B-Oef7}5}@|j
zo@=&p;*&Pbq$YbkXL19S^Mtd`;0*eLr@=&QxF3096&s-5=u-&Ne*UZ8H`tGwuD)Y}
za%Z%P3&O;4L#AxPfi^talL5*HTJ`h|8*BX9NO`J{aGDn8J-Z^1rqYEwg}K6EQsF94
zUOah0*@4I$>Q=CS0#poj$20BCmyiLdciK2T(zQe%;f@YWP;b0J1l|9&UGH`~^|(=c
z99Cu#^!T8K9)gfMw{!X|WVm2przq~@4!(iMcMuw1+2!JenG=+gsl;s2t9!Xh&3-)2
ziuW68+p-^icaEA3ZO^G&2ex*nv0c+^@7Tqa>JzzT)~&}4thoC4u`0HM&377)D;2HS
zX}eOcZs}vIy3IeXRt*TV2m9zUriN!HFS3j6oNrdK+5EH9PUA=;j`b!7il1x6b_f*R
zGX{z+7l1>n*z#&SwTi7H`~NkI?MwVquBeIPnqB<dD7HKV+8q<QGwba`-7ANIq&g#!
zN0)4F-&$DDPJ23;?eG9l{9G@#JKNJcYljA4P3sK4Z)Z*D84SK}FOQzV;QRLS=ot*Y
zZ!eFY&EWg?3hCJlzHcp(V~cjy3fZ@dtsbxgfSf6ss(t4^&Cwzcp9vkb(X=&OEM{86
z0oUJx+8aMlz_s~_I(T?NIyy+z3(`D5*9{I+^TJS=sme3q+-tF!1y&YtTkdJC!F%3l
z76M)8ARPn5&QDE#9Ll2MDq;lD=r+h*K<<x@Cd>{_?6`?E@OOwnIwpv%DFevyVOM*F
z>`&kz01GL$9feKWo7|AnaHK-TQP&&|5H=4S?BMjJZm#j+;&XR)^79D+haf6te5c0I
zRl{H$P8>saf{@Sl4M;oEfsRvw(shJw{=`!ra95|1W`^T%omUi=x}D<l$3g$^+425q
zS`ri4G+VzqoPE;Q;MnOvTb!=9lI*1e>ZBLtpl|@xPkzo@5e|2J(CCuWOW+!h!$AW3
zV|19O$qvG?wRU*a>!_yPvl$0%gDd-qc;a^GGQWgtsbL2$mP`V5nvH`x@4$8%^bx$p
zWV#(XwS_cn4fg?>-37F|S8~qxJhNGl?Fa~Sn}x4|38=%im!@@yule8zEcC=nLuSWE
zI6U;?Lr@`h>}!6uL&b+jXN7{XU~Fj#K;0L#4G6d`kB~#SLcwMl1BzdF#)l1lnwG&a
z?7P;wp1>0hkZJzdCUCk_PWPZ~K&R*4JTwxCHq$+Lj<#yOxOLo8Px_lX>8U5+G4Y|E
zAI))s!BTNLOAlUSg5o>9z_HJS#Mgg;={F(iJzv!a(K=;k=3ng&K1l)fhT1rkN!m<I
zTZaKK|C4Dbe%w22^Uy$M3X6&dmk!LW5szJ{?&%rZi8S*DlOIhN44%YlKmc_RM23%O
z1+x7BG6jS;g(*`6b~-uS5Ycd$bmimKSP)V?RTKdW)#2S)PBD|I;_1FuPconaPZbHK
z%Z^MU2tb+JwET;VuJ}|EOlQMYkew#mPhJS4k83(qkeIfwKvyy;U_8kl(%56&vH+ER
z%Yb@u3=hTRbr#&xRZ_FkQU$mvZFS)dXXEtj1k6A7vp4XVkGTVPrS{mlB*k*{gQfC;
zji3jL@3U5;Or&_+XaD_B#y2aEp9ElesLIC)`)|np8?gW0GQ`PbI67bZkH7zXJAXF6
zUtA~O&(6R9jBTLwfASdqtvq>L*8dfM{-aXc-rlZmZ9l28@0IFSb?ZN($DgrJ^3T;`
z*`G(zfBcqA-j61WAY4}YU-XZhfA&xOAsNJzWIEX#+-Vyy!Q0!l!2EALsct>C=Kpc!
zaqT~%@}2e~@aNxr{=Z+3*m?2QqodIvnJkh=tzj}*j?PEP{Ksg&d+=!MQDfY{S|n?1
z4{A(jpXQ@?7t5&FC~ZX5a<#e<9gQaMhy7(g>Lr7V$#gt@_lW^+vV!Gslhyj3$^FrJ
zMEk^$boBPY&+rK6uvzb&vHduD@W3tawvL&iG@1;?SHmRwX|WuRCd<u>r|R2%VA<hp
z_@43Yd@*8ddvV`r3-MITaitPJj{f+={9gG1zHe(y%$G@=EH8%1hj=(2F`U?1GG>EU
zKpp_x)J!y&{qcA@fMTnFtuW6ZVBHO-mzOjIxv6$~OkKc}QWvsIK+fe;beOTst>jfF
zukCoK1yaC*kRtX<+0HlCd38`fcmX#gaBkp7P`^iN8$xVAVB@BAzaTGA{GS1P);j6!
zpgS2VIF0A^4*bTrYBBmpGCeO|?cjlEHt)Z?>_^WUjc7RPzne@K%h4bju^;Aa=I%dw
zIp{BzkAA=Ej~SuZ(#6ur7@GMo9Szrjob&77<9X7bUlte3`PE<<(Z~4rtK=$))>(0B
z?VoFf#a1;M?XDHBD%%k&9SzyfAEsl5a-2k06UI=YBJg)SOcn#EJe)B&6Y<-t^Ydg5
zg~=@1iyp8R9zCUxnC|5@eqdb!u82Y4EZ-<3$S@2#TEBc--hjU=&@0C7+qLUzyo`pI
z{kT6I&RL=O(SrC#GM}=BKMb#CzsHmKYP(h}mjGnD7R>;jgYR7LFlaV_wE_%;Rl%OY
z(6FOS;dHwS&}>x|uNax%C*w~=>~L}k>22b7$&w9!{Jx0wF5gnFoHcG;FT$Ke>m?wJ
z{0M9foFfjs9msRi>GmKzJA`0oQ-kdpJ(*lxvd)F7Mo;$uJJo>vI>ThPydYGC^XVMk
zLhVK6-Dt$x@GmP?_V6LEQGouw9=+MbX%Q28-hWRMwTmzx7R$=-Na`YxdA;-iND8%B
zpNDd^Ff}Et@#h--$K?#`;|C^m7sMR1>3qpp@XaRh8XLqlwiMIg&-;uehtWeEWQYdP
z#ynYG%_r=qKiQnY^nMr*EAiD1>*5ae5GRQt9!*Be_(Okum6Tw9eKhhm!b%_RMNIA^
zufvZ-Gse2eO^j}3qT4Qht%$D`@pX+zr}c`}#fhF@;!4|yW=Jq(Tvi>6vCq>FeZd4j
z_m07}6MT?6r>DmNFP907dyy=EkH`H<f-{7uHgGOBqH;-g!kxc|53wslwbV3lqtBJ}
zK37)ib0tTg^LM#Y;w<1UW|*=R4szBVReH(=g|@cV3O?+>Kz6&ZbRVU)3h<2gS>+;=
z#igRJ2YU>2Df%y5RYj{W=uI5`=<PWa;RJ%H{x~18wFeX`Vv8GmRKq3PFWzAR!2TKB
zSx`_er}1=rKJLF;#2<<zRV&*lh8N3ZK984|v&u%~FQ|fCh-4ua^`Vee*kP-+KZy}D
z`<3lVSHoF6!w*!=*)Z6S6VKi+7r399HR6kr6#CfEZ24)HY#_v7adjCl-j8M+Vyk*x
zV-QOw(_^EYP02smuEYQTr-|^xlC?5s%!xl-jOYgw22lb!vnBZA;ARlG>4G?cu_Ui+
zuuI}5_HjCg?fFkO6=Q%LOec$_PR99S0pp7=7w-Y_8aK#nM9$y5sEogwf8VI+&rpA8
zCS`9na0Q?QI0WX0wBr!vZsqS8*uq6(_XyO&|8wMJ51zw=R>wbB<XdDKjr?4f#8(td
zRosZ|54nNLf$q_-MUT=N{b8%Jt#-2%J$)KgYu|XKObYjBY|#lKn={mE$Bm!6XUFlg
z)6)abbQ|TY6J{wpkx}V>J+*RnaW-me4wc|@s=8y!3!L4HJeXF5U1jKbrut&!pK6H~
zN6I)y9is$MSFT`9%9m`A#J|3Iv0<y<q56)OS^#q~oHICG@|y!O@NPt_{F*9${PQO_
z(6{|zytu#v1mBMi*&g)2K!6k$3Cg+jZs6_?O5{IA><Gy8RHch-GBuwm<r|t7!atSx
zzk>MszW<>=86qN0Z;*sWjmoPod>Bk;pGb>kwYRE87fOtv@L`@XvL*A`928#$49yv!
zj8iHlV|Z4tv~3@BUJ3U5^6j{SfN~?=APor7fuf>glS-Jelc`a1edWXAVvgtQog!e^
zh_(?#mMZ<74gAA9^H<+sC2}^<E%qP$$d!H3#p!&GU9{@&AW~ixVCA@I?PwIR1&V~%
z!Of8&)DwjgH=H0!p#w^k1TK2cr;3-9yv(LG?;EGfM4U5ROsbR4LbQ}$tGj|1q*`kO
zt2q&C%+`R_dICezCMc}{{?0hpSp(dz2}~t%H04S?L-gqBbS%|KK$#GB0a5WmRCI`{
z52C6=Z22Iz7=&{SVl#oKAh7w&`h)lEADG%A`yMe=@n~{Bg_RAzv$37WgNtPFUVK?z
zO(w~hzR`JwePl-&VO~--$fCfCB7SP5z{KRg1}B1d)8#apUM=}47it^WvA*b0^k6T|
zOu8$Zf}tU=#>@WOG5dS+THtu&X*CN!kl2S;N4mz){kX=>Hbji<!zbaQqo=SCvZ4h$
zF43_O8X&3IUm*E2&W-9^rp=l$U%M_8;MPr!Y?YE<K*>5(5*jZ!#a1xkXR_xOIIvmC
z7@DtUS*=gM&}A!-sKvIw;5H<Is&7i#qP82W<Zi5zr?D-)F}|6Z<DpM{C#&edYUY$I
z(<;a*1DfeG>xI_ltYr2?f=Y?4Rr6jk(70#%q?r;<lOz!sIDHbv+Dk=)WkFQ4Sm)vZ
zh)x0kDp^AH7M-eOcPap4cB*1@s(QOlRkJ%405LmNH9EC*yH0IocPap4c4`Yc<?dFM
zZMqFg1|Q;i|08x8uG)gPLK|TN8`<Abx=!BG7ckhZFJ|MB7%K|X+g%_nKH@bDzm(!F
z!3e<k1@~36=|bFC!DgYG+Can64ncI<z`vc}Dpkf_TWQ}bHTKhE_=)}Sm?^qU9fa!(
z-ehGPFOmlHLEInycC}b4V;)*mp<tX$q^9qDRrW;P>bYwhF@-&uQ-bRQ^`q7^$RKfg
z+9}G}@Zai`GOxO3^XVX2EWnsN8eStfn|&sp&y(fgBA!eqF=(IDd4F64L?xlGI5zI5
z;hQbKF4V;&+(8ON_Ym)+ryt*LUd?97d=vQiY7w&o$#fn)pbIJ1r@`zBwtuFXuoa-$
zkI+<ha8N4gdQ;h>u^4pK5VU{piPR5hDJLcjUu2pj!|ew~zJJy_I$(c4i%(CAMe~RY
zugD$K2!&4q5A1gHpv34$m&A;6sG2K-3!gjf%L4@~udmqJfPrrq>xQnhpy(&@9WUeH
z3%?3+xn_ed6Nj@f2+`V6!Ti8v#@0;nOpCdETiM`0RK*Wl;)j}E!MR#c=lET-50`JD
znEhR~8sz6&@s%%MhGCa0=Kz4$T!4b#8=5ywoGV^4!o3M!pwYenFxU?|yaUo|7E-nz
zs~oY=&sY_=4lP2w4il<X7!ISyXt33E*s2b@l@7aA%0UGj6wL@h!+JON3^EOg4M)kH
zS%1E)7IlKsC2|f(81zce)s>*P2pR-Rfpe>69B@wXx`w+UH!8X?CC1N<p=x1}D5|+p
zR05r-<i=2TJE7L3VIWBFh42wbrx<oot^b_(%lYtE-;~4vP(-Ad5G$?;v0|7I3;ruo
zzv$YCOrsmw=tx`8FFvCO)*@gatUelzuHLeKR<><2Fp&plC*n=8s!26Pn{8xThb{~m
zLyll8G_n<HWGlT9B|dhF!d2#hz1q+j1gJV6&6d-7d_GFX!$k#2CAJIQJ0OUvng`X^
z$`r1K24B5H3RgmnR5B?X5V^p8mT@j*rR9uT5^flU^&;rv>m?m6!}Y(2^FhEj4m(pY
zssyzQDPXl#xwsf}6RBJ{hGQMNI_#QtY{D#daDmCw`@$9>iydRYBtYC|WIHhG)k#QN
zYKoH0J!S&>yvFImt;yW{a`1W8S~3ozJV>~D<1@~obmgZZ(;nv&Xoh<o(REMy!W9gC
zHl1f8a98!$irFc1G+vG-@nHE6Y@`%@Z?X_F_gi*Cc~1<ee27eyUwZ2J${rf}H<=4o
z!|)*TdiY3G`k~9fV>%03-4Nzj^HlA(zzW-eFd3i`fs0jPGDx!)=!OC`3vm2?dxXL$
z>&s*9$=T77Vcjsy8jC@H$<|deWOF-TED>A&Uh>V>=`0)wM*IvJvX$VOjDDw6JP9`3
z#b1Zc!Eew^KzF^=ChO3Go5+21`Q1YjB}K8Z68Bn@$Y9}!v+p>@)33ax1qEHy-L4_?
zpw6!0)1Ba8;hv9xu#4m!V20U<9z4*yuwBEG3TqYI*tM?}`PBlidcn8e1iGHy8xj#8
zFTmtf9MK^c>A<kx)8}OL22dA94<GJwfinUe930E^cDZ+-)uxyYhK<`4)Eti3lEXEF
zdmF5;{$i0_z8xntkq=be4cW^_5#F&suzJeIjr~w7lUFcSr{eybu2Z3I+6w^eYsr#n
zw!#3#2`ubTDW)FjzJ@Cjr91wlbGdKX%I?yhiwl!?6HHau8@(*p0qzG3oo7;?_d;$5
z;E5@}0T)o`!XCdaXW*tF8(3*jp#cfv@7mK&U}?8vuxpIz2?g%K=cDns=&`~s4J+)D
zDH-0IG1iR@loKRe#Qm6N<RNuHICU_1!_gOi$A+fo=i~s!u$AyLET(ZXfgtp??dXq4
zcoV5QtQ4iAYgAaGiIBVb%3k>F-jXtRMG{UE-Sllyh`*$8-N=Q-EqeHHw88$uHrT)H
zqp*p3=a1m1VIk#d;WITx-<Ux$yS^MP`I=v*tofz3<~^I9w$Ux+Mz_R`uIMc*E!F;)
zi@3XXBiyjch5-P{Oql&qJi>dR5gY!Jzf6%7=eFD4D>_~r9@U?9dHLKf>`>Gi?r&&U
zk<(_H*N<B#%B;Rby=oCJFXqYOLfWjC(w2TcPZCtPej-@>okQ~}rlFF6hGM8);_cEf
zm0BhgI?)lgA*l(7onI=>5(Vnc&PKZAPUeVml3XuAU%y(w{4ME*;K9<kB6y88-e7s6
z$~Aj93`s%(m_rqzpL&WOX#-)jFhLF77hBL29Zs~TjB2#sh~<vfJHCRF88Q!7p6Hz6
ztWQv^wbjX?X`!Oz&sw6gj##ESmNl>Q@FDnbQ5BsNhzjb<Bhh;?fq1u}W^ul-NGG_J
zpNJ7SeSY&zFz$TWq!t~A33KHz(ZGe)DY!JAL(p#zO~kYaq2OtxlY;oc8K(lE=SiD`
z%ORafVMF?3Pndt5J0?q#l+sBrK0X9|j_5%8*{cs1B#JoajfM@%rP0!QuHkBmbjzc5
z0FXd$zed~~;ur@w9n<b8q&Gt~W|}H(6VCmxu@%rhL0=`bsLzEOYt+}8P=qAwEM&d(
zH;VMttu+^|r4JE>&Eg|)L9St0tgnmf^tRX(!)-suo2xBF5O|E{v_eNV1JUpCbhe0~
zhWHt^uuZ5h^=rK!Y_s1q=*FSKlMgj?A^-tYC5@aLYoMvY7V1p(?>H#dkE>bw>}pnl
zVRGKT8ZWKdYL?x0mZ5fBf_vIAYcKJDrV%#SDrZJEIm6MC={n?tQPk8e{<JY#XMcr9
z>BRyyu;ITWg?y<$PW%UUX20X`4;k?vYFp*UPwe;)tis>%AHKw&zvDlA-uMsd`;w_{
zlO@|a%&-ui01##w`#$S0^)ap(CBY6d$5gt*PP0j=asC^UU}XeSfUy(}EoPz2Yu-=_
zlVNi0KNg$La8gCxm8uTkd|3;iV3Q*fXp;{_gSJDG_?joY0>elV7T6CELLrVXE7hG*
zbiq{Ocs`xNc`d%Ygk3RCW~1%eb<y{7TfTO(3rZSJf7AwHiWX4tS$I2JEwhSnGqYXe
z9#Lxr<-De>@3u>ummJcL?2p_a>KNf0Wbg(t5Bzg__y#R{gWZ#yAsl2iJC1|?zMqo{
z9>=qzCZlALZf5NRZbqC3F*GyQgXn=3@B!OgvK7W#EQ&c?FFjCaEi?W@L60I(NQ(g>
z8vP7`AXX3VAeWvB@E`{Jf%EhS$<PH_SJ9rVW`=-(wWb&9ksutu!Rh%qo7d@i(OSLK
zCor;>qI)>}LOLj(4jI4g@GE1TbH>yD@X9lbMH&eibP)mF5{n28*>x*s-8GkMuO@Io
zaxtP+UNp!_Orpnx2-T>F3Gg?<e&*1c6p}*D+lpJIVpj1rGZ}J9Xw688=5NNSQZcKn
z<y>g~Kpi)(tcA?*O3++!GyFB5>cZNNpy96~!)W+qXpUGKu>T6nOjHc^Obe<}^mS1S
ztl>u>qif*Pk46jPQN}MDlr#m>1J++P5A@oa>1IP}3M<9jkn^eDJc%zdu#Fu|0|sct
z)5wUWAyAqRZs4IG{r-XfP7gNim>Qxs`g(&;Jp5bX|5o|GTdp!7U$Z*;KxMnQRuIum
zH=@A566BD7a{ev3jf%H_w{G>2x8nZmX;qJpp|!YU`He%&iCn=c@&>sG7o}BP^?TVf
zV*SmmpU`%wh@)Ccq=?G)#<yH0PhgKMm<%Gq!{d)O`>fdy<?kwP!wRt??kqsXDq%6q
zQQu;q>0`Uia8l}KPW8p;-9_eHJL@|;21S1OFWQ=i9(JS)Lwvv@HG5}lcG*7%8ngph
z==8VRjI8$E@7bX*uK8XeADk-vcRb~SQn<HoMADIdm)EGI)u_rETW|Pley0&qvV)C$
za2v5|IE@5H;=bnf1x7GcW>LY7#bGq#R?rm-7ar#Je4Nmtvg2ix6|H>a1&3clCvxbd
zdLxu3b0;imAvCZz^Rb|*s8FeSfe0(9U^`B`)42igK@s^waPY3oafT)9jm>T{0$9t}
z=uJv9Y!_RBc!s(b9<(zlA;6gn=wm`{R;*wyKR_qERrDd}3&xQ;ayK3a>DRo%+i@}(
z@-Av80XbJDG8D;>rGA&AsoV(7wvDEGBQ!M|&DM?3R23S}lgzNaMDUa_t%M;}#7kjY
zS*0Dl_C0CC!sopCYB5o;O^Yk-n6F0{&_i?*yP%CWN9~HQb|r7^s;_o6Z|yB#?Je&)
z8DDQow_Eb*c$X}TqscPD(>1q5+Cf?hcAOSG_nLuNz!M+hIfKVyk&FuK9+PSbovrva
zuP0zjzE~;hmN*bBC3j?$WZ*Fp+#oQDK%&Pxv2FA+6$<=&bhHuKU-T;hBesc#iEW~5
z^q@(5CbGh>B(=?`J94sdwHW3M|2A%`JoGBms2NZIgo<=K)yIfn0#z=j9}*ZHZ6JQd
zM#VKrQ-kyk%!*fHJ%MjxHXaqX1UyB}*d-b>Q!~l+a^4>-E!-r^Gs2wMYqqRrZ9`v~
zyEz-J=#7TLnAnY0ZrNy6Z!{F9#BQ{D%SN~KMnhpp>_)eOjiy8o(H^AG>w?flXvRAO
zE`Ju}!g{mVAz9a@;#QR3#L8z*(B`fYKxDfXedy1%^(|gAYoCnh9<EiepdI!Kg-6QB
zE5dwmpp4?E3_q-@v2a)V914r?4Ik((wH0+2;h%qtCU=bM;h_-J$#f>8Y&oXv-Mk@E
z1aE0h+=|^&9*a0N_2?=JL*OaqDmT*Ukygq@Sxb7E`|m$;$5}zY1?i`t!mY@L?kwht
zvC<+v`ilN&CTIK=!5KbnLnP)C|B}Yxex)5%Fjfkts`%Hyq<uw9T5KWP0`~-i13hME
z7>pbA7*6Z>6O*=;swc)!;Jf;5?cYfptk5J1f#qOyizexWxk8&4{l&#?+M}>1Qkr?4
zTy*~vH#3E{t(o2#elMV}5}Qago;N^|D=v71A|Y?lSA>Ts3WO)pWULRyblM=jJP?>j
zs`$C05(#L^OCduM;13`^E85Rhy#j;-+SUE@a2o2J#`E~q&UU<AE1AHoGWV@plVOR{
z958f#P%rDXSUxG0i~5eWMP47|_Ix>G3fVk<+n>)z$-HPeDERu}4}z32ZXO|Nh4DSa
z^O>TM7T_rFK&$BkH3BT<#aYRRvz!}eDF^lP&c{P77?YyhzgSGB6uwoFZyfuqSSP{o
zOo19ze~oImM#W#lYbaMX2|L=7kT9Oh7-^B%vTPZun(q$BW5_7KQ4iW|HxwS=Lo>=6
z*;|avE!=(83i@Hr2KT>Whb#U)C8UrKe#?Sq6?j%KfWDUYes%5;KWQYzs4*UMjbFNl
zgrO(=(8KgNMuHZm2UjY>^k_zcSURPv0m@ZVLJRreD=wu2B19SxnIcXM?3n&`_*q)%
z!jY*L-=?fwL!G{{q0-e*03hnM(Z9E&rSQe+Pll7}k`rI<@bWZ4g2}35EL%~wVUC<C
zuA^z%37!N<1|OE#C~F$xz;Y~9%?_NDD`=8w29!fd<9ujgp|ky*C*fL!+tWi334YAa
z?Rn6JgApYI6^=;i$(b>UKJ<|!GHTFlIvY%{Cd=`-XwB7jO~!W;%STghAMSBe6Rr~e
zR2z`r^Xjem>R7}eG95C5A%p?|4^2gzn+$J=>_HTEku)*;!xQ<v;q4CEk6A4f-d%+z
z66XvjHj!iP^u#7oMxkJEB2O%k8--?Z`U)RJXd()-axM-`u3h%lL5c)+@wU`Mo!@<g
zQS!<NQKZo2jZxHp7vC;Sk%7n;sE7)TSVd-$Ct48|+*QCLNtK)tiv;|QLl$YXx>C#{
zSuc0cBE_AbD{7ImIOd7;9_KWc=G~hIEebB@)c8K)V^*XhBTSL(?VZIb5<!ac4#GDE
zNoHSwq8mN>Pkl`ne)KPG*`e3j#+&f8MsmD8&ur{VJO#f|*e5#xPkg0ao`vE*E$qNg
zy7f#(eX{JABD&MU3GW1)qeXaHs1Q$-5d%M&k)swWbX1n7h<1l@7eS#`C)m2HP1y7c
zhJ})5P7!%VHATUp7OBypH0X6J_zah`cNrwg5V1z^h=mFeWqtO>hDxRJZd!0C_iA^-
z&Y&0zhDStNUTs7$#8|T+=ZhE>`#^HWjk>!?P)6UqSD0=z8M{tq+$f_0cX$>wWc6+O
z8L_2wNPl=KHV2V6xRicJYDXYql|U`AE;+nWhzNA`AXDzB(?U(LR)o5$4LL2+l-AE%
z1e{X#-nF!-Q~J_hDfE=5BE_GIwb}+d5QVf)G7c5Z^y*QlaxlUQoDhgc<)02UjMDC!
zFD#WtA5SHn#x!~!q>KIh&_qr#%Gme6eq1Uxid}M2_;~piZ!hp?2vD_EjXz@Or~Y*^
zEEb(E+Sp1WmNsubBZieFigHG>!Z1qi4rOVUzQ{|4rkA57SDDRf(ibl)f2(#xrd&-1
zVp(}w4ThQ$QIgD1Q=l7Iais7Dzz#N5uz-xh^gvT~Nif>f($V;XWXTZ)N+?DTw1JoY
zj>z;c{Kts@#C8I9LXHQ^`S>nlK&8fis%~wUx2tyir|l=@zvDlBi9cT;{?m=ZKOMG?
zdd*JU=yi^A#8!gu=i})|JuZ_`2!2fdukm0$a3eCgVVKy!7Vg)JkM6hgN&kcAtG2eB
zLU!N>M+ge;q=##2oiIwgX!g$9N6on1X`Oajy<ek>6Vu59^aQpyxuY>%W;T|+U|SIE
zX1#jr=$E*8((C+M^a7Ql1VUCNi(Mm}64-SN*oar5_XfCCsiqlpGDn>*y>%a4N(XH4
ztUknxPm}B(SWqeLE5Ov2fg9ra6<m$_8#Mqt?HMqsP2tnc)B&0hfyNY2`oP#qGkCx^
zHVZ~?5wCK1ps*CQ^cKMA2v+6h73!2`>RNj{osJRHXaSn{92{o)mrRjnryN7klR|^(
z^Ow>349&ybFWnE=AAp52TBm;(si4c{oB^`G3Cb=fN8@$;2emWoHd*f(sLYCek9SMv
zcGRka+E&D$Hlk+h<VF36+5#KM;nC?!2+MHVp;uO|Uh_CUIit{J1-OXWi0<P(5pQ7g
zKAxe#w0?B@45}6nIxiRyFONox2xtg_hsGl&@fj~-G%8-vF}-8&Ii+sf`+B3-Iz93C
zpPC4EpU>INde5eBm9F!&3{LNRy^aJhy3X*A-d-(}aPI}g$^#%)I`5Cc1{J!s2(KL3
zFgx!~C+`9)$L8{zvpL{+^pqWU*qQwH?7|2hw@#ih<)TYa*v07n>16yVT3pQ_$Xdi3
z3(|gyz*7a>Uo2MQ9~x6~cSCBrBRZKb<$#j(@k|`h8;oW-?@t!z$x6#2IG$doe$qW`
zc7h`kt>qe%0KUPf-p!|1vpX5mv(D*RJ2)O}C)Z#Q@=Gmtj5&x;EorgC{<@eC)OWh)
z87`LECtC1nJ1YErOSw=qx&baj<cVN^1!w!pHoE6Wf56pS(dewK=zP^Gt}y?h9Tp35
z*v7oGgU#Dd5oyd^l`(nXWxaF4nE*QW=#d!U>+<z?*Wi!J`z?NV_=0O0=A;>;y)({9
zn{qrwfZ200jlq@|bM2wHmdYv|>h+tAbh!1w)qKuWNMxBbIOqeu1LYN>|BA#<52H%-
zWAs2Y38_nkt3*YGF9ifl+QD<udwXhEM>Ptk`^9PN0PiY9+5uJpC=CCi$I731DJQf_
zhjvsDCLOQHb*wbvi6*kiCm^#4D4Jc3fLxQiV-mW|cYgJce|-At+!tI9;;<ozTDln%
z9-jU8fBzaHA-5){w-2Qyi_}*;Xj;Yrjca(sW~2SUPTR!<F2an_(v$1ioC%h}bd01q
z+o9iuBhupIXgRouiV_G0IDl*no+0%b?H|Dph$CpV+nv+iDSXE3G42=|s(p6gt&J}$
zvuk%7J#TgRuGi=0^B?^Z97q!p-6BpmWBcY}7u`FdAzj2Scr*`w-la7LCs>sz#CEJn
z-H3f6B4axi!9m!Dzb-B}AV_Dn%wYe*bhCeatjDF9I6DjHQPRQ&S+e4nv*uazmOC`z
z!(!H;vqgIklG?oTVbY6QRFrb-XF4HKFcd=vDw=SSVhriRAkRy1HbPqo5zIjNbT49j
zK)yqdo=)CMr>|8`9jPh}k&AjoZC+ln6}_BJqqn1XA$?YdzX{cHG7|eCKOZ$u4*s%S
zB(NPc_s^aQVMDezjuiz3>&##v4}x=gtTPeP*i!Kx$#TCKvW#(j-{2A{@1AOj9M^oj
zPXBsf{ilVjQ^#%^NIaT-s4+P9cf6P-gWSNNMW+KV2eW9M{gnf+1kJ{JHDsXhqAVM$
z1kJ{}7%Ukq{G9`@2AqwV%`clQFp+2Cm7v*J;dx!Oj*slJ@oK;vbFg{j#;fr-1cRs$
z6G5#NE|W_T7Y{^DjDy01|Am^tHzJ~<#FkkTAStvFGIt)0PTyCdQmO*k>^&!^|4hhM
z`$g?8jL=kiyE8J<0{Z&;yWtzKjE3asMx@3SwgG8l-wRQRhAx;Io(^Co&WQ!V!m@z4
zj_dLCY207vZ;Nk6!n2cZyV+<Rwwebil!T_D*%Thyfd}5aKY5oBHGSmN?M_&e^(V9`
zSuZaB6hka3dO)fjMT8PhfNelLco4C*Fu0r*1w}U^wVcS5k+q(<p{Gz(ZCQBv)3#Od
zmTo;sH{$akqXT(fe>ZzW%pAd}=+}S|NGxU|t2PtC`O7O>01&a{k}V)H_~d88Lb#1@
zjO;g(FAjP03mHWMxyLzTMs8?-%b6ABjHD^#Ih5OPrbM9qH(k`oNx2c~zopIHNt<ts
zl%GX{5U>C9-)F_&GgtgQn+$h3-K`1sP0TX1nPa(R-OYPPCJvOAB&RWhTi0Xq=K{Va
z7x!<5x%F9{f)^G_iIwJ72y;P&M^7(?^UVc@)fmpjbsee>VGy{x+p21pegzvLT!3XD
z)IqWlqQpY0Eu`M&RWQnzAG-rz{)Xhex@rUh=vR&U@5T@^?jHk_#e|%n=uUyrSa9*s
zz#csXG^3eZM*cQ-wF=OoU<J`zy#jRw;oS;GXLseQ_G)fsmjx~;&FZZS8|-N0!diW#
z&1BUbIM)W;YqWEk63!0tLc+yMPSDj1yMHC~L=Gzsj2+2j?3NHvLE9~r)^N{dZ!VQQ
ze7~nhGPFTB8~LsZt)cy?(MmU9Gq(AjF~s*c)oM%aPlmLs$i0;{Mn-eP!fu(jvn@P9
z3VH}=sT+6BUzWN7jRctrJ2ahJL|MVp$yOC_d;wxBiYw}gw^S3&N4L38*Nn{+|C=m)
z&$wmg`?zNz;O_m+Y~FO4-XBMde}Ekr8rJ)bXoj!LXsM*=pW@~rOZ{Xx|Mj#_KFomk
zn;?zaYypD$Zk7rEDw5+H5nWJmQu*c#`Yd{ze)*TB_9!Pd@R;-V%2p*(s)*uU!5W!5
z3=W0kF}0Xb(=N1XweV*IdDf#`OPTE!IhL|iGX9cFne{&RQWn>JSxcGW{C8W*f;7IR
zY?gf!OL;HycPhM+?Ha~H7FWjE3z^X5T*o%>4VN(}d^y&!Is*J97cz7|_ez%c>sd<~
zu>QNPWl0?0Vzvsuk<}dNRoZe6jB;19N8SZ3fpe{B3))@M;>3_~*7_`#h1>gV-H~tp
z9UkoczFgefiuXe}QQgw5E1zFiriW?17hw5a!d7uu&F?&x={g1^kU<jFOH$#RNgm6Z
z7_y&`N)}fBhxpD%7cs6YBw&dUORlp!PDf1NclC!FIh^uD2RT)E$M{+UENaj6NkYzY
zdHoYJdg^*H13xL+@#XtKcQnr1)fGJ{?Vc*RCgdUctM2Ixq1jIY4<qx;zIw%Z3$m)$
zYP0!hIv*`RMNcu>_u=84yj&B31B}OOO<X>r!SEYl(W6_8Pic1?7mwg%j(OKhh@aA;
zCE!htL%DF|gz(znwT1L&5;vNjN0tA5vVnR78&?Hnzu8vFG}Ft$g?pT_MMTCPaX!)&
zEWStFyXV^^Oxu}01wTsPL?Jc2eu<9kMXyHR69~JTerCw!i;PcBOt~i>l29OQ{!46F
zpRac0n-rc*2!wwME(z{?vBFWvcPpB`P`(!d95kyub_s$sPLI5AS2Qz44h+8cEBXN`
z2a1$8EEzRVPEVS<?z0snm7!X!xiLi8IZcu}m6M`%<5voyofQ>gUHiq_+#iw5(<jqt
zz_>hA6RU23f0U~h(c71e<z0U{g^hFDai*`F(Lg0IS+4LM@mmDm0jM9k2MCZtyB5tw
z3iBG(C561W^tO|O6lMdflY)2P2Ba{b%o@a?XTsiT(@$`I&fw>)Kx;X<LvQA0{2>pz
zH{g)f$YLLC{aoTFi+23X!zs5Ti|ob?$YR}}N}B9=Nn%{8y7W+D7=>SOYxDJ?l!H`8
z^Q+TIb}x-iR)d-*w^MTZEj2n7v_Aa-dRp|M^)>H4F`iD}Ltw;|tz<l6=g@WSmz!=r
zX}zO=VLPH2Nr$JMX1(!T>RWpAB1n?5j&e^^twnZb5`@&hV$Th|EjR#nTG><9Q;*jn
zpUjBzHn@``hUDr1PJ8{~Fx9w_gLF2D`@{YWi^UJdBS`imTRo!>OyS(MW(GUN*3uH5
zNQ+4vEb+bc1CC4*6~5aDME@q!VPZt5k{@<?jO-11{0Q00M(4|2%lP2NMVDt1$U;tP
zSeKPNe8@S0n<z%n_v|=IvwXExEk*0m{;!baveknSNw_kxMTGq>6PB{#Y9of>i&3Hj
zFt+>#a^O@Tygee9p36BZ{=o!}thlS&^5vNDE3J*zNw?YQ#qdk<K>#}~6lvq&3kH!0
zsm8gO5{Eq9^HMi-Vz*6ki>N<mKVOZ<!taD@nJ1^+U%P3dP%_1)7^*S!T@RqJLl}06
zT|A-<W}hTURE0a13%e9T_u`{}A)^Jy>4YbWu_e7_4gv@3Fc~Mybo1kvlMojlqjmP5
zE(|U{-h~j60q<NMSj%2S8Ec*`PE;ET|4U1>NR`qTKTb8RxlZ%=^hI;^<%UKl2&~NK
zKtvb!F%O&O;>AZU6ifLG)A0Cx^zLe&VVqv|Xi4#g7&m`=+lsDZ@Sjzo*j@6d8uo4@
zUY~lAdMrNa5<*#dQ%?LQ;Uj<3y21A+UqUIVhootEsho>bUWhU}z}vYe6KElW1a}Ao
zPOce#$akP!f3jNEF426|>h?Ulgx;*m+ZEl*Zg)6M7L)(8r1s6`Nt!zQRP)5w18z<g
z?MYI@O8N(d+AZGq(OpEG%4C@mV=bv>m}9jrWW(EUL_M)?V%6A=o5#CeEtc}<8vCZZ
zF%|+#BIf|rns9CstRoNK$mO&Ib!FUQr9>?l%Of|<7$Tcsj<EJ>{_18VZMU}i_-AnY
z$C&<JOji?1S&))3>Khcz`eBa8Y?6E(@zY(a)9s~-B%wX$-+hrA#THDz=xVQLZvV8b
z`d;k<1pgXFCN&iW&D*5j>C}IXyRH9jI-B;IH*oWfql9FIcsApNeC9AAHxFa+8KX6P
zZbt}7@~;wu;a|!-UBwa3c)|ENy|;4Dr+D`(-WcG|+*d(OqW6`rGw4v5j9`=uq!N%}
zw1^?sGOc`k<V0as$RlboAMl5HIe>>CWIxx74Qvv2SO}w={&3^6KL8su(3N#bz29In
z1-(yWoT3^HPT>P1&Z!ml^mC3BTTdOO2}aKZrBnd^F^A$Cv(%nwKFM_y1?}4ejunt#
zsj@92hDb!Z7Hw80A<Cp?Q;ZTqQXm861>e3afy?85x7r_S-?P8Ab(MX^rK$@gz=w~e
z5T`_uUp{>U9(#8r+QKo{KC|DR-H|);+smbB{gvGI($%@Vo_o7S{)eyRtM@;XygqOl
z%CYRCw5|PR-1WW@N4?!UanZv`H?l1RI+{N79_O^r=bWcUmd|w2n|Od%=j&YJ?@PO+
z*-I?sm1cKzWv8?}BH4H095g`imq-<D=T6U@Ebg$Hfg3`p2=TMN-91QDzp3cJR37yU
zkDnB%PeG6P1>i_t{`u~Wp}&tR?&dbiwWp0H3-EGSg@BRn8Q|_R4u9O1#P?PtQW~_l
z8XfJXK>8;S$8G~@)pBCw(lmfg$k;rX;Ibx&2nquR$J?^ZJh_~HNLFGQw(52w71$cJ
zS`kdy#{uRI{G&?v{1RY_y@=n;NhkL#dg9f&KHn%X^VJeNWd0g7ccDxW{GKw$T;#j3
zTSYUx<Ey&QcE79iQ~GIfH5SiI(!`VfVK-fU<ok=x(2=q7V{v#9y4h2n`y}O3NikWG
zo!rZ{>yp^Z>3&PZ_whGWkYmlb>Ec%ySyx=)-Wa!D`8$>K^2?^RvG&|nc70Vsz8fy2
zwcrYAlg{@<j}$9BaCW!67e=-^<oLLlB<!{uUh&PZi7r06-G-|yxbUARUMAfzsZ=tR
z{S<O=d2g%%@GxXmZZUeo>fABvBp_FxRY)Lw9(dP$w`9<OPqb>8DDUDz<#Sw-+&td-
zw-L}fKj?N&P#I_Db6UO_XZ(FVb7$8xnUM<QwVQ(W#h*n%^BFAg-Cs{UHw}RUt`$9N
zaWTCbGoDF!-pLhS;(xv{t$Bk&EA#soT}|IiL}+zekq319Yx8J2Z;oa`)?7Ju>gQoi
zDZ2k9x$O&HLi?G*GfvP6{*G%G7+WX|S158D8K%q#TkzZYxc_buJwT3!Xl`s)*wE|G
z_)WSQ2uxMrYecU>6~H<IU(CpGUs>H*J^IYQ(oN`>)dzid0ICO{?efz&4*iv`Jypm5
zEiOER$5z?7zpps|@>iVqe0Q9p?-!$YV0z`YPc0^#6@+6C-i2;eOaEIw3n8djHldsD
z?d~P&P3}<dBFssK^!(Cyr)sr^ZcP`T7Pk-L;Su6Dx+n<9QuB(yi173+Ul=F^`na^8
z_af6JTovu|l<7<sz{h0Le>(<wp&%YTHOj=!A+6o0zUUC)@OT=3Lf9Kf8$69B2~$28
z1J0A_ESV^KsS$l6)mW-AgRm4E;!pj`1p>Wn4UI4ospCzUphaE>(w?eYZJGjzPEt3b
z+WEQpOb1Cf7*7|8O*?sAk&kv{V$P%uzi3E#sA0d;G}}mdq4~|<%zcf8=HZgNm^(F3
z(5*GD51dc>AL7O6-K5WEG>I>#!+h5S8Fv7bMx9X#-M-i149ouE2hJQ<2UqZ{^I{_!
zv;QtHcDdIDm&}~3mwkw=p<RSodC=)tCbYnZ<S3EPZhJ!M<@tl~ywxbFIxTB&w9NvZ
z<;7R{1;6_9vXRJ;W*5y2S8&E&aF#r+`N9H#xYP5+JXu_5sL}F)RmTW*f%QOQsgD3-
zR=tGJ+#j2NmhD_b4`{YTZSg@3=K==9{(i~|S09do6aw38=6m?iE3SBLZ{P`72HwL*
znvRkC6sQX#XlZ`M7<C=~wsjT%feyouVexP@$3+?;9{g#K+g1*mFXF?K`ir=6dU6Pd
zlM>@529R&XeU-Gw3~U>LxAS2Zz}cApq6w9=fp3}3I0bm#XWJ<IYZXx5g{z(DH|?c3
zGIroyxL|)HuQ7Ze(YJw-DSb5|FaYvAW>kSwjjXhZBU}zfdW<Bk_<WpP$8SF^lZ6K0
zHQeF0_1C{i{w(>FKEF8<=JyNVPp7CseD+JQM{;IDaZB_$Xm#S_(*s7D*0Ym(@2mr%
z4*BMp0^M$Q4x0_8q_6$w=zN%*M+fbLpW|vZu2kYJ-ExN>xk|>{cp$bSO-P#Phi6c-
zFF*s$pr!42>O>7yg_;DOC$vLnG~xDaS3lWkFU?_osWF@;{du{tPduXGho6_5gt@1W
z5W4%HGzU4z$KP|Mp_&Xw=jl9ua@y;jwR1n4Jp>A1Yy2hOU^-`qxn+>u&-Dt~cB8+F
zi$eC&559=Bul*RNm0cA64HBT{-rGK(8^#yP&7F&NzKS1)A^_d7YU}I41ATpcD^x;#
zag~epf78`<qeSbpxrn`YlR~_Z>N*-SaEX-h)BueYDH^V9CmZ+yy@iHX(90=2iLPuH
z%hyc^|GxV<MTNY<{`!3V)x5B{&g`9eqzHEsN8gD0F$7IPEE2))zsZWyJdM63jRUP2
zHxh(m*58U1D=b#o3A|Cw{*vjtbCKdsl6PxDilUBdv<PQm)@o6J?qDsI;n-1W1s(aU
zaOo}Wh~W&(f#pm6e_ejVAFie3We#AVu>>*b*_uDAPN$NW=C>HEl7cx`4(7a|o%+jf
zWkr0gU1j^WyUMm)9-5`%UeK_?Q`)N?h<~_CdHp%|Br>CqCLkoXSJ{lhl6yO!<lg>Q
zliXk^f#=S@(R1hDqMxG5Hv3mRZYJIp^fBev>)+n!0mD7geA|BN`Wy8^&k7mFeNRg?
zF5AdO^9-V|Ky~jm{(OMXFQopEo*vM>@wzGD#nzKTc)t0rPIw*nw@P@3mUS0|UsZqD
z4(SgnlxUhFAVLyE5|zp88*JuF`4H0dh7gK5lnDRUF8<ao>{o~x%Ecdsc5ze_wf{b^
zepRM{yX?C?u7+NXm^zR7rsM0=;jRy>cD5k%@r#~z$l7GXl~f9+RkxbZ1D~{WCgn>)
z!kRCA^uDK@bc6NvOjMtBA7P?-1%dyqFydE{;%pzig%rn}fqzT!ugEmuJ9M{FTsR`7
zOK~>y|5j35P-P`4ZdSUv6c=t}H<aR{`c0*{S^sY-{_RO|&FAVSVx01}{(H!A={xC_
zZ>sdcgGdTHpwGXWwEKJEehUj%8WH{tR-UCnX%QmyE%0lz0r)Fp_ZUHN&%J?Li|~w2
zSE1P4e^Ww5jy%&08W^%yMRA4<jQK4MSxoZkLr(P?zdmPwlz!;$$f5=*S;b=Us<K@Y
zWPJ2gyb7tjDS@?prxg7WxuxYdstK(V)AREMy6>tQ^l8^Cn=)kc-(Ei}3xD4WY^@`o
zEr5mYs%-=yxJe+3w7?Ug_enR62FM#*XaDY|!^HI179eBs&NdbPvOj+h4+`h#TgN{7
zBCrr_QHE&HpK-8{9zL!&dactF;=*6fnrBVYd9r~II;X2Vknn~YKy(yiV6b$!o<<SQ
zRk30u2`8AT9O7OYJ!p(_ni$ms&qd97Eg^al$(cl(Kp>H+%(j|u0zo}B9s*C^#`9~c
zv&a@Unu~rG-LnbrdDTEYqzKPBsGpiQ&+^G|-YrO<j9bgt+m2d3mXUAM=j`9%d^%gf
zHlv0cw01!B6z*@5Vb@S`R=y@TOgzvlTdQDv6isVS9#|W!DUdRJ&E^z&><1z7a)`X+
zdWUJQDDrYZ!&d*7m3f?*rsqrYzAo?P7KB747&q=hSt5|ODxU;Ytpf}be-P_s#hG>G
zciMFQRc9+p_2Q^Mr{;Cj=ipZ8u-tFPSJ2;X`@CGfI{fBigoWSfV=yk+MS6k!HRFYa
zbapdn*iHI*%aCE;Ry?p9r#$-W1?$q~)g4Fc%KhLz^uqMc;&d4cbUuB@)_wXhHS6e!
z!Cr>R7zGuCg#oC+<&dI{?y7GWeT+qNwIf5{X<-l{oQ%HJf1#j5Fd!9j2!C?|4vDY3
zSz$h+j36I&!y;PIgo;I|vv@+-WZ0a$cq$!y?QJ|=c2diODxRjDS*rfq_sbZjna^QH
z$N5wHl69~OU!y(n+@jge-0m<jED3*ztaPCfQ$~{cdBP?HdUwx>)Fp=QIRgl8*0&Ol
z42REH>T<>;uZWu=fJADK5TDH6_Q&JtfPzNJ)nKTiyJk#DPe)XcggffWpE+h*t#6uj
zkOx+-4_)wxG#_GVZA4c0^cV-J$v$+M?M?IHGa<`9iI62=tR0<?1}NNQW)GWWLP9y6
z3v`Y317g8Zgf-nF%9%|>a`Fa_q#Y-<zOyI|$*ZkF`^@^lIkRd>=5nsrg0K#SI0!5a
zv6f@Z=-VIOW3$fM9)beKXgWjX%CnyD6HDKZl;s1)kw1Q+?p3ebwkUJl^Ote&Rp(XQ
zX`MWaPtJ~&Q@x;sR>xeX={XyXQ<>9CXOj8uU@s|=Jt}oc!N?{lI|7`3$0H^ZKaQ3c
zl#6LJdFNrhjBcu-Yx|jStl_`HHu{fClrqKpqLcTfOMF04;t@@mE+tptJvE|`fWRiH
zEhh_*koU*U<B-WmdGkL2Ou9($idEkCs|j$bp*Y2($@vr&y#is<4N11*q=7hMRurmj
zLTwPd$O<$Z6viS2mEs&Es9yr3@-t78qJ#<G!{-$xS@Kwf5Djc$eBc|AFV~cTwSURB
zEyAfH>M+b4QHJ^<z{Diz4X(MQ3U3H(Jf0=RL%8B<!5yUtR;AKSwoXsBt+IZU3T!l1
zzwUGERVbz?sO%0ajh3O)#Ak`%>rvogg#{J(!9{;gDmyAilm2D$8aVTLz56^qsUJ7H
zt^aPmQEk8=5E2a}Y#W|0wCeZ~RxZ$a6Oi#Ws;~poY$g+S6i<Y$7!pw8W=RPsVJ|_b
z_sOStoJ?RTWXKN$$0p9NCIc=@N!yzb%tD-!n4KDd<XE(r4&En1wPf|=??u4-CvZ=%
z-a7gvZk@Dx#Rrn6oRDL-CjetCCTv0`%k!c&-pC;*v`TU~{%&!9gU@U7VKiV|3w4lV
zwRRBMMSn6JC+66aJ}9y_{l&(1&yT`gjW96294XK-VJ6;3NxOvTtogP&PuDN|Gtd6U
zx3)sY))uXoqICh8!Y)`|4vwAmAuw>-V71X>7h(KtkDJavp;M9~1w$kVAtnOle}LHL
z@saPTPC8Xp7aTW0z$e2fn4(lj9FT{O$YF~lQ|NqgF%z0v?HA)<@0&Qe)A}HG#cAJR
zJGequJ5pel^3SFW+FR5IvKNtvIBa#gAsNA~lLXjl9-qEw`XM<-YWVpVE;c3esh+BQ
zn1A|Gch46KJ#rrVi>YqfoT&0>opD;GH6JQO>!XRgg>k2z$)l%2dOt-0c+fg7GR*jy
zr{e)2R2X6~B3WRhN$BcOn!v#-Jj-Y?+~pE(OMHuf=HW-&1IEdRWXwO}&-fLToCW`i
zKU>?9HcTv0qoFeSyQl~EW~4U@T5Jd;8#=a<LT6Rs(c&d`L%&{^pP9%sdmy&5^myn(
zh0*vWzx&Q=4m^e(Rp&qTvjzAWeR+_b*<ZVqe`X^1_#W@{Z3Bt&OHH{`QTV_6@Q?ov
zQt8Ni6sx8S`z-!Noo@Fpdmjr^oOSPwY@lCzb1s1I;3mxe)tA(rFoo{7F`3e6=p3~O
zk^%a|eRvjZnPZ)mT&K)|Fxi{xs&sqb6qr~7y1$%GnF=-e#K(xr72~UjD-+=adY*bf
z2VO7t@G&;Ugkn=n*x}S91^XuARG4&nJ$Z90m#&q0fzR)dP0nTinl1jzw@f-t&kf%2
z5h|nN3Qk@+v*4!5<wCRfoDMXHXD1{b4w{Gcv!mW;V3)~s@@SZx_aQSiunDT*&>0k9
zn8w>Md#0q#Q;2|XvJho1A5;@Q!-)H=ACrG1^C^xunGE`~#nrfvf{)Y`+teEtml73>
zI1j4KQ48hg!Gp;7owEk$Z)yW4ZSOPCNOA(`RF_vwqQ6a|<eCADKSi~2gzo35;xn^$
z8YO}%wWhlOc~<c?TVdql+y&>n5nIgvpc#ebd^HcJ=+vZTpz2*~7n&>Izn`-gko)uc
zdTA{9SzW!dJh)P{_q>i=;_#>}-=QOj<&C5*xW9dpR`*GQi=}*iNdU=xlbSe*U(G8?
zTUdAPl%#Q!*ELB2gnW}!=WgPfgdQ)Pr?dsI=wF6%T<Is5!POjo$Z5SnXGz$kU>Wl6
z6gdj6@9phH0*Ze@M{5bhttL7YeKNtg8AS)9PbQeOtLT7A-_SudLyHcqJ|p-&Fq~Y@
zih2thChiTlsZyXax4c-bk(A@+7pn}6FrLl<>tvefGSfw7%iqXI6$y5Y(FP>F`C?fk
zuXZ97(4z@Bhb`ZZUkmenr`fBs4ZqiEHM?(i?=J0~AQZHU=0N!azmkIA$quJ$sQ@9(
zdjBP_8l&}6@oF?#&X%nAC0~W;N6O@&q2{K+5PAuEaqfc>R>nV-1xin184wL-LO)fy
z2{`55$OF4I5yW5c&~UVXYaW?WMJ786px>YiG|B&ALFvC~)dV?r=`nO8pXY(Q%eY94
zjV0T5jJE<Q^xL!yKB-+`#|n0sU>ra<Xl5GBc|u=vcMWY7m_v{)TrydVxbuy@idf0R
zhZ`!&CXDJgkd6+Eso?;Le-oNl!Ca}MH$L_xQ^;>`Nb}&X1OPQpcty}N;{KTY#SD}s
z!O}&sD8)J;?<;n)iBAuiB2qty>qkc=Ef8pkL48bRoa>d@G>g|lCtlT26dQD%RN~|^
z%OvCDg%PaxcWm+Cu#FN}QslkCQ3#zic&rRBz|flZc{0sD=~Te$I#9YuqDLU(S6n(d
zbE|4Y`XaeiZ?uD~BnLE>^yQ=4h{6+wH@d(R9zvauJ|yU3qg-1S%V}5*_?q-G5t#iJ
zC_+%+&_mb79HsoAd@T-7JI#9Ixy<TqQdHr>`%-x^s-;#}NH^)Bt4en)-|RY&!34^}
z+s&yV{d~@+vXhcj_%*OYI+Lv&;e?Bp4FPUn27AeFI2XT?rxFOu16swG<*Cn?-x#v$
z@@qOdwhiUdr?yOUuWa^yIphu_v{z~lzB*?qdQteLh@wo7gsvpZC6_oA>GOhZp_}p}
zsxu~crPWhj!kibJX%sGiOr^KUP*V2$^i0=(byoPYsjiXb#1uMN*Xg*U<yQBlT=(b8
zUh`$k7W2zbozk@9-#N6M=o{WMR@b#tcUf2-;f)rYtCd((o>D7NDQ!#UDrl;R)nGiU
zPAfItS)F3f>{6-6a|6vh!tnG>zx_?I37fs17`lb=n|t!NCnK^yxK%WPVF(ECJ3sl8
z380x^)Q`5QWv%f2dUVJ*D#9@cO?>@(l~&jooyQ32r3vL0R<7(e&+3MAwBU+I$Ktw~
zZ0UC6hCTfr<W{xL063ne+z>s(v5B8DA{*2il(We44$AF9zHxOwP56j@Os?_xt`AHx
zO3IKFkTn=h8sr^@-eA4~94g0k-v}fl=X!-3de5G(^IBW^`VP08;jXW0ruSYd*?qUV
z?7LXXa-K8gTC~0<1v2;4P7%j>q;XzhoL?5_kF)Uk6~VufxW9$_b8x4yxd;E2?#mhm
zgCubRkI$V?W(`MpVkO^Kr=BjWu$K9IfNlh8{2HPHfttUQGJQUpFJugB`SzQW#d$0z
z2zTEnjiEq1k)ziE@FC?@lJ_Bi{TAHs%>Okv^A+}T`!TZJj&0Gh5LVP{LzQ*i<xI~+
z89?_#<`D;#eBsESI6CD#ozu$m>3i}>a|#vl*FS}GpM3Oax9&0dSQPV&Gd%wVWOH_)
zxk4KHmbU=mAGt|FxaWzdF}<_)k(t|Gx0Q=$KDPZ%T%@mX4HH5|#Mj?vbE@hK<u9Tp
zT1&=h%RQbJJb1;y)LuMZA#JW7asl(T9b88~tkzQUf_%x<<m-TMK{>QcSyNs<v6fX}
z1I)Fu*pJN)xXNTLE=NPT`Qma=<yu_d*ipq=L>hj3$I<bG7LBEyc(xk_k+w!GHOE>Z
zR^gXi8)nnKRiTk7Wj$CEY^?-Gsmrwny1$-ealfu?-<a<;B)K^Ln>pH=F+>1rlk(hX
znK1Q-s_N~ho}5zNJ(+-d<D2+_96dY?F5dfN(|93Wu>0kWA5Pg5jE9wObnkn6yj_LN
z0TRRUL^eQJf)Y35Iq=ci5uTnBy>Cs5x}v}EpOsG2UWI1dl`H~?@uziIpk1TCHDMqb
z4<{ceAl+}A;iX6Wq1E!}(IX0D#WQg@W3`6LI}>_=EMk<vWvO-?lb;PjeZL;n$74NY
z0{Fj0(@8?z0V9qaIQOq}*qVCj=k09P_L0pEbmI-CGoAb&<*UPJ1jHMT&d-y1GFjpP
zvWR%sODfNbT6rM$f>49<(a9NT!Adu)i#9!hhF_}k_<H;nTggu07Ey3^_=0Iv2!zQo
zw|L)myN*R|@0tjfg(<5oYk&7`@Lm$Sf?YK4+2XaukUc)J2#o#Rw!{>2?e7qsX4g=4
ze1}Kv6}+{@yJ?EW{A$xnSAs&cl$_P!i-!=Fg)~UarJ-b>*&jpK(!!r<d>=mEYJ<(C
zihMp(b%w1vsBot^E%~lb?d510?tF$g2@zDdO(d4GZ54i2WgCwWL9aWrC%%PQ(22Gw
zeEF=!=h!xLEk1Z)nWbH8w&X1B`p-2O_K#~R&q5wCg5kIVZ$h5ePY#Zn8`9$$;O-wf
zF=H5R^|py3RDiK{SDJ3gqHW!!Kw0=gdzdipnSM&W&-NP7SBxg@BAH?MG3+;WQ44vh
zO`kcHqLMMi0aLl}is2i&3i-FVZ202wa9G`sqvha2dc$BI9}sneK2w;zR$R7Lyax7E
zt9i7}cT~laoYlhiY;vs0VdBnh&A$0HL)o_n`6MEj5Xt}@nSxP<H<aP}oC^eDhj_~b
z8Kg<`O@%c3!E6L9wpr%aoO9x}cILE|u3{6twLx3y?kmib@}ta{C$Dun&s24PJ%LGb
zEQINnKQTG!b7R@uGOcv^Y;r4GMQ=p;g>-BQ(%iCl=Aky66@c9KX)SmLV}wqpv3cgn
zIwWPDh*mt1hV<W5*`wJ#LZmg1no{c}r}yS@yZ37-dRVHNMjni^<_XkFm?uY%59{YC
zOCtMJ!4I%$s`Sm+SbNG6qP2CBu*t)tf&8c)`1B+Lgd0Uv@w;Kk3c0bNt9<_FGNK*%
znn)hTZX6GStKeA9V@N3lT(Ti}LHHjv*(!O?aA4Tk^YPW<VigTM<FP+J4;|f&_9Z#*
zhW_KZeAGjaT(pRQRAL7MRIS=CS4t(}{p~15{HYWQCca60*7ptEGI=imO`q{`0v>Z|
zfvSkS9w%rD%d60BM8&I}(ufvm=v{*dH9yi`G1wl|iZmrNW`r);2?(Dea)Y`PkT`Q4
zrX7PxusMqmR|y6_4~0b_2z{?0OU4GTn;vPz|DcYOBCwkM{(#fjLM?9i1)oT7%Af^%
zu0J^&^d>*%Yo+|DD=QNcXwNyhLn#e!&ypw`Yw*_jKS$?G97nKGLFoL`c2{gzjb7(S
zg;o~aG#?jZbosiPTrC*o)5KCC^T-|mo6aKn69=JZ;r;nLU+|Ju&UDY3C(TZ)@uRs$
zS&R1^OVG1s&oHW|M(i)F^FR^Q)AbVTE66o{rgy*2Cg|tR|HPyZ#!kb!h)-v8&u|n)
zVIE&!XMf{#trM5qwBY?IQ*S22(Yg5NKmPvnFZ5^gdp7ak&(6PBl8eca&B0x^QD%R(
zw`=fk<;ml+{xAG_T;4AKN2RvCy<Oefeo|xKE04Eo)&GdfciD;1pDU);&7<f)eoH3r
zN0UVmF01@6`Uf6$v=3fAIvNd<$s&2w8YYwF=zNsSe~k9K2amQMHO74=Yu2Lms4<;=
znvdRHETdwhv=LRy)#^rcG@87}XY##da50&Vr|&*7z)e=L9B!_`?Ka!TCWG<SFp1zQ
zWb@*_`5|Ui7Vg)JkM1|cpH9*2;*<LkvKsZ5@du`(L69?6Zpu_Z#UC7g_Iw0}N;VL!
z@_KsQBs$lL=<|b!AnyyU&YN684hD9J^ljfNWnIBgFzft>&-}g>-<Qs>;~DT&Je`P7
zOZ^j5=&3W0p3=wV=u#+G;My>V)n^>>1>K~GN*9br<Bxsnt*W#f#Hs|-x$?@~qAKeH
zHNDkkL0atv+a}}d>BOikKKi=bzgkWw2`D7@c!kjd>hSL(z{im4X%#Q~1Df@7$T|_9
ze*~jPMTFMi4YNb*)k1dlfku1wn?`mSi9~~dfR;K4n6hN!l%GecWsUfd9%ZQHNV#iv
zUd8SDS+}Xz&fjj&tep-FY4Eg_R1U18#W%i=%$$gf(r>TMV=(veFPHsmjoHNo9hW^w
z73g?1rZ4c}^&2|tV6Hb#s15b2m?G^{&;4{rx1boP3FH|_F${<s_d4}Plj6Q;y;W-j
znC1Oa7MN?xD;hi3(X0<iNg`J194uahuzda9HB-X1R<Ko*B4FFo`|)Uv8<+4d1C0kq
z&685}^eKhmVMG177w?}P#*nKd?mllF_W0?!=#x87tr_6a{k^b1MXu!WM6BV%Zc$|G
z2oQV^cd&$H0frI?u7^47qo4QPlQu{2T(yj0UVoA#v%2g(y703V6KC4ln?%h#`TdF=
zY?C1}n2hM-Fq*MC--!3zj$jw{J$r#!$0OH8-Lh8Q99f!N_tjQ2V^o%M-RZ3;G2tzl
zF00+McLUpqW2-A6^NnZ1ef70<!W~EDOrMH_DI0)mHu>86>fe9v_K$J?n_u7U!H@q*
zJ^xj<%GIr!b^fcB%YUE$zQ~_1dj9)~Uu90`?6CIK{r(Ro`8)+|mv+C-=E-@o99($d
z(e=&vjxumK8@|`ig8#{c=P)@RO_E`B&_4J%u9o9UC4LN9EsXD#AK?48)eO`6Ky4c`
zwJn)rYp8uHAQ!;s4i#fZ$gq}mPG+m~7H4M+BIACZp0ll?KXeO*gSP=BaIC%*B-Bek
zoV(vJjIaB7H2>WLt0<yP4Q7soX*jCIhs}ELtkdkm!Ne)=wvG?%9wh_4x7?0tQB{Lj
z`%l7(NRK+F@$-WYYJ8yaSE@TEK)c@fx!L1^+CgswsPc8W9E57MU(`5IB^4;{HQESQ
z$$&dMAh>D<T({99z*+`i^Hq;JxRnW~I++7jb~Fd9>TD|THYa5*6-f0m15WlZ15WiW
z15lE(kqM_dnFCgKGzYBeY$|X~(3AD{aW<gpZXRIS-#oyo!x_*pI@ReX`GBe8tlxRz
zvG1(wdEu%1toIoeOa?i~2P}J^7hd!}FTCu1Mg=)s9041^VSacyh*j!{fvi$T4(2c{
zwohI>>!5Itq9|2zBcPDq2pg883YNwS;3;xk1IJ+kc#1gJz-fe0s!-R!ak8-QVX<n0
zWB;-Hxof}>a0Iz);3%S0VJ~ZJ3+jhOuY%R6p9@@3KNq;7elBoD{jel9!4>s$*O1iD
zT|-hoM-8z{M5()k1x}+#-74%FIEvK$!ma_tjZy`vrv~&aN|mO*O007ciqv4O7q5=$
zCsL^FgoO$M>Oy5Fyp9#DV(TArflIco=K_~(-N*$l*?K3ud`)o4)<5K~A=tW}yM|!v
zMvfXnsO*H-v4Yj8p9@@3KNq;7elBoD{qXWN!4>s$*O1iDT|-hoM-5G=?1UG*1ulfj
z5BY0wp;FIZgA0|Nuzc{;;6mkx)hdxtsWYJh2ANmQXmB;hhsi2a*JZL;^xq{BWr<n;
zo_nxe@w=-TIt*e;^4E5Y$MsilB61uW^amH<062O#K_BUVt`!_`Jj=e}_hxPUbX29U
z8y;{Ni61(GI}h#i7Q%*b0OeM}6aArsV)Y<uM&H|@X4QkJf!A+?8|nv<Gu~JQH^8Ui
zT9w35XB9GYR=#Z$Y2+5-<+g34kbQdv+}j}>E0TIUKbf~zB(Z}!Oy2EPN$cQFBkPLX
zyq&_$+c)6m?bWz>`v%<XZ6b}_H{fP(8>vEMMV2-tRPXHMq<O@?Td*zmxs$O7YByO4
zH)!6d!AsHw)eV+WO21a>R+@0T$r8LXdSz`yEAx@VsJG)K-G=x1yRkY?={Cc!)xWHr
z^F|DImmSO$RI92yt9L9_NxI?Qa>H)iP;lKq&AQ9}WlFG{=xBG@!CaE;Cc5<<Gn;J|
z|1#aVg=D)6{r)pDpJV=igK_A-R#%A)!5K1mRF9VvXDGdsRS)cFrd)MW;<J;VPflN+
z_(sgo`G=fh22i%rFWD0x<-=wM^XB2(=x0bmC_DW((NC{lX4%KTiH@dE{Efu^=OJk3
z#NSLI`f4lc1{>Jt>8HP1aEmi948MsXY~OSUH#>2E&9(T=^p$Vqx1zfmPeAR6GR9+6
z(?{fX+E1lb7VHf)xi7n51G=7hW&ceKA-G_(hj5eA_?I0+=s4xMw*Mvul1J)%D}7Cs
zI@!!~)7y|QKbE{qq$cODAVR+x+6QH54!VC+OjNri6Mai6Me10xRf=y(PNfb7<&<YE
z=;?4L-KR$UiyK=>S>8S>D<6!n-n~_&kLl?2O?N+Z!d2cv-*Wf877xpL=UeW7I``b2
zUEy<)Ka+dz*go==7V%weR-dc;D_s|uev;qvxK_L1^|&8?%Y)0Y^uP5XtbSD$aK^l&
zlY|(it5fko-q}gjblv`z=O`c>0>@U<Df(L;*J{ocmiP8waA2WhmB)qpTOM5=srQW!
zVfBk|kLUegbbxue@NO+cUq!Be>$4OP>^WxWufT(@qsOWv{Kj>slmTYzPJaO@nKC|<
zlHhWKp?-OQ2Z$iTqddeHNq?BkA)4`+Qb!j!5XH7wMC<eG-`yB*fFfFlzh%(laeoOB
z&aWn;=|r>ykx(GK^8f<R4>&0J{XlRt3ha+wKuZWEMe*OSwrf~=KJLGR$h+|ShjD*G
zAJ{Cg{yciBzCd6o6%6e@ybg*-!@cjWAm-Wc_%F1*Ii7w@=6mJq@9yJdcUrHuYXGc3
zQ@{6F^Rt=f^L^;)ebGrMzTObM+@nZQ0D)cMEpAZ9h-~cFHJ)VvoSLTbV0IO6*NWJa
zIV1Td;O-BH^HLVz#c-|xD!K`pVK6r~57ZGsAKocdhy+^>AS%4sSY?|U`}lSfX|V~A
zh*i#l$lq{{Qz>F7vicID$a}M)O%;UEQYii~aZogMABxm!V)$mw;#Fn4B#LC<Mup@-
zRD`8*4Dsp?A$VP@9XEdNo*l>g^#i_cR3Te6vXIA}Lz1#SKo=LAgT>V)4B+8I2oxrx
zVc(Yx{%Elio04d7kqq9mij;hhXT6g(N<4ZT&DC@)>8`H?KFb*7k7p5E6U%kzkOlyX
zl3i%!afaokluS{OYdm_Nj6WGvGwIUlb}=&urwR4uqQAJ<gftsATWwWIoCw~+<+ph;
ziiaPk^C48hF-J1MWw>ET8{@QjJ{&Q0eGC@wFmzhWID4ZR{#%MD(<;nWw|?9{YT^jv
z<9he!T~B+X;Z0i`&Dd(>v#fGwj3#4v5vj}Od9s*Z%?B!=6EJr)Nv>fzU-M*%1WIup
zK}3T*pN}T*s5na$uj5hzcoewbuSd|<G8tT8I}yqD?<o@qI%tZnh<09!{=C@)V97&8
z@tw_K`Fg7q?d|D#6bxJsWG@u9(ykZMZB|hdsDse&mkUO`!G|IRwHG_sM&#^T>l9pb
z0n~!(Sarg<0jR;_`^B+|RG|}<RkdArMn$g}HbTO<<!X`4iz{rddN&K6p1KtvzB8nH
z7}AG52ye^QyZVeo#BWGR>fO6Hu*)*+BQEr@35W;^VilKS>!c{Y^KDMy)X3mw9ubAR
z(Zh$M5=v66@OlLLl-XbQ4Nx$4Jey&130ceon=T^gSAQ}jHtm1tkH#3haW-9yAkQf=
zt-pI>JjGD?BYi>(4w20IM#;m-paJI~IdtF-@%H)X^z`Sr{=CVyogIwoRn)%R+lzKe
zYyU|ZaqhQ|58&nVk9r*pQ2I8BcK&Cg!lVT_?;69%vI&nDgZ{+Fc*sSW!h&zTJZz^S
zcIcN~4=WC?=IkfVSBwbpa2PLSd|s|V^yl-}YH)9Oq1dwQB4E&s=r;oe|2~{f5<bUe
z{t2**lL=?2)3?k1Xc8?i64V8ehuEGBBY}J@jH0vo6w*U6UgJzeqZdm^$tv1G2^xqC
z(r8x%@s;qLMkzIbXfOm+*s!rjPo)aR351*=Wb3^mukZlr40{g~Dr_TSJ1y*AomU6-
zgBNkPSMS9K2b45Ok520@PCPCgAH+}_BK)$gcK@u??O~Fw=;5Bmc^jgWp3g9D<0}b#
zZ0$E{?Kjq%Drdio)I$Z06@GgYR3doe3}n+wBc2CHY#x`hcz*eoqAOEB6bn*^wBPlf
z@%%<btyc>24Wqn=(3oOHLCKNHF4=_8mV-tJ$@aO)K$^yQ$~6sGvWgCJNwAsG*I!0|
z9zOhy_lH(6aVH4p-)O5x3<bWrluWc$BHx6u3h$N|C5kOAmC?a`0H}zJ)F^4FgkP>>
ztrUfmTbyyYg`GAc6q1`+|Ac0&(SIW{xUv+SNu8yEa`H%M?YL|)MKrB0En#VE+<4De
zNPtk!5NEGoG0WQank{@-XGYY4NaR_zqZ>=Su4W@*!_!a}@!LLA(PB#N9ae4%JkvqK
z#)vbNZ|{*}*SGr!t<bms5P5MYz^=i#+3zu9mhD<JyzJwVng@;_j{5I5D9fCgt4UTB
zr)q7m@p3+T_b!<;y`kuoQ4IB{0<0st>D?P-3vY6mF(felMkWeRkFeD44g1J+hD@V{
zYD0l~n%Hp3Ap^?;HMs=4OfF|^+97y7o44_MBGl#E7`|>qTvIQZrx&+TjvANB)r6l@
zv6*{tydZ&<N;?q}AMx?zX@VSS^e`fU%M|fW%y-GA9(_TdVQ2k%hf0fCo_86vJpYV@
z;3)7&=W00>3h7p$f><0(Z9N}MmKbUs?RUhcyf!Yr>G`>^cK+PMb2vz&&gtnfViL=T
z&$7})wrfS%3L+X!KfqGP>|xj#rCC+fVFNOZTwH_mKnQyogV2^Q=Bo^?%#=m@FhF{d
z@*_~~DMyKF^)Q*S?QAW)N_-s<lEIa^R$5I<oqu~ZUo69i+?yUDU4adIy8XE%ezEjz
zl_JG|B19h6lymQi824r0q1sr}kODG^`s_cpnsIPUpH~M&Tzn`I3-_nz5q9?VcUNyB
zPF%KNp|K6wbtV+_<?nW0D^$`^)GqnOfEp>PMZRMg9J_Yc{O8N^ZrbTp5xwBs)Q#h*
zv*BwU*QU$@dm3CXG?Q{SWgSc36s2Yp(iCvgF?}-0aMC&9U`J;>_>HG+IPDbgv$Gn`
zxh}sWCFc3e7w(Ih!r6t*VnNS@N0VjAkcfOiAjxcrFYNI=$<2t7=(S$=%_b$O5xORg
zoOkNs-db=E<EVo(jFNBV$$eyUrMAZ~O3IDEsN+!{H>3jV>X5%lZPBaTzM*khhj43s
zsMFa;I+>BtwcTjNo#wOnWg~vsX|WBb(>$ubY97!?=R;T??@-up=&UEq<v~sk%V~$Q
z<$r4_=4LiAbbq=1XsmqKn~-luk!qe=9$r-s$PrBvkOmA8W|Q*#*v#n2YV*c}n)qoi
zXc8%Txxn6((~ZRGI|g)<r-(K=56Hu&W+OslRzV)RUBhg!^Gb_N@AU&=ofZ9JN8yQ0
zPm)B+z92K^IF?q;v?^d!ex8YaxLy4SI-XDCN%9fdY>{~cb@@`%)fjXAVmclYHSxfz
z@x3pZuf~b2AzZ58E%+|Q4nG?*Y$crV0kvGP){@lPiT*31Q{DaO$LKx?s&T?LGubn7
z61u1N4LL;)NF2XV+wXr=d#*fu$;r^_kTHO7#oEX^uRNPCmr~juZ0x-`4pz6>X6feD
zJ(>CT95uvJj^mAfBPmJl(~R&?Di<hea>HdFo#<YwQgq&D&6h+jXMMj+-!SzHUSr?n
z1)_(X@@j040;d!tAQ8jE5pbOMaYP17d<#60lFci}wd@t%O%(%oAvrH#!3?P!VjRgN
z1(RP0X*0p34Rt6I9b^)l*WUYX#;KkyT||fE`M{Mf%65is2hmDrf4Kx{FG5;@FhW@+
z3ZY1Jgw3(fE?^a#Mgg^Y|LHbDhEDJGyLM$JE|@XjVwzP_EGzwTUTQP&VTN{>g67@i
zGwY|^`@mGJ$wL(Eb%W{EWQkR{d=Ty6gcO7Wjd@LR(tOoxqeBB^Zxmw0C=}TnHR37E
za&$;Mo%iq(mUL)5Ziza0eM;*}5hA>}U}WbbP)Qa{WQpbFY}OJ)P(E<yD7SOcEBRFc
zP7TU*iO1aVw1N`cOmkejR*`-+A#l4eleh2#sG}8=tbSAvbKpYh_Gbziap9ehlJRg6
z^D~Z1MVp9WZ7JJ}FN^+;U!QijE4<6#=g_i9x#G%m)1!u}ciQhhCt=l`h+UCiPubRI
zF;?v|>L;@XU!o3`#xr#ZH=eP<%f@%3@r-OjU=xuX7$KA>LamuFs7`YpdGrX_2Owpq
ztYI)s-0iujge^-!iTg042JzKAf!v($<hV;FUwg>m5)f^*fRr5Aq<c^CYA0GRX~V&t
z9i;lhNc$>Ow={PfkR_?d)9~Rm($A#`>puW^EvA}%H2<BJ&=qrCa!JKf*=2m-43k}h
zK53O;pfF*Y3C}VQ1-&Yy(#Y$H-+3{c(hsR<;rPTB9q=zd>&ixyuMjO9r2dHQh0_^u
zW7zk^ON5kC+|MM4#_)bf7c3}Ly`}n25>)gw^eKXu$-3F6-8FM(6TfTQbbkF4D=Rt8
z<^|C}(T|$81=m{YMuzjw8yZniwNa{CP`xQ=jxCPjA{|*YI{~Vp{pIv>#Ew-LpaRC{
z$z+fe<G9{CJ#IDPdgGU~R;NkWCDB;_v@x#*ZVUDEidZ4ax_n=E|CZ#>b=ut!*yOgu
ztN0wTvBQqn1PzEt9kPAkzV|>K9v*1M3{8xQqd)9^>S)I;?Z}PNhCwGQadyi(M$jo$
zm~`m$ExJIbGvy^EuQYXTqUFwt@1^lX^R!LWkj|!iQl33Tjf<QrWU1E}tk*_cPQgCu
z$qh<6-^z<v(kaM=7VC;tE`2=c+~LK`Ra|~xlt(XhaI>ciI9a*8tiUw8Yu>F{S2x8P
zB+`p}80`$BeE*HJ(F57)hC2Vy0p1a?542tBwZMxLUNA-Mg8-D{5v)~H-Z{Z0br#dq
z|43F4lcnLX&W(H;y=RpNnyM*vV0H4q%G%Dzdn4wCJ}T*LP6#A+)d#ElW<e*p&ZdE=
zu~-u!M`Y|*WF3)GUlB8Kq|nBS;znZwHFjOMmL9)ir+@hHjnxTxiSOLaNLsl@37||q
z`&_oHXI)$}Nhf7}6h-$ni>-Uz!T_?K{xA?J^pzYo?qP3t9VS=`D{G2bS*NJ>UDMU_
z-t=Uu(>J*7;cdIxddlSqzbZERNqf#W*HJRxVo$LfJZLqey%3}4SCauatD^&s)+=uF
zgag6{)+h3pyEf(n{{EN@xmgef012J>SL`%BTd-RJj6qx$7IIe~oez`qsM&kYR?Z8!
zNIGbC8}Y&ENpnqe0EERqAI+9iwu3QVa$a5lP1%W!bs@jI-)@dG7WxTHhKV>*Fj)!h
zdv0a$)fKpU%e&MK-XoM;@WF74QyLrZw|(&El|2{Cj<6w6kRJdICo%-oJn;$yvf<$J
zUs=M4{YNXN_Yf-HT>|yrf>YTer)-xyvUz;iunKISiqK3$OHuCWSye7Kb=MJ!Hv)_(
zpE!7Vj#|v(y3u6Guk7H)2UuFLbNxlVb(DwS6yvgftgEL7>^Duc^RvZG*+pm4=pAQJ
z?uiVCX8tDC;i}l?Dla4Zc}!fEv`${sk9<sbo5b_7CO*EbcN)**S3BD!&Dny@^cAx3
zg2_yC7o*-LB&%rG0&A1ZS<e~6t)p#o&`xjyqDOM>_yaXBkDQ$dF7dm4=39>l>0gdm
z<X?^Qfrz~x;ite$2;SiOYHvm4@SX94$k&!=PJZ3{4&7_*Q!w`eurJ-%GIF;`z8V?m
zEM=EudKN5q(Zn^aex#Hz<3+j`ShSd;@lhygl!(H@Kfh+5&rzhn4+N>}59HPytzXXc
zhj`HO{;)q|s&BL|eqd*<<O7|(U<N-hMSApsX~(-pwTq91X#L_NOf|2_82O`ufv3kn
z3H8HX^v|dq{kbb$5`baA`J;8x>cxj=Cylt*sW+NM?rlibHU(v{UkkCaWU-_pE0;g(
z0Fr!gd=fwF0GH6U%h|5c<_98!{t@)pxBW#TuJ0~B!gANX8Nf6VB$VMNtI(s@7^L)r
z@V9ylQff^)(>)L^D5Xxguawp_n@PNXaHv*8MAHL?lTEO&{P^9lbYF0O=)T#A%`hD%
zIL6VttGT=gBPIbJk#{NQ<7GTp&c}Fx!GGq5b?`XlnBc6=D(#v}H|Y;$ee#tQzX7Dg
z_QxIy^}|N5a}@8_kAr0g?ZaSM`?zsvHve%xS|+M}3a4Q7z#7B?2VsDCFT$*?oQdG^
zv_EvIX$um0aXa#B1G~lx6A}^7F)1R*Kx(XW_`FGAzPEQ8^yIB<Z@~)1i!jncG&l()
zTrO!J#g-mD<%1Qk=eZ0&Jv=-*tslfcH=FIa(P?N;l=zL1-13?o0Emy8C$QP!X6wS1
zwT6GsYV-xfQ0ewMt+sGyaXS0QUV25cM+6_u%h_6Os=t81z8RnJTaQ_cVFUP}P!*61
z0v<oW8i+@e5ntMdmWCQwwH#L}@#E-^Kg{oyAK?4;T99@4X-`bV|2;e2KaCGN_2VXR
zjvVUc@=6I~_G|w-rn4}=WQ5fZb9(xs*}>;=OkqQYB4tPt0PsB381T4v=4r>Qu&b0m
z&j|LS|IJ{i@ZdcayN!CU*X)p@CVR0M^p~uE-~oM2m;Cw+T@7dP`Mm!w79l_4;b;Nx
z)cI`XC4S?16XTHJq0M9o)+ks+gi`D)$34sJ7yyYbi3PLxI0AzzNLLF-NmNder?8-Q
zGXh#rT}H6ER?)G^IC`DyoPa-k?)8heVe|Q5ITlWE5Pt}DCGFV}K;bv03i3<1tDOk+
z7j=CK##4a;w(EKSGNI=`v^zd)_Go3X^U{AeIVlSqhtR)BDdHwjHIww7L`@wBQkn{X
zlg7KJVN>Uy^$0pMJrB4!L1&C%D<ry(X7!1igT*vM-}H_M*Bk?}>^}BekS)^R77ITF
z?A?4iy+n{A?~m|l#h1lCx6YD9y#W)uY=X^x3L36)7pbGpY5e@4gR`?CXHza<-u7@l
zoy7?{Gix#$KKiDIn=uV}4y1s)%*BP&b98!W%m^eYO8Pu~ifq*FbQ(RTok2I*xXGEz
zh-+g3pevY<(hRxY*aI(mawPldryzQ2Ur<To`iD`K@f=h^Y22$nL(($&FKN4qlnpKc
zWh%~+Nw?DrNghR>=W^>MpVqdr7Pw}KKud(8QI}5Oqc^4$zS11zVQ)qrduN)P<slZA
z4i0ojKND)8V)fx$UM^1I@QXhSUvurLFMKX?FLR<F>jXEJ!1Bb1b?`s|_=V}4(f7bj
zMUIf}sjQ{Wbc*1@20jxFJY{^cEzBzXpvxCCa4BmPts0eWzI8@c<(=%hJAtaT?5Z`j
zVQ2JDJ1eR=Q4_4cfLQdIBKV1R_MoMN-x#15X@3UFi2IzuvmKLzp8P>GI^6C!M3c7i
z;f2C;<gM0gw1KYN(h9Z_ID<AitzHp~BpcCvMviFEpZp(;sgq2AEW_yI1=}#6zl>Oe
z!t-Oah`0(jyr0i*%H<dv_;j|QAAo`jT!lCx55zZNR}jB%>c<&2SLuUeZR@it6+?bu
zS~R8{thIMbJw};%!i3NQp07=jZp(}QG8#^k1<;iU2=}Rrs~NsbA1!l|Fx^Gpa35wn
zLW;$NGrvX%_e#*i#JE7D7aB5$xdQ>h{@}g72`Tl<Jg{NgaA;cya|~Yrx1JsDH8U|L
zU!DrI5_ulL`yo+a>MfJ=1$fZ1P1ioMn4_k;bZC&6g0nN4|DHZQ^ukSPU;H)}>PfG9
zxaEW2C#-WH`e^jhf9GPf+3I!T)(JbYF{X;ox@heF<*a$eHXW@*H!-2bUc+Q7VH-xf
z1~Ej{%l=HwzFm>jxG>}9R7EVWciSE)qPMMXX7XQt1<g+4J)$_J@Ov6@@WF|aDhGei
zS<pXd{%eM6;=E2XSuU)v8_ksrR~?h{%t>`(G3tBjzl0gd+!mBQK;0LpH=&LqL3@-g
zo;34!>XtxgDBdY|Al!ia%f0W0(h%<1Fuk%Irc9UH!sMi$Dcx>6BZD;WJiaCF&t`1%
ziYC)#gxd~`Xgax{*=KthBmKDW)TRp1@uQ5l4=78)RR92CGVgmX$gQo=e}Bdb=RyCH
zkSqJHcHA*vx(2V7?%c4={onYN=Q-mXb^^rF2rIJWTGe|TEYf^LUa0D!u?Q|93Eu2<
z1i6hnOhh7GUIilS`5;)MGu{w-r{n8OlpNQDO60Ny$MqV}MO`fbOugdRE_clG2KUv)
zgB_gH;DE2&5v|KD4XxLP&&`V_rv{79lRle>WZ{7@1)W{)muniE;5h*+=>iSjy`_n?
zoBK}45jjp=8CRO9B^dXfrfV0s!rvG@J($xi@cLSmt1u5u+0xS&TrP^{JQ94k9R6-x
zfPzv2zxNmKDc=10bUu0)gFYwT1@dBT^LTN5@Jn28G@5OQ<=?A6!?TvCDDF649G@O_
z;`&*;saf_@Fj08vMhdRZ>G4X)&eD<P#kJpI+hn8O?WJJDE8slHj?a#Itqf!|%9QEz
zo$Au0rYIBj$%$id{%-fP3f2UaTG94|6xc<Ce~YJ4oc%zBQ*Wt8DWbT*dL35s*bw;7
z`cPRd*(kV9pVPxY5}H^dW#wWa&v&{EWduirm%saEv?%9~y2&1BC~N$Xf^E~W&;T~d
z^#a+1vvd)Zf1m&*@RN3c!y1@$z*HM0EjA)m$yHYbnQ$!ZAZH(qL<{a%qmeNjfdB&5
zVRvMZDYqQ^rpGYS2{xWiXH57G-j}Q{KmmMrCg)GXLDI7grY;^+&x78G6%F^1-A?QD
z1#-z|pKTT&5-p@VDG0`eu5}dIWUPdwr&pr=MIyz;@h?25z>oftgT{VJDGD22EP2}5
z*R{t;Asnn3iDNi;>lZ%x(Vd<g{VMU@9M{R|N%Kdv65|>%Mrp2G^TCyRlY2Ae119@(
zPpmU_AI@((dw%x0%Xv-bMy*uXHPIT?>BNoGlf%|CY(!Ur4mvMLI^1V<F&sIoA2G6D
zM9|ZSNvXwrH2r`FQStteEU2zqnLYrUV;FITptY25AOCRu(5J6mO@sv(JLWawa<ve}
z8dPtc9G=3Xo9P$=ApM9hZ6eM&KjJe=*&T>q0{#AOxXGW`q)H(I$>z_X=N&nIl0P@F
z2&>M=2hr0YJIcboF>^$xkt!mHf#~c$syJ<T<Kt6C+|!PBl3&AzSJTM|9NY&??*@rq
z<h_MVNc}NAX;Gj0^LMZDa_saaxX=(vhQ9vGSu^hb_Zxm1y_#Gt_$v`rcZf&pWfIw;
zT>Xy1yK>M)><Cl;1P09YA<_L;^r#a32wMtmPR6j{(jQ5alBr_`kslNxrfh|*&n^df
zQx*(oruHV<aGm?Xi)c9WI9Yq#y3y^9_2SV}?hx?+>IUQX6X^MbpOEZX6#)DWUU*|{
zRds$XYtJD5<L^J8{m<t2Y^uJWoqvzQGa2ysBAXX?*hZQC+1{?fzm+GC%lg0Y<M!6}
z<Nv7Cwzs#dTiZ`+?0e<$<I2{5MCCi|L-5ZPoXVr<KYmLl??;nG5H73yFZ$>E^{CrE
zc=hOLG)N|k<WXywOqQebQ8NEA+V37b+IrL&_n9DFi`JvYboPlIZ7-Hlu~FKHs^w~R
zBRU#Q-h(KLddc8oG96FfePV!{tYA6ZT!Z8yY#*4wMi675#uv>7QxcAu*p8pau}l=>
zExgrpoXKE(#SWl9EtbR4WVw0qRQslRa6rN84y7h4wo}4(IArH&$c1IM#EWNx5tb!m
zr?m>Urd7+p$jUqeLfaK@0-g@#!IsvFj*wm<iQ@ZAT5Mk2H$SjV2foQEg*{q?=Ip(S
z4?E2!8A<=Uuw9c-0MUDOAhul4{L0s5E*hk<gtqkl_+$UmB35BBahqN8(1IYnAaIX<
z<+_yn;F9P6F$;O{6IK-<lG(1EZHN6b;s0v41pFs<j)n$ZcM_hIVIx|H^4DQXn2B+g
z=8n9SF|&t5eEuZ6W#z}QXjsx8c&Tu()2k&vf=@(S^96UsgKoh1VUJ&0ad&_^MqeNW
z#01kl!1cu?*QKP_)I5>llK|!GvXwNAKNY-$s848L<@s}fQe}86e~D_kA})rwP?n4e
zSu!S?<)V<?i;?U_fnS#d`eBulB?FW_-a-1HfT`oWxAfSb24IGh<nKd;-`MPmr-(4H
z%IqNpkt+g<Hw2^c&bzeokc)0^_}}<fj^;&*3QxP*SW;(VwTVEVGC2Wj9x90$*oZDm
z;?R%GDwYaj*Te97iXK|*4wwv%HtLFxzFN>>1L&z(O2#Y4Fmj2F=SvZH3if(rt2IZi
z1QdWgTU^Y}wTne^3C0r7U1uOMAc#(6JMP2HZHN$~Y%v>;M7~aM0zQm$A?m#Gi9ovY
z5N}2y<E0h{!+~bIj1D{QOHdh1Pei5RF5<|F{Pze$z;h_Fc#4vr<jCYV!LS$^h_!pq
z2nWo*$Y3L!NUwQC9-_tisD+z(Xh(uW+XN88jBY4DYH<<6Xm@$HSchci2sOpnRdXdP
z3lYVe;mJd>HvGJ+7iuY35kUNI0EL4E79=4xLOeAC1PVE50(o(ng45-y1P;i+9&nC?
zz7ZvCGE;y+_J;>wQ}tBbLDrj+@HH~fTClPO?YlmRq8WEk2y$bvLVi2_P~p%Dfk1@N
z&0QtXG5Gjv&g-=4a5_jT&`j#6@hV@VY$oHljd<4r*jJXL33}r3{!z<n_1|&@dBKfk
zV?zI_2iei3jz%X(%n-pqqe(6Yvrk1K93vU?1ZBxO<0CR%p!_S9bn^uV3cc6lVi`15
z3S>4<#!P5?<+bVPrqc8%#Y{wWg;R<hKOmG60=sD-)GMjG4sYfr?YX|T<NLZMc6oPe
zcQ`v#Rqpm4#ls(Mu~1-Ly=TQzP|G5MnnlP>S1Co`In?<wgArV5P~8^{fidAixCv?q
z(d&fspVL^Gt=x4_;0td`fi<$?lC7N8L9XQS2SVJ)4$`ybJYGhcxlxK`9&YQO(WVS&
zx<~Hwl*)V43=@o>vg6Yum^a6lH^bKhmEwy4AV&xj{`2^4e?A{2bG&Yzp-v`7Q^HD+
zc58l#W>p~_5IMX%jmi4E7p)h;io9N$*(S?O`f}qMJ4d4nO6v0k&qbv$q5zeSTKo8{
zq}gm%9`Ecp4grw5n|lMCPx>E(3ndyznlIwRlllw1r_o(kBp@3#+}q!y34v7P@$u<F
z6R+r0+>A#R>0E<{+h#95>oyP6t3Cy!JZC(btnXYeC8ZFP6y-R4aMzc|niaZ&^xUN{
z=&hu(*OS=E<m?j&ptg(r&Y^dqYq}i*$AmALjjs!3pAi0t(&*0trMW-80^s2;*$r4*
zJf?>W_K##fO@<<(=|BHj{!@F(;x&NqU&V36G6ML>wDj!@;sB&)wPRJZ{^Ul5Qke}`
z8YXm{yHti364Ho5#t@3;!VMw>#|QV?P6n#ZlLH0>8B7WhH=L*z;VwxFIOjcnS_7c8
zciK^qawkOwa#1i~dcY(Hj^VI?2&}WD12<o!!ST&S7#SX@(f65Pf;|SdIG|*s_o_#<
zJUKg7Z)i0pV$~?dcq#ZaS}cJ}X{Xcmp0rhvgIRaw{T@BM6a229xea<sLzIulb)CvL
z$$PR1Q=O8`Y#YkFhkl`{*u23@bT!2`F7O4RR;u4#M~lO&qsYp%!I58%pQFWCS6#08
zdS`VcN=;g!^O1#)TYUqp#AjNjKX9~QFB=%<03ai_;RO{_>Sw34G1*$e($T`K0d0um
zmI~rTP`|^wI|P9rLXJ*p46oaaA62B$cpNLUabSSn&kvP6f}SB1bPc`xbJ(_+ZYE|M
z!*-&4yqW=SFJU5zIlwrVX@WTZvlG-vmY9H2{3anZ{S9*G^txy5UEQ%Pb1DFX=7qM3
zc4ySg(z|zgU`?Ye-vM%0iykW9mZM%nBG?Tb>NgR3R3{DfT_Ri~-x7fmya%o?N5XMp
znSynx1vh0IpN-oLqxZEhUB96}#{K_gf3QdfcY5~^f70*&%jL(;{eNw{Qv3V<|4aP&
z0{8!x0|4H5AJ-e*X2atFV3%4~%;;u@x6Io1M{En?cL~PSe`06{@w!F?t4St<`KQ@3
zR>5m#<Nl}Fe7a<E2EYEPi&~D8zx+*oq#EwYy7--`$eI-TmgJ9I|EMZn4r9F8zuPu4
z)_<i|uGs!R?Cam_|4aP&vg==DYc<!s{&fvjH~@onZYN$A*jn8+K32Bb*DHu<qJQ10
z!q=^;`YJwxBFr@|jmJTqlMf8wEQ}h%biTx^&*`M(t1^h+ek9->g2naEepwFGttkWf
zm;jZ0C`_?pw3$rspE-tC3Re~G@S<)C=<?bKPGz(PqA8{7JfkYt-bU%I_d_UH2?ej+
ziTXT97Ao*FqQUz6<kKR(ceh6U4a(xL@yF2rme+T#`@x?~`CoZr>wo3S-}3*9{Q2Vk
zpS@Qet&e}S{SV(_zQcLKeKd7N5D$WmyP=*9yvMVFM_W~YZi;1|SL#esKp1VeYTkI=
zXv&oE;*lH%6_K_x{tavbM!0U%(j(9>dARPSj17T-zFlL9b8I$jB)dVX<jmNCy070b
z66jF87+ucBUQ?ahxhrXjF>#z<!DUb$49b>64x&Lbq1q0<_Hz8<e+>CQ9K8J;8$gEq
z-`Zv}-;)28^7h~I|4aP&3g`dEzVF;GzUqGaZ>4`ehi&iIA*^w|*L?PC-2CONegww8
zV!7mpc-d?{d){jv6p9rE0ei##%lKL6^sEia;f_&q)@^p;cBggPY4v_B6m|?hb?78+
zEuH$|VXF~0j_Tbm0IlJpc=7g<iOkUe!$MBcU;!2hs0wKD9vwXuwQtxL{lih~<Y%x)
zAX-to-h0kAHN4eR)$17chdx&Hk8oWFA9bjQ00L$Xqr1yRAKK7<Ws-iu`>20?H=lwG
zr_l|P#44r&7e6~=@r@Rm5p;x!!Wm=@xzoB~0`8RK+Kvq<W*1x3j4;q9WHVb17Tgvz
zdk4Re-Dh?_pkJ(7bhx8bYgFs)$9FuZIsH6K=4_2D>BE?@z4?WaW;sPPqvd5EUlNG4
zrGt4g9HE0Qcry4(F8koXl+H!+r(F+?VN{&e;AauOk<TK0;_)7F|Fk|WudNUBYY%zU
z90tfXxadzN$@sO-gKz9%$(}~yClNH2VksL0!K1Y^Re&bc8U9`V^g6h1HGgHBYxuuv
z>h;++hxb|+Cx8+|k-^aL`LT$j$kUrmuO^JR)3>9E7GW{8Sm2^W{w7f1yKZ_D?9k=y
zM~y7fDiI=|u$B4=D_iyAmp@rnQ$Rr8E!kIJz|M0Bz;-^KepJySGxSZf6zELKu=q)D
z0b=t~kItv5c<nM?hXL@PO7<Xg9UP1rD@A$vatf)T|A7<O=HSlTNZ0?i%G+C2TmRcC
z|9$`SMgDx5^Z)9PI1gJ#J;>0~>l|5D0Srw#znU!<?u$j%y6>^%+bhp!@f_3m_~Clo
zpBSIf+Ay3A-^U-4`C<f5qwnASd(q4Xo7MwN`+cVI9UgbMCKLC)CZpEE{d)1y{f2@;
zH@1KeDP=p_uOA(qHZV>;Q%kSQTZXodWe^pQ;^Y^i%44m(**R=BAaY*g=VmY7uKm2v
z<AotI!1m<uFa%zy?wIfl{4fk&eY|ahS9I`h>zThJg(wZy>O}!yw*s(M7ZCOdz?z4Q
z=3)Kps23j|HDB%j+G}<prf0X=D^#jZ?AFoj!!~H05SmN`&43UeqZuWd0JUlzfGFo5
z(+RK!6uz!(zaa%@BwVHWy9Wuf5fuqhNxGcNGH~`Q6a{W_XtX`VoI0A17)E85eF68-
zz38lU(%S(=4IS(y@;~;XO7zoD=ntqO(HoWT$#)A*)S2jhG@5+qkJ(lXRU)X8EEW-v
zY?G&OmySR(IJa=s_jq^<r!DfBrp!d=(|FdOFOs6yV5j+GV}3oFBdCwby}(&X{wCfM
zo6cJzKoL1;Gp!IarNe-Y=sf7HEmj@lVu*^PFpDf@_R?j5Fk}%HCyWtzJs(Pd%!d=!
zL_VAVspHfl|9Haph&k8%+Bw-Mq)d8HcJA}$lAgSbP0HhVPEO-avwhm>q0jLd#zt~R
zkB|;nc<2VGm!2-hxPJ7q{_84;d5v4qmU~~XMp~nP#WdL$eJ{hsRJKbZAB!<0s6v~J
z@OIQ*Gwj)~8TjTvG9yf<ZoAoN!HMy>-fKMHwPAS^nEr;s3@|ZeAUNopw&P!9k{Al=
zndbgzNEeV%@nSj63X>j|I)GsTH@1uScNB0}OgcHZa3P3K7*(7|`Bj)Y7E9WyFodQG
z3AvaLobQ;vcqw!5aZmLJ5+!6`gRrLR8I%#UQV|Y!D61MpDqIdmqDA=U(Np8Arxx@R
zpB)*sjIW+rFlx7!@iovEqe;!ICBN|UKyzsoshswD5u(Y^A7q7Xy`_!lfiX=o)-52B
z3tV~+BTFwQ<WrurA?g{V2+{dum4*UDl#FHk9;TgyH-2}YdU5SbTca6~)9xZ1Yii*9
z8?8(kO$@)zuf_?PJvfo_MMRe?)Lxb$^d<TJB7MPtP6y|y^u>sp-x{P`U`3PV)oh$>
zqCp95j9i{~>&NY*COdQXer-2V_eRm8RpAj#*;rF0#_pQcP;wo={j^LLo{Fl_S5=JF
zQ_+?XdNnb4Pc>T{+0{lfiWrhAa6G*X)idOc8FXBT;l(T&jLt^`ntZuZAeig}V~P9X
zW|Q)cq2(ac4y(9?H_w0=In>BepdKjzKL1O{kS@SZXeVfiFrwh%0Hh31DS6W*jSAlq
zg~+93+YMBKd|6Xg&<1>|F>K8P1Epuq7FP`;HD9c9p84`Noa7Fx9<9nCE6^NQFwi{9
zFde42L=isveUp2Ge?%~tS^pDdEjWM25aIAJ;&=uPP*|_aAGrbuzYF=P#R=E89(jOJ
zI=%T~$rTxstMNFRP0>!c8Q?l6EH*0g5Eq+<r-#KBg9Ks`9DZRuq2=!oK#!i{#&b{0
z^IU%!x9h(`R+gZNLI#^nh7s>U1bBpj!stIn*uZRBTB{(3rswC-DJHESfW@lB!#Q9Q
zxq=_If`*q68hnD7R-M88L|ZIegB>2R6WHllr_qFCn~0F5PE3%j*gdn+c3d!zRwzh=
zZF*0GDd#~m?1@W$d7zc-*q^_<x=bd^MQ{dExd;%Uu(jEU>z6to!6%!MNJCb7jK)rf
zN2f32vy-1sPG6n``AM`NEiR^4;~}K`W2B4Tji+D(`4na=eL>O#+M?bg>?=(APti^Z
zx$N+;%vkIXngAEJ@<xE+aZHYo+PFtHzsfeS4_pHnMPZAcJ6|~1e3Pc{@3OmUe-|hz
zdVac27wu_X_%D4dKaxRIRo7|~A0vTx7=5+%5n^LXLi_Y-Dx0gg3+0qYmip0!r@s4g
zsd_Rp^-$huPK}Ld_LirUmKi5Vi2)^!=axuQ^inhbVB9%xoC~Q9lHl_L8RjFgQJtK2
z$%>Yx<Jd-H|0tM_EcJvOiV&(j5XMQ*+!NaVh_t|hXjw11H#JQ57Zqdr?|U%xk+Gia
zOiA-ZtSf4<6jC*<L}CxZD!j1lI0>)rLOHbiW@><3+Y5J2f(IrqwX^=d0?H9&w6leq
z({+6ww1kyfH!^hJD_!@6J4B3~bG1x@NBL-7{z#hb`{WY}i#UFpuwN;bbUgexoezbE
z$39<--d)hgjR>|l{v&R8A1*Pu35TR0`JyVns?y;i3O4yR!5A8|D~RJR`@|*$LVNWG
z5GD$OBN<@~D0n6Nw=9E}82$Z!47DE<#(F8JQTRHcqk{R8<Ag=0Rekyt5;#YH7$4b^
zWFL2pHvZq9L7?dW!A7e6PKbnTV-tb<F1T7y!~-NGLn1rk0|kE!6I}HG;MFLyx+8|5
z6Z|nFH2eRfN9qcY8ZT1oc*%z;UVZ(A5sr3!^`O_@x+a}RyT+v+f#CwU(s#vKGCzkW
zp)0QdxSiB<BjIkvFP*ptAHTR08D9C>8wMmUr}p;3_hkBZ<6o#oXOyNOE}-%1`RC@Z
zC9Z0uUy-{~CX%<WS>1&Tz|Y-X>*_t!-Ot+z(^p^xgM-0yi_yCYW-<B_T=(`0-<Uv4
zV61%?Q!vm~`A|c1SF{cLO5BwDK5r*o4%B$=^EU199jX9)GIO*iuPKFbT(+fMcN8T;
z?OqcBp(ov!Ot(Ie#}BrLX+sX4Hb#?0q7t>H*rq6yhYd$qI2irnH)RS(DDV-9*><7d
zHsJS*{$wapqel_xYaHXFr=q<~<xM3vqP&g~Tl_RcuD?-#*;l0F60;M3H@rtle+pLS
zN6kQG^%gRzF<@zkC%iD+UcGhn3!8~<v(t;=7vr)Ne;kh%%MHEWM#MD9`G`)rD;yQ)
z$>sDzl6&&P@=GX+T0H&iiS|tHFd3&!E-IvXrZBocnMRAN!G%3*tbt_7XUxviY0Va|
zWZaY(bGMs1trRCp%x9;0eEOof;#8_YRr>NsQ~dojU$f_ZJe|H*?;O8@dFLGpO*{9)
z;^FN=OkmEM2$Ou{$+d`V3?Q(ch`ZqV#Amj>tZM(i!R>-~;=)i8z`SLaGfbuf{{(B-
z&YtPu;R~5ecO68GK0vTsX}gB4kUJfpoE@X@h?sI(k%z5Lx94AwxM%Vh<3*EQvg>j?
zd5&d|J4R^fht|HwJp#5wd>VA#A1@Ml8&6%n=Z1qbu5|nGf)|(cBF5c}%@jWQz}%d^
zMC3kWmE0@m$s?#e>W{%7bv~pWGR=S+kY)0*g}Gm)dQW}3_irJVouFaUXAvb^T0XO=
zbiT;p9LE{>w9~9No);&Sme#*332vlm$JghjusZ-_LM?@E;Q^plikJM(KTpq>T83Hv
z6@BhJal&uh=K!aBll~UW4m-{#797Cfkt=No7$fqFK1H|Vf871+pRD&k+tuo}^ZvVB
ze)9MGpD*#}i@g8&qEBhQQ0lcCr&!~0hfJ)KWSQs1&hk3ndmaDF9*TJ(5<I%NjiOs8
z-b18%M#=m3TdQQS3?-LCi$#B_Gm4cljt8YajbH9Z6`5?-7arfLC%(tM<8BWtV9J3*
zr%cRoTJ^G;PZR9)@DMyc=&ex^4n_aNWW3qh=^1F7t@exU=#O-OS^z-Q4V2YFWiO8E
zCqa~P<LB<#aiEy-01nAt+H17qZu427l)^ejtTaj)tkt}a?4=;NDUvUzx`kh#k^kqL
zr&5m8g^?$9Ve5d~zzTL*eB3&Lq!#f}^Q2IzeXfW*pvE#meo7%PA@n7t*aoXIBoM%$
zJJAxdPqXF4memE*&e^dGg3<d5nan2tA`0J3SQolCksa0i>0U7W5<lLc9!)`dAtl_b
zKTqVD`{u%?pu$?K&iu5OHh--DA|8pctrX}41wsiQjzj2kMw}spbNDzKE-#8DG0YcW
z|A+R$0E`HE9&kn&oVETq=E-SL6J#~H2SK}Y+JFcj$Mw#yY=c{q^QlLHh8ZPdbFEaT
z-P^1AVu|5C@D05cD0x>uZ(%lak<A>^<U|B%Og0^~boZ!?)`(&9wB}(g-kQw2?zB}1
zx3qFYGWCcyx{Sd=TQOv%k2%-x23eOv7$h^Sh{?gi9fXh@kIoak;Td`e;;R`wiqy)B
z%GclBj~L@zZ`ZgSC|*rqeKUOH>EvC>SgL7A*+A4%*^9m#M)!pY{Qm3EBHsIM_=a1o
z&BaFFf@^<QoN2!G7-~VAq&(Xf9~6r|Q$eZdHxZOdU<D{CY|CpJsr-R*uF}t&FkZAa
z>>3{zGDDP>97+KQf?Q*OeMui3L{bBIR{X33%#(NV+x{XE8JjLXLUt<cn-0wBMa{ua
z$~GIvo>_e$YHpCRNxxmP+)n~qBj)c)HQ>nHyzb6;*K?WTQN~RNtOL^d#&mz+KB8I+
zlee<J3nP8T@225N0&zD{)f6%atwia;NDDN<q-ffb{m2(Am0uXY%~XC<N(zWy5&wBI
zhpYiO3#Bzj|G>#Xm#vXn?D~XqaEjz;`5CG?q4g(+C4I>7?Jc=6N$&B0fvcHeqG5Zd
zAC%L|0?t6Mdcpc_Kwb6+@eG5EYg7K`Ey7Lexo;rT7?F8+*%vDj-9|tbaNo$U&-9m9
z<K>8Vz!)PBWK;06z~@^XCdp)e5>1*FBmpBg;gU*mbZsZp!X9f3!fP3kl7UMk`VDLT
zgzZmXq9x6JPl`KEI&Tsj%ICqF7$?*?W=Bg+7Y6?tvFf$N8JNRTh*6$WlQSR8m}0%P
zg8n<djKdJE*w$$Jq0cj7vg#r?Cu2MMrYEw6_vMQL=0F^7^29M3PVk^xP8n#tT`THs
z0%EP^fGOvoUQ<ItfmgM+s>LE3R7tlN93Iu5b)!GTKrM{ZF?!{SRAOF~Vr1MSLLi10
z9?cWY6PwlJD&lzp;^ToN1f~zC*%hb7-m4l|-I>02+Ky`8&-_elKtkrBAd8$&ujVT=
zND6=OOnDy8Fj>c4)Ow#Wo!$of3!7yBB7W{LlbgM=`0M5jX4btM7Bg|)9h31(zjQVW
zCesp(fYORGEV_G1n$+-QQ$$s4Bl-;<42~x3q_o_K#>x3|`T_5HGkK860WQ{7u0K0b
z+MANO%c+!O>nin_lkdul241{UUxAy%aKO71r#AXuXBDFVxfqtelDl+E-u+HA4Zl6y
zGiqshcz>e`zj@ZuZxkzs5BedXiGC}|glg;9H~C#?kCtpe(m#Zm6VPH;0&Vb$@yz>V
z{HZ91{8ZW|?D5FS#;lqAB2act5*k`?`)CyJ3T4yT>UeK;XtA+|5Xrl>+##Wz3~F?=
z#6`=!78toyVm*}_GL0)$v06)%uy!4>z|XJ4#w+wYmx(TeOJq2FvnexTa>ti@MPBbg
z<VYJ&#!TUrpoTB*;weS;pQH}tSZKj4h;O_L!UUAN1>vSXakkNO8-n-~=?(4Nh6Qhq
zL4MMayiJAN_W{?GN@~tw&B=b6YGowbo?Rs>gDDd}l6VHQpqlsc8!oi@y5S+Y;8Q%i
zg0sl@ebEIMydUv!@9QNAV=*YLDId?`d=?aL1BNoc!zWEBI3rV;67%;tSC3`pK`sC3
zLp!nQt~Y_D4@?@21$RDm&HyrhtpIV-sA3jXVfe6{Si?n<mEh1wpXW<*=U2y!OvY%g
zG&f-Obs_|jE7oAMvOkx^FL^d-o_F4oJw|>>a|-y1#^1S>VXaVbL@<xSUob`_-k{<=
zPV2;je#0WcuC-%X=#)(z45J6l{j+Dq``mgApg%^F=`u<t)2nwExG*EqQqe+s9~*@a
zd*5AcM3W>L#u7jKME22KeRO51hjNeh%!-1@MO%qsyPVkqTaMnWvHtwRZVp;CK}t8o
z1PhY+G_BT__w&V@O;>mR!>{M+J5Q%edeW}3`sV5-vNw~i0nF(^?5`cF{lj2%n{060
zCZjxJ11#-&Qa2l2F{i+f!j?B2{Ar|*P~4Dxabk*(4=ouc4S@!fUr??vS90@k>5(CW
z^@*jk^~cM%MRCslI3F#Ot!n%c^Gf%xlVP#wP{&4ADoIZ)vUU#H0Frm|9B2t~YbpL|
z<v-Q#s#Zu=>Pbg$31^Bbr;Pi%B;r}8EU{t>q+OZvR(O~Z>A7H(eEL)7&8;nx%}q-U
zQw}O!(tW=ky}V#dEHOTfwyGP^Xo&+@fKTY#WRXmkY@>|&5#(sY3u(I>u*A8`trC1F
z<qSI5dx)Y}%B2;<0)A0+eZHLcCkuuSd!c#6`e*=}Fc!Qvy8A})n%O&QOMo%Wh6#PT
zg0jZ@a8JB$fu0>qC;w*|UGzUBVza_C5q41d-&##U4@7)Nn25j@h#{m#L)_!UeG>ix
zo8bL<0%t4Puf6Yv79X-NxDjGiQ1%cTN2Pw(M(ys8*=z6S9s_pP;yX$)EOaj)mxk7K
znkE+DZMpXC#l8aH^O-P(y?%w~kVfNgKYJ5*`&ir0#cJ|3hR@BoA+8Yj{c?)M?fmT|
zGM)5!fQs!}#7;I4W?>OJ%il=zM!DF4tl{v$ae0xDC1$jU#>u<>7?-wvRF-2`mm~a$
zD`tr<L_~-AA@0#rH3IEqix)2J{LJ=w>Ab|GB6xFg9g*K4YNyj=KA%h(S{3Bgl=pP3
zlq2Kr2}(}Mo;^tM<MOp961MvrR6~+>RF6N_isR5644b{@;9ZZxlOdJM2mOa2Fin4n
z6V1n4l$Hp=;#zPUz4=FcB{+>s|031S1n%)C%VZo6-Y>2$g=5O?+oC5HhWs6Qj<1-E
z1RXW|gNp?H>gk|(*#|T9TEJB;U{7ZA#Qite0z;PIzsiSXik#8<l%l8L@}#^fv^DbY
zkDiXJFzW^M?O~|Jhg}_H%LdtUKx#Hf%?1&{AqxUTe9(cGizzOwe-t>7{GdZkukfK=
zm0xQhp#IGRyXAr15?3IavPZh_04`@?vR<oSujcbNn_M{T5#hOqAEebvucl^X6U+n{
zBoYrMx$E)huT&X#)d!IrBmZ2waepee=}*Pqp9<>_s>O0G@+VZGBfV$_qES@-o|bQ5
zTDZ+_j=OFyOP#zdQ$jBFx=b0-jGF-d`YR??9R|9*CKokbtDp})v8R4S81gQ;ny{nn
zFaiwgmp}`74Tsi(<rNqXF2@&Ret~hhSj6KR6A|2LOk8Y0ci0#H8%nXu{$Ml%X@YsM
z(t9LhI>H@E@;l_uS|rI(9N0N_VSW;mj20Fz*YTbOctDhzBjk;(70fYWn0DU2yiV18
zw&lrKQ$}28c``?sy%Dk=n)u&xCK~P=uVUW>A%zb--?rUxa%HB`d(}ILduI)B&p~g0
z+%!F<N3|Zb?*lmK{o2MKar@W&fb%xHeFx2g>T1uLCOG7Vsa=Q@n&WO)1$j(EL3_7T
z|EMM})VVFuoMXNp^BMM(Z)KF%(#zGzBok?K@L9NImyCAl<IEf=D%v79MzD|kwFq@l
zCaqBAX9s)`Kl+C7hj$FXRIFYmQ@QM5J_kqfIh@58!#N%?3#q(u7c`TnSRW}sJlup7
zdDd4RQY;7G3@N&e-pyzM74L)?i^1~iP(w?+vckAK-51%zmTmAjsMaPQnc`BMhHNO<
zPSM3h1C|@1O(@to^X{_d^Dp7+Y0}L&BL!uuaXJc1W($k1wwPOojt_~<lwNzJqW1=4
zgi%uCA!NO?lauCAY~b?EgWJa{-{i>JZ0?lxhT)*7Z8I6TEvy-6jgt>5_FVgtCmP*?
z76SF0V$e4%q%S^*+!`yOucV=`_|RAKp+_`@Jy%sGrI?)nC}6m`bGw&UCI}E(R(IF2
zQ{4hFs+-a<@>MrLjLI5^g9$|Y)#tr@rJS$GYZ?q!<P3?AMf7u!JjX~#ahSk6@NA8P
zzqm6>PENUWS>xBOh*!$hgSdU#?seHu-Q!GoB_&llu2vpg;%JbsTl}ypA$`szHl;6z
z$U-S}<{&ziGfzaKj@p^<^7+M4bCn@GSlG@5J6o@tjmFwFOl%a~SXh<;vej;M)rQ1P
zy7GdE-X~|YNI-#pvqH?>&LFh+hkU{~yl1+ES0Y|%48}=+0uSPHdnJB+8%tk5bO2fo
z#8F#Zcq3AAd91uYoNM=8Efu$@#(vm{=rx9hDb3B%@DkEK?cU16DEJD+?ydAf077_t
z8)$}|T2J*?c5*HH@Hz#L^qPS#>F$ws@te&t#1|f6kX<>Q_|8Q*g}7B*7f)4lH7`%?
zD!BIw+nkSP@KX4@+7;YEfmhUUI!WlI==~zx;_iJnq^m^sLy5l)b=YQ2x>v}HVEj$K
zTjXBs6hYky?BF%v!BAmhpE_L1@Ba->m2*}!oSqnhQuL53G!7n9JqjT7Facn7Z`T_D
z089<kH0Znd5x=pz@DF*Jg@|kRVguURaM$Tt9(w0uDzOo`Vnnc|R!;(&E^LN3kmZ_b
zNfghuzuy|Gd2qw+9&K-c0q>kbHdzppc&#j+)ViY7n6neh573-8K2-+7J(xmB{lUQZ
za!jXQcO!Bsly54PJBqP)kUOQ!8DK3V%SqDciu4_n^3oO&IIP}95;5qvOy*C|4blKM
z3hhP&knS^hsojSM$Wd}VAYUK$iJ&^&=q<uPlhg5VGwNM1!Siu69!H}~R&B`vAv*VT
ze%YT4lFj_HAV&k=Ht%j~%y;nFf)1|2aS2_`ID0KM*B$Qi;hhho1L8h6b@B`P^yj+g
zm7%Gb2B5iuI6)C^?N4DiQ_K^1v5NP&ljI{@vg~>vnfWe_y*?OVPz_yK@OvZIM?*Mi
z=#F;kRoh0SJ78`^#v3l7b(w5~8#?|9@36^Rh5YoESBzrGJ7Hi+Q;3%R(U>lbWPH+m
z3e*PuoJUXF5Wle>P|k~|D)PdaPQ0)?!<nD<XL(1H0wZr~*pLeaTO~zTW3?CI(bHD?
z3Yq#O{Z@d=4|ENSHTmX7Zt0$-gw8n~cww@*i14Vt5zXdFgfH$Q41UJe78c+_b|2;G
zPf*kYQ1a*zoI#E~@;BYTM|>1+K75_%_nZtEo>K93_wXVArh5ozFEk6Z?xgQP&<{rh
zwdY47R;NTFQgk5f0)npA;Ml<z`9}0WIBqC^H7&vgsS4O1;feH6NQ9DfRZ=U02P)<H
z$xLhyHBN#HQt@KP+S>Tdi9qRl^vP3^<#x8ldl6&pi=lzmdWA#Mdn&!y(J{cN7x1?Z
zAU8iha;jn@nueqNJasoum@qbMc+LQeKy<&bM#jnekzB9|JaaGwn|${GjsP+u&1kYL
zMaFKJ0wTA_0FV&D{!);p{t}~~ipDqCU0qcZdE3+{jUAUT;IC?V^Gvk`TNyJxu$?N#
z9guECk)6y}$cCu}6WSisd-doK<9q9{(cMi8ibtKZSAR-0JbDvnE)B!(0g#%}qhP&r
zrgvtV93gj&9NjQ3&XrVqw1=p0^Wi3HexQ?I=?dUsBtO)|qkm3W&JiSIIR4!38??b<
z9q}tHN2@+{$%~{EJy1t6v0<Ssle@6Jt%;2UK}JLL?<DCYHk)$sfmYwya`>2yH<3oj
zXMv8ttPDS^ECgOw79?>C9;X>xIf7hrYv2cQK2ICe9o}8h2)B^1c8rxx4T5MDCKgGZ
zU9CPVHIy6KNlhfO4Gq?`@Ka^Gmy(fmWbjH#91n;4y28-#uHp1EmMq?O2JOPeGNd^+
z7qMO2vft;aZ+gY~9CCz1h*LFxR0KQ%kaB~Us#6i!4p*qN=vG~FYpX1|v<0UYZ^mj{
za~B&w=AhAo4in;|6$&FqTn(&7eGLW9@MuAv=qwq*Gh8*w+)%`^$U)_anc?CxfCxn%
z*%1~ULCGQbi<29Q$$N^Q;Hg~9@~*RoTlXbR9wLISsU5xb;hw2#m|*&QQ436{%=uMe
zO&E507D8^BVMl4UN+TxZ$Ov4x57w2SCAeT6f3r;^sTATGHUWCZ!?lZo=e%tCv}xu1
zd`#OwSp9bm={f*6_iC!!k8A#O3VINxpES~K_Ue>jyntNGQ0hTNkF+y4{3>)(v0-KG
zSJ^w2UoW@6iDuOh+=rW3&-)<L_Z*Z#2cC<0UuoX0C5tzgYAIW$K9AJWz`>q6og~8Y
z5JSjNuZJ<-?c$;zFq-OtyV&pC6gA>jCiF)T<|#C{V?|h-#@Ab4>ye4aYv92?MArQm
z?7N#%)CR3IaAbm~d|tZ@i7Y<wV2~2)^2V?+iFS4cn50esjNh{<>hqWx)B)!zl^b~U
zE{chYev)&z8~H=&6s@i^I=Gd&^@E6f4;Ms9dk?0m9^z>^s4$y8wLe7<aq5<I)!N0Z
z1zAxu#I`XXI=MCP5xeTAGa8o3I=q^;PycCbbXrjAP_P4&$tYBdXp2ZlB+)$P)Op#O
z0FBOmE+s+{CR8`<+eF`1?pdvcoIDz^?Mnf{gYo)g4m|_TDRlq|$QgF}naC`rYBj*h
z{o@bCr*24!%*8_m66TTm@jEx>inDxhWnoaQiE_Em#SnH7BK-mC8b#3v#4q9Y1ZC>+
zJX;;5O@Xr?Ib?V89Llh}1|Ee$);8}0JxQ4$E;UuB+~LJ!KK~I_I-dB>xhT>{A+G?r
z0`3llZ8vONTqH|(_w*uARe3dN^K0iz`B(d6<o}spZ(e+cHq!I|RH|F$nw$Tp_ILiD
zFY)IK<o}WR7UEdC+I3#V&*NAII(19z<TUJ*6v}9mFC~tfP{yA!$H?H(d38`fcmYC^
z91P?ARxgUS%IQGI-JcndiK$)3w7qDrKd!$b^IiGXPB{qGJK=!kSCw)Y49<jzt^DfA
zq1`WX^aAreXdzMg8gr$aJ%ht#SC2kxHq~gwo#wOnWg~vsY4w_H|9H0%edv#`k`hDt
zKlVY+6M2V#OR$T)E2Sc5zZ}(HH4n^2oHkOT{FrT7<cK@1lV@?a_212?SQKGj@Egz|
zdgz3N>4=apjQRfuEo~|^^xO;$%^G7GX##GZm~TLw>t^J-4dKCGd-&kZZf3Q3IdN(d
zZ-6P%^84McMWS=iFWO0gJfQ!fUABl3#IJdgj7tMG(M??Q3TB^VHQ4>-FL2EvQSeJ+
z$!#y%rRcvR4O#Rfx>u!LMxkO7LMU#Z^ybyif9SVG9qZCc-bd+bFdvWWG;I-&sXt5^
z3DeF`B#slly9h8_Yz^EZKqIv}&YhU!XQ!tJ&ajSZTIt43EAQ9S>ax-sx0_m*6*3*z
zW}yIiG)8#(bd8>y_~dX&@rfgTrRztStE4};NNMDO6nHsov!fXuIQQ*YV>TVqVdSRz
zzX}5aESj`U9wa-I3V|VGW{7x0iUUWpc+Qg$5pgTqZ$w@KblT{T$v#I6S>CWl)9H3G
zG5pHk(~*UX90oHgm3eAk@ndA6ffS}2#A&Hl-zJ5Cn91_@b0pA?Se&YnfP889+8YKO
zcI{8)%poJKQSNJltJ**JP>?r!zZ{BquVN1EE-4gGpGGxc`|_*ulQ;aupEJc;gXLF;
zZ)_8|)=G9gnW!m$Elc1Ad7W=`^Xs9*=a@4~ST*rNSng85`?xj2D=OfA%a=6|wbu%w
zbGYAQ$=S)zC#Nq@gor{E{KipGK7fzkJF*5<#|$(N_PjcqcAU2L1X1-(3f;!@DN|{$
zCQFYp60Og#^)oiANuWqfQCVVY(){|-(~oaA$J39=oc$dSKThZLu#FAGl(|Qp)5b}U
zKdLgxz>D~;tv^JNN`gWzmjj3d6^|Cs<+tD`2FcH-OjEg<!F5BC&om#Dv_Z}J;};4$
zM6fE12Y>!BN4vC+6jFS6a?^Fp2#3NJLYFAq8Zy6*C_xw^j1Xajz5h{U%)0Mw5M$yA
zh)~NSOdf;rB5}hLcC?&Y1w{ej0LYhk8X<n7rZf}WXCJ=RVD_l(9_AQq(W(=D=SObj
z)XP8S**syY`EUg385h>rCvU}G?T*?*7h~*N=CVM417p|Z1{Ln=f$iscvJCBN>C)%<
zvrkg2Epy$%-yah;cr0Y=6cZWpui7KKX74$sB=7Ls%!AXDrrv4I{0D|iZatj)tcS|U
z%wONIKL#+E_kwJSKREF2Rp4B|3WSHQBF>@KkGv`E5Qn-$l_=3IiU72`Q!A%k<jrX}
z=Q-^-3lO9}3X01ftzXWpPV>}+i+rr3PO9rrA5unfPE3Zhk1ZgBW*Q2MDNftn`1q79
z!qX0PYYo#%O(!FmO}PF=bSd*QgMCcafaMyk&n^cVr|9gCH(ZpIIKD{wLsk`C)7C;e
zkI200&D=m`hF89|8_Ovz6bz*%5*Z~Da@G<osV^_s=7TI6=$7QuHKj9Y!Bs^wJg>%Q
zZ$}@_9XG*e_iNiw|H^+1`@e{AaL1Pb>GuC}wf3ZH+y9@iPk-D0zrdd_Z~qrhOI-WE
zUgEXroA%spSJtQ>9YsZ%QyjJxD5aQa&gYZUlfzE^85UFcq;wDrHswRUonCW08~HXM
zFWoSn^<nD>Y)y?`=V<L}hc;t4oL}@87xDY#liW4E`=9{SbUu|X(h1Xw`o)421SYom
zPblV<(Tes=5$9ykpY^l!0If1Qo86@D@-_`GZ_uzo!{KOwsXz0==hCpGYPNvgl4%lG
zGl(gis!)0+3#+o_JpTmYA@Mh<81hKseR0%q+hjzYe@fS3S8Ov%Z4|ljW&?G((m@Ob
zX%o8w$Nx)d9mf7o*QtZMZX<pFe_Sb7D)#<gds6;;|NjzyzQF!(?&QPS@V$8pueLti
z)~e;WQi&gf_n7g$@&kO|R_K1>%M=cv!}*Be+`M?|FP;rXSeA^P)+*SVRxJY~D>FXg
zpr{TG2My-Aum!$52x;e5<#;^8^OWPokk1uDxH{zWrI2R=FFZd-+Y`B3P7V_<*pD=<
zH2f@I4(q1>m$=)gpTw;brXanjA4L(mHE=cbpm|t7JL-XBeWx4up0mC^m-T&xtol|}
zn+OLDX8sE!(uxg498#nad|7Yx;!3$3pLGir+e_p4_$7KS#2jy-P`<ADA^z{#@%|}m
zw0_)VKotwB{z?bH8VstsjSb#t3OlXS7sB)}_T3NXqzzpE4rg_l6(P9$l4?NEo+wwk
zt!FTTYD`>3>-^_vPo!l)Yf|qN97|OCaUP^rU<dmz1|5YFRLK7WykFX<r$;|(MOCM$
zptpj<#zr>#gC$$VS}(S1SfvXVT;T#L@z!pRcdP2+)gp%SFiI+mh2sq$XlU{_{ym;0
z^BC=ed;9|E`Af#U!fWOPF4pU>qVJ>B&h7@=VEsh&tR;N64%g^3eUF}A_OBVoEZ7#4
zr-LFE7})PyCx@rPe4k7}6OR`2(UdV1;A7MMBfQUq*P1Z0%jqz|-zWtS#BjjxJg(J;
z%V;@W_Q$*!e1ZE6M7-*B`b%F^>5&Rzj4Bn<BpzeyP^cun{E!Cm<?k_!nC{$y=Z)0W
z29Try!)}E?;RDJK^vDogr|C0)PXebv{vt~QqI(jgs)&}d3;`|`pjx}4l}AtK*D$0_
zJ~XwM1>uf=fh&d=#|OW}2aWw7wW}VTRs7J%ACa_WLjp1+<o%Mi;8NRz^S$1t=3O1G
zK+RrFLJg-bYj1a`7omRgK6)l`)1<B@dCcm<)4eFC`=`BJOPBlFkd-~ON8o-SFbn4h
z4@Hqg5T1@NHAXhXp_cF4Wd8cQ;Tv%?cRzwh0v|4izelX$jfme;k)t6l^$OE!BL-6w
z&lqJl@CAl56K+(cY&JPPIr`N<J13_n&D+cjU^X~a+Z*!G@8f(rc^8Q$=GT$DvJ9@j
zO#Y-aoX#>{W-sI3O3S!+>tzh@EX}6;tC!$zxoEYS@y+%!Wwv|%*_N>p)uMcwb4}t4
zsN!<PYUV|{Mk}nL@s(J7qF?Y{c&YuYFING3c?H+-Ni<Ft5GJlafmNCOe#JIth?Y3q
z<cukBZb*#zcif!J>tj4xI;3!*tl~3;ZU9U3R~~^5x_5as5fEbZ8`19&M*uq{T?9hQ
z4FW#;R}fb75F_7IhGQ?vn|5yu#tHZg+*s96ME}=OI8qb<0C~tj9^#hIS2ho>Dy7JM
zH)(2ydc$3(X>9k%*UYE6hxbv@v1pNulfe?4IXL|QpWFk2XM&P<C*)nbS2SoD_lI1>
z!8RNZ2XE;&;77D406#8xhA1)U$~GN)KvG3Tdx6{tr8r5IQuGU#E+X_nWZ@RliU~6a
znky-04`9PN$@yt#of^%_*XI3RfVMyb?gav5SPn3cm_P%UUS0m|^(OUqpneM799Dw*
z#7TmF{mIZQun2}-+^|~AAeBei(M2|!8>Z2Sb*B~_Z#h-N(YM%}>M}yPqi3jctCfoi
z0pjD+gJ%2-tgJA7Pm<B{BAG`odQ5iABOo{EuCyCTH?+ciF56fmYQHRm+gp(nd#)H-
zeg-kL{0w4f>601D>sv~qn`8Z}3Zq_77%eSfvRqLZ<*)yj6-LWk!f2T;jI`j@VKtRs
zQRmT9gK8@F>}G1ItCgn3`sSMD4NX_Kv0x(;ryBE!%T!!QpXy6iXZ~zrC5(@>tj599
zx}z+L=N8%%57ohteomd#U5N$#rLThYs=$XCX&2{)Co4?21-QKaogPB0|E}IF$!Gso
zBZyEUi+-2!VmWW9=LEc$zsLRgyS(=c%H?`ZxD&3c5Z(y0u7w09D6g;WlKa|n!4x{#
z@RyCcZ@LlE3AV5LQR~?WrfKE}aSi;byt9$dR7)-tg~Nl?)(VO^%Q?*I!&9jd_$W~B
z>LHP%{{jjGN~0eGcU-_)imcFz_fqYlL2EWt&F!BWHUf352b-2CRU{$RVLcoS#^f-1
z=7kh!G|1Gx2FxHv;}DS8ARg}0u$slA0g8za8xi;mZ{jGlu=Q6G&_smMz25RS2v-q*
zET96h84T_+x4tIyb1@J7<(@|WbcyqHFEW#BRKvC*gD9?%XQ|Q(a7)ni+|IplIhtH8
zqH2&r_jyb@K>?irHyxpu-EJ^QWyFHfey36q*h4YL;A?$DpINRw<sX~D&G!6l-D>{)
ze40MlY?ehe?`&_;Y+HOUs#p_lviZ&DeBBU*HzSB{we%Avb!6#mVUP5toNjJV3JWGt
zE3Y>6K2tY8j22UzQ=J{uOq(;DBI)>OHzMN&nOCy8!`+CiXJr1N8#7*k`zdq(;FI)`
z!`yS9kZBZQBYVkrH$jgDQwUhQWq7RSn^mv?zGE8wlF<e^rm`vFGNeKl1^lB{x^jBD
z9n=IJbt~3Fu~Fo!P?|CxZc7W(@5Bw%Lt{caB9fK?B$U6@UQz3p7>{etPJ8U28tfRn
zx)~Wk2Yp@ghUBw@lv#oHY-F!TD@580Rpz1gg1EJ?+Y~S!W?5+RAxfproTZy<@y+l?
zU{!y;)Q*VB62ZcFin~Dj;yrpUl&Oxs1>^Z$piyl}$xDLQveFygA>)E1wP&NrXc=Zp
zc-u6GqY7irjI1!%!(AEXfgev?uzw`;DGvt>Aq}E`mj8_Y+zmzeR6qLTd*)gSYkF{A
z&ZVr1))RagKS!4>Hx^%IH)lIMIWUB+c;Ga+OJ(@LT_zvMl?{(-ayp0wR=~tGVvQNa
zJalDY52s8kgAvLQl@TxI!yfJ0Npi2|l)naCZ-TLnz1h|LBy{5B8p!QbZdo{n!-7i9
zqqZ#vZoWu^yG$<Op$T8-r3Yr_Tpl~4V5Sc`IXjNlt>oM?qX60k#NJ>nso(-cr&;mN
zGrxGyzS$-O+)d=;Tvv|J0tefvZAD-d`NwpUu=0PXjFhSJ#@t43BCd=>g0~6D>6}IR
z()q_Ff0vFU#Y_oU>HbEfg~U>$E^!3~j9-m<<2eLjIU#4$4UJs8N=?|)$L0@CiMYX^
zhri5+AeCS<)#SKW<5~WLj{@iUvL6H8**39A<{!AX(S06(5@m>vkzSk_L)4>wF<x^F
zG?J6iu;Ux<Kk|$g<u<A$`e-+_WIgWCbtQ^Rv)?u0<x<AhenPXYt9@Mgbl0T>1G3l)
zB{wHn9m*yY-?mnx5?_6sTK~8?^)7+AU<q?#GR;FIT3G($(NjpTU`TIHW)Cwlu?RIN
z+nuePqYw$%K*g$C_UAdxv>`!dI!Cw?;*F_4&|^Uu&V!=P3H@Pz1_zq;k}j0+A;x_4
zft__qrAJRMK9*9m`cebXL*ylRsZScz#YY|?9-_nZNwt76(0*QEy3utYH!!+PiM+KO
znqZWPL=i^w(fZ}AdDcXw0{>>8542!E<s9=k4abh~Oe4e;n2yKs<$`wt-#|2<wYoj@
zF+VyIPYdE>aNUG03#iEk$jG$&%`*Z|-$IXX#{ExBrzv{gOi;WOb86+if;+hnhy)LS
z2H1?u-tp*>ASW0D01S3sge%ElLmVV*3b>AbLBF=|hQi4wxtuLOu||?f1XDa?9TeB{
zazcfZK~4_EpfDxlFgu+sOhrL_rOYwve5^@1UXBZffBy4!Tz}eb1J|2OuLt3{;pLJO
z@<iS(SyxML6ZX|o`pweZG%AqhsByL^SD~qlfY#pXbAE_TZ7*GllhRpmg@9+xi64xm
ztgStJ2{Zkj7V!gK6PBh4WQ=;3d*Bf>?i#VlgNu1?%a*xUws54NkZ25VW;{-zv6+a^
zvpfgWD@J%&m83VIDFx1+i19}it4Uv(9>-={r*n6mx|@iCC7h%VFfjEb+^AS~qG9o5
zkp&O+R^E<QH~y)n2Atw#Me;?(krs^+XA*Gm$CRFX5oO2XDL!}C+@LRboaD)^Y;%CX
zquBujxI)d-F?r2w(xk?XXR5IjyP3sl`ndhFfDio^Zz8#0C!7D#f^B%nU5i8={bZNm
zxuhxB`4Zq$ivyEc870=-HqUv`0OLirlFc(zncF;rxLup4>I=LL9VZxyN<<3cfiHan
zbD?*mhYv@h*Iwr4c};jvEHv29xw49|<IU{-yXavw(sMNixe~`x2y{&(x%pGwUmzq~
zkh;PA#?xrxk&_{45tadIwY91AL#$-M*0yz~tE2^_<2z;QGIdO-^L!>$U9wT)J)698
zdVGBYJ1Hh64M(Q*$v#NvC(Nw#Y%?o2xjjJ8^Zt8cVcb{v+1)Gw<yN)n@{8yJERVI+
z#c4IkxBV23cGrvcT_@mjK6>{KRT_VYOENZM*=#-?BnuRrq)14F7mS%y<98yXF*@^T
zzE~QG|2%t;2fQTdV`7oZLYASuSufK2jz^+;r|~>~wX<E~wdM&Y4k(p64Mc$?gLr4p
zH;Sy~m%RmAdaxKgDZqDfi6n8Gmq>H%63O3u_!7xMd6P>dv(Z#l*S<tbJr}HI6}XcF
z!L7~%pYs%uL-AH;;~bU)Yv#<oP1uag%|{X?6cLrzyp6WbkI~jSdgaw~WydGwk)!i*
z6D^kj4l|BpNwD%i*HS}buJh!ZX{^NB!zMgpI@h3cG8uQ(C13%e<uMmYgNxRd>)B-9
zAR}=rQjt3E23_Xq1}F{a$G4kTvsp6dH<grg0z+`S`E9kx+v3KOyP{B25BL1hPw3u<
zjUp97?Pcc1`H;`TQ%5H#nOT){5QryeCxnk|d~`$gr{R~3pu1sz205j8pE(b+bc(8I
zI6c@1#bb0|z1=m#s>SQsr((xroto02$shYq5n9){Q~5mxiyj1k5I<vnf=!S?V4&DD
zjmPkr?M|D<k+5I^8HE6Cd-X(miaVoZW9`q~lzy&@dkuR30N+#?Ld{C{KpT*08@y!I
zM!cWPJi)B3-R}0LXmZ7Xw=`Tm%8GbGLFv)YUWRPBcTZvv5cnua0@%IqzWd&jcW0i*
z-_|XOPB_z~q&2)@^m9WEb5-vi9yco}eZ@l#u1@tK1HlNAV`jt{ckbRJ{&*Kqh`s0z
zkuyvha<O+o$ZhE;xBAZGSgCO~*XdvNW&^VcowFVeHsD{@D3eoz$$S9s8N~5&G<hE{
zW(nqKu~-JmHWk2bme#l0atPe;n93lXfh0j;mq>s4m;K(Jd+@`QX#S@}AAGW~RV`^$
z`JTS;8oLC_Na_LoE(MhT^b@_M==6H~jgw9z?i|7<DRO*#AIZ9`vYe`D&#Kgon<w>M
z1LTiA@Ux{sq&GqVQhWeB{kM&lcm08b^q|?0b+q?EdcVs=^qVMGJTisvZT_a<bQzHX
zetq<m$C85#OFS2V2;i%QBvbjAs1gKQqA=i?CY^JbFOC^%L)z0Tt!J0ZE2hanMG&N?
zFPfd_&H6zqGQ>p-RY+NQiU{4gOfKV(qsegk5had@mrJwei^FEkKqfkG$T#b9kiUz>
zof$>oITPxT{(z&<q@0}bl$Up3I}qjHf4<5eBmV1bIlKGlui5!uYfqf`ua(E!f5(6Q
z5`Vr-{P)xlrT%cFzQCoxpLVd%#;=W|X5;h>j<UGhL0;ACbL-F9(UEz%iRYBjWr7FH
zWWGqhcfW;6jnurw#pql<)fSd{sY$VMp;znX;?n{;6)y+6onGsd)@8KUhf%RDbIHbZ
zIlBvF%axI>71BnxIT&D~G_E;0bGVybT~HMN&4Oo~EgLLW;5iD@!LVkD?hJhObqe07
z!#0BKE*mv&)am7IJ8dJ#?)365I(_kx;{XI)F--@QaC+PTnDE7?$snd^D4$_H=QtGM
zieE001t)y3+w8LQ5c}&z6;oCGp+0xiXYDu-1Lp3Fu$GMPI7FlXzUUX~9{8Ey(u9Uf
zyP-OqF5Kr%0|C~PP-OdZfy9#1`c?=?(jgDO>$p7{)0CAy;j_+DfJCBC?(hSo$D0O9
z0qU#4s5PML8;&uZBy(_d)n_B!q9Lqs2tH$LVqI3`0Kkouzmv?TR}*+!^0}A5b}gDL
zlv5aA(r5@9Ehu0E2U9|C@QE=V#$gj6+z=_2#KL4#1R<)DSEi`u`h6XAUTj47C)0?~
zGHj8=1ECw|*Kl(*TCfr12;fL2YOCL7UyY8E3o789h}THkk`Y-iGIw#iML=j~GNfU$
zt~#&~72rN5BJH%!wrlq%i#`2!5kSzWv%#hiqd6#2QRgR^Q(UlI5uq|%%4EhB!RJe>
zF(i`fwPEw6Zbe2Z+F}Rx-f4W&EoN^lv?4P1l~T&eT;9dX{5p#dKHF56yA_OE{W1{+
z+~;7R|B8jUk`*?wS}7xI;cAE8*y^ItT@c`YjSbTe#B6B)gI^*r|K(5?Fdhow$*uvj
z>vfQ#2Z6mVIm|bnHyb}oZ=3942r`KuX+`i>JPqjnS2v&$55LTtLicnm!mwnbFZ?r%
zSom<c^Ci-X7F1v<-3^Pxs$C6Q;d8W?XNr)9__Cg6DYFvx*_Wv`ON=j96ywS!mrE-Z
zD<g+$8tth?Rj^kbhQBXli$fvv0LO|uhjC=%V%2T~@rE`Kx6f<?5pUZ9n}}0M>>&BJ
z5a76sz+jSI{o!v{jL+fs@U8EHNxWe)UiNQwfDcg!G3)#40g!;&%RB)iM?Ksl@O+2C
zba3Ywrh(_uIWtJ<Y-WouvWu~nInk7MXnryEE>U)4l}oq5m<^?=TTYiUl`XS|QneOo
zI#$L!ew>f^I5Vz6jSGBdPL=*}!xdwY98kkhCX3=SNJEH|!YL9>Z2Ae}WG|>a>UW;S
z6a;k_5`mh;|JsJzJNjeV|1Un>b?#UAlac>*yHfMm{~!Nt|NjzyzDWKzJ^gF@`LCYT
zui~pWeVdscZ8-lBUoPIcw*!Om$o<G6jVpuh;bFXA?>6K5!9k}0nY7AIdF!Bg(rX>I
znjwHn`PqImZk!&s*<T0EE<kO$P&N>Q+IFBC_5CAuIA}JWpPVwm_$xr|I8ZyaC)N0%
zRevUOrN_Nr+f9Id-0%ZK!$+<9Nu!AkKW@5oP~&93whR7I0G*oME;w^Q!03XTd`vcZ
zcG5j-2S|2sa2&T-kDAA<&E^52@qpFicAd5MqDh4Dx5l9Ajh+VNr5S_jwwnzBdek~@
z_1G}{DA{B&uIRpE7~7)ZN&xD}fT{+ddJ4$fJzyeevST#qXEtcmfUB16>1-=jqr`57
zf`hCbH=FIw=`pz$9M@klpvRS}iK^B41>!XP9G|rh>b+(mBF~v(1+4kyk{`BldUDu$
zX2Ncj)?6KeZKVl#*5Ue0hNFJG{4~Q;7`i<)8IL1tGiLhv<>?8W(Knp3!`6PY1E;JC
zxg<)AY!gFQB#sV7+&XxrApu###%)zwoX?DIKs{`Tu~6fTV%rD|vaH)VV)6m|U=~!K
z$x2mES@+e^D{$Sc*=20RC#Nq@psaj-$o^0-H5x)c@yRpmr_=cY{Kbtk@I(z<0@3#=
zJ|DdW!du9IR!kLKCf7^AC#<s!opftFN2|oRzhFu;w~P<DF(?vlXZTD1YB`-G?;vn@
zGH1=&4r#LE;s@RvuEXNAVSua@+n)xHeq9;qf+O&@`q&>)z$5<p$U(wguhVgr^hC}E
zQOujBP`CYct?Q3aWf%sOftJk_Wij@&zIv++rynP|28c9c)vjh#QP3{)T{>%as~Xdh
zVtTcN2nkwe*qID&POirEEPs_W&oB<HYP2q%?|X13$!tEo%$?~OP*W8XwM?c5{7P!k
zr;M4>t3ZIE+}u1HuvIr)7%L#qaSn|)Pzg_I2n!HA$<F8fOM}6&Bj6lHG4Rv6vtd?9
zqIut3ESfX{D9#ue?%C8gti_%1M=(kZ57r;*=rKb#p-+zp!Qj^w@IuFNZeRRnE=L@N
z9frbB9&A{-lk(xL^TlY)X0CoPj62bCz@yTb5`u`^pY`94#-rtkn*+1EM?K@KktFy!
z7tA(W<eNXEXP5nfTO&l`f2lO-|FVCK^S{XTe8)b38UBCl$H(^h|H<F^-@nYCf4%(g
znF-jPgz8?e#9-cYzq_KFk~}M0x~DO%Twa^+K(11JG4?+B^tx7lL+`)J*V_jh?2PcZ
zj(=~%-v{_}^)Y_ghri2B_`3l|5O(0Lz%ScR6wnr;sBOXDb^Lqh5dPjTV}*k<)+Vfo
zuYrHp5$hBDc?)6oYgpmQ69rT~(6E*j=wTH**1!teh`WZL_ObF7R^G=J>J0?NZXed^
zB&44_Yw>=o7r$&B^q#XrXXAOj10SOAqiV?m$9+TLG1XD?qzb<lEiAAjn^kx8CPWJ!
z2#)-$b<*2`!{rUb3LLhMz&Wqc>m04Ina9_3F#Q+(#Rbg2_br{qANpf*;cy`z>`EqM
zYDJogl@iU$K_Oo*YOP`T@mn?K$KbTJywS+;0eG_)pLLrDelIl*|1HN7#309_VxqBG
z{L0>NS)mxAi);k?YkTp?-W~}utabjOcAeuFzK{dVm{3RjME6iT5R#1ZKDRRf07ya^
zF{Rs$e{TL7H&1$<R<rwNR}0Sq&SlV<bFD7h+QviSi5(oVXcVroc#j~e@q%{*ydoq>
zYWSo;H+BCHF{05MFc%55uNjv0s8*t|K^n{-E==?=s=NVn+fF{<Dgsxw^FePRbhWn2
zLoI3C03r#j3;pq!f-lfF@luJ1@6ZtE1K*od`2+dF7O2!}zo=p86ggRc@K;)$Um6H;
z*lHdfAe@Y|<3n_EdUDvQKSRK{*J$TO3b3h&8PIl4WZQXrxV=ITw=;Wa4E^jN|Iit1
z{vPK?ye$xGyn|Jw53T%?A#g-EJzL<c^iDi1c5Zy<oy(y?3mf$tImzL;M{L8M$IG{4
zCDfX|x+c+51QpDbibS%2SN+RAw`C5&(TXIrC7U*)-#j8pYZT=ZgEE4I{Ra3n0_a(m
z(Z#Fqn-HwT6>4A@pz1f0Af^bJCmhBRB(k<HPEs;LZ|W&*wF#vssFnm0&t(~xiZ`fr
zkl}ky#v47MB0C7pp>GkfTXYFgAtE-{JSX1*V!f8{%TToXrcPVF5tdIaG-HF2QLp*p
zvI<RwZ>`|$xq^VWkDBizEDl-40xPSs1LFJ>+gjK0H)sUGDfSQh0&j!N0FXP;ZyOOj
z3K9D-17lqjO9Gy#isvP_TYb8<7!RfE@+YlwzSU|bmP}uP_e3+)FMI)!1ia9JR+!od
zLIcY*K~#hN$QEgWECxi1XqqS(X`~v_GyF&o$5XGDI*)`((*nARF3xeu<3(Vlx)XAm
z?_DJL-V1U!0gpxF>Gb{8ELs9vM^f+(MJ}`N*uaO=WHI?aOTIv~OkWrM`S2s8D<1SG
zDwCyF_5Oz9qK%Z!=JU~a!XOIN@GB38qa6T=1Qc>=#14|Od1(4s=k%<N9l+(y-$jk0
z@6y1;Qz&+@EEeU0c@5EmHnHsKQ}I0Nf%b@89a(Xp$YTJGVis=fMw2UwLI6e7$ewlB
zMc6s_{Vve6c>3TKzi@%_7OzfGfQ&Q;=HF<~K#oLG{>HZrjdZ)#W`TWr5EaYUcoF=Y
z^g%`}OjEFk0l&T`qCSj%1C^7LLmlkb85Z(SR&Ki*K4{H4kqaay4i#xLeKXRjL8Ir9
zu~Rl&?f{?V#wsha-E;maRSesyN3-N`w4xxsdntT-sd@)eYx%d-`i7+z7bSTGkuHaB
zFMU#^&K{Q+IA|E9B{_`Y?wG^Xo=xOf{iv<oObhc^N<Oa_wmh_3t5=@y^QzZUU*Z!F
z5BeB9-Id}?KF-VP{Ha0!c@{b7Nf5*xuFa#jSK#ac?f}V>RB(5xIVszNDia@?Hvy(<
z+-#Vu`q5~a_YGmD?w*&E@IpN8-h2hK+h=5^9j`qP5B{co&oSTei0WU%GG_`ZZiaV7
zRKYRb@je2I2XK>te-015z+}VYO9^T);lnn!JoTz}y~=jJid%NYYR#=k5-s1lb;JNy
zK8>@qxpX;>|KI=j|08Vhw07L*wmL@LlIz%%)YA{Kk_q%d$m}H!+6?_4dyX+uH;82*
z(v5g7GTR`z3S!OHmVwwZ;iV(qP96I83W#zK-LYO~A<jE=X=DsOiDoQ^s^Iz6WUyDh
zmQPGx!4nh0U)o*saj}p?wmdHX)njKF{vV_H?_c5lNA+>7?7aV|J^9=J<4gScGV!1E
z2O!`t?mhq!U;h%`BEs8aHtsK(T5*}j9mH@NpeWxkW*!`~P<iZy=yha}W)TK3ilWEm
zat1I1+A<L~4P0k%=052bs^!{_2aG@|B4wcfxNMJq0D+Sao|d@6aCm5WPgresi&u+M
z^gnCtc!fW$M-^6!9aw|?6QMHQUV)y%Fcs|nlnnoNCeU>McF->&^6hlOl}%DUV$C9=
zeTk=x*+&x$#R|-`n2jdm>EM0Ll$H-{l^}NbAbjPQQ}oP*GXeRw%Nr^Mk8$XeAye8X
z!*Md_@zZDhdH>RIwFKCpGcf-__vqv!UBQ_XYNSL-(%LYpr1>a>>Re4$8HyHWe`&kd
z`|4Wp>Vuea^xQ>dj018l+2OqrHO(fRZQx(c`}Lbw4UGLC2Y&Y&K!*Qkxw2KW_y5|H
zzvDlCkw0Jj{U_~Jt^%OGXQ}{cewD`l<HVpYMB6nR1dRaQ<}=vmYU)WHy*F!}sKm~N
zt*Tk5XMHZz%0FzIsO|W@?xRCrs9<;iXhC_R6T|br?XvX%khcSIl@B{jHn!)TX7@SU
zy{jRxHi58_?}R{}XM?<yAb}3`@^z@EcPI?=Tx%`>@-iEw(*@%#@6nd^{t;7m!(B=g
zRjTH=*?2KKJ<dGA6=fBs%V^2M&7>!#lFfK|@hojT=^QKyvr+-Gv9bAr7lW`m*&aQe
zM0Rt|hxlq&Rxz`;@I2g+`Fp(B^)xZJo0w-eA-p}Ubj6|WDA>6MyJjNfx*_%BpOBhk
zoG{(t!?-_z$1bB8gdB$`5xiiL{7xAt@o^*n2<^wqtHI?kz8qhSRl((A5szyO17$4|
zuN|EvCcnr1`8x`&l=_+xFZQgw>hNuSzQFfXiwlBT$Gn34%bFAJLm*yWlkJyh*+9j~
zAT1O5p&ZqeY;~>RF)CYc0?mXA>YF_f3X`iPguk;~=b5N-C)V2i5;=@-lIzIrm75a{
zmD10W_-xlmU}i44)#u5of7{8)lP1z^Qj*I>N6Nn+`pahJlCzmFFPA$-JjYBtxJU-?
za~&A(DBIHRiYb$BydFXe-dq~Xtfk@9yV=sXd4eNz4)RMKb~3GO$4g!G*s^D-WKS%F
z4-5bwu|kf{#RwjL$JdvmrKA`2U_;+dC^;^aK`mlcnSOWohL2$|$EML6cK(J87c2H=
zjTAYnPx6_J-T+{ZKaLn>_=}C(3|eBICo=@O4Kne}9dm0f2J_JjbA4dw0tYWtW&>Cx
z%avBo*U9N57vJZ}>ZE4+W=YHZ+JiUueA+e=5-M^>(bcY{yQAb^HISf8J@m$GU%6$y
zA*}KwK@V(&e?R^HtNd}|zkTuiFWcp<E%*NK@!#*izsR3|z4za~;@M!tZ~L6qXwOT%
z0yHoVc|5wzi$htIaiNG<DF~?B+9X-R$E?Td-efrt%u)Xs44mgchM&17^Z8`z`OM?=
z+27Ac<0TB+Y4LIxUknHND`ecKqEU+nEQ=58t)u2ap<+RFPQcOz;o?uvdW9d#)*Wi&
z`7;c|)q%{*$K66?MW(AfqgL9D|37>Gy46OqEDEFhuXu`T_OO>k%e27Q?PVITfkD{p
z<~D_JySJB54WSEYwr-Y67|+?BPjH^#Jj?e^-z%JqOI|CpsstFj=d#AIk*e|%nHd=w
z5gC_Ld<}P(c7d8x!Y%On|AK+@(P{IT8o=96TkUv6A^Yl0ifdnb;_qSYF}+@U?0^4+
zUPn*toJYv#7*U-l29NhD63qa~*wgD>gB(MoQ%YK|D%i%In30<oxGUm^Wm1SQlk0@9
zZdt(Ous|~AJ*JUX!GesZGO4CYOTlq+bWrrA(uAj|(@k3K%d-4A>kfM5it<zp2kcS7
zc^=xp!2=zYr$iWTg+D?O?x7Rrs3c)7n?ri&jJy*4S6+eVf^D{+rF;o7G#rN5<#27Q
zZZx@yvT#*mZ7@0%4u``gu?wg4NYPFB>nWpQQLy3|;<8&FREiL>#nSZ$n0&pPY(40Z
zSw6Ke{qX1oUH(T8x1xV}LKpHqe=8Xe`|Zc;tmM%6W^S74d^GBhF3?<zOu@j?)OLiw
z%P4BMu*-Fu11A;Lu)r1L$8#U8gxTYy=GOGUJmky{TZQ7WIcR-~=d*Y==yoS%Abn`&
z$(=uXq@0<IL2G;dANXlS8u6nG<&ts*vdU03JzDRWN?}Z*L7;W2T(T$`kE6kObb+(=
z1DU9jWvotdB89Hj2)&kbBX${Oh3rJ%{FFDtrh3WPK*VLla1)VCTcepuCAuR<3Wv+~
zN&KqbBs>Frtzt+^FySHMUm+k=em4n};gS>Bg5orfuutBByaB6`0Oq-%bK|5h5N8yL
z)*>-6#>Na-w%<Iy;}To?^d({Erm7Jb^vH(O#~Q9Z{I^LISuG8Hx!3bd%>Yq+D+@F%
zC{nG{lX`tuh72_gUrm=~92LzYd6}-Rt}f%m%XEKG!c6K1zj4Kl{L-jrinQr1_P>kW
zn#B-8qZmO5m+wc%M|G=gXD(Lj+HLan_Q77fdnQ<*w^nI=HviyFoUXf{DHk29VS;3D
z|80F&9Vph3;EQD&b_5?MmtxIvZrK%_iAMK&CcQtrekDHO(S(SGF~eLbE=o&(8-*20
z`>PoYGce&p6VWT13sf|M86vo5L*Py!-v=j#j$%TW2Qxu7#sB2KY_;`Y%k#<d^JNG~
zx?G{xzBd(P=2H7S=+z+w0igA!I|sBP$FfD9nslCU<{NbR@SjyR%IsFU$xK?gbd3Iu
z$PAg>0#dq~@~E@U;G{cC1filh8b!|H3??rX=2!a6`PMnPj<7HUJdowt>FP#Il;hNE
zZcUCVl4jOqIm7N!XP_$S*=i@#f$Xv^{<rdenD8Wh89Nh%&PVX-5lctQ`~pu+zW2_O
zrnn67uhB2cgUxevGO=3TLBJxD0il!h2RW@boL6*9B;Q11hCN5PLDLujv9{clV1_;_
zuE$5)^3IC0lLFL#PfV_+{uK3vw+wD8oW-_4JNFD+d-W14gIwbtN50c&?Ume3i*1F>
zogm9rBSi$exIF|ZTmA15FJr+&wb(fLRJB&tM-g4cf=Lxi4$ErDti=53%|s7Y1vt~y
zX?Hlrr1L`VgOh<QjSi2SDu_AJRwJ!gCUMpNvzi=xm%6s-TGw!~B^Z3kQ?$0hT^WA8
zd5o4QRTba29$d53E51nzlEQ$h!QL72q{)_Yi1L|I3?Pq|=9i0wG7mAx_sJ69Eq}{f
z={uY2vbi^5SHf&c-uz2q7vO*Yif6>4)U*&Wy(uj%$fy40si?i3?~#};nrI<kE9cb!
zRc;lgQ>sec3=~G}b=zMF{wt@|46C*mlav!LKf>&_`@MeK^bwM(r278hX?$?HQ*phb
z=A**>7<7q`DkbAMp0$govG_KYMEV+ip*7Jd%D={<xcW{QIVaFSop5%V0%e<I*zOFv
zBg}@d0t*mUfnsuskD`+g;@t<&I~0DpvkAmRngM%4v5;nH#|=tSHmVO)gO~`SV8KPF
zz;6IBX}R5CA081)^cJCH1;+6*%wLAK2%vruO5AjVZejfI9?aj1MrpwyYY0E>yw|rT
zv@*CVfzGA=)&sbYtV;;0y!KlU=2e(#@poB1!v~CQA699>Xf@zBl@X8V3l*C|HrXLr
zU&&>bEX)tIqUIbO9Us;YbLOaumt}3lnn>`DEQN=HQWVp@<Hd4)d)HRBKNRDthHw(m
zykb`Xn5vOGP*{jmh?>->KtV4r$5LzHuDr1zdZE362Ehz$(~LAYn>=~}Yxe^#o`a*u
zc?cSWFoh*P2*h%=C!fl&Cuy1<y`U*$#G=B2^FZS@#E*;+J82@P$UA71vdNrGb;Fd}
zrD@RM1RF$m<5^@K8wClFRcEz7+vV^>TSmv5W`@&D6^C0bw2hU%115oFt*TKInKN_b
zn}v4?xJ2!rV)T2$!-p1xT5>a=R;=5sqI<#~MBvhR0ovwF%Dmm~&Sp8&5<<Kirx`;W
z^P;LkOBt^{+)t5|WCc1h;xsRo6bst+%`%9_P#y5?`GYwq)T)uOAy$X2_NrM(-C<-F
zQtI~{_RUi$#daHM;XPexyApYFXpZWO=YDdyVwH<;1(4UIEGe@On8g*tR)*$OynXZn
z(<O4)Q9JWt^yi;2V_AhD1tQJ<N<i@=1$LO&6|EVO`u)`247O9#4Xt>LeAIYS1}-Le
ze}>zQ{#*?791$3ngv4O%SYQGXr4=dEx_jz2`Q1F{k}Sa|llorpR8r+v;TyzGA~`C9
zbYejofjm&)5z}I_;xBrLp#9!y{h+?XcPkGSK<WTdPr`>s2A^U88?-hmI9OC|q=p)_
ziMZ1&f%eKHx%fi}1#x@YHb|9hsNK5|xWM0Tq4zBiA@&B=60S!jj2FG%vcttgF=btL
z%`v~&^JiveK6tC5SpF*Hpm?XTL$tw|FtxX(z%Cxg0<ivs&Dju3Wd&Sip3PN+b}J%i
z^wVS>p>)FwwpmEcP;{Gsk?QI0X{_%cl~lq2sNKT9J$og6`D&s~&S7b>X}r&l8}ZBC
zy>hlSk?0QNZXyQ|<kZlbxQ!_bTb<4n)qSQmM($4P5awQR6?(Rx*RCI21HMOM!%>*e
zj-cYmcECLFmNJuW=7j3Jaehl<qZ;O`MkdPHrq2L&lQr(z^3}Tw>}FCnw4U3AwO0|G
z?!Jo$)DqLumdhrD6%0!WL7V<ncKW~<L1w)_LcjP(ns_Gt(S@nwHFd#OhnB<|jK8{K
z+XjcOo^V7f)@*vGX8Wvpd{k$<q3pKT6!U=4XE80t?QRN=hu#&cy}}Cv3xW&=M%i8#
zXu4R-2JpEtY`;WaS<m6dY*^K}?Zit^zEmK)NApe$|FlPP(|uDX9zcHCzqkbRpF)?X
zSOX~)g;d|tvJ)IMKav_*to3ev8#dwg?k0xoC9A9ckhKbGhNJ)Tx+Jg#vEzxbh@rf(
z7`BZ(4)EcYUM<DD)0-f6+q8v(f--dxW!Q+XcR`$|w9^P07J*#Di1Fi(P_oD}*gMU(
zcsJR1H4Rc)LT(ne^QN@^k|(B@JrxZr7B#5U@WsIPTql-2UNO;1+LML+AdEp-Tq<r=
z46_PeF~_?&nwQ(B_4wCD{M)f4-5N8kk-E?>d2VDChFX!E1LJX=bTY<sGfO}7@;u9Y
zP9Ysi0BkzKBnPh11)S3Ci^o5nC!5+x#z1JWZ1>``fdJD_Y!0CmwV_Q@V9(rs%V3ah
z&$bDXFT{gF`|MWR?B8pb)oQfO-p0S(V3&D7O?$z40j2f!TO>($o|3XJk<q)w*`msK
z;mVx?czgEKjkc5AP4;FA@1upb5%37W#4au950%E~s?Eiqj#$9@e95Q9=hG2#o^_|9
zFGoX{cPg6d%B(hX_{Ji{<Q84&p7ONiHm%NAmzuLq%2J7Ej<PhU=~^j@#V|)G83T!1
zvCuiw21b+zp|`xY#dU<>y`@YbDpD3TgdaYJR<eks<_QhK!mh!?(a(&S`d*hOKye{$
z^P1*g9#|U3Osm2IZcjP(R(bb9q8<tdk#tjnG(VG8yPteo;ARMAUN-6)jN7fj>ID4e
z;WzJ$lkGD>kHH7)^!LNVdb6>=L(ha*490wxNm%j9I%CS^$44)E?bT`b!(5ORv1p`N
zA_UmmF%I?;7j4g{Q!oQz4U|!C(<HUl`M)Qm_h?7RC{wX#wjwCZ27jg%a%Kk*c@khR
z*2z}mZG^>1ewjt%(cqKzz|#(Zq3P!t&{Md*&;q2YzdROlk~#UAgKZjvEi$1^&uID3
zxxr8kSQi^)YK{sFwXxt(eG6+1HMd7;0AHjxD63BI>`Q-js4PO$ir9%MHQpnXw;K%_
zL^D3BzdCO23;u6+Gi8(w$CW0<Owo8+SOaVT8a`KKdZJ!iDnkNREjP6q{-M~>28UM_
z)Tv26D2o51qv?krhmd!iva+xWIl-RQwge9&4%6#(@$FTmN}1eB%Pvf=HEEUskaGlT
zU9&_rgWdMyw3mL{cj%&nqJ{t|jd8!=!HO%%un>U&={k6P;`E9+Q+6b?{;1uJuqJv-
z8#!?u+(*6a{w^<Pz1iG9dUfiZW>@Hkh_^!K*xM82Y3E@3)J)ri^wBrN4_%VXBvCF0
z@Z<4EasE%=_1~M$=louf6ZM*IxK&l_)eE(hz83woO6s|>a5QX9|4=y!Ti^vJQM9z2
zw9$f3^@v7Nx+AoeqN4q3<N=RgdLbl@L(x@oWn3cxUg@V5o2LRtz8=uyHq&D~xgw<q
z{A3Ro;s%v$0vuxvWE3SY>K3uhZPm^coUGnAMq;S!coG7osY>vcH%G5MDYRM9?loy+
zw8+JM|Fib0bEozN_bI_E$#uc4r?%=3X;h^75Ig42KYiOzzUCd}40JzdD_OY#^O4*d
zBdH9GQKo?M?|wC)uK=)l9h$pQCgx75O2oXo9V5P^c8n=R_9_0@rv*gF?r1*Lc5Gm7
zB)`xV2K)kNYVlvE`%bF-z=KEpw@dN#-`o3bLHnqXwTB!G=}DKJ*>=t59j)YEjQYFn
z-Q5O}Y(L~xdweN@S51mzf~B<pA(_}1L0`=N?TsXLW5|fLk#S8_*f@MhDRoi91jE5+
zYx)NsZN3HFi}!cyN6r1c{d(hlxX{k_%Y%B{tna)&Iu;=Qo>3HFsO!l-%_{1YRHlcy
zkV=NuehjOmX*en-$$ALZ5tha^lZL2D;+?t-Pd8chZUYMzvU1Ev7w2$*RzQL|Jv%u$
zZjb=u=i>`52_F{lZg*Rp7N@8CZ|YPm8>L0*oih|3wV+&ZZPA;uY`zwI@3aAuF!ak7
ze#W~4^{$c))M?|OVax8ta0nqXUWz`Os>{qC(E`&^7u>kd)3>@)WO;Ai)WO?=Q^wYz
zwi+NGG4Yu5*>M_nNo1L_zz{N~gVZUDAXq7gU==yLw}9>7PGK_<TrHP(Znagbm-6dF
zus5lyqZw&de`Z{hDB>lHU}29{uEBX{M5|+5F<5~Bn_k@J#3NUeWr<8DCoL`+N7ZHA
z%d=*4(}+5_d1Gsm8(48HXnKLF^NM}5lfIa>wJxD?TbGcVMQ?#Jt%(dW8Un3W5H`aM
z;TEvfEK{e<3t9<EHL@>avsr7)1~DAou*hQNU(5#eXV3AvUn$Y#Q1FLnXguv+82)37
zNqWF7>DXs&LFwHK>9j?;`GE2oWSTao-<egT6-;B`G5?yhF2sy~1XZ^>4Ny4fJ=^#h
zf*D?cJ4t8CO1B$s(1rMW>=+{n>~A0K)aBf<KcEJz0(;Z=>9Y7<$Gnh=G9RMd{q0v`
zkBG$=0xF!bHehO!04)7uL7>PnhbtQ#ABa13!SNlv!fl$?I#N1ksM?*)qV{ELbkPNG
z;CX*4kYDuDLf<P~@2GaYy)I>n^Lc_H9cS~&WGv99vl^XXLJv_J9z@9{8p;b);u?5_
zh!)`m8B+`4T;Qzu*PYJz(#ymv<0NFROD&pqf#F#`I}!{tDmZitq*MICJ2AVvhw(lH
zoj4Q>dwo}4`w489J~ql+Q%v010e4mV`9C*RIbM5mtqqip59-d<aBd%BI1BUvgXcaB
zGi(xYcx}WqkHn-51>*opDWEazt_(91G2!7=;x{b+sFLV5+F^Oox5puRq#T(1u7S_c
z?|G;e)yr|o&@X{xa92U1CB5On?h;{xAGe@O81N7q5~I6FE&(BH0T=R{kPvEH`skWH
zRU>qgcL{`2f|ZxaH@!D8eXYZHbn=o;>3xRXPQS&PAM0nHB?wNruNmSPmr91faab30
zkwlKY{g-u+>Czm7U}#BUBn{sfbZXDk4T~D47f-=!93QO4*Dc@pO;sN(fLY!@G!rXQ
zdO6eG(KI0QaW0piv<fVwJ~9YRs0owtfh%G~CWH{E&9Q7O`Pz9E{wg0k{|ATVzvCPr
z+4(;<)}O3BcJqJy{ABHW{*U|k+(Z5kC#402>#$N?+)rYV+>9NRnIkXpgl=Kh9kwQy
zLiDloc5s#e`{AsAF=`FcqYh@m@aNy~B>G4fN+tOSX8s7I^DxpWOFO^rFixTi05ulh
zH|_Krr)N7mf@udQ!kUwv_Vw;g+-&@QaJ=&?CSWeDyIG5&M5BIC2c45qDpdyUjxn!A
zDU~<_-a|UywGt#*)|o$!kLvO6Z^w;Y;5Y%ijfyiQ%!$FHYU!+BaP;zl^3WKyX>SS-
zLHrnX2XvIHikgYzGqeC#K&Zbv<ip<WtAYUu*YcGB;1DTlnP=hu`~UoZQ(6G%Aepw>
zkOxh4Fe<yD9!#L;BL{2&W~VC{lJO@Gq%sthY90d6stz1;(B4P9hmL2TMs2}_jzJ<T
zr|z>|j`!&X-b0JFhSH`MTLDH}?5Kgj;c_LH1%}I4{w4i9(b9tMS^*;LWh;8Hj0&N~
z{@K%w<>3t7Nmc)gge}<@Sa$mR(T?LstJ+eYw7T^yzw<?WVe#G`#$-6d1~r}Ls${Q*
zba|s+@s(6wqRwd16;Qz*`HC6vN`S%&C~a7bDl96?w_<z0#iz~fMic&T0&{w_fBIU;
zzCQDY-p6+jChr-<ci_GI9%M(5_%Fs|*d(xK^xCy?pnjTXr_i9+t^sF(hu}qS%d8~6
zvTyGsgppb<lO2w)#B$2@i?1p^0Q?u{a@+Ms1E6+toneA8`kN#E80*aK7%QSGNYr*H
zcg548MOLr@Cr7G_T~2qhwArO|6!^MTLezLLfYoT}q=vdf&H^dZhNN)Y3V<sb8DE+1
z>x<yGYn#S>8Ww4?d1L%SQpBg-sWxXKfeIg^NTtJl4$&2-yjC-PPx@eLV&yz>wgk)%
z4~>hhuBt%P&MhuXz1RlFZ_ErSscm5I;PDklAxG$aC>lT+^Axf&35W3|)t!ClK#@rc
zlZUrLh%Yb#P8dGFhyuUvv|kHeO{~H^TABQj#FzNX#FvtRFm_HEiz`_SYf5KFXQ%bu
zvV?}>L8$&8z36<balhtM_`52YwjH|AIQbRVJ3el{28BfGu#>HC334b8ySL)7!(edY
z_=v<@p*?~0<EusnK;~VBvD45jh_NoH)1m&+RN*7`VvGO2&<*61_1QZQHX^_Z(|hOi
z2v3!sEBJf*>;8$Q`jV?DUpnUigQoAEf*t)p9UcD~Y#!Ij-@fwpy74*+3C5gZh3j&z
zGpyZ>9mI3n>kkIyzy`tHGk*ptiUn)i>B^kG<3zA&5H%3rQx0QDVT@Leg61jb{4;(>
zdg59T{XO_U>0=5sZjJSr|D{2Rp3?%+k4MN}dzwL553qZ=K0-){o5$h^dR0~<QF=4o
zB=iWmcP$9uD!}amx7EoX7e*H-rFfex^Hd?ke0P~bvr)&}92KRxRFI2?$|U?8ogE7C
z-R{6vz3J+zhG#P(3{cEJXWU4|N3{C021gi#Df5*W&icIM;X{KFbSCB@hHxzT=SHfA
zb9e-L;4J{vJO{}V``lp^2IIg;b!#>Qbdw7g002}fap&WB+Q|U|HW`(Z`%nON)f&vZ
z0hrvD0WPZns+>h20uZIo*AVqkv3?buK&SwDFgd~6Q%Kp~`3T0RQ6vvwddgT<a-Y-H
zL#d=rYbtz!11`zJw>_gB$}FZ9E|gI82vTX7E&v;$p&1|ng{~%-aqP00s+tNg^Jf=N
zJaZK>cL*ib(De|`3?y`_D*{KiO%#(Dm2BLeqHla!sl6o0>L5i#_aIcTBnd9{0Q!JD
zGp{c)&iwrsxxDg;WTE9k%2T1F;sJBbm1skSvGJVGOgJ6NiEOr@<O&of!$ds=l~nKL
zTs(+2;E=CL+j9;%TLy%#Jd{w<VnjdMk*<wKEX7pHdD0l{N*SG%D`l%ec79}zs|crn
zIEZbCHJo-Y(1!%KJ)xrVCDv$0b5d1%2s_OsB<VWH*C?cUPPl2a|Ln~XCfI3$Ik&lf
z1DI+1sxY|>2*dtH9ZKmZkr<sBEBfImE`$)3gjAS{QX=^9#7ikl4n-7C#c}UGRU5}5
zlhnx_5J>9u?&txvmnH{^+#z}EB^?+%6vNtun+xnF;I*;)9+(<f+X>2EXYQs%?u@&j
zRy^IrGr~K@ju4@e=%i^=q-d9kXeHfFHmy<wD%EmDOtpbn-gK$7V?Km+#Gd^E;PyvM
zXJJE#A96pWbv%k4E~#B0g_4rq7!mGhi>Htbg?wb}(voq8N)-}vw#oDpHgO|8l4jgd
zyum?<$+V^)7draM#audvY2Vp3Pque{tv7KK9&W!itJ`ljJ*-VOR|8oXcp0!Db&KAJ
zY(6E&Bc36w)ks76U@rHF&D*GjrRJ)RNXZRUwBC)xC)B;+*I#5O{6JESqFWoJUO2v(
zS0|PXjx%fx_PIU217g&ujOdnru$=R5G*&089^*rk#xv8chZ*THmON}6O6A!t@KL8k
zZ_Zt*=pRkgN@NO$8%}TPME8S@UGQ2Nq=a{(B5g{zHAPQbwMo0xqrVGwZknDz+hDlV
zZ1`R4q=F>4-2IW*ebc$vgPoNAD-|wCD}ZFzKE=@uA%Xn_dK~HJYsDS%z=re7L`S+7
zJNl+Zz1jF(>Y6-}#IPAyB1pdt-z5z*vS@<8Zhu5SoC)|rpHF-PeD^~Ss<Lth>k0Ja
z(ziUQ;E)H%7%$cSr`?YLW1#<TCo+Y)^nT|Jsxf$##4c;Xjo9!~3+UI@>0?tcnv0&B
zikpmFPI5+^AYaAGA0LRFnwifi`-t;I7t>NY5MlKIC&A@H?PE1cAFCxo|5EA3r>bYw
zv+(dgFcn8)YE=Vz@kL2ifMVS#!3ycCLkjxL*^o-*^lo%mTe%~%He^c4fBSVI<w<A@
zdi5o%roQ6`<GH1-_3G}e(KyqXIfc|iblI9kV&TTq|M&m-|8cYtv__Y=FVT5FjcMs=
z(_0c)J;|gO;}T28PIXnd9tmyLQj=>G)@{5nD8$`O+H`gW*4t70va2qRp7WF$y>r^4
z5BjIQ?0g`q@@C=C+^R%j<|u#pC0b9j$D$e)eK3#CKPBCnPRwLHM38uue3_fL!V67`
zOC}inRO&`--9nf<i!_-xy0nA@)ycwQg2i^PeVc6er|zsE-)$m@EnqCfe7nsU+&5i6
zEx=nx+AQdQ5PzI`$BD_<fSAHXG^8V6j8kNY?u<|V5=`=!9RD3~${A;~08JntYQW^F
z96pwdamGt}j#ZkTg7Rxo8mYBhjtpxO=|ar7G=Sp$Nd|(26u(x8AJGC<WtSlR-BD~A
z2Dh6xH_O9{QVEZ-M!y+TJ;dVM80%TXvIk?mat35q)8isTxb36U?VaZS@lm|LD<d%Y
zh$%I~;akP1&gZjF9-FHWgKU``O4x1lygaKLR@~*vwg4_Pi!4@(y|;JH0xZ5e0$0Ur
zH>}b8(To0QRV$#K7ruO#H&HV%6f)pnA^OP!bIle#qckM)@G!v+&p%Fhb2kG8n!NZ(
zk=(h%e9lG$X`{`6(Vw3>g6;Go+KR<#<0d*y;<*9WBMZ{QS^okOOefKJ(20kw*&o$t
z)cuIx=xuj6Nj`zC=0o|sLN8jKj`g@2o$kL9ELYa+X1#H!RSE1d5{uGMqAUu;FTny<
z99g>BLkU?C{6M)9eNG~%Qo#OB?_2*xfWkNz-4wmH?g6#y10-k&I25Etp{=F4SH$)4
zh=R$rKSjOyV89*vfk*K~ApzYC=$}HAdtyG2Vv0>{t&=>okzF?1IG35jW7sf;YZh~?
zsAwvA#c;+pzg`y8$3}`F;GEGALL`E@NA@c~wM0Q4vc9){cF^2R509zGe3CX_ya@uK
zH_Bu@3t)~xcLMw8wB9`2KK<3$HY8Z(J6k}$&hfMhgrL|KvUPjuRb5KWUT)msHoApP
zQ$E6yl8SfH3V)G#)SpG@pYmx!mS6n#(F^%h03g3^I<IH*b6KS3g~xD!8O|=eV1bhv
zzmHbLf2PI7c+#c}glQJIMD8-En^B5s_$3w7I#lYiX#O2FwZ-;RUVLIr4_n)8J(#nG
zO(u<ljwM7UrhkxZzkpsG##jQBF!)72CZG0IE^TPh4o_c+zYPq^C&URZAgf_NXt*kS
z)ObU%!tK3m^#``N;!AsM;e6crBrogrY-y-{%Vr;(v}_AV9vZXcg?oHH@0pkGMhPiO
z=_2O!f``wGP?S@3HdBT0Dx3<@O!~Me){+HUVU8*?b!mo2RLZZ*$C+sSUCKjPrvyu)
zvIxtSS>jyIR!^63cU?{iaVn%s7>lpJE_%=6APF&JE6&nr#Ni)mQA#FhFpMxj+a-50
z?wFCY?Z5-i*mk+QQL4-rZ$aLBEtL#79goYTTwy5Xs6m-5&hvke+jDt&GDJHuF%3dA
z?De|SWo_#K;Zk<CcV1)kCJOH<k1MN1veESNewlK?AOAogK!E}~hb03+WFj^<<ulIV
z0o1L&KF5xZ59^0s{>xnP(AdF+Mh#wR$1_H6H+tLQE2U4-36O*=YX-z53E<-1v?~Qe
zxp}vu;0a!_cTDQ!mnXdTHN`lWjW>v8u%pwBTl>AjmZ>KC{OHAe>EG)+>ESew{3mAW
z6!K{N1&b+wB7fDu+Q{ZjyXew#e<Ur6cm=k!*^se144B+SdbPCV>K=j*-9yeg-^{bk
zKGw{(ZJEK8Zyl|pEi>FovX=$8Cq`f&YZ_Ezzz`fv1<i6nM6z~b5R*8*SNo4gFNAP`
zafyAy<)B(p8KwQif)h{$Va0z7ZS}U9=DBkwK%Fw84mQP%9=sxRaNoh;UgZ6-Wt~{5
zNJlWl;Nl`pVnQU1Owgf=8Ydf5$WSqCCGE@fMlgG}_1KHdg*zUT5j9vEoFn2%tvg0%
z`+Jr+xVGA`Mp+V?L-KBZ&r;d7Dl&<h8YhU2;#7jK*1>{er9sBcat3(_YQ4U8D3=?3
ztoT15KFsbq`cGc`pT|#YKiAy&KTp=yzsLW%kIy~C|H%vUW4<K}%nxznkB$=vXY*m+
zoqO8g&_0tvE9s4=Lp!z)?0j(^_D2_SZ#09rJ$A&K=5{ka+&+na+uv=zj-L5n?i@9v
z4gV|9O+~fq+MeC+-_Gi1Tx#d|OjO@)G`4@oqXsYBq+EOXk*DgzWZKp-ZQA3}4D5+?
zA_J@=mUBP~fopm&`$*5;y%(o~IHLZ!JiU$wkK@6HC@~+#s1aP2@#P^JsnH~NO@3){
z8_J8?Sf%p0y7YN-VQ_}6wg;Z}v`wwf0+2v80woe<MbR1bPnEG^j5av=wMF6cwysOZ
zN#VYygRW5-LcPRKH&};Upt@MVryFklCvh^4*Vcbts2Ne-X=iQy88o#1<jFz}iSkZE
z>rb9SLr>P$7HUY8cN!9e9fww1e_XgFfoL!~QN<lzZG8h;8@8?&ZS2YVx^C=zI&O8^
ztyxlhaObe^R09~Je6a?GRs+3h>jJ`^7Du|CQF}h>073mnw|MorQGI;1Xzj64`|qvu
zb0IJmuKRa)vlO1PWHNQC7Tn*-SXN9H?=x|#F0{83y|<HF?Cr$Z+sSSAc4F-9<TiUd
zG4^(H%e|eLdpo)1-cHQDo!oG1CT-oojfev^04+e$vv}0H>R+@38iJcQSP>@O0((Ua
zrzdKPDh!R&*0ehmTX)gsro$_LbKDMs2#4tc@QLEtZ8E5fG%Mf?Hj4sqcj1}DYJnE}
zv)K(A6t!;1^Yl_IPiH!wOuC&S_-T;P>+wkZ>-82omnS`q-#2KW*G}Z9P3HA2@~x-8
zA>SL7Wgz=B+X=SC{|$0{6om`DtG%yOd>tHa?_dv}4iqDq*@I>`LtX3zd+4P*@bu6N
zcOaTRY3v`zZ?+H4>Zd|-@N`e}mmcwt>1qCvph$tf;^_<^>Gwo$_^w$$Z7$9EB8=f~
z3l78=1JeQxbR8TkRb8Y(xN~a42)$*+LFX^2Sm>%|w1R)!T)Li{CvM80Atkl*IFFnD
zZl-h7kDY5Vf5Jr-H4bj8rs`ml_f_-|nGzXp#Pmm2(OC3Mfu{JOPq0%;RGJ&>&O<8m
zRJ`_deuVyLGEbsOYuXw{)B+e-&Ig0lWk(lmn#~wN?0mwg5z)>V`;S{uP%?ejf4@nk
zX;Ok^&$;YU32jhkk6wI;``iPhRL$>0RT~$nmZYln5yGrW=bpauE2VoPPC0v29j^76
zjdDfBzD71Z`<wc;L_0az`qfz1^!9flro$*L8}gSOLs{J`otrwk4n3tIMxjPnT91BY
z3hgTcQgwELdCvQB&ikG@kg*22wprNf`y&y~m<~+b(MTaW6ozSq10nb$6L1En6q#4q
z8aiShVvZo(ax2RI-FuYvJMUMOQ>9@b=EQkC*=}qfhG+(*A?<gbKbS||YqZ}d;(?%w
z!$G`Q<%<^>qfB6w4!`ft)a-6&iVnX;CQ5M?-}XES#)0Muf%VSTGr_?1$J2iD=|)qx
zw=j<N(kLd_4#^vdL$4yZ4GB}_Tbfm62Q~Y61MbAEXfh`z58x_5B^?x1#UoTa$D;D|
z`hzc0ti#BR&Q+zEb=H1JGd-#xpDrmteuR_Y0~TbW%Zgo`F-i73@Vk%f1oYQ8=kl;>
zv~sN^C<>rApG|@9{EP5?;59Q!G-t=p6h-0f39Bv2SvwE6ot<HgP@g6S&>1jXEI>q0
zNM=ODAI#Y#qSyAntio57`_(#6oTo$p6o1LnFR&37!PvAkOwUf8&%|nQHQ1{I@G?j)
zTO;1C_D<!32%n$zT!<+yrM9y~IlIct<)g2IjC3tDOgTa@2+{2HwZl(W6$X1pM_2%T
z_*iWF;MaRT#9j2ko>_;0BUs*vKHjfl*v1GW#tFg%1{>uWSZtL2Q(3Wt$(b#oVH+>M
z`19kB88U)(9^3Hy&iq@$Kl>u^@454jMPVaTY5ew!N2I1#vQ{bk2=7<1v^@p4T0=57
zjqIri&V`x`nH1jK=pMRc>~V8v^A8BlXJEhF;6c_|a)LOJX?K==f_&S@f%kluhXlR0
zH6n^gNf!i*=*_wxIGskeKHv-T55w*dy_77T0ke}ZpJ%<ONWM!}^MP#E_M`8nzkyj!
z12dxmg=e)*51aCDi)Jado|p>@1Fr+<r#a{ENO&JeIBE?!)p8sI4$epO8Ok=HWRX6;
z8C#LeGc{R_toG(3a8?puAU~zaR3lOtBBAC;EXPU7wrXaHy7940F5U&X+8OhylDW+a
z`)_ljRu`$@GC9KJuyu`Wv2lX2e7RZ{Le29BoeHz3RyW3B7KCykE-sl4z--c-PCAz1
zYgKX#X<d$4fKboh1Y+-vd9;O~(+ZR?0o<K2U?cH!l!QQHxik}MDzhvV8V~021Gp1I
z&W)=3R3ltDkIgEmSZoajvI}Ghi^<z1R7tF0jTtG2{xZUK;6MvW*%ld7!nsYvG%e8z
zE=elD&sZv+IwZ;YMOs<~va^qtBPkH_@{uSm9!hC9(iB7<veHjL{QQdIFCm3Z%L9`E
zHs@R%=7pUx#ON{E1+oY%0!+IXGc&S9?e-M2nA}U?;ikQ@#QS9>+eIJ@$+X|T?0U0x
z@Jp>Xp_(bBWPA>8{RK!RnQ0)reHF7Sn|xZND4PMtmWXl-QvMDKzt<{vFH039`)Y$L
zz_Qd$p}k({&X_dtq~@YVLf6Fwp@lMv5~h)uM`GYEPY_&=zSuJ8winz^_uAm>B2f>R
zKHxD0wo)s0>;g6TRM0Dh%6KcQSB_edH?h=?ggS_p3umqk2@H(QDT%<r=c4k$QX1<m
zB&Q6yw*)5DMRDjHV9>%OwekAd8!+6P{yJM|Q>LUih2pj_mbonpG^9b(yu-J=VZ=<Q
zqzB$E#A~N!+<aSEY)eU`WK{^uOOpxsXXL1O6l1pbG@|M<_Cy4isR*NBWH38}ehiT$
zukVm;4yt%u&RUF0GS=`_R498G>s3{v!u#RFY&gX*Ck9h2Udu-*=aWs2mUO|flEe=v
z&ViF`vn4Jie#!}qeoA?|Y)aQbD*Ix~u^~Fef{pqsh}h>GO2^qJlI#_bSGG?##DbDa
zC+^lY3_#vKIbyzA_$;6@q<)?n@RFTBtCZC=JV;Fg>XYQW|MJTg+3`94LMw8vbueN^
zIJOglm3{Ogx&ENB;vJ9tee`fE`j_lN^oCU1sy8iCZ>eOWOxfHA(Ze*pR3Hv+-dN0`
zu#O>bFn?}o)ZNq)fGiD#IYVkyu6;GiZPqbE$e)`47}KI)_?wkf&Z8cijHd~N34Sos
zo9Y2sJRlwm1jO$$xeTItwESR(ziQVHYR|4C!ft_+Yfmd>hZI2v5KD=o;$BDp6)iX3
zE=SL!<>p(ir*}n)3L2IEc6!*Ldg9x%9==s9e}<o9`64S9Is}wChjR+V9I@%16#ZY8
zlp8cz1v8V>7bBNla3He`LZ*s2V)Y$ts=M77Ig5I4cQ--Jl(`zxcDmk-x6hi#d;13m
zya336o(f8w?yfgzUCe;GKy;IO^L4yif8(#dsf?=cciC&jm$jys?Q8ojfB)Rg$MXN5
zB$L(4yYD00|G&1j{`1eS|Nq+4C*S@5@8fe1{{KKYM?g8_7^8hOPvX~ctlVDgr%KX;
z6yvi`qjuaKway3rvmFkBO;43hf3|QP3?MnZrUyxDI0@BeA(7u=@kzHGt*DAz00c0i
z{^m7pxbNgH`t~;8>ik_`IsOhd9L=6?M4j${5)%d>%x)kB)Xdw|$7w(5X18UkXamji
zS^2fM&tLvy(f_O7mmvQ+^nb1Pc>Re({~!PS^ylyN|2{tVPygTSdB{IMEE+537bY2k
zSU_l&!V35GwQNPa-NE^M7EjuJ>;aW5`Zd4eXn9nEn0I>wmmdLc%f<6>9|NE@&JOCQ
zm~1DCHk=Zt$j+mOMMaSN-TgOl<M`}oH$FSTht+7KlB$BC0k?O4E!D1{F=yp1awN)#
zK5N$-vXoWq&0eGaHz837F^9ZcYuB|kPYJlY-QI;TXi!iTTK5%_Wqv=}!9tHch29)&
zA5kk%V#8D7K!DBgh!RhH0H3)%J@u8^KG~0t>aUKQ``gVr5AK<F{QF0wt%+ZWeyQMp
zdgp0(zY#ZI9vJ0XzH+Ch@vrsYu@nyC+*4?0`?MJ!9`Dj%sj;?iI`)r#jq69-FAwU}
zZS8HX<0-d)a$3h}k?YtE7TT>J$U?mU_&di(djx;&dd-V8pe8KgD(>xhaUSsx?-cfT
z%fmi~=)ri^;KW?EpFE8Ig{oGNnYsF#{YLX_`yf6!K0XjzA^H=l8Sl?daAr=98%^=f
zc(Hr@+fm>ZAfM}^ZM;T^F;)}0S!Y8N00nC<1Wzj0`P$Qn&Syv|m1hiO>P6r^(y8v1
zAn?%&e~)Z^MDsX4+1Zy0MAM*=%RhPnVK}aOGt|R%A%_^o1USErTa&&d(V%m{)qeRF
zciLJFlowU!;hOtk-F@)beXs$cG|VSQ4T?UXAi?+?Lx|VSFzCH>bbM4d;LtA)&kmaV
zsi)fq2mTi)jpM`p(;XS?LFtafqM!xTKlu&?`0dX09nWvQL3HV$egwD3u#&@eeA${^
zzT<OG(8&4`go+m=AI^jKq<50VVvri>ks2hI5HQRVafCfz;|HAn?2~>nn~cZ3xPAEt
zi%~}5>1+l%x9O+!E3`zMj)(E2XO!>Gy3;Et_Lv9K84p|i5r#6y-$H_!l4=bvp9FF+
zV(^ozL2E?Ue00^@Mw3T;@LB8EBvKmZq&eM8B;=Gk`+(mH(V-U&>F+z=p$iwkpS8x#
zT+aS9nYRW|HR)a_ysS|)>4~lQM-2HV#B_Sc=s}7V{<4`hx>8_x!>O!GU^<8g;dv8S
zIfN0Qfd>@bAEd4_!~+)qkJ^>6)P^3U7~dCHsK*yMYI6Y^!|es_zPNh@L8_*+3H0f_
ztG^L=EVkS639up6toKJ51;jqvsn>Tyuf^h-6M+N{G8iB^V))2{<)4I=G$y(C^b}D{
zc0fi7*N(x{-6>`Q2O)6NlMFu%@|UVTCsGiq{Vba2(uzr|lWKM<sKB&+m2T2kt~(ig
z3V^Oi)%tt+q|E_v!VSu?jF{e+Jb;ORo1Qla3jPmT<aFzBmyZ#egYy-^DKx#7iG}$=
zVm?UP#t*UKTJu2?L&$>ESY||C!=2&lKf0fgfJg=oA~C*a121f&muecxoNP9Yb7IO(
zde~1duviAvl!V(Dwj~{<ABbjxT1{K+B;#Ch`cdae27Vi7P8z#&pj0WJVV_7ubI01k
z;CI6;7*rXq%^Ibv#L7eBWra443noz>&V&BNJm;#=sU&c{KsO4i9^zJZPw6@mRdG2A
zRaPJZk?K!B2y`BG2Hj~MpjpQ(!^4NrbUx??t!t#7E^-}N%p-Wk_`#*&N)6iOSO(*f
zIT2>Z;rl;2xKulaI`F$zaM6#Y|6}6cPNsvqR0QVf|2Ed#`=2LIzw7_*=W{RdpH21O
z-u^*TFgiQU#(_r<=)Sd7dr$z?bq5>qUVXcH)~KIC)}+*XqZLOVM;M-t1r44IDNWsL
zIPk^E8SfwM*55|1(&I@}NclowR1um{ivhgc_QC$EBea!*+I!8v?J-EFRUJ3w@jAp!
zdHK62|8l>HMsXZeF1w5ui2ai{8>FWw_a|2yXvXyD#bsx@%FJopzZ`(t6O|E85jbjN
zAtr)+2U-GAEb<q`)Y}vuS(GaIOL6u*aXN`2C2tWG?v}&JgnR4@&LA1bgZ@=F+#}>m
z!EEuf`OrT<%^eQ8T~68)vwiW4O*D9J?DsPOa@PG2LFb<L({B3;GIEFBW6}UI$YA~J
z_+x*B6Xkh}0}8!_U-Rk2Ki;!lIo^54g6Bf;QJC35j@S1-V<Jy?nzH3tJF(ztU8qK#
zSHAX6e?JPY%52(>(LPSL2YkH+CPZV+DxlUaR6s3UunW0z!4~AwNz`=<sMYhn^)KJv
z(e|4=-^@j}a_&a<?>x^&F0ze%Wky1NfebpL81@*T2pM}_PTo^Jc~8yB6S5&heKmo6
zY7O_(VBG4+-Qh0-@aY}tyuBPx#1Myl2o#luEQswct_CqRlfw94TLdohwy#K#_qD=e
zf9(u3E>43)(?!V;wj3rxCgt#wB7<_2{3VEv@d6hhWC&4X`!@z?+WN>9rrkj+0fdJn
zI@fDzw7SHs+nTm7^?M1C*#;|Ju}n=ZGO`A`81FtT)Z>q*Kg3O4O0{N`Ac9~PiBdpK
zT8UtyM(=9xWkFA0ve23;6q>iwlll(L8yI*v^JcY7?~Pe==1kT%=FA!+z30h#5h(`k
z;B9<*eAd{h$H#kno*uczgJ|8{2>QLA*T8!<tLp~D#*mE}-QpP)e67=pF{50lQnB=)
zNZkIpR6@lqKG4d%$WrXd43$EFCY?%id;j2X@$35bZkgERt8Ox%3{(??{tR52J+CUZ
zK1T@Y-6@_y^jfNh)N|Gip|!90em))#AZB)2jW+u<GTOj${TV)``wDJIwM1Ha_S*OA
z`*ds0qzUqxz_BU{F)YxURQ)+(z)ECr=kLwoQ0d>|=G(?wx<L-A-lU{$T{7solUc_z
z=GA>LdneJkZpa%_v#VDPnttaMU*6k2dU3#L2!1+N?|Q}bSG$O@4En40`seuM)RN;=
zgiNiYC>l=%K6!g0P;5Mj)Hf@+YioK@*>pT65tYW1g<`$n;b&R8DWnuFMJp@f|H@`6
z-ZliEmoNskldhuBtdybDOjV;ASxy$|Dh}@CcZVAs>cS<aLXCLV8g|S6`7t)_N_#$q
zFF2w3r6GbLeEY6BwDo^*oqPA`-}3Z-PoAzlb?v_&*B*b@|J}#uKJ&l*GQ)&wWU;57
zeVY9Q;vuD2v+VCgTXtEtX0eN+{w|nME^R`%pl^QIB+z4wXxan%UTbT@6c}WgSfsm*
zuAFYHwxq3Oz9r*rdUDEpjUxTY``{Gz<@94K&&-s7kg9e#yYS@r8}tk30}Nyj%t{l8
z1tyrJxt)Fi=G5N4q0J%Tqc?4gwA$j+6uu_s1C=R;j52oiflu1ssh8pFoBGl2aU<T}
z1-CHJGovzG@KNH)_UZmkCBlR+t5;%0z`g`?tXDr1U-~S%fJK2bw+qhs@4#18FkfIS
zdRRw$1n9c<S)SA4jE3^<X}6Vtb&{D^1ko8Yz-oQ~3X8FIg;0-tfXXidokT0WiRKc4
z7F@LkrR2(ZHjGvhHiDuo?gC^e_->=7LpWG4Rmi=3KE;IX9N^C$B}Vp3jTEilC=IX*
zCg#>lRjPtx?t>Xefr@m2lkMhfqx?yuzPJAtyAV7gj9}dBv_6#|CPrw`1lDFSZnp;L
zBq83dCRg#S+pf@z^zbqKrvekgj|u?s`=bZLM-MuW9=v}3;PCl_Q%D5ep-kBR!9+hs
zm@+ptr07w^Ik1;8{uy?GKB?fz+8a;0qtv`CfBbP-dVxbRU$80g?%zM!Z;H~;h8XAt
zu-M(_4`!JB8M85t#?K#gA~m5Uw<9cqE+-hGyo|pqG7=Roy5)C$blilE!u>**gpIOi
z2!UbuK}L{b9|DjXeZ{cIW#2+JF(Cp`yBH~5o0M4*-6qrVxy3tcn#)$1(<{qSh^38e
zJ5D<=(Nc+**gO*7J(#^G$sLGJAJn<}(fPDBYF`?^=lucs`M@*Iwv6^9LYCY=KH^05
z$A^deO+i`c=gY?S(avlBezt$G8#nh4>y;o?DETOYA>)(;Px5+q2UrJ{zrB6g8jZRG
zRaOQLF(e6IjPgTevr3ayjhqMRNPuZ4VvYhRn}!xZ2_qPzHo`!!xnUg^kWWP{q4u+t
z@ZGZwM8!y)At2F5AQ90xa5ETTEW)uVWRp0%vQe>{WV?uZDqOQ~GP1%p+h|OA+){Y#
z!^*G~%BF?0@~yp?tpQL#vA4<rfGO03v}?%<=k821^mW;S;B_#RPAI}v8ovOLi|(j9
z?YHGTSO{QZ0Li;WS6{9?wOV}T-QB@bHtr8ER>(QvjtD0TtiG3QVTxqpe~2+z?RVH9
zl|v^n<07VK6*x4yhuG5WOXE#^RIl%X5Btgf(W~fB>(%~Ivk@PF#dT>FakwO#g&**g
zhEduwP}9oAr_72*7q&T(_W7?y$k32V(TVe>s?L&l7V$S^C+O=c)q*$!QX^>v`rt_y
zrO|j=kOV5X_23#vtL$`>NZzR+e^f34N)1sMfdcA4R5a~zTd<pnpxZO_%}07HIz2pk
z6*o>P*F&jHI;zS;22}A0{F|v81%s{?!BRB_L{4!!!4z#h=-@aU{1oHZ^n^W0wKSWz
z+uhkLJS*5#I2IIyEATI>dn}6CZCppYFV7(0pgvb1+(F!iKnEZDN&6CR(Zrlqj6`h^
z;tovfq(ia&b^-?$7_EvD=pYNRF{hp;(6m*zL96Cd6dNa-jki##*AkN3VvEInF0^R6
zdjb6jX&pgOq-#<B26!@@4Z7V)`AIEv{J>%@HhkWni-5x8mZsaj6}?Ivd<fvFl71-i
z<ur(>+$>lFv(~dUowh!CJD+h}`-_<h)^5HYjpNy9Je~wFw$naG7Y(|l?&|RoO8W5;
z6}n<R=7~5ifJ+vVNn3>n!6KI*!9%FM3T04HEJmxCVlNpmOG3OZToqq470y3I6kST6
z#0$i;`2;R=yZ(ru#>wpGyTze|D}b#M+A$x`sTZg1;iPOZAL1`oee{Cl5Yd2q#38R0
z8-lyl@T8Jhp)Rtn3xIFt;Pj1}0{<EZaRvL3juVagtNl~BK>_csQ8$57)yOXHD;qb#
zw@}wG&T?OkUk{IW1)p)ED=e=>{fqN>-XDuIUHotIPU6W`uRney9xaE31b*rblL@i|
zFglyo6R{TdgXy*XJ8^zPgdZ3SU?kppLYweL?Mm4ZGlJM?_o6=&JEy!Xrf*dYcomdU
zP)JF>R?QcJAw{I%`Vdcd(7oyotYTGUd=$H;wjGZvw|`M4)jib9?_HG2>U?^g+xPT3
z)%EncK+n_btd5iXj+1o9Ns*3|%#JY(R9?rTRH|=LF~4u1v~J&@Stl!>%YEfwh>LI4
zNH!3xh>PpsNr7sxGWH4=sFXOBk|LF0I#M&M>#4grQd!@KB7Ac@oODrNYm2MiZ;W-S
zg$2=0)w6G0mmpmUOU|Um5ghY*tNjOvc_1HT9PR419-foyAKvTZ*J{K@E?ssAghlru
zkvw2yOPS<Cfk!Pjtecc30P2*#Srg>m`8A@ddHg@_JO)fo{I4fZAJ^Rb-=`ZJ-|v6#
z<8%M<U)ZQ8==)*EfswbT&i&(;xZ6$Nb~;z3o&vt>&A>$_hk{A#nm4swBm`C2J8VYF
z4{Gbz&qMz$TjjxJxO`oS%H=sGUAV4P-qqe$t>0_nx3si1pRYUR9tX;8IOU$a*D#yj
zDWaMiP6dOzG#mRTPzHikf~gsrWNJ^Da!4N%{gmwsL(m!}Drrw#)5cCZ-J)wRsw&xA
zwuv!ZlyfgI5y`YaiO>59ge?t^L>8xZvB{WHy;$)nq&@-clktf9Cxw?d*)u{@yxCJ<
zAiNHA7;-7se*e#+X=|!ucx!L>XOZ@09*b?;AGP5Agj{k_3e}M%&^wpx2X{;#^pvx1
z+he(CpU<GZ5Us(6i=~J8W~dODZRtXxQoXiS2xCViEewny;F#-x65^q=vS-chlovER
z6{iaydJ30<lfPp9v|W=WxP=^)jVr=of7YIl=d%TZ9-6flh<hk_li_@j^wGb&cyCs<
z1-ercBK|8D+|a>+71@FjU6B!=LRU-#M%!-Dvdi?HRXPL)H{}I6&jUIaWV6+I+rYZq
zrt{_^?LiGlIT#e}BV8ZF2N_TREPNO8{nh{)El9s_(ZT3kwrzR88;Z}oTn-otiQ^_z
z@qWh!pQ!Mp-zk(8Ldg$Q4d}A^MqNQusrJXc9y$tT&g@K{HDGXUhZKOPbi8oP>&z9B
z*5&4K?Y1;YE*^5=9@p77aK3TL7`bgv-1crqgNag6zo8za^|;0~U2}*Zn@m4Jhy{c6
z_$qR`a(RT}-Y|9=o|xh%48od7yORzG-Wi=lx&u;8yY1eyA+ja^CBrXZ{hKHMJ$<tN
zcr6wG^XKpJpYP>!Px9a5KK#u?R|*`WO^7W9nsGpvo#)9=iE8Or3t0-6O=+@Xh?8a0
zb~pWk<;=>R#}!RO@3*NXnKNMhs>!REsdjRm5l*L5DBD7(K&A!gibaPN?$%1Xz|Vk7
zC!aLb-9F^Uk6z3!TN82o2(A|__3+?9RjMj59gm?!ux;23v_YpXSM?W3+UmXeYy{DH
zfN3BzBd5jCTQa;OJ3)CZ_}Mf4IZiV7z5sSDd*-7R`**4nm<;S;gu$bhN*^zwTDdIW
zZACQmd;5*k!0J@!j-Sdk>W9Z~>fVCYh#Rd&@ITpsg5%G$+m-!M>Wb_oD-NW@k8B9J
z)J?!kAscD_Ei`AVH=`xv9dr`d6rYWOI;$EQuW#FBkKI#4aIn@x-*ng|biNTMASy9<
zNqd$V5H(<nNTI3ZH`bL<Q*y3VF|jqAUBz`^HDoDBm;-^kR)-%b2<euNuX^~mxcN5T
z-FZo~rTZ}}i_J_8uY!e!_>JEM8!d`&s!Udkb@}FS_iwsix32E-YS{S@JKfUe0FfHf
zA_Ai-+kU)`0dG$^%b*^PEU*zbcTQnRjr3cdV$W1DD^Zs&xTejK;9`Ye_-s<!tx4;=
zKj<fY&Z}5hlmTQlDn~2gm$wvJnG2MGKZsFhM`x$?-7?83Zn`|QOchN5Kkx(9N!cn<
z5-<UNBp?O{2PiKMmEgPRk9zITaNnAFmW{-WDFY^~Bvu%NS(@(VA@nGN8ACYSP2~{*
z0|qw;+Ys6_Hx)c(+mO*|gp>)AG{GJHTJ&QCUdPSG_TJw9PP}uleR?XMg;csmGk8rY
z+kv36^n46&T0QQij|^$MHDH{;GI{Trij25z7i^~(pOcFg??9N?sadfWD2auJ{ZVca
zn$h(qd^0v53^vv6iG643YlGvpU<g5FBb3qS`6uYpExl$~;+wU^pJ}%f9IJXw)5eeB
zPfmIIO*Qby%PgNi>0B^l&aD!%jr?imYDH(6i~A?JrY4!|v@dZo+TvL5OuKSyxtoaU
z%_gEMhd$`*ow;ch`q=wmb^st=Ovm#{Y8s8wSYTFcl2o<)2~tHDnH(tpu?ym-JHGZ1
zu5`cvw=*42{C##ljatKgJDyKEwCpNTs<kuzNH`{vOqxR&B*~~axb_;`hp-ZY+i5_&
zjQkc5*o+q9Jv<5ZP(DuaVgs4<G@7T2xb!QdXlN5>l+^oKzA9dqA7pH9PWLBQBWUw&
zANdY*iG~yu6{?&v_%uWpC?-yM$$t8ZrcQc&wyRT@PIzv`5bsujqR>&FcM6}r-aY|a
z%f>M}-H}ICv%VWQwwrJ~%Lazs*{pQ|Z7gdk61owrGg#0wrZ0P0fdG^`N%W1OgqzPi
zD9`6W1}IPWqQFkNtv_xK=ShA0*9;J$@!LV??(v&J`D-)Y-u*vkr%kwQ4S<TIG~)O{
z)ER_ZD+01&NVB&TOH?_setOSQ6eiwoUT-gLeocwnJFW)9(~*`#SysJAg@+Z&CM~_X
z8V`C1!cnHsn*t_Jk0Lcvd&iCX_RecZB6ekH7OYW_HWI>vIA|Aq^g=8ENn?7FQ#zcI
z<2OcP$^~GX4YoKI`<-jZMGV>FI~6I)8L2~Lqq*ce?@pMo$Gs5Ert5iB{p+ArAvK!z
zTS7rnmUM*4lqBepF0pC35Mbn?Bux2CNts&2%y0<GICd3!5{R1_ZaPZz&1esr*?Q2)
z^we=RS79QB7pa7#Xg+J}m~KM6_jdyXo1xj?jk?!jXD$~L+EZfzy!{7Vc#t=D3Uihv
z`;u~DP8DT$zmoFw`@uH@63V0F!}?(%gaS|U`TGc=jE3DIV;4ilL?LIaK$b+em_r6H
zdAh^)<Wu>fTT0=Vxyk)%3yUgLOcnoWz0r)}M~<G7RX~1a<;_@B<6d|%8uu3@U1?r9
zM+;`7m6$3;r#vdKJ0t|&eQ@`b0GKsR4ZOUii|*5I4>Ek*1fpzk#JN46b^U<cg(g3&
z$Tv9Bf-EWTBugQfE9>b(z!Bizn+tl(_+u-mnFKgiO-+R2yR1c$8gp0m3Tec)h4_CO
z|F-wH__zB0tJjoW%)e-&P+{T3fpG*%A*~XE*rRJV0`w?8JrSt2f85w_?$=NKfS#VH
z$eUk4G?6YnUk3-Q>BYGPg-5ZSX-?$M;dVgE+Zm58x-$q*7>Sm}!=fTy3Np~b;D2hj
z_Av}DL-t!k$nKhO;a$MZu-9%VfW4y7%}2pHBulR(C71=F74#9{MWA<%kDB$j%`XJ>
zDE5FoyXn45gB{p@MS(6#fHmcHHe03*Etc-fz(r`2%QS)B6Un6Sc=e+Tf9dVZpvg;8
zG3z+humx0rszQ4?gf{)rRcjzRK%;?vrV@#!WD)v7#P|2m!OnDdb~&C8I?;JIs)f6!
zH3*#sAqeQ8^7qdz4(GE3`d-W5F1$sULGbSB98$_VgZZA&novK?sVsbs#uk{qJ)8sN
zkqu^G>)r@RiOw%9et(2l_;__c1Cu(r$6&QFx&vc9Li)F}`dQr+{IWSM-i2Daw}Uzm
zA1|s$P(7mjizLWX(2ZgNM+)}s={m35n<zoag30({={$>*LgfS`n?dwhQD48ZRL6!|
zt?5NKkw#Bx<*Q-QHL**143hx+tOq3JZ^?Nfywfufc@Pv*BLlkFfLX0(2r*K5Dxs}L
z9;+-{X)23)Va)-yxr4e>7uE?Izm*&~rKU6=R@27q?!d*(p93+0=xiIKxVe3_C1X1t
ze;f&_NmMA;8mB~ggWGIyFTSsww`S~nu`-&Y&$exz<=TBo6P(P5bdfjTHCN>%vDADz
z;q_7pVd7iM6~`ctDjLfo0aYigQ49!nz>rEdyuL}lxJ$0L!p{vuuyl)SPrWT}1X|oM
zTeNCC3)G<JpO@@fE81SmQ%BCnkG#qfwR8i{tXT$PJQVU4fnw;SMGxE1Q|-A0YHiaT
z$77z%^J%yB$EK>YX4jFWvMa6Im7Z=`mDZkmD?PR=J*yR|v|(5J&!?N7wQ=e^vFohW
zHVRaF>Qq`=FHq@cr_y>YyAtO$rCKUBYZ{AgxWsZDvX|Idtzt{;Onk8=b|*1iD|12|
zYgbDSYjrv|l*JYi#R^Jd#RRd<d*y57eHnqf_;nc`g8Gt7NHcBle2KWhB3ES=C!|av
zipdUVN010<e{a7YkY`#02r~F7QtcMJTr&B4hqXq0k+@>uh{O*^Opk)BxOrRb>uEuL
z+(rCFkzL4CBVto1&#<`xRH-k%3GNg1{X#}27l?L*fr{n~X4hZjiYQOAkj$s=tpU-d
z$r3UH_aQdU2YN6%=by%ny0aHtn!4bANNtA#ciz5!p#cF_5iwbBbQuWB5l-zPJk(O=
z9cZ~&yA?@F1Q8et>3hukBTFNlx=b^4(hJ<{(Q@F>gT3Oc25<e+>e+mya{3z3ui!B+
zH|@JXzgWnVXOOT=#U&Isbv<UmgZi8L0p@@>`dvun(4{K+!Y_3D<|A9x6&U$%)A8tn
zwv2zUN|H_aG!^c>e$+HkHsjR1Y+Z@dJ%T=h&}gqOpZx{Al2#W{<h8p%5nUCjX_S-i
zXmlx43lFVGhNMgey9%<f5Nt_z&>_wv1A4ZoV9ePxUnpJimXRpWc+a}rEv9$YJHC;Y
z9JqQu8mAr+?rLJM-=w#6$^gwY0x?@eLTjGBe&Y}0u&#K0;0%vMpD_^1BL2(xAAiJ3
zMwIy!*)w*nR8@GWv%{W9bcr6ir>?S7FkURCh<5ge&%$&nFB@}A6jCgKPpu3Dgy>A6
z;?12eGo1bmM}&!91yk1C2@k6{<upsCM)%`F7b====9A50i?J104A<*0pgyw+vFUvJ
zVoP#bi;(6ZIZno7xa9sM)?^ks0v+8@z+*Rj`Ej>^Qk%guBc%3pcREl?!H*H2ieE<O
zYUZtSlG2I+VxccEnkcz!jl>c-mrbD)I{&`u&VU?%hMAz0_o#j*sqk5ye^f_02ZDW2
z7chy;KBiN9H*`kl9MKskbl`waAI7!-wZM^F^h7RpAm4;~S-ggCufSYq8F|VpA71XX
zJHC(f!Fg}_CR<>z!1?2i;4!Xs@<Foc9znH~T49Gx@v}CWoJ~QiEK4`j=?=O{PHZ)U
z&-u5|nB?d#Ze{~3=?ELYb_ZJjSi}bQD<~c_Efc=3p^ZKp7EzFRkAPe1OCZV3CH5Ac
zGnazjx?><I%k9<(SYX<qLh!{2`FPnE;6U~(=?9#%lm)1>H3W||6?mtH`{<Dr``maa
zq4*`4B3KSMMW6}OR)jTwa->+Zcvu+UIfR`W=g^J9;RH?-YJ15y{Wmlh#FX?|_x@>>
zJ=9-;jEdb>ubLNm<KD?O1C}4mgA!PplzSP6w@hO|8ct0~T3(IfxHliQ<2X#!zf;u3
zDQfoaB-1xhSK?fxG8IuQfAfWz#%H0;K-z+$v1vT1Kl84o(jDmdH8(btEf%)s0)3K2
z_vrs+f#BZ(-hejxIwg$#T3PMi*Tqmu>T>)z1!8BX+nwa;Tof$X=b<k~&ZhgE-86#l
zjh8h|{d1v^2agAaRE(Y+nDWc}VGML1j%1PvMJ=n>j5cA-@}ddxp3e#>WM%^^YECP=
zMXvxS;*J@bguBlyucO_VHJK)5S9Rjw6ewFf_)unJS>azYE9QkG^4CsBG+6-V0wH~}
z>g9&7S%hvl9eD!+AvMJisXH%Y?+!I>supQ5v^P-)=xHb%a>!S@K<uG#H8%)Ru&5gy
zs1PC)3R@JalaBy)MK{__wkV@0Qvos(ESpV2g4KNCv+ja)1RKhY8g(O*k*ZmgT!bnv
z1m;2^z?5rYAe2>l$-<GKG7+SBRHy*b-b7KjP>{w_T0~6ilyAoQ-n;l%n4yN2Zplyc
zlzkT_?@E~TMNJItt;OkX0g`(c)OJxqdm|cq6Y}~+<0=-{1Zb|kC<(qP4PKN8J2&A)
zHL^t*KfiqUoqPPIxJONbZhHn__PbLcub*PN#L3P&Br0KI@K^}XU=06_g{8?mjOR}`
zqVtcLr*d9Y=G25`s&@15=6>)PIeOe!lVPMfHm=)ryPff<yXm>|BPUDeE+4IyGK;9V
zN~;)g@uu-BgU2fxV8M<$Z;d)1`<>)cpR<5ilz)EH?+sbpano<gK<2K&3!lKqZ!mQK
z*qb|^k$Z+7at2+<7c_kRb`JThi%$YBC27H_YE4;6o`2*S!sp_@mFQXYQ?w>n5dMQB
z)F$=>F!C&ezCdM6-Guy|VM<XIEs;s9WjueQb@1MVie1VMJ!0ks1Z-U7SsUNt7=Br>
z_nrsRdhh|SrFoMIEa@hn)4sK+D_gs#6M;zEyi|%7e(^bn_zP+}OPu?(@nj~+CVbt3
zzE@r0jMvY7ERKOy8-c))A@UmLix-~ad>iepA>Kg^WO8(~@~3)bHDNqa1>^;!SwudA
zLjH@HXe;B^MztNN<$VX0Qm4q4_cX3r$s=iLir*G-p6k~1!nxbg$q3+GQ8&2YHKu5!
z@Xh5)nUIe_Td6=@ahB`hh40bsIRV=L@Q<7S?XGiw!zU;I_xj_FpWXc5qQv+7Z};)J
z|NP(L6i?^==7%><6=x?{13Z2aOv}CG*8%>MaxbT3>Z<2?bI11-)d{{ALK?2Rxs=n+
zlK+%EAm$)0dka+}DdS+gQ2wH=PP5`8>wwHDNKk_E_fMRruUEr9S-aDwAOc^^(Kw($
zCG)D(%lts2K?hy>Lt~`TK#GNOAf|UU5HNSstt)%6DoH~rzYJ^3#J<?!MWTh%JaM#}
z^ebwJ1*41-T6W+CGVIIviES9R$vMImlS@;&Sjs?Og`6aJcf$wiJgVQhiR_Pc{-c`d
z?pc7G^Z)V2+7tKuf3mjm{rta=&;6hOr_Jr=sfPv7&z;j;=68MJeJ<$jwCg$U)9GB(
ziBS~<y8pfX`oV5|w0&5|3?fnVq-GKL*+<$XK;A*mc=<7I&*sB;dQB?P)afc!w-c-Z
zl=u3Be)tZ0M!tdjKiocv8+BBozn5p}0u{WS_eZ_4dmPDOMDm+`ctGB3Dsbt2uScc6
z1v>AcC<(nki=t6$*nJnAY&qa@uQx+s&^Qc$>{ElG^T15(?ndXMp{(E^D|t7zIPK;n
zi#MxW-y9088m&+Z`l1_5{?i>40?Mrf^GmRqCu0;iKC7wBnAi-qBEh64tJX;y%}2BT
z1+d`#5v9=aPf&dRDen7Yw^-9uB3;3J0FJLK{f><G$`7tugE@czb2Xn$W1cTlR50^1
z8(Em`yPMElF_@{zZ~!ejRZG6%*sxR9=Uc+#>7_V`E<AK)Dj_0PQ0?qVe{>P&V59V7
z!_X^OU7Bu0n5`8cnRYL9;(L7D7kGYcv58LCuniJH;PW;@R{i{KB**`?596=!vG~7E
z`+T*1_kCpZe@`E;t*7)qYqg)h^MCj8xgY-Tj#Af8E<dFo3bt!BPCph*#9-X&c*-+7
z={^^d?nDqNFUyEwMTec3;a7YknBg|wLUi`M{Z~=6X1!>dFRV8^jr#6>Q?PN{N6oXt
z($fv~7OXP(ZKJ-oe{cYCL7uFwc?uv6-KhQN6NCCe8t2>`@<4Yt@8ZH9dV`G7eQ^M_
zdl%MaJ-!q2{wfu4R1<AgFKKTgL{J_=t|o0Z!3$V8>&(Ty3s2@^8!S#*+`D-60^(gH
z?b+(&0{*}NRfn5ykzU(ZrZ=8O<vw1kME&R&^JUck@kbeG17-o6Q-gfh2gX*sdi0`w
z*&2<ygLg2vlkMhffg|r%&p%$olmsca*rYpcqc=Ytq2>9rN_nkT`^lKS9|6dR(Hcj;
z@wL_{)@kE5o#G_Y)h&GO^k=Q}L04G-`lhJ{(-a*3d=e*Pz?}1b0&<5~hwHt)P>n`b
z4R<90&0hW%ziJ$xofvD0;K2k81c{TgU;!mo7Hoy+;yb5!!!G`rkxs~slk=9`4jT1O
zUT(*AK_Ct!cET*8tx$IaGillq!fVXp-fAcPtFB>}h&_zwqfR^>pZ7<py2fQOFloGb
zfCm-R)|&u|Ky|;4hw*&k-&(liC7&*~Z-3ftLk5#7G&G;2QazeGUXRv`0-^y>ElYE!
zm;24KXmFE*{)s1^4XtiElXgYlM{>|VTyKFUk2WLNtnf|SPUx;4MjDW${eD#<l^R<L
zrO3d7L%U+MY|-QMrIL<_xjdhsO|JkaX~Vbw9xaP0npDNl%BGRs-WyIErh7hF7ySV%
zJ84a4-Tc)^FUJakN|b*-Q8O~1&=wGXX%C#8qCEv?n-I_CP5`~beDa=euRSP&#MtZ<
z*UK{xj~+dW)QGJ4;6BK%0Vjin5qZrJi}U=WI8&yBpe&*#O6M(X9+;_UMSgR_)RLYh
zKA-nuI8gWrP`69L@Wda***BFK$YHF-cVj52`GeW|XH<(m-^o#X`7v6#{Aiu9mmeKV
zI1?R=@0q7EG+@|@sGBh+Hc_t)f8M<(LlL~$+S|bxjaB_qa6l*9V(`YJX8O_I*};MM
z2}&KbI@muFOwQif(GDE(JM}W2)gi3c<EcglNv-M?sNhimYa?LQ>mt`%N7exnV=T2a
z$@mB=Jjm=TU!WT0@?(`(JRR*822hQb8`lx1=@m75Vnw;t+Vz8Li07%h!V%gyCCZge
zU>*VbatL&ROn_Af^`lDkf`T=|O#j-9U!Luu!Yn?0y}t)p5`d7E^-z25Rild+4jxyc
zKmQcn1k3+IN49ze+m0%x4JLUG_Sx(oTKP$okEkWFPX%#7{S73Z`}Jj2+LAy)DzpXj
zRE<=nEt)qh7J#rCg3mQ*Vd!4#v0<8;U)16_6SL{FkV)qoBy<rVOuHZE-C5G@uoW*O
z3~w3>NO*$8P1l})Toot%p{{9F(eLegGXY9`Js$aM$+vbLxFM9&B=*Jh4^>$vUkFdB
z)ry|20?k-cYX;wiQpAgtnKl{QjL{iOi<JU&jpQ%b1Y%{02sPuQ`m5vS{&usz>)@uf
zLq%_B_$OwDEu*YRyIC%H9&C~%N{_+V?9`;fq2sW9HlJ>AkdoBa8)qA3JMXv-k$U)c
zM^@N3Jw<}Tod<AHg!FW*Ttuc2pKQ9JorSDgjs7l?!WjVcvpT(oy|p_0KquBil|kb0
zZ$K~MwAee?eg(XmoGOZ4r$jsgH_cK?Ls(C>HH;F&K*QwdGR9DJ*hRMqH6WvVgN0|4
zP<!U^y{r(e;7@UO;4hNR9{T4;paOrlngE1%MIn_XMa&xbahNO(WPe2I2>Q|kLIzqe
zw<z40KoO$g8)ZXmW??xM&Avff)>Rf<Uchcg>tBTH?*!N11zZv%qQ6U?w4|mVr+j+-
zK}Zkp{w_pTiUa=l&8$LV739*bc?u>)T1&#Mx!En;X||_HR*xs+@nB|1pvC4PJ;uHP
zItvATD;En7CsZ>%JL!?%tdAt4kI$yLwVpf={g7C{vLN~!b-x7iXW)IBgy>8+8%CxZ
zpOVR<8zyq3Y0j!f^1i}dSQeS`B(I9ODo+`emxA?p2h>zo<N=Ubcuf`NtSp(O*Hvjw
z$C6q4u_`U|u;jsQjJF6lYgUcN1^QU`&1v2sQZ<bDFIkxM>>e;lX0-X1Fsa}|EP!_!
zlb#iTWMd&LeWJ0nRsfWziy`&Ty7+nl&}$20^w-)0xl3Twcvff?7OU`d(F#AOE3m8A
z0(+A*<tCfx-YZxzoPV6rSybd2VN)d*0m-BmR6$B^g))}p1#ifeB}_r3FmJmsYny%D
zWC|5o%$P}C?fiOS!69-8@@A;Cx@U5W8{AK!ZZ;O{Miy77XfT-RD>uz*X5r%B!*^VP
zyO>S?UMR$bAvk}}k~i)Y+CL?SOLt$<FTX^6t*G|y6|f7$zUBer6?q$*y%E|y=j!xs
z^~V$rMfNRFpO7B^H<By$Gjz5kzWH5rf2uyzg^n!+$wUXL*bnKQ2;Lyx0(Npun1JbQ
zCNCkCU!}^vXK3}*8V)}g3y4}a*^yFIo5eY;9|CVzWYobilD^0)Y`r&Mc)JmpxvuG%
z1dZw<@_TDt4O*kj(YTvXE(nfB>;UsbavKkgxX*COHh?KaKvc?;)N2IFxB>E<MMaOH
z8=XR7*>XYAve@C1N!x+cn1>Q+q_PRTx6WUl#(!n<9>BsP1fJ3&MA1UX_$6)$L$ur2
z%RI}PLIhNkw0AT0cvU4J`mwu`&1ok0!)YdRqsvZnc6;JG%#sDSyy7>L@BHSMqD28~
zp}XTZle;8HK4=#wND{8XROwsYJNHES6zN0u;~IjK@O>nZzFHNJ(yWc@@>$73<nt!9
zu_#%*G3TQeb+Prl2^q}ge3Bb*KB+o?d1~`*5Slq(RYQ-%z7Qm)-HzBVfQHgYsweQW
z3wWipN#(M79{m)pSE3(1me0{6$eQ;P!jOJzn<ok}RH1Yik;}BRL})VT8dRb0xB*ip
za$tNg>U6KAnOt=K(a@4X=VouM3paEbXx?JV|Fwc`^~QWdjF<Fa>8Kzu*Gu;6vo;i6
zIGy5-G#g5po9f!{xNj=d??7{UP^cj^-m9^C`i#o%qZMO%Dir%^Q!jmLru?V`;{bHJ
z;W9?%8meWJ^BG(;#8{_SX_TuhMX3$iwd$9bddRnO*QO`02-2F1YS~dLv{V;~ZU+w^
zx`Pyex&y>ZOV6m&(_`p^-SWP$g2upzD$L?`YtWt#T8U+;O2@Nw{0hlK&L)MD7J>1s
zxfSdvhmW}Y_~-@yDzbYOs?f;zK{8HS1N%{WqR6e2$mF!!>qCH6^O+b?7(p#8Hf)LG
zm%DrAWj#ml9!%ax{n2Ee=ovyATQx)P9?a0bcF937V?tHg=)a=n#@prSd9>VoyG)jf
zYU~CYI|qO!{+oHA+CW~66Jv6vC2$6$y08My!RB1T;@xEVfLrsa0Jq3)0Q|u0R?{04
z4+{gefa)6a|5LgKWSq?f-E4B}!5odPO#sdJl{+DTw><H_C<!gOvqdKPk#PWuNvgV=
zk|%Dd#G@09RlTQYs-`B}zT#l+<fGqc^a#DBX9CU6N~$qu0{|13A}GKyn3PDA6S(9#
z(oI}o1ugJ*m6S`G9+lnjFXkS_t&r|lQBHFhAY7>PLzONI`dD#^XkZ1cV#a!zD`u6c
zm=i-Ge?3#8hi-5==E$LDXxP-Mb%10;FDhh%E(++m*R)08i^1eqY$D{}dY$e}pV&H-
zvpMB>Mg>=ir_3#H@()fhN=udn_-G-DdbKzlyEr;mcqNhb!-z&1;-~!TcR5mYPg1-P
zt&>!ax&|*HVx-jsd%cD{_-!GBLIm)R;ijc)Owz7Ky0>g45FUKu#FSkXb{K-$G%9|T
zDK+}{&Z~>E;TberGN;k$A%89ft?qYjh@j7+6|DYM<`R1@ik&1LhNqJnGbd6tpFf3Y
zA9>>$oM7UIuQ7wzbI}<jBT!KxJc-nvIf<%=0+Wc&B88p*z}=7M4_y@01cLA1?Iw_0
zgjodFtH|~rbY@fg4>AYD?Xjod`&H#5{DbWzj7OO4>QB=H^t0t#Dd0pdmrA|^9}Jpb
zbVrn<x70|c;F@rVqqN4{6<LewRJA+dGs2oV@?~K9g~9Y}Cta0V(sl%Z+0fZojAp`0
zQ@}gB?SS@q(}+l4F6*`hjKGOnZ7_i7eq)y0s92$drAN-mc4PYx0P(oY2xwr>Q4}UM
z+YyE3Rz6{(6EX(@9^4+zgBdeL5)iw9kgmofl&;3TULXp;sjE*fJi9QZ+pgp${B$pd
z*%77SJxl@I(&TYPUD9^xV3~dFC+*83elfUaZ7>YA-DBPy?*0w5(a)XRJg+}_$pj$U
zvcq2>w-`1V#JKOqhS~I#wr;Pam5%hU_5e?C`(MP3)6?uhscbHVM^$)0p0T9L%5kyH
z&vXCQ*&nsXQ#fe=tf=ueLMJELatjWiWvjXHQIdRF+)P~D{DMx;8EBv6qkVH>v`<n9
z(hQ@0vJl$4m$n(G;vGruAT*j69Yvu5-2J`h&9+4IHe15b#t4a@RKVF=B7aA?5f_|T
zJMOLM?YGJKN7W*KjlABPsvIn~04&=FlCD-Idy?t3<cp``@PgXc1X?3=Kgj7!?MgQk
zPevSK79UwLG@{WsiMrPqig}s&UPy{3IP;4x6^m+u4eV1?iT-=aFH+)erE>|nnt9+)
zJILf);d6!*zOQ9g<_C9x3k3I3m$H~$g`3%2BCr>T*OSo-j=R8;6&QW7f$_u_gPsc8
zzJTB6N~LDliHF3VPYWzsUW=deOk_EZMZRQ%${&hA(SziA{eN`%LY%CF4I&05C+8F%
zWvN8pq!O@fHzDAb4fxE9l@}{JR0aZY$k+m6A5(9-vBexV^V_f4Vq(hhroeo<v&Nko
zwTW$^5D_R^88$?T1Dh+F$jS7SPcEn3*`>cm6?vFYM;ab36~?Gy9OHtg<CpVmfVG&l
z-lfRe`;sX8&)R1yLW-<(p(xU&V0)1M)l<$aHEdn``r{%?rOfRKYv2j+Qq2(ht#~;p
zg}OQAR=^x}|6$cNRh_}Aq|kI~8XCS}MX?lawX~kvtSxUkZ2M$+N$<=XjR-3dQA0Sf
zS7ZRH>EKqGR(9V}{jN~mOF3O3;nrB)3}ST=BwK77G;g{Mnra)kW}|qqQ2@0gjn<Co
zJ~RSXOn16JLK{Q|__?LR)KDB4vQn|IEG8quR;8rt&6KoKEQ^QUT0C+tvsoxGk^rvE
z9j`=>;U0Ozl%X?YGbzT0Yb_PwZ#~gB$+Wo>_b^qoQirDR7wqjJFyo<8qNl>uq>OM~
z*>H&hcy&k7LkN&m+jDT=#v~cYsyn&?Qr@xIyTB=1C0JuR>w74p|ApwKX~AZb`e8B`
zd_5}A(UoQ!q~Gn`J5dURb!S<cYDd{4*5A}PZ(5YSNbSU`eS`3R6xdH!^eSvp{kpY+
zCW`i1WZ9dUCKa1^)KufG3U6j<@-4-2#(c_g;eDFFfuv9a8CPKTI8=8&RVx;gSua1~
zMz=(=lf9kz^y~x^lN%tW_q_*1*RMKvd*6b~%8q|lG_bV<2o-QEa+08~Z}KXc&M6oG
zPZotB!B&Vf!_nEk#Hmou;{|Hov=ZE|I3f2?X=hyc@g&MiK1wCZZJI$ILDv!LJME_J
z;Nl3k4Ep8Fq8BNJ04B;gy#btNyIT+DqUi^7f;Z54HPm-%1l}`C@6_IfJm=sxS;w7c
zD`j8}AVBhX%0_y0)r_J(8J%z<`@H>-MqIg*HnaDGgZ3FZZF@(^Z+Xh=O|5W}3?cJ;
zGYm-J<hPa6{$~4Ne-{%ria*B<ypwL!n`NA-=usWAF}}gg1>+RE(inEX-JQLIg72Bn
z`Jq3<IiJlZ@VMK-+ekZw?$L2m5I65#Guu0WV(~><wHiu0-`tr+v&-=uGw(*DaWouH
zyHRr48buE}tGrv%t!*w930hi!doWfyo8T3_Whkk|_s55jlui~CysS-mwzkrjT{L@2
zP%!IyebQDH3K>DPiub5V>p$6$#owdr)wr7S>xJd>$EVPvNjcb>K?GB!l((;~BsYUp
zDs<$BKWs6z@FO7^V*G{##K1)eo(j^CV7CM+Ml4Q50%0Xu8aE3C0_dWZM=eLh0*(O!
z2|}uQrfD2e55_YirTtp1mNk;*Wf!Q15UM^!<DS?{T>&&_DmBI@T3KB#2z;(oSWH4I
za9LsxTX&v+V{>O9NH<3Jx@1cc=74d`*G_Bm{ZsH`<$vn+2ZEU2c?ziP{7)NCpNMx(
z{->u;*1zX}x{uF2<bTS`%ei10KW`Gyj^JVh=B9Flp3i6Tq}|6C-9f6WHSEf$vruzl
zRtnYRTIM@WIA^y$M0G|e(WxiLDeu=}8MV^IC}5%WxHG+qhqDV$>MbFKTz1-?v{{3G
zkz86Tfz`<#vOYo#+pWJXPb!dzpyFm?ZS0@KFZY{}+w@fM@R!x-LnZoe!J>|%f5Kl*
zM2<cvA3&#C<}^^{%gUn{LcHit*v0)_?R`a{?#tf=&m>v|$*i)enm8v<JJH00n>3Lw
zDqwmKCd=qbFP?4wH?U?OFCmS2xt2A~orC&z!yf53KhTF;(Z9-asFmVleZ^~9ji%q?
zU<FX3x_`(oUSRVN1#|5V*D*1N$o`N<x7F!PDN#=EV>v@0w=MXoo?}*Sisc+BPW3SD
z&L)xgRZtBq=>99g>vA(tF@k2gvsnbNM!o)Y_^~DAPeH(q#0^Bk;<lg$$+R_^sf+6!
z!GAYu@!|FktZo%8*ddS#sdy+ed*z|%MrCXh-z)N70rDmw=gKU+kl)ckPEec(`(gDA
z%){0L(9#S$&n3)}m}xZow-Y*0`UO)k8g_^6$)~cMGSD@)SLquJ0bt+W-ECmTmCa(l
zZ0;%;u~~d;>|al&@nT+aII=OgLV!zz=(C+X!;4rdjfQ-K1B=f;CEb}Z%W$gx0R8o6
zF{W&ZTQkb#Yd}zbsPIp(3j192In#@aIM0`1`t}Z+%VKOZFy4I7QLc<pB<Hs51J7DH
zttc~kb$b&tq58w6&ukx4a#(XL4k&>L4>cy}?E%l|TqS*m=vvM8tr~)qO1hnN=C#?!
zcn;TJ$@CKx$|Mz-I~RRJm%Jn+ZFV|md`U22$%^=yaO#pEz*ZHX85eAtvz@qp)NK5Y
zDo-Jk8d4}K(qu;v-Vt&~v&O@HY#3Wx@20IOr*!J4-i-w>+8?zB%2{1T{;-SjJtzQs
zvWIVx*CJYA7F0AL4$}l}R4ijTGll4HX0K?cJ1DlI-}w4rGM?1*{vhYKLhi?7Aw!Jj
zUFEEtwQRg@VZ=}d?Adegne?k(?oN{XM-;{UMdntjM(DLAenQ;RZ+v0yaAVP2h*$I_
zw4-=lqW?m8-|WF=d2_IR6g}q$UwX4Kqvfo4k)6h?O&r%>Xp#N9Z)<_wC)ZXkZjo)r
zHXCw8Fu^7}S*4g^46>TYQh5Dp*7_-4CYZTcrfSc>;^mSzHm0-V34t95?Gu<wr)>!~
zXxahVe~|cI1_5~4ijrDyD6|$5hi~1<09jox_<J$kLKUA}URFF2b}vR6J(x!?kzr=-
zd|@yw?)m8WuzqN$z{hi$Fu(!vvNgMmZ#5gu?V{7+_7#|udD4n#hu&aPTxc?9r8n(r
z47Tsua%e(mqS6Vit!kBg6e`+(X!6>rxf?i>cS9KSXHu@Oc~|a}v7|hOl97)iFT~Fi
zUWRtuoirXU+n!G?fuN+1WS=;oj;~rZeCU|{+XA4aXT>Dbpa4DCi1+H-&9g=wW9g^f
zV`%3k_W%HJ(6E#M#(^#hQdJ%-;ur!;LReHu<^{N<T`kb@P5P~22c&^Ty5?M8Ejh^e
zoX`NCWz(a|W<GI*EvLJXZ!)^bYCYYsMXjwY%Hv^w)}D{&Ga2dj?hUC5nX3zcdSW|6
zB4=ca_<LmL;YGtj%D)SvwzGPYwS?d-I;tVGCqhMld{VL6{&OoTQySuC;~+jesYicC
z`uE{UU72IK-F%$(1p&<Lr{ut`(!iRacsU<)QDkz8|3RzrP^`+otYuMl#(F5?5Q>L_
z{`cFh8OEPj1J=@9S!*FTRo2c>H(zbF#YogZ{%7t%XQjUO8hl$@&cS<wHW1c(@*C5H
znI-!(w$7<W=1?i`^?6sC?yUaz$O@C9dHb)i@$X?rI>&rK05z0Fe((Rtud!dTc|MF<
z%c1+=D`xZgvdCnG9xBL4GK3O#2#$MK45PlR$YC47DL>skr6<yC;3Dk>8gy7q#bzQG
z7`THfFLTq+h42Uv!C%F($qNXwG!u_VKq_~q)8`L56&89WaFNdSl)9_1W?o;0r1qvK
zJVFrwmq9%cb;ctW#M2;UL*&^%@;*#mgc;Y6!|_$v2S;y>Z(~D%F3a8!^6W_C8<aKh
zEp>O9w&*ohhvp?4MZC9Q*^BO3Z0;$&-quP#^hpu#xaj+e>w;liULk$Wz2hwCi9{Z7
zR@3Z5SmYkf5pjJaeU_6$D3_#5jW8w(A#M1Ly-YkC1vf?Q)&ja70~#8kNqe-y-wl&t
z$k)_7j!$;>IldMp;^W=H<zddR1+|<h0&JA2Y1z4yIy`YSTB%>Fs%Z;do!;Q)wB$ob
zQz7KpG8x1XjGG0a70gk4KGiqOdjH5F_(^5O!)!WM0+tSdJ>KklCXQ!&VV-07?7W}g
z>>(102i@yr+G;1=4l1PO4=`6z0KMBlE0v8(^e41bE~B@rdi&&k1q^mJ2yWfP!!7#`
zrv4WEk(%Pc>90^yf(L!+TB0MX*6Hj0y=EnPXg_V#U+H#uNT96Oo!jNX9!Hk<F@>I0
zggK5FL|AC<<+*g*Nm~erlU7Z+o@kql2-1&>hg!{CT9U^c!p)P+RUy`ryWEtX_ZPP!
zFNF?8UBujD<541(f&iswR#j&^vgmTv8sN2}<Bk);cRyG5P%XeiV*yIoHXxer`q7Jd
z(lQp~9oLi=S#+26Fsfaz@wv>6Y~hsK+?4J?9`RP&Eb3I{n#hx0G{m8bZK&YFEXL50
zKmG{kH#H7ACjkvx$H{h5m3>3<S2Zn}f)G4<q#;1_yc>cLsB1*hbmnYk&smiaBt(xr
zI~eY4{2LheZ?qgem?2fJM)r_1_PEViV;Kb>cR)`mMS~avMiFffx~(a<8J_A4zSy82
zpuZ|eLk#-x*P;P*VXk)-&Sv>vwThTboma=b`*!cyjLYu~48;I$vf~_F>}xCx@zT~!
zVT3|~#52C@ioD1XRx!=MJdbJLpg+q#GNK^+X|%v4Va7-2vhBYRn)Ej_54<HX%!DO-
zXfFHET2kaCwc%)>R3#9$gQv_5-Qhc?jA<`$R0&KqDiHM#gO^L-13Tr=zf&~&4}<8R
z0StykRL+44t8}AkDOF~~AT?*6873iWBSn^MwdhoCCKGvsy9YBwbH5>4$zO<VmMNvt
z?z#~5ytR#pE+P|a)25(b0`ILl>M83Y6rTrCne>j(<P>H`vD!R5$!LRpRlL7z^V#yD
zX0uD0$yP1cWhIMU@o+=t8nmX<)~6V>y8({K;&)t(X7!!HjyNS0H<9iWHNPR2LQN{q
zuWyrW=?V4VEKkdG&?pPMdH$e-C`)3W#wtt@l_TF_;bhY9FwDN#fC5vYgo?$lK=aK#
zSalitcEt|!JhJlq&A}0>&)?LW6(Q_l`<wQJVndP~7_(>hnOwJ%!6H5H9&R_^?leW$
zj6@`BmH`4-U$+cltA@?(Fa@DY5O8wy$N`*^UGPqYx2o|-eREdfUJc10eFY49lQRKy
z#&e+502?d_zALgIMm@0X#A*-i`F^$g#*C4zT1kh(Mq1GP{iH7T#ZkTSoXrJ|KG-_Y
zhvXPFazcU7EI0LZd<2hXbUxzH3Q>PF5)+OC5)fYsc3Q^tjg~J4<MWoBEG)xr+7P|g
z8y8*=>G-UfHzc`B1o5`;kbG85WUjvzn+!G@s6`l^Qv=VRr6q5O>y*!Vdb-X=DiTG6
zCbo##4?2=k;rebaSs|}|M9-K9Ab6@S`~WbC8cI3{2>cv6A^y`u+N!y&pr+DZ=XlIu
zHPfvsM<NnCmcl5I%m#bs&P1dIPj4b$GU7`-gLDiZRJ0xiC4{vGoO%6Q+<e=38#nfk
zUMT~(=uae+R^dQInM&wZLAu~zv53Xx?sdB>kYm`oW^q}_%Tz#6#PjYH$_Uod>_9wH
zB91x9A>b7n1mI0wU1$!$>(!~IwyJwKn1+CdWT2Knyz3A4x+}=use&qegjD5-O1#Fs
zLF+;W*EocDfO{BO2{zlGo`(e5Nqu)ywlePZ2IE#|28@s-N22iY-W~*XGb-7toFWjU
z`v6`lw-Y}Nu+RqSE^bGI2jb+=tx+f94lQ}uK$WpfujAX_bOL4V?=Nj(A&5tRGq7z-
zOiME3aJkZ00QC*QG6M@KcnL>;vlcTOU(l3(JTg2vc!$tKWv6d3eys~@6{I}K>})Aa
zijnNfl7T4WCbRxkjc78K&LdYCQxS^yZ5IEkCR(=<GC{nc-vhl1GNwuv&XGc7DWE;3
z{IRpQZ@y+bFD0Gq)iZ?>uIAlt<VMttn=cPAJgcks$Pya+s%@W;NXm~80*Hz3jym6f
zeVDWj3oe~)suh+Sq}kQK(xO71P^Qec?7o0$#=?P41fyI(+O6-(1I(@FHlMQ=U)^5v
zZkK{L$roJ-?&IbQVTs^;S`~=1i}i-B*&pVr$Wj#F5zb(Wx8@BD$GOWSvj%M$hon9k
zN%@dI&J4~&S~-N;+1o!5aw0XP{O+jxLMJrUuUPV*eSUN}kC{BQLkB3A`<wwRE#=LD
zrTR9o_WlY2>yShPSnuuL><<zpr`MF7iGhQM%jm1v*nj(U1KG)CVnQXSX(-{K86j6X
z<IB+<OFYdKD`)bE)M&~$o4(P;yJT550}U-{?YHg5ku*ISbU{M_<1)DI^KK8KJF=w?
z#Gn!a4dgXyB~fzOpTVPxt{s&QZnj;kz!E~W6`2tJD*&Qoivs(Zjd#R^WT;HlI_Wi*
zE4C`N5weXuqd{ZGGH+f;6=13Q&yLPc>$?&k`4<*5eiJYICDtZA9(_~x#o`qgJ+jnd
zq?kfx*BEyK=)!WZk5rOrQHVBe=A)T~C&2*I)2!f}GVby5(ZTPu`HJzeIh<-CRyCE$
z9m0PSr6WR0=)^aqv^lgm^|fQZnIAj;ALJnbK4A6oF8j!i|0mwno;vaWo;((>zsLW(
zhtEC4{{zd`QK#FB;uw8C4vv3|al^fiV-+LJ_f8$`3NQ1-fK6YWi;Po+ZW^4o9G^Z~
z;V=GXb1jRDG|Q&6UvK@b{eRWF^Vpy8$=v^Iwc5tI8~^k1&riR{|Gbyay~O{#v0-~K
z5N7JRFIp#sow1&Og%G6tT80h%QsGI9cT+TMX{f~l;Y`C(NP)(ro?U({5UsR3osP!o
z$DtUg%y@d*VI*3%15_S1h>u0~HmY6k@yB22^d(x*2<iH?-i#ivr`y_ZG|#pVDAp;o
zDJ0#DMy=A^I6?wVvpS|f8bB2#?dtdj;)ok{r>(?)Fxuu)Ju|JR(Z;)UK$Q2JcV#63
zv4%#yakL`87=e?n67urkS5jk5mVu-ZX2%nZ4f+cY0e`7m7pYxpjXL8Yj;DNO$L*8R
zAGsTh=DDg(^NrFfm4`7EQrVGh2b2`au6}>Pj6mkQnDo2+RqWX==yA~Lhrer437^Oj
zXlY3mlzQD)JLnsNra#BKb){H<j&y7N=?6y2K@0|2x<M?b8avq+@>kPnh~BeghOZA#
z_upbyrd5mS&xA*X3Ldgb`Nq@ME>AZo=K`2qs8Pv|oIyR^Kvjzi3PVb7z4j+Eu|ttg
z9<OJ0LT|mDtUWE>$=cJbPUx*^G~&YsDFDm93`;I|on=wz&^6>r?lZubqBsW0c^IZS
z3$p;0fpmX59Z$JG^M|j?;Z%0XzQ>DQ2Ev4^yFl^7DvU4r5>V0lv@sf{)j=Iu=7mP?
z9^>`|3wZ$wv>uq6Q1oE-9|Vh&*|a~t0yy9|1QRBxm{fS8|7^0#m5Yf!v|Bc)?nvP>
z8jB7=D;JHx?gcO_x=I2;4GbKxLT>TnWo25PMj!}fMW7e~wsd=L2rUhuETV#FiSuVX
z{lrnUOto%Cq%|rbw+dfth!d{WA93!xhYFaA*bVfC%+0dmDswdD!~l;eD>Z@HUReK$
zR%VV*I>s*22lnTs8*Csp`ZxBAY2@$UUWU)Ug(WYxQWRL_2_k!!tqAF95uA1p3~jID
z?@^NDRF4~r_9$VuErg5~pDo&=gpKhjqbl?QnO`tpQM6@{h^$%=WeZauseqwGadN7l
zORcO#*;sjAS5(K!io?t3DyEhhTEpaHcz#VM4Ld9ds`%&yDUL|vEiXp*4^IWxv48OL
z_^s+I#lmT_L=masK1A(s(K8rL@uul_z@!REk>x)m=|^a*W(FbSDrSH%)y$?v+Bg7M
zjsfO`XFo%e77`e~Jhy7Dl3)?}v=Sf~9uaB>zKEVwq8D&H{8zL_o@QJc{R=oKG?#uT
zDx&X!sPiaVGw}ryvcrrRMo_5kSG`s(xPJM?>cS2{<7=pF!2kadUE5Cp7!#W0uAm7t
zR16xQbki$p9d*AFjt^%X3n=NHxR{;Ow`0g;3^8@*n0!0prpa<*j&3G3v~dfbBDhdr
zf<;u*r1->CT25FAiWvoYI5{PCY=p3&Vh#z-IEqATSaL8PPtIHIKTH!uFy^$5Epf(S
zVRSH6R*D&~eIZs|;wFRFm;htb{-I@`e_=A<mI4tH+Qx%>l=S17_}Me5nc}FGeJ<n?
zznpewm*YW)OM0+<KKgd0NP7$PM+qP0x0kUNVBB(wI~-eq(iXE;TVQ|M8x%s9E6yb?
zIi>lkc_ewC2NS)Gfdi5+WcEpgPbOz%Z|^Kg3Yn^0IVwwvLZB=O>bWU573JM5b%{8X
zXS1(vM}k)J{aL)BaV%{jg&WucR?&i>zOJyY&hFeacxSy;h6*lKS6a}ks#$(m&T>n?
z8*EB@)d1r^z9Qof@L-F?Ot>MwBG2h(Oh87=H_cjNFvob>A6+=v(*-HF$7!4`_Rf-!
zBiL3Ic%lzTLwd=9+is;mL<~&OzyZDUb>;V9XYZ=UD0OblkNO(>{sO2S)EUzdpP8!P
z^fo{VxzUJsj*s^CUlrdD_lfypAYt4OA)+JJFdy?xAfsj<(q}85s~9y_M%c&>(~45i
zFdnSSk|>279v?K~Z5;xe9=zN>M7hOi8Qt}Dkk-?eCsxqxP!Mgi2`l6;cM%SAOT2(n
zKR0Z5sbq{%R-i#Oj-63*4MVq<6>OS4g@Xg4Xwz>0R&Se*i*TrD<5(6Z+_%e_47I(?
zO6G_jLi7JBTfJLj$1a>Fykx%i6{oHs@yR!-kh^-Ww7`&Im;c$l+%~lhy}>lTY>hf$
zdoQV>x<_BE#V)el9hkk6YRWtrjZ~0Q5NYw4AlI!B?x%KmYBPxaxZvtLYAWSus3lS^
zBk$O+dhb_<972z?3MqckPj{Jb^!T8_G+bFR9~n8O*}-O4hO>+Fd5?bF8LCLJS~QL^
zcHr+;wBpN%0<Tso1jTKUDICa_Q=M68!hp%NFB)$Qu1B5oC)*88pmQ@s0=04GU4^cy
zIQ>?%jfP``f%su&<Fvu`n_+~5LEA~GR9M|qyNIPF`-05nZmxP9F4WC#3BCfpD88uY
zcM?&Wlb|^rI~SJ2m#6@?=hty-(l3PP)Ia6E1$39Vt|Eut9^WN6h|KbfAUL5SfbY4;
z{;(WE_jd&RdmC44Cfm(ji8EV#VPihcZKGY)%eV3N$$q?6d+vL>=6$;Eefk&}V}|X~
z3m7T)2wT<zNHNx$w_K339tt!&A74Wc5jPAm8)RD5$lG-)42UfP`q~69o)kL!Mr-IZ
zGLd)EBW@iU$TYwHf)x5@hj}JS5<2SodW?wk_%ppPoKpHEfZ<A6_ygWlMJpj`$x`xZ
zDCC5qLa)<4hhv#+%-ts>+=4J6geJwBT=l2sbqUC1h`j0vMwEhI;`pAzM31Na<WqQ8
z`miRjD;M5>w~$o{>pK?4qeqvFLIegZuu#h^v@S=eUrY(JS-Zk#zriR$5w!((MWS87
zMYd}vRJ(9>v%W;Lq>a0Wan4*gpZir}xnHGK8Haz1n{VTz<D>d>@0;D7my0YC(R_>@
z2sT4ew>84ZkLEfh*BsRbAR@v=Ya`BH_)}mQ?m~G%N6#{bKef36PRrtTtHa+t+fVNi
z=N-lpAqPlUIhKQi+M5z@VgnJL#m${lltop-&7j`MQ+*>{-PmK{_4D&gym*|2CcR)P
zyjPS`A;IaEK8N^A{?f`CIrGcK<fgTEtr?^%j2Yl_NT?wwsOj`iLQ#JtsAXbHcI<aZ
zBo?SWR^_0Khi~@DvV2>b!Ios$#!$)l&A#%sELqeU&YwV?GwG}R;swSL13&Czif1M{
z;<5$iRtmn|`rh{00mPm}l|xy@m;7NXlGi*oI6uJk4+{YX_N}?f5&f!`T8JCa{17>j
zNcROui9_>~Hq(1`?54Rfc`A^!A*nq4k~!}YNO(Za>8M@4!=)Pe4h^TcJ`K#`)O^0?
z)VZKS;T|UEYcP7s?w>E8BKgH()Fhn5r-@%4Hq9!dxHRQMPMcV^zY1Ue=)zuOV^YA!
z#_4JNYyEdL@sL;d={dsrcqm({Z~$bGiy1P`8Y!BuMsA06>+g1?Dq?SNLRs}LaUgjA
z=(gvzYkT)#{vKMOtO_`>0&oIG)Or43M&B<5!5?&_JObixU<3wip70GRyke5Fi17QZ
z2@)BK95=KRBAohWW2G)&%MJQ;D9a)o<j9Eh`(_VmHlGuQEWNqpq&V_NE>LDo5Z)11
zS9jIwbPRP8Yf=nL933=+OIMb(Oo@6QvX_!66HN2kRFPm^Om>i#aT$Rh%%ZD)2ft!8
zOIeH3F~R6%Y|GTxAl|$9gYn)w-9b0uR4{TjLI4#NkkO>|I_^zd!|tuI{4XOf>megX
zdsIkv0SD!cIRmmQk2repBboFHHtqOLz45xfy_>$=g(yy#bQvm1A(+>o|Bo);kiDum
z<GsfAVI8S!%97zrO1Q8A40v9sF&%EdMfImvRcbT0NiX{Qe0V;lk*ExhWa6eddSIu1
z`U&>zAWAN$<N3v<#a2m!9X+L<?*F8eLYXjDtT4bST~|spKfkucaxoqOLp1p#i3VMM
zhLVMr8zKedjNZd<S=g(X^+@gVr5=o>$Tg=zy>P~=CI7Gxp#nPDllzOI-9FquGN#vE
z1f!hj_5Yk5zC339)U#bsv6z}`lGwDqk$d&t<A)~^Crp=erY#DqM~vO5&mvTuD?*^J
zv-xx|r^cxN#pdC~mf4NUHFK*8G2!-2OhMBHXu2(vNasZ>yeF>cCRd(Dc@<ky-=Hsg
zL+L+l<+b6bQ9I|5{nj|ipuH)au+9P7#sTv2+V=31Rte*LeHMu~f`Ng9wLdCTU4hwc
z;%dl#Jw9}djir?dWERnBEa2DdvfoQAbWqu1auEUzXZxo2Pa4VD-rBrvMrh5eW&(aQ
zZH><_bkJV61xhkxOakH#r>ny_z8;%ZnUS+Cc}&^ZgU<WsGH>w`o2ynMIko+2<muju
z1js9+m}_f7?H!{GN%Hp$)mPdn*L#t;<PY=iEQxs5F_{a;TUeGA4Gm82JeL?zP;Bwc
zxy&Z$BokcHhwou3qae462uJQdsb(KqRdsT~QYeAWe9~#L_XR^-)mV)<ca!yJ#pg2n
z+H5_Tt0ZJw#<gB4A3La=7qk+hjg2CHQ=>&OBozL@pt6`%9UCep&-{f+tsW|pQu>)9
zw@xWrpdn6OUIfw>^N`FtxolgPEWQNv(F+Be)bj3ZpK{QE6M-cAr#m($Uq3#flpZG`
zHoi6KKVMsCW=b`0EB$rmkAMM%2f#vX_KhzMLrkK>6P|nS%x#M9T@EkbYbZLLb?}Vd
z?;-(ir-F@~&Jzwh_+j1}pmB<t^J+Bvqd$RXt0tJfF1%1mGz-C`W?Ul<-vyTBfw5)V
zo8=fJGGXEfihK=V-^JM79iAOD_e1@;t>r+H*Vp#Jfv3CLB3)3_%oK!Kz4A^p-4m>B
z=)u`e#LN;QhH-nkTBd|96@|B~wDrhmx8E^<EXLi6y;lk&;lj*giaFAh)jbpy0bcMZ
zOzRtA*127Ay0cCKhW?<V&2^AQSgrYI!Qf~O+Tbg^$X(6ckbwUEkLS55=gfVQ(A+Nx
zrTjtIj8Ixe&RHlRZ)>IN{^52Old@IAT(YnNEoiC88&RFJs{u<ztMiPr))bWHlzv_5
z?-w&n#q8(Spq1N3$#q<X%9c5&uX0Khu})YGtdaaH&&FE8;~TVbN1dy#F{SU>1xXl@
z`t}YaO##$960sZgzZuEY>rH&?e^t%1i~d}-o0V06KiYAdHC|PHDifzE3KX4!PCiym
zc<%9>mJUwpo`(3T2E}(*;<R)Y9^RR6cs_dQGznbfQT^3%bAP*8ciObxRYNAbPJ@_D
z3(t*LsWDscs$Mfnr!~zbq-W52R}IQ%E_|n_Xiwn-&v>QUOWkdY_C9ZV6_t50M>?GU
z+OS$Sf;-q&kBc5WI_oMc2*j74A`GymbFk`H59rkwJ+4$TGGl<29g!Kj!;_BQtR^^_
zm;24Kk#^PJOe#aW(TEkapvTryo#}XD#ta~^=@2PmgIEa;aC$cb3<O*W4p32YaZ*D{
zTQJ`sw@K9`#VNr2?&i?ltnuL(Dbp?{02lN<*LTUn_IqKfregn3yOY7Ei2fH_{4?!>
z6?uYW*e*!CRX!VBRj0`Y9t)4>ayipG1X&F;zqZsQFTzLAf|xnFwO^FYpijH)F2quh
zTDnezs|2SovgdLg44tz{uVio!r-<R7<||RmImHA&-9=dQt<xUDLarh|0Xacg2ql_b
zhv2j%h8~{2!nCN{JD~XFN-D#*QC08)oah0{1D9hs?r=TJWhcW|NAD`g9fL5A#jq$2
z5lR=8hu9?16owOS7s?z44&PQP)`&ys>D|_THLZaFB&^XolH)r7o93qh!sTe#j#R!>
z2rr{d25B|z#lTmVLlCpro_Y4)Tur+lijflO`NM+SeNh%$SXz4;wTB%tOA^4mt4Y<n
zWL_yXh#tg5lqSw8?p}#)9#2|=%VS>y_$s9?ZuUhK;Bd$ol}r6%?t%T2)B27$r4AOl
zAU4$b5<`?)13R&G8d0Rk5skab#r!%AJ-$_f2R09ch#~c5Tz^wPYJ%!#r;hnWjq=Q*
zNM6N$X%GQ5{S<RLRSs`p;M8JIEo+*$Qbfd@yqXl!e21M&ov<L|s$f3{1B`+sXj;2F
zn^9@WNzka_mht8BTdAIkqK6vraGZcU>>)%l=H|*Hit2zFw)HR}0+p^t_F`A$RR55B
z$Vy^HbI%LPS2>cJ9}zjXjrlypR+9z~jish7BI^rgN^}#{i9};ZW6?0u%8{6HCWoY8
zG&Y8&i~$=Eyx~;RFwNR~MCIOm)Ycl7vb$@tw~ew@^5L{4yk(ZOq%vDG69$?yX@MiA
zF{7VTT6{%YOw2M*BAQJ#nKo@vb803N_LDa`&p#-4S3f!tHW}?@V_g=}q_pC4bo(8P
zh$@woRtg6Qs2-$ivKqj0bp^yoZZ1JqL|oG0sudyj*z)tuqV@)mPQ;xaH|4bNEN`H}
z1uXH4G79&^8K51A7>lzSs8pzZ)~rLy5_#Ib!ht{->AP50(H_h_FX{)q0@m8*FKwD|
zIU)x3qq$4n)2e5I)iI#X>9VsA9kv~|T*VjU(BNQaOglO{SXyU3;fDtHDiDGJr#-Bv
zkEG@ACuu+e90wD&z`Q%>#6m`JaNy@4zYb};zy`>S637@H`AFl<j(9&|v*pHzc_+Dj
zjDNPnCt_se>4(hgpRW-wG|Gx3s8&q!W=7gQ>CFy-@RMo(;-WhhSbrr02&%0o!`7gM
zlH#Fjn^NVclZwy-X|M##sI_9CJ!xNd+kY6^u!`3vTT_DGV5%W=lntelN0nek6V<Fk
z8WyoV)36v#I{P=>Y^q7vMwv}I`^?uQA>9A&_aNV!)DIr+BVA~m9n@1{E-1D;>W``K
z6yo5Xoz4ec>39L;LOud6L*;LElD5i`B{6h5pMf2*VMV8-@5606OtVZ7D#)13c-o&P
zU{}G)7eQGGVC>w!T6c_DGWlH|4cHdrF4psNie{A-d@3Aa8ayeJa(ig92Y`_cv(KAW
z&ZD>A94_6cg0v0++utqgg(wWTG6E92m1h(MSFioPYQOmDU}Xe0a4b7<HJwDxTcfs0
z?g2lzw1J!OowL&3i!wJW?MT`!hx3jolU-o5&JE8P@nE1IJl!xd3hP2wgDW-{?pyaN
zt}ZBTbBlRm!F3v(7>snXr-pebGDLcY$)Ro4%xqj<J3BalnCE?47E^9)f)vdL<0S6&
zN1ZaEkDws*CD3%gqbw>*%A-w12NIw&z3?D*n(N^PAI#s&7!zVACW5kcaPC%@mDXX|
zMRW12ufG-m*6^%wFYD>VDCt`-+#jHw5I{I?f2iqZJuq-1?oEtx3l}v_&%%B1SRp6y
zlIOMIcqoYOQvRsqiW;ejA*7jHaSI|6QH!ohA7cqqN0v1rECT>;u9sj|yATY7IM=&h
zunT>MhJ5cp1vtoFPZc)`R)g(4OKQ`an*`~!k<D(C3J#ru(%9OY(hk`JgQh%sF{H7*
zV`@h}kH+TQ%G_j>g?cG%BVL3q+=ws~h8|Za2Yf7}amx_<^vR1EpCy8U4=J^!5NH`x
zWSv<e<LzoRvYDGwF;ub&$!04SvKV?Clem{G+P~l`6Lr7YGA_6x1YW@j?{o+Dgp2vc
zR8*cBce%*|^PMr>pRI7UT#kZX%KK=^AkZ>Y`S4vJNG<cB98>>0Ka=KX1~xMzf7#@c
z3Rz3ZSqdz|>89?)U6NJRAf=&1p7}dXHy7&1*3Ft5y6A2;`>1Vt5!b}D6ao|W66zX3
zy}2$H%S(&>KLR12$ra@5xLQc#nU;TTf??u;mb3D7o|;Coa7DBtqU%*x39nS}s@v?#
z8ZS&@`=Dbq$%k+PFo$BygS<v)>@*KVk;A%ub>$b<NbzW(g$9$|E3)Hw!XaVJ0tC2s
z0(fTlmn<}DbMv2_<W0cIq4LAZrti5^un3T6CkOjSzX})|%}N${X9!;Lc6X0|J1W-l
zF1KtGo_6A)H%1#({}Ienu#`jiG|*dtO|_ZVgzV96K|c4G9ohR(YB`8uc`%D+^EO;k
zuaY<tEi*$C0fB}>%P?mrjs3SzH<lx*ib4=PC8!CL-I4j{Ol~JIczQP;h2oxsSR{bM
zQ$QUvo_7ZCl$R^QJXX-sAoE^}hxr#c9&U0HsK^tB0!~Kt`8sC-=kI)&xoaRh$r9eH
z*?B!Zhrg8t&s=&PUR*A5=2j!iuGxG>n`_ys7Qi^pDawET*Z-i;>Q7?Ef131uLWA>R
zC%){oS1<3jkDB;AeYydE*M5FdGylS8eeLP`|61F4`t<4g<EKAwi05k?KR;RjUs3ID
z!SH|PLe`o_(f|5;cl1YpGz*r?d;b^x{In9Cp6tGTbkJ{iN3-su{Z4n3^n3m8^m+91
zbobHYM>~VoeAZoxR-&Ep<kPf&ahXKrok}%YudT0Fql5nF51^${v)jHLjR)h4Pom(e
zc#(8gmmuTw|L*rja2*t*q0Z~$(`Nj7cPD-w$4md4QUm*5iE5+vVBYCQ%SaOdEtlPg
zAJfkSAsdb1v1%Nf7OkG1?d;S~Pm#RUQg8RR4^Hbq`=s%VcvxPmq{^Y1kt}OeL?AA`
zo-Y0J?DTgesAjpx>2iC&#m6V70*L*C`mPPbhQIWixVe8=KR#>PrJtlrqs7zCuQ3G0
zf^s%6Ps0_O2lb<Lg`d+EPJVqApZ>akf`#qU&-|s2(Nq|Vo2CEL2Mh!p!IJjU)a<fi
zPEMQK&9hTqS+R~w=HcFH$AGhRT>q1-*x>=+N)NiDO?G6l>Lk<FXr?L!8|;rZGv49}
zij6ay(-Hg!#N89b7d!A?feNU`leQ7$7L7OIk{4R00`N<K?v$8KtRmv4C<AAq(k=mf
z?aS6^)E!XbWOx%S!3h{;899pH$2jSOhn5jxM~?tX{trA|zx|JS{@?YvFF!fw|C6<+
zkDc@X@#9+U`}uz#pZh=m)5pGb+W&%%0qy?k<qPK_kQaFB9QYu=&eD7U$0<rZ?@vFZ
zAA-K!)5hfTQ~KdmFa503KBtGhh=$g|Gimo^_t*GfBi^fvgQ-zJjUGNszdzjGIj!$F
z4GvrFS(i8-7ENx_G&`6;wleSRVB9p%)QwtKx9z%l=q}UHXwo`Zg;?!IycP0b(!B-=
z**xM8PG0^R@9v*&zZ6W85GiZdwe<V?5xy?fuGe(!lb!weP5o$BNX7fRz$Y|LfeE}`
z+uK{WngDYpQDXaaf2TwxYAvH3_63TTAJo>bpNIZiw#uXP&vgYX*wHiYx>7;IQuFtk
z`2F8(fNDNpcgj5ul-qF1J$bK;u%waviQwq!U>;xJt2cLED-dzJwc3AllXx=i_PUUs
zdQyq#IX_qvPsGv<1R2PwCV=Y*<_P|Njy_)Y2i>R)j2bUHpwgOZLxOg8S~uX*MNRPS
zG6oR1Pffrr$ZLMc)0F^fAH)V`PLhd4OoRQClnGr%yHmK|_%uUKC~0qIl$u`0i6A?}
zNlv-ME(f<;;?d<tekiDf=jjL^I~=T3fc?`|biod^o`H?194UTJa4)7JSIaW=0}I*l
ze%nS+X)I|z!t%y4Z|`oO0J(a-{YJ3!N84}E;XD=Vz1QjUZ4uBZj`q&WRSGN@eow(C
z$h$59-%}-|US16}@y!LE9>m=1uXaO_jzTtq-{s(Md|o&cEf(CIWc*2e7+dcqRNU+*
z8LB?u>+Rb&4=A%E2Wu>hu?sQ^P%->rK}AV?#a=rHE36np_2d5^yPX@(|LOkWu5S0L
zYqlq`@L4o!4ZH8)*g0wJivy>D!K8m&L!W;Nk~!a0;LX4eT*!wMcd#Q9hPRUOu-|^X
z9w*U2a9MG33HDj>bUqRy7^GL5@Duxef=QcX_TpJGo=l)kQ{)6k3sa$P3u;>I`v|x_
z4HQ#7z!s3^oWabw-vz*L@?+_Lfj4g7eIMEK-{Z$W*IfB;V{PN-@ABV$eC|d5Go?5|
z6a+a)OLtz8uQg~*haQ=)GwJ*xLoB+27ZU09TD-OvKY?sS*7LRhgy&D4ZqDa3fB$`8
z!lv^H2!}6#k4*KmhW!+2uiF}=+XKEddm6Yc))MA7DNkdwQA$zcnMbcMESAtC3s1Yg
zrydP_K`|Ob%w(wV?Z@r$aMDWbB~J_GwloOF2w;^U73I-}pf>3uXD6ya6s<uKtIU1_
zj2ib3ULL;%u?xahRLW~$YU*4LP(k3$NIcU8*DD^#xLZ;7Kb;;S;G>XMcZ47cEdgu1
ze<U~vc=Uhkjbk>^wJSB=;`^7|aEakmYP@Z}Rh3-RLgXAU!qj-4Oy<EmcJp4H5f$DE
zvJtKD_sC97*gTE}10)mIJIzA3$hAmjM?NH99V1Z^Jj5Gc=|+P7U+hQ%Ob57GVRKaV
zIapRq!29vRJUTp|%%TS~zSb_!*Pd3a?DL{J+ziueXk-=VgIWS>i1~z!A;ur2tFloF
zdf}G!)6RS{=w3_zbeIfW@4d5wgLwDs<e>hx68#reTJFsUgGW>zd%0{4dh&O8$n8E@
zFo@!j<S+LQn*;_g1Vz?a?Tm-5{>b-65EX9)v7Q~@3-D@BD4eb3P)JcbKt$|x2dz)t
zj&d!e+Xo7&;9!pHYG?#zv|d*@1jvoB&Tt?!staN%9qpnO^9R`{!F=MbdA)tKdr&WP
z4^A4JTew1+g7peGEt>WvTGB4+VcqSwr*i+yCjF5h@_)qeUpd|Cs$MH7VH#VA(}C8I
zLktmnUxL=PuL!mSoPd#%!-t-T2<+@TMF`m<9uWw?D_?7PTLle+Kr@A3)bGPB4qXB)
zqO@@_Wg>J{(ZlozNZ_3km8mtllic5F#0RH4>>?oh`Iq_5^j8ZH>yK`C5)Ec;fPI-P
z@Kg|4IDsK(pO~+Y(|%&wuRxEL)Pl`N#h2`65-Y@KqbYP}gHN^!+abCR!g?1eff`yd
zR*V~i!;g=(U5u&mPi_&{h2!ZfhtZk}^qD$+jD4FO*+n&GM|Z*PE7q|Z9SMTVZmuS{
zyxtDL(ufjDa@08Kfkbn4c*@w}Lk`%<90Xcg<_RQ^+}6@dUwGklc)D&|aOZ7&XT+!T
zbaitiD=AVbCcJEr$w1UQu@P?KR_<X1zOcvn4Ja{qLV`k;?A}o2!6==<xG3Dz%nMgC
z=;<t0tOjJ9kW?2V2c$R=v8{jLCTMT@&=4mQuIVqjfDqLPxE3)@;(z!K&TW^1Gys=C
zXuol4FfxHNtcMYYlCd~rXJ`fx?kE{!q<K*=f`WOC%O=2F&kw31tf+2UmIxmv9yzpQ
zsl;n0=G(*7`Nf0hWH;dv4CK1tatoOY#Zt6ZmfftN<RFbFGg%{9NXFliP-{=qw7>Lh
zem?9c5w?l*WgbUl&A#kLKz2!)KIs%KTX1p25wPr!Q>O<@WcHpSOBCG1exj`>laB^8
z4~LYHE=O{kL-(hZ$P7{;r*h?|tdmrp%IXvipUdIXjT^ybR;RZBhuM~bp!g4Uhv1vw
z<m1zFRyG-rXkm*KfD$*BDA>m9+dE~xQD$E65glDcvGmIZL+j(&?};V$^0mO_J9~p!
zxfJ7<%NPe5QTr<Zf#~D3o&^Odi{w0|NT_+5{RNmtQ!N0`(MWIsPVi18dqp+Aq+o9{
z%1S($b}#xfQMasNkO#h>tW~4nLgN&Eak22rsV6Qb{jTYTYs`sYG|?GrOKU1=Y2vf}
zW0?rGNbxs&`^WL&>Hb?5>bXyY&Rujz-Kk*OAoPNwcCsXI$5$u&b!-JA%5v;Ae0aRG
zD@9RHd(vSI+F~_;LJ8hzdJtAX<l<YB&+atSWdcJ^J@=rywf5QF?6KR%8HCB#UR4s|
z3L=7$y#+?rnSfx{9hG^>D8R`NWo(V3^o5b-<mGk@nVG-?(w~_ZEL!$oZDyihfQy=5
zM|fv~$1%n?>)_DM;Io3t$PF_(kGs8&ftp%0D2DmQV07XtxhKqm_5JO`x>Mxz<YX<b
zADP9(qjmAf_BzkmP`QB-hO_>4ek6o{U;H;?`(M+YTC^g+nXZQD*Fb3<&^!c#?{(YB
zcq$_p>RTs%eGci!ye~zgbi@llu37_rB)(D1b?wyXAyGUfmd~OG+f{_HNG$QAYaP;(
z-4~@Ckyc}9<rSFa#AC=kFq8Wk*fBM1V}Qo>*(T`$)#xIr0u)Ot*0<gO|DUr{Bn5vv
ztDlMU$aquPbir^*N=4^NmFq!qRMf9uOq3A&Sgz*`UDRbU6;?$pyoE}t(mPp!0@wg-
zMzO(k49Du4jG7~@gCxH$XF=yf!`YKzXy9WOlL2U!JWUN-6Kny4Y{>>Bm;zusG!peg
z?Ma%3^C@2FAwfhVEQsY&ctmY2Gu>qO&<h!+m}<f|85u^lD<jVZfHPskOjCqWzYs?K
z=+NNk5nL7PdXzEiv*s!^KPRVXt?F*OicL4e>r<;n&Vr;XwdTp#5D-<etECd|;mA;W
zlR32vVM4D#%TQJ&(3Oh`Jw9N;$b1Cin3hKjQ3n7)VNl~2bm7DoVZJnj$lrrdQIyyN
z=>&iwoTxW0W!aD#Ch>4SNct042j)1C_l<r?C)klNu7ZGgC3zE%gaT8wN*%zr>Gg-`
z2Nm{`K}*XvE}>za5EiERHcah==qmGn6VdPlrlfkXwYt}BAlrlfu%DP)Es~Q|4YZBB
zD;=cMmzK_=w_?XBj+*2z*;631p61C?Baxsko0uRIfEaujyV2{hnPQ9X-wS{G;PNn`
z(GM;Sq_Mb9A=IB}<{xD7Q(l(OC1e?E9<4|peXv}SmvjJFx|1!sT?;M0#i}xKRAIRg
zGQr?P5B@QK84bI`@suO2tLZBYrHdqo^~3NSd-z3D^p=E%amRe+Jy#@Fpeef8W0d<e
zjV5X`7+03Lvct^Jqhy>2Zo_yWOESv?G2`Xcd7)j;Wi4FvRyQZusl;N0{Sfl;K~EVG
z6)as)M&7q?h;V;vCLTrDS^J(nbQwkp%3PFzn*ls1mP(%cqx4mH3GEksWz%;}cAAQm
z+H5{jiZfGbCXg;yX?CO0rD>K{b(>+G)m_C;D-lLziTb$NBV~3O^=Ewd-%(d7#m{6q
z!^5(I?;_7!h(_}*9i7&-y><NEQ0<gM^&BF{)(<d!h6O~B{`mTHi3WI9^rl^lY<TuH
zQV#Da^@DjzL2Ya;b&hWOk5Q<)nTnuq-mHAv<jmD<g~>;T65HB~K@GNg?Otiidp?sj
zQ(}ii&QfZ@En^dF>N)nnw>|R|7dx4L@&x`<8{M4+@|#o&!@?A@AbEsVUjV)N^UsL0
zCjq}yKjPG0G6}s5tsKcpKS06lSKIqX0d9{)Z1YB0gNl4zH5#GE*qCNzSw>C8*+4@E
z!IqWRYBf?*vJWPf{qaX3REt+argqITD(b#OTV%9bqjq;t&Nja=?^_HRQ!+pn1<g(+
zVrbO9=xx^h+o|61)wYcO3`k{Yxz?UqiBG&Y*Z%X5?CRCz8#gqI+;tkLZa^i(3JTmv
zVNzY)$?RhvJPBpWT9T3^(-Vfvt#7DnvcY+}u?RS89&mJPk0L2E<v_J&K$T+;=u_Ux
z4pAghwvm!ch!uyx5}(q#1uP~lGZF80EpQ}<_v^p&UO`peH@aCe0Q)Mt<zIKTT?*_7
znNa2~w#2Rc|1AGc_RYI%7tn0~&!-!YpKQ4PpHDZ|zWaaP$LAjWKfjXiCLy!eZ_;=z
ztKQDFGrOyf>+hQC+j0ZV^!N<-6ngG<=5r7YI^|M==HiaIfnZF2mr{1>oD5n?Z#*3q
zbp|aM2a<<WG%nQiI{hs1JRA3VgK?`<*rBz5+jvtdC~ImYAkh0GI8Bm?XSxS{`(bB_
zxi%LJab*<@`Qv8z>mvRL<_ONx-e|^1boU{q?66LI+h+&O_^^2fnu@h2jw3J#Wv>tF
zM<B|sZKPi9?VQH+HuAh|Y#-L?H8>DI5%p7LAS;R}5k+Md0qYHbg~eJZar~y<fB>Bp
zW#b|k##!PfOXzfG?Rd~1N*-AK-U7B~b#yLxgllQEAv(qU#k=*>ohrZWkCaKrQR;1L
z(7%xH8*lLqmDxVne|1#lfI<bKOTHv@O%HTfXx6<jIuXU7pVRs)*+VkVC}NCH1?Lo`
zGaP%|P$hWk6|_rICOWq_6I#e2V*hBr8SkAP?ZnN-_D;QwoTcCg8rRWqEcO)XyQNOs
zr_Y{VL&4SIhZrk7R0@v6zXAR<PEX^#gY8#7M<VE)ZVH*8LKGZGVtYyYp^Wah!VDAv
zvcm>CxE~yE?_#g7w@+Vd#r+{%vGXOIGcr(d$pGv$3J$(*n<*Lh=cfLP2O(-l!w+WD
zp!S+YLP)rb9(3frrV~kBkf}|>>oC_!T?}Nm+iQujKu-Ax0~$qmnHu3x)H?g49f?tb
zmB)>Zg)!=p*@qGNNVP3#FODuhtx<9<+OZ-GWm)ds%6zUAxMGu%-8C9xuv|ADNh@J~
z4Pna=SS2L;{h;s`ZlP_@I%3nYP&`5tDOLzzMgR;oF#D3GgoI!umPUi|cmi5yL@b^I
z`HUw?n{ug%n)N0?^#`q4S7}hW57Kbk(4`#nwoKPbAVgp#Fz!X;^Q6@Wk8VJGoyMhw
z8%FM8+QfRYj0h6O3<gaPjqNP6s!~jeZjy+emjgRjG^J)yhLBr@d4Q|fRkZCPXnOr~
zF$+s(z8m8$)NpVIK{p`RlP#}ZbxZ#0+7vNku;iOX1_nU2Ajd6sxFs*}#K)OHopSNm
zH!InjUu_kMPSV}9oPpZ<t^-OVnI=+RjYwy99rvcKVb?uSxk9^l5ziw2_NBCUOC|1L
z6%T?fgm2S$n#|#-QLt{Bc$<m=4!aA8?+%3S6zu2e?kOJF9Bza!+8%P_RcRE%zd4`@
z++niTt?xMZ8bl^5eTWb|A_Rz$115Isfxr;~>2vA-i&?FMryFP(#5h4(0Ypotn7^hC
zDG~)^HB%&}*G4i6k5RE^QVu)W2TUv`G?7zD5hfJTl5nBLj81${_@Z%Tb5Tr95q2M{
zIw)wNC$pS}G-5~qyAYlqI2af(g;!4zRjKYZ=SIx$z8Gv@(5!!SqnY4~TB8omff(?7
z(49FXGQIKSCgZ!MQ*;H0*J`i896J8B5&w2<&s!$SK+gaLu7D&dF!zWzgg<gk$YUVX
zWia*3;)B7^z$dSN7wrCe<L&*7#_cmWocV0f?M~t;ILN|1XsrgywHjWCrc&OE5#xoh
zk<E<rNIClfGT)fP^jd0p&>fEE0zBCrN-a4w_$zyC6eRy&D3<Gr5zN)<*&F25*cnzn
z27{aZ*y<-hU+0Qt7)~J37cm^QN}id59=eJxs?QP=0APpA9chYFTZZ}V>E2#^bljA_
zM)lpWoYk9vwH4ZD^(H@7O-rq=wHwu5lbp&)y>Xa6oh(y_V7Fo*_V%ZKmd$kZb{=w4
z7-;`8E04IR2Z8*V9i$Icd4Y%f)L32X1SRg**n#rx>|O2st=&cmf=Mr)&XO2T+IXhD
zD}RF90!g1ZigN!Snqx=hVUprk?=F}yOuG!|u3|}JFJc;iKl_2m$KLLKgUI#K@loCM
z*~3i)5ruUyVhdI`@uX*6p}NUjb8r-UImtX(?O;}&r8#*G)inJ&m(QP*fnG93Ok-xD
z@iw|_O*<c3)2=PxK=QQ|V%uIA1^WepnVtbpKZp?!M?5gyRD$kBDa!Ux2<&2~C1a<I
zp;b^)P@4N?Wk^uBHJF_LL29k7t#tMQ$M7~aEe2^R-Z-Z&FxpOYo(3^B4Qf+v5?&OL
zE_I4iW=>jas4_tf6CyKA57Hd17BIGwyseO$0Y}4@MIYI^ltc*8L?jR=6OlOFx{W}$
zI-xNQsy=cby<jb*7>x>^Mz8hos%9kiJEd;=BFotUSqiu%o~P$azBvYt3O5Z*Zu$rc
z8)@o2M-BuW+ylp-qm!&C8JKkmCVG8VIUuatUss1n?CN+D=2q!`)yw5(SHr1Hsp{Yt
z`*z(lj@hl$bV1d<rHs<tnIqPem$}Tl2b1@}PI0kiwOW}RM<BdRAxX6``+5Jo)M9*-
z+6{7)spID+TftJ0&f2Xcfp|OOv}eTYI9|KtQ7F@}!fc{}6l;Po@*Hlkbd0)LI=HR|
z&q+pLGWQPNBDv^x7(BmVAdgvcIlPIvu5nI~D`HI&++b|TPNBGvj$lm$a6D0InU_gu
zMndYNL~aAE1R|t?!df<kO%VC9D6tOdQIx{92jf}BeL|4L<<?U)?#oH>wg+q``I7j-
z3yJ0=<%;ChPa4NN5byYKyYagxZ~zkk5DEo=!`AeZ8JIAj!DlLiDH^9&v9Uow1B*_E
zK5phd2-BC6&YTQOHcne-qvR<h0!wAJ{a(oc{&A~)h5WxxG|l6-1Ro}w$Y7W>)kB_5
zMTnVFXjP0!l{YyT_8qwmGiXe%5MHeY@0hnU<W8#_=KQYU03CF_MQ)$)=bAG8h{^P~
zd}Er3)F_`QdgvHjr%`Ox04-w$Z2B$dXP4jZIE+0y|E@@335cIZhT@}BrbusH8}7^`
z{=NP!ZoX}>K@2;<Z@QI|VwGSwrh^A`bl?DxIb|>al1~vzAt2l497Y8D!@q%@3~ET0
zNKVXy3k#EC1zq}?+!{6eBbbKsc@G#CQFX1hzL6TUC^7jX8Faz!XYI+epVw0*6mQ18
zpX~@a8d3&_rV(sRQnkQf1geHAUG<=(3X0E<(!eBhQ7j6L;Ze%mYtx+)YQeA)+dqPP
zqBKCm&WCt19uH;)x<1{APmYfdPOZ{dlJq9TgauI1#!}^9A?4h5YtqvCr}R5{-(J~t
znsFK^a|@Vy!P@|Zk}8#=9BAyGk_>>o@B9EKDS)|wFueA}b;w3X43Q~<OR?w$?Y~vX
z0(dDniAyq(6Hg}Uw|P}4w)si(wV>Lsk<hJPhDyCQOMMKL`c0SmTxB@m(2YYC_8Rqi
ztSdqJ&U8G9absfKV{Mj24?g>+B?(PSVopm^a9Wy$rlnbETAH3|NwTJ;nVuF=emV=5
zZk)ape6LYFd&+F{PRHQWMOvq1VwN<EBK7;D`zdCsi^T8O`tN82xqq}<e``nBuy-@a
zg<Mft-kqL8j`nR#a2KzwKeK9})(^P&801Q0yl7%7s!FT9ZSb}>Ym#9M??Ee*%IQhH
zz8kN-ivILIue}OwNZ7)t(>$L8SIh%>Y@aC_Q5Pz1v_3l5=iypj+xdl!meFRK;aGq2
zG_UPcaev5V$!C#mY?#AB2Rc#c4s7yL{F(g1787`9EkTDPF>NUAZwx(AR@65G5-o|^
zlAO(5D%B4$i-u^IX#py#B7=)#F^oU+l~1Tw76bexvMs~^{L`F-7ixl5Esi1xkZ5x2
z0kkUq8&8d<;OUFVYD7FD{lBPGwZW(wHLD>Q%zM`Tqc__J!GX$kvDHTq-AKQ&6kiw}
zqijsXkI}lIPk;+=4z`ct=JqS3JJAc<WLPtoRP7Pz4}NVPzC7ED0eu?B$A^{hO*6==
zcvaHti@mRt8e|W0VOJ)c6>FUxj{$<vcQ2DRhK3~shTpJ-{y%4jFOSJX!M-pm(&9sG
z@n6|ZrnQ2WWw=6=n0R<SR-G+XOY+G3VrEpeC`ZT5{hc~{Q9=PoJi!{eYLvA8=tkXM
zuPewvs|U0P7VQz-iL^L+OOBCOe?&w~>|jiBmnC0DroTf69Rn^{yo9PAvtFQOD@=)?
zh$cl}G$O8;>x<-4tExb058cNX)A4))VgM35rFJ6-t#By_tZIk8Y^oO@`yFbZ83i$m
zkft_F;>vb|NK3`0NIj-+m6Wfu0X-&u0_7ZL0-#XNZkMV1P~-XoXf-tVoT9cxm}Tra
zK8lGNR*kjVdN8R{Ag*OBA$G?Mh`K05Rd6a)CF${`XsS6tvt8pQH7Q?Wd&XOCFdvhZ
zxgwCxpvOulZNG)zW)mB|pcTAoh5vF9)@u-1aj(7^k1K@`d8vf1ZEQ=6J`uY@u;1VN
zUC_l{!E&C~o10vyBaOjX$6U~^7TN)wn`2W8O{!}Y_)&2Rz-*}k@S~DvMVM(uOTWc}
zdF+<(R@9)|t}&pjDoLW7TO$KUU}Tv#*O5%{EU@pEC{t*S%Wzi&M1Z>FJU!N`A0I`}
zJx}-c{(Ue3y78K@5h?_iRvu)q3V{SXa_>V?=7Vo`4p|ns5|tF%s9>@M0PXd@CZyKQ
zP6nvGkCLuus-Pjqjets*mi`lfAhF;jKO*O2kE~?)(E{C`#52#{x-XhraKG&R77H;f
zepPQ;js~~h9dI`DTsZe+;Vlt5{bwUF_EFL+8|xvLKpg3i6c#6XRloOM<>iYuP~t}v
z-<hHR$lN?E5-xp(ugCN~X!coddDlwWc08Fy=CwEcCdXRIcB8pdsiXy4^{~pNkS{h%
z-~;`3Jh5MnC-NQM-B@kI1NjyYA@eOhsEP?$Rk~ZNZ~JN0E~lZe9@?eg3M^d=o?7Lm
z-9fh{Aa@G#V=DTAB4=K7)8NS$a-d38U=CJ2!~rMX0Vk;eC%yqE?tl~TfRogK6aRqS
z$6OR|guyyxB-+#}L*pqGOwUY`o*7YcG93^5v-W&EpRoZP7f-*aI>p(B##5f(Km{QK
zpu1eVHB0OcJa1AJ>@Iw-Y)mymz@_m)y{e|F*!_6lVMnZKlT*q+u{~{hqfpT{$Wf(4
z+tW{}LiiNRh$pD*<<@jL<Ed2;z5HOMoucwNfi7?2#eWKZ<5|@#Gwlw?S6#OhJ+%s&
zlVLo=9_MWWBf$_zGX-Djd6f!-xEQLCeurI%zwwZ}9rS0ig8P`tqV2J<A%GmKhgDU=
zqKL*LY~(uC%C*(RwWp11s}Zw}`217c@1z={$4=2vYuKIHMe&i-vnuLqTUF3KICj|+
z)I2L*BJdWc0-7O5HKZuqlk>D{7wmR{Or(p_b8J?d8yk~sJ*--#X1-E9(}Ir}Py5Ly
z`;ByS@xG*kpfBd>)2MBe66a~vENBxD_pw!!GF@L&p`<mGsb#|j($$l^>Pe`2tMm7{
zpq}vC1=e`w>Csa`y|9W?qCj7H{RU^*bQ*wFl<^##=T%uWne;p6a$65oDS6Y2EgA)l
z=c+6Ovzh4g*eve6QW!t&UX%>{1yv8eL#|6aV3T^27t>FxZb8WK>nn<xf4MNpg|bun
zWxeb5te3qWyfsYw)m!~YgDjn};t588f<c+aVxHG(D!^7BcfK(n@|mK}%)>ycWXvQ{
zG6{=*ESJR=*wn_ho>r9woV1iV+(${IaduF@m3`Riun%_j@pR7Tk38D!$5p2!QF|4R
z#C=nh3iCTNHFJi4d~#}AqOB^ky6Hg8p?*}AR$N3zg*cL;wfyXEKnL5j)nw>)q@Pq>
z(?)Y-hsQ_z&EtkTNJjl+j2z2Uu8#4@20Go?>+MsHKU6>1Zq`jkusxoSU=>|k8GOzx
zBV5f3x&XbY&=<!2zV%8n?13u2S5+5e8Ov$+_^6(O0GNIgT<4X$cAgkXmmgzDd~}~F
zKLa|ETgPnGH^i48DeDuC3~&NeF09$mc3|BhY|im~+U~xqy<dfLn@e7^WCrcsgZcaD
zVvJW1y6vq86S^*N27pFK5Ct>^4a@~R)MMvwtScL|C!e%82ykpwZN5g6zDiG7KVy}x
zM32_Y_%~VR(WbiZFW~yiZuhrx{Wbe3;{1DSn{W$g<7&+C|8)RSVic}}v4QeHlA$>?
zV7XvvJn6L!N<gkbU)qXYMl~>u&<h}KQ1h$2E@oXJ_jX#-Pw0%0OWUd(!^!X6!*?kw
zcGxVWhjAS0O$DwFwdu5GMRU-3)G7QY-~&)AH={-tjlKbqUGn8WQO34TDvn?=K(Wxv
z$V|=Q>&>?B&F<1E>*+UTE$nn2bVHc){zFZJ14Y3~08X)@k&~~ah+C1^Re52T5E-O{
znSulb8-T%dH%q@oWq+!2Y8Xk&(8%x|g4<?xC4Wcg0t|&o2kwQ{DO0&Iy|UQm6etGh
zUNDUJ6@#+rpa7E5;=P<pi)fKo(6S+lV<<Bp&1Xz;9Cy~Q#gE;Ar;P4f2MwU8Df}Ca
zb4)JJm}<lINQ0@aRUSGwErc1PTtPrJRZJk*v}-IP7?@QT6c9GE1%v=ZIfBBbE96=2
zfC(7d(Uwrc#pxB#K%d#Za)zSHS_Q8`z6YyHse|#<noPR_>AR|)_=%No=3jV=K$)f7
zLp{eIn66SEnx*Wso@H;vb=cuJVQ1eJ44$sZaL*H%3qpe@<6*!3c%7Y}^$<{T1h(;@
zmz<9vprh%Z+#XLpAqj#_t6?-UT6rDLJS3(!XkA#*VFAT=4vtUjVw)Zwzp3-ByF3B4
zlp;rFG4z_FU4V@ioINH3R$O%2^D)3iYx^xXJ0)qw`t}QmlSvjppk5~nWEo+{xG_jI
z=82?Ymy*+cT5rbNXU$_ldJi~F>)X@jcJs9O+<U8Q`g<47n9jTeY)dWUX48@1oz^?!
z0*r&mJo|%37cw)=?frwlp_g>|q0s;wU()jv?xS@03~a~n8O_AjkW^si43qQ1{-3v<
z8D1;Lb2hzP?vA)qUsivger=7Y2h0l_^~+F_Y)|Y>c<S(I1vAxs0jKwf+li`$oZpox
zM~XsZ<A`g);&mxemLT~er`EiAWV+e3KfY3VuT86*%!o%=Ey8LoR1+CmBet8I^w4A+
z)N)xGIRE_V&1bLr0_TM695Icmvo`1sz_Cwz4t%i~NDBaz5&9PvSEm!fC^0IUv_KAU
zEP{rr(fnB?txf$#cG)7w=8_w3LLNhJwh#7qW6&~+KgSJJKQ`*kGIa}hBn~R+$_f0V
z(XkMSX78ZjdzDrbVw}JMD>abl2wDU)$&mpnK?GY99}^>jt(X)D-wDtCi~;~L9z`#r
zwSbXYxKA10+KQ#!jl{#b84IjLGebskX#k3tUNZkA2tGI}rviwZx_9nqDFbfUt;QVT
zBN?fs8X3?A1D$^DTua~L$Cv->@{aq+&i_?g+gMvm<^Nj$`FsAa`}o{X{x4FT#4&rf
zflTl^j#ZkR)Qfb&Co84S;o)!OZp_ZrrBdyB!(YO1R}+Pt+&<uBXB5C1qSX4+TJ4rO
ze9X#@d-Ym6^^mM+HvM|Hf!+OR2rWB}9drszHK1aLQ_xeZ&49}!CK_@N(#<<mHE!O7
zC*Hj08yFC1NDlbg8}TcBEcy>3<egLhT>Ag`$<OO9{eQf+@tywP$LAjCzfJxz7;~EV
ztH*a>*5pw5y@R*$>G4@(rw*QCQS@|uDP0D-=SGgs4ojsngd-ATe!OlIO1R_(MGGd4
z6`VJSE@VO2?#Gn77hP~?Q$evNR~zQD&MdK?iMm|wsrih{J$1{WtmTw*pY;Y^LH|A_
z-5Eb6dmXK9(pn+_pEOMvFsbaku0#S8rhNzuR1%b1NE@oaRNCdLM?6CKiW)%UH(b=o
zNg4M*ScG5CC?6NWM^M0N<pM!=*DmB0&zJzF=%0?2kFi#AjtYzfPl|KQj;w!i{Vv2%
z!i(Hae@bz(IM0i3{JsIhS7VjP``5ot@;a-)u%4BCjh6f?kF$!v;<sSAXk~@(zYTXZ
z7(6_U=pDuBl=B4Oz`b<0!y{r2z#m9G<of4U#8$IBngczNr$IS1tt<2ku4O^wje@wb
z@<;cR59j18Ev4pnJpN-oxx<b<gA<2QcbXLLBLus%t>$J=lkOBkD@Zqx;bfk4%VA)r
zhPi%8?-W=m+$}<GN0tl}oDu&>^*QTbj9Nl&?|$iC6`yHi3`Nsk5Yakah+zy|WmZl5
ze)Maf{PeN-KMGfV&-uTF_+NXHlK*S#-}%4$_}mZwcjLJGPTJrh?%q%`@AUIwci5hM
zvYvm59OOCqzs;QCMe>X<)SQ(%Jfo&3tGJsAJe;aKo#2^Ch4tLZ7`|*!SUWR9W(3z_
znUL-*Eog4yDJQT$v31FHEOY#sDIvO}_Vm*PyihrI#eqQK=35X-;0pY;;Inrczn?Ut
z=XfIfiZI_eRUODO4JvcmZ0zqeWsW8%toubetUL3I_5y&q-A%q(m4hN~K|)Y<fjl{C
zzHXd?Hth6QOJQ%`MpEWXY!}q@$+b*0BAQiSF{qB{X*rsVz$|2TNoo8D)`(!`OD9n*
z1u(%Wp;iEUr`CnqV<*c~BqREN>3p<{Bi>CGTxV%WuXxeb<MrUCI?=MfsqmC4em|bI
zu5P~l;l7F=-q9-DDAn3hDUb$6R53s_-)uT{3_9V|(Qn*3)9z%@`ZSr26JW^GHQo0G
zSEN9osBdqYYnc<8dh40G8c&aNFKv@a>sGou!avgqCj2RSKkW?NSg~*arrNLuaIdQQ
z@Xx%VHpaSYtsZ3rvEEzjhq!OP^{gK*gOMD-LuQCBnnYvMDsHA}a*Ab{_;}Do5?P5a
zdFb(Pz3drOMp}%gXHHoZEi&!N^~~tQ-mO4m?#_w?G<MPj<Q-{@cjE0`bwRiO{@-vR
zwfIz)W5;}E)ySOzs9)qnus9Q3J=;wJowHWT7&Eh?GI$uz9;!vycwb4gFJvwvn9W&R
zE!Xp9fOPM$xonuafGr>AgqGt_V}y1W!^3F~9#Y)oN&h=Oe#`hcy)8agVTl7cISt|D
zqz_Y35Hq=lg{fpKgq`9|zjKXWTO6w{f{{&#*8%18?0=>$9wVe-vQMxD4fg>MnofxN
zhSKN$(XT=P#oNWx?Re+-?5Mf0tsXULSnU}gnO1soC4=zYfIi0-;@I0l_9xH=v7$Yh
z$H_Q8@5YbU%ac)sylll5K@<w?iu52`Blv$ZB1HhyrEjPzAkyCQ?-qL+04zY_6$gri
z3H$f0^Y6QLzSPc{euFz_`h|ARH+7Zu{d0p4-T$Fces`7#3V7$Yf4TR6Yfqm%cJKc-
zzT5xa&*y&b|CHP2`Z`>8JzgqwXSHnFq-R|;>Bic_&Y(XMZ(GwD*6mJ#ceby>>_VgY
zFzYo$a2s7f2?GGRSD<qpD|+dw7boo)l8>B!<m%!JnAl*7glBOQt?&cECdenEihw~z
zj~4wQ;~;Y?M3gL}L?MBx%tR3ANez5`65ti^;q2Y|lc(?HuNCqNQccFaLFe5XzK4d_
zo{EP1n^u7B3ANY0oR%lGYV;rF|B%i?6FWCVkK-!Ok`AT{Uv6G@qx06NgQ*K1p>5IZ
zay%b&qVuk3=ltVZH9G$&aAkFMwelbTJyO|J;3#>th6inF^5c)<iyRd~A%Li8AZqsk
z1*aSmojr;s+_Q}3EL}G%82}KrV9}_1-R^cfvq&HzU`Ka4Y7L_Ed9Nq92`CT(LB_3~
zX>ZXA1gu_NfsoY{zIvj4KVdV7ZfBDL*hgD+c;Tzj+EX!?RW+9w-&*A}N)2R>BGU^-
z2*UU!=DbEQud4yb8Iv*B39wJS@iZ#;k%f)=(J%Nk>i_s7#OUE|@UH(J;%f0;s0e@)
zv1Db5vHkbe=%GMmBQv=6HI(L95TcxR-Z@5Jnh(%oPHy8~|GL}ZZ4BE9K?}T0+8OVz
z!|!Oz1O5tbs^1Y$*e@<zg(qOs45(&Po@D$-ccd#=!^AL?OQq)Vuk|Cm6h3-|{=)l0
zKKQ@&Mman+7+(d_jVwdP2{Ee+_nowF(RI=W9bNlo9bez3<7-q7-l$_*-dl7|4M23Q
z{^*g~OgXSXnjWV`#wra7<SQ)>h6*PK;^f)>QFHC-O{RlU_ZIV^4ndiW-MFY+!juE{
zG1@6BH<KBKN1%pGhm%Nm3FC-X+Jma)k;1Mjm^R1|&3)u!Wr_*;tnTdVd6acm;D0_+
zpCS2xMKd0pf8;}#&sj^N=5vkKBo?AL3#yPR!3h0VKiYl?`AL4;ZX8uAit6H#4L}GX
zAO6nXnbqEJ>el%oM}?7Y<;RVtShwSo6B%_cbqt(f!1ze#LVM7mbZ8v@Zf3h9ORx-A
z<P3fYcEdx-`Cwc8#`I9O2CGjy<wh(?p)+H$+4GNN5`pff+UL0qqpE=o&iS<40_$mf
z!divrzNwHtvuf+g6VPb^)i0SX$Q`VL2s*QvY7lV6+hT7%YGbM!(7yDClR<YVSodzn
z1&tF!dfS*x#~0Jqa0on07vjx6kID~b6$DKnJ3cBuf{7ys@6UmwN(e>?Qwg=;oSOo2
zz4~@E#vZ23XqY`j{JiWACO~q;qh<8=ZKLUsNiw5fc(PtZzgQgIi)9Ry3RH`;Vi;+B
zoRhN}d?`++@Vo2qJErf!_YyUO1tdXS;#?|Akbo9A1Igsb<Td#WjzN=<&ClgT2HlL8
zEyOY6QgTlS1tbxs{NuqHmDClYf(x)X`&hKPD{?G)yGtL4#m5P3@=b<`!9F2-S7?y_
z?r1S?)FV|DosLKUd)(_SJBO|_pIK*aDfiS(mGK|EE=v0kT{;a=_yf6cI3Z$hjAyx`
z!z-eJ9A&_ANvjb()hgNRrHd+20|gB~v9qSkA$HD8=FmFaEt;?aQjJG{!YXCcGz<Sz
zw8h;W;R1th#!H^)6k(E)UT7^@A)>NIjfX=)vMFDYXig?7QqO5L@UQgHIvo&sg4ADf
zOO?ox=f#PI6-%e0W@|8rnme=qfXe#FE<bosi9jTU*C%_Ccpkmkn}IM3onpdo4Uil^
z!4q5|t6zEy`F6z{yIop${6G4vCb3yRZGw7#dv`b9ZR|rzt)1h;!|kKp)3Tho%9qu$
zlm7Rr_tO_?oj#!D{rTr7A^%_aIQrl9$B)<kSM=ly!lXX`!}Y&v{ly=_jr6A<?%n}B
zFaC#EbXWiXWWDxX|9>B!dyoI|W-s1v{4MPPZalwlXPsOp8kq;egbExJTS+__v=VWs
z4EZWRUg^_TG97r~K9y`vO_y-vNjw&F+aI+C#B58OZD`5JA67JahOCz1a}%f~qao&0
zVK+!U&<vm6a8J46$kSEkqiOd-$V%O5aS*+!*ur@{!PJAxcd{DFacIhD$4H_Rt`170
zkF9<J1_FR?;A3j|k=_YI;L!i%`rhw<1r>Sxb1MGF#`pW*d->cG{l96*(4e1Z5wFFo
zp63yuxaTQu)3lpoa*eVK9Xuu_!WTtop6x>;YTi6;ji?-S9FstCSl?(Rx&E+8Awl|l
z)(psH!o8JqIv<^@W@dC-hoYKjE)A$E_^uBM=hA>2Jgz@HdV%VTPIn@NwiR_HDS?FA
z7(P6n%V@LwqHEbwquUyI-e8j|_YMDsd^r8iHR^cX34k!uvrCop!z_;y9X^;)DSmxv
z{rX71q&C3GyDkJ%>?az{hwR~K8&|D*gJqg?D}CnFk>Cr|&LX+2L?;Rb_dvwr`_NcM
z)l4^_t8z8eXkbJc?A0GyJ=L#);luNJFIrili0d=9ZiiOHT%>^tw69xM?Wi`sTbAA9
z$2tEgp2Z!DfH~*?#^c&k=lmDq-;?j>|9yPEhWy8IwtOO>@hW}v`^>|>72(i~C?>{n
z*xaV5T6h(4xc#<NT3dhS6gm@yUT>eij$iIKsZ6v{`_HGU6vk^jir?(-Ixk-S-mGIK
z(RdU^&vftHGS=MRjZa_i?=?lU<tLR;kw*O$y2Y2Gs9d}Lc`sOyfRqK-_KYZ(?9UP{
zKd7x=KM(!4Y?Vjfmg`DXE`z;%MSQKitG%yUzt_a?|0X{NF--H#x>NFTu;hkQ^2vL5
zt#E*Fm2v`OJlsCP_?XySnM%C=EpEPTyp8d;g%>pXi7dkeUjqrn#P%>(rBZRU0hAWW
zRtHerI1nM<F(iA<xcTw``+v_|+YB?vp7)bsYw|7&U|B)88k6au9a&7kVy06{MHo|i
zjglUUnNsPm$nz{3s@f1ziF=*5Q+*oTvYCr_G3A1t$F8`h!0hoR%89F<U{N96aCY=l
z40=XUr!~x;9QMTrY*sF&ywP=ineA^_d(p0)Y|GiR7OAL-aQ6z0&=k7sH?y-R7f`gA
ztvSFMnVZ86K^;)W+4b3k!pcpb9mgW_@1_;y%x8~QHEc($^9)qL)E9^8bzql>|855u
zm$I&JF(OTCN**GqHxS$1h6WNg4;dgd_Q`@M3PNPGz$KwOAwv!I9DWxQZuxQef8Hc_
zD*)v1|Fx%UuKw@I<HwJ`^Z)np`5OHH@!lT9D)$Ni=DVf&(+yYe&In4ek1}E{T2=Al
ze({>g%{xsN?DQ^G`U`!W{r^QWK;-TJCr{n{4^P&9e)@g?-^b@`?Eg>(2>X@6P!vo?
zU~?f0B^{XS8@}#^*sas?e1evSuy27;a~L+G6y!53dzAFVtdZ5**WBURAHV+ZZtnoI
z_y6PC#$(t1<H>q$?fd?}kI!G{{!cq%<|ziVL&B;MvbmRqp<BgxIJ-y}?oOwpv9Gk$
zsi)se7rBaLgA+s7>1yg!5S;qkW@CGY)ODz>+uKX)^;GE$C7~*lt0hz=Gt`7s(pFs6
zo8`-D^q~Ukw$3>EC#v00%I#c(XP|ha6>a6q%A*&SLag>arFq*_O^7nw#DkkOkuEA=
zdJnlx*P>s3fz~(w8_bG7UiJrFG1%;p?i|#&8}?Yg`Jq1CivCrWgRLww+E=`;)oA)H
z4p#srs{051;srMUFj`9u*a@czJGeFKjE7qPa?u@ir)i^}wEhQOyoI<x4RE|meD&xB
zwuFfs#1P=3{<^!d87Qzu1<s#_3anFs$B(o6_|r^{dvLt7eIPdT;r<bBX>`6qFxmeK
zew5*6oa1pl^fn7B`G-`m8a+;3cNbUe-jfAIj)r_iKXap&##UB&H*}8!njH6xC-Wb4
zAeDi}b7UI56e_6ON~CTg&UWmZCGw5-Wlc4@GCk)xUe0lYY#BtU2$2)v5Hl7%m`5)u
zXSIyJyck5%9gw{p59^1)z&|hp{aO5v?sRPI6T&E(Rai`oEftl*IBWBTt82&{ER>ca
zp_;22v#}8?iK;uLSG{PC@!n`+!J8qJ0$ffD7(&AD&%8ho?S~qZqheB#4kL$aWQB{%
z8-Vh0v8WSfU=Pn7c;W}}3n;B5FS4q2Aw+@9{n!#p*hzEpoS-z552w0^;biPG!2Hcr
z=%Sn7^?U_-WZfRF&@VT9vU<ula%}Fzzr>Md{DKklSHl>YZ)B8_7`H&CV)Dl?QTnb@
zy_MEt&_4M8N{|>jy6fb!@8vdv!n}_#rIFyTaAzP43^&0bfBX_XURS?lpQ2ZSAC7hf
z?Jv=jO7x>K{F3feobh<qL-)qUO!qsA9;jO?PK3)a{_=~2@-uDP&%Suu_B|Fc83ix0
z=(1%Ilq-gAi(-<aew{%E#ShXePeBUdQH&GuT2=6#IMNrIOz1d*8AVGG3vx{{%OEiT
z6#yf<b0Lqa+{EIYF-QJ>Qy%G*F&Wl6Acm+>S9R-1S-=3qTZ>rwKt0b4*_MD)WeJ!&
zN7z8WV@%UjhIZ4_ls!wKzSExjXy|DaS7qO+=2QSdF`?v82_}<t1q|8GB4Q^GU!Lv7
zdvW6}S}a6=dLGh*p44|YWh+u+Zt$c~Sl$chN{&ls0SMB4fPj<R37dcF6WPLB=#z{z
z#6}f|JD&Em)zQU}`!cOU&2^!x>O9gxAq+B8pxqsb2%SjMmN-BXN=&R|%tG)=j`z-v
zc4AOQ)XNp0@o@kx4a4IMeC#705-VmeYc}Q}!5<s3=;a=#hrO2gWf&Dh3UfNWF0AFi
z^P#UYmzS~#KKXOVQzY}Skah~WY4Qwf1z3ET20WYe=nX?5%zWYl(m*ZNg@3-KA<dF$
z-bCz$L>kR$DXt2Z0OwB?kG4VsU=pI<u(cq)TGEmey{*4(){l1atpS05iWN8tij=^*
zpkdBEafu{r>i8&B98Gv29ggB-==j>%K5fQ_$Gi3Tq;Y(>f4XBvTk#!3&=6(p!EmA;
zNrnW(Q2R>I(Ju8?*xj2MOx@qy>MTsw=#Gw$>h|n=#BU)_$I74z_FkZ|?Sq5EvxDaT
zX5JdY4<|g1vs^rR(XbsEa~cpnGZth~Q?PqQcBxqLOt~@WilcH^bc4uM$)4~V?+@9$
zT8xa?)HgKl`!`&a0xPoE7Ra6T+uJq%1=IzKj)8rzp*MJ@<z{=Oa-`&}6NQWNeo<M*
z!aK)kJ0ncD)>V|H8sNnXpEZ0%@JD_Ncwk|bwn4SxmX`Nh6+;GEV%<Yf)d9kcx=3*Z
z%bc3l9gLDKZGj|XPG|FWyE~gfZVh{pLa2X-Hn?A0Br#;{=*0pG2r0lB(cumK0|?iq
zcb~#U9$i6&LjmDIdghS;kt;vg#XT~ql1D{vhD<u=c<*)tc{1ls<q#z%+NG!0$o{PG
zU$K<V(nCM1^fpiLlq2Tgl9N;jtt}uJk|J?&AvIS_<syrzH(0r-l^pj2SrMg(srU3{
zD1a3ra>N-wr5*$xE`yQBNn`)*(+yOIEMrI}+H$5kg*7Lj(DCOkfL<W;r@hK4$?XQ_
z|46#>cLY|F(qguRX&3cO8ID|zS<fF3an>E_>Q)@&FY2d&6QS6HFHFsIZ|2-f<~(LY
zAV$CUDdKt2H*$h`&`Fs5Y)2~plGU#z*V>f0P@no(UMkRAwy^fqoj>joM~Yq3`%;VA
zm-Kv&bcYa7P*w?EnEH_qXN!$b^oFUxWqU1<t|oo;?q&q*wm!^T13(r)put=t5a&x4
zxbue~j{4TVlgeyU6nA}GwBCSo1FK;Ft>179rpG9L-fq*S0e7~bzfXJ9mGbvoK8t(<
z$E`1~sK28<67UFBs7?}MU!Qy~#?*9tZ-aBuXk-smUD*?Bqc^4FS}O2Fa5uUY$>{(R
zLJwO!&*1SLFfUP?Rg?j9usfgwK|;W67|eZtG*>nr3-7Tsmg6uvLpM<fh3$lmsZ$1P
zRP`?N+Hzo__l~)U78Z2qkjvKWQf~l^bXw+|mxhg-C@yer+G;0VyqQ;%|84_q?KUc;
zx-XX@!<x#o`0mO33JAR$bol8e9&Tk>yPCWe#%odhXgc>I031=6S!A^`5-+Om&68L?
zbk5`Za(}dsT{8Kw1JyGHXycU&SS?55-xoikk<_6%(ClJW{6jZ!rT0FRR>!{|?QC)x
zvF%2Qyt3UtD(gW~I<$(a>e`v5ZxnTdh7`mPaYA|7DDy}dlg;^e_hQL>iFs?>!*kXh
zb>f!zH*H_R#N+N(X+vihQnh(_iU9?r4Xb!lsZeSc42dlEi!niBJBS?;{TZ1PD|s2{
zT~NGonX5kZ%y%Vv_z?G+Tjumt^Y@c_e6+puYsJd{2mk*U{{K9B#4b9G27>*1`)K!|
z4nMsovUe9U%p}<ix})b0{xOeebZ~VJ7z%7o6fGP3auvn~YywBrjjZu!&7@Hp`=~}T
z9Co()_kWOAvE%<kz6W<40x&QC+ml+#|EKo&d;Fh!`TTYKf8c-w3Ot^6eEvQVmP~NC
z({7e4P;T&`BK|p^0D^_%53)K_#}srMzx<f4qnz?nE=Qi+cv(I~+I0!p6_V!L<AL&K
zkm^>L%vnr^>0V8&&3wqSk3znATk$kgfp!9#jWB3}yP8k3St`ef5`JP}K}ZTi1xB;}
z1z;OQSe=dAe{^Ml{8B`D_EO4gP~^eLQKLsC&6s%IA!tGb*EYFR`L2{H;6bKYm<|19
zEBfbW+b$t!i6!f-%BexHW-^13daVr*xAdf%1=Rcj^CQ2T1%?3EaI$Ijc}25eK7=bT
z72{F7zWhir8`U$bFs%fzDD=&FdENr6Cd57b|07BtdLI2nf~ZVb_fYnr%$71PqSY-D
zEzJTp7%AjWiX%S?T&g@HH9pi4Ir$j~EEevUyttn*?sLS`EN!2X2+9x3W&a+il>NA0
zsQ_+Qbc>0^Y54^vEAb&v;=yD+ShIkM%f=j;MMBt|EP|z~aH6m@kyvUYn6t7ElPm;F
zC%e8Gjx8J`4;5Zvl05BVrZ5f~Do+Pz2cZWCQGBUF3*))nVq&$JSS=>9MGDmCw7V35
zv{=OM&?qvEqu+Z6xD*)RqJShJK#~9;GVFlqv#4X3_r_U)No**po-5rC*haMS0si>r
zOLd_yu+ap8Dx{1}lN*EP!j@>*Vh6Z%CoXWY0i7M7cRUj7DVA;|Hft(2uP6!20+Oc7
z9|<<QDfcSZ2iDl71y=jIWz}jPJ%BjG-1@>cf797apk>m^3rml;9u6YNszP>{etNnA
zshq?w#VX<P15ZFWH>W1D^C1SA6;KSF)|>I!X??e>+J@;!zrjAFR^$igv;koc)Oe`k
zaOZDHrTPI~gb)lNb1>HsUwcBhh{RBFC=tyP>kLh6H#;RF-TxeCsz?laZnqxwXPDh@
zHlIMVlwL?k6*sVBQ%fXJYWgXge_n8PqpanvrIwGZ%)?8nP@LL>gq5M|wDbgC_4Eos
z?dP?sIfD_7-s6XvX`T!dfGR?OFq10LO+b43SbE@Z=_$Igz+h_~EkOGh2r9Qxja2VB
z7j`gz&<LAcU=HXWW`tJr#;Gg)cX5SK5uv;pp~;*}lbYmXot}}<X2gkDbTek$7tXq(
z!%l1Z32ffm_u%`EPC4Q@zUrEC5bjDkb$k#hgVr2z2^{>brn8wl#Vp|w=TG0#QJ%K`
zF0FIiZ4fI5>bWby6@v+A<m^*lW@+sv^KYOh4#a=FQw?xl{;$VR9;f0zu5WzT|K7*v
z9`wK8YQT0o(aWbvH?6@n?D#?oTSGZ}<9L;+tQl_gxZRqxq^QCvJZ=`?($|$)JF}<7
z0%Af}o-^37Uc6z<OkehsoN;{Zt@cfRJoz8*JONlX{a=5wzUIdN+<+^=@AUs3K7XzI
zzfe%WNxPqZ*c!ATFQfaoKN1}6bUsODS-}Cn=%IB!pZP)VwgxVI-9f6aHSDWMo8I=U
z<B_YG=VNAV&)Hcxa$-0{qW_{5lhGE7zIdasi8|}(!T!;&@y_wl-u^3yTL>mc<a6~n
z73K~+A}|sb)T2O$<#Z|VeU9JMcjEnB@=D)7YBun}|E)KU!KXr;ik_yS#LMl|I@qmP
z)f#V`Zv)lp`=X2}SZ}=9K8Vjie|B_QTCZ)m`DUmf6_!ySAA4p4ir~mf6vPq9^xodH
zN_NMm<@pSQ{edap3_|a%iF(mbSzy8(hkyf5z5iB`S4_ZC)BU%z`dK};I@qo6ZJ!-9
zqbPb(tK}9K#U9@*=jvB*?=&;ZuHX;XmRCN^RI2L6<T&vHZmA?|KYC%Lr)90d=NqM%
z{dbro9;S9nPEmr)$qUr69PzY{3{uYnp^Mv>-S!{fz}&*+h6HNUWBTEM4Q7vE{z!Tm
z)A&&qAzi80c25=|Q%Fl!>G7L_(r7s-`b0%#)R}j|fkK?*f7sU`KE0(l(KdC2DA~0#
zjXc?lcbbg@1hG*E6j2cf<x&VyV=y>xwf{g3T6Z?n`ZMW9SASC%R1Jx_;CJd2vJc2B
z+sAtc2IHA_K_Q1Gi~{(I)UXPmJsMUQi7?4<AtYjz+X|;8{*F`khEqrNCIr0SJ^t;8
zppk2!0}%z0LqbXBv?Fj|hRRWA{Bg93aH&yms$f~U5FsygcCx$OtfwH7@sbJwG`ajV
z>x;cXK+Pu}kn(WZ^t3s@MlyUUP+-uV^6e`|*Eg`$humO~;(J|z$_9-$PkbXaK*>vy
z(*?}4(0KcK@$Bl(c#u8sUI_s$TH)_Th$O(C=5c(ovv2fjR!6vK*61MQ?|39ZJ)%ow
z&RymX+;RElMK!MIpGwTGyOEw<{#C3--UfLVbwM0b&Mc?M-*DE?OVWIs+0L2ig0uXk
zUBNuYoAiB?d~*yNC=6cGZ71WYcGl2h3>N@1r_gKg`#}5_{a>{Po2h$Xf~I@b9VLhb
z!`6hthhVSz(q2twX9Dh9-BFp*z`6&9i3MkblkLX#q5nD<8i~;3;K3YRXSzL#`7r$e
zHlXq%&!XjwuhikA7vy3fe^C<|euyfjF+h)3Pw^O>dS+?_5b;A!vNf58^r!q<<m!-n
zi~1FI{1>guQaXShz08a1h_L`9HpOT_XRZ+*M$EU!%63j|IWU|VIb%2x;<@fdz(M7_
z2MIQ01?b=|Ft!EOgKxY}wv}T5ECVWx{7SR`3(fwI&e}$gqBV>UNX3+MkBQi7$x;Gv
zNW9eaOU<inO!>Fhs@Co)WYxh!&SGik@A^zBO6f2#hya)jj6HFqj00stk<%t1aM8vg
z*Cij<iGf3?9)%G3VAzashY^SCwYm|@t)x0StR#HWYtx}Lq#=TKytYH=xMWTZQJxOc
z8;m8>k>O^RRRx>S$o{bpi@L~Q8`cP1`UslTAKa}+V{xBGPWXBGTtendv`J;6mB`3^
z%kk$CuyiM8;Q1^AR+u8*naeyi%Q_(aKjtr^VRty5ezM}yr_dn`CC#Wd65*iq{yDbd
zP}p3Au^+rb#8*6fG*(D;j^v%vtd;R9ldtj<=m47Aag#bSLh-U*VG5`u1gS(&QR9Iu
z$tFieh!=Y@=uyU^JC%6O?1vDx3BWJr;SW19Xe9FOP}l6`_w7yQ4rFls)N9`<9;s;h
zpM8?ee553RzszhY;l%)6o(B0ci*4AZ4U4nYqJ9hSFCc`NKfbmka#Q6j#S+etOB^c$
zHK!@I!s5q`sj-=IFE{Sj>Z+)Ih5*BMev@vy-|M$s&9S!~8zBWQTEJz*d$JKO>$c)M
z#{GL>oQ?*07bs_8RZ8O~^>?I44(pIR6#5iYu~<?70;Zv9jWQI$--6Iq%@~1S1_72z
zvNOfZ5qx5#T{I0Mhci?;lvP>OiN<gCKo5EvpPf{~dfAk^*9}JT#E3yL?{o*PPvx~5
zvCG9^D+rtyIvYI6+fw++DAeTXNxi;{jkZBNPQ8lPYOkKF>A<B{7>t@@RqAEDN325W
zeA;b6j+s!4g&JI-y;`jhgtb(I9J8%J?X#m_kB)yk;$=^1$dy{#qbO%>xQf%-A-%ZT
zDS_RHuk+oaz@zzNtDnTl7)>7r-9+nP3+&O`$8nSN$VWa9L=yl#XxfKm7j}-OCgj?(
z+T6N9GsF^9w^l8@Y?aWKk$E6IR~}YuSKXI@-Ox#T@7?sEaiDJQu(afux_ts~&`)sv
zG=k&n<9PZ98N2Y6paGnbipahO%<;AG;te7Mq6jx(pZm}NA~XEAX^{8)UE^#FxK`CW
z1D!3<?P_&(bs14Z$o`-vdk7^^Q^t}#{6^6pzN{(=I-eQ53--TDo7E_WFdD@OLWnHH
z!2!%WGsOLM+0I-{$F<w!>+OTRc=zn&p#Ih>-I^ytd<DAxgX`;z`YGN0EPBZIz*fTq
z$=?23L5TDA1=^csX~PDyHeh?Yx~Y-3X$#^*iiSiF&3*gO-MDJQ@NNoeMUWSw8^Pof
zQfGr(&2&B*^+y-cX#DX9TJZGInzzE>^OL(~K=AdNSw|rb%sTk|2hejqyH_n#Ph-*A
zE8}DdHd<S>)!M7TfmNiob!z1wQMeIBAt5<TwjNxIT?Z|1sYq#(ESMynf>)6;#h?+i
zW;&EL;!ov&TCk;W@Z;Ej!#d4YFYmUGT>qc-S}Ok2#?RmFzwhI7Pxjv<L8+oRmPz!s
zo2T*XI96tv={JGENJ|!1ir5+pU<Vx_+_Ps<G-?gI?<D1WOYI$PAJ#EX<$LB*#=YJQ
zHNBhO1~G2HKT|>`y^Z7-uCCZDAaztf#Gp%KE1#DzwY`FWDw}pt?<;P1+Re#8VpqJr
zIW$-`TA>znxS08D%3%o7-N3|CXI7Btuye1kOKe5<OHH~#r_In!20YR-{cG>1zuL#4
z|7cr(*L~!~|EsNSxb%Nx<LB@6|6V@#;{R!re+(Y#A^!6Dl27xnnewoZ;eR0+`BV3;
zn`Am#bA15ZV6xHUTB?heZ>1S&skHXg^R`ik7L*e~+S5Q6+BrTuYL?ao!l&FmPMh)0
z>-x^G)KMfRHdThb8PteDo3oJ~5hp~%V6ML%H+D;pQ=NhNF=>Np*R{1&H*fUBLP1ex
zJ>888C|iO8t_ukz3c+5)TNlcMlRCcy0G@aj&Tc)mwA&~9@lpNNadUsWS?6&)OAT}X
zh(l$+5`9sj|4h%t?tTN4SVJL%r<Gcp)A-l=?_2<zId_Wa>}031m9{%(kof9H+b<95
z3ZA#Mj_Z1Ja$3jH^W1d3?|175{J!Ub6)aRFu(j*86dnZ(eW|dEya799oCXh$rcLEx
z0XMP>lAjT@__L?~M*VNn6G=3+x98hDf?wP|eT}OGJf48Ayis#qmBa>zeYlN#e!qTN
zDzB}l{F%%G@h<G2)2QsPAN>TOm|b9+Z^`!SR1C=7S`Fi!yQP|q?W0rNH>JqkO=9O$
z#S-jzcUT;_d3)zqF+a~(pb8X%=+3xy8?s!wAZ{7-z7iuyQ9OQ+`Bu?0Y;R9Ha9Op}
z4snJ#ennK?-G37|1WDW#7>Ez6(T1qh1zRnB?cSR7rRN?n<eH`e5m>8%7OR?iyq12n
zo__Q={b-}Q<UTuURFTa$;8Ix%HAHq|Pzx$2ccJIG$^@ByC6lP81(hklFuREOR!s*3
zr=K#2{<@TShLGC<QaezoRC{u=!)+Q%;i*q%ji5mcv>-$?PM1dJfAT?MK1kZe51Gs;
zX?yyUi4%goae;SfX5l@XB2HQzPs2I~Wq2x78Q(-2!S?@29htS^Ck3ZhKdhkie%|E0
ze+F_o!bA>o^&~c=0TzqUn+i!Q6Q8(@r@omWGu#5_9`s>Ox6>DBc|I^}8#N*5Sid`a
z7bMZXL1BogS?e<Xqx;F%?z0FZJHhLBfpa9-EanObB6)#kOAzt`o9cP`=uc){M2d{r
zob=*Zi{tRd@p%_y@kyT`fNW_q`Z}48lQAPhuQlusKI!tDfrA?afS>qAOwe??gA8aS
zC{WpT*fCsP%)9zZx~M^4Jk{L$M`!?VrueRrD57Pf;Pz)YhKTOiq0Cp9A~~*LmIk@4
z2Hla}Mk>KWUPI6P?f6o)Ic+?PR+yUERW;4MaO=9$MNt?<LquVzQB7c-c0Ez#@>|c3
z)OHtZ?(vYl%_@rHNYlUY3INV0bGxdU0ykI-3MpSt#8=7=2w)gPvGN{!8}srXoDbY+
z$;>Ad1d6i&S*d!0Q)_r!E;h;EuXX+sA4mT;y}tYKKe_t9r%!%%^?#3_tbf=4-N)yz
z75_(uVf(`IdG0Ex++tC17KnA@iI)>&9)wVox?nyKUrRczNLq*S8gz50W?6so)Muv#
z$30{!7YA0_HO)CYAa1Y<;R$X}(n@qPo66i){1u$It+J3iZmpu#4=1M$)N(Zbc5*;E
z2yNeD!X(}8xLw<M*ou;lCvveY*w$TI2^oAVP#&F=kfBD0NOohXyXmn*o^C2dHpLr9
z&IRHmleP7$dNbZ@$O}))m8W<{?9-j?X0zVl?1*KflOH2-+?#sd=JqS>sS>@=<I@It
zKRBb9wOff`<+_4zzIN>2e%o#wDX*}Ml+nvlkudoqi7|)RT)K=cO9<c@Sv!MM5_c+{
z#@spJ$9k0fKn;iO>RgvNb1dv%C~X6+#m~(;zLbWkKzeYm&%5#nSlBD{3&#2IIiY_@
z+Y;Vj3mEASIqN(cA$CXnO)eoNym3*#K%R5gpgY6$%rNz)l<Q7eH|@HB60_HQ5U*6W
z67968CJk!GnA&7crg2upZ(pt@%Ch0wnTFESunV5)a*fR>KTPz}6wUVpI{4D|=6K{q
zeeJWDa<^R4+YBf?WB^6)39+{^p%)H*Ivx+PNEv5qcphWg>FCiCn@*nm+B|%Dwimyy
zZ|^paj}K{{)C}?TiW$_q0*(N&0##Z7mAr+2v;Vl;ju401$-rsFtWZrKlxmI?GIEv9
zbUcYkR?>Y0&Kwn{2Wu+A6`3UJv-h~OVa-@4!;?fj>&-{)1jKSUE{#LP=P#M^N&1#(
z&tivUiA8;|3D=b#VI?3F$8YKlKr)l`7%o3EftVk=Q?Y>YdO2&Z(>cOVRc=ic&cfNS
z>^bgu+0JEh-|VFggsRALpMIgEzw?2MFYX5FsC(T8D^=05dZpt!<T?2I0rQU1A(pnb
z2et$$c5+gfUlTe+lDC*~PK0y@bAyX0mS){^VuyUpb11lKzyxisLCYl4IuAycOSCTs
zltn5E1ld{Kq2y-r<No2Uk&eiMvQsR#fmy;%azOInty<^EZ&Gl)Nm?$8=)(Cn?N5^N
z6tf7aWLzpckzkO>?$H|q+^w!FawRjFRH$RJ^1ema+<>?9C7pR(a0g%w*}h73W#Aw1
z57EjB2?{fU0!~Eum4-t%sT)&ORW*czvgD!S)3DH#0UhEAhLT?_czLjejtpM@rPQg8
z{tt4S-gzH6_TROQpP#1nf8Xu@?&b3}?7y3Dz4m|nI45Cl+T>=pe!4>jU)u-!uZ~J!
zvO`~^pKNI=v8Absn|rPp<N9x9ZJgHs-~ayqx#s47*?7G1^!xt5kI&cG{|*2CZ`@@6
zHx}9dmNq4U{eJ_wb@MJbfX&-7)OSUMy#8Nzd<B@Z|9@V4?4JKmYEPbg-~adV`D@+(
z>HHt5i!AnByj>y+A~3ET=D#EqI}`MZy^)<6LsO^t^>5MxZRe;Nl`T?qZTPNL1fBYC
z@~91#c~^Un^okVzR-*2-FPx`3gxwTU$3K@r2?C)N(Trr$f(+NQWhrPu*FLAWYIM~b
zPNwb3XR<{VQv?JZ2HC8iHYHed*D0($N{|c!tuv*b)a>}B%R6+byIXCe;U~Cad^fd^
z`f=fF5?`=fS|MW=Kxg$(Hx6}U<u!=<A{w_9d9gs|)c$BPPoha{+8RdG0_3}#4+g8t
zekB$bdi0{tI0OnU4^*pHy$gXjo4Wk@r=&ZJqKxumv;4R~`-xe;K>JBMdK9_sugD9x
zdL!Ox?0~6okr1qlxMzM<Bvj#-txctF7FZv}^XOkim8>?nZaF2yO69eYS7NB(@CUHc
zD<dkAQz_9l94^|w@pb%)p_8(+@gg15*98uxqFshzn8}*~b{IFN)Vdb&?RVa<N}ppH
zhu_ws=LfPTH9FnTvGSa@DFP}8t-cE=!3fQ3p&;^pITzu-0KG+AkS6_Tk#Afl8(!i|
zCGpme3clbOG%<Wruo{V;=?NXpxKIOm(II}}-&E6znDcxJT!UFPu!bP^po(DDrg9ko
zSq9zNEJ`k0BgPjSRk&a^;`H(!I)Br1=sI{1)k2U<pdlWSK8~)P^N6ohwfDCNYFe_I
z=L$c)jQZj}OgQ>tv`QIWDiKKUL@)1G#m>a&>MP~>U_82rRx0IkdG-wc`X{Io{TKz>
z!cgzLwBnBnKTh-JBSyIg)HmRtFU`9827T2F<lLm1jr)+tw+(~)aK_tCZpUEWa-AGS
z@3{1?w3sOsL^=zTWx#KBQwSiaO&7c&TnK5C>7YINWKA4l6MQA(e9xI=z9y-|-vEWq
zP9q~?X$bwkTYvvWK5qPnR)2Ko7T`JhpN;j$DgE!$$KUn8_wxDc=zmDxVrM9weVYB0
z^oLzrBZTT10Z73-v0jamZB2f!w&uRqUck+iuSL?tW^`4&n;bj%+!j3vXK$5MmLyuX
zS1)*V03oU0)Q@(L8}a@w1f6P}QoMw{J=ZlKf*y!6+o$^&j$*C0z3ve;C)4q{b!(tU
z$l<i7R~DbdENpfFZaX0BQb`C$qoh|h(evGd*?WvrkHhRIalfM<olje%_NDQA-XC;g
z*e~#m6HZ|K+<N15|M&>47vr7d!^8ci7&QHS+1NhXdClL?_78UB=Kf*5Vk-d=;dbjU
z&t9Rt04oLh(Qa!<etm|a3v&+!!-+$`=#IM6ew$~KCjj7+8c~ldS*b=Q0$^8}i8(mV
zsHhZ8h8>VU!RRPz3qX^uYtR*D&SaA~VA<${83BVW<0vqV1+aTO8kBIw`Vv=_!Mdl6
zy1T>iRT{mrktKVYxk+Q5OV(7*N8ZU)7HH*VF;Z$IG;cn|gGYNUhoi%eiyz7PKxwQ|
zEeAVRY{mVZdfBsitLe>Ji9U-_{d4t7pxt=NDYUAe*~6J8*t=a(wNHQyI|PD|%Hjl_
z36y$=ffQY=ime@^(=~_OL{J-V;v<N<dANOYvVZgn${|jabPMnkp3^Lrwyeb_H9IyZ
zkgZXFwSNkMeINk;2*hwGTD4&nbM7l2HzDkdS<4!Muhy@J$Gc)KPs~coD^dUAJf8Q*
z(Vx*(uRney8d&!B30f>Ppg($Lw7^D)YN56p9DxQyA4SVsztsL*d$Ii8b@YBep7?Kf
z?f{z2|37{#7|C?}x9|M_y?p*M{Qtr}k`yqs$i1&(EK~8PRD>!HKIIR}*4*I`amC|b
zz+rv)5qw#$SJ`P^ry9{==infh#pnWr!%fUscV@rDpcKKkt@a=O7t`*Cz>8TjlXorh
zjX2AvpPbUE#E9wGcxDr3R420r1$DAK+w_exOec8RK+N#qlw!;UR>vxZ#^8K>P1QKR
zJ(c)#E7D`Si1DF4P<{v{>5EL2ak#yI@bXv;aVLJc-!yKbxDgG%5u*eK0A4x4ASE1V
z0<yEH!286-$@~PK;hD3-!^0$(3@^1Nb(Wl*!ZSqhg@dx2L8i@)wvx&!o?41D_<TF<
znG6`aJ+mi!r_EFLBTXYKY9g&MWpJA|;E?I-%L2Z=^Q#wd*nY(TUnH<i7Q{b<KrTCI
zeA?WP{*-=dsdu{-|3V1ju+T!7L2qbNLN@ixz03s}^(E=?hSszUC=ACWD0XnX^J}~(
zMg%kS@v<*C`7$>4=n;*K`^nf=y3C(j&J?}bJKI!uM{;N%r~QO{?)VA$`PqnMb^(WA
zs$DA$Dc+i?(EsE-$w`G=LYyssnTHgEJ0BHai=`sdC5r%I@l98O+456SL3k$P!5|1~
zDy$vd8jEjm1I!cUAe49lE}Y`WrnQ?GrStBwp-~;CRwCFB9%9Msj9sbABtXMWy_N`!
zyO5L?TUT>k!Qc&~+yo8)Q^zFZXwpLs1bB%i@a;d6h?3?^Ky;rBKCQ}}qmeNKgzM_3
z#5?;SDnxC8)c)0$N*@njYN_lMszz$*JgX$2sjA$lz)QmLc`8Kp+Lu^BGXWf-E<}LB
zc?m^<DrD^8%)Gp;rAfi2qY23UqYD^0OpxdoW`MICDMJKjKW!Q>tknxdNRT`-B)6HQ
z%pJ3WS}lUIjy;hPoM1fYERLs$Q^peeeQ=MCo8m;=-u->k<HI_;fH)f05k|~lLIIk)
zs3X`vflQzl9?z3hKH><*|E~7FD(4|XF$!QxY32o?q?|mz8KMK^6)wC5C!0DA`on%=
z<`h=&7XjFWq@Y`_#iHmufk78x-}FLVd7y=0U0`-IHYIcZ;6|`z9F%+>bB1o}36qvW
z!G|d+7u#?x4;U`8H_&X3c&ZQWK7#+Q|K8rGf>p<4l*zPzanV&!24q&?B`v;HogboJ
z$m{(0-~fUjGjmsLS=>Vr`jCVsuqyrqv1yn<UP;@nKL7vieQS3k$8o5?0>7fg<t_=4
z0P!OD5@lgY?2-{*0k99<J$rD507xQ>mw*A6d!_Y}6FZSC+0wmnoW$4GiDT#5z9-f?
zCnu5Q*vXHH$dy0kFWj!`$8^v13@}SBS4l8&Rz!DocXf4jb#-@j6<dixm_b97mziMa
zw*yfKbj}U=I3=JE1rU(|0m98Bm<CMkqT#HX3LB1~Ylids!<1G%D~|hF@YedsBmYL0
zn&G6PY!F<B_{p8GYrn~|Wa#rvcxF&k!w`L=&U1#WXNLQS4|OOiW*?ho=1Ja&aiYL1
z=X$p`zqQE;v&%3!75))NO)xP<urPnWAJ6KK6S+vNk7F+Sig1x}cgR^p7khv@C~O>Y
zA^mtzQ~EP3TlsR;v$-N2oxjj35vm{a+NJUuQ~p<pkX&TiBe@(cUspVmvuwn^3Owt%
zwP$522fa?~S#}yD2jVsD6<miULo_HLvyTQPx2KA(#P(;5TH3Y12x_&WYT$7XbX`%h
zI0I8oN(LWvVCYcTB_j8Zii$^Lb5t^^wt=Q57=+f3Y8q}kxSB?_T0V`qvgTIOyyw-P
zG|o;VX=mr~G_LcJ8X`cQZg961p3T%zb?vDG26lwa(*Vit-QNYM=Ob)MS*OL>F-Ac`
z=_G=($J~8#Pv7%)O2l$LXMc1KMk)!0b+_0bRIBxVpG^+$h9=O_lQUaG`e+XjUB2%g
zRjVL)Hb9#c)<34GEvDcurWi1ua53Jrog$?wU-}!h6F)^$L`I?iubdhJnzYb;@{;=!
zbEn9!6{wU{W}G2@)e48wuS7-79(Ru(S%9}WIQL`MkUPE6b~*<z0b()f5)uw4F`&AR
z-l~Xoq#;P&_J$e~_dz}vx(@dGbp^ajJCTnFbVe3SF)d4YO_YTdp{_I$b8kH^Fc5<0
zjL)BGeWTw{i4V;CcqOXa<GtzS1uab3ib1IQjWw-JIF>YyIL&7xK9Yz+F+w#tkRo_9
zfRd_SGiiltKZeL%t;;V-{!%zea+gL3KV79**xLe&g4_A6&2^!xfZJeKciJ7B)K9wg
zs_p8+sK`s>PF8V;Zjl>pt4Uy-3GM`7RlwtWCx#KOTr$Lw@JdW37^lH+htJ?Cl298E
zCt1s^{M}Q23vPwK^{^c*I=o(oj15n#cW1pS{U#OfGOF_!Lz3uvO@83YFi-0k8(Z&%
zYrQvm>kfHn?RxnbuaRf38qIcqhA&G92Yf}T-yyluhOM=sfN+DEm$f7v2J($gEIFhW
zDoX>jy?`QwmS>kEx%EXe=};Z_d4-&LMsco&d4m7<N^|4Nt_ENXBg|pp4ie^a`Mz*r
zu6AR%5<RjoSLpEkWci4=3tou73V%JQKVn;-gb38N;s!Z7;1D82muFt}`xxd69j+3}
zszTGAbyRTP*7DUp{s}IfmajJ9K%dCXqJq7O(5%w<Xvg=VLP!ND=xR_smF+{Hae1eP
zJFqNVN7Y;KY8AkF!?Q&G;SJm8*`S?Q7Y<T|qc}UJ&A5A1;vGdN7FV2mg`&wFvSg4!
z6+^HsU+r$QSz51=w)YZB;SnbWn6J7uL^7ytdOR2pYuA}#?k*15qxv@Q-k%!#I>Oo!
zDu$Q$5?_mdXp;69G68tl;}xDQ8U;~f4??qcG!kODfQN|FI)`7=u>wmMsL~AN&*t#&
z*YV$F&EyUOdFOZ5x6rMLA5&l>j58!T*iS%OVMz~1Y7`WF8NeYUhiaoh=4fpmu5m}b
zd~SRrf@yQc;aJja8BtR&_x7ql+9xuvcRd?)Yw-LHiru?9xIc@35!}jO?c?MWw2@VR
zolHEc_b2GEPqaJeAL==<Zy=WZ-iDpNItU70&>4A^d?PSev7_s=$>O8yh_UcF6A3#l
zg08*J6A{qEta?n_=f<j9Aau@`4r;4}#ca9wMSRGpX=q*8&;^P!+EM-&9tZX5PKaGF
z4V7%Pq*=2KV3ubN`sj6h6^ac!%A<@&tgskfs)9-0im>AZYWVWdEum(;v9rcm2r;Js
zBAhX^jzo7ml1;<}cDfc3`sFlq8G#Z^jS(=HqGYn}`d}*<mjeOIvd&!wlOj3M)JW=N
zbolVPi=PRooQpi^=3bpoIfpsqWyQ@5JoXXu75QB6KY5(Mi)~|=|CfcF!T<00{pY28
zFT(%#AKog^|1<gkSF(n>T~Tw<<Ms$1KmZO<8MNW93QH<J!MV`a!vw6zLe6>zn@%1P
zjX2v-mQv}j7@FyMo9PX2Mx6FWBw{>-o*z;wbWedsb;0Cp+#5a4T^OVF#fDZv71cr4
z&s{k?SpWkB28^N7;3xJSPK_CW>O8LBL63ucOXhVN!9oV|17GI6ok6NGVCpRh6-RZR
zu5S<Q(R+X<6cS!+7BP&ejNs?E!qhtuF?PV_9pJ$yz)wn8cS2@A@)FBly;-;VA|8;<
zhSJyfyr~Qye&+jU#Mb<%sPRAuOh&Cwc$v`xxr*^}-4F*?=JkL`>A=kZ--Q22>|G=X
z-Hf{(+i9mF8gjIK5KdG_Lqo_m<ySGTGT0IZysOu_YfRTA3st_X3foEta=PJa1&g9>
zUoK0T0)ELkhf@GjxYt=zs}#T9qhJ9MvQ;vqS$U%rI2=)fb6dkzlDYP0Bc-=$g^~Jo
znpm@XkHAGtRkOMA4M%fXk;7IlAOzUT#YXb-R(Aq^upTxjd3c~q&m8gAZL4)-EpWHH
z;+l6p-L!qW=_f0lIM$jqvsX~C_l?E?axSLf^})ms7c#`9|8rMr7wZBhSpUy1%%|tw
z`hPl^9qa#>@%^Xq|ME)Tt~EL?AA`)D%d0M^w(U;ydXA);IR*8UwR$c%S{dJ4csxXo
z9YH>CN06?VFM9feV>(!UZ@Sp+xzqJ{EyKM{LMXIRJy5m`E{J@C%wUj3Z>piaV_)Bv
z&QXmwgRgpCgeYaYmaVd^%b}4P;y6O4op=}<?0A&Scw=P5Ty<=`UU&mcv?`lB>xH{;
zi=IllVZcB{gj>&VZINjRe<MS&)6C7i+gb9qJ9TGw=SDGq6LTx&wY?z_x7H01G$$8W
zE;qF1oPW&~+GBGDt<*o0)<-jA?%@!IYQ?*i8^uDQQr@7#a)XeJ*GZ`a++#oyLKgjj
zHbw!uBY~2Bh?Pf0tgIpy1ht_w>I-yd6rf`1`es>RMw=9n!UBAAhglRt^tgzElgQ8C
zRr9mSrJTD87Hy7hLsDrs8H;z<MfS7;92k!d;!(O|TaHwVH>#y4L?-m6RJciz%?6MR
z58vP#gOHRzUj#tW-0QcHwsdS#$#mA7Po)N6M99(trJS!<P^P@nJ~bOH!gm??H*?C_
z-Ur(a#~qM5V6!~Gp*R>*2b||y&BmbvAqQ<%^%5G4mi_$EDm!RlZohu0wcre|sNNm~
z0(m-DJ)#=$%c7^uz5!wuROkXbXi<HzFP>xgc<-bQLzD*1xvCl<2^MO)UvAf$bv^oo
z+CG}TsR`(rj9rtmeOCR%?pc6TU%u*m8R+Ds>qeT6BQzdH0yHrPcu0sW#n-KfzJm(K
zC?0|lt!av2j53bh88jIlEXoph#{k#?+$y<M+9;NBMo>i<wFXEn6!SYb3p#R`bYA4-
zb?*=d#LD(kjA7U6IfropmbpZoFuX8sLgnBo`44#ZgX1RE$@xPyerL?cBx=wGOuyWu
zG=q&U5@AA^snJgHKC`;Xs?s3u46i%>D*QWZW`8kUq85&Z(g?rMbt<oR25p-cP}~#i
z^CkvWrI_)Ride4$hH=A63zxdac{WR~AqlaGBx{1-oYN*51*HjelkVXv;$A&vG}scv
zB@Qfmpkt_n-S#^D!BYv=89%Q)M-S0OwH886HDZv?CVH9=xMHcyXu&M`{R~>bh2Hs$
zT9q(Uoi1@aDN5`iT|73Xd+KfQtOF04&}$Rj1dG?n!YL>NRVrXz=QVizQaMflNAXKF
z@8^rkV1`{Pl=1CvVO^6e%5&TQ`0Wp`4hWUkWYC?3XR$|CAMLUcW%fYUZABUpFYUAu
z8u?u#dMJ~XPiSsqdcj@*qw@C4W=`o8a>K}!z&xH+-lCJtI;?XKEig!Cv;IQqS?&3s
z-JJheP@u&e!r+_E|2Y`;XkRzU@NYhhvA^>&5Gi4{PGRpInaRyH<;6->SV>mx6C0j|
zNON_>m|5qr_Jr+@k6{iX=Ua12{&b!<vqX{RHHbN8*88n0VMPgA$G}^6_Id?uS(tgu
z1mJRGyF4Kte#wV1WJHV@u;|i=B#MQJ5<q3hzSXY}zc6quzlK->WL50*(fGZ*i;m}=
z3x^Ag>^CGBs+jmNk+F9`7SOGjMi`T!6)5Y02<;4d)%v~UeYEvjamDB3ks3I1XhnL6
zVoa%f^=g;RC07?9v^QFqdawa{D60iLfOXal5t~bP4r6W^BSOrIU7ZFSBXPQ1ulTP(
ze$UAK_iX`B%KW#4Zo@cj!PcCQIKCSUp*QGI1Aj5m#@;2NC|bQ*8xopLQ&>0+T;2>7
z&nVm^66b=#x5FZXhzZY@ZNP6Rb|JKAy!nCahRci+>pQ_*#t&#4uoCsyj^O?Oiu9r5
zb@}2#g>FDarF|tx9j)ApwgkM_z}MmuCp@X`KWy05BV1W&X*TQ7a0FOiEC|D~=^3!;
znXu^@vFUlS>6x(MD*VIN{xtig-Hujl%1f0#kDIAiz<x;s5;5T)n>A$~Z`g;2Jb_?_
za?*<nz{;dE%)H8tTRUr&axuSFnD&_9`b!$4;-rfg-{95${w;(j3rCgY#yO3{K@azf
zUSA<zfIu93Cq%*z3p<2Y#EdaD+|?H-UhF&qN6!GqZ+?ZuI5uqN8-9dnuRe0wC>f(#
zmzFhDwOet#0hcxV-hz7~PqL^~y19etK!a`Bd?yjR;qsY&!3FUaYXl*FE&(1edw}({
zAu%18h~QVSncJ_|Av$oU+pX99W{lt3>*@q@TfJrm=vOV9)={V1rW1ut%)4mL<_BQj
z^Qc{A{D4K=9R4h9qs|08pWu^#v#g1W+-@N(o8VLH19E(Tqy+4de2jOL@Z_8)La}GH
z+3`1g3*^tCB_)G~di?PfTq)$KTit4kV>8@tO~>Fl1;(Z-?yk{)Hn;P|H%#+@nOoI+
zj|i5pld6S2xPgYyWxx3_;u1B|0wtJH3YT+BWUp0_cL5l*sS48QQz~Bs^CQ6w+>dhE
z;eR8;YR7)T`ixF^En1y%<uxezP%$eJs3I8JF}32~hTg)+`*_yS5F1jHxuhtF5X^G)
zB37YLX9R-F1P1!HwC`+=^^6sF3EQuPZ7k7fbNhSPLMs<~;yTh(KeHuNqb})plb!-C
zd%$RAtR!wQpzv0~ccrdA>Bm2VDhoA;Cas<-m{$^|9{y^soUd%;OB)c4$cX*%0RTIP
zB`nDc)~dZ(0fYIzGP=2JWp8(PtE3OB+#qsmlS<X<TB<^<<+8x@eCBACxQaU$<9OiW
zDK5j*48cTi#|}i+o$G_F?Lk%+H{+@kOt|-`Xe{comU&s60Sz)~7pa7qGheKo9NOwb
zT=UShSYV+!rBQ5cbfkov9{!F3=x=&p!9;0-QrbSEvigW!2Gf+T{OxCmf|4q6<PW!@
z!aekdyW_w?WKYBf?cDu~D7Go0M6K89R&ds%H^78KE=YK6CXek$9$O`kJu-R9Ba){)
zB6-RN^4P)TDSOES=PvyKc-5k21mfuy8_F;DtWuzC6-v`X(rM5`GHOw}W#*Ti;)S^o
zSxnpFZ(;H2_g=nK+Pq!R!w_@j+!&z>Z77Dk;V}9Wx#)c!kG-V%3n4S+^~PoU0FbXJ
z7JFc8vR%H#T(d=-I$u>`s4V(}Tie%nDSPC%3wZZ3QtS?4hCmwy$6?^og_6hjhRJ8!
zc<mZ_$#27mci7X@4&E~)O(mw{o1vV)St)J)k3x*R3fC=aBaVu|q-dON4qh*B!>SMT
zEAH-YyS@1Mb=GlM0)AFuLkvp*IcW6ywsMxkFX%nKN``2g9*;KYdzMi%(9w^EmD|D3
z#H<MsPfy*Lg|=#Tp9p}2q@ow5Xu{E&c*IMIkuoz~p@HZWKdb{FVWW=2M{!g<+^Ss*
zt7<#PV1fG&J&!%$;fumGLx<0?opuvoO(0k!w;MfsU^U5&LAz?h%TnaJMs1nE1x1wW
zMXoC=c_b7xE^jgsdQFPZ?LUMS6qJV>Tlt%?%Lynh7lOAP8B9wHwF7Da^>oE~3Jg$N
zo7dq#!q<CceX|6isxfr*9+($1w7F+CS4-DS4ZU6{U_sM9|Mm^Ov#&M!7|VO(4xyl=
zWcM0fwgvQQX_b5<pe>vwe9MdG7SDumAjb7}D1^;};KG;BwIK&(&OPI^z2Vs0Uf0_=
zu6yeaT#_~OOD-Pk%-OE<v(Mt%TTamU5{4XHD;KwBNecfr;UrC9Xn6L!RHpcsTtF;Q
z1p})qHx#C+KsSoz&Fw<b1;$fVKpqOw1;|r1!0X$EZ3>f{C0Pd!5>=?;U7B#GnBQ|%
zDAO5m65}P%|4mFh_O)>JCo#sk>kYcLi^zq|HFuXGNkx$Ub`uYhbdYT?$hI3~%?q-o
zgXFh8Ao*<_q+Fa%!t*7SrjV*aZQjl^sH_SFsctB$Rh8Um$@B{$eD9>zvRp(ju3yh@
zA%csv=96sF8Fa|exPvBa_=0Zepj6%S0i`)Qs9&9~8p}RlWu1f9*UCQq!7Odc7K?ts
zEK3F6D;2>=i4Ra@LQB<VcyZSkh0>a<*7dbKm8~F4j0K!*ccF4{p|VowoLpm{xLBqv
zStjdhx$I_{vSb+(=!Om|SjG$LVwv)~U!ATR%RXQl%ak3K@j!Z5ro4W~zw>uIy}#q%
z{X4S%&oVwQt4mg|Gw8}V2hv_ZS<PnM=~4uHa!^KiQtcC#1lGz5hE7m*2N7<t9EdzX
zqc#lgIU4*R3jW5j+@{=#uY3rG6ba0Wp|S!%GvK}oa%N_Dp5hvc{{~czIKrcEI9-FA
zGAH~TQNlMlh(v~Uh1EF>5Fz{O=1iiabaq<Shwizs1a>i|_1uT(vT#gXb-7K}Fxm~2
zBh|LOdUb$K4;f_*<kQ_>IIUsaCI9F#pxaBTRv8^<C3(L><Wz}?meYC<(ZWVlFiW>*
zv|smVq5C6~cb5I2?8a%$Hu5xc)}JY3x@|kUut8XwiSR*}IqnA&rdj8WR)#~yNYrP0
zvkSY%4LXgYTlYiz$S{NsE##gP5!Bz=0IsqM_Q#6Mc-W}1s<m@)Z{RT=ctEX-haDiE
zWcC-|W0_|K@C@=lnr?Yo6Q?9QyXDfYy_Klx$`4G5q1t7FM`5IE^{m!1Cc$BcWGhDe
z)vH}6v@^mmzYz$=3Lz<EF=3>%3a>6$Iklf^-C8!QR%BzHTD}rgB{;H~Hm}8fW2f79
zu1IAHdkLqVu0$fMrbCxm4u|J|5giEA9Dq(2u+{-OUC76mC(IKI^h<y!$pFRf%IPR6
z%>dFe9e_?0DB@Igq5-iVcxkP+$8i=}N_7HsI3&mlF#GLC+-+m3z(87gxJZU+0>Fr%
zY_Qwwv>N^Dpfl)K_yL0h>CJQ(5vM4|B0Lky(9M2p(4_nWj$lPQqKqNaQ${poETc3e
z?gjYZseKl=>5`WQT{s)7*D77~?ErcD*cbqyFuxUus6^#ykgb~J0hj~i%8|g}lc4Lk
zAJgGonX-_$g~TSb5=st0>%Q4ke%5m|SnhEv;)Ufo0B!r$Y<p^U`v!}6VU?N{Z(Vjz
zFpLKkCDU8T%`wJY98SyeoU6=EHCY4e)mxq8x;plH4$1~Pfm=Kk9=ti#x^o!fZ%Xcr
z7)Y*iu5E_(_#M+wcFP@hpQ&a8IM+}#z7|>wqUm@IQ$bt~#Uvzp`qJd5?gR`0bq`vu
zT9q6PSUB^Fcz{I?=Q!0xT14YcHD^{_F$yb|-Mvy}dzY@~yG2fxR;S$nhfhE*PDm|{
zwaJe#4Yh6LOZc?AQY>uc%LT@)9!FB@$a^wHmwXGEGCf@@-eq{K@9uc9K(D{V2Mdgs
zF1yGrxioiZs(IAVmklPH!x#S!>3NtfLZCS{<`evOf=l_D?>b0Jb?TZ&$5QT60@+Pg
z7Mkr=l)|CeZzY)he6SkIhAS>M)N6uk#r;9A&qN-lne;H{>W>6=fqJ~hsgJwEn3)Qx
z@Ac-VFtFdtgqrr?-k*)d6|?(6880tiVzW!j?jxLKz`~+V`4%m<Vm3~8KZJwE51Q7Y
zcFzPOytcJlDp0=L-eq<ccq2Ay;ry-g?v2f@E&kx_F5JMBoU0*^xvlfk(eHJayx86~
z+~3p53N<Junxi;QFe>i~0_V2Z9q~Pi9`924U4VONA#3OlCTLtHzb=Y-!pQ<45ibLf
zIL-s?p@DdGW86^dz{MJPi_JNN6BE00A2V-2)((CoB==P+;e3ekpU<Ve?*H+kJ-@SW
zi2wIgE}eAwf6U}EWB-qr@?C=eN4NWTlizpU;rlCGA2d1Y136~$dUy2Dl*;zprW_WD
z-RlF-<7NdnG82w>lL<HxGpL;)(-sLF)7+z4Do7--WI=nhnbduz{kd%3d5UAx?$s;3
z(JA@cJ-dEl6a3pg@x1fI(*Oa)ei)!s+j9=<GDi>4l2vV(=@^#XX*H^ubj4;)V!)Er
zs#SV}HWjjmw5l#o(erH#Vns>ekeL2P*SB!MC;m1YsH~jFFh$g>7^O&+0}yiUZ)+?3
znol7J?*pFo60e<@IO112zi;(=4R8uU_+PA6uV>!N`I!92`PQmcV0SCrL$<8$1+_6m
z{?i{BNB(E#sr=8)CzE6Oe;FS)=}aVndk>qPeXF_5rnBG&DxI30PIup`K7jM06<2Px
zvj32$(|Jitlb1P17w+yAs9ILoDd)Fn?N>zg0C0suyy&ivzpsh;zimCBBC=V3K5Yz{
z|8&YHUGqOf{}|8z%lNL0sQz7Xw7=<UjOu==WSst!Bb!ulXwuAHx6`x0C^;Gxn${$G
z5H7919-C`DfHfv@Z1wty)&nXWXobzje$S#CGEkXl!q(<>xDTd@j(AB}(dZf&AeZ3A
zKfa#d+S<KSQ3XpSzqhG@h&A4mxUpL*SJsQ0w+lsM0aa-*12C85#;hBgTV?QxTrO^z
z0k-pNrNWvyue|Nk9%v~_&TdBeH938U9cW-mEE=U$*+n;!dt{mUIW(;3Q93#Y^*71%
z<TOO+-n?0XAu8|gZmn(PH+L$VYgDs;6vOf1ZGnfCMzu<lSVHA`zEt3ZgtNMMtzjL;
z#idJXA?r$+U&^>r7IRCcx@O#UEiI;8De3h5QZ$PGfC9U*ww2$xY3fLo>cWTSuF%%+
zv^#O;nKX_XT^;m#H5HF@{BQ?2xU;L;ajV&|`c&y<iL~LCWyjI;XdI5^SQp&59r_XZ
zGBNY3q1EU^v3R%Fu{(GvN;`on>()A*xr{-Z{G^8eg|RjGpF9u~`vnJB-g0<+#DBEw
zHvfxz2W3Cts>f51JCfJZxwg#&O}<uRZeIyIdjw+R@gH+N58mJEeKKt|`yKM2-Fet1
zR-g2btR8x(g@~$G;K7Mt!iQE5HZ*<GXcPOW-lA<pC#U;I^=6Y)yL4ififike#n@c;
zfbOp4&zKqLcoZPOb_6gkc;{YD9E}VM%YRt-)?w3IhLeR%067U9Hsj&p1Qksbsub6z
zs`K`vvbVh+<Mvz?yWY2{)JjxCl;QsVtD~1VdKd;;KKc#ajuw3`L^*UP6t<B4429US
zNf=_+s{3J(65(V<FToFN!jypb&sDqE3{waWkwSpCtVXE85Fr>s6zLv43I$Hr_M=)g
zR1v!yntQ{sZ5U{F{uYYC!$2P&0ACxFgnV&rgZ{If%c6cTdWH2WU6pWp;o=>tV80KP
zWb0+fsSujA5876!8P~F^{dy=yvRh$f1|Bqqnm*3CZKyIBZ#+zaa6;htAiOw}=ggss
zgqlcr0Ur)N?94tsPy`y)h3Gcewy1t!A2s^qh^kp!hl-Zo*oJfSz&hh%cc^bt5=Y@m
zE9=ow(2kStCvn61qW9Tf)sLP3ASrvtc(Z!(cYwprf9GkQe*c%vCCBGKm-1bD{AaqW
zyj_9Eck2biRiGzV*rdo~9pAXmtPQx+k+G30*I~~INs8f+9=NR8Xb(>2WQaF$4I=|M
zFOpk}N<8OOj@(M+l!Q#0+{&btgj|-~%4MAdykxq*ySugWl{63F_x~4u=c9hZ=6`lR
z<vRb(rjq0M@0arN2!R)V`@@~~Jg0gy<J+nW`nqtZKKFdvz)PNmdEfa@KbZgX3z>yf
zCY`49Kb@SP{~DQpzG$)Uzxn)U<DX6?QmF*~{T$jDBLCA1$+T<yGxOQhSpHweNA-~d
z5+C$-2!ykZ_tE)eobki_oYn2l!I^BF3edS$y-!ty{XwHyGXcjzk?<=JjmP7mT8L2X
zG80cO#?x7<XfCHy%jrx)iVHH6TudgTGcz-xF@cJ+@nkNZUVw_{Q_I<OVktYHUd*J@
zi@f63UL)~^)NGE-(0`Z6Yp+EKd{uUTK=+>arsC_$_H^^^&Fh8A^*6xH+|+#rT&s5x
zbgEsqD~*%G{rXgtFa>Z1Y?dH;Fgj5>vij3gP|HrC3_9>sEIQ+<fOcsrI>U++|KjU=
z`8+MJA#iUBZb)if%bJ=cQ~V}I{)Wo&@B0F~V3uHaCjB{DZX}NIf%jzyANX_gLqNSM
zJ0zF0$wWGtSxhaZ7nT%u$fSWC=)Vh+9pIayYICDHHLIp{+nEP-Jqd?@HaZiTf`L@C
z9<&?U?`Gr8L8I2_Xo+;t8V58bcxdV;R;$~r_eUBR6R_`~n1IhcB7T*s!;xN2W)e$D
z*bk%@a>{U|k|{b8GweSwAdFh|6T5<ZPuZT&L%!9+B>s1$*67u%cBj{<_oq1;vHQ`u
zoHa0M>Q+;kFMO(C%KqR0{>x4S;je0|M)#3*I)!G=Tg9w1-8ugK!9GP-qA7YxWdIwq
z^T~g{{wH7L`ahq`dDj2=vHjnrd_EE-8D{-YKFj()qy-QJ>3EXLf-FfbE+=!#>BZsF
zWduxI5+<8lPR=JX^BKy9OUZdf!pvtt;M0HE`tQHy;^GgBZsgoc{&|=E$OW(Y$q;LP
z@)_3r;QG(a1Ea0^+4ORDK9Nl>r54h;Y(`;$^dhhT{g*8<k*UTZF7***jgJ@>_lQ{4
zBieEv8MclmM_tF0Vb}41n$Jz;5!P`!%d*)-CP%mR3kx*KA@L$8R5Ocdxr#?zOL!#2
z`W-pv;(f02`pO+KEZdO_TC*?m<LiI=MXvw3g$3{WKR;goFXQu(M92@B5!ZhP9CH01
z(gNRIEX7kPlFBWomX?$0;ZkV?Ox%)bk?#5E4kI_8UPvvd`~Tbm2!HzT$m{=q#a%xi
zrjaxNh593F0KxU2n+rzN02Y^1$wV@>xRg%KYllFYEbjWVFJ{+owJm>Tfm3d{vY?P-
zG-V;6=5r6oD9Xapaw?agd;a-UHl0Z-qp`FA$^!m-5nFy(ywPNG>c8pc|7&9XPc6N`
z^*@tJXT9hDWBZRw`OLzF@k4Ho`RSWuv!udo=DlDzvAv)k!Qx*lNPuKKm5!$tVGokZ
zE+>cYL4sR%36;zul`DzN!s6mmZedY9*iA37UEY$|<|)=^>|B3yXRUI(uvXqJu0GPT
z4X6LBZ8}G1d=1#3f-CfYWUbTcf}K3NpoxQ6YD4^!&1|wjC>(#90KmZ`q%Lsc6`wDa
zOu)XQWCA|-IQdnoj}#UEsm09vQaYVcx8s={F2*@NPV1XRv-#rYnq$7$sMdoZ_jWR`
zE5Nvh2f^b=OK@1<qcgtHE;Gu2dbc)rC8SUV0>Ht+$#!9T^9B~cLxlk7?XA06S$nH_
zk{!ZQ+;WmC(23Rq-;vaye3?jtIyd)tYE#)}X&EmUmQvYlDyOndY7vz<_zz>83VSJJ
z9^pI$BLQNuPrfn&6Ox8?d@!WAU_E1T;Kjhw4tp6&Gj0niFB#5T8u`y)4$n)zzO#wu
z@PX(7+X0}lb5x@hTZtFFrj5VRKmLLW)}loJxp@B>s{iHExs+S~qiJLP?=rqCFU{>Y
z+VH^eD0=1evrkUH^UE*(?I&m7d=jO5jC*8aGBrVWGZXRnQN7unAn}fqbTmQkufPk)
zb~IA29?_gzee1BkO!`MeXDfDYLAMu2<R0Di*mOTM+sqt1tR)hO`xBycDxttN`cN~m
z_WPaYz^)Ta>>OYsK#_iD(5u$*eda6#0y~07n{&NR$DT7BRLe%v2?GkyTBi$-*R3W5
z53jMdyIp+a>28jQJuDG>%17i;XFwiWZ5!T+ihj#BGYJI}H(CGw&NnHAh}C8-F&~F+
zqV<nD1G<;ruag0IE@O}o0S#z@wVyO>l8PQQqIBrK{dcDy{_&ST|LNJsAF-i}x7kq1
zF_OexolFVT)V?8!CneNGl)LaV1GYN^+IQjMw48QY7VCwe8+bNeolJ|W*^z}L>ZA%b
z5n<i?@_V12e)xkgzxBhjAN~E=d+(h7=oe4_&nKt9`-`VP{MOTd`SIzK&%gZi$?3m-
zE6OMdoBh?vY5Fht79f+;@D>={3z7H%nM}rFQP#2pO&NO9`2|j)p8oa+Uq1O?r+@al
zv%mQDfBg0X=>O@@e*W~mcPR$1uBMY;{O;eLe*YJEiormm2O=sPAHxb}PRHJgMtUt0
z?}<Ho<VwD{efs%7o__f2bc%99I?ZZ@CU~nK{xNO!i_gB7PO_{oKKt(JM<1X!yqCzm
zdt?$`v60npkcpGU93}L9@Nz_xTh<?Sdg8r5ILMnwClwBeAVv%g5?=@3Q|&>^s<bIr
z_g7Onn#LbFSGc0S%1=n<s_l~pQ#8NZv}|}azKStQ8dW-o$R)$7jz;J>M%s<`A6X>6
z#wkHbAqM;OSATi-$v^%6#~(la<!@$B-~DS&h$o+){^t91kOdf$fOD9}Cmv>tRBH+|
z&vDX=T!yW5Lr*alwXJIPIwcTw?8W0D-0_NJ4!Xhd<qevLta0V+Z$3Z!?k8s-|MS_$
z-+KBF|8n~A`?SK}zw_SN`@iI^eEN5%-~N>K+%WWVa7EQ1+g532C6|r2Et`&G9A7z5
zc@_sB17gH9c&pc<;Pl_)Mau;a1IOkIFF~LL$jnK#iV5(>dV&!N`6H}VhyGetIbXb4
zD6eXzCBS;2RHj=QwXB>!t`a~)`54VzpAT;VZGZI6*$3}E{r1m)|IS~Ye)#Fx`+s`+
z?w=yfKl<Xc|HljQaU05Rk%-mnS&vqyC*M*5=H_PS$YY3B0?r0#4kW^2GWiz5-+S%;
zV~DBP0spN!r^*x3NnVTvYkg2h_;y`#IA_JvcYk*FgJ03LgNhPdTu%S@2l`5K_E%3%
zKl;B^W>OjQ#b@s@$@#@+??>@b4ba*VeefJ4x|c7nZFq99awg$lBJmm}2`}%_1Dw@P
zta9gG;nZd&dI1e1vKP}dU44H3)6?(0Pir{)%|}oF=9{PQ{u+jh{Qx>vyNo?`GR~=^
zsvdmE^$aCG6Y!{WQ6_x)lTW_<<Y!XuVK*2^6e|-^KqjZFmaR5$yHG4`?(W2*+8RLN
zkTR2Rsihy2w~6(Tu7)?rL}G$Wz18hOxSGk-<0)Fc(SFwQX(+Gg3t}MxQK6QHaIF+c
zEnh>M0ss~|fQr|IAmQon{`~a4?@~&s0(6?nu{kOMV^I*6v=3IVdK81X!l1wR<n(X<
zgs!P)-+yxU0h=_mhA*G|m`jHbKYIGJ-%tU5_U@;rpS<_wPrn_-g%_4W;Hda9XRm9o
zlDS&_c&<HYHg!hqVn72aoESI@5A5Qtv+sNdbI;!UbCs!n71g9dl+_U5>88jl#4{`B
zbn+#1VzGmLJ}?9H{=%vop_HQvo&D1{pZ)^YCZ>!iyo-w+OXZ+H`26&@AF??+IX$g6
z6N{1fVV$Ito+Vq;o4EF=u9rx#7rkK`f8%fbjlc0X{>I<<8-L?({EffyH~z-o_#1!Y
dZ~TqF@i+d)-}oDU<L@i{{eNwYzk>ik5CEW0a=QQk

literal 0
HcmV?d00001

diff --git a/drivers/net/sxe.zip b/drivers/net/sxe.zip
new file mode 100644
index 0000000000000000000000000000000000000000..acc83b77710ed628eea3449f8aab3d07dc148f08
GIT binary patch
literal 193908
zcmZ^~1CS;`m@V42ZM*-rHEr9rZTGZoPFvHqZQHhOyZg=Fw{P$6jdv>|t1>FHDk>{Z
zetFLMsuZL_!O(#IV_UU_(fnVP|J;H7J34!qGAR6K4=5nyFlbGV&RQf(C?KF393UX%
z|Lj3;Y-4C=PG@HCWNYZMgtb<V%JkT!uPEX$@EbUa$Ts`jf`dnO4pKTxa^qBnnF6~D
zoqs!tkEKf$37l@z$@@8rFP)^_dKLlk{h6iuVw-WM4v9rAp03ZQZVLtg`E}v$HIR6D
z>Oc#uVa&Zx^;5_=a8Jt}jy!UzED!9MphN;qGsZEgtYtv66EAcP`I|D(*+s*KWh_&N
z9WP4IZ<>mIl&JvZB!*1?Rsjk~rI!Js%a=d@R5$|G-r6rpDgc;ik_(lj%*rzj1!kW+
zjj19!zSl@qmCc$;;*y90eu~`CWeh50K@23d)W5-*EIqIva@xV>HGCAh&-CslqY7!n
zjla(`!XL)u)4q5HiRybNdiptff(eB)Nn*bz?KID5QVM-(HBscSba)8Qk1nPWeeJ5K
z&+@?%Sb%oGe+3VjXxmdZZy#K-8=7zf38>*U(9_2D2Sb8-P5l+;Ublc8aEg~JDQaN*
zKqf6Q1VcMQ->hgR23P(L=(6J{?KH)k!TvSjNY{XK(jFq{-DeO%y!eYS1@Hq_9)_J&
zG$^GJH414K`D>RMY1j-xD3yrzvyE71<PH-L+P0i%DVLnWacqekG%}oKD|GQKWSYAC
z7uxB`esS~?<zX;I4P`HCCwVdzs5+Jxr!Jut=~Z}r5Mp$HLg?&NhEn-wVBoGnAgA~T
zj=F1vNr(Cq8|f-+A&Le+EAu(j%10)=3HLkD-=lmE5*O%#3DZ2~{axZK8yOvr74+mK
zD#!_lFXSX|mg{W?l@%c6^2dnZ`*4#!x3?(J3kci}V&vvUafM8y1*-KL)d>4=-KEn<
zg3@^t*@EJ8cnB0$jBUoN`vW76hcnHLEX9x`=lY7W8$xM}u?dqepzhN^^8Tkf5J{P&
zYIqR%lbL+fj@oUS)QuNIL}DgvgX%B1H$Wx)d1{>Iot%(+*~X3(K~Zr)`SXVd$-Q-g
z=|xan*GUKp%F*3}ttY3j^KUGL$&yL6$H{-BYJ^Fg)K!q;lfnr<0dweR2gI%_71o$R
zWq;BpxvV###>%mhlvM2oY>FGaXZ2_AcPkhvbd0x|$XCn7GR9H-!<OU|u+-?*6^dQr
zxDuc$b5Mw9@;O%){OxJD$<gZ~JZ=nUV)MwwJ7B0^biH=ucRGJB)v%d3NGb~O79m!;
zdz;*xsj8}y=f}8NVXCgQ*k`J$XOkp_&2x-XI&?FvR)cw&4U>NbL$Pg8MypGNhV&y!
zCdnECBQDU+Vx_u96x)b9shr!*N>5uc3R8<6)h1j*KQ`#BLqJ};9?OiTzr^C6AwaTI
zQ0<J^s+@txfBzhr)oYLs+_8LFH)@zzBTBX~twfz<y40t`@5CnRNj=n4*ETDae4;G-
zP4*asVGAQ}GRf7N>h0nGg53y-ybqZ$eJgse?yf%BA-+4C_R*iI+3W<<k)i>{zTd^Z
zv2cJOtqy4~>hl5}P(f-+ddtgF@_U<mflYN+IKTg>4m9?m{QLQ>$mn%+oPxT3^UZX{
zQJtgU_`#u~ky>ID3f0t3ud6pOR?XY(Q5G|;G2na|4pc__H#?Z#6@b#1A>h92%j>p#
z$H{}^@aLF1Y4T18oM-vy9vYy>g*T%|f!|+k0wQKls$(8kv1SqmekK_U%faVj35H87
zzoAU&e+9#rJZ3a+7LI9#W|OolVV9{BYF+4EKJKw_TadPM-;13uKN>3#Vny`mqm)0N
zG~A1D*5HQmgwGg{orj~UIW0-2&klfV09qlCs=%3_1m6W8c3r&-bLvuW!KTO5#&3BT
z(~5-|*b(WwCc<u%E6joPiX_huxz>I+_PTVLU$7Osz}~~p=Vk!~%)Ow1^1zO-$Y8Vh
zC*E~2DA@=Xs*$uHIZ3o<rI;C%xs}&xiFz|yc=(_ODD{IQtyj&z3MrTI&AWIb=?njD
zouV{|iPq>if-I1W)3J83kD#VG64XW|Q0kS4gLadH!c_P(mjc`QP%8@cX8n4z-M%Db
zu}$p)`~+#^zHafP1f~?=8V3#>$j&{ncS{Q}=*B(SLEJBJ1v=b@30Hmr%DZPBOFx%6
z;LCI3j6+5;SydSTzf7I4`b~gc4S>z)A3CRHQ_qJKn8Uujp6}FgYaf5w=t_kbF_k8<
zQdgku9NJC76UU;}+}q{GD6Wg;5}<t^x^`SRyI3QF@P_P2bPb17CsXf8k*;>**I{Nu
zL0iUS+BFRDw+C)jvjpJODvVT`cVHmAaAlut1O3i@1TTh_PtkdfSZo&p)$e?+05&^D
zh<6uH6}&Jjgd2Ayb>b-$>_&;FQ<SoBp6-R@vC#2OoXLdo65obB;#l6mExqH>>RrpW
znx5`!`}7V#)ZYr}-)r-Nuf2Pe1S;-LN4{L{D(w`ubYs6r8|~TgU;(Lah{_S4yGGWU
zt&6o_;TOEabIU?iOM`pFj5eP9%Ra~HdW4=03t)WTdid<H9VJXL87?Fx$%LjX{mfYJ
zS}S+2NX!TMQ13)IsSkb_613hpEvG6fo(8-yaVKlOY{Lpm$XB*D#%*;6R$;~w@xa`s
zd3ACjw_iGgI$cKn)>g23Jl(e?4mRZTy1Up}_Avcc=t2P=+*IJz0LK*Udw8as-g7Ov
zYvF)8NSBJj7HMEc#5XJmY1@L(LmY21k4+&w(v}jAXc4M{r*nbH=R^hVQ-lJs0|vUj
zs#h97mgmG9uMptv{Byigz16<)(Ei2JpX_7RrQfwl*5HB<wb82npssm8_^Dy9Ha4Mt
zMzhu_r>5DS_Og5$ge|p4|0rePj}WV8qc03ewKi=BaZfuu`yFx(Jz;V5z_Q?OW^dC9
zOP`u)(<OBxzu+DL5l5MMDa);Ce>{*LFxfzV7*74!?sy<}1NpysiLn-prvLWRfhi~u
z5I#H*5Zr(A5?MoQQ!`5&(;@XKr42@;?hk6#qOig$Vc4$%7QATNQb<(u%f{Ix@wy_7
zAON-`)7XZ4b{eqII1zo<>hk%AT1UlJN>f<9q{Gp0C6N#+2TcTHH1l8Iyd;1FqHgX%
z2*otPy=jAzjb6GSlL#as|8JMQJ9N>0B!r~$1Q=Y3kOM}=TT#i`4MA@B2LLDG^~%b}
zc)Vg%3c=D!Yy%_=S0;6(n#68~KT)jQtRp3u!S<qQb*f1){s@0eR|8xZznI<bkfT$2
zn0gXp41NFIVwAHz3SXd!x*>r~-~(lUt|a?Z5bHaBF%PuoCl^0_*I5ngq#I~GtCTN)
zmK4H``0kZyTCrqC4Y}|LFiVn4hQ0^t;{g`mEscG|JW`or3@&89=omasC;sc$D=YUz
zN1ALB*|VS59A})5->V^;4rHZ%lvId@qQ3>{sC*~jl1~_Fk_J0xK==Tg@^qnPMmv!~
zJWM<BMnBH&%&VvB;`F10Zmh2_m2`wy2lcTM*Y`4J#*ItK0dU-*8?eL>;(`fsU*m#X
zNhA-a7*|Ml3|qhOoKqEJ6ynHauED~z17)~}gPRJ7>8Dq7?s7ETX!cb6^qcrD$~S8-
zXp2b{%TN{OrW6?~MTJz5$^<*oT}Gb#sPSsfbI&cbE9}fv$UCE+Q6abd2p}r{X}9C)
zcoz(DeG2B(KJBDlCG|9>5p`%Q^Qi&Mt&56qLFnq%nqxVYs#Jz>Dh)@7flfAwtO#wx
z1hdVfs}+ib0LGA9up+J#Z!u{5z)lT8(jymZLm%AoX#2TnaHu8$PeD}p*BmZCTVD>X
zxQpFji<%!GtHr1wPAXkk6{*j!Uemcw@3naxx}3yzQ!pt?@f@=mKq+l;Qs{2w-mYDC
zPRMW_vKQPb#OnepIOrHzLd$HdE&E5&=gSau)JG_(H5w;of*3V*@^t!w%r!(!Xhw$U
zm-T6yr#IH?p}=FHRa23DN49!cL$(q!TdTd+lQ#QasVwZt#afzudp0%W{gFmExUbya
zwoq1<LCX-hw!(~R=h{|DD|Ag;`+wF;tXd*-Hjpn-nu?yuSClTgEXuabS{ChIw5+m!
z3$!tCn<Gw}IYc^h=B10Ff5JSTbS8bZg%{ktHE?fz*KWKg5U9FcLh?0uU1xx##y`;=
z{g+Gt0=oJ?gaq_|PDdj{=l`xI{$tku$m321{A&*XS1|v3HxqkfYf~qEH&Z8POM5%r
z$vT)o1+?M2uV2zG6?Lfz=OW>v`$Nxpp%UYNacpK~Rc2L1c7_ieAJA16ht9OSe-Kp^
z$lhw#Z9`l#WKX0+<q2bGGl1~~sZo-$9tv^a!_t0NEbh0@w$8G5R;`6=utF}wo)hhE
zXk6SA+`q~#gA=m<wvqBPeH=$kwIiqi{@=0YKL$rnfwTz*0tDm_4g`e$-=F%QT+%nT
zx3#smqc;vw@r%k7K>FJFLKB7wV<$IXC`hJIhmy33%h2J1e~jxVxjrU&B|W8qUirA@
zs2DDt+Y|u#wbPl-!9)AD8&aLijIhwPI<#q6uoZ`shL@zh2i^(FckMYJyf7;*Lhk?p
zIToPI0TW1tcLhP$xrjxSdsl_&RQpX|_QjgXa5fM)O~_%LS#7p#Z!yCE_Y|@VIn*#C
z(~cO|FJ#hN2qLsvA6bjexk!(=0FK&Cbd=GYUyz@uCOXp36=S&uasZ-h#Z>ZjdFS^S
z)(+c+#spUr;n}zn>a`Wo&mO7I@E`W#GFQbFTPuQ+V6kDb;!n+A`99O6WXQ3%IubqG
zg58jH2Ttcqt&qQkp!!Ano$gLqFwtJ-O9cO#C5*7%3!$ZBk%8+<ie$BeknuSCAYtz_
z<PB8#yqLl<VTR|$nuus0<1OJP{0?loMC01VP#H@PtVR~~j1>X<bwoU%hNfA>Mw}2M
zA~x4-LdzHoB5_1yw-KsHy7DfER+%tN{LDvOLz!fLW5H^JB_dpT>V3ncI>2q^DO%Uu
zBWl7OQdcbt(~QaN8;?qzTcnBSTq|DboR-n3egm|%8RS)0+&1x(SDr$Gajgx2xAU@-
zkqvc)0(d=OPCag#lRJ3wO{1jdBHJou3vGG$5;#3hfja)aUW_g5Dos(_ag0>9GoRa2
zIPUsyurd1wcCsf0C;NYJ>G@awZ?Lg&o#=oG5<m;y`{oU)m7Nb`r8Xu)iVk2s3mwXa
zhjanAe|@#eC$eH2;^<K%#QP2A<A*qy`x@e%L3V~KFEA;A<B>P7$Dp`{Dr?Fjg#Qg!
zQmx|N9U;@P3@FL|&0*JE3Qj2?npseq8f55jZVL22fnB{WN&@pw_iy}LK?(kUfbC%D
zqVHg1=<***pWb3f%{y*h0O{xBomxy*MqQEmRFZctPl&7^A9(t8vbb?#uGMzN?IIxV
z<8u1B={Jy|vC&WKM)up+aZ?A2MN3wZc>}ouxA#g3!_08fc9xu2*W0cQcX18%l3o7#
z-6{bX&K-bY4K4>Fa8A^~YdKO6V$uz;_qzSH)IeiGnnt(^D6IhOxV9)(#tdm{r1Eqp
zQ}0bVOqH(cNL|X(#ray^#|yITL=hPoNNg(=Gbm@im_O+7XTBT{q^{G2Hbx=|8N!SR
zWt;Oz(=qH->}hlJ@$ml^ouR}oB3tZ&SOshL&21#rUEu?97Zji5V}#<3mri_Bgbd*7
zn54T*^)+AucfgnDz^x@>%zioZ@W{BVH%X{v7Pf*x=8TeHL|ZZ8#$n^atmuuSS4&RZ
z^bpJn?Peh^zf>$=kx49U_cLC5-h;&ZB;Lc+sluBUus3`9?3soQ!_-TUa+5CEks)B}
z#Q(S!U$jfWd%MJLB)BH+l`nO&6L#LRL4c6`G=jx<f)Z3B+dEsp$q!D2!bU4QG{+-M
z%XQ33mnx9vrE<gpRYY&3C#CUeH#i-nB=Q!b|65myVB2y&tDzP~z+Wk>s1|nLuBz8W
z6C@I{Sfaq13PV#jPUJ2~t7aPCk(Z+3xkbaEt@<^I`b{&LHiqzYgjWx%Yj%ecH3D$y
z3~NH^PW_N_gQCs3qQ_vUE3DaPqWyCo%YJZ7A;+husLmBz-d>sq%VnI37PbMtpbP2U
zW?d}_O*<sQ6$$Ny-Vn-TFnUr_?O@M+)m7547uIbD0UYo<j?AT-#V>uAK%hOUg)lQ0
z7V&1tfnN7w_)X#$whrx9g=5^`oLKJ$4yWVY7C$Q24u|NfV=f(z8F5k%koHDgTrpSX
zYM8gI>34>+LjIyxLg%UO1fj>1nzLY>IBxt)hV1FxI;AfBC%H2dZr*~1m4QCI=;mT_
zR7|`$=|PU2x~6#Oc86dB&wS@SCfwE=U4KkgBeqQ@TOOiy#H-OeuuaGOD5eo<61Kan
zCy+H`f&|nnC}92$12Smv)u(lcNIzA+0$|x%#=Gdu)!dL&Rh2FX8Ib_YRDGVr9osc6
zR14zhI7_(PyPA6$iOB$5-Ci$B$3<HIp0LT?o#kCO%<3m6^yfc)k2dd+hRMLy*uDWu
zbD7r0l5z-e>Q+8>!kGD#T6n(dFtgK5tiS&v5GP0xM_6mgL@2#+%lewy31u8(=G(3*
zHTkDMtAAzAAdjy-vrK1{p0_9wrME`cnZ)tCW~m2|uB!7rz~8l6_tKxtOUX#$mnQ@T
zny9b~{B6M}inXEBp;T0Yma;!4G+nnlO5bOmarv;;Et#X(p8e^2%eU>~R$c&mfNOv%
z`l#Ibf%u<Mgxs@Xx#^!pn8);wj{jex$bT;E^cGVY))fnkSR?u!@``AK2k>#z`BXyk
znYON7YsJoyNXKf9oGnZz@SnF7Q|pbMiL5Yz-JI-WH9x$b_pB|Z4#p3LE%f}H!H2LS
zon%tFGRIUCL1YxlLJ%AlXJ+c>z%+H*wmsF-C-13*<eG%bxuY%juci$<TR$GU$-jP_
zw0d@KT3-0OvEaJ1V)fN2PdsQOJ>Mmkk6O%woL-)slazgF-6@^kJ*jfxAu_kd^4Hm<
z)F!T;znq>T@M1qy-;O4#`q-#WH;etQ5vnasvd<I#_-)uPPhS~(l7t;OzP$x5i)Z)y
z?3a8;>--{5O*L731>U2+o691zlq<HA%XN-3p>kouY0Mg;(Yb1|esAT?j?H}0w}fbO
zjXKiRf#Y`ajEtAxCm|8#jY?XnQwC%5eT)~m{@YAG5Z;>3QRMe`uGY6JiLX`hdN~sj
z^!?{qn!0=xnZhn0$q^~^P&EKX1cS@XaT}=&bCyD8)H<$n8e~0Qs~iDQfVHOH&6nR!
zj-2h~qda?&=upEhm4Cbm^t%gJ$2y;Fg?(r*(t4W(k+3$$^YZgu^~Bw~@__941TZ5R
zKOH8=lV7+`Mqqwj2m<x2G$w<;T%-D`riHek$?vot(TyYSwj5_uNw}uT&#Was+n+E`
z)65k4rvgNDn3cJSPmLKb$dCnbD{Ys9x}0CWv|tcUND!|;#u8}|j#)Cm)`ZF|(QqId
z)G3cJC!STZf76l=^w>#7m;|^?9MM{ejED_~Jy}AaVKr4e{T(_(H-$r9fyhg=9z4q6
z&?U(#5j=W9(vowh!-<=kPu)5aFX)>0UO#~hVTa0_zz8C}STA8o*>Q;`ZRr65l`Wly
z(e-E7`RCt-_00-}i&++O9MT`5*(V!VHWRBA0Q0kV%|6C|8Y973t}q4gN5jToL=>yu
z<%@SVj1xQTdN4rqo#@!U7a(k-a)e<x$!I`JD?5C^==l4W%MmB+d^Lh@Ss7ChR&HRI
zrk;oxQvmj0xu&RvAXC7Twc(5utoI)+!#|vIJs3kWQZ<3EDIqu5%QqiZ)mAV5NT6w^
zh$7Xgq;cwWYmjxmuoYN;tu3tt>`NUh)#2$FBfs?6V$&C$W`@i={BZ`O_mD-x?NI(M
zmXa|hVYpfga=_b1t3gkau?dF{yB%W}ftfv{%h7c^MQZxU@)8WS*uiLb*z-+Pd>t*W
zDh2e-AxqDp4swmxRu=W;Wo+GsSA~k(aPS&s?jdi$$E})q47$w)RQ(k$M?P_>yyA#9
zGIQ6YM)$2rw9FgwW1QW;hO$RC`xg}&-u|w~4jOJ5<QEUgcqPFtrP*W$ua$yZ#xG|T
zQW&#4p`sOdDm#iJ7^ppGo3qT}3LzO`bRip}P;A>|5tDh#2VpNoB0;+J<{l0+Vx%cv
z{_|7oGL8Gouforms4O`24Wr)5KZw`CboaDHBf`lG4Bd#$GwN%<l9wDMWZpCYFu{M3
zgPhf|23dRoTG=|o79e80Y-UP0kg$ieFE~gnXF76t@y3UQy_f{OSJ9jh$J!!kMSekR
zWag)5<0M=oy|^=7M8#**Ww@~s`h~<=R6M{&QrT8JR`~;r1+g<)i;`t+B(7RB=gLQL
zhN*{r+9LFXzXHUlnKZ*w<?0J1XvLc7ic6z`gUaz)c>1jE1q&V3qQ;DDP-i~aO9})s
zz@hQ)K$2UA;L_%sagKXL51nvRu#yv14VDGoq_~T{J4s|wf6)c56GONTG70gO0^_qm
zqy*nsi>W0tb|tTpJWDysP^wnV0@fj!3MxoAa;bF@n38g71gJrol9Ie!wbJH=F{X?(
z>>EJzvwEXg9Zb>Ju<sQZ#cj64barM6xn0xlc@7CHN1Ed<Uj`<BFU-vwXAqA(@g^H_
z!y34QY~>Hi2zB;D**vjn^r#q8s-_iHGvk~P=!)U|!w|{=?51xW#DzzW_yns+9m^$J
zR%$!hfpJIDw8K~BaE3BiY-A$@;fLVx?7L>nlK05Nc!ExwZ$-?>ieiUG0Vzb-W2gVc
z`DnCUNp+-Oy#geo7=GPu0C)UZ0?KrN5(a@)ykuf2FBwZ0_Yw@+IgnTZD#8aJ{(qG!
z-v77m{ol_0XFE|aJt2R7&&a;Y5+MC7sZTNXi8+ud8qNiz0s=CDweP^8&oV*nV9CS^
z2f{?`;vHbfmP5FNw6Pi06!(`o;!HhX98oa55LsagYyGBiy$tW6F669r#YO5iiC8UH
zf?Cl~<KGBZ?<jaC$|Mq0`Z0hb=s-}^cNDU2XC%(xBIYuuC$xt%_2OgW44ce{0ZOl1
zTB4!NbxQ_Xdn9EK4&pk6O|L$q|K3YnjMl0|M1-s4SLNU#I+zQoiHuOn*84Y-Oz!BY
zkV)Yi@5M~@IZjIC<Yy12<KWaFV=MQ-W#x>xddO?_5eT=B<%Gq?#fPt--^~qRd-LtH
zb{lVOkFSfRU0+q5Kjj61xEMq0%AHUnRw4jlA63)qDD)AK#cfmkYYaF|F$^Dj3gAMu
zO!|0xH!2ox=(htM1AV#7*uf`d-)QC{kNiR0?y6zC3cm$A7Oc%-&nEh!H!IyEe(%_(
zZ9fqeRZS3;>UcRFAzp*OjUolZSKXwS^XJk2t?In?Lv}@RulX9Rju3JaN|n-jse?vW
z4Tv?4|JTjR(bvob;o)l#-mmP@=1pCdRL6iDdfJ_@lqgn&cm1u0x5uS6Vf|ynv{~;1
z<->Q(6Q1#(AjtBlgHNwFr!L;C@dD?Mz#b6&R<|ij?+b@bAMKp+(BRiDOhUH_`_1$%
z>5cA8qd^=V@%}}>)9aqkSoD?c3s81Mb3<j{?mxHn;ngU1Y#wYd^?)%6wcsu0Z<-#7
zn}uvSRNUHQstC|5ldEHe^UcJt$$fF%s6mx;S!8_?3kYMa7?_sw@#DNs)fC`V^PZjm
z$#>v1$y2)j@tpjB_9O29$#+Z~OsxMu8-w1WX+|D4kP!>ww&4Q}lo*Y~2NOKdQ;CWM
zw(1nAGc1cW9{l;_frDj0I0K&@!kE`P_l~r9)MT6`G7V1%j=c}%#V`x7j697TQN~Q{
zsa-jIMBP)8<uw#@5QM4jm{8E5Pl4TJ@#9J+)(L;%c0OZq5IJXO8o*O1*K}oNz-aO7
z{4U5);l`Sw*1hhFxTkiP1V}Do?4gIZhi!7MK(VU|=Qued0s-D)ULXvj(K3w!{$jRP
ziA4=3Ju-Un1oeLo`$bErQ~93@a{7<z{YM7*KXS$Y%i{SD7sS-b$<ChMq9{c+Zk-V+
zbmmB1V;z?ED!<olL&}@!4y#hu#+Zj%BE^SFP{>NtvS|DLz&b3RVFcF7873#p{TDAw
z`X)YN$!|}A<#Rd>g-F<rW;dr3gt!uBPJ6hgx+$<R<=S-vxmuD_<LG#_DWm4Vzn+uL
z6wq2CUocW^SghWWKnwacA3OXLkSg6>UmjbjFQ@lDxjt#jYLB11)!uR6tbTah4$!jN
zbI)hoSc51khs4!-pEX{0vF!|2q(AcKa(SeLr<8%p%|g8%0SmQG8hZdUm0cKnYBOpi
zFccLGqH4^hL1>~~p$W2D<yhEElR@U3aG7EQy!Oc_dYi<f<zf=gvQJSgVsv&Hhm&Ou
zLV2?C<U34$NQ#wash9-r5hxnqQMN>ZK9o|O3mvKa=$gp*>)s!Nj#X!&Unvy%@%cNI
zRF#I<;hRLa41--E;3P7yO56U+>5F3lOwlzi*(<N;)aE8vx-7`gisRcX<{G%SbPZ#I
zxL0;zs3%P;!&i;d{amvu?{@cwt??Xc@7D(uB#emQ{s%?+evuLTn<DtW$oDc<5|zWM
z1z9)f%^B&AVQ~e>`Z#rwjlzC7Yl;`T(Y`n(8+RA|xx#4_Da>IhjFU$~?VqW2y_O|0
zke@5x2-Ft8yCQ~yOZ(!356RD)m4(16RCC&JbTFM2xdmnDA5c-03d@F`)-Aq5WQx_1
zkN-R^kWnsdJ=LbKYvh=(^nJg>2RYRNhMIRkLkM$`s!?=0<o&b^b$x#I9$Zv%*jDu#
z5jhNeK7tecR!%ATx?_BU{huJD88LOqbNXi<8iM@S=Lr7=LJN0#<4b%?cbu`ihDTbn
z6m!1XI?(z`&gFW~@PU17NdOVy8s`HN910UiKk-CFXAhsM?v^gg)&ffq;DkaGw*BI~
z{Ji4*eaeFsPE7CR_m(}O-))6?NzC!ZgWT^Aj)OCUdIE^;xTHIm-ya(ndmhi1=OtG=
zU<-R5U~8m^NFxgdX%Al<Pt%rNM<1RuPj3s)%UjvQKyEN4>Nn5#leXrbxUWHLPn|tp
zZcWC8h(S?LMcF=V+SQ*Gd%e@?lSaKKL3P=!I??7FBwT5LAd6ynd;rFRCZL17IOG0z
zs7M*-C@FdTI4<SZ%C}_E$M6KYJ#LH<6Ct?&rCAK{;RsOqGdCT-%&9v@w%o&G(@*vM
z<y%!lL$f-KnDa%=*TT5umi>}^*SsKEjhpX02GAA(3NlfTHv#PYll+W)XV8}gG|kP|
zw0Do%joR0<W`*g!T~pllt3ZQ?cA@;3K#iJJ6GHgc^v+A%ugCLkUqG+p!&6<4IFk3=
zylaPVfu?-|LBJf;uTg5Lk_Ts9pjxufx>#$~9<?)71#k_3{mqad+J4baMIdqPECl%*
zu&|4FieAHcRql;#0S_SX8|k}kl1@G9$>1m<$|J7k7benhP~~U7P<whz7|8yEQ6~)8
z`tu*$gzn8p$_pS1fcn>x2a@oC0e{SRU-w@Wy8RBh25|)CGZz}d&lB&q*7j`%4_qH0
z;8r_-5U7SZ3+&4JeIO9@k%?E};L_!Mk=qgP{iE=PmxH)%y>6l=6O|Gvy1yhM7~o`!
zymRW^@dCfJq7sUUa)h;?!UjS#@iG~MGLlD=NeFjU1W{BB0*7Bk3tYGPrk}u5T;|@v
z10r*Lw_QiYxhJ_sb$?Hv1N~6j{Q)*CP$Km~&_;8q%6jBK>yYzMgjjfMzoD^xG}Zqz
z`;wabff-Mvgtg7Sr^u%Q4)v!^RDcDY@RQd0P4%w9h{&h4c46t#isT8oB7Qy~A4$T2
z_Rt`A+|oMeEfc<psu}>QO_5ezBf7ws!w?kEbFMD~a&uESZPuY()8FG;<Fr`rZCaxN
zW$v;eJ04NkUFN4yrk_7GlC#^+FM$20#|I22(e3FHEDTr0c|-N?x3v4=J|&P+&(7(1
zWlPm~+30*v1{?IURZh2}!3dp#qT)BFWn+0shFTNN5Dk9#FO|sSAdJrmgOmY!`kpWM
z%H4`5qjSZH4cqU!9OWYaoXZRLSnU$4UTy1@Z5w;ib9UFj@6w)}UWIyod^wN~Zwxr0
zvM)owZWxSh<ENra-QT|NNuS{Jc6~%#EieWyUQBvNBFH;$G2@;U>bGvlXgn6IC(Fx6
zHf#?Rzry1@CWyen1GLO*3^ksXYuvfH@XWoGPcH<O6vO#t-S4G9DnY|>=(Ash*Le8s
zo8n)tt10hKqV8hkW(tN{WLV)L2U`TPh4$5@u1XErPJtjJty~?@;eL@vdj`AReQ2mf
z3BSIfSS@+D;p=$EF}zEwyeX7nPt`0S+!$gh{*!S6R9dSj4NbL!7R|R##+AZh+p8Z;
z^Y8=wUDJ>fe)qwR<b!5+BJ!!mZMnf6Jn<S5f}H%x1<AN6#f($S;Ips6fi}8`L6Sex
z*KLcP0?iJcPU*>%U_zLJFPs;z;!R=*XIFVLrPp^TDs_+5`G*HwuLD*-_<%n5;Uoi%
zTQ{=H^3PL_mR)HDWYfFg-TB&3bEq-iEPm4k2r?zZ<R7Yzg6Fjgn<@X?Eo&7zzDxZR
zJ>-8WhR8<H!t_Zkkv|t_CZE)4K9cuQU?Z)cMIIlg&jn$CMoO$-KnlqEyZfsty6{+4
z&4IS~&4m`>HylfA-ydSblS)6P(ttWiit<YE86`g*VTpub`}>8Drr=8!ScL<!TXhyc
zM*RZ^h)76FA&6H~3}a;u5G&{`fSMaA6=!kXC-FFQy1CU`KRntBh(L@Qu*E!YgOJ05
zMN5nhQ**1wW2W^uMfTXUnw4b3`N}L~VYobmY>8BSxoz3uvE@Ou!eyx0v!s`&Mt0?F
zDU51d_zr0^YgM}43_iGNqf0YXv8ru;_uqCB-A0MQpWUAy0@-aN^0oLUr(10-XExyp
zp}&oMe;B6I@|$Icm<NE1uRb8sCIbbY2Uxd+(t5_(aodVH*7n+ZU5*(_8rC)mV^hCi
zPkex+SvBY=d>v?8%Ww47VAdRJo4>muQ5Y|Exe`xu-M+LMye%eU>2ScOu*<Lwv2fx=
z!;X0=^bv#EKi_O1z=|~l!iMy`qQ3hH5$%WEgGUHiF;SLvA|U8k>)U56B=2v1QNVd?
zge0&sgyE<TWD+6h2G6O14ESC)Qsj=nJVGdiSd$fny1iIM_Lf%e&<1f4bIWk5dj}Dr
zQxW0&d&h*0ZNcHMo*%kqB@hzun}1ol(~`JkD=&Hu2D9^9J+o>~GEK%L2cUgNlnj&B
zt>(L{lyf?vvCj}?zo5ZntBMp`PPBH>J&q3uyrSl?bL*hd8e$MGsoq-w_?(zOI?MR=
zVh(8R1E>ANS|VUgEC&T7VQwLjELJ{%Tm3gI{rI{4=pb%*7ScK31M@l3z`np~_NlF+
z-#NCB<peq!%5q)}@f|w2otR~_75)aQs|b%&Fok>QGf)qEOU&7H6dOmM3PcauC03b5
z;GPC@Y%d}sO~JBFW!v2-6mOHW-gtsHYCrofjn-tEm&I_io%$1QWEdO%Ng_)7i28nH
z9!)WbDc?<bHfUm?S5c&KvIsnsOi0h76aOnNMTapQ?0$c1EgO`rL`ASC5@X#X7;4u=
zDVFo3Wy3;m1IITFrk2NW^tu7+K6JDvSql@4C=Q`<J90j9Lh1*M+`4Cj^D39SH!VGr
zTko<9GJD>&0Z-*lCwx<tG1U61Sqeqsa~4N&aim)LNN-*jfnc^kPQ(I_haOW>u=cF>
zn%dKz_IdB<T>&@&_Cw^dko@$6^t<%XhfSU;FoqZ4XOk#A#E~F}VhpX|sKAF0;*b8c
zD^b2`avI_lz!SJ|?m%=%#xh+k!-p*m`VFiK#wfvT=*2EG<L{{tx=w*ryp9(~gOMlE
zxCdAh`K!2`L-Fb(5Bh8qnC5~9>2A~Cci#j}i6YyIWrb0d1HZsx5S%E(C<TxHwASc{
z?}6t1JUDqatBURu&M&X-ND6Fl@#w%**UPYXI<8+U?s3LfoFP9}z{H9f%-#wAL~KMp
zxPE<`Kt;s)XBX3%CuFimBhJ_HviP;tvZnEqR}a3rGYf!t43GIY1izP;T1!yRc0&=e
z0&Y<@4_^d(gwQzi7X{BSQ}i`-nl)tCX<s@_IW%uv59r7Nm~YojZ_UZ^OZPJ$K+*(N
z%2VYHK_<Y8dFIAYI`8~&?SuXZO=XL>N1Drf?C6u8O<BS9;S+%k)uR3FcUT5A>(*zt
z{M6;p@}wcrad?eK+9cCbwz#}B!=LSfeQErTMP1NmNs^&`^eevolfc{>gVC!a5<VlH
zJ(3Hp)s+ag{<6~&aUn60<@&VWCIM1)pGR}IX9auW>c}KU;b_A3l-oD8MB{jV#Ll1K
zWWLaThcXglZ(wSbjs7%EMBUd=x_`08b&!9_=lB$+1lt<V3x#?#iqLwr50=J_4zdqe
z>=0JhoYWbA0Z8%qu#<ZP5!=8jJE=;{<$w@cnAnU$B@*XLxENu*)m^-f!ygRBy{Msj
z^&&2lyqd5u1vK~U{lE@L)0;AM_2fW;yS{Wmf6YY2$ck%<EGtTTBI}O;364m{Z&#zy
z+@xXNgGKJWlcLa}8v4gwQX-9V8w7s@JxzMGw&rtFo-CRG<Kx}W!Vxq=)Exfu-=|Xv
zf6;T`L4>F<cpWbQTUv*b+vC)Csn)kv(~Gj*{#LxGd@t6Evb0WgC5eGc#@`d(wpglu
z^s24XxqT4Qj=12<=abr5IAgW^#aHln!mWE5?A0wk_Az%D^qWTZV$vHq*Lm~Nz}#VU
z>rbBvSr%^B=0``4!fd~tU)`x+fM^=^7n(x_gEJd(+cg9KSdEb+==i@_da7I5+(N>C
zltu6z)^4l9^Y`bkTeXN^|9e65{?&vLvXx>N*4Gx*xF>-!k1wOul~|MevRu(pJmEtN
zv1ZK+dz49fX_sw8l}*`T<h}8CYzzj{tZ#+|L)5W7PjP&9Mn@j+&A1Re40-L;jvLqz
z4UIZ~vA7==q*`FDT{=@{bRx9k<WBV}f{4*y6i?AtKh~@}TMQPD7lK8jt|>sV$-z#j
z9)SNuZPD#9@r_3cP}n#R{KbZ|5EGdpb<>J%r7smu_^#V@n*B&4V}C>bJ0ZK*EYu^E
z&Ih||_`}Nr&fOz#QC~2XtLRSdJ(B+!C`18q7Yhkk6<PVu)=}t1t@a*)Xszg3a_OpT
zcz1!v$6uizWtw<?eJlh6uV0J1i&c(uww65kQ<X2cD&dNZ$^K>_uSX-d%exN1mNO9j
zqXAXMV58pan-e;n&SZauUO(nG$WhCS)F3^(@JPi~Ur!*hjJVJG^dC{?3)uD#h<BD_
z2QijK)fm^-{20{L^~$zn^CV}mp2l{DLn%?>x=rHcsJOm?VB+qY0n)Iy4$`6EpaKJj
z77uWT9Gbzy>yaIGhM>r{R?#)pj5lllxOYeKcM`3l7M}wZuaPH^^rIV%5`7>@5*Aao
zVvq7Mvq8}g46J1d1*ZAT^zrI=%|In}pTiNL+RU=UOQ)t=5btUj+gw<HbAUDcIRzV`
z(`B;4ll*Y@apur6<OQoI0)aTOy9^h#=amr%O{)#uzi4$kqqlyquQH;K4h+3xGCl-<
zw7@1O|1Pp!FN_$8&$>V-5;Zgv!1%{*byGNiZYgY6eBzi-FrOf5slkT~^*2*uo;PNf
z8bi@s(mG_4*$=MhUfqtc#KV0+A(!2@aE6XX*4HQKC>pl)2Fe<~a(Q6)l;`50#F+8(
z!HWUm1RdUa^ECCTZI{YGjkz#l>^>%MrTdY%@OC?HRT2d%m%?letWS$Lipb$u5&#sU
zqYbRKjXx6-b~J4As>*@p6QE3c2bXD%d=;aiR)_}X&;3Ar|Mm)G4nwCz|NF#d>zTzd
zyO8M5rAlXm)bV{cD#RXZAXD=SV_t&b>~XP{%p#VV8L=U5uLn%R`H((pbM-vILC@c~
zi&z9dG}wK4Xda@w4O&#R$4lN^p2Eq*>CZFD)t<azz2KG(>12_p8z_V2OiJC=y}%^t
z{TzOo5IV_KB@6YE<94P7q=3o!dN4O2ewSK%1Xm%PKs;p|!DxJoL4sqLyDGQ)PUi^<
z`5f8DgJ|2!=$6P4o5yNe@qDlrLBB4zTZ6N&@%gEaWw*dggLV{vTGZW<>8`?`wZ?Kc
z)wz)yt|w6XZ2yApsU)wFjt7^0&70QkG4nMY3YAHewhJXj0syJ1q$;tFLI0!#Jjk*g
zi^wJ#5vfUOb<qJW1Dg%DcI&qnoAkDbPe7)bp^$4A-T}Ljr-yrhbcSh4j-j)4mnK(2
z$VRM4_`JJ3?P6lY_!1|s-<_~^`GWV&kB=UI&BVCPMI`<YbDB^pn_@xKhkDb>m05O0
zhUN+$;70QxKDA;vf=P){kIvw4Vu!ir`IKIXwmnxQs(x3^hx3V^t7{1up4khK+O>8`
zQQnvdJCbmyFc(|L*}$vWFYQ<+WKEWEb+yy_eU6>}?sF;F<NNOosR~VDhmgpWC@f(*
zx+6!2$TbOmGqveMxr{BV?|7i0P%}qF<ZYsSs*6n?gT8reqEIJW3UEa=f0>%eyv;-I
z&UW&vd$Rz)_G;(EmU*``fk_y>esy94JxjuKtH5uz^ju#M?wtXocIOE52(EN^mYjY#
zXe&vfjXt-$@e&1>k-(HC+6xzZ^%X(FZ`6%3MKqv+Kqs@O#*-dY+{>hMM>#G*LB{b}
z{z&fMJM*TNT8$F8Y>cb<a~`J~I98_&<=9vZ#A@sWF_5F$PX#;uk0!>&eUF)*ZY;OK
zUa0iO#dn&G8jJRrd!Rmv0!vt@>Oe9<wA8IKC!a>`eQ%p#n&%ciZF*!>ibB(v4FL?(
z?ync7>;8=!b{|*Mn&^HT-Uj_hoO7Y?pVpl4Zt_hAe8-)%R?TUVJ42CsRENyW84{X`
z-n;|=6Nb9ZbB4ftQda)rfTQBO$-;IJ609I&YoROd15g$<&V!_*<TxzRg|RNC+|&HO
zFi^i@Wurh#>22!t_V{yN!M$`(hXUyq(^R6_ZsA6}lWXQ{^q+iij=u3RIB^54I1>Do
z&9$kuNM#KSAJ6-DvVkd<BY!;-#^Vr#$c2$I^{pe^3o`^Gi}tL&X@<R3L8(v}gDK<p
zLZosaHIvpy8VzMI84VRKC$J`gshAR#Fe%v*XE7VW8NzR1xxHmU7<#WBm*x&@1;yOC
zl1?SciYO{se-50Es|MKOPpPT*?fVten^z8MQ5U~lN+`HD6*s1}%ig8HTeog#xtZ>~
zzssL1FpD(?xRT}2E&QFcS-E9JIgSC{hpJ;st-Ym#5^Ekghu*Y(RH*bnSj`s+gpn7<
z24vx&3_~oA!C3|i0@97|50Dv@kr>d8Y;*vDIIx1FcZNGqZoJ_lEd%ZB-S7io(YyQ=
z%cDf661=H~q+?)~5atCWnr(n+R7e)v<koXM!6?x1zvD{WOmuWu3pmTdR6+Tq4d9{h
zlnrh%=oBeJ6&wi;gjomQ!6XpEKg@Sz&HmJzC+rEq4I4h*riW%CLaXp61BZorHE%`x
z)>(1M6L+4AA5e+`x$TpQ4{@ImcxFelwh*DvpGvFgn~DB2@;g~MT|7k=wT_u!279r@
zjcA(rpHM>BT$}8gq0EM<4Uju}CqJx16}1*@7zS=Ik%YcX=4&ESGflRB648c8ZimhI
zt~|nO!aR%M`M*Vdq_;1SYJ6r>n)6N1;3LrWM?%3_sdhm7VBl=8kr&j9`sT^09r;RR
zfyU;Xz1^y748YUx8YZZ}J+&#k{B9laMZS9_kxPXn&@fCp$rJ~{i-_Wd(M0msnhS-+
zL>G%R+MZWw*(=^Rv@@Vg$kCn{moE&jGB*is?}DRn(3n-PWG6>vG4-8^St-OSbnql%
zY@q}@M}22>u!gqkp)M}3wf2dE(aIo}bY!dla__uncsWAoW%1k~=Hp6seM>Emte%(B
z-`aK-dn?vzIgr?0RQAD5!TDpCf}12dWG`6|4NbavT^4a1D}UW@SOmHv)Yz#(2Mt*%
zA^BCaKZ(drmL-Zs;gm80LY|HuJBZh#^$KWm$y*;dn+lYG)rwhIKi$tKef->{(%xoO
zL0oulch?NMG<Jy~pO?Vig@7gd_`xUD-zBVAYaR$DJ+_B@=n-(%liR?14QwFO^&ptL
zT$nYu*yBxv<(?IFtR$!`7y~<!9PS&n1Xwt37~WS1=-c|diCAXoIPWmM3F+ksjwWcI
z(QBEA`F)fMR?1j$rpongRoiv(fe+cqrVF>L%Pf|HC3Y+Bj#7Tw5P!izCYv@8TLIsy
zf#v()fT{oSMPxm79w`|Q&K$aVG9!Xgj9d>v*LG}2$<oMBQ5)&s8o=~6+tXs@<i3`$
zAlzdD?wyU^BWUdU!tCLhg>Dr)-u9d}I`7vTQ++jQsc6F8bt~@~vyiFBS`fu{q@G>+
z<q=U8)P=Za(E+n*qO@>%>hZ=)C_o%<am<Mh=I<9@F$s}Q+Qui&{Rz6n(tKjTxo$AM
zFR3~`w1~*ig`tgl*NgJ)?rWjmlBK%Sxzqmut#W2mHC=)*;@QV<xu)7Ss{Lsw8CLmP
zFXisXhZ7$0jz;m;dd-9VctYKxJLW7kK0<aPdb`3VvmHpxrK#yj(df93;RUD#W!?NN
zZWC_O_R{6565%FJvN7T){E}#NGtBUP8Gu%W(vprmRBdNw$Sp!7DREZdgo8$&`C;s8
zX>6xEKBqzT<yEgk;il=vaW6?&m*m?p1Se+&2L!q}ue{n!gEqeMF;xK52mHVeTa6kx
z!uIcJqx>8RbLh&IZN!LST)$81>U=ANoQO^_dRfj{)+5R1D2zx#0dOBnq0%=6n~z$f
zvmbLvl|)Z>)c6pKazA1kH+Fu_+nM89w*0`0ts6adj_q=}UCP*|V+nK&&%hOnHCHpa
z+#CF{2p*oh8#tB08-o=_9<1z)n#7DKjx8Z4k?bY4V#{)S;sBHu_L83ajXP-BR0LI_
zNAi@{1v^#DR5x((%aAv=<O+mme*eT9dVc&2IN^)o0wBER$gWU4UpzEL8(a<2llqJ+
zTyN;J<^#~2N*tI!&(s`e$~V}TR!<x7Tf>yOF~9mKbH#`wG98avXFMn9?d#1oZV(e%
z1c~u3pwb^Yj5V9hjYXVYJ6eB-#}`L0I?E^*NhSHOx+fGOkU?i+oju`P((zCJ`)E*-
z8t2pfA})^*E>T^XHI1rD2Al2xrkv0TwkFN;)3!ayzt-<PBL55ZwZ?nX=Fx<dMnL7G
zpK-DM^s|F`0>ah^kI(7{E9c+O1quc3m{MnKc#)=QAn1JG!GTpPuc{NWOQk8u6|hB9
zPC9p*az+@s!PpQS2f$c<e7Q3i6-5Y8(GZf>Oenlq<)X0Z^eZVuOHI9?Du<7>7dVh_
z@r5$g`D+Wa#HLq4slRSf^-`54loCwgWjDQCLRR1hXOY>1qVbETFs@hYhtb%hmtrKK
z(6xajxFS~a8fgd-Zm4M6v_QZ@9Kqw>dw*l*nncC3zNp0v1jb6Yi^rJ)yR>GL0K>Tu
z3IP!%h4j>+jJ+%2;)ae%7s&nAE^3f8UPI{+paXS~VYS6#e+F@XyQDON#)O4Nzwd+-
z$28t#BPG;DWIsR?UC?qxcB5TNoEq>$v8MSRb0hr4`jVh)$4u8gAzRg&N^d!rtwlc)
z=@X#Px`)R=jEI^IPg~&dab~4Xw`$4@xWPUk*9~k)^@65nwlke|s1Ifp3xcyYL2AVF
zUjuOnEyTs15iumyq<Abs&0u54(O^C$3v>`!TysJIim`3!kIr3GV?o}EFJ71xV28fm
zZLQohfwp8Lh9Kn8Cv{qK&1V*&xo1Y##H8=ek24qvLTXPjWQhZh*jR6Fom#~khJ+eY
z>F3r%?1Ds<<8&_8Kaw(id_JsP8nbk`nCvo1We&oMp**$Zlx{?X6opj{2ihq3<Dl8V
z)YFzDi!o=kA=C<^L*&vzL^WW-6!eGatFV=+PB$8*I0}C@Gz|bl#e_aT=37CU12KQY
z7h+)#9GIiOe(P3-T`7P<p6bf2^=^_6ce4rYWi_Ypxmt{FhH=$6_dBe9P9_?aCoPTt
z(=A|SfUEha{zA99CP|L8{;(y8obG0p<D~;?iGI`Ew{$PhB`X#f?LwCjw1vo%JlQCi
z4~5U}4=2F4GS7vHbzL=}CrSeR=Wj=;Gf<9tL{I1I=~lYs2MNL_v;7J050!AH+w!(J
zcIDuH9qZtNKV91xT6*-y`3Vfzd7SW3FsD6<4Q*u-oIGJa|Fjy#{qdeLSPAS`;x>P>
z#VR`HE$s6JF1;9zJ@FsiIO5@&a2FbU`MrJTFloe`xzG&=UdQ``rGh5kHwB@Hn>ql(
z)(qV`M_F5LvdZ2flz5d8phiVw)G{S#GsG`d#+=-~kyqw}!8^4+xM5tqKkki*x9T*M
z9==<7*Z0U4OQAVn(r*Xp8ywm`9F$XrA)mUY@im3_NWc2zmo14K_8JF1uAwl4*}ZUv
zHDc8L`P?U1cewg0P&B*$0lfUFMsv!NyGTq=<E$!2*!2eY@PgrPsqc=;MLhj-oA)mP
ztIlEs@PLH-ec0;NzZ41W3J0;HYVrxkyYI}S(-o1}BibfVKCcwP;cc^w1qGz(Q=6ME
z)R{Oui^q0r^Q@<gTKx;TAkqR2wB$GXdQV>!-}mGC#9m7Bw4-L7b-;F!f0(s7B#R_#
za}W(fk5Pnl)kwPaqtz2mWbjM0G{_<V>uIN`)rn|kx7wRLL>c1uNLT8ud%BN^j~mPx
zV{{iW=qsRH-7@+vbMlr*gix96+I*ZQYag@Km|Z;S>sJw`Nl9W`M_F(wUlBtWg3pEy
zflO&^xI&AykV(3*Q%k|q!)@T(pL+|3R#dbVd|+$&lotPPc!v5rv^(WV^{%Y*FH?8+
zLr(wjpR|x|PK|Ak13N8`l9~=^<Fp!R@IW)~uJaKL8l4<Ha+<;dM)o}KDXqSwtg(xw
zOYYZ=Y9ap|o#Bf|<{T5cgRxLkUPaSM1$9tf_Lvo~ahlB4MrkB4M2=g_)XOJdyF;N;
z&q)dEYkM`4@jXPbR5{1P916uEcJ8_7X2_Wmu@<0zaX+C2Pb7!n`15&9GfEAM%}WFg
zFArzet8d8+GO=l|EMk>Cx{vviv{lp8(yk`ViA&E%dL=&C7(Z*VRfWhcV-aL4@n3=%
z*+K@W8<e{->atTg&UJ34A)<2@C)>F|I(U1}TfE1t46>I@xHDRG4hKJBppCrroWb{_
z5P`dZPVF!^f><Vy`Pl$pRqjxA8xY+>tJ!;h3cU@77UnSz5hfSKKttKk#~LOi0ZV0E
znNqJ?SeQ)Dy>%A)rz#SHUQ=n|zNfF?mUJaC#R{}4g?73gvzBVw3XmL(0*hQ~iigHP
z>A1U$@F~t>toR3-YLXl!#nh5y3=BK+no);}35E@B=MhP2QGvI+ez#v81&f$3l#LmR
zo0*|j)xeY`gD;zr$u_*dxtAScUr~f52}=xO$S&)23m8pBPG}K{-AQMKpf3iLNM-Fq
zY#DGBn{E)JkV0-K^NYA0WLAb)xUhZqS_Ul1p<_Bkp-gArLU}NL&L#AyIM%S>418%_
zB4&L>)In>KWOb^LADugC7+Kw~RlVx8Y0NO*aIO5QIl<WBqFy}}$i-a(0=j<cs|jlw
z2q-G05F;&dV2c5<JiEZVEDY=oL5o{THHkVEMUuDaZWe{jD@7&1KVCT}j73!?(I=7S
zx~a{LS=E^5^&GF}jwkdyCgnD<K&*>4Oi=K(y%RFX;8A1%B$5pb6g@f1CQu|*S6=;V
zLR#ZH*DugECaO>NuT?OCn3CvMA5mIOu<zr3ow}SBUJrJ}E0y4O3Ku17w5m$WelN|5
zZ4o7SUeh3w75#VhJlE9KW;7AIjrhKpSJ(P%DS3mjsW-OUd1gXS)}-35-a>H7{{uTf
z#J?}pxKy4%x%e{gFPGtfyVyG~s~^3L99+*=Uj%U(h=+z{xoOY|6^qPq-)Au#a4*J&
zyG_m*I1Eed{edI(r)RK%v{J3Bo#e=dz58v$f7`5|hM!HQ#Q+8nv5aQDOOM1oeX2kX
zNlq*-KqvNH9C{ZeC6uyIcjSf+1?qU-jCKf^g~zujen67wlcs8-RFO(NkvuOeO1GTO
z;6|JNAb<e5hvj%CFFur_T73TS%GUW|hGm~ntC!`L+MDr^zF=Qgko%PdjaiE8@>-G5
z$nX{`*{Y92{#Bp87sz=93Xu_tQ}vbOW+25Y2%8mz6ylqb0e%eS?MNXxFjWN708y=w
z;a`bFUs-B>S_#16%-??T_b*_b6BZT`4-yWZ<L3HJ)@iB40E-lVE1omi;?m1|V=?a$
z<mE*sCMWk~yy(D`cFRf=Pziq(R9*?)Dw=;Tht#gOz2uU@I@{`ClpXUzJ;?G3vU1)d
zORY_aC68?jIrImGx6DAog{b@}TPKyyezp}Sgticqh_g;y8^VI=aaCB5$KIazM0lUD
zCYV8zQA<CVC5)*`A+3^E{E?UxRO9~H!sVkMqKSdO*xDF^N=d__3X@s#mb|B7nOT+W
z5qawlk65>(`YPm08n6oq^fF`mGaRXU7LZ8DHe<9xiOwdZ<5xy6zM*{)P_*%fsdU1H
zmg(Hld3V?Xp!_JTmd!H4W;V#MB5RGSQ&+C&#R}as(@$Dykl%k)BKf_emUz}xbeu9<
zED|LbgaGw2P&%v7a9{&gfMu{snm$QTbb(ErmatSsBS)3R%C!BRY$NPs2NO9j8cj4O
z{1gXC2nom-oHb9k5iJnHw5PBc7>58|RI-SnAX%EQL%S&0FgC8E3>Juu8NNU@xvGV6
zdAq)I?n&UT=@&7~GEpNs<^a#eHy{#UfV)K|{>6HfX`Vcr>NaI~u^x<^N1|(;_jFaA
znitFdQexeeLr{M0=o~JZfOBP?o}BS%NIm7VLF4Dg3|o*QW>}2~YhnDiAa(Hk(wjmY
za(*FSdjM}*%x2f}eH=i5j^OeOXo?b6vsHhhiEs1KemElk1|AJQZ-@rhd#Wkr)!_P8
zqaDh}cfywq{XkNnUg;ZTmcqB1C{g$-lU)0#0_y8jdiB>I{CD-{>*HR%mY3I>u#B{k
zFe*7Th%ZD_3jJt~7{bq&G#N8MpO#U0##9J&;ycO>R!z%Zj6(F%skt;BA*S{)WVlQ{
zbQKvUMuedI!jTtT<-59C%+|2~EmmpupU*2TQ)3{TsV!F0Q%~G|ranLZn4z>(P$KZq
zMU*##@YNuk#yz**gaA^$MkzCHK?qCW6`f~-?Mio<u>D|uks1@!d;vE8Kn9d_Ebfxd
zP`_7$CAWI}fA#0ko`$E%%f)zhV}=c{7k?c%H#BvAt_o!!kZ9FpD&0t-s&tqxkLf`g
zoV%jZ3IDFFiQ>Ba&z;`O9^E=Z7wj@pDG6!cLW-8wE7LLA1RCA$^F5}K1b46>x)vo{
z(K<cA+puH;CsMFjFaRQP;(8GcRuDGL(t|9Qv}8E{B;LwRJAD%@mM|tXJcolSOI+6r
zCPUTQ>jbk5ursONs`<_K+kJhx;++cupSVngI!K2=I?7>XN#rSW1OyQOEo_!Tn`RQ+
zKd%9~6lw<<sT~ltZEbOlbNHPMO%uP(Y>j-fw-#wO&tUKrYp0r8w_4ccB-n8~ffnZH
zGx`nkdk~jFLK(c}fjk3y7dV)_OuLF5VWDs5^LKLW2(LZU<Wqagd@0uH!^1v_3{Bv3
zETM$JLhh}_k&WN0d)A}SB?G#op!pep6y@pD4DYkHyB*vNnbQKx5DShvG?SPqv#GF8
z!54aeeKJXSZ%jglcj7wsOIF?s1HY+%P{D=d;R-@`-iWb^WP*409wrM7uLzA{L*s>b
z4J6Yd>+p7iU6Y-9uM&Peaxc@i$%tS|r|v^>8Z@L-`U`2{H{4(&!ys7F=n2zrO9{#s
zvPU#ezJxUI$_BB=o)!X>P7M)wB+wPl96hJkRpmHABtjBY^XGCXoM;hw(4hdukryM&
za;aO}lTJs%8FV=vhG}aq={zJY6Z74#uuDn2e>1rVXg!K5@o2cZy<C1Cpd8Li#A?0w
zue<N6LRyUhYKvxLz`k`<KXrz_7_`8Q1yNH#uN=AUg-YW-(@6{63JwM1bogU$2{mvF
zQeQwLtMW&qOxn*XIa*1=?wZ1Xi?xt0TWmMZV3Gk_mb%ke$Se<oX(URD8rrrrjpL&i
ze7hY&))A>!A*gNYthV0zn%;H7#S-i3V8c4MxXt!<2|KKHli2+(5leUD)i;LKkaS9b
ztrrtu^Zss!*m^2%)>Cn_L>MvCue9K*B_GmkGc$i9#-<(1j@{WZY(zJknm38tai3T4
zh=#yVjjM%VrCp#v6m}3tZ+<K3M^1BCk(>;}t6nsW6K0%$h#*bR4AP`s<dF52!jLB;
z8d%Z!h)m}U9KwzeOElp;42ORz)i-eafgIa1r1D}LFs>pqWI;)oVQhah0!5^5m%_1!
ze3hP5pcW6celQypq;f^@-nlcNQ_nHQyz0Ls5kN$Ip(b2t*6Wl~KQbSc6Unx;;$&D5
z0y<t#3O^b%?cczqIe0#D3jg3a=Cng}yo+SB-A%E>&c<owEgZ*e_WT;&@7g|VwPIfK
zFm<d^MJIs}yOAWO2y~2RPObMw^;?isIi;cC6tqh~pqX$TGbnU?*<V3f=d;Pg6Ia{?
zw*$C;Dj5C{e;u=60)-Q78kJ_NUOP;Eng}hI`XD$0=__i&3I1$>#+05dAE3m#!=s3#
zX^xQi7$$1r-_V3r-_(2laix2Vleqk0Dv7R1qFXtEV^vH7#R$s`;w3tRs%W_g(k{+?
zVCDf=b?OY2i0)%*0F-Gfz~WV4^&UE(BzM=d@O6+80m#3c#XQGa444TZjdYNcO0RmH
z0@CbulOf;$oO6t5wlZQnz9@aVyHX2`*^CM<yis!#cddh>U&j5Vg(&^<qqRKb8Iocv
z0up&MF_MFxW%}^RSxw!Ko&?;kSDSEsGl7HY<L^>Hyk8!zKYD!+u1lCLgsMDDEzsBD
z%$v?u@@j?8v&E-p-h2|^0vH6eC^b$%4@hyYEfJ~2)PucwODM)HXW3OwC?@M!zR?q@
zS`04yRNWbz)8uL$xMKU~^ZI!mf({W>nMEsd(mR<Ia>Az^4=l7stNvahZ3}J)Rx!au
zoCMtdb??{!-Zw$Tji-L^xKr;+Sm8f*^DU~Bk}zf)QBg0xg>msxS0A61mTPame-YpU
z_F}+U1y809mn%-yoq>ERf4Z7oEprhNAt`==La}8aoO}ZJ$e(q3Cvw440^KPh5$<`F
zjjPf60qhs@LwCpLwP_imB0qVIpOxaNJeME3I6jU_Rel7Oy?83c2K`S^*^i^rkRXD}
zK1cAvFnDPQ{6|z?#!;dCqM!m0Nv-6~HEp#)+{9H?2`GcCh<go&{f6-w?>r#VGR&e{
zi?=6yf?97$z|!LaH1YSi7E9<uV@+OT{jTvU@s$f~|8hIj)WP}y&=wM*RuZ4Iu*OYE
zy2kOESO!YgHdK}=EK+cHu~<*ACmYSY$K&aIP2LV*4{r%3|MaKnY7H&=Gc7d5Tq8DW
z?PFX&yNO!+?eL?UG2B$EfZ4F|ItD^jdBHGhKOrF-5g>mH^gM(PTheXH@be_m{SQ<&
zN7*-WEiq_EeP(qodhZ~skKvxKMEBBdw16BBzek0p##u7R$2iX0R^`-o8~8XpM<6t7
z&gzZVC?%n`^&2;gQ@=mEdZV#fdxG?`+B>76N0Z<dcEQ85-`UK~@ZY87$oW3Zw+8_(
zW96BbZF?H{9DV53yBm1`qB#xH*wi2>;TTWZf|QwSqG=h!B>E+I(O{ERmfzIIhm9kb
z*hn*B#nhVZn}bm9YB0av)QaO#Y}<eFX(jeAfQ?K|Ld5kGXbN+^<<)Er+bsH%&;2h;
z?*a^hV|@J3y=!6pe$^+Ll13psbB~cq0%Pv3P={Wv!KHAXDJ;~kRz5wI+8bM3zm*F%
zf*FfN&E>Z;<hqxQEyHidIXc_jk$$Nk@%AL4q@c!$f<k)Ib&*EWv|}q_NKn{L98=_D
z#AdE6xlD5k&yy3lG$tGHdCdeip#7uz^SzjORkOW+f(gGmiannHQiLrC*{IbtwRkOQ
zYT>EicG&QG7-BY%N#WkX^$^gnfypYMiy0DjZy*LBr0?Dk(_Xk`2>ugMQds{l6~!2i
z<GG4jQWOvH^|I^_IgPPvGNH~2s{<=X1b5-&PX9};WSsek23rMz=4tf|d%#}ZwQ_*W
z)rS1>{QjK>XfK;bbg}Qq74y30+~2u~RLM=Mv>~ZIH>tgknQKt9-L1Gww+WpZ)3GZ}
zhGn6UaYb|VjpkL|J(5D^>O1&4j`#f4%pc6x)T_rdg^UmhxnQsb+Q)9Mc?xy;>{P~G
z7+OTMYz|gi>}n60O+kylq4Kb!d?4i^DP*p`>*$oUeD__Y?UxQ@8SO3;7TOp&qDV2r
z;TT^COh>PXs@ea$6vt!>TJ#MSr-pO)S03$BTh2y!ziC91NEfA1-$9c+C4hsJ(XuK@
z4jBQ|q6n+8V<^BW7HGQ7*I*H7M{BHBTUQaBf)*}+m}%93RE2%kH)-jm2hV4$dAGmp
zR(L&g(-FGv$*g{<QmcWZoxS77MHxWdMR^XbSFaum0k1gr;xZ`Ojd%BntCObc?xb3~
z_J&4>SV9WA+fj1pC$-3T5t#+hp}zDe3?-Bo7kJXU{g@7wSzNNoHH=A#o4`>(7k1~p
z5Y&w<gfZtF4q|V6k?R{{osz)|${X^BY5<=)T#~)fuEOL2x;-~D*M))Db$>eahU1}H
zvyq=2#YZd@-)E8wS9aaIj&MtY;9O(wT}PawC}9C={WYB3A!mccb~hU&H@DUl48O>-
z1GaN9mz~3Io*Yz3iR>QE6+S>0eyFaW1(LMX_Y75E*4GB35chHo7#BZ)YuKI412(At
zz8zJJLuA+?0ga3C)0ZuI=L6uKi0%Lg{kt_gVaMXdT|E_x>(Sm;TFsgeViUz~#V9uY
zpg*hD(k*KB>}_3iMn)|u#&4ln{)4?^ZEm9mer|uo0YkBi%Os(MLZ>i}A59=m%=uDa
z80IGNBQ+$pIbYJ2Dg1VJwbJdKbb2^V%VVHJ>y!2&tya=%C9O`e?x^%3M-YoiP&#eJ
zcp^^sJB^I1-G@cBA>}50<L@#yjEVT&+5z0pNdOHHWm6JFT#VquAjE_lg}eFgL$5vp
zwk14x{*qeu$%iim@h9Y&Qo}p`Aki=-i+KyBw`w0^qF3#I<@>KF-|g#Cxgo8~Ce|xa
z7yx}^wQ8Z}0*0hlvr7e#t6$FrolP}N;o*5&E=}e50XHn&xaN&6h0Oa_wOC09ZXSN=
zf;j2=AV!^pLF{aHy=-><Y<4%?K9if+WC8p3&dkGGMau$w!1zS7anW%2o3*CU<>D+;
z@NW4Uu3!Qu=4v-vBc-Z5;=whtH!?weh6@=jw`>fS43?X$KcUVZL$LmAayGr5FR#|M
zRv{aNE&4n?gXI`BO(bG!gJ>V%InMd%#r$^>9_f<}maE*fYZ;<Do7zr=8poDHj{n};
zYJ{&()gCQ>OJ@7KYpv?xnm=3}0IwJHTG7@UYk$MJ?*jl))fDI)_S*4&(hPl8>*jp#
zMSpI@*gssr`H66W-RN*RtDp6VxySXOYub8_szg^8c!7B5{pvLauL4;Qtp}W0_a9mJ
zo>+GuSoa;_vMmuYd+i>r{xuTt>{6_9jN3o`4MxgeuGWqd`;adH9g)YAgF(Cd%^{B-
zR+#-3({m_2mt3r-xWwyMYtU261Js+|9}x2hQM{;pUCJfZVvVJtH0#7P`f&tx6Z4Y9
zc0&#ja<P^`kN|%jr$?iP<kM1xusmD|RgXXvP!$O{Rds;-4F$6J{XW7k)=xfspY9;W
z2PaUg_vK<lV)4&Q_z5NBBhV-S_NbVx0NrG>SAR$X4n<)p1;FeRj75!Di;N1`3tj|G
zzCKkdk+1nRreuTzH~d3alQ{+Bne3>+w30IfgFkH_9@b_aLZp<VgHC%SR-(k#ragp$
z6(cyZTbLHuE2E@0wB~o9(46`D302H0V!~=kS@`CEXbm`PQT<9sy`;F?2j4?>=N&u)
zg)e`Njz%~SyBZB|c%s&=a~P{-NYxDn==g_GG8pcA6eBP5fS%?!Hk0F9B4hqotjqd?
zc^rZpGUW#(r;i}IX6bY1A$*(T4}UQNCrKpD`DHYVs5CUhA*UpG2=wVeM|3YLn^olq
z{aG8-?hQC?Bes-D7^~oc6hNgw0ame~8yoc0D-a_Y6G%hC&@BxK*@yoSvfx{Q%kL^2
zgFP~#`hDVz)JQ9kYf(sgu^A+!E|5?I5UTi(uaHQe{MF)O{_FJORYPDkxHb=}%0N`V
zdcsBBDutb{6d8cF1$ZU(0i-Hl0Vy}tjUir_^i`jwZ9VlCr|RrwzP>2^eqNcTMK0hu
zdcV*uf$4_Zw@uN4M~j$Sre~a*p~?UwX6Zr>5m@13QpP8Q17>GpC4W4(&CGbaw=%J<
ztHKxWFIz2H#EuaDvINc7gXQ%1YBA1U%C`ym%RWv0c+|llBgIAC63#sh1mvBF`*nID
z_=c{kunti-d~?N}F9z~;|BIbgRLt=$RpIH{umq?k0wW+igm_m|{ZMZ5?av-PY6=h|
zi>wKih=B6SVGr&g4hfS(c)T(H$~AaVby25a`=iE&2sI**trfO6Y6<BBT*%VE-uQIs
zGRQLJNlYnbf{JN(C@>89GPJ}rfz_ASS`r-~fKSYPjGhC0!j@1qi@(YEEX~Iln#;Q(
z3g-d<j4za~&%|y3$E1T?QXk0r_+!)sNP#JgXA@T}weONni*Td+qKc2e!JZCAdre!}
z^OqDPmL6#HKQxYqAJ+5p#dHEg@ZwyI$V)nusO1RHia1-`io`##PJr2mmICmT04|ox
zl^Ppg;uETyIvxqjZU}XudCM=I69U=u4541a)x>L~(~^Z1%2BOc^hC)70_1Fy=~C1*
zdcv{ee!yd@9g5&VAS5k-(Zt2tQ4pfAg<52evc&l}M)jx10Lp1P6AX2YFjOxb*McQl
zoIO{PjSRV#kDc?o*`SzDl?!*0U8Cl{uilT22>%np7rtAuNCEAFeXQs61x@(*bMn2R
z_jWbS=1V;FV^#L-pQG&Ajg+0Rtmc-f>{(UW<EpY-BiQNMp8ZF)Ju9?5SfquvE720%
z8)|!eOWF>q#Une0drD_x`BrEB(!m>P9D`@|JELZZ!`FE>8#lz&{$t1~6WTlsdV&P%
zsXsk}Y5D`kX81JoNeL@d%OGODXJO<PNF0vc&X-|yu3GHasKC-P7}|)9T482DFtQ#G
zw1TH+)}qX2HXO<&q}mYTdLc~+3v%7N#x@`#Va6{ukU5(<SEIujk4kfvd_cJx$cN#p
z=|tOd1((a_zpBfG%wx`08Vq7X$6?fi&JSSQj?+{K*no(HWfMswVF9v~V(lvdi_d<r
zf++Y8V93_bouh@3+ajc(amorMSu4LhtDH(9F4~3_gJ~HI&D#`OLui=>iuPu_mch`P
zw^(Ch05GcQsW6aDkXrP*cfIQX7}a#68^|Vzfo`vJW~sBqIi=xSprnrQy49j&bpTX=
za)?N7+Hz{EZqhZXSd&kPMvz<$P0N)v(pIxvGA%>$ut94XOk6Xlw93>0sMn~pWys|6
zl%AJ|4&(BwjWZ;c%~$1Eqb2;icFVO(?hfT`t55<oCW5Y~GD_NsJEJ5%7;<n;2IjSd
z34PZEhjB>z@hE{ntd?aq!?wy*bnGz^6wbUP%ypfGdvmebMI_^{c5bsG)AcxoxFA!S
zihOHRsueFJZ@f07`><OtA8z4x3WI_Nz1Q!-@d*sFi_wwkjIV*WQ$?d?UjLwfoVH={
zu&-f70tfE7-(=}&6(03g8Vr1bGo$yi5a~l-zs3hgP|Ef}k{%u(58wKdT?=r{ax>PM
z-g_S!b&+0@=bgzCSvcdn@)~)QMKT6nmy7rZ4+?q8O?!NI+O>SvobzQL#(zl!UI|#;
z_T+#lXp)|29tP=RKp(@)2)joFZ^cs&!C$k}`9;OKVDl~r8nRmAH>oC;FSJLd;Lb=T
z;Wy}=cQ+CKyXr*mMo;hjvGEoTG=Vr$PV-7O4S5ExXagbi37<}<@0`A&S?sgkKG%RL
zG#%4qQbEWI1pgZfLc{P4NwlHD=F5;$Zv|}lDDFA*qBrMWh#(+3J+c)|>TOPFG68ov
zy-Y8rixzF}V-gx9xZ2OO61umRx_j-adNwMwU>NHT+9^a|7xzsX;+gKpUXuF`l6Y|A
z(;Yt*IH6ggQm(dZ^V6zIKwOChnOhoNgL60A_&SBI>}|C2)`NO%b(-Pw2rEOaig!pT
zfk()7mdPUp^oyQdZB)^fp*D(R9GJwD2>acbpW}oRJaqt~L7{?7`!y0Tl>=JJK*ZNR
znW~wosM`7kfpf!R9sPzNECn_cbVPxD$p9}u=Rgmy+@W7sYx~m<+U!F(v}j-m{HJ$R
z$u0OgRNUl%$wb4D2JZssD+`29&jZ&LL@{)-wQq`_W@mFSgZ(E*w5LV0JfDokF(mqB
zwVsot+Zh!m5zxnQ9!!uXf1)AgWkyQd@g6>#Va?xzZI96aNm~?@;99b<nk_(y*u*Bw
zO(=S&qWmULy{dQkgx|RCkyio%?F9P2@o9ye-g30GFmRx=PDX>6@4O-@wLO^NtEmYZ
zN{=hgq+({bvzOw-j_9!<q}L^^SYSo3IptA;|9{61s2bF^H=y<~f&VpXh1^u&n1|n`
z!(Pm>p6iK!y7R2B=+px2J_LY5hb&TH*#%0$N^W}tawG<goKy}Vx%=sj3Du$Pf<U*2
zd$!X}1z=0)W7DIMsDjS7t8USdOuGA_rd)oD7Gx4`LFVt)g0!BV*}7qjkmUBQ(BxLE
z(8Onj-cc<OXl>><wKnsOU5V7xFU};2GCh~;T&0lRnN4TY@pGA0ObW&Mt1WpNi%rCW
zq_8~sOwjk$Hy$)BW~}4w7kK=RgjTG_cY}2>_c|6~Q#|Cn5C}m)!H8IXYE7(UYl43k
zz@>Ma1gz%_+Z=RIfm%<m*(*;ZWU+vfk85ml4RviW34^QMlVS0|1r29!;Q75>XD@e%
zyBi#!)4f%B;jId2V&g46(~p#I>hH|G{?24`f2Vmq(C+dj$WSf+*NDI?0mHXzx$d67
zd!KH8yFT52a4&f`iWvCPCrxW!TTQ+7DP@h8FfF$%kAKpKnxAj;DbVt|kBgz2jClf`
zMwu)gA61vRsKY!^jL=)8SQX84V}eJ=u>Pqdd-pcpF(ng)Uoz$L-P%*8#KQlWDKIDD
zJNCq-*U+&;3HEfN?jIHXjg!;Fwwuo|EnpMis>*NMRh0y~subhGUZqnyLHyN~R5)39
zN;Rb48;xuVDezK)TzS|AnSz|<LW|C@0k2EV#N%SS<veWZTP=%CQ7Iy6etJ&lQ{2|v
z_AZK>o_7WMHV5L%iPY>(_70I|vv6K9Roe#Qvq6uK$QZkS4C0cRS%6kSEMO-JRF|bU
z&Fw<Fo=@n<Z_72US*}ZO^Il%h!J0)NyhaNks16va8}Csb^MZ@-+(XTt9mFZZw81v%
zw@(i3frAOS0UTSzOWWYH)yUorxds6&w={Nut8<$#*3&G<QOC>9G2$@OrqzDJII+&r
ztyq!YM<0QD>j?b-g1tyJ2W$2~sX;}c*Z~s-Xto5q({TIXOe}<#et!`t*L_%w&o3_I
z2dr@^F!KGg7w0J|ApTx~6)MhSmp{h@2pB0QWb^!0d3b#+XDd9ko~Oj5ng*DVfZ~&*
zu&Q|y55e^w$Ajo~C86CB`pk*!ptq?MK?yHRleuh}FEv-{!PWyJ$hSKivf4k*e}=Ax
zxc3!{li?1yNl%8d*SW5FGCUVbFACniN#b@N-v;d*wnw8<c`EV*6#4;WLerK73pwyN
zCUYI0R2xIevcX7DtwLzYL%4+4IV>DLM5c%^*&y`*26BwiareG|O1Q=Kn?i{I2`x=A
zOdTE^k@ua5S4bR=hPJ&Nh7vXvAPbp?s?i4OV(Vtg0(pCVt=ySfLOrRa*4$|f&Dq|0
z#Jb|xvic6gR7CTpdt)ov?#Pk&j}5i7R9amV@tbo&ZJ6CoN#zvu>#~Z6D<M(~R>G(8
z@yXECLZb0>G`xulBeGwG5z3~*ya}0fhwTn2F(<_X0JqcKK#RfDdwW{E;f%qP0Z6S7
z!VqgbEQG*G@!&2EVnr&=2)JLy%D~BN6%~YnCXwXrPa?^NlSuLglL%G7-mjn<wVi5M
z>7tu-$Kfq>$KjiF$6-)+9O~Wit?S|jN@wlFtC_#wK8g@(x(Mx$;CuR|70t$3ej&>6
zSb+kXP%>Ue%NxM-(;mL`kfLWWB*z`4m}32o41CNG(0&@gu-~EgVH_~sVZVT3PPm5C
z0O5an5*GKdbSgg_A5nRlTp+pMMWU4JZ*UFU$T|>r9x|fI(k`XgC#9IBWYsNdi<WA=
zW<c`|f%@?fAdPZS4seIRY752?i6xJ)@wc_e1XnI{$mpvE3LbR7k~%Dgh(etipoZ=f
ziNXV@2oFG>hKbrnC9Gcv>ldRmzerSmiTX}atY5_FH3x;bSE$In2zJC9zciZ?>khNq
z#(|duXVa6>f2cuG!V#Jr=^MD`H5-3|k(VHRD(rm8`R5B7sagBuWM8PSpeuTR2^|7B
z8Q41dp-^H(Hye_H#272cqeiiNxKw0|>xVqpfUkH*51=o7_W7>*#`pAt*l!VQ>wG}c
z&}IW_W&aEt#@xqut7;O;W>}qGt*7FCV051Xy=~+FHRedeC?dGncbZgA2md5_#`U)Q
z!c<|O;fGN;WEJ2J!wKwBK8ELbC%@rcbGjJpp3f{+qh9?_cXXQv#kEa;qWb^~Wqjyb
z1bsIe#D)ggxlj+p#78Jz<|+T<Q)v=x8`ah<n*q18RfXH3KPKG6D%_{^$Ar`PKBPY;
zoK}y|=#K&SsaC#S`lH|~d7slChar}L*p<}7j#Ns&*VPK|zFwZLoM&iqIx|dK!z=7;
zeR@5WGodyM8onDWm4K(9i<{ct!1`*nlaEm@f&iRYaygU=i&tPpUqKa=yo>DsHZd8S
ztQX#-M)>!hP)&hAu!G9B36RqZ+PLd|#&Hya&kffe01nu+8}_@&4^Kzmcgz$#m3p|>
zJF*9h<BBpCkF>U`Vm<jXN48YA%B@OvJ;VJJd2V+mJl14w3ktBWFGv;x*>4lIOZ#27
z#~U}Kqa-Ow1=|gwUo-gr0sMt-{fZtos0h)0jis0zX?l<#7=Wi~fm1q*rR4KbmUDo7
zK50(sNaZ`|@{42yfu$FfP%uQyHYN}ZA4$?%s&aUq+6s=8!Z>g%dy@Dt0Pp>=YSeNJ
zKfOormhBLOZ%e^HwpKlV8&ywSRnOmE)f0c!^$7qCZalsD0ygEAFKnfu5_qr0CCOgr
zC>|)C^SjYR=gGi2PG_g%e3`lVBQ(nc(XcRHkF7$3^0*7$X(3T(x!2HiGqAI+7C8)N
zLBZAFH?{@sdWr%Uvb1j3;(lspPzlGyV*svkn4n>5T4)3`iahc^PEQy4oT`cq;vKHV
zEJdLpq>;dT*U=LRhO;%=R3r^O^TX&7<dug@_o>YhTtn#B?PjZD+_A}vce44cXQ)Iq
zu1cnX20PnYo`L2*08}?J<{Ow;o7hyq2$rQ}QPtT&VtEMbYrnPSzbTCdfbRq=sgr5+
z@ZCR60{S&N*;{O864Zytpk6Q<y@%X8V%;Dpp&TZd_cC8`kuU6U1NJ*%ro6L`NB1VV
z?=_>3S;2)>M9uo2?vvNEVm}fuK+q`+ugq(#QR`y53K@F5ZU_^3_j|Yik{&fG1X@KQ
z<gvRg|C6YS*nSu>|ElqN79LBt!jFYWA}dYkyrL|GsIr7C3qV7pSnwjZWOb|Amxi`_
zqdxkW3qZb(4WjF+r6IcvZRg0$2I3a7oF*fJ$cC4zw@><yo5WjyiHr=ki}~d|pZa=@
zd0`~*rpV~cw&A5YM>-u+$NDp8`RX}EQ*<gh+j-;FRZ&wpR$^K)bix8pD#>sk8i!t3
z8J---jV*?It%Fg#A$Xn(T3<Z`VSu!gwiSe_;5UGHe3XoYeUbGiJaENZ6<!=Byx1?i
zn1zSmwIDYUUVL-mf#h!D7AjKE=mD${w-A;T%V{K}gJbY8boO>Oc|yQW4UP)nphf*L
zEZFVksUGYW)3ji>9CpkM*r;b#qTv}q{i^8%YY*^4jJ$Dxn2dQH)gWPl5V{#PO+(eQ
z360}%Sb3TNgvt>n?5G4_qH3qxD`<LVtJH{!AlJ_x{f_Z?QNb<A>D-bwUu1=o4X9wp
zLmG3dBy$Kw!ep=_sYMV0v63w?l7L^QYCss>ty8&0rz}?pOk5L260r2XYPy6C>wUSW
z_Zhd0{Gn!bxPp*fwe?Iw(N04fD4fYC^=g*as2L;}g5)#_RD0{7QVs)xn?O5v&up{|
z6WY`ZBUcvCdj7hCZG|G%rPHM$lq9H^931ltB}brka;uq*0_U1#7}M6Sjv2TQ%!Krf
zK8=hCU_m|ipt2s<uM`Yq6NJ$%9%7>5_asaGvh283s90dsz)zVJ-r2fmQUzW+fVsn%
zl+7&(9eC{k=349C4O99C1PkBd39g`9V3>gD?OqBx5Z3?}gtSF5y<BFmc!z?C*f%k#
z6sEVnf@)?<&YF)y$1}5u9dvy=5@wnxM)->gj-FdYS^i)?n@ne7eR41y^*Zte|C6{^
zyPlY@*3+>FS$zr`emb!qQ&wv+?JT{`GlJaXA7HDpIO2Ox-5g6W8S}}nN)8f1x7c(S
z5XqiA8GDx&U04KyU@Gu`1*V)y3lYoK4i`}D%is9rt3rS{J?2Z>fzdQQo8l!VfGVQK
zz;6mFL%*SRPLCmhhq?->cEfhJ8>_3hjWh+C<7tGai(&bk=F9W~N(HETt}G^=J$Cs?
z?pU=*m4)hszgQy=hG~K;zD=aNUd;o<3s59k#T6BIeHs$oQOIjFJb-Vzpeh-_429!X
zuOTX!EKfirM!O&(!4M=AcXLVNV<~n?(zQE)5pqOWsiG*|(~Fz{@cn>RM8fszEfCpQ
zalh^T=+fG=LTiEePYuZdjxKID;V)EL|4VVy;O)Z%66{Q{&OpuC$lwlX36V4(-){SY
z6gF(%JZvBj9KVxI#_wAnoRixcoby{y`nRR($t|h6N7IuHHQlPgrk(*GDVpF0!T)hl
zgiW&YJg7Qf{CdS(-smd1T;<SEnf&#bGQ={Wx>@AVZRlL&z;3esR?;yzhT{qo1CoCF
z*~J!il%B?3cCp4zNy1shQPM6htw;B&>GUg&yFX%~{M2F#HA<&nM2&XX6~%k^inc=J
z^JTt_F4E;yUW8mJnmM4$91i!sML`$|FcvkfN9iE>Qapdx_*BgdLSdk~J6n)|@sJpX
zUeDWH!fp|7E@P*VIndI3^|HnfQAv0@Fih04AS~l(%PLZmqG-o2!#e$RaXw#&cdD{{
zqdqUYvttASWX22VjL8p!gQJ7~Q`H^m8F%q!`i%bDYDTZ&>r;D=WGbOUK=K0wx#_q2
zHm>6>$#wfPJ6k5B*anOlTkg<}JenB^=_omlq1lh)@7r;=`Pgex(WDLErl|nfUVF@n
z9QcH@U;8$kaBiY}K8t3n8L%eZ3eqUo97exrUg5I>_SSu8lO6ope7+0l5AYJtXJKLs
z?Ec%Eb^jaoFx5)_P8t^^ECLPeC8+C_?tu<sH0ILz+I4iNWQHM_e_~VynE?0CdjKxF
zVTNu@3Z$)OF0@>Ut7Vcs?)QZR0Pyu|Xi$dLCs2l^Nf5cwrg&us8cQLhRb#)NBJtoS
z;me&nWCY-0B5XSxillFcV_J1PT=P?Dy9GvWi;>v<Vq$T&fyTZD6OFS?wA_uBn`q{s
ztl=5?glO6&8lIkQq7gSM7W2{-rv`^J8Aa=vCaMicx`RKME++HY%~dDbinOQuiibwe
z*J4{>Vd(<G(@=Bfczah{9UJ4pcf+JZL3gbHu?Ju;>7@I9kV6kh?57fYsSM*Tm6iq<
zTL`wNd!2oNapfO6^M@U4cH+-=aZmi*iM~DGTRk5KT$3;l8w7IkgE(vAXVeYg)D2*j
z3_Ax$?)>AE!~p=B)pfP%==)wAJM#|`XS2t>VXvJy0Pxrw@vwk&V$2^|NkHbFL)M2q
zdnOD5yANHt+dH4TvUfiF%$5Dw_O_Gw*3PHS{LglFtocs?z?!+|${b*(oowRBJdQhu
z&fI-RZsNpsX;FOQXy3jg@7R&|P3?7-M|=_A(kjC~jKp?*K8_DvnU1b9j2(H2T~QLJ
z4kvCMPTV@2xOF)3=y2lJ;Q;_RbU3+D3-vn*G*G~zo{W(7^f&u$DNe$p2!s*c7yqyi
zpC0tcUy4IE5b9VIwY-Pk9d^GZAaGocpO-`RyaMwl9WQY4u)W2mmZj2U@Cfz~zFKAD
z)l6Qm`uIuYRo6R}61ru_TmT~nCJ}I8{8E0t%ug>;%)%fO1{g@b3VFT~SL^}KG5qK_
zONG;L3@sRPwE;qIItCcrT*6mKjOHoax2lEQqtu<E;T}9FyHN@IJ!m%coEAVLMa5oc
zn5K+Le{j@4ZoDsuN3h<66NZsEEb|@&nQyXEl-0RbkI9z4bxjV^+;`AqH&Ud3ZnOTk
zI=M?X-(Y+N&C7lnKQ7{*W%M>Y9-eMR#X0Jdp608}GX5G`%c`BhQM?G80i@veQFwdT
zY@y#k54nN7i_ror^JDP}wOmv03bDjvGNxu_QI+!P;NW5@j=yKq_3{d~bk`bmiQHMa
zqTFxoD`(kvd0ji$Yg7t&<`*3=u`5N%LW>fI7RBElyiP}zSC|lE{QPWu*I5!?ovU&n
zBe)>gaIMslQH9NFjE&=5yz?OGhEM9?xf=F*B-MIA9qeFc6(;T~6lG$BFwu%K(UxbJ
zeCtYgu{!&8CE58s2o<!TQX=w(<s=kSjsZpO)|GV<T-fdnO3Mz53e$WwhVg`Wy4)B#
z($tScz73u%Cll5$yRU<Fe*I#aWyHke|6m+Cx=l!4r$~X7O;^*?oUpu}k^&1)PZuQ&
z@Sg-&6fk{)bfHB&!(Dgpq#GxlgU%6I3kC()wGSh4Y$0(z$)?lsB8W<_S0Yj}0ua%K
zcibOZxC8cVMch$kN;IpGd8n`wo3Qd=VI?fA>e^|xIK76mv@VOff7E_<B6h&UCS)J}
z10rPt+2yq);H7{88+T1YT#`a5?WFp?!T=3#dHNycxjf|#X(Kdf7d?{k=IgZ&?O;S3
zqP0E)gcK>|;dz{4mu13shbET>5YA}ivO72#`fQaEu+{a)u$L~_K@~PzGN{Fs#><l3
zhYV};sxmbApj_aPU4;jylnWlRtMK5Za=}A(6&~GI)IvVzEaRXn%Z-B&WqDpq9RfY<
zRof|dwNp}Qrz;29lM3X_0om^ewT1OnWCQu8LhKho4F1to2GfufExxSYO-2JiM1z0I
zX*s0Xuq*R?&e~`9hcYy4pUXkY@T`3<k14~m_PJcB4DZ#x!mY|UUhPYC?K@+QvN~4_
z;By`(Y+DTsyUaYkojAa^69>BEGMC>b_W5nH0KZLU@!Q01cN{xM>{B86{H?-Z5T`6O
zuSFpo>9MfUbczOWJ(3|1Q!qf;B^R^!ET`y!==0J2C{AEnOZIy)PKGHw`poJj9eYnA
z!`%mG)oUG6W!0?3rPMnX^h$ByJQfy5f^&)L%lrYwl`6y4l)GBKsHSvEdI4|yt>F6T
zG+(3ZP1qoYSIrwhQm*|iEUp&o>G&47j7HC_OxW3%POdIjj{6jNL<FzQ&*V)C_$i*H
zECu|be-V(po&-ZrSK>d*Spxv-mPZ9rv7@Beq06<dXJNS!2C{)*A%MB9xoA<5FS)`u
zkYqO*;3B5!JimqM+9@1Lte)T!WvJc<E|OXdL6>zIMnA7Zpw(7n=xRE_YMQIx^ER^y
zO=#m4hU<5t3$S_$HEp1AHu4csKe2Rq_EKDtwX`2bS_hC%F4yOHwMOdBVDE=?FnWrQ
zl0Igb`68cY_}l^m#l0l{5ysb~7e8wsmS90IvR4Km_&tsiD4+tPM*9S#tv~_RLc#US
zH4HHJT##ISMmGJ;lCjPyqjt<HpAN<FXc=ryJ)e$$wRiQ))Ag_PYokb}!~S6Jpc6N=
zn1C|SpP*{HreLDC2p;Mxt7M+&R3}OyMXMtSMAp#};g9QOMp48Q`!W(Qr+)}W!A4;c
zpY;#p9*d(a4HXu#8E{8Nkm*P|B2^(H(HdR&_~RRjZTefV*dEnL?crn0(1o_Wt2CiT
z3AI3WtOg>y^LJp0Avwww3`t;S&$w}=p}YN)D&6jU8(3*5czC6av4Dori)B!~S>w$E
z-IT2c$I4)dB9Jz&_<rypK-)nS?SsR;;~yHb1ZEEWIw{3dw>ch7V$=;Q{m>+gMbu7_
z^J_BnAj%b8EYoL^X8nQlHCrKaT1aQv@)A_SVQm2vT~n3Qn7sVX12QAZ330XrtTR;3
zdsMLE`A`6+CX%y<B0F7RBUMP9PEJ=)ndrXyK}{;ZQ04_9WHr<r`&9(&fG2(O3*}!|
z(u?FTRjrDPY`RXO99Z>Lb3BkPI5Gbo9(TS?`_OQYX-$L~$8oyX?F*H9|9XaZn7{mR
zqL|@NXn+VA!0a-=I9tAq-WRYl`1+&)u0Mcj!Wv5GoZv5haT|z|2qof+kCul|Si<c1
zhcwfiE%$(i8Vv@$fgJI1mq)x804vg_s%y8mck;Ac*HqN?Dgr&7pwJ_TlJe7sS3iHb
z_431)pO{se!{4er0k`Y9BT=D@^b`gzq$Aq9VgunI#a9dMeM4DAVE_G%T?4+B7n4mp
z#e8P#3l;V49UMJt+w^rcgL14cUSYNHnSNB)>C5qSI-z@?_)XeMV7kugno3O(q`Ggb
z3u2Rs>OculIe{(pO8h|G+ampx^(uEvfAv_GiITgD(5Q80n%Uo|lNo*N{be?XJ0P%B
zP?e&drTUtxiYzHADg29UmE}jJq$x+n-5fooH)!svI*7+20f(Rv;Gjz&95~y6q$(7|
z9@mdWp@9rMBT#AB@DD~WV6`~?TSlW%GSWsPY&P$3q%Sj}*g?U<m3avC@pH4`gIY_X
zQk%&SXgO^)D~u79UE7vvJC55wmRq#cpcR`+QsI!`_88HH&yiZW(#S$Ss*zRON<NQ+
zF1!J*?k~YK?ZVqaSlF5mK!))uRxSu(u4hCQ(O|9t5~&P?OGgu^NF{!vHBg}l$eg`e
z1r$tz45XQVl2}F27`T#L=Ldna98(NuhoNfT>^YaGu=MEtkhBOe^qwN}*V>H|s}08`
z<n}YlPW7wU1J5U8F%hNbuBw53g+S$X3R$@N<i1&-bPhlJ-A+3CK0W-llmM`Yz4oX_
z8Me49=FqGvP#T);&x|r>X=2~2rl#N+Pz#**a6!ULm(sFjyXfrnKHXPOR3U(W{X`CJ
zf%^<+SL;{ee)N|S&vl}}KvB04uvlav?468$tecd5H68Ygn%=7>Qbmh;1}rPfZDwt>
z`pTn=76pJ35@yM5iU73^h<~!0k+PFW;O|-^<oS!AK3x6$(K=c!vm7VX2na^2nJAkm
z3sss56gJNl{VbcpNVB$wRH8*b?oB~&+psrHmbceLN{yR^sy9MZ2z01M*wVN)WnC+6
zDWa;xxfVuiDz&gQ^Oo*g;7!3sA#C)Te5KzWH=A(wB3)k7WU<jLc9c^8`}Fu(3wf}E
zj4998M0_ejty+j9QY64?uKL@WhZtk?+^Z&G|E7kKBu(cQF{>2?Z9dSHJXtQnZuOfZ
zJQhStppu2fFSC^DIV{kpnJC=NCv2hGu;T6|=|0GGE8xj5E&|<~pS)OR6KgSOflV-E
zrkmxWGGXb+=1f1`VJT{*#5WhVFug?5_DljW(i9ttE#-E@9Z+3fO=jef4%%x$!qNF7
z@0-h|X^{dwu2e69O-E#5D-l8Kh7NW`Jo9%l!YnA2J|n$9dm(B3gmdA~I2_7^k|xTO
zT7{fs*Ron~X25piXQiUHq5!oTKR;b8rWb<!^mc^GQk^=5itUf3?OEh{S6W=Mh(wV^
z4A~+@moa(zk?=t$(rSx)57TRKM_OwdrV?26!RVl@7n>T^nhssA=Nl@Kc7N2UB29vJ
zWfDMtUyXs4QR(P(F`<Pt?)*H_+8W{T7Zn^SQksYWB@!QdXR~S*m~O!3To7(?Cn+RG
zh^>>deFvs948h@(8-eRfrvexUQ9J~c!UKrBj<QctQZ|;snEg?}c7s=JKupGJn9_JL
ztn5EN_MxwITMWh~xem%X1kM&32TivcaD-BsRdJ#-uqqH-G~Ku|@N@t|Gp!#3(F7q>
zb6#e2uW@4`fi(`G08m7Lr(dJxDp>?woz@IKoXzdQqnE*;lx09`!Tv2{At9Ak2vr6$
zgCUK%(O_Ue5Mp(hFtDoGH6iYY`#+Wf+<ySc^Tm{0;oXB{$QlgVN8L0z81&%2QMA3a
zRrJS_=V<l(6&{dE(fw>$&H{9_%JS2RTG^gc9Cw-%Ce!OVloCVOldXpoQ7vGYE1XJv
z6L!43ygXfOylGTF`VB^%ZYvV!<O;3`Et$}Jo()xC@X9N!c6u~SF+M!SXch-uOBJa=
zc6-m#<i}yJ1S+Ia2ExYB>8U(Q@BG*~>_M_h#YRdygKm0!ln&$Lr}l8_1Ybz5&M{_f
zb?xDGc4{FJB_`9Y#B2+{0TOB`gnRaC2t9^!0YhWGt`@mrl;{kv^0Hwwpqq%?OwihL
z!(@Cpi6-;O;-ee|w7Qt)(^j$dNA3RP!>d+VP2pQPBNT}ED1p@Zr%1j7mMy!erb40r
z_{V7;s?q;S_a9R_sFBb((^F|5-SMQutqkt>>qd7}2_E4w_r~M=Kdb-zKXSj7PV`rK
z$J*RD4g7rk3d0PYqy%W%a&+h%OqvJHkfy2amg6;^T$0`y`bZtOl$kqzJG)xRYfF~w
zwB>y~E|yj+?L)8CYK7YP_x7QG`oN9S{2!>CC-jZA?a4LmA23{-s)hrkgN;2vOfX-<
zX(M73sgaLc?(6}28i)x~14ia%gI2$`$$>Wrv~_@L&xXMe7>SNBXf@c{8W=#}QFA!_
z`Y`$oP5BCP(B0ayK~np?wso%&&mCvf#kpuRo@;vuoPi{gH}UH%t`nzTT>r|>{V17|
zd64ZrR2TdZOzr_gdhP7<2&6bU>P(J$n6$waWX}3fgLVkaXP~Hj@!Y#zf{_CpoY7LX
z66c|dxl%n*`->0fO3%c1)6|#;)3R-)g<06#uT2Y(4z%n(l9Wxr^n$i*jYh5^%HI`~
zzFeo83IfJZQ1^X8^;Y6y@z{HSrimD}KAKIJ_4RSJTrA2|Qlv-ISMdd0_+j0{r43Z{
z$@LO0%~eS(nDqNSo4^Vc7$T!~k=|Pch}3#OZ6-M=4id9T5OUp;q&5^91_L#d61BI!
zZgS&fApFsA{0-+HM1#}OXy1VgnUKM6ZMhMyjjy78TZC@S<u<-_i^iKurOMp})D;SG
zupX6JYHVtRzLJG83_H@xpqLM1f6xydU7uEINQ0&>s2=#UOe4&N#?Ds3o%>?3ST=yI
zSlB(J5ot`DPMk>{bP{A5qRxoI)O;H=4+w+=g)kUG4OI&txO-_?2ip)>79z|{`2e$I
zs+V48>k(jV?*0}pD1bh~EnJ7|?EVZ}J5;aR+A$6IUNpC7x@A#0vRW!Jl?#tx;yGAT
z<MWtI;S9<1Kf&<`tV3~5&RWVqdQU4xeCLx>BC+ZMBp|q5;wX(i@0c$Vu;e}gfQ2}p
z4WGG8xPF<>gip@IFgp26_{?WsPV<@Y$(hioS+PlZv?yxM<qNZX+cBRDpSeu*Cd}vJ
zcP{(QyihNEQl{1L{4AU1xD=}w3q`=4#}}~C|3u>%rE09BMDb2{2`nZfu=UtFS*`G~
z1XQ5t6{_96)h!v6w{U$+7UgB$Yr4NCud{aGI%^P!2MYV+&>!{#>i4pkoQL0e=z^Zl
zuwT7GeEv78qNaW|ii(@Bm#mOT`9$yB<!Z!W+D){}<Ys2wiTEd7HAM)xjVu}4+aKfj
z%9xeg5x1N3EZ58tD~=ow^iz=yG{p`B&&Aw|v_N@TO8_d<ssiaK^}(<|%5E9FGonTm
zw^^az84QNqs5|OFQBWLjj{%5;=xo%1Em&}X^x@+yFw}dy2GGzCPdlS%a(p~8fi-Bd
zJg8mWK+o+3A>onb_lo0X!q5F<6apLztsN(wp}xvfpH~RZedwMNycaSjyU)fCWk!>4
z!U!HPih|+y(o}-#V>X0B*P-A^dmXO@5$4GOqHIw6nwT!z14Sz;NE3@|7(c8{@5*1^
z@PqPJ4&jx4e9xQ0!ZM%am8zC^g^$^<a@`TDrbO8>&yIR0QLlS|xACKs)Bb5c@IGb4
z2%oXKal6-SlrR?x5YAG25M^b-B<zZS48lfJeFa<!35r=Dg@_|rt51VItYrM*F~|yr
z%e<YbR(+Xgf$X)7jCwF2srV*Q`?=8TKyDPhuXwyd9{S;4*zJ#H<`;VAE@oQXEC{eA
z<m0fs`Jq4Q2KnES?;q8@A%MdV-o67>qAxrnzgi?1Y9+b8UH<TsSgOH4h@Tz^Ym0wB
zPzfJ+@5Ew>K*)n71L7nGQw7C^1_tCr2Hk4Oe?NdUco=6bLd2!t<aU{sk<uS_xs@t`
zE2fkq9puOa>FOX|9i*d!bWD&V9pp#{3H`=av*m5?wmkUv1C>|=8=iGEFiO<GzMc&G
zT%k;OBCem$QbTuCB<<r#Rx2kJn>OhB;Tc1zlp;_#(1eExPt^Y89+t#q1o*v!&PXu#
z`7Sy<?P3L4hN_)m4{flAIxILw8L%othqYieaE9e@p$7M|*9{G#I7jQ-zOSI$&jmC9
zbA~3P$zepV<zWFa074=0_b&Z~MH$TCAg1zY!)Zl8o_DcXdrIp$U57TtFiLtd?J-n`
zst8WGi*u6JxPd|Oz@)ds64+B<9Rujl1kx3668N}iBS6VGPS(1m!M1@Zv$N4y2g5WI
z@TePvMMzF$P@V%Bj1shJ!6KtGj?xI1TdZXQ^)h*;rSc53p<rj5dktl<C>Zw8279Q%
zVvjdqRfZ00!D`?P%i%%|PIP!3lyl799oFmJ1f~@)>$IHyRCCd<(_a?1%Q6Xmd&*iR
zzqw9I80ZiK10>KOo@x$)<$kfzkHr^HUte9X0K9<qy<9gNjJji{Mx|Kj!L)i!*i>yO
z(o??JM6|Z$8)xz=gVW8~+sr6SO^sAm8_v-M!^tLpId=?Zd=?bjAeb8wM7qAhjjok}
zBBmd0<A5C=ZJ+43C`;cQ!-t~neS&$y5=}rDa~5-8^CR<e1{3*>4K<A=+*Z3YPi6Bt
zIw9_}vd#L>tu^WKpdfAiex_~Y&lHuAK_E%(@1Du(evmBiel82@!uJ9d9z1hLXm2w^
z4TQyWI(X;Eo;1cG$P4eoRZ9TNGyQw$B%B@LO&2)X-`Z7IQMt0K>E*(#o>I*AH?!zQ
zAjti^bbn^sohH(pET<P2d3mZ}m1t(*Lm_sm$_b=O!i3QchH%o=nhapvl$^jD88)a`
zmJn*&ENF9CH{zmAU4)%etSHgerMJzqZQHhO+qP}nwr$(C&$eyb|M~l&lY7%iCo40R
z%6zDYQ7c(BM~$y*J$taaykw)eeequU5X$_PFN~x7VH#KK)Wl_uet`g0Q9QEL&<h#)
z8<?7wc`Hk&<g8f`FJJJgfiUtAzO8fF!wOD1Y-uw}8C}aVz3X9bk{%Xz!7;L7aS?(P
z!5Z>=k`^r|q}_VB@8fV5&|}Y`aW}7@Q#!M3suCqoKYMOof!_`!lMsVULu6vrANDPJ
z1Q;16CeD0_VOO6~P`yef++Qnk`oy<I=YGz_BSbqzQ?s6z$$n!_Z!a53ZnD@0g=JO}
z_!4+zNtui;KG!4Mp!2#Ms764)CXl2W>RYLapKBMq+>!(h3x|pfXJc6}2qr3(9*~Bz
z>a3i4!FHv6Hv1g)>BokCX+^$D#y^u(f;4sW*p)L$)v8*;d#T)NEonc$NdMtmHY3%9
z;wln$5FV+<9mnX>35GaUowX;YbaT{`a~-aW#uAZ#1Nn|MN7SygitS%;Sk#BwCBVGP
zfitG-TYPMXw(fpq=b-1Si&INuy^ZnG0C^`F@13%YCS*D{&OOo4srXl7JSQ-rs<x2P
zd%4ItH7as0b9F1_gqMN{PtZaAoCbnxbDy3}HnsI*A1Ji}GH&$ndGrM?Yca6Ae|toz
zf56vLHAH97y?zNxzlZXH4@m-0{>y0;OC?t(2o9cMn>W?vbXv6ng3s4<V)emsWqZ3^
z9zV_}-Q0)4G0xi)F6ph9Tg{FFI1Rn&F5#El@{lr1iZTIs!z_4W(QJ4+HZa5DJwGM#
z%S8ZTM;`%r`qa7O_W3&FL^g<+FYwsr#l|wdid#>pzPDwpc3>T9&l$xBMWO<V<FU0f
zt2w!J;UTj!;~Uh|n0p$3p=z95gnUejI6PxDJ)#}-^cJ~ToOGTx7{RzcEF$r%N;g_A
z{XG<Lg^u0=lt;?&s++w7SIjNyWC7r)l&hh7_YPg2B>8hF`@@D;LC(9U9W-Q4ExI0b
z1kNvjE5fIW*@S-&g;m92xe&;ne4i8+zO=HiQlxN*6x4joE(+L}9$!?w#7qlz!fBi6
zjSYP67GJ&OzjKCtO#9fP#Fm%mpVuG9KOLf<tciOX81V<)H;XrcF^+7_EA0vfc)cTP
zm;+We?5K8};q&l?96K?Lix$tA0;_VKJ|E`ebU?(^b_J5!y;ao)w<TL*Y$YYq50AIz
zg+(rv&y-;>uv;g;Yk>_b;w_`$%ADwVbB>6(*hKfHgn0~w3+aoBvQuFDkI=`k@O{{c
z=+WW`7eDDBX!nbl>kJgRhfr=|;?s$mAL7MQM7q5p*vc`;>md4LlKSCYkOd`>1?$1h
zm5>!BkhJgJnLyp!WSOSy$Msx8?hS2GDxK+30L6*l^ON`Y=*U_EGqy>_4k?@l<s}y0
z6a@3WD;zi>037GpU>xHf-GegBt*)L~g8Ip;(mE9b1!q<ht8mDI?dTw366b^oa~YMW
zm85$&Bp<Cv2TGbazga%C5ot+VA`ZWb)Ss~(WP)8=(C5uHRQHb<nkMa}>Y^kF`=yJ|
zjD4bPSyJ?TDQaLa9?m+!L*<p<+-fPseWRp8M(9?x5!0>1#Ud?%f5v4P>_Hn(5>?GI
zHI3@b$lWxiI99h^LxU(Mx#0ZT(}vG8r3|Nl)_UHahU!;G?)D*snea$fG!B4}<;X`5
z;65<bypbb8kz^Nkv}D(z@B5G}Xlj%U*@k~*l|v#uRW`L~spD|1Tg8DYTLkGCA%PQk
zvZYpJ?2+jxu3n(1c~?(Q(N>o_m^Had;)a`6UtlDSSe~Xt5jvzRI#dtbNudegF~~7x
z_1EXkAQ2H91i~8cGrwEQo0E{LP<F62)1Erj2pA0;{0UR&MCfzU<&-S}SbU<VCWx4Z
zzD=?xjW9mt2SrUAIrZl%nW<s$L!c6M1W>f+JkKmvgrFI`wT5i&C;L|tR=lVIA0NP&
zB_`t%R$YEtbJ8EiyRv4-NkO*bb5ES&hC2rWR`gC>CT(<A<7o#EHM|gR-Nmkq^COvi
z2T=+!!T)UrGQaLw5I+@dVT<-zJ(<jFdyJ*3Si>&XZwH*mc-0cs$|(O#p#2#(O#3g9
z_M1;}fmBD=^L*mXLy5}ymNFQ+%V-86dzSXN6hNBysaHYt1P!EwX5iAOkL(ePY6$tX
zT~umj$_7NZbs>}UsXCgf>g<&b9wO7i#kLu$t^YtGCt&iFdDRAM0X81xW>tM3v;=MW
zs7jId0Ll_h<8o=uj?s1YPCh-=C`DXL;RM~VTOwR4BaRNLVslPa!W=P%WCUEumVph?
zL2j9k`0=8j?8^Jv*Iuy%Nw!A|2n84%R))Jij92~e0o^C!`;pGGfgy@%Q{gY&@DTmD
zV7oD0&PTu$$M@eDU|Mz3$=R48-wwx+0RXR7%14(d4wgqkBA4JHV_gI7s1@!8<Qb%E
zVJq;WF^Op*JW%|O8z`$@|H7ulFTL>8rFnDF^ajjP=VHp*iNoJ=wy$pAa)@|h1E;{I
z%c>(KFaFBfH_IIDbMVK<ET3GDMp`wmD_)8$Wa|EM>UaqKeBa1(7~-Bx;-~e!4pzSW
zK3VabyIRcWH|%QK2J!A5Z|1{A+u#&QsD}1N<`x|y(i|lB|IoHYBo-_)^JK<Jaaik=
zR8f3OP}>{>Y4a-`W*IklI>~5prwwE{W6h+YZZp;`J*o~9{^_n~C&WP_qA_d>HQ0-?
zlrKE@iH@=%I}r-NaA3`0ef7)r10Y7$2Kfss94VTgt8ipCffZA2sZB1_O+!8;DdzBH
z$xUtIz~DZ57UjMU-$r`_<aoDi$5sIb4yvt`Q)@K-eDLA<4i#9SHkVd@j3USKGMJ_Z
zzUvwT(kv1Sr+?gRhv6DGm^RQB?uH)hlc&2;hh0}6J-_cV7FT#F<_zhik3B%Ka7|cg
z<S5!ZVWEukz2LxFl5mmK{jhR2m5hqj(elb1VD5}unKvY4Y?&n{xgjAcP&Y6x@&F?{
z9G!6p{THixX=*drC5X{K57sfD(Ak=6)5>6T-PO>FPY)U^qp!rHcG`B9<=%3%Uq(4s
z_<BO%o*B<Wy3rW5C^Lz4Z!@=cBCqsuX`dYgcOR$=sIx|hN-waw^D3p@Y%3TO4h-rk
zAr|ZkdcdX2n{cO!NMMl`7ept~L(M@kfF$b$PMPX!V!Pw&^zq1(;#l{7r|OMj>nu$k
z@rC=lz<bGlCBJZ|R5kuEiWwOq_MVlUL_q;QpN7*DAy30!e!P{61g70MRw~4jg)gxc
zp<shjAnw#TO>z!WjGPSWWfaY<8rt_kc2ZOjvb(_6;?V~!@^F~$?KEytk=NO+?+1%)
z!X=QUy=TXcu3jsAS2-9cYU$sQbCaAy1X<SfFD$`_N!#aPjfDneolde6Itm!#$>q*z
zbPSs4SMj$X!?K_}fE0C!p_oZ`ADg3!CzdErNgcb^nWcnpJ7-1pHLdL8<~b;9XRW^f
zHd>V$&BWU>RT|N6f}iW~%%Eee4Rl(6E``+oT%6@_`yVNGa~YgW1BuVmc_^yGmWmKY
z4^JLaR7YG{;=(G%V1*cdC7qT*97UgxwjY=aL<=34swMw0sw`jgK!+Xf$RgIcz#fsJ
zZS(otU<6QxyJkMV-dAg*A!-KnSbLAy(d5D~ac^)J8aicfq3*uvMwvyCX7OEN1n;fQ
z;NKP~#nVQ$_OLRq>fq8zI7(VApuxDlVXv~g-w~Z==>x8N-rNce^dN=QVE%-@hs}kf
z^0b}ow(ezb4<}kBT36R4bZ1o^lN}_rtKp9A_YU;DeYRHQ`-oF)1_k|bL*Xjx4=wpu
z$e3Y?%p9&MM2<5VSul2Tx!CV`J4dMCK;;jTZhI-jy2K3rE1RP`P+zqB1RL@i^9@Eo
zqnoP_K^7T9%wT^A!j-Tw9c6aRN61aBkl8@GURiu!hJ7Z4#cxi*at9$`0^9jwb246C
zQS!pU8maUFbUl0T3%AcFTR@qF=(%J?mR?=He+frQg%zv31pq7?YQRLT^{BiAo@9)w
zfZm=4f)E<8AkMn(c_C#<h%|`ezA)4L?7@iQe%h&V1i;IYZFl{J?Q(F3(P^;wjdZQt
z3uyO<W|4V!fE8dp&1-`p@-HbSRc3h<S$*|9Ae0Uf*T5(H3lKcG=BuJwv?~G~v^oyS
zC*J0IxNuo&79H0vvS9k`4sErDflv@(S-w;Ea54X0FfbObJ^W;Q@d|ut*eQ+q$DD2Y
zKqW&x;mnu0k53rjnkleyps1Zg^ITBuV!Ptn6;XzE^NdRAEaz<|VvzG%_7E|WRyu$2
zaF{c0Y3P-JYncmR_`#3*Y33UL6svYib>jL<`c;&Z%!CY4?&--^^IWlpM}ogV%@HRq
z&mD>cM&wboYCs;A3qrFF?-X=Z0gS31CPY+F<!OUR{sEo&+LFL*(&44T5G)R)DJ~v+
z&n^bkie7Vs)tqmZ-$gbfA()vxrxFMNVhZe#69I*p?dS6at7Jycn<_1d6fA$5%Ufhh
zmHZDxtD#5WeMyT6#ISujUbodRQVY<kF-XyYyO$p=Qm0iVw))Nf5|_wngZSmbkXo}4
zux~wS0njMsf1%#*DQt;7Gq~FwHdsjQkA{Dw<7W3xzP;xLCHTQLoT@Ex@#xwgCt+dd
zaptqJU5v!<Sf+k<n)45?9~-Gi?WdQ@j4^x79-@nif-|NZ<>^u1j2JHY<lzHPAp=P1
z5xU+Fd1hH|aZF)%LqgJ6hd}mzV_%>0LlY3VsFm;sf2XPPkfo!t08`rk!Jxa%UB&;i
zDhVN?SO39CAu280^3lI965#n%8vn{F)N*V2WB-6n{>CFz0v)9nY@ktaiGi?B8mtcm
zD`p4niw8!fSV6mx&;&jt3F%OZ@W+N9U2?V$))Fi?sZ~TBwFy6_*_I$_EPU|#=9ph9
z*Pu`KP<cxpDb5f$`?4m}$@|F)mn=#HO2-zZuQxN081<k51?rcSus>pxDe80GkU?*H
zH|@5jn+ay~NqZy-!#kGKzNVmx3!{i@(^6nI`v8&c(}03^Yq&C7bmFTmHEmCCavX^2
z)Vc4*2ge*n1n`)OYs8tIGz$Aw0lj`@2F?G4W?8}WNKN(B$pwz&Pqu?1PB2aj`x<%a
zV%_+yPgqSg{}EO=dx|dsVMX0vN_m(76zB?rz=vJ2^G<Gc(WD@|@UNQBR{Z8<Ky#~n
zwJ6tQP3Rr~&Q^yA-o8;zWN9tx+ZCgXB(qC~nTZy++~|ha-dXN9k;$L?u^I6DxN_EY
zv+SvJwyNQWB<^BtE0~AS;Mx!NIx$SlGxL^%ot^8%3z>=CY7%h3kHo3mDsq{fGqexM
zF#LXLaVzJGLG(gWwAp;UwT5ei{RVhK_Lsivw;mT!EO2~oH-<A9`M;ZaHp_)T+J|95
zb%4zXeSp_XPiD|7Aa^ZBcKAjfYyRXMLl{rc9%eX<WY1wz%jk-TE^q9H98c$f^0iV^
zw=h9B^sA~gUW{<?-+T07LbQb9_kS-2s1+;-8fBSbEA~9LdI?GUz_<3S*=nZMtyJgu
za``>XoW5I5M4F(#Qy%G8UfUzww`H(m*xaWIX8T{r;?(i?`mFmk#i%{?#`NHkruBG)
zTUW6NueIz<3CPeD9XD4p$g=h6oqgoQD*VpBEl+g8t4+}^z-4=G2x!k@HPwMAYyG*b
z2?oL2xHPJ^7a^HWV#625u*-K@AWk4BlRF!deU)IG(PctI*txMpSz}gIT5zChJn6Oc
zO|@l;EXS+?6lblMs9M?+rD|o|^XyDIMyxLNsMlm2<WHRCR}|*g&1e;|;*&_zL(*X6
zOZ0GFQKxh~8B=4dTK#EEY$v-0e^iDfvJnO!BVk7#+9?<lCisSt(Yl%wtm?UV)wYax
zpB*urN3|s3nK-2D)WKjz|KKC8&9|HAsEJ<%Wf2u$6Bp~m#kb0eqhyXP-1qxtef4~=
zfMX_4rakNF-4!~eUiN5BTg|R^*5vANteM7?Yo~F{r*FR|e6RW$|Js;mo+q7XWQcTJ
zfjzDmA9Z?Ht4cDWRQCM<{3p(GioJ}vmH+_2t{UL~iL*5C!Lf28V!2}aM!`ro*l^@e
z3jIS)pF~QLRv;spnEHBEoF#)b9<?n)QTH25ylGX3XH|y1HNAaZOQMnbZ{cycn=$Ck
zC3?_Aj_$q`uI>ug6`k$jyfvzm9mhdv8i6(qk$+UOVLsIso|T_`JRS~c`F*SC1CRtT
zoyU-g@!R6ixiuBPtUEUB@c6w+CDtY#gU9Kilf2(<5*Gy;XOJ|$U0-oT9L!j9%@kcb
zX3nvlUj&V{T?5ETWYVx&HD#AK%{e85uz0ZN3U4d?*0L?;o1G&WAP6?*(wsnbMY+(r
zql933W^(K6*y*NM2dDR8JloA>wzQ>9ZEt!Gv{mn|lb<yPYINv3`#88OOR2qviGd4#
zn9AJk-c(yuLH&5KN_AEnhmx3@c>GZa2?1&1-X8IQ9eI7*?D*i+qkbfgQC({P;)q4P
zd_Vch7yjwoj@a0~A@96^wfr~9t+VrL>oMoHko(8zM2hj$6AjwD+^{H;K4?U8@k+bi
ztde-@{5`%r%X%qh1BzZyYe_m?iGY5we3QXw_9$!^<+$|W!f4?_0>JW9;o|n_{Wpbd
z1d7#rr6X$@imNUe_Z2uW0uPDmtuBS5C&`~^!w=8eUsn0+v3=>S!qt+t!b|CQt2e|q
zWKPekZB}Qg@dt1H@7vuSLh~_%er&Ae4xhigSDh$25F!NdiGvOOjY}a`Jmr@k2#xqk
z7s@kV`5+-!T0AYQaa9W+{0@*0@N8(1?)Brw1l7;avAMe!k#6lrP#xS4ui9C)VXXlu
z3hltu56a8QQCHRuWSLp&-x@uvmAW!@hjr=Dp(+MSf~UncmKnthGiBYXwB@(v0p2{;
z$@ztHzPCRlBMp)zur<xmUYfMa%mo!GtW!2;uC)0N5@qEIzP1OarS}~nd0HlvHFZ%?
z>7#*)ZIjmO_?*Y0n)sf_mN&E#8_!IMl^XUN038>mK%~4k!ae&j`JRe=1$?UQ3+(__
z9RuU=BHOfq4SX(C>ZhGrCM;p9aKDWV&c;P@o)|8hNvsTx7bk7dw$S>RWGFTldak45
zWeR{!Zp%cw>VS-Z>)N~Y6V2K3X=>MWV*!Z?HUFTL>iFY?>pOkUu=SFSSS|&D<xB*S
zId>;n{PSt^tk%5ElTblJXdUGCuGQZ%Wa{5Vs#9b|Subw%aQ~1$JAVyA+x9X)*m4^D
z-`3>6v-V-5fxqp>R*PR>4j}1l&!QDO@^=^|NgV9hDD(4tOo{xC@_8Z4c-Qkhf5E&i
z5V`_fQW-`QH9;&O!m_sla{>0dm{z(7@?`)ws=<0JWeYcNWbh4wbDg2iQt>>uG9d0e
z0qz&xWck&Q$T*ysZer|=!5ObY)da)0v7AMBx25;|K(n&aTUO&GEO=?S#-H2kbD6+}
zyND{h4Z}LREa9OTZ4nq6r}yC-brlQ%qwF7Zfm1sG5aJI=XSEBl_oRh^SSJGjjO-VG
z7%^Y_ZqF44A%>q+Zn;!l*SI+r3t#M@!*}!<FJ4DmhB{%(*(|6KVp47GzAt|OzCLuA
z$+u!*0e*Izqg>bUM|&m+q%T^V)LKTAnrF}YU<TI-`%Ol~2945_J@%-^(D(%uK_e%w
z6GHnhl{Gz_7DCqyK#*eS1XT5QTkHD2vDc&e&Ii6s7B2&fn<oy{^hD9x(nO9t)+p2Z
ze!xC_o@hXQK@r5ewsu#y>lku{09ZRPH@&N)bTxQ-L32sRkyhj5E?{e{?705k?`phL
zP!^o=b>g}j^oGTwE2p)b9ca~S90*o+Qe!^XNB#g*AHWHJE=)uK7c+I^_}`%S9^Nbt
zo=3nr)&yl+8rzcD$T`*ZAk=LhuJ%hS>~OyyN$1gk&q-`9CoQ^;S2yD@()4r%E1Vug
zKKW;~pPPJa2f|xbOoT!fIT?iVqKyMd#(FG>PTjRjTcOmuR(R73KhhUWCZIP(?&SM8
zEC;~|;weE6VJT+<UEKWP{5-H+dykALDM_xsUBkQ!FM}O2OI*`?j27>?Z&2Vplj+5X
z?WNS`tz4y2q|~f6p6>;&y7peqitj7{4+G0}-Q4zUTBSD_yS75NQeOKQo4GmH&>#k?
zX6o+6g@ZC@@^!l3n`Z33G%l$Iu(V*v2v%4Hb6Otm7v|6D6mY*%aTB_8ui6jIN3ZI{
zgu(^ol3~NfTLd-!9tQJ_@Sth&NmJ$6)=4|vPD9OCgjfPbGpeh5wQ;`3A?A*P+K&^t
z`~a>`rgis9ElJ#xioY>;9}*>1`~m`K!Ts7kHo+rZQ@g%bXCN0pwJ>>BJZ9#J8fELz
zW$VQ8W<mF5D$i*LxFB0NzFtOo#j_<xsXhkzd=Ly|p}bJu*W44KuM6S0-L?o~)Qz|M
zC7EoMVB}I;+)p-QjnLKGipDTWIIlJ^pL-Y{*drsH(~1CGK!E-<cBP$ZG;cuh_{Uen
zigCC=*%mn42>z@6m~#IP!<*MbY4&k)d9KvFF&#;q=L}4343Panr4g*{@{tcwSYLoR
zU0QX`NbwNLT4m7GG}l0<%Ykp(1yp<kl;I2_J_JO#73k05b9{0fjh|_&3o<VDzeNL-
z4(P;)f{~!mw(pySLdzNWWchM(tCHF<2s33lg)z4W@0nfMj=9h_0YM$inGB&1Rykb>
zH1xR3g;E9-=m%rTehbg1X3)EM@Cl4Ho(U^s|H#FzSjbn~k{LdhYiPjacWP514!A|q
z#tFcu5U8b&-+AG!3R+iPEkvqdL<H+Fd?&xkQK>FVpSXBd2QqryAGO<U+1YekGkm>?
zP|P@0S62b7!h)`c=x?g-+RflJq~I!+Nv#*8pKP{2wKeqvUhKy_4CkStIP+9prt+3O
z@+*exl7jbAAGK=Y^x|x}lbFipl*wW()LHHL3MW>03bW4f_AMAaI3cYLVfzDs^r+8@
z6`3r=3W(Wz`&b~szf2TR)<GROOlj)O<MLUBGP0*->aN1Z(3Y#vNzIi;0uuMj7bv@m
zdIP#_=WyF4-Ho!rRGF3vZPQSBY+Cw+2m2K0>x-CiOgTym9aqmGsnHZPdtnec-rB$-
z0W>oV5Y5*hL+`Od0zPZAaiT&~lB>%of~c%5c|`|GO&XSRAn{5?l=#0DlH|F^SMWFb
z&pzxJf<edKV|rozrEqVK51KhFAuHRxIo+|eo8MKsZP7X!ZOqVd+*!uBy0VqbiqbzJ
z9eef${r!Rt3{nJ`rXAxcxtJk2QlZma(~7$IY}PjIn6O+y5!mw=Jl0~M#oC<=2E+B$
z@Jp!khViVTHqiDArdUDkNN=)4-lwHh;Ex<X4udkIFqJp)@)pP(`?`aqq|<Zydy*4E
zCJx0=%C(*N;JWhT^(gbOc+J}H7M=nsgEf@qE)-?D>#Qyj<qEJAXx`%us*^8{+SSA=
zLOVXtqL>g<is;(#Up}Ij{D+1yln}k&nL%vadyf05!}|d@GQGV=`85FZgOq1;W4{0X
z7s$MrrQ8w%{WLB02z6?}v7V1yVwYID%I+CB^qJ0EG9komv+!O?o{N`Epo@v)O{9zG
zQ0|w-ESuaEivjcV@>lzDr^8dbQwKO2IV4Sq8(DQ$<DQ8slevFta-U?YaNn3L)3#5c
znweZSJ+^FeY|-$j(CFBxPC5F}<^6%0`6%#xQM%7e)4g4KprT4?XL|N=?UTxTbZ_`6
zDDm02a=hLgqLxW?ukp(*$$D4)&CU{b$|29^wKh6F*7eA+Sw<@%-UZqRe`Zn8P22p`
zgHY<NRBk!YsK6FxqUy}JC)K+-mU_V=Z+U1PKO8$?k!hV^P79YnMPmN+KQBDDcBS22
zTwe01fXSa4Ifk#ISSH;?%b;%=+bw+J(6lRDw}ko>X_|qfW=MKz%#m8RJ8f7*s<M%$
z6C8OwXPnF{x^_+@IlU0G6@371%I}+#`cz!6t<JhpL(mgR>v;iLgffyk`Pz1ag=uK3
zVZzX%&}bb|O1!0~Yn5SpC*7g;I0y?uTcCgwCww(ZJ-|1n8xR;%5C&!^AunD{!j7mZ
zZni_tC=52o6bii0?wM}P?)y7eZrml|^b%=u&{A1*0N9{;*Sc05_2uSy=n{Z7>s#Ri
z6+xq#_XQB+DfL$~^iLi!_Og;Uhg+KmETr6y;YvKHHK^Z7%!f<bmAqB=qGE1CEn!ZV
zmXM~ZsoK)EqB3dt0TSzv1!xRFM}6D7<0BS{>^)E94U+ze1&5AURK}a)(-{0r-Z0yW
z!LgmB+JszqrB5Jd(&^dHEB8cl+-u*_!XdK|(|1>EY3LA9ofR)A<q|qI(`SM1FQgW2
zCiP=qFz$@+W^t3>>P4Sbdz(+@>4bD6zy-NQs;0Ir^?nmr)^*BJxXemy*^J@ntCm=n
z4cK<;IK0$mz13J;AC5%Nf!3zf39u~v@hC9xDCg;1JBM3nO*Gpj482y%;j?oJx@BfT
zrNFfW=L{NvBoN5g&>?d`BT-Hs5q^X>aK9^V$vjun4Cc6UP+-=~O4bZ(CQak{H)F%#
zje86H{OI{KfJev_Ss*F+7pyc_gOFx_lX<I_4XizmdGD4<94h&rXn;W8mgaVZy01xM
z?N6mA;gqX50ce;%QBSLizYqd83xL&l_1Pe(BU8qixHy4|n4H9!>Y_#mRf9wXE4igp
zI)(B4f5VJnsc95&8gK;8-V6=-@Lf^HD)iktr`QC8v{80~YF1G-K#0DZ1cB9~6Gd`O
zCm6$3dM-vVv+0smTT!TU_08tkdGUDyO5>$-6J34?GZTe!v<Yxl(gtt}+Z<W8KaMi_
zm`d|cBSR%|T}Hi?2Z-!Wob=oNDq>kpbXKlXsE-x2SDP|~8vBAKRR=D4yNfUK`y9=y
zNRvddD=ph+4M)8biaHled_E=j!y`t3`gCfiTCv_qy4VoeYdnq&(ZW`dAO(`$s)sms
zhbz@8tsgSWn2NEssYN?2I!y}J?FLu0p0G9W>c^|@4&1Nc6}~Y<_%*cO|Dl#yE0~3e
zrv#(zD97Vbjtl*>Xi_092~ya3a~|#HIdaAvmqVZU#~8<WSG5r;=;fRFhr|$x4LI4Q
zFc840(@ej=PPgjZiCM0U3KMKQi07*eNG&o|Q_j~OcE_S4qHM~&lg3y(B4uph@L-?e
z!)7wJv!qY?NhWHI34-7H-SO)hc|(jKf4z4xnyeHD8HQq~gyN0%;}6f>M_|Jeh$$Wh
zNSChnXlcZuZINz~p_0CMV^%O-=6e;b(Re2gWkp?L(<5cQ!8s}$NI!NLTYz6*?{%D~
z_43voZs})2Na?4+Qq;K+sb(a@*aNRdl-m*B5%>=AOa#{_hLlx(8gCJD&&6DSpfnVj
zhyscRW+JE!Y{8=4dANU*G}{)%8xMs4t*I8X+f826cBN%?a;+Vj7(3RWUuC-0y%>^1
zDs|5i9NZLPO=Yk8u@Wo8Oh{{I5q|0)`A{XcoNZSy+EE!vVbdq?zVQ}+s2PXJ%sqp1
z^?pFt0Nw0mXO}XRcFsL{FJGQHZ+6SQR6bIQ*%J=g@F|-3ya)2xB@_PWrOFxs(7Y4$
ziHgtXUJTud9DdO4T~?_^L_K6Udt{9(|LOqgi<Vkk0TMvf6iBKj;l_n!p8_oqOrFaN
z$&{?pkVyitbM26*B-!OlKofJbK*NL43z`IQIANtj`rT0kb46-QD?$uiWx~LQ0p&1C
z;wPR9<MIL(C!l|(-O{C9A%%TDSSD$H%Fx|shO@nN#75PnFmMQp*1jW+<WV{7rSoBu
z5NV6JW@EO(;S=UcXQ|2xy;sVo=5V5`Rsg>O0ywxkydY|7_>^T`PI+Vl=e{hl^@F%!
zs(4k4Zq|zAj{!dGI@c+0#Qi7sV$)%^@;%!_y~N#x0ug#Gm${f&7UFav+C89v&l2#K
zk})ZIp}m=C*3O}2s{)7gVBjEG7X;-1^3gkaZDjIko+jfH_^zkk;eM;U^UWn}LGHeH
z39Mux(T0vJQikPm|5C$<X0RIF`NYVslG*3jL&`$-%xkH*(^h$lN^zsVZ-R>^jU~&`
z?jjb!l0(PvW-=9JCW^?o`8N7-?o30R@HYUVDv|V_(EKWCh8m@coTBWlGi|l&c95oU
zO!8+ld2SjvG0TX&pFC4&Rl_n^nT3HLPA~wGmpu@f$McH8b@s{}p=J%<8fGS%i3pl;
zW|~sQ)-%#7p~j+pjXf1B5N3qZ&T^`v0U48nmMXPiq0*QSJ)(I)HmJKsP+(ss{QU40
zrkpE^m2SHYP5i853GdyO0ae-xDJf}UmG8DVXew(bV8<u(?F5D2P<)NEsPOF-`W+g4
z<tV+$04+)E`TQohj}nnw@OGhWninP7XRuxRVqsLggN&Ik349Q+){}vEuE>Fpv6XOs
z+2-|T0BCb*Qg6*{kXs^$D!WTy91cMqp*eh|kZ=G6l@SbH*>pWEt;N5Le}ugd_C|>@
zN@+zvq0eyU{R=y%vt)z5H8;_oOnQA_#{W{UFsdNgxtV;ziXWbSj&AL}qqjYkD}KCo
zCV^kVCB*-hGwYgizxT1>p8t3NcPo;cBg5Ir=^SY%)jf`So^*?;zHRcHX25<C0Hez2
z8LY^pwX0CJ|IKimNfaFDh#*gbcQqch9~UO~Lt+I5QJ3PW+S+=0dIP%p_+iV|vOTEB
z2X2k*H};*-Q&sv$8KXG+wi_qTibZ`rt*B!N8xEms2)RD%9)`XB{!J!nl0^7@;;V6#
zqp&_D(B{`nToHLC&TU#7!BVs|Zx3t`F&;nnNWAjU4*A@ZnzwVY)-KnjRgH%}qC)0X
z3Rp=?I-owLTJd>pNi$R@Wn2UJl?*u$coiY4CDBC<9Qz`lYOf4>>B}!DmTT8g{MgzB
z1(;)72><b~PkEdFGh)493M=FTR_AoID3|I2AJxm^c@(gVEFc=-iU(GWVm_tR{7~XF
zCZrMB`$df5Hr;1^`j<IOT&fFxUSD;>GD+|SyWM}}ljl{20FXDFX(~(Z(ED)Pmn4+@
zB{Vz}3UqoqK6QuJ+KC|Mp*D}+Gms4-mPC4@3_6nqG<__=MbdQk+_=C^7Mue4GuyTy
zgiskB#)$u8_T>J%&7Z$@q6E}V3LEV#;odfJwlxZW13*na(d+O^m|;doBst6rK!kYT
z&p6rKP-e+w7N6b>L@w6zflVnci1Vw&AK=`}4ep?}Y<ndi@Kgb{J|Nvy<V)+%Cw+h(
z2<Y@0HCufb6n=Wb8@MNJKm|X9qzyo%1z6*J-$O+2sx3Hj`CHlv#5B)lJ&p>Iw@Y|v
zJ3b&`J^9V_ImF#Ire@2~yrc-#MO{Dga+A#FEQKJ0()^<AyC^GuwH1KkC05PfO^&Kh
zGq2tJ)oNWkJrni>&ACcz<bZS<N8k}L25(8ue`y*F8H2R=Q?stS?qfP5>G@%=Vu4H`
z`{E(u0xRt9;G*hct-Xb^G_J7uPeceZqb!ic<9hF24^`xWrjcU$JGJ1Yq!WTuI}B82
zPq!WAymEeJW*t@8eTI2LYM9pj3w^`2adIB>a44v^>gSf%OOi2QsA>rJoNK9?{b*@Q
zv1CJ8Y9m-WS|z(UC~0=F7vokZBWRt&jj(PQ<8(;h&^ps3yPZ55X~%7_!H(lK1$^jg
zY2U+LfUbV3{_!abhyt2$h!=CW^LoS*_YrYqleiM0Gt5sOO|93b%4~rflT(|lb2Kv{
zo~+8`@Tfckk!u&)y)sPqt-~XX?|~#I3uzkGJZjB3hbvop`Y;gc{fG4yK&H7IvLEJJ
zQv1})NSjcq4G_TcXVtg{x6u88HEJ;#)<=ZD(q%b7`WOa+kQ25H)R84?9IS&d+p|Xc
zMnT!*l?auorhg-3KZXf}TvHEDyg3#338(-w&|WQpU21?;Rq^;_S1*HYVjgRcAA<V4
z`*g+wYBkoGAYe({sCx=PUdNm_$983e6yV!CyqN=Smb^KQZZP+vGGKe>63axUX~&EX
zd6b1E2?s$v)^22$?X6WC(gnbWOe2VEq%V`HfT$$^ja;>Lx54x9a^`0lRS9r4K0&pt
z;avXoZ~lu-vXq1_4Ud8ap=L>Z=V50`G)7#&mNQ$Tfx*>gp^$h7p(|Nl#yoE}L|EGX
z13y=5#{L`c-nx_={aH5ir(K7X1!&aJ048)01+kwB3zgB9>s0&~NMch0*S-IbQ&LX{
z`0T$xz2g&125$L?3=4m7N(M5$yMD>(HM~4#S@uP4QyJDGZg+ihQ~zK&Wz!FMcXCN-
zmmsuKNqrSTWpzqZxxedT8yJQvI9E-u*rex3ZCloDBS=#j;mr9h^UtIgx?w2-4j}lu
zvtign9Olu(@bKLgL<fmzOSwtX#OY=ND1mhvizgIZ%&zx@q4=H=;6nAaz>AC0;q#$x
z0HWD}mSvZixi&+vU125qEiX5RcDH^3%@l?!RRZTY>YWk8=4p^@lZ`HTJ&NDJ4I$45
z(7}U8OlxQ@f5;8sPr(aOa8V1Gx?!Nu4xQ-|wfql!L07$D%L_3EwE&0S@a!b_Q}>vD
z^lU@d67;kq6;xHMi9~7o8WDFE{&;N!{VX}>!L(z;1Io;ST;L~RcIxI6n5kYHzs&aK
zpC%xIEt=ICq{?q~r<@Xs7oT~50&@11Y{t#=;DgVW(AnOSVx&v<_)AJS8a5(Rvjxc0
zumGx<X5mFaT@k}+S8#gI7xjQ}_8ueH`$KV3^m{*o5;rS-1-Jsxfx7}36jI?(@^P<S
zig!qct1=Vke8Y8z#iLMQlBVS+eFKS<S#a&+k=v$3yS{AFSzWe3FE?tMKyT-dFnsZE
z+Hc7s&a1jFaGXZ1ru+<@2_x}TiMn-p9&;Bq0hTl=jX}(UtsG#ZEF_y2VXHVxn;Fx(
zmDLyG5*GJDKn_Dyq$4x(g+Q5Thae5DC?_b&5r<N+()eVBm04qpFSRuQ(#d8bkAcKj
z>&IU1?hDM%Lg!#*r+{PX0R8~A3ARr#{oL8fI&Qh)H{1wABVeaXNPvOue_^<~-)yt8
z3Q7PGG$tpCN#Kj>uMy+}dnhTSOr-@LFggD^J1huwuG{1+i6bRbH9E0CHt}Tc9;(SJ
z%Ls-<U%LUmk*IB`=o%0-*Mx=U!PdGo*mZoF&a>FD+b^&=UcJT1&N(y%v*nu%is%b0
zgvamV3rx*GrRbKOk;_a(7r~-f+uXW{zCq>#bTP=zfYf(e(<2^XsQ>%K<0ac@_+nIq
z!(9qNZO$)^g(!{OL-<*dL*ES)@f^&y4QxIO%v~D{e|6{DfO)cQeOhdt!Z9b|Knud-
zD@^6hDjgP3x|#3r*BNv};V;xIM=`$sQDNn_7hvh3R$L3`^?5r$il{~6z1hKqnJ+Il
z2oMneH*sv2(_N@BDptCW@I>Z;ZN4P~nm%Vt<)^4%VnOqBbw9FJgl?sCNfOKt1XpsR
zC+UHVJWU>wgH+C*{(8_Y1+Z1A+}b;HTt%kCxRc0ECKTpYL({Puc6hyE@8siD&|Chh
zGViGp&w3Rc=M;Fkps55Z)Ytq=E+uic%XzqxB-kaMvZPwwRRR%$+UJ`SFwCf|{v7Yg
z%d{{oGD@KJp=bHs6*op0$G0On-ZA6^8g@~nqYck!O$*or+X>{u?<CUv_K5T@pusMj
zv{@Wt3@kqAORm^)MT>Mh8W##>s*5CgGr1m)eZMayNaN9~l$Y!kNF<F0;?^-O8B1bt
zClU#<ssOf$Uyu`T*^0u>@B@P3X?UrP86>^aFe6**6^9Xu@U#!@4S%4%c1N1zAiFcj
zLd*E_n!v#|?59q*??7bD`wwyVTKX-$?FCwN@e8iq-|KHW5(G}E$yZz)jl(G5SVQ++
zV~$@!S9{_|V&9|PXv-)Ii}`Z>`}|`M8~?JQuSkv|7Pt&BrGu@32qd-EOFx)t`ioF6
z-l0#OKGU(d0F{21vK{WkMj8H&N|(0hlc6J=Y6GVU59usdASC1w+E2f}oowD;$>Vm#
zmQzJx3BDZ|jNUJVi{0FZJM~0Oxd^dB+<aof*KWCg1|S)na#2zQ9E~19wtSaSAV4I8
zaS$c#Ts&IIZ}ReTsy%%3QW#6dIIW%+Th*WD%z?#=I$g2hZ7g)h6JjPQb2@0j5a~b4
z_OraB>zWcIhw)Fg!X3LYf{UQ}`D{AL<*UHsZy^Y4?hDNB34GQA+gvLCCw9xJW)eM~
z&fb#@DYJeuCV=;p*&e}o)CS9Z{%h4S<FCcHrJ^$4U@a#(ScxGAfq)Fqma)+;>V!CM
zb6Z61vNg`8Tf15Sl3}JJgCH-=NM!;Y-rxRuEZ1W<Lvq`frw1~w!t}4Z_r`U}hF`z6
zi)avrT(3<8wD@aD+^<Dmu*|<zbKVJQMn#mnU?mD)x7#?X^2HI6l7Wgn2fa9x-tJ|9
zAu>)0j!b5@Nq$7L9Cf8$hGKuqoEKyA6#N9z57|p+$*<kSdyorv`M^boMr5X(#@XNP
z)K`dO2lvsALg&yiy7#Yl$yt2WMzh2@7M;QID}h-W)W&;f!Xc1@4t&{Is;1{(T+Frm
zNkxdTh#Fqk;4M0K168_KO^_3bbyLV3VX}^?seem#^{^5rgU5wkEg!A2Y`Pdr0RweP
z`bIyH(K2<gQ=Z31Lo>fV6qV(#^_4|v-T^h$)i?7k@NnPYi64`y>yGA*&V@%Pbl7*l
zZ_O)llsKiZ=fy=fpY;^%MG0iBhHeeC0`pX}Of+SrJ2a=d92{q}4BcY?9f47R-lHJO
zuexr0?>hq*XXr*p=sa0QeOL&nP#;<`rMfiE0NrqkozEI1FG2&3y)A~is_&bFPfVv|
z;5>pWuecb8e2@yZetUCFGi;*r9YMdPUa4wPEjkU-Qo>47QrCH29oBoI)(B_ZmPi_2
zN5e-wSZKPd)uqAFp=Q9{(X$&Ga-&?DT$KZb)A!#m5Vs9%jUl$T4L^0_d63utOwFyo
zHOC#VN7o1|fA%B{1;y)OEeH~=p&~aAcW*wV@H~``bP{?ULWAyMEs1&I7<g^DX@RqT
zwL4r*5RFRR?->$oM?tuGLz9Ww^U3g21M>^Alo~dHd?+|D$e$R#z%Kd)1w8xRZB9bD
zIj40cut_FW$|ju?CjI#lwRSk}*#tz>S0#+PUeW+Se4Eh>EM#-Gd>ReINTPC$EzKkj
z`saxE8&u~4N*KgO_U#0RFIb7)$CC%0gC~2`d%IHwE+f>X54GSUQg+cVBB`J+1ihg;
ze$DPd5$BzOLJwyFPW|qEX(CVe7khtPWtB`s`_+F;oQc0`3zN2a&eagYmqu4dR{FLG
zh>r8NIM8fefmriH9c&Oj{kabv@$8!F5U@GnkmG1c|NE^H*uBf2QW%6icu!iEygP^r
zOahAZHQZ+|`>ZQpAClk1G@5u&OHJ^8ctP#C;-`|$Mf0w;`B0^|Gf2}beO4kl-NId$
z-mr|W5=o>(sL=1rH~y#W^3ST@(4b}2*2=Ko?|+#)09fzepFscsws-*mkN^MxoZL<5
z3=N!2=>A>kS=*U8(VAat*~De?Bm7)_N5Z0MS#Vr?VOrERpvWm6WB%<ewqX^`PsGQ}
zf2o!E^>X6}O-Z6@#LZ3I8i8r*n!Dm=>f+Lnm6aJ<aR%O>HG^!UrQ>XmkduuVZhkf)
zP-p>Pk&_XtBi)%__|)`p$3^VSHH}phUbP)z65|vXVmQ@3U+c;}v-KFyp#~#5+zZ`G
ztXH&3QXm24@%S<Pz7ErDi)f#0zE!TikAp%hDv*SIZ{nLaE>4L+#SrOxa2^xASt5f7
zB@?vXC?y;1v9C28(d|}uDs6b!r(<B2RXw^{?2lr*I2~_?wqQ&4ENFhGSo{>!RO1sJ
z2o8El))lKvt94h-jFpKY>l;kd>7Yr;hhz^IW8>YGh1x*>7WfNqA#cxfdfG2E|J-ax
zU`M%l9ybpxhm`dBTi`YSx;Fo@TH>{%%r9BDd@z#r!~@$Sy5c_HmXFa)?xl2f5#*8N
z!W~){k%%`>H%+F<A1=Dh;)is%{{)@2+;NcBbZ}N(o`aILY}loFWj`2mp-;mQbwP09
zw@_DLabEsJE(jMSe)S_hhBVg96NGU3j{vDMtACap9_xRjcJp`!tC_|l9H)Q+3c?I4
z&K|FMfp>B1pyRpOrT#C`P$w)JN!%b1Ujbqq-2;d+ZR*Rl**oGy>F0U9=+-dt$1yrQ
z;+rcMFNC=olDovUWL}d%fhQdfDsjgAvE|Gc{)hV7YoP|Uh4MX)JzaL#r-{QrVq0ur
z;pHbUIj;tT_>8(}7+T_@3T9ZqcFcFm#U)y+QBYSYaA57t0=|4%PLAP6eTe0`PzWKM
z0?1O<O2a~XwBB>dI!jG6s70`m(?}B-3R4x&A}4cY(nQUdYy{y`c9FB5bzwEy0w+Ho
z|0Q9?!s5H4lrk)-W<XP;10gYEIB~^4Fz~OKW?5NFaV10b&dYe3nkJGJD3r@+Gbwqw
zx=*o-rDmVf@y))mRsGA$>%aEM)qm|JE;S?-+d&n<Z!0^VBp~wg_aEYy&CRJBiJt7B
zy#G3(z5Nf=_oh-;I*%{9Ds;<8@ro`S7ijLKw5KA(TooK;uAxOq7I^MKlQ<15_2?@P
zuOsNvk=qnM0~PKq<h%!p36nO^qA5we4xmX*g8%mFJ5TQ->Ctn1mcD=%HJ#_a1x^Ie
zw9ujBls{ZRZ?8xjt&`c8z_58va&4(t87qy9y*7Ch2Z#bS+<s+Cbw?t#YQAxHVHr-<
zkE66=6sew=6EU5Bo+Idms;w1{g~7i40UR6+rQk=`$MQFB=~LF|Ooyol)MMn&BYd;z
zK@6Nm)$boL{uX>xaxAV(HDlkk#(eHw$&1%2HanF^m5{a3GEPc))5zV7?ztU<5Bbr_
z)`pNNO@ao-zBu~uh%(Xi?-nWYGb2Tl%*ayxg@wKiC9zvFY|7&1>jLM?64(8T!&=~y
zNWs`QO0{7e9HZ868#uMAK6f3&LvkDH;@1m@Oi4*Eo5~x`AKrePg#p4xej&yLVTgwg
z0uV<9OUQ@3p0G5_9mVJKq@dm~mhWQHQxd&_x3V-*)s-@mM`ieX1M)X#JxS-9HAX;V
zSiTEMo(&Dm1X1WZR&LwH>xD8@-z;@%nH-2IElLVRk9IW@qF1~0=U*{>_*Y0z{*{yU
z|GhWU-oxoV)%~`&{ybh%l<+Iza+lE5UY`03{GS%Mn0Kr{%YXm?{-6K=IRCv%+L@YK
z+Zh<^8QIy`*xAw=<tQoGWznO1kM<2(=`QjJ&-_)%Y?L<KqoyXt-xT)w3=Y(oSJc<9
zlr8%8am^|T4q1Ewjd<L4@ZR-^FV50If%I10fee2*Bio8Blx}ox<`D`a9!&^OQWjo&
zxd!!a_rFKyJ?P{e9m`|(4S{pbgQnmgw;{GAp!{>pvEeHK{4aqm$T=`xR3dubp<2c@
zDXbB(+9h}7o)(&Dri;Uc*6+0NFMVY#Eu#NuB-fitL?r>-BZ4mkhp;F`lqJ#q4;sJ7
z@a^&;_O}4oF$nrT0=*$T3%kI$e5?Dd(g8*9{Mvp`t|1MfH{26u_xkj+G}cOO2heE6
zp_L<nu}U#`9Au}R$xM@AFKeZ2NwG}Ik1Diar~_mEVIAFoX1#s|x~Na&pEY0jAT6J0
zP=ZK78tu#03PppgYL<Pk`mc+D=uD}?5lo*%7P*v%2G-0Kmh+55$A9(|XNPeXGODhm
zA?hT)Qvr;ij)@b`(Tg3J-S5M*vuMtpj{>?Pdm@$XxrPmSFEsd_rCNc`zghT)KNYkz
zE>$cK*EI<=hp`(|W|_lULoTY(^1j^MJG-nqoBwBR5xnZ@4`!@-^ulQ0lxuh!Z6hxk
z4sZNA9(qpwzCNS<rye5X4NtuO=>X=RCjNhVFwcoDfE%Po7Ge30BxUg@MMnli1t|gw
z>atjQ&4;uQdzE~7SEkFz=s4MON`}PdX#m5WgSKAUy1I`}veEJyLy<3CM6meDpY&Bb
zr~mU1ZdV{EKAm^O&AH0eExuyOB2JrgMh%k4#{K=&I(tG7`9v{gsE7I<S_ohL^Pf+h
z+%|p%00RIZN(TUd_1`LXa4~T)`M(-&!L*XY61!{qreduXv>C$}Un;KwMgdLI1Vq9J
zI+Ln8w2hH#@Hw%yG2i%lot^Hzv5*!)2<+NkUtjl_+gj;^JpFOTfLIa71$ix6A58Kl
zIUtTc;Yt#<<y8mClQi2MQM4x@3sAq7hI2e*vg3^LX5lprKo4_5tUrt!!+X2b>1`Y9
zGyDC?!rA>q9NRrX*rrD6y%P$lXlH^yKnO3gV4$&X5z{JBJyA=!EnOfXWT`2WOiP_-
zgdcR^nVKZDD_gKp#Eua;KNE70MjcZ%YT_MwkGjMqOU>@xC+itnIIedaaOCq_vC`5S
zK|%X&I^61|+ArFfj>HsjT5Q-`4@ZBQt9u{|nR#aA__#?mtXR{*mPOQ+D<%`lt0|-}
zr}Cvt5B^lzADIyzmC|FZvBfr?-nc7M`7P7cy6G`7Xw$8WDN1J%YMpcKqBg!o2e8Hi
z$$e^tKVgF-k?!?eHmq+fT_|-kwt2S)S8Dlw^6YwxyQABx1e>-7!uM*J14}C_gHZ>q
z+XP=AWH7Rf%EZ{=P2Z*-gCqydCP!@VT%G|6ew5$1r*nC$Sv4d`yYJTeRWw%2Hd5?S
z9VnIUaYYhQ0)w{YK9^&E2l?=#3oN5&(48AzV`QK;Fw`Cu2AF1`jtQsAjWHrXXk4ZN
zK$x>{R`u<S&8#xLaI3>O9cSRn1ap~XT0;WRE=K>%7ggWnc0qSwm435?iLOQl+-!?#
zgWtmmzc2D2)Ih~`4c%wh9|Ond=#h_eP3dL^4h(~F<cgHHZzq1>gM<XeAm^%d042PC
z_vV-a$QZCaQD<V%+RNB%5&`pUtq+?N^ou<Slx6;eK~T9Ugv^NK;O+4lD!g~c^bi25
zBPWi>usCkwV7x2Eu<Y*F5=U%$F|^LT<bWMrYxG|`kVxQ|u)I>OW7V<&Qo24|XSjv`
zAo6wOU_U6@-&d$x7ks2YC1cin*aWXw*y!W90%=6zn=a(PVt-klnAZ<Ld5|(3y353S
zrHy^7=X0Nfm;1a0H;?E7fWQz~0>VSMTi<5^TufBOJ;@0?hgYOGo^|eCdVK9>!%oaN
zP?#Xaip?ZJa=;#e#bg^Yu9H05?D^TdxI+fFCBkT+06!VnMDuA@#520ytE={tpvrZm
zZoecV_fHoyNZ$}y3h}kcquzUz<{&n4Y}XPhp|EzZu}+7}4bBr`woOeJgDwjz!sX-S
z{%9GK`wYT0zrE$bZgqu0*7WPFBd37!PoKktC<-^!&$Ye^Q9@*9gu%<sd!CiVfQw>C
zc6b5=fKsg+JFxeK{;?$>qqi<LCtb?LkjqrDbmy0_=ZFwtWUzv<0?avX+?L)tpenEk
z+%;#(8ZX#6Q<LwdN-4?LZFc3>6J!kMUI%Sa;p5{9Ha#O#?ZrKrnS*;n;AHJnqMDhb
z3ad4N&=ziR?3@+7)?d{;0(*g$*9v82UVltVT6!EZ`YBlb9FGeXBQ|L%!~4YkBD<>C
z_Yw-;E9(J-iZdY80qe;^wLuVrh8xnjKT~kx+yhCkhG0J;ZlH!>;<7=_KmxQp&4%gX
zm7x9A)$d941_E#<!-J3GeG>J4+7xr<L&HNzFNB!>hwpi0A4P+UBsxf$m%h$V0i?bl
z)<{D^=#76=x`~F7h@-$$w^V7aX{Z{*&SIit4l0a6;X$Z&Pkbf-SLO~O;D<+4PiXhq
z0~OdZ=)y_mGoAtgN~vP!2?w57$hI$;T+ggBzmULBHvn3&WzmTj>&Ft;gKhpas576c
zO4`4NKu^q&%(AXBak;lzs7C}1{U5^K0m!m$*%~d|=(6oy=(3G2+qTUv+jdo#ZQHi3
zF5Bj-@5Vd##yS7}-&+w2d+#~-ip*Sd#>&i*W2XH`_)E3_cXq5kJ&c%?XRgZx;h@=m
z+N>29*tZAm3%*!aPMr_=lLFz_^g;2cgWqM2`Q3bl2Hul*tJfbz+PPFd9SEOVLX^>W
z%~2K<!c1hFqJ=7fo%M;Q!>&}2i(fKeIIxIwI?n`y_uhPAMS}grKL&mc#XEO<47{)?
zEH)*Gx$nZC<9ZSrO<LZv(9=>v@PvbHFjRUKYGE@ClanmJr;Ps{9D#wM2EXPcEjt~N
ziXG(<P!7=!DjQr&jpikL?HW<M1=Gz{;puJ$islRZ8TRGe3i|v-5D;J8m!H|&+KEF&
z{iI8;=b4RTzTk|Vfs^LGFZbw5pu?&}it|{#-343#F5PGt*vVc|+m3uxz2h~l2}^}6
z;WuT0ya`G*o7&wZ@%iN-#|#Y8;xB{tocTAvp0)Oxgwlt!WiC*;g$f>aN|KKw<-y4G
zpbuZB9VTjHN;2`K0vm!dP{#_ydN*xxkPA$yYw~y(L-WpBPC`yVi^1I4YS*K&D#t&z
z=*x%`AMR6rnIv=EU|C4TiKpPgW8}6fgl?w&?3KVF;WK361JV4A;3SA?{>!&nDU6Wq
z)OTbILA=N-ZjmR}pm~N+-05RZ|HQyVI;^@QLDX%y^EtV?!yGC+Zpsi(|GCMyN#3?0
z!E+go2?67p?@;#u)KLW%4It!aUjvqG;7oHQl6Bkud#8co=#16Dbj=6LslhKJp_Gw3
z?KeX_@Rj@00#ik4AyR6ftbu(dat;&j_(k6jn~>X32+ThN`Z?qpLtS!V8t5@>aN0$V
z=Tl(Wo0>9YAp+j8_ISvng7%j>6HaYOsd=EqY?LPGkp@4B0ZD}1CB1k&=6V!odKBs_
zZ_YJO>S}U8u$6x{NlybZmTVDxXMUV&G+WwZFY|~o5D#7-orvzUNEAibIu}j0hcT^3
zQT~Waca6cYbGQS!JT+3H{cGfo+?pH=->lNUn_X8RGvU_gjJ0c6N?72BNDZGzW}ZX4
z?>T7>#xd)V9oU>&B`{+%>II`VpUkH-^?RcDI;Vshk6ald_N&V%SgPBmkOi_H$+vQf
z=$H%bs1?l^H+N6{jxM~APuuC%E|nAc$5L0nPpo#ui;>=2g(WKW>x%HoH3>L>v1Y0^
zSJHEKB1i=et3>|96#MfFe9|X`?pmn^CR47JYXV$N1H~F0tz&14lO;W(>S>eYn$xQD
zQUSb+>8|L4JLC8fYQ2-ep~jRY3N20>tfXcl^R$%MVSx}J8a{~7IJso`=XC<@`#$6I
z8}0jVGA~wz)*a<mLcAv{LZ_Ys!Rf#tZY^cn4w{I7Acis-U6A9+xuX*fWwerC71qMb
zxikD?cSrF`K(MGijpXYp`b|zv#`xL+JdWZp9jQNU4_cIpfgI(mn4+8#pour{mezv1
zG@yyDLZeyWDf)E~p*a3xbc2HBh!`CDaysfm7EcT!Hpm<o^7$I3fxnX{KIs!uNGI_h
z2k1ORiwYxbdwvA;l}jmkna7uUC_Wouy%0;3@N8SvVx&v;-;q4e^Ks#R2Jg5<P!cT0
zqP*QmlfjlyULzW-<q@?2fhrACfrm2T9ZQcYX+=<I{|XwzMuOxqx;bta#xY<N$3@WW
z<%&NK8=H+vnK%C{%rDNFZexe&S0!+)QOJp(3?{V+u<Z9)9++awt<zn3IC4#Bb1G*D
zK?C{Ij%JY1UFOa43BEv2Um{e;`bp+p#z?%;zHBNhcA4BM<j<cL!#wCv;lkaafIaJb
z91Y8ao*Jd}#*hLJzRcpT8o??}u`swstC|_-YuOSJu7b;rH1Be!?-ZQ;Zh3X>=H`tM
z0Z;hzHpx!Q8oXW<sm<|>Az4fG`XIXdw~$!9RbJNhdJq+87$aSt+HhZ`l_E0zZmPtD
zN2i`C%797z<_4Nq-Zx#{W=Zc4h~;P$uv&~pFYD9P#YY23*R1%IO<a%nT_mewh}&0P
zCRn&J(3W*t^PbhCK8w0*K?LPh+j-dWy)4eF!{y1PZC1y-v^yFQy|VYry7SDrOvn5H
z-XrD4F&mB5K>>dAqg&yYBH2Nkq_mnLp-eSTw%J9Sq~ASXt|wI!2Gmt+aT{YtOs*;x
z=<USnF{o5DVTsDZV?Tw*CPNd=iOUC*W}YAzJ~QI6=EWIMae2ARf=!yTKJaV0g5^ak
zt}$ZFD=`oh!1LG9eOg|Kahg`~+wSM8fU4@V{BFB0cDxq&P7knC145&!(zbxg1Fl`L
z0%X4E_M>e!b;zF|L__cbS#hpd%qR@~IKuTRji1|KGdXx&=N&!~|J`wbA_v-V{&s!m
zQ2zfM2eVE!P1|_@nzxO25IBlOK1{9uP|hxJ6ERYvRC}(3Zw0N(nwfd#nv5#MyX#b~
zqb2<rKdPtr#h&AntJgWlQ$L{zH(}z`G{ybh>b{j=44+If0pAQ;BIlDnU8MUmQ|PdF
zB9qKzDP3`w8d-Lc#Qq8Y$YTFNr%Nz=3N@O&qxP40OWi(Dr(nJ<@09Gx==nv;XvNUv
z{*X8(Fn&G24kpC381>cM!(b!3i@Uhq@%7Y-m5(e&EV_TsNhFfbqnt{{o0Dgso0kVt
zGvS5tx3EsESU=|jPzfabX-GZQjF`~T(~5m!a%pwghQgq8dd^{H0mp%d1lBeZ2Crdp
z3O;VU^O&NHP}AgLBn`puzLPy<`M&Ob>?Qrn2sLvBMT%yAgizQY_JFP0=G}7P(9o80
zum`+e(O{6!o+*s`KE+IrNC~xeAXX(1;R}XR43R#zZ5o6knz)m%A8Av>^Q?6lAT{_!
zj5%Xgw4nf5Pc-S|=-m=A^8v&^u@Nj#Ee(BG=w7#~6Z|kH_Cd6H00c1QwfRzzS6kxN
z5O1~z$wqzjQMYn*U7;bjBn^2dXZgu0dwkYj4^^ofn391(+l<gjRtpu%l<yL9N!7%0
zQJ4L#bJCQxq9Rhdv`H1u=SBT#HKu-IgNBf?vdEX)-rU*{Dty&av$_KR#lk!Vfwc)z
zltgXl(ILDaes>T7`UZZg@xPjwq*xvG<I^;PTd6YjB~k|x!!2<lI52t?P9fOo?&~C1
zg>|eR3MD{ZIcsuk{lFEq7`F}RjO#}f*_ltT0c4MAMu);YFHX17T|R=kvO3w>ljM#H
zZ*Pb9_}pfa6WcPf8rlJO8{Nodt0!7Ca>!tD*J=<d0kPHjS6n4bu(MF*aoiNL!yE_p
zqV6hk*+z-^nR!XRWtt_Hg!s57N(s(o9{>^)r8tYwJ~KM!#C!Rm&-OMUcf{J2Nl<8F
zTfZ4g_$J)TqZ}GpZC2#CB+m48D6>ZW0g2(!qjVD0;2Q(0-!zC5Tp}WcpBlAiYug(M
zIvC+Dm~lW85WI1_Q`SnR{4}TmQA3Nr6wNTmuPgXt@f;G5jT^lia!Plx$YOEf=*U!i
z5+q8of$t)S;odZ)6A0{SZ{iWrmtHW-rwUHZvW|aqNCi$<euR=Pk#MOWqB6JF)mvRL
zD`BCooYqtpp`gwMo2-!a|9z4AY_#8g(YZg*l2%uYqulKqZVS-cui=VkKL4eFVbJcH
zXo>8|PG80KhW)Z?U)h?&j@g{--(gL)utV(nSK%9_>5|2*SJUS*Ot#E!T^j<CtAst>
zX%8v1pZ35F-2$|Z*O#>sp<v@mrvzkPfmrw)J^#ByxjPfDJNHN|Bw%B_XujL~C+Q9|
z-o6@Z%6JEgWO$Ic&cLdySSz{0vlbS^c$->yiAL!2T4)Qzxxjvzj(fai4g-_I!&13%
zi<+L44Snf#`m$t0qhN1NtC)$HTfN2a6GPQnRB{o)JBd!cu`Xk5sm|1lH(a+AmxW+p
z$}=Xq)Wzy72C|gMuB*yrohLH1A``ucHg}m7h#yRIuE>4?-kJ%He(nHzOhzbhO$_^z
zEdheO)*!h;h#R;q8q<xS0AB^TWk|W+8tk0kKz_-uhSQT`LjG4&6$R1&AZPuGj8mr&
zPVJI;yi>xOVA;c<x}CP|7h0TnYMT&eUb)653@q0uzMn#`mvAS2u^)J7(9^a6Uxsx)
zX`M(6o68;~S@ZR*4>iO`o8;ce%585N9sBhqk%mIz;vekk=RoFEx@PnupIiHU;}}#E
zSV<E39s1RE#XfK2J{X{_V{_$2UFm_}J{&;yGsn#!LuLLiEW+a}bH#hUL4Uc;;l=KM
z%+^IwJURq0KtS!JKtP!P8Rl_t`|lXfpt|SxRE_lI`GHmhC9ck9-2s*eAJy11a9mb(
zkjtoUzS}fgSFk^uwCs9xX?29x6bAh&ma(<^bbEDaRcyb0L;JmUmt`<b??uq>p%h{$
zyzTW&HpR;bLX>we6j`>Tc9AmnrvTfe`>QSrQo}JTMrUsQ`pcZRXug%*_4Rkgw-;vF
zZSUZF3UCSH^mT*Liyq*4@oYPxI4mW`p872FS;Bee-{n@v#7K(B>W=&Sd$~ff5aM8d
zM=7Is5loZNUCCipe6ZsS)wQAE&F^=P+cb^=%IK9Y&koe)<|8qD-;3)$T_gK!uH!_7
zbQ-EWdzn{kc4}tZdtG;B#Krv8fT5wbAhWQDi}oRjoEWK1-D@RGru#i?LwN>@{A#aP
zILn4@dGm;R7^Xac=ecO2k2at(u;Avqr37})`+nPVV{$RHHs>nPMx%BPto6XZ9{I~A
zV#eU<CIg}wn-AO4j4M(C-9ny1<LHE(xMR_rWr@%uIQQh7eqDpdsx}L+Y0-eNjP}eX
zk0uGizf3f5?Y2iZhXqL<*DVWz+pq-lh<*K5yS=y7!4}(71?=)xP8f*^oivGT=Luy~
zN6?A9>xv036Iz2q;7=DrU}fu*Und$gR=!hyOD?$c+646Xn(4#ba6t(4fubG-v7l~5
z%@wURQ*d^<;3PTR8)YuQ7x(;@RaPTd*!f=M#3L{0h*^#qvYE$ywfU{U4uAV~YW|iC
z^W8s+8PRNeUn}LiNLSqem4t}dx-~ypybtxLLw*Fv;}E2S@`cJ=t`tu^iX3l7th6E_
zzX54{_`<7JNaX8Hq7L_wPl64zx*+G_w8O>%kIPb0Re-JQE22u`?h2x}H`@DnZJc2T
zk)2WID=RVYZuU^{h3eo5@222M^N3I&goxGn(jv(f^og%zSVdZn{$dT=WL`?l7q1G7
zW?JM;uawyZl;~!P#P$nd&OYN!EM-dQ(MLmb3|NIR`!L?WUC|B@sJ<sfDF|1_!CRXW
zy%JG|3lmy>bC<lB%!el{2_AY_O4!c=c$Uaf^bUYv3#5OYMPh|cWpTM!qS|XhZb$_b
zE6Xj+Z1mldQ#~yM7NxR1oc{{naYk;{{9yw#I_a02Fj$8B&UtSQ#V?grYRmb`@R*y+
zxco$YV!1Q<F11Ft`EiKW1XH4`la1aNfR38YVV}EK8$|O9$<-zs`p1x;UcYRaWI~1<
zbr<a6%LKaqNM7qnEtd;4o{`>_+gB>0htP~SO9{hiRJa}!j1d)<cPWb;eG1cG?Ck#C
zZer<bq-Rg|H26B}&@?}}=n&r=yHma1iu58^Y>WG#y*4Y5j7lUGm=bE1jLEa09QiaB
zs(2@XPI*}#KI+#ElOU8-OCk%Wy3geYChg;eN}MfCV>V^@$WnRzL4t8WcGv0QzF2&U
zB2?;MUsFxI!=6tnR5)7@Ynf~VLA|PKshlFN=fEh>sp>$#^LL}$Fh@z?AOW0qbwmN{
zM_Y1phquGCKP>$ri1Cv@@o+#)fg1zX?VP{|W_jPcnyM%5#Li@kYhHpkZ_#52vqY^)
zd+0t0L<bQG0FW+?G?{+WL}>y<kZg`=GX+sh$?WOB9VmFMX~r?BMfxzRSC@JV<bl*k
zDIqx>rZM~fp8smuhWqL18ZryVo$regVYU@No;E#DgxlpvQG~V9Y6d_m(5Vc;k3gw4
zoOp*ir3+D3IwH8uqI!tvX%-HJl@Y6PKFDNB4^~NoaUiqJALz$4FyrsnHYhY@O?QB)
zn~&XY1Gkxbj@pbEHR(|6Sp>;8-9xekeg|rC#loBU!5HI(P<itg6tv#yxwL)xqbEkp
zlVt0n3E(+0d4yf&Vayc;HrU9YNX-zEw<jXkP3Uu?H;>Z^&Rn8?(~0N_8KMfI4`MF7
z_q$YE-w}aA{&Mihnq2?G^?6h<WfILQO=*O%@51Eggd4sIxR)-t8`CeG&VbL5&VFtb
z+b%g%BdiuC&;F0*_xX7WI;>8GLLsm-er7yL0c#WqJw21X3ZF5>d!ICh2g`oec;lpU
zrKnu{KXOV4$z!GE#&ppY5*kK}`xXAe9F{jHXoqvA^-L06qt*fbw~O_Uh_lD|51z+f
z@yqH~x**moG7F0bKoJfKBs(D**!!)ta3?N#dDtcUOUvKf)z@Z%nmYYEBy<jYV!z2^
z-$EsAXsD1?&pjH1Ndes!s%f@^z&yPxYnnag0)9rwwI<l3gQk<R{UWq(WxH;>f(~Vs
z;1@1pL+ps3r-?neviOW|8~a7pkacw^EW$Zoe@!NCin>rDvIGkslS#>_GUCnRX}Eu-
zMsPqZiywC8EXpcsg<zPNUeevUWE+&x)a&8Oya+Lzmq>+a2B-V0tEZV$YnP+QWZhB7
zT}<tV#m9r$^w)(_vY|AhheQ+B%kUh12!vZvj`d|#E@o90)8Mh~jN5>G&^npORcbbP
zNV=g%Bqq7LcY+rw=;R0{_CP?P3|hQFzlu~9i@^WN)Tbx}5*M$_0xRSOFzm}qvL@a^
z>-NCgN>xW>*Yv;c^$Wo?z!}=Ib7M<7OS2=R?n)qN@q>g~B}1s{lAARv4}7B4y{3*v
zG*2`(f--WA%A^GSRwO_w$f$#toL^ezGHy8@i2apx73xR}+{iwipjQQ>$(UQtscPS}
zRaw2*bxb*(D@XQYuA7vBNMR1Ob$5(R&EQ#uw+IX~HnIGtwxMsTY(GWJ2n0O7`Z%sn
z7`U~?kWIn0znN4yxV6yvWGkLosV2IWIK|}BD>K3fC_#v{-)UKZYyMO9Q1#3DmX52N
z)4fhNlX}lZQs^xsb44o_KgC>Lk5hb1hqH<jf0Gn3o}3V4KhJ<ma-^+Z>I5`txdIRx
zD7D3SiMZ5D>G%|!kC5OX3@ut3Gr4@i!nvSVxR%~oGSGgevXX+L<#jTpudR*lv!C?n
zykt*Fym)8Po}(JQS*l{ovOIAY$ZZ3eH5t0M;>9(EIgCQpno;JlPJNd*47nt-g{Rsq
zn7()|4Qu>@bl)GOr8`XxXFKVa_V_=Ya(!pql#Z%4KX$~xl*uko2vY(;40f40pJO}%
zAItfdh-{K*ejqH!ms4tJQUP?C^1XmoWL~BtL}FHQ6q?R^KY<Azx2|K}-uJHc7MSNq
zlJNveGvo!E#?hlm_g(3z*qNL@Qf&en*BdaJc61oxDHIzihMqq4J9@}^-YSpR1p~bv
zi(T^@K&PPM9p{*Gw$zJB66KhoWMs^{UxbpYW{(772zYxE1Z*kWwSzxn3E;k+eWJUq
zV)>tE67a1KLnPiEeAr5{VsFX|_RYe0QEwkxL}rvs2)?`vV`pil#s|QqM8-N%9DASb
zpR>cfZ%OXbTCO5#_#ykA+o!4+9(G0STDAr^mBSAv%UxLUgH*jb;k9301oq}Ve`TJ>
zwo(|f86G}VKTY!89E%_f8K2?*Jp_=b?Jz<5o$Vw8`Tu;WGz*gNl>}u(;$hQ8UW=d?
z*$4-}F#{K{5Mk(q-!$5#-y~;Dt3ZFfkx|h^E7J)!+Uj7lH5n*jRLPG6njY75)99Nt
z;ML{s!SlsroKfQrXPhl#HBans5t-4c&h1CSF`YimMar^dK?NnETs}0Vht&j(QtwiS
zic=UMQIU!;EfX!~)Fyz3)M6@4HO50PhAY9BH#wKZ*p@t*_1cAm+@Ca#+>OYtYfRyr
zJ^?sF>H?)&*FPpwR5|$2Zwwp&!x#B5Q4|>bToLCC)3Z<R(y4Z|iX2H(n5@&AqGry9
z1J&4ZDv7&3)c!uYPTfUNz{;+Hch-s-)L80(B~&*Y_1vSihSBh`RdJ@;$~=rI&!E%}
z*I&y!C!2<WDc$rvOm8o||AD~LPB0;v{MPIP=>MWwr~i3f4^mjSnde9PJ>@-El0;2G
z=2wu1f=R5r0wt`B#8`qNn7@lRT;67B%3zLk(OrKgWtLg?Jj!f(IKjc}Nok~^CS#Yw
z7TB@7+(KR`LPhZc%!P(!Hx@{WFq?9WI;FGY6wdJX(uR%i_rYDUmDH0cmLL;H@lOXb
z+#}LId<d$gC5+T8JSA><t=p8XBOV*^k;5xh-S11Srw8E<rBC%VQr|1n+uBIB#-vYz
zhC-WT8e|+5D3VbtQZF2iso~Z1wxQvQB2=jV@yp5<OfycOi2cw9T}BLt>j<irh_t^2
zPLPV+9pv)*x_IX{0`+79osRIg)hdaqD+T7O*}Ffd5t|vzCD72ql1F;Jl+PzKveAI>
zOA2Z!+I5{N@ol}*iaL@UDRh)e^I``pwwf8i9wuPu`r}R(U~Dc)Ia9eI@)TV)4^9_U
z5GA0Pw+J>|Id0cX57pFb&!w6_ihhDS1i))!e0b`BbODO=;8$yZ9d5HNwy&qde1hd3
z^x@DNRYbcGOqb@$5c{da_MAtceKtLMQ7xgk2;(|%;kU7k{QQ)mvSeFJ$ejirG4A$8
z`eRhxUw&ch=*ATFcULIR&xb-CW?j=HumfBB=}(Y<A6(*(EY_*tp4#^J{(pFE|MLi#
zSx+z``U8-H|E9g6ORFW;w6w5eqhc7%_+9wlV>3ubTqwWY(GA4i+-^Jxjt*Hc!Q8B6
zT`&EuwN8KZHx;I2t|?RoZHgx%6_hBUg3G3`#}Bwi>4yl9F{~_$yb_PIKzH>#h=e9x
z2j%bjygQ)+o*(hoO@=&7NjZCvBn*uSpkxT#<H1%o)?E6D=4b=G%0BfHUtt2u_wUlA
z1@m#QeoMmkJyoFnr~m2XZfE?BDYTC34e$Gw<mz`g|5bYl85F&zg4#+15~2sT71yGl
z3$=vD^@VicoW$8f#@AQ8zJo=}kSK8K9q{Au2VW-raDX45FD)Xf!8`Ya!)_U#g%aQ8
zNf(dvv-4LHX5|4dAnpEPaqCB9a&7V$NBuzkX%AXny=kRb$!pcw3(xg5<PbchK{t4H
z@CCg-4c=eLVyF6Or+S%Edg8@Fn>m%Ia%CBIvBHG0+2aCxG)^iwdSi0!1xgYn`P^R%
zp?3|K3i=^nL0W2r0&St)DU%MXwBH8{;^hS+hJRN@g&J4K`nL{dP5)1HrHhHKnJc~F
zCa$?E4%;>JC%VOJUwv2twxGF2$RieD5YcggRl;Ncg831<>G|{$TU_dOohx6H=0cya
z^Z2i}xd}_U)UeruJAhco2kqB~{^}h<M%JFm`W-dt#Z}7O*k;ziahD}brcV8Oc_J@j
zQNWGCal-PRUR%pYUkt;C-LWnt=-1#uGnw)iY1>2R&H3)&0c!oV$3EpPy{%TEz;i~q
z8;W*IxX>4|Lo%cy+j}J&qA94xK@16B@4Fsr+LPhc8e+a4Ya^6f|I0OX=MrN730$~!
znDBVmz#oJF`O<=uxEz&7nJ0wK$D-zKtNSU=$tt0k@^V(u=6Ozc#qeD_#!zU>V!rcf
zL%xdgBFrDFTo!IzmD#Qf$N0&`Y2=FZBt4I;t7{dpS0jHK>s>OugvJh>z1%+1`JIEf
zDbA9@WwPTRqh+Hr3S1)C>+JWRC%cx-y%E2DK6Y&G;lIq4C_BGJso-3Ty<bfb&vAZk
zd9M3rH^|D!NVs>Zl&2E*40BcAO|4tHxOUDbxZwCvn@=P%*p?OW;Rd6JkQPw&`F!Mp
zIL~;jn?IzZf%KH`xAEybqq9|J075^&%=}47jxL&~+aM2k3lpYwx+O(kOuE-WnNL+p
zvZVbg8dB#TXfcxmMLhC94RAeEK2Ge;Q(CdYB<oz<6{$Xequ3`RD#Ajn9o+BUHO-w1
z(QD2f&c$k7@du2TQUaQD7VBNAB~_5w!{2HVi|Xqf7$Z!L;3kGcv0hrmp_X`G@Ri<%
zEL}K_o#>h1A;6vsvtEo}3y52MA;v<>p5nclq@#OTa-HufHTVz%EY)?0eE5}7r2Bwx
zl41%`4qh_mVBUR@WzNVb1tI*9Q+y#)K6hYN?8RFoe=xAkz?GEzO;Z=b%hpAnhBZO;
z5nP&ji$lyKfU(aXGFJaBflhS5KLSPRmycu8PfFMxiv$Ye>zWZ}V9Ww5!Qrc7;fxZy
zjG!BGYdVdozQ*;et^Z46UYW<sgef3WK?WIhvG};$O<o}Ui2pd;UpDx&<tw*CK7VbO
ze4cZQ@5bfvah5ilAF>b8h#k}ww#%K<Nbsx3<?a3lTuC^c3V3}Y=wMG%hM!d!R<J*w
z<wf3>6!h1J>k8x<9YGz*D$#jZk<f0U(59UQ`EP_jRd#EgoR%((c#x`}&qAx;g(9-%
zjJ$H?m|!xTT9r!G7<~CZwo$FX1O)+)A>4g7d>>k7uHZX6suq~|+VnJn4f7{h^4j@h
z56X$tHy*Eq5WalL3KqfX=QH8Y;S_5+Nov$~gFji*JZcsp7T++d>aqaAtI1b`%(ehs
zHwHGjO*v5x-)1A#OG6o{K}KV*o>8XcVd?3b&Iz5v$9jG@BieC*_~;t<bQP^PdOyT4
z@!BqlvptrhuHerfz1?5<V!*(4qN&2;*D~O^({gKaBa=Yw=3y&v+YTDm(RcFP8O6VH
zp54kJuNDOApQ`SnrR`#-qwTOWa5;c`vpa8r>+fuAo0sV!M`~?Q%SAhvxoyrHGB!xr
z2UuU`frT0~GItaXg!1p%W^449$KWcU=?KuQ*COk3)~)a^`fxz^u5`-pK($TvKoR7j
zCN^abP{UeNNDg&?8J{k{Pfe*wQP@KqVGg)(m8eTl%@h4V@4N}0xYGC+xIbfh{){z{
zX$jcO=;&EmNOn6uc4p?t_t<9BLmg!OnzDs{=FH0N+{q2|7pE=)%Au+I@=6^$@4zkI
zIAxgoGj@Ad5otp*uHKpaDjT?Jbfu5U`De3866lrgdY<kRDmXSP!4=(HgZ$?GnXa$N
zn8Jm`;HQ>s{SikrR`*|>c#M$V!61U7dzx)Z^#~PUy?3AsQ@{*?)^7y}?Dh~WxaZ|h
zmZA&U*E^-!YTGzVAEaqPMzDwBy;Y#(5@y1YBBSNs7C~kHR*tw1!<D%YMYA{97muS=
zPerpf7ew5<#^B=Njti%kDyML%5M65*pIvU<>#QxcU<br1h3_VzWa)1KdT4MZM(^fd
zUhjGtfxyM%@JyZhot+Cy<R8b>&9y`FNx(~YX@V`d*}(#VztEeeNNF0ic=@S(K$KpQ
zZ0jjna@JeiMZI`4qM$3R4dC3&W>IY2G*<@7jd&wKXggwRVK-1N)e^_CK><6}sw3Fy
z+hsfA*D&~0<>-G&TJxcrOy%AmH3`k#q_H+3hA>t`!ViUv7qRdPMi$04S(Z#a(-brT
zK>~1E{Q=tT6@<e2Azm&-vHNyskc=Y|$qCBzbtdJ@H#>Zsi$XR)v5)G?d;Sxi?aQYo
zkVs&3j^xzJR<X|0Tdh*TD{rm|qLIWTzP*{a3wJLW{lvnQ27t#mkzm4MG>&efBAcHJ
zh2eUZsKQ*K4xr;U&62>1>G%^x$TLgnIhE6g-cS#6&=;14*KLy~D-4;zqpaA>w9S(C
zrESb{=z&p{VoWv0rh)Towfx4VAPx8BcLvH#nFVE}AHv{}u5c+HB+(YZ#akkvKSHs+
z(BK$Q`3cUJ+;#%^VN+}8V_ZD5nqCf_3A@!9SOK-#(41>#@B9#;Pa$_ElYf92=ou0!
zYLq}FYLZing<c=V@~IH6KnCo#(>&Upy;hPMM(r5W=DEKsjiFz|i0G7nh3Tycg8gMN
zP{%+l#BA$lPguO~+baL08%s3hwbQet<|O_6&=B&v@?ykRUKIz5(LsB^jK~$RNh2h5
z5NRTVd-30ywo-xsTISRxHiz|G-I?z7G&yO)Od0@!g0eib*gwi2Bg$s=;29~H=jK#b
zZEZJ+K`}hQsmsLLxq+tfp&IvW0lsz!K(|dF)XY$eG?%$8Y(LBqX_7Q~WxE|Fi7DC!
z)Zc}m>1RpD_SJj=KXw$IC-n^y9P)OAt5bU#joTdC^B||s<Iq%MR`Yci*J6GC%@FqF
zigIZkVq;)kiJ=!!0lC9_eJV@#q98C2lex-a@BH}KZx^ZF<fa#_Rw<W%-*3+=W*w|R
zpy853TSa6RHT;^~tx=8T8;kw?gnB1C*!E22rStZPshHk!ctv|Jb(OdRpNvDHBuwWL
z7HtX0Cj-!>Dx5JiBt%uIENVqrx5<a51~aNaI-P)^EeY}tusbRB;~_(=qrv4Y%Qu&A
zdavqzg?isCGZ^!&>MAhkIgi^K8I!_Y2ydd2s3I;_j8CUQo;PA|*B>{5!q)a5W+y8I
zhQ(w%2J_a3#voQdAWLFEkE;?!VnUZtZhCeQBOY$Wf&=lmn?d1wZzm{MO>kz66|)jG
zN532bKD{7d+mynurBvTFZcj=g1P@M%_1z}#p92jQ%X^AO=GP9nvZG&vx%K%8Wou%K
z8&$7DeYirDYxwrBrdZHDDSNUkX*BQEQvu+0&N9eOUjtLXW`ZvzAA-_AxP$U98tE_}
zsk=RHk6<W|X<|R^(%KpzEvMcHuFJia#gjVqVSrN4fL2=T{Dfy9fhKEQJ~|MkZUd^i
zay;T6Co8nE+!=8&$g9{!VNg`GCw6gfw|LAh@fB!|XPXvakz7~nG}=;|aBJm9^n2Gf
zIL*u4>$&QJWu5F8$5NV4GNfiuV4Yc%&t$slEjV-+nXhK0kr3&@!8t+>8UwtIrlthU
zn6?TKRy>-lMe0PExsBi_OzKq_sax;LK^icbe@>;vIld1YnB4hgmKYC`0ParXQaQ}%
zrhlm)nbhAx(PEXeMG$#rj-XHX!-@SG0_S>M@j!&wxPbr7>7Yk4sxOj-t}pMxgoDBt
zXzlFOc2O+kEH~gZk{CezzM!w=XX<HXDxA$j!{pm8_ycHaa#K**<`QG>gWRxvDuv7A
zoj;`F;HpAFR85^u3JqxUYX;a1xSZ|DgEUrME9qCAH6?UWQ_;R?dZ#tccAa*2E&)7u
z>^x<QT434<00z3%1g?o6<Iz!l)^4siMBl88``)~c&{Yh9dgWG*P|tleM@(fB^rR*R
z6x5oi4id|VG3Lf0ACpXt06WnMxj@<ptZE3f^Z@wDY3H5iDb;jgnsJgFCu+lDTsd^A
zcRg!J0T<NE)4ZtkeOIbxRm!Qwt10bZGt-fjH?1pEgpolX=x&DKB|u7t{0MM!VTTlB
zKjb@6%Qi!NI7liX9udY{8<o`D3Hsc&+jEBB@ObhGDE3#==;w$0qP+(N%@W!qW3NcL
zjY1s7y6nZ0fLZ%HZ}qAu+(!JHGea_W^F9ExOQUzCs4EG`<-VFM`Oh4cbE3kda@OXL
z2%sV2sBuX=7$LmGF^%Nnh^5$zF}ooMUlEqa-z#SqvJCIHoR<Ny@Xvfe5@4nvs!>g)
zzcpUOpGluy9I5Q|70SzFnRf0~wL67T_<?0NEr@ehcfM+W!Gs1@hwvpFQ;qjOC+FlN
zv5O3PfoDIj#rC`z0}g0}pI6}H5BC^B`V)y#M_b{JGI<rxjNtakq-+n;B8Y)AE+OZ1
zmgh4Q2owu!!m`^sEEU4kLkwsRsnNK|H`u;>OBKrbrFi($Gg{78lCdQF(Og>f#3Gi{
zh=R>sN+tkN+Rez+IB1>7AJQv1#=fP0hyW89UN~RPL5QX>e`yk^tR!Y4EBTBf?S%G4
zQ)_q)F=!sRhYTZOuvE-^?3+8_x8|EBas?D}g*s?79ZZwEIH-|uFOt(BlHsHEdv`)F
z=m>E65|XKoP2n)xWF3wVAR@nlzlA1!(9)QVBYpQ2McK^yS`ES}+eM|_h&|u?J}6A}
zS*IZ)G(0Q1Nrd!_l4%$5LN4QDS^!&1@`NQJ;jTRjao79u>_z03!^#86u_Pn*uWsBa
zv&8hpGDHUEN`)q&tB-mtUpUQJK5|kN?{EUe09g<zv#Rf$5d}2|tTqL7B79zpAgenE
zT*t7+!<&5wpw`ICB6`NxwWll4I~>RTrJ9tmN7LA<GgO+NooFA?+87JEz=nw_cc2R{
z5%?)A#oWyREQKZzn`aFAL}_=k=d|<vD5TN;Y#z?!5+8zQWCL4ombB3y*(^5#2W@6U
zybnIt2sWk<HAXdI1eGq61<61cPwOjB>j+d5@v@xSY*U{Qa5Tm;8Bqv|Mbm1}zJi;Z
z{(7kXLRE$9*yy3`@^o;48%4P~cd7>FP3SQ2J<Nsq0`6-e7#w5-(y#F?$N1+Xfl(SC
ze-1tS<=zi#QBj3n5~+QxK#J(QOub?NP*!8KrxTIMT!=-%M9*34yB96l&st@E)V<@5
z7A<7`I@-Q&1!q8R+AACW{Z=JcNy&osvjq10fWK;yJU#5fa<x6D;HBDnXgnZl?ZQL)
zy>UI(meE>dW|*%A$N86e_vRA(ey>a5{Ac%MHBle!^}8c4IU!&r55j?O52za=DDYFM
zE*ZKz{_KO3J&TKMw41z*keb|GMG{`P9<kJ3b3OdhrH;fIR>!x>#`4kVr$PVrY6AfS
z^<S$pH=|vsL6Z6I#p@>41SSzGT3a{TCNv_xR*V-3fm;q6!xz6Fq}JC-Tq$<GQL@KI
z$3TC^MlE7ajG|$|4golihUxE7Oez>ERQ-gO_aRmEOb|d9La$e{T=)GR;ZN_H)OlyK
zgWR}gg%vpp3^ENzq}UW;L!7gfxOo&=+SQwzrR3w$ITJQkCilKSXcyI{XVn-Q8S_mW
z^q}qWIGh2D&OI<pj~JA=6*_tZ5#F4SfV0{QBE$LQb!31kG&x3$<zf^~6T}!&5%mEr
zu@ZC5#{dYjLVXlNH6)KIqQgx~DZ72dQ%=fqfJ7C-O!P0%QMGr&sM=T6!NXfV<hTX@
z)e0mj`*pZUvX1iXN_oXwvxLve=FoQ{?H@<KJ)wIHz$+E7yC<XQ0Pw*(#Z@!x;J`e7
zf0Eg{rdA2KFI+tMvRTU$uTk3Rj$nnh>K4$NHw_iNT{>fRRrc(D2hO(8Do4#PJxCE3
z<UI%$8SS+}LYpS4aP%G%1CvRWf%I5OquGHxk|DOSoNI_CZBE1n8!xx<31?2078ViY
z44(hsk$VK;&xI@xV>WSy0D(^flFLyJz@D(6oGH^6nyLr}sl-mQSuzZZ_US@;2%f}B
zwzNi4cQtZp-|{YB-OBf@cvqRieka?B=Hxpkw!2IH<_TQV9ze)AhlN+za@t&@(oY}m
zlqp{gis0JRo752sSanmDIBy#^Yr$Cc2%LXE)hR+=hZemFHSIe};XYB)yKAqCwTI$=
zS8ddo>T>CAQFQb^dQRfR%u3jR64oH$s+Na~K3ror%MB#H*V@!34N9E<RZY0w>2Fof
zOg6}W1En>)1e~ZiDb3YZMQK3b@Pskj-}O;QaK9ch#H&xu_?5a&F+%pqfarHxOA~&L
zqV?{E6_OaDqu;+L_DRPIxYdq%vzzNetcS%UJaQM4Txu?%3w<kju0`=i_HC23<i;By
ziYk9+e9}O}*Cw@hz7-|uCLZ0iZlC%+V=WI}fFML!0)izJkyKWY=BSjA{i1%!Vr4$a
zdjSJ<7^ikKxZ28z@D_WQ5*AQzvNziAc79DG+e{8PIavj9TNSggu46lTBC^1}o0Nv%
zzT>wUF{C*(gDq>RS;wY;!wj~3M}nU%0WKc%pNW_rW{=!&qeD{3my8zMb&{$H=ylG|
zgTQ7(Y<)UFOBAOkSu5mNool`2bHflxB;^S=yxGZ5-<0`NT^EKaR#4C4%p|4Uqqr1~
zZ{~tf_&9kd=uhQJAm+!1Zz4l{98&(*HlTtToIOo-j?@^Y`&0`VzzkrJfhOJ&0XuDw
zf_!ny1@g<4C)iigjULt`eXds_W*5c0_e0iwiCt=HFdjEpInvp)Ka?iNRr#%Tl3G%n
z0mnl<OmQP(GPl4vAe_!-t*sJ2moLC)tE3ttl){SPtcA*+FYdaYtnfTSOSn)|!C$v5
z1f^QY%|TLsf#9A%3Eg59B2@-Uzg&_Mh@^!2@b>WQ$?>svRfJu0;v`r?a#@Su9)z3g
zxQ~8YpBdV#h@!^m$)^&<q}eLyR|3|(_V4y_h7osxvxX~IALGYQnw5575Ruz6?X8XJ
z$V=wiv$sstJb#q-c&yNBiCuPCJcIq_%O-Ds!u5S~rNSO3^_mZ~;0%hgwLMquW!6FC
z<+56HpWXGzdX#oZbazJJXGY$;GV9xNtWI-|PGzwIzyAz!#|tWZ(m{O(xQYL7u~4&4
zjg#naMAD~pHZcfvBTY1Y^8gXuI)8ccWf^>NYz2$`S||&2?L6IbJ2S7tssuK$Kf(IK
z-Q>{?ZWF~|3-WVZ$Ty3@BIMa74U2mWITRL~9&Kn1=fqACk~?Ui$&!QqBr<lo^%pPB
zviBz~i!aS`ZDkS<Slb9ZGh)VcAK@5+D=2|}+u%vo1+4TVcrh|U2I;+~KQzWp?2dw0
z7B0YHPkUD`F%aQl&&k!)GmFljE=`@eRc+O*BeSbMIXpY%5}D)>hW!?V@2dp#iC6L_
z80dW4Q%t-iH)Z@}T?}#JOg~jy6`UY&CkJ{+a7TNZCm*#>?D1FtmBb6zX;e*|J67Bk
z4JwZ6G2{TUA(LJwe{h}ccybl;P6DrY2+YE&a%%Qq(8~rvMocJ)B88z+HTgHMz@w3Q
zCHe6ukE7j}4s7}`vsgBWHO}RSgM}u;^pq*ba~OwPUcm6tBd#>5(a*H5x^?}lvzmD^
zp?oY#d)p}$W=8-gw|60MA!Ms<YI~p$3tOMVuoO%44;06%T8p*5x@}!rQ|FETdBrFz
zUhRpQ)495#;|qhP7PoGI*hTF8GQ(_nGg9-cK1<s&IIeP>EN5Ik2$c9@muod)1;dfq
z&kv*8BnyGmj%8+9pr2(gt01eU)nv;7C~^v&JYq|II^MLYz9Rs;#ES`np~mo=NAR{x
zFP1+U`I@KCn3*N;^pz(D7TZkTJjy3S^YzOem$L_}7RFH=#q?er-RyttPt2uZCaMMI
zoAg1E#VuRG7CJdky$rIZF`L3$8P17VCbcZWd8|DPV+aNDBvVJR&Ot0g^ctSnBu;si
zMN2qQiq}HJSMBi`(KrJS7sjBJMI}!i3WDWJEGvHYVXH<JP4Q>)JnBtTwA9q9^W%UY
z(6;5s3r$T4HN=2c!--`gp$P-yQQ9-fHmM5z^x{t<%kftXfv_LwA31@b@XF4e0>R}7
zgyfS6rc=}=vC`z#ha}2Nol(&mvbVGH`8ygo3*qW3rNFS>yFb)u2|uGr4hpludG~w7
ztqNv36l|L@Ut`!lwHnRcJ`g8mT?wMkenOMp4St9#%L9bbJO_HNda_D(1q<7ny$8NF
zp@}^|QaKudIDEbnl?jKF+wpZ|-|Q;3olLbNc9$pmn4r$#C_pqgqhf;tPk&DUn$WPt
zIudPTQi;r-Bf{d${xZGJ217bv5~BFxwF1U|CqA5MzCW%W!QPep68E++PZZ@G!h0vL
z>$Q-1+`ZEmFle~idpGfvq3@wH7W-zOwpG?5=5VzX;^A<V)b8kShex1C;Mid^E_JSq
zJ(K-O<ve;=vV}V0i8$^A9^~NT$1v9?GkF>a-G1kDkszL<Ebp+vz>}1onMu&yX9vOs
z39=rhC4PE&pn8J`Hz5MB_9Jc}3zWn!F(5;HW}$<q>wfW?XeEliU1gN+zBzKhp%%s&
z0L@s>1T%|K1VLr}c5X5cKN9f=Aa?b4iPgdgtm+!b7~JTuB~9h4gu};Jv;y|jIDVE}
zN*XA%1qqpqKTf;+<aK;0)}y+hPz82wzJN2hvr7H6h<XQ*taq{2ueu$tN^B_e(FzU*
zBL>JZzJxgl=DL-)pXY8=N2kSpp7_uEAe$5FJMYca6dWIbwK^PvXVz&#jywEK-ybI@
z&;xjnxSMD8Z1Gvi7#H3v*~ieOCTWd>gF0X;%BLBJIz#@{d9YOFXC=PO$dltjB_~+I
zdKs=Uh7YG;aYs~jJF7TI_VmT+2Q~)t<aXqgLTh_6u^ALpuE0`r@^BW@6P0hspn2Q9
zRXKPouWxp2ucg{+|3Y=6!x-S|*h|qFz@Z$zo8wlYh(01k>woehlnCHiS0h2O+EGrx
z8k!y2cS*c?$f7MUI}^q)UstI1K%JJAtbP8C+ovF8<p4lcm4a18JlNgnTFet!3F2{4
zi0O7ROlI5ef6?t>(>2@?7`xAh$9K@^eBL2aUAT+mktofExt@uos-dQzlC{Lgzd0UK
z7d~J<2E+H(Fq%ibBXw|5LlQ(uw(0;Cj1{XmcXJBX;8Tmz^OS{}a{YTBgb#Pevlz=u
z3NDgg1xzE+`(kcXhk#g_d!>Z3)Y016_TgWQUXJg~WKuC}R&i_)b*Qa+sK~<@(p*+L
z8D;2L&u}dmP~47LDg%mgbG<OK?6C05?l&UI9gKNp^qc|zXq}#Ed@yx#JO&QtB=5dM
zc$vTcV<~OntA(c*NA@C9!7<5NU4C#fo(~`CRk<gBM8<2+v5zd<jywcucR4|KSwM86
zN_#9XIgFc)NepX2Jix<n=BHsPScsEup_df9TvGx3^ca3Z;^fhe+MdR%Oh>XE6G+JK
z>gT2OJHOej^s8E_J*C;?O`p>3y+*&=rJR7yo5|50_Se@Pl}_E#dVk~x95d4~JJQVc
zTN|h+dES9&B5z;S>Qs%67m&|wqg{;+!?Ll$mZPi|4>A=~^1v+G^Blv!C)z&75Enue
zf(xsJE=}Zh)yGiG&Dpj%t;WD{NqHf!mch(g<~JT(WD>P8o0UrFuq#7>W58i;8sk@E
z6Ax82!v!Ky1L*-@ROqslOCJwTqLw@NDMlYpI#jBhSxfnC^^EfDi`hrjZoSunBd9!s
zI<drU2v`@hXmsTbX$QyNMUc6(Wem?aYxiL$`Aq~nGiUA^O3Km7p1@Xzc3FmnRmHVk
zR*!qWsKMGUnc}o0`iTe9$Snfo3C#~h@|+c)BqR16Es=s2#rDb9>%Sth+}UPVVneqj
z2E6;TU%KE>slJB)vi*Ce_0z{fM(`U!k&Orhg#I5%=kH9bgYiF1Hc`prHu(Uw;N|&Z
z{!?-%R=rfJO(muwG&J$h3oTCU(Fk)A<9TNSA1k|_6<_-#)`PF9t8vChcef7hZeHGM
z{G)q+Sq_Tls9XQL5V4#4#|LxR&bjaM)6^T><RlC)U1hE*yIl;DtF7udM7{&%)^!DM
zt3Z*NMx*!~_@PyxkL(aP%#KR1pY%E`a1cnT%>0o`uB)+>0q5dD>}(8$K}E@^Dod$O
z7FK2Z3VW}=^xDnjpAB3hq!xn0{nk;G6|5v4v?Z;esi4(SD!8Z`;IUS{-nK-?prJbc
zY>_`^bKgun!8dXD`oV9~R!fxyqh&afs>9G2lc(^fj#k^xysdiW-qa>kU(4*FQdGBw
zT+RZbTs(A{1{y{|RDsBh(2&|pG~+&6AoronDD|)4Z4yw`abizKfiL#~JRCh)xJ49@
zEeZW~y_;FMtfc>3ivIIk%{jM5mYFhtE_h3P0@28*2);U#S=q<$=iJ8!Y=)*Gse-?M
z`p+tfM$TAC>qwU*^#s>Y2WZlp;y_a4&vc^c(jI2{-+zrZ#$Oz_5!f3^>=PD_bRj_%
z8kNtX2W=jveDS*1epnbnTu}n#_E=TKSQtkAmSb%?C+lsIE^?vEEn=RbRgB546MC59
zO<BdbhNjgexQZ-4(I|P#_?*8E+?inK@+Q)nfnjAZY+Z1qA6{QMSH-xf-eULNAm?FR
zPpmRtm@51CgO384BnH9(MGZy$z+8**Clyu<&Y2DlX4H+rJA;93idm*c+o3u^nk5@1
z?7!qK(*+=2yhG*<c`F)muN0gl$N5@T>pVsKdvt*!B1iayUQ?vkCmt7<9J|gd=H=-j
zth&PT*rlIRl7Tpw7L^v{vsiJcPnY`T-h-Htgf%M#GlK`3^VMzF<no0^X@#*aY5}Q(
z_x65Rzb+5zig&>+O1cx&VQw0Xcv3XhTid^0thIm|5VO~sd!8tWVR(pL#6q$8W5hR*
zJrMtfGcsnmzBSu=NQE}JDT6epb=)tfiJVP@JDZHKW@J(nA9du3G;_``4<A?H1wfcD
zLj;&9K{lB!L$d0rLV@ZnLw%p2LG>1(AFU&TNzT<v{^$}kpY{9v&!7vjhEnGdfq!-V
zhAt6(BRvrQ-$7?$>tL<#L~o$)q^+<n+5Zh#y6pSxXF;kFe1HY%dOl6yL)sGsHCIT7
zx`?0*ZF>9>`@{T3p#oe$@iniJ&8Mio)JLD*nmQ|F(~p_?D06ewaI6%%s7%3tQIic_
z9nxe(jFZcxP3wBvONEtol7%vtbeO5e&U9-*`=0(~o`zwvV+q}MYhw7%mR@AcfgK!F
zSy6qzV6ysLGzeF5U)jr3U3jBq=9YfxX`^-~YmIjVflT6b$blO!Sy@`ZCT?gRL^`nM
zIu$#segw)j37W!Np9gc<VIdnX%S@@LUBzGiDS!mpbW{hq75|$%7r&q$Phz(gSl8v-
z@%>>xa`i^e`r0+7a=A&)Btm0YtuCWVE1YzYHhkcIJQI~KsoxG8%vZU}{mR$lmgh0r
zi_q{<L>LC&g-CJV9}4zT!@{sln(5Xe(l~L-Hml5n-;7wR!3_}R<MhYBV2=HZH5Ciq
zS=toJw|AU}&!T54uk(Z?TA-KG@&MzK3h_zPw4$Q~|1uE}(1kUO`nOg7d;6yT_%D7h
za~ne|XCvePz^dGnTj?Z!x7h#Q|22dDtEG{Hi>|e!DZQC-6eH|6Zb#JgN;S~Mbw=da
zq88jbca}JGV@3-=SW9ZIJn&^hY-GOdy6p*B8m|#+6uMqC2)eEB2URL2IUg8ozjEss
zEixJNsn+DaS9^IGK$yIOkqaPcbCn&}1fep|B^}t>zK38qgL!fvT%z%<fmc48nBn+=
zLV{I}j}5<?{aep|4e%0q&lIM`{5(b5fHiRSU-+Z%f|u{jP~+c4{_*|YKa^(w62!q+
z-^kjS-pS4B|E?ya|LF5i|NndIe<7Uyb%%diTbo+_cMqulGQ*q=HIL=@NCAcf0>b}a
z{{NpI|1hrE**ZAUn^h(W*!ly|hNmADS=?p^XdPvmjaK~x=!}%0jKi8;V^4n}MH$o$
zK5vRK{u0s;Ndg7N_wl}Zc=(vV^hg^r2m^l3<lhNFW(O+C7+xwZ36`fHPLZ`o)T@<>
zhID_3dmbC$9Sk<kK|Kv;(P1d$)FvHP(=)8s;D78y4QZF%Bk$mftkPeIi>bqm!)Hsa
zjMr<Qu8^J(F|l@hfp+GM^w48`Q$V3W?nukrQKj8G$Ky_Em1kH+7i5;8j|1*{xq2lB
z2Bqq{LOfj|%3V9w=UF3ZVtxJlL)!}NiO!Cgf^_4C=L3x7XD^ExbFbkAj3B;w!mzT#
ztTSujrPZnBJVm3E>yY;8@$zTE%tsE-Ykk2VWlujh^Cm(%IyI_Di?%==_>%Yt{h76D
z6h5WvY+1hv$A>SRZ|((3uD;$+JwH~@g2%!j#-gZ4!y@n^>yTFAZbrk#=O0=Ma}<jl
zMcv;?tZyD9#Nyk}|KNGrBd+U>`z~}PKn4QB`d`fSkBJPe%<0X7)YkurRQxW|Mec<q
z4D=8f?+b@)wqFn3uv0iL6I5ZfZ%4OAiFZ;De7%a?E3i2=dNP(c%)FVvjiI3LR9$z*
zt!)%7Z60tkz;==`zd}!KVr;K*U^;SG-?;HAL*3&d*Ng7oBr;eVe~+7`^voIDPVDUV
zS@j&;HEX|IJWI;n(PwA<SzFnO55LU<fR!%ktZ7&d`0I>nkGHG%hj2YZ?>mL0s*cpN
zl!ee|WYe<H?QGBWzPVVcdcfLZVUW$7I}kt$2WwgV7V}5+xXni(t#5b5pU=_1v(@nQ
z|Do%hgL4U>Y~R?not)UVZQHhO+jeqd+s=t?+qQKxcWUl?cix+SySuuoYxno<UAx!%
zt;ISY=^kQaLx~!eEO~mZ5+>k*#ej}9mDz%4RqV$w{7h(M?uW9Tx=phTT8GD>f2FJq
z)0UTauzOk~`-8d%olQoFTYu`zemv>g@(PSL*h*S=|1Q*9tm>5;S?M)>ciMV$+5q-f
z53P%UWz_xDxLF?xKVWA#=o*LBvF{lqT)9t2&B-5ZV&7rlHsr68F|gxdZ=Ng{Voof0
z_7i-%5Py;C(h-q5Cson6-qF#SQ14%xoTN0Y%93U=!@m1KM+cQSfR*fl#%+k^i!lM1
zSR!ukY)T0d>;5@H)3v(b+tFyT%>A|RkR}(2aJ958-;+E>=CdE}v%nTEl{sU>avV4M
zj3-pWYr6N!{AOm>dp{Evs65Dz#h>ZaCH^_b8BK(^Zw=yMXTxrJ=F3H0Y4F22x({C$
zvHc*>&Vh}uXt5a+a#OM4noU~K>|Qz-1Q2*$E`Qd*UhH8~4UI3kBn(WeOe)!+U)7Kx
zAxYea>>3SZhhKXRFe#ST5vnTW>noCONTPI%FDGHU&d0Gb$wi12DzbcYN@>>S7RVna
z($wThosjZ-68HDihU9BFuP>ApnwVn{AtO@t5bfaLL#W@wU`Ds$088AhFSHK5aHKB#
zaM0TvROpd&RRO=5-j0hg(M<6&Hf`vS(~i;X8QPVEhR0l$L0gdtIygn?<Q1WwF<@R2
z`vVoJzjU(U(q2Jn83btKjJ5JP5s+pAjqq8Gq6AnHT8%Y_@g3MOKAy~@_GAY^8!24U
zF2!0nZIACz+wsk?VSOMAT4B*$Ny4QoYM#OiDM32`ePh^ZwoBztOE<vU?z>tl9P8Gj
zF_*WXJLY2^KQYwy+`PE#EUF{y&8s%NKN+~aazk?7FD+t<jIS*LWIg9L7*wxifIm$D
zuC@4JRa@y4{=?;pFh_CEZ5=bU5y^tpQva%-p`R<F1@&Yv+#!4V?|G>8WcA&lX&IYA
ztI}o?>Z7g~86QCf?v2!7jdfK;Sp-|6hIjiS1`kDL$N17<g3Gv=oQ%i+@z0pA!yk_h
zJ^*z;0C|)phb7NSoSQ^~s84DL%)>RWJHw~w=o4G=nYC}*m$Xivda?N^o&hoax{lRz
zse!s6d+rS=P42n9d*e+|jsr^NK$R)lk4{dia~pcthhdx99QPxfW45$!43BhQoq7Cy
z!v~ZDq({?KgEBm(J%0T$DFhntuyyz2g}A>`lg=Z0<y^hR-;Uy`Ipi8}U#iKy&3_L<
zX2J}GpvTvf$5(kNFm%Afbmrot^(XEaa)_J&%?1n75yFxK%h`Kj2Sea1<J%wx{u<DV
zWBVeF-oYLYLRvVYJ*#cJ_4DOeww(K;KW+aM?|BYhn5X*r_HLtxVVA?7w6ZB8ENE*-
zh-i+M<VMxQMJA`@fF#I!aH(1|r4*+d*T<}m59f?a2MJA^V-%J0ib0$IgLEY1yHg}{
zha|dLZm5%sj-aOuKDn(wnBgMO2+lNhBb_7Msvo&`bJdw}Hpslio@2SYuL|d2ES={>
zSmrbbO|D-G$+Ju+s4-FAkmm9W`L>{t;^KkUhOLfZ6X4MOclECDr?KO3Fa?<Qf{g~i
zSz1*jzH;Tq;qL!TJ^$Hm3(Cr2)PLKp#qVJHKmCuJ8ynG@>m=&@N2d*~UD2C6jiEJF
za5lwU0}ZgnKv1X1>f~f-k!7R_jhiyL>#bcX8$~_wc|X1diCjNZbgcgea2M@=vAPSC
z-y&F!0$VZB;iYdd*;kOnCWx9E<uU|ni^>Ja{S$icMs=-9vxt^hD&@Rg%C?qzg``i|
z=zS~U&lSYbGZfUraGvY+t0rl%j-%HJC}&Y>t_*#^Qv?q*j^rBtYV&#m6Rjoylf?J}
z;F1Ok<r<_F)Req{*08bfLZ;P==T6J`wXpFyBYV&xc)miuc#ZIrt7z#q51>Z;1)HB)
z%@Sro3634A@;t%xu1ys>Mp_s8D1n4{DAXKdPEyD1G@KTxp^0LwMJvv=O$6_f;qd)O
z{E#l3dx6T*fObz`Zl|66&=uXoyL7huZ}G+=zA3*emrX=7&O~*3r6)cU6YBa-GNYUj
z6D$4aQKqT)RM4bw0reI9zviX?EQi-W2m1csc1rnMlK%qF{lDj>|7fcgc1F(Dzrygi
z38>!zQ)DHl7UOaNoanVLW<s@Y7Z@v#AUIK)A-{Z4nR>rB3yrF9`a0JO-Ao(RN`k?$
z1@}RJkEYanpubt%;=NFRWT?eI2~^9G4`%sb){rJPgQQBw_MeI85gDJ-i9B#x)33vL
zs3?Pe)E0$g07s)H#4XVs+>8Y$UxHXbGPbXsOwv%wf{!sFrqUNaB{$JmO~RD-S0r@L
z5GdK6{9yV$k$H8ji+FePyt_I@X1!?k#Ok{7x>FCige>=eKe<A8y07E!T0#}_e|{1F
z_h6l)suh>SkK&uEC-5keG%Qb-C4+@eF0DnB7FvL=GX-L>O4`!ISl4O!-0L;%&u?+p
z&&z+pwc~xlY8H4;B-4=ohtkMdDqZNrx<CCMD4+R&2W4zj#CmVeKclCajJHptiOQ&k
z7d<X}=#7WU7w_$&^JE-4cgUiU*;7%5E!{Sgw=tqpH)kEKq#tG^xOJxNDWeSCS1;L&
z=xYdmOiJO<M7iwrkv3MnDQ+*P_nhh%(_IQtF22;==2J}0P4!|fC~jKzt(qGEkLhr0
z0qJLLwHPGrw-u&(EoOHRn_L=Em=BZs=tz00?^6B!#Acyd__-|LNbBx9V^`B7n_Suh
zhK$R`2>L#1hOHSalGJBCXrBK8$O{ZJOTsikTJkQigvVmzoM6(f+E>C!n#h8uuOQ@>
zA+emQaJLO}Yf2!7-aOF?T(snPeL+^vevo=ltG(iQ!XlN|k^6Te@yV8dZ0<}<A{}tA
zTnTA-rgL7inwn!#1~CkxK#?E5u#;X5w5r4;akI-O$mjHm(=Lcu(NQlyQ;0@%86neb
z^j5Wr@~n(uk0BJL%SB}{m#_|p{PE99nIaxV4Cei&<gI2BpLC1s<hazyz5$|G_g{tf
zTbHzRQGE~EM(Iw=09HKM#1g7^iz1lJh#MjDn2DB0jIl99UYx%|-i(&XAl3&4ej_<^
z4H09}eXU#wIW!q^&W+$U-c$rz1)Gcug>9HDJ8m?oTBt2SZ}-Z`Sr`K7ONZU8e{^P_
zkE5;Aa(4jhG3?EVkdbFOQ;P*KS;}#$;9wqv(t^|Liovnp?@@wNxJ3vU9ysfO8~2pX
z5aus--_bhk%g?!IxJ*V|1CBLYX&{tdT)>NE?dtX_k%jdQht({X#wD5o;GZfc=Y>j!
z4G`D4J87<G%Zm;_!8h)#73l^~y}G>>-4Z=KzjXYDo|d?pM+d`89ZbGl!~M3+*%Q(X
zjrJX(h&UQP0|Y?MZ#Z8`5WC6(#a>e0g>nb_hgPZZyQhAVPYht-9jbVm3L#V-PYxTs
zii(?*qDHzn%MLhX&&%R0A36fAt|#1f%6UFpl4xbw%3lNZXWE0#@Z+l7tq)7^q@5@y
zl>vxSH0SwT^f6OA1Fr_dLmaYOkGfPHd=`GT`!eBfa2y+FY&B>j$&_<h99mR2KVQ5~
z;Q#L*YNDZ;Uog%u%`+YV0PBC4{Qq-_xRj$2_rUzyGBWfOAR!s36%$Kr_^Bnci(rk3
z9k`evC=Ah&viJ#TU+-|}B5?6HC?IId7VXYHUfpBdxZrDjU#8;pe7>e^Glx$Mu_7PX
zbxO|mR9>$-@q1bQbbZZwmsLLQ)Z@z~@=6n=i#l7J(K`~9iRRX&HA?k)ZfZ29ys@C6
z{Cpj&Ly2}k{Xhx|P2_j8j+HW0Sx$FGXO<-U@txh*aMhUwj<IM&I|98F1pve}VU%=Z
zD4@ZrGYdQCw=VV$1}pXaelAwrxWs>>W8dy7Z+bMqvu6uT?+o%%=;wdA*A%3qQ_9>*
zgrQ@=su#}(78s-kt-s<B06gx90-QPs-!3m6_kZW@=jvSo$q@$(`aj;!x8oP&V16~(
zonI;nF=@fWXJh69WZd)1&N=T$UE`o53nQ`3fA860vekJvOFE=7ctj2Sq!?%4pUyf*
zwl4Io@J0Hsr?xIW!`NK!4u}Kku)H4!i@0wv(zJMTaywf9coOyoc>D82Dv?*eg#$7Y
z2{NB&%K8lI@GES~PA39GL+6+<jnCi)cl8k^7FLjLIx5L!=H(&v@Ug)7J}KvB<L=K(
zrcR=p2Q2uuA_<YCt&QBb*^HjJX&_JkHCSn&4YDV6h=cnZ;d=u&>gY=EDHOY&lD;Gm
z5z~1@qqe^!a38g<A~w7q`>Z-hew{(A8~|5XKkTa_R+n%W`x+Mtt;bRnsI18H9NI7C
zYOt$Q|9WC4v?xrtBeaJlR^|&THL!Y<R;#&HIJ*6N=z!trL#igdAde*uN!7uIb%_k>
z9Yk8`+&?ru4=Ff70HMp{0D~D4;v_&C#U?@5&CNy5q-HiQBb&>ss!(+s+LxOsYiaiG
zHpXe(y^r5_{Ug!ps@`LTfqB!O>@G1{L1$Lsdu0ijl4P$Dq~-HIhd*JrhRNuM=BF=m
z^x<TW-O~rU%kE=kAvEL85FpQS_53kz+mXXdg8}v0X|hXvCgW>YBy^O6O{nc+WDJr8
z4IyGs68liXQKZq!Q3m=@un4=1pqw$HG%fEsEH5erp{(pGv^cwHwT^ak{9DHxWJEU?
zmH$F0Sw^radq)*K;#!M~xW|<6CRfc)29-=fP4SQ)F&yK|(fn`*ZF~8lU*WsqLsdpx
z?oZ~VrU2iqbx+LEuSYc<-fnb~r^-e@>tA}EYB)2hJp4oP5YdHbj&`8P27BlBaipmk
z+RX8XFB$Lf%3Oaed4i>1?86|r$Ti_xm|pAx5Jy%|6C&vshASZuex$&CCpGgtT%<Vj
z03Gqdk*)ZPE?Nc$Lt96AW$#GjDIt_y5uTHTR7^vp5m=*$j!519*fa<tRi|=pAV`WS
zD<T|K#H2Vc-E(Y5s`tQ_SliC^mN|83#hO&R_MZn2#CdUQQem4tu&*its42cqD9=NI
zOZEwVEP^i)O-SFcGlW=^vUfpPvWRq>LE}L#5&%pGP?0q<%!hVO<J{&v+_bSf3@V7<
z3dO`v7#2Pws)g54Dr6fFd{Si(nr}J-8ZyVmw#qnugPmk!$_(L)MU^7rIEOpp+Mwf?
zUBpVXG}YQqYYSUkh-h+hS7Cx8i9&Zf|H(CJMc`u5sX#Vb(?ZYmkl}nh_k*Kd#I@;|
zSPVc@K<8OkWXq4{n*&ESNl+<cQ*RTR*|PrIvu{9zi|xiAAjPm(J#ixJX!F9`w{f4I
zY9Jx~Sk*Zc$?1_^QJDF#(d60w^H&@$?{*CQsPhwJp=!ps>t6PuKYBtQNz(~*=4)1f
z(lqbuO;x@id)e03rLtYq3Kp{*X{v|ot4P~)1kND2%4|g1ReKdER|%f0rztO)Yx-)1
zmz7xsPgIv!4VK1;&XbCT6}Ia#ei=$9j6mR2M9c_&{gD_2+`KzE$OyrjvrYLV>UiKn
zgb&X8u;!lNX{2#>zG(FGk{u4;%6>nDuR!?lYHW04519gkw3V_W67uK{q4uxxMBa)?
zi>~h<__D5sNZ+*dkjNh>P<Nd`oZcNtQVdMq$=<({icr3k1>@ukR#RCk#p}cHr&xm)
zMQ8m9GR+v>1G=qsKbbESu@}i&g!Bged@Yt174NyxLzc1VLjIFqN#TTTNLl9s)#MP<
zK+Q+5O@)TwELG%<1#}*iu^P26)AX~C+`5d0XuB2uqfo})4MM?-P>-+_X-$nt9(q_5
zVH3b8IWAVgh>EJ_tC5J?V$t5S)rUF4bn~Z`*e!-|davjy-G5!yjDRMQ&^cD6wA7zA
zx?i?wIeB^WkHrrWga5Q}Z<(kxvh)*Oag~8&d%BA!Zf%ne>hY+%mAmA;bOve3O7wDd
z*d%RP;FdmWfDh{gZbsf#Ji1|~RpQH5+;nKh?zw7+=!HEt3ps(a@=L%RaQa*yD>LS8
z&L*y4lwUxlaTY_rh(=G^21dW&{9&PY3ZOjvtf%14hUx-F`u*}ACfEG4P;Ke7WV4AZ
zs!_033sQbIl~CG%>|u2;w+dmlmGF;-4_im?-he0f-y(~iLPAc(yB0OQ#Y6-y3$>%Z
z%RfRD{K8nk3!^I0?QnfR6sfp%Lw~Yl3}C0pe^#b;Mg-)c<*Nah%Wx0}8KuM#xPV$8
z;wOrK+@f7*8=c4*!lRf9<F%Z?@WKnHpY~vkc*yDas`$s-x4i5FMBVbm%%zj>US-i8
zq2BN`r^$ALX!pXe`;FW`LGX5h@X@*3;puYC&<mP}$C((q*0fFZe+@9i)CktTtCT_7
zQ4nfIRNMJHyoSCS@uQ@4I9;&c6lQVchiN~ohj%+L9^Q7N7lmJTLb;z)@qb=IaTGsm
zSjzpd?r-f<Q*Wo|8R38tJN>^K<A0{+k+yX-PyKR^{TiEsUs6*~KKW!HPVGXuJD3jX
zg}-WrWWP;E$9xF!@AovOQAA0x3MV{xcr`3V*=K458B5u7Kdh^GRTjj(xKmmk`vm24
zNwGll_XX>vWAtq@;&$(%@@FhP;`@_o(1}4==7&(=-GA=5zrA^2@NzI7#n$wgum}}F
zY)HC(GE3=+K{`M5tuk^AsMl^FrY+r8${#}{%wDL)93p9exsN(wvGT?}yKc{X*qoE3
zgj@|}fll?4w~_|{4u}@0>I=T)lR-(-%0n^;$&oG1%{|sv^$K?|HI-RSk9otmX}pCe
z|DF;fiIU(6(yAnYZ#07Xu{j%WxMC3V;mNy}5VMJaF%H}ZGf0HA`!2?>Ml!ScywNp$
zqz!Vqz=gQ+K=f;S^47ZT5=kCoZ_G}NS7O|w@uD09m}W`r0xR7!Ad;@fq!dw=lonLw
z#u&ydkXC9^UO?lgPH2+eEBXu*uvD}t8FwRCl}GK>J|V0qQcN__n{hbAaOI5nzBgfX
zs4xs*tm5RZh2{iwbeWO{PhW{=V>Lc7&oq%H{H+d)dySeN!R|W6r{Z1Pce&UzH7}F3
zTRUdSI3`%PpB>Z``2L4<da^c_%Vj{#9*~DiOPdM3?;<comXR*Qczg!hqEqP{xx?|Q
z&8%ECNn(IPpj5odEe>^XH%dXJrzi<iepWDszRnfVU8jL2f<v%liMHPCG;KC>xi2Fl
z(h0y^YCr+MAVN}V`AL6Ygm8)9UAk3L<GP`{Tk`B8xAKs7<NN9Q{&t~PlIQn+_R)HM
zTKWAw^|+<xXPrm|K8bvM-m`uC?$AEiPgPrMcsGg%hd3!lK9Tto3x4UBBbesL5fXip
zBd6a0G?q(UwPi{(u+H#p4D}J6B2Rt&SBoN7>@u2)K^1AdC(mEJ4nyhHk%0kErYueW
zLXn=YuRsRYjHRJ`u4<Uog|9L~CI;OPILlj|yJdW2$gI*^v#Vvi@qLO7UbLC3^%bS{
z)fNeirLId}_d;UvWVjF^>+D(t8)4`JK!TY}iVHcUer`JbwK<MFfmm(<Z7PKAjtelA
zH$di4F*-q9h^QIF3UO||e-@uNGr`q7xSFIcLO7BfL26PwFo+I0{t(Rqk^p@2!Lp?*
z9Sl+QE}}|UQZ)!%x=6^hs%@hI?Y;SFvrI&lz$;VC^Jdj~SxzU8fk83um<XGesg9hk
zd#%&?PCFY<9<gp{8Emd=C5fg<vx5akvD~h;=tv&p^8mrbW2fv0hzm%kseUd@9ZeDs
zi74%Xvf!@K(3l-88fJuZ+CF+0W@KiK`SxlQ>!eTeR3h=|pM(bIT7?d0NIm>Mm`$TU
z?Tk(g^QDtL_&(wgE~=}P3getEU66u*ffrPI9W%3e0{TYA3$(B-!5_>C7o5au#wEMt
za^FtrCu|caLytYN@F9j(rD)-C@YAkkm!rQpIr|yf#@20vt9*G<aNoz;j=Q;1Eo6zj
zvI+QHU?I?U<9*rePLhlaDJLRC+hgu^f6CIMjDNxa?zd>b)#e|PqB%XB6TD5;{>a7Y
z?t~R&jk$fY=AmF-cLVAaT(c*9YtqaZL?cIR=ZHUELs%SACKte}IN*Oelx`pfE@%es
zFCg8@8>Qq3Bw0qQxI%E#6zA%9m#<fFL-PnPgCDu!>8rfuWGT&D_?N1BkThnhFGVRw
z$jF317z`W2KbZ3giX|c&4l#t5=6t=rn(y(^>hgP8v3=c|*SZvuDg@QlPg~nCz{)0E
zxd54_GGh>={?{#1u+umY!6>PB)`I3+IrFUVUr43UK(odS=@$0&F1%O;!7UvVCP=Qp
zRyEgQ<<8<Sc>F$sh=b@4=`<0#%+*q}2o2b|roaklL;GPsefwT}TaXMhxL!PK+qSdW
zN8)II(lEW?J>@RJQ9t_8KR6yFO7L5<UWRF;eA@&8H@p4}$c$UE*dtL=79dOR>AMtr
zX2{mq8QeQ{RmpXNWVL0>xEKt&YmJ~!6CQ*uB{2V!nWl-&k++(n^zklY#OSQcojR$|
zZG%rLiI(|@9k>cD%0MPdEz&Vt_WjYfu_|eTy(V`C%AUxEhG2Uz2Ror|0H#M|F6p^c
zSj=XD%Y!fpRel>j8>+uLk#%%}T76>Zwnzy~${@A_U9HFUwMvWZNz+2=+a8tmF`@Nu
z6KKwkQ(C}4Xsb-7>g^zXJi8T7*egEy^UHbOezv-%*Mk}`GuHvo3i-2%y@L$*{+%nT
z2AdVVSo5C1tM>M58Y?T8Ar<spQm&{kzt_z??S&XP?M?rzb00#xtXnPh;H58}E-1Ih
zw)0?mPPrwEw%MjGfIudxB!>*NerNe3kBC?`aH<gQ^}vmQgPQkI5F!ajs!Kp1$W11W
zlOi6UuI94|;S#r`P|om->IySs82!(xC!Aee#V>guvNIc1iYQq~VYJWiZ`~&?4+%SS
zl@jrykpGlr1t~X$HCUo$;F2Xyu{fSJ<m-n53SxrWzjV|Q3Z{|)ZF8v>Z!`y#wRSf`
zQTEWwszekA5V|w>1fu68=Jv(#Uq+A#*wBMwxhA5oGepCQG~ERIn!$zU%7P{2Iugl}
z#hb7oT*=J8fOn&+X3v7WkSiZ7oVUGGd*%%#m_djfS@#1Zvb96j^6^ON!qM`UxmbA6
z2`3lq448W!X6j^~V!FQ-G%9YBNfTW$E-EbsW2WKt*era}R&f+&2A0Za{c-4grfNr~
z-v%WwN1B6jQD<9d(u)o^69u7SBWV-}xX*1D90op;YuqT+>&ZGCt-@RHqqf{yX~{ff
zsL~vK0u&Q2i_bW6w=A9hvT&JdzA})q?vrpL3tLdYaoX%$jjUbtVxQjYqWXuJtR57h
z<NWwuA)bj?aoC|6x<?ywSn)30dF)Ca7o-K&l?jPy!7=SnJnOSZByrJEDTrd1w8}IA
z>h<l!<bQW>Yjh_6!4=7lgh=4P7-W%&k?&7<aB!97<sClbWVbPgk!DgKfn$fjJ9m(K
z+H4f2ikRA3Uy9s{s)flInT=f&PIGw?YJQw(A6MlWM1px<5y9`%v(UzA8#kj9aC@e$
zN8zRBc-C+GK|nQey>B{EP{ON^i_<i(7U~qGre~Ta5y)Wj5W%xliY<(E%<$@7;vr{M
zP$NfGS?9%%P#ye1S!;emj;u4tP46@{@Tj*8NheVcx-cm=5ADRi*A6|$9i-H0pyenI
zps1gfq-ilnFDDIU<?T<-n{5EFK+VB5Em1htL7_+~v`DG$B<~!)E_0>LcBoTVQRI~p
zZSY}a4O*{he6b?|Rfk=O)qO^Wm>dZ0_chYG{<8)^)zT643I~q9B@l?4PB`97Fq9>5
zPBCYgg~1L5uS3+=II@bCv-%%%gsv}+ofDx9P;#f^Uxl|=!Y4|w55}jwjj@_HehE;O
z{blRET%5UEXsz?>hGc8aKpMIi^}rZKt<*?ERub)hS+pJZyuEQ+si!^^Qu>=+HfOoX
zr1ZqQ6W67c=A-#n%)WGHgbcw7pLm&#;v>prlxcH1{OQLdy@n`wZ6w9?cJqV59MaH+
z!aP1*nKfdbP^OP?<dOicd_6rk(7-aD%L5Tl`x@{Bt~DBRb=inCuj;bk)j!csZ?|2K
zxWHw^(Ss|rbI^0!84lYV$TJ~g5<}!6s|dQ9yb_dK<?0k3@(qX-Ey>G|jc8g;QH#+b
zU+5Tktn)fJ>P@1S^&&~EF&b<L^Pj{P4+!eN=M9xAHHk=0YdI9b*WEdLJq9912XNP`
zMw86OBat(z)t%746F02Purbtg>qiS4KR$*aIj%v$R?j5bJi5{X>WGA20ULB~3=oLv
z1Ot|cxs(QBp>0I|u}eiq$R~s-pB$yaAJuV+r?j_p=GYQ;VvF)N<4JC!j&3&-?O*+d
zNO2o<Osw>uTUm>w)M`1zTtj9CB6$m)Ft@}P9>-OCAR}?p``FrRa5@5(F!(Q%vUX(K
zNx;O&y6KH~dMy2BYEH%wl|)q$RaoI<cVSD}n((09_t@Sf+}*g=UJ15UXdUuFy58LT
z37D-L#&`9+(%x{l>Y76ER!U(;#YDlQW(60tg6E<8-vu4k&!LA1c$ZIxOCh3Di82n%
z1wOozbkrm$)bC_!ktV5gXK&ZB=~VAJ0@v6T8xG2_D%l@~V7qc_^^e35z*|2n3=YzC
z&FyyM(~AF%!QW=kSI*2b>>uow?*6Nq?O^!eyUrjQarDlUHS=_Uf~>=<wQo<_e@5Ut
zawPj18##xg{%C%?^A|h{_}Ghc<{**Z8wW>wEd`qmFpc(c{ld6ES`+DzjH{qPrc08c
zA)+0{sB%p+yrm<dHnGtt)u%egVHij60HM;q9^24w&uW#vRPHTDyW=I)LES!YMYwKi
z1l&so{s5#Y3Py-RAW9jdBSo!|Ev`T}3<;e1v}B-f{qR=tBLdSIrar&2@y%B1m~`0I
z$^0U04R7-8s{?A2*pL?WlknPEnIfv55`98H5D6Vxv;0jm7Nk<{wt@;`DjRANpoZRE
zotgOgIRdpxGy4KQCuQi|7U|BR(8fWlcMWi*cA~p*ARV+#g1`Z=oF1^=4N+(Xg5;hc
zFkI)Kcdx}hNE`mSBj~2_X2sG_EC#ves*h51s;kH$L5<%^kG5mF+=b}4XE-;tSiW>1
z&Fzx>VO<bg`<~cgKAjKTzE~Fg*3=n6Kb&MOupuN8cLnH?7$}ff%30koZJ$H>0rt~R
zN&Bc_Z1)Ji>|9-|6oZ_cIpH}Za=nAv5G-+oL9oY2xnNLqgkOtRLLJL$fX*RJI{hs@
z`dHPVkbXnP&Rn5)pbhEsJd4Q^<Lhv-1?c5f@rYT$CIErI3n1pdK}J3r_M+JixhA7}
zj~rZtSBDqYXvr#NNYwD*ZpiF*N~Zo|(a*&?*Puj>Q#bw+3e}y3s$3VRh_Ey3xE0IH
zS=sVbBfEvSN=daRc2nVx{i;De+Q{6LXcc|=g9N&uc@h$FZB8aDVq0k%uSj^$`U^&m
zR<Rr)!A@PtdJkZhZcsreVCo2m!TX(RHuLRZ(g_7OihrqA9T_!OKJdusifNQXIr6mx
zn1nF(RIO&@4FW-Ug+9B&%He`sMS>*QhGw68l~=TATlC-)dUO>X)=rQ^8q=EqjCo!I
zl%EEVpV~Mdt>I>;gX)o|J_{~7>nu{3F*4Y~5~7$UQiGZ1)V?J%N8~6Ex2jM#BwY{`
z4dV0LmZ}=gdMRcfDO_vV35nOTJzg5*$6%x1FrDoWh?$6h*^^SiVu_A`x_1Up?I_VF
z$BIJTlRIBPdu%uYBC}9OxPy7g;l;Z=!V}`34BRWT`A}|v?F&S1X)T)>>Bu97TT&@v
zfF~8Da5|c18EH<{cpmxdFD0m`N-bh}DPkCx^Tj{sWFL={>tM?dcY&WNs&6E&#Q$`Y
z?cM_6CLJFV@<NAx;5;ehx*%xaxu>Dh(QOHSafid>aK(QEk(_=eN95%(DSuq9nm+|h
z@Qm@!wmbndZFX+-Nu1FPz1{gmd@a3l?T*QpAg9eJ5&E_TC|g*{MT1pLZ56L<2E_e5
zG(x1dt<c%WOfHnd5z7!GzALCX)(%0;l-RWg{@e>5Rz(fY&O1RGg7<&$aZ~g9o9Qn_
zu4$pO&#PQM_k)$Ok{oc;=B{$3n1g3+ciml_Yl92@N$-B7=&A<W-a*1&T{s2F>e|<u
z=}5(F(Tf6<9=SNM&>tjS=F^3OPgOZ%96`Fph!XANhKCUp4K|EbJU2v(UUwrHQKyV3
z51x~d{FhufC?Kw&FBwLktQZY5fJazvFaxJR(%fu^87@e_b6o^-5aTW>zAVTVuSp3)
zfnJc#rllS6xuwoYd$N=CbhGu@$&mgj{%uS9;VMKEvWh~hNRO8AA@CjBP@Fb_ru}S7
zmC)%c*pg^c1VY-ZY*?gSWEdJgz7vBm*Y4ckZz*I@EG1b_SU*o%{4ZYpEHUvcg;Gbu
z*mGDaZ7~{dDiKaJVKSLtUQVUh(Ui6^e><#WnZ8AQHu$*gBk-e>9wfJ5wgNs1PveP5
zFZIv}o1pd)a)ygx_?7`M?78iOKu9u@v`8HfjiX`gixH)v!kEO-!hMEHZ8I)xZVG8(
zmIz)lmfCT!$DKWhVhyJaP$l3mT-+R?CLpy3X|kJ~W`Esk0;Z!4pDo8ZKpzR$Jk!X!
z%$fTBzv+wtqPRTHG?12la4ITbjTB#O7Vapi*cCG=`j@%pG0w+A)XP8Ij*)@VO-fyM
zS{NV44y!P<El%ELL<#=_nC%<UHtO?5docEIM#1Gxi=zzTr8E`<cyDQ|%sxW2_DnF_
z$|Z*q!Y1?3>K^!U;<?^uGjoK3@S^H6U#pM3-<^Fxneg<+Uy8~bf+M<C{6~Rc<h#);
zk%ALY6WXFNpyBGHvfxz?LV=GDYCN6mc(;$&)qDOe4xfhbube!Qr#okiIJnnwm8?r~
zLI1Qwcn`h12}XO=4%Va7mQTOC$t-@P%hu`G%EKWo09vy4fj^0|IKcy<r`-j5I8Vq-
zpTr%G0P8w+bJz#%XDb>5SHKNh(~0iN+A1b<R#s0(ANDjU&4-fK!`a_ucSRZ7P*Qi_
z;#2^a3|~XZ3|T7UqCp&ly)VmOHx0=G(yE)bHiybyckM9=NEzc&kP)@7V+Zj2+FR;U
zuYf1MzGdX=8t*M|QrU_>iP1B{39l+Jkw1q?R`nWpuTdJ>ljoDR)kEBp%|MQ$e=um-
zig$<CQnS{#BNPmwxVnpOT!cPn8fDVxs1S2yfRzT_$`XvX!pjv3K`K=l=qK5oQTthN
zmR2=-)QI;f?cQ>D?*!NZ?IZ!ig1)nO_Xggi-F^X0`hFd#?f5+0{<k#3rq0ok+F!|T
zk}&|le;2&|hqU59n0vJ5&DkDy2f`?QKP7oYNI%vbBpVJ$=<&1v@cxwaSaMnLixcOA
zre<9^MjSLg9N#jO>W~m!u|9otv)OHLhIu_5Ptf_fyMH>0h<o)HeY6c9ijOX3-CiES
z$DJSSad>%3h-;I8s6CB;Y(4NiGZ4v0MT`nI;hu{p$_M*-8wpaaoGetb>6XcU6~Eg1
zxIDeb<>6Vl`n~NP#qm|;`udUN@q9|^{>b9t<@Kn4c{~j-T&VSW-(~uG9W=qJ<^9|r
zrNz|(c)CCF`ZU3*>Usb0mto#-9l(Dz;`e&J4W{uWmagP=e;+~OeRq34y{-*@WVWlx
z_3}P)@_l~4pWT_=KfbbaXP*8%zkR6rRl+6eZMT~2rLaKr`Fv!4(E0wn2b_#t9zR}S
zQ$s$de7xMQUCfkaPz)J=r;-W<-G1MOmoW1N{_F82s?8<vy8PX=k-+zUy+)SQ`~gX)
z$w}i&NP|xj?s0!V@=5^skk$PrQG9(j$pezRX%!o7w04By^zeQ<7mJ6klyA}N{`P!*
zTDjr<2*`RnD$VPQ))lM`MM}J|C0?=HA;0_;U7jyHh)%ajSA-hF7|M*wE$}zQ?=q#7
zJ^?D!_dfli@N32d@|(F*FUwI2#TN))=N$^@pdGRnwGrGK@+CzTNW2n29X5|hpjg?`
za_IrnWKJ!hOzjWmK5ge^tdkA`KWliM>hfsx_2O&&I!$|T_{cyb_3FTHs@fhkr=4Sw
z-A5a+$@}H9XsPT6g%HOJPWL@2lN-_1U~R1y^~ts<P4AAPHOg(eauk8sqcTL+!7Bq*
zA&5leAp{P~1JxmvVM#!GN}-@=+kTN%eHZA5M^ERVpz?>++q(5TJDMRYnWc>02Z7L_
zqO$C%d)**b83}vBfhos%mkP2bOhPNr!`^l4p(~YZ@Z??f?+A)!L9|jjBsX7po+~S&
z-#o*A0#BfB1U>(*Hy^*<{XB7dmz?Y=&Nyj*_vVVtJaTKEw}-#}9Jl{WuJ6;;&}{u&
zy)hYG)5W}nxGtc4^m4=Jo_W+QKCpiG^2Ftxd33El82c3p4PZG|Di@KM{uPf0%T<&I
zrM<>hX5#q~&6;6$-Y1F<RcmOn_&$+;o4K6y7BeH8xitjQDAo9T`LoyYGO75Sn(8R{
zHSJ;GX=$+h!TJFLKX}TS3f3TBFCtSMh6>#5$&lJ9Zn9Wmsm-XcA0i4a?ln1Yd*l^*
zI9gCW_NT3UIK@JLAfJ{ET@{$?0t;>QS^0J>11k!pkc1?`5P#$jK`<(wL?h~%LmKi3
zB?=C9ZVP1kl$9J`p5$*rC|;@gDSuRl2<%v8LKx%)wcjLJm#Lm&*+%HKGz_<>BdyAJ
zwya=am&a>%${6(h6c?2~^^`c=8e11l-_1&_eW_2#R$rBtEHmAo25~sKP1nEjXWx;>
zu}_)os?;r~w79^B?bsRrIx1nRo=WUbgBNVaZF8t|8?gDpH?6tWW47S@w6}}7g{_Le
zwp@r@xk$@!;VlorEg5pe@Rjsr)Qnz;aoYCwvsr*~+M_CE7rM}v3~tT}|00AqBH^BO
zV(TqX%BS8`IyfL-w}TUs$l+V!p?F~=du{4Lhqc1trFtK%Tb#^f%tTV)wcR?c?@CPd
zPxF^<jf(c%(4yd=6L)nw^t(aoWC@w!QnGePdGX!RI+m&*8MP?6v9QZ<HZN|$L32W5
z?OjRhSWk0M{%PY2Lq~SM3cR^_hJ}gX!7KXE=)gW5bz7faF%WAooR76X#QUP9h%Rcg
z;GUx-TGP#S5nFZTxaSqTc0USc6{k^+3Gr!$j6J?}no`>Gl&niCKo#?)HUpQV1ljK7
z(!v~;eaULEvHg>(qQ2Ht%7qWy&2pt={^>9t#>B066z9!6B3J#<xb3XuInAmk6bsd#
zE$B!mX4Wmd$F35-1tqaE)9proP9+ry1Uru)a!z0`yU|fGUq1qE8cHaBF}wXxzzD&;
z6t_R`JHL4su((iEx07)kww*Rg%?sr~-8$(rAO7YO-Q-m4Ig@q8p1x(B?43R+$hjW!
zVXCn<f4xP`MPh~^YHE<M82pCij;5cNf=et2?Qs3RH2yrU4T3WEG8tY=M_ir~dQ-7e
zM{v5JF_*fZv7WvJj`q}z<>I1&K0|%c;j4jeB$siW3SP$P-HiXIr<8+#-Z~DO9Z+J9
z;zrd9)1L08!*^?Ws%E33<e2MFpllkulXkE0c9<yw!t%L}9osq#3#7#3oB8Wl8C-|j
zm5I4q07I<9bSvae9v}d}11i7s@NKK8*eZ!^%ayCX+vQ+33hb`v>+QjvtAB;bok!h_
z0p|G5H89L+db61wy%_sE_q{$|1WZGZYM)BQ9X;)ngsjj;4BZucduhvjz(<=EvK4y>
zY^vleq(g`uZJV*NBR&}w@WV&uiO1-?UYvj#%D@)@7pIUX-&_)nI)oK_@J_>vYF?|<
z!qTMC@$^X${U1!GC$rUlD(csH5d5XAc9u4|u=eA9a<CEw4-bwPuSx5DpX?B)Yd$nI
zyT<Y`$J#A#Hn0_NU5dXvxGNIE89xc2V@gFF{@<bupn_5$$a4|`Wk8Hs5`x44o=7K(
z2%Dm8Ycq&DU_;tCXAh#Wl$PIT<ZModY(8=VL)8#ZZoE5$1G5~f?@nxHKM-pY#F`k&
zZI&;%HLxT|K`xz2h*{W%05l2khJ;cP&wh~mwd)o0Sf-PX5etPxM`4+YinyGY$gqX@
z(JsF9_#u_?1r2D!rZoi_RUwM9Y(|&^IWWs&Dh&dHZ(IWTBy6Idt$rt$V0c!ukA%DC
z7C`d`tx@*>+mStnl6AzDevZjGWW^b4PO#3Z;^cy(m2Ur;NKEaB!fO2{lx5<*?bFsv
zJDyb!VP+IBh=sJMl1lpOKTzCZlJp^jb)le4KoBJ}(1o_JPD~ZVayQ))p#9@LiM{EC
z<C$6Jrgk+ePm?QV$rcyP?~1ep;S@06|CbDLM(E9BIr2mj$|i4sOci9Ro4#4)nZ+sj
zF;PAhM=T;UL2A88Q~_K85vZC#6S0PTo+KcCloOqDAIT+(YwAEz+_+Y2lcX&sM4Ad<
z9xswQXnEV$TId}mg^|-Gr7qG~@4l+bCWK^>gvjcGU~vjmkZNAUiZC%JUquDP;l?77
zUtG|}v0q1>ic%+by@#PvDhw!9o2WA)S8>fNAs9@9z}T_H_H3O3m9@TfLX$92rUYZX
zEM7R11ObUd+9m*L@wV#C;G9aMa)yY&QEs48lul8!yNyy`{}4>5S}Qv|8B6}g?$3a_
zl_*Wx>$!l8c8VD7d@Qy+ho<($05xKveY>vaU*PQhCXhsvzUhbPw=fnWG%Da7ayu{&
z{p5pSnxfTo8<2IZgW%UH<bb=8A@@%h3<4pCSy(u3_ll@20i5)IhE!v0Xe^)p8Q9|%
zqRZBa`Ds*V^Ms{60-MqMeFU_-dFh(Pw*?XD6bvf$z{fU3u2M~Oj)}RB?3}zy96far
z{p=a}aA1=iI@l?&xwhHDOw5yFub5BlsLY!X1$_C0Clai}fo4u=n&v1wvTo|Dlv2Hc
z0DMt3A?=J8T>nNOhGDZCST%SY0}?7XAd#>V_O<JvNGH=5q;~(&luL4BMMdLq;)Hs?
zv))+n>&IW84x<aLtcDEqi`p?Yhzf&b=#4pm(8f@)ms!kT4sa@gb6h*U*Cb-RNO?*2
z7I}|SdVUxvB5@|AwZ@0Z!rzWQ-p^U-OrQTX0;(x~Nh|X8DV}LO#`czrxmt5q$XaY#
z9GZ_f8i-DznWIXGWl72hbEUSJwr>X}T?k~p=?B}s(q(TA@G~XHAuL*o0E`49ZxP&(
z=QkpAA-@MBZxZySFG63HuD4BNO89n-8U{(AkZr}y&=<d2GT<^s)xk`q$cN@xo}HTA
z#2~j%d9iYAEpfuniS%J<M$25NQead>;Dr`>QDgrzK5&{f5MmlDQ~BOM9EehPq6^Oj
zVHzGQhU;I*OpQ``WU>9!dTXP;^_xD`B+kQ?660Z<+inDi-ig#wve7Y%11BD9Z#h*h
zYFi13y>zIn*6YYG%Yw}=EN2cd{Tnd>fJ8=s3O%O`Dh32T5HTxx`GyUefVyiD?Xh&T
zpHLObvE?jj7zl92wd_w+V&wk*5G!6i8(XZgy=OX)AoDQ*A}P&U)+N8Nd8bop@QHvA
zdBOIhj|o5r88d9iMIxHn$3Hvgx;}F@q3h^Xpwx{4<=*jB;`E0SzS@y@tyHKNwLnQ#
zwJ#yX{Ix5!nV4!Z1|hwHKW(u1N{YCN-<y=8tS=R>TDETXEC$pxny?Hd*<5nj-b{k8
zHnwW{^r}_;(S&t0i231imhR{hqngs)aBe3<q3rI`yv5ESwM`(=++B=jNzE10XN)H5
zPi@szJazysh4O%B$Atcsz{g1ycQiD%O4=I(Rvb{21FIvmitGvkTf*A7PpY$6hoCeM
zIKQ{1Qx1p>i9Hgbji4-1VLJn|5VRDoA&^$mB@Gw4&>!bTHFRVL(`p7W803i1UR@X=
zmF37>M6NNQbLG_B@$@6RnkN0h9=^a;a}{WHj5EN(+D#Q7v-e_5qZWO&pN~I4hL-c=
zHrzl<U+9@Nc5I@3C^0!q9MjfSldeL$yf{HEst~UJV`RIFF%yJP-lm{}ND90CwgzI$
zgd%v&@<R3&ch9X$9{=RFBCiE2*d`&n-FYqnX5D~yje_4(6pAF~))>L=0+L{dz4^>=
zxSy=vNP=)9q;-woT`zSxS4yDm(Koni=?#}?NU_|(%A_~EBA{rLi4_t;3eBt%Kdfd1
zpiYRv5v#P{FH&yeLo54GI^Mf*?+3t{uw4SyHc(<F$#pna0uRux$nHe6_%6N=KJcy=
zO`7(mciZS#WwD&gy5$~HcLExPUOqcE+SK&jrNDg27nE@&BELplthNBO_FavfHiYN?
zVyB~)q6(ZrKHJAt!O6A~hcuQ8WL?Z+GF1hWRhy6za+5L1g#S69nZK^=jh$C9i0ph7
zA6F9axlqDFHXQ3SOK{c8|LK;Jc|&{G)%>~eS5J!=%LHVP;nxRxZLqe^<djEu;G6`H
z5mqdH_q6KL-dX9YhqL;}AoaQymb-yCo;cA|yHT&WlF^gSvgVWI@}&_}?~v3{VKBqd
z*+fm!BeD(YajW;!B$*m{5dkNfZU1$`H!BzsydxfHd^wqcDt-Dj()%#5Gf!eY0@Ly@
zv@?s6`SSFoa;Z-e#^kb*BX|tOODbMmi<q`{F$j{%OB(AfG0V2~Ms2l;bp#y6k6S&r
zK*5pB-WEVjU44&kHvhZHCHJoSIs^(9EC!=KML9MTm<$1EE=&a4rcT(P!X=K^b_5TB
zX7+k8jAl@5YaYbB*^uvI>^mPjL=7D5U>~f|6o*J!&m$gZFK^L*zO9UWYavg_AeX=s
z5n5&|L{XJ{yLc6((QRNt0{6+{^zv|a*mXJ?w3&gw%X#QtdgF4*fD?DNwNF5lD;#71
z$d0h#LfH9Tf={&O=CvtzR{UivZ8<OF6w8PESiKu5{`eFi?04N5O5kcx9rSY}d#f4u
zdcixs!GeIKJs5g&uCqf#c!xOBdIcHJFLKH<w$y3$#Gtc2f1!C3+5u#VpuS!P<O&<Q
z+`iPQCF4qIH*wx`Sbn9<pf@TcYp`&=C7v`;9o6x-GN7iyDtw@m4J$Fl3`8tu5}bY3
zk^mzY>Z$U%A&}Ce;-cbLmahk$mHtpyXT*Mf`QqjeYfS;KaSC7^uqZfAyPN#VzhL%}
z#&J#W|LZF#D80=kd60`@&UxH!oMMQ#l_8ZhqHhD6m38CFA5XC5tZoL_QZLWcDZ9+q
z^*{s59__>G$*R-rqgW|S^wM#IrT5HVAo*JW=w)Ob*jzlm@T3C`3`J1D(2O;N5$tJA
zHG65V5M*EZfzW6zM4_q^%6U96&0kIk6CH5OqFWAcMU(hg$e3?kiFnx43^lOoL8%_T
z;`th@vH=MB#2Hex00ON{=3f(&aNMoKaYr@Z?xm@6QJ-7=lsT``+E#P}%{qH~)D4_^
z7rkC8+qi9Gwn-|n*H!T}T2L<g1U+JPf9ZQK>Xo6@3exTC@M+vU(}9kS4Dg-%_WEr2
z1>IfQt1`W8mlZhMK+14=k3$f2X=k>$QX%#@7v{`dNC(^_c2a$Nq{%}14yVVc&w}bc
zfZ|=BZWx7QX}hevnHA}xG)rLccpQ4%4dtm6FsR%#s*K^FbSuI%G{gVyK9ecvQx#94
z%0f-1<yrsCujU-~w-RJUD?#A3FiSEY(>aa#Xav|kqR+&#6Rw@&mC?@ffED$r<WYU@
zpuC1cBZqy-XVSXl*{6Fw>s&>ql<5rAwvE+96^e;fsf{hfrer>wE`~5J;AC_+rgg7=
zy$^i~g-0`pOTdD&`c74;j$=xuAKj>Gmy#VrQ@++V{*N?aX8#+Gpj}?QQ8fsp)(^j3
zbBT^r9f@F&?*@d=x*Lxii`}p<qr2$%SH^*5=DleICFKfN#TawT1T&a#Yw(SoHCm;F
zx*N6AoYP}}UwXS)IbO-2-0f#FJo3|%(~<&*43BpWXEq^j@Vg%*_SY9*dFe}2{?O4h
z@41!V7zVUbtv5Wls+X#SZB3eGUDj?A<1-=4$|~fI+2FU8X8%yBgsq0Eo3$u{4xii>
zAN|p8FNK#jmv&Xmc)^~m6E3w<jFYEGc-=DBX-(SMz>`OZ{ckiVY+Ct1r<$^k;uf`9
zcXt)Z(iM|3wuXyDIK!|}gV9MnNo|lNak*_ZlTc3$&4>xmL7F><Zd7VwhepYItHNvB
z`Wd@QunI+!Kp9U^6dYPJlGgz}j>5~XcuK-{a}pI}2bdYjD<|ItwomGLUn1lI3AEzL
zer8KULvYzWnk+beqO|5|qq2aLa>(nHESXU)zJBWi(@fhlVFm9}{Kg^BQsm9Lf6Y1>
z`lRC7^+Lv`8c6E>?Yv^j%llp;$92BQZaiYMO)X48PR6nzS|1+7F1$7jZjU;U;>yW!
zUY+^$I5P9f`6lyN(?fPnhhUvkl4%`(()U|mkNV2PXMAuyN2^z&3XKmgt}CGag2DYs
zw}A_8sBF4TRjj=+^Az=bckZ|Mj+4^Cmk`y+GokttHmw!4!3SnEBX|~zpAE~j9_z47
zUUQ$8|Fdnh@9bnCv1^V7AE#%$AltShdFpnCIRzb2PviZijF^GzRcC$5^BGkuqMLE`
z%YCYy78UmA5zki;ZJh{wJBHOIZG+$=XgmyOMH_NZNKVmNOeJjWP3nEOV3WK+v<};Z
zJ!{m-M864C&mVWS(6_RM8Qj;*nSg~HkxY=@GfFZ+rhpr^iVw>sgq7z{b198vkgGGi
z-!1Fg(NgdKZBMZe7xOFlb<fTIdddEO42^oOCXP-PcDA(UDM>nZS^r^ZOw%)HXUkH>
zz*lL0ip3O<hakw;_#@M>xk{p%9uaMk=Et`{mf}xivj=pR{ld+Aa`YOg7T5r#ue}NC
zj(X@GaCa#f)~5gWRO%Ijx{w)gmN8G4eYDWIJz5gy4Pd^~s-t8l^zEiSEgzKq<1^F8
zAC<Tw=~7CHRhz<awZuwO^cd^oy65<6Ip6D7!+Ky4+#6$vpEkz`Fk7EZHTZ71idJ`p
zz0qBd!;UjFqsD&cY4Z^V%U@}q?p?N9#270dkHX0}V|cqO4N5-cdpzf@keh+b@)2~H
zRSkqcS1Oh7<w`3sITVW|!augGHMcl*46rK|P!1ZIx$w}n;h1oI;`4=$#p1XlUg%C-
zBF{%Ik7rW+ee@IdzP@KQVSnv!WS$Jv!U8Um?gJ(fEIQm1z-d_1ri@!Iup05?l%!Oe
zM8eN=<&*ScLnh6`>R76S5QzI01U!IW%45E4pMB@pJrzH?W814;DwTptE4qhsB7oyA
zVo&KA$%l@|=92M_CB5d;I%lrhddX64%tB0au@rc3>H7W+`v26`^p{nU%5<Yp`CHcx
z5CDK*a?$^0RoIv~{Z`V@#lqS+B}qYUgB~R$@3)%e@D|M_1TCc!UaOh1mCKNe&M=UK
zRLyo+e@Wn;Wz2tgWF;6_mHV`=-KJv>lDxo4+QVxLL-ge>!jEw%Q8ECYVm_t!Po<b?
z=0%0*36wN$l2;GKLl@@KvvYEDV#j_=sl)@z=MQW3B@>NH5hWXdQq>ybu>seeWrvLe
zypcetspezI@_Lgl=p&OVF=OcV`Szx$25ugVMGlTW9a}D+(qf$NQob|4)W5W*6Awy^
zIrw^+9qDJMGND;uR?0)|QD6?=Rgn*_E!R<p{}o*8C6scq*=Vvx3Y<&`1A1b3LtR`7
z+XbYSmyuE9Xc^)_j2ne?mBOg(85Ue?1Ef2d>yR-m<T#>lv&{NOEiPBjRkD<n=_sdV
zA7^{e?|1-Mp*Lk{Ikj91bb{NM8r4_950|n6Z&b12od?~OcPEC(ai|B<g~!YYvYmc;
zua;3HnNU#4F`3~a-iF(=AG$n2Ox@%qjKuNODIgG-U-}mD{WVxwj~cehE;JMwG~iqR
zVq)n8fGK8_@?4E*^>KN|Q~RJ@A>P!xI3)hQ7W_YUvi~o3<rn$!zw2sm`rn`+w*uoG
z7r!SneqGA{g%SC^$kF+i17c&W_xtIF*1R~5&kC3UX6WQy3B%SxD2)UGNaugCbq-8|
z08x@|+qP}nw#{kVwr$(CZQHhOb9%1#ZZ|e=@BTvJRYYdxm)hq_6wVi9-mP@36BDDa
z2~%x#*YUwqc~kKtX#72IBoUKr#`OsRd-`|xPhWs}0ELFekaOh{+;N)Qno`mdjoQ7i
z>&^)W&Wy!Rnx#)05qN*574E5ph3;zt)`^Sp*k(VpE?7lm=5Q!_pf6I+B*p31Xs|AA
z5TnS^a95@)tfV;&2GDLU>2;yaQ)0^jC0DSI;|FAlAAiFhh)oR*?eM!>N_>o#^RZZ6
zmxCkSkX2&S2elbd(bw-_m#z?R)fdtgj}O@g7WjRmx;Wam3)@f=>tf&k1I4lMBiN(y
zJMCzp0Ra9BT=Tm)|9eqdRBf!bemSAp-%tsS3C=|%7T_kBz-#m@ERr!dp$)YVYpG_6
zeKV;^$&3H^Cg>lv7jJ|w;l<w0JGgf)Zk+>j`o-Y<75)(pSwVb}s|*8H68=TT(a^Ib
zzNaOF5JTg0>T44S^ckLq!Xx1ZvAKGf#%&e=_W~Kw#ToPK{Bz9bHV~Xnaxr+!Dde>u
zlVk)pF5CndC8Jb=-QMmWNrT~*Ssk?cR<-x_-cZ$mBBG0?jpw~26`$fE7!Z?V#HoA=
zExej<ZXVUc8H^Uu!wU&FJ@Wu>M6-4AY9;px8KE2n(0X8N=HhUa0oXj~pO2gIG6ggR
zL%&>wO)=wgU`Z(bryJYN?UygcS>dee0Jrui)w!~-6WB>;&P7Zy*=!(?xMr<Ce@3*0
zm&J<wOjOXw(!W3C-udNu&Tf7WD!CZirO2$ia}67ILCS%vczW*peUG_(0-3@M1sh`_
zeUv?E_du`pO%LzmdQx4*Dja&L6j({HI}WD8s+j=$kU5sz(qzF)2vmf@8Ln~5fW??T
zg$y;%{fhJqU(=IOFMJR5^Hl^@_vgwGKH$JzMNLhuFKLlG?m{ljL%@Zqq=j=Q-$D~F
z+I^bXwyJs1r!U-U205Yp8uxzj4wd_4yk#)ic7(t~u3UoA$~7MOj-ViDDQ?#|fsC3r
zk7;}d?N8uqfgo7lEEYMt4Raw{l(KpgVEsdXgW>%XMic7<nCM+U44}-oWVW)*fgzov
zDd@?!X8=TEe&dBFZM8{xbucT6ow^h;V|e>~gM;ZDpmnurm8OSc4=}puTFn!xJf>N_
zOIpJKqoiydRC2S^fIfdXFgeIdBa=#Z*EWRinOf`%!m&KTn>w%mwh5i7o=0u$$&Lma
z4|1Lt>CcyYb<d#7+Km?V$Zbf&-~kI+^X!HfjBwn`S&?KIGz%|%bK^g>4e>5kpWb9D
zd2AJr^2!e+kkYu@jjKw_Um43%<)#dVFJNM*I`6GVei4)3F|i*55xF$M2FWtgw!W89
z^4#dk8w&E?{vIR(n|LYI0SkoVT~?ZPGGUc(U|Gl(le~F)h@z0~sY8hO>?nv!1_6^z
ze6peoS8<hW*cGsWE&_gD3SN`BL=k25`FcogZ_6B5?jnvV8S)eQzu%_+Ku3+#J|dw0
z&ZP%S0D%8uBK@BmsBdg!NN03|YoSam`t0;by{REhYTXZ_t}_!jBBVj7*^huSQd1Ex
zGk$%_#2ny$zvokTmz!IJ{D*h4CUIJ(&-cyCr)@+&$ia={?xUC`%G-|X(&J!o4=1@f
zuozy?jVaUh7a)-rGGJ_-_Vo+hOuXmBdj8h_`r4oUVawhQeqEF7<Ii^XbRBTM$M&W1
z-F*5QSSJVns_R)TY4h`Rbbk(Aj=8gW*{QDIv&6$`>A9nSmD@zOn|OuR{B*rjN^Z>z
z6CWEpj`HL!NbWB8c@b@J=Z5qYHLhCgc@1<xNEtF`yAv};Q>1mz2Wy1$qZ;9l4Ec~o
zVu=KW3tZvqnPqu2GtS{hfeyh1+Q;tdky{Sv`>A2i0d30}S4Rcyxi#K~w;lIv^Ao<R
zaUfBEl`ax(o)DO>R8(J$?u4C09gO3GrXb=z{%*V|NK{yNwYs~%H{aXh-{t1FLx~DW
z;D`EK4ntB^tVU{Z-^G8ChKyOIQV%3xdmVd5+>dK3XLk>7P7xTxA<>rwvXnP1!RcSu
z0tPuwU4yWj$6puEF94eSmo-uj$^*6AYQ8XL=Qf`i?`wz}<<`3HFYWtS*UVKHHvPF^
z4*|OM<y_|v2M~jTtH)2HQtg`MF|)4MT*3saQk{xYL(6t}E1pm?)k9kaJj#6odT`EF
z!Cuhemm#C3EUSWY=lfFyy6n7DZIJnLhLT$qEgbyPWi!*|`f9Pm%cuCA?A7z3OcS{Y
zVhRJyD1I;*%xHh7KPb!MIJjNL{@eD0DYTvE&Vl9p_P^_w+_hjBhiRT<N*9(3L7h0o
zEP%~B7-4;sy;#pa&m3EnZ9_zmV44is)1zGWEFEy8D%%Ezr=kz*ZHhpX!E$6!T_!(D
z<;fWQ0<f>9iSaD?DNV|JDTer<40sxuQB|z9C(7fT=KZv|{7w<Sd<udunf`@A`2MG>
zB3Dd>MxaYk$b}dtP#>iql1nv!rx}vW_yTYxAwXsx_h~WU%OSse5m{NQ{5WaH_eL=k
ze#JLj??At-!HweTZh~8T7>5JTQ#ZbRuWjYKvTAQ#8l1e|2e`~S`rTydr;C-Q4{jt`
ze;mHQOmwP(E~@S|EWrW$qHPHY?`KQ2`A{T17G`Ll(peU`m4OBA6UDiw%4mf5Q$ojw
z-5ZayTH<9jMX&Pjx*$-?eVOtLZOXV|q2j21qz<2>*A>fxf+13DY;55uu~ngKf%uZ>
zjN4;6A5XNEc+F=t;l5A#oP9xT7HrS?e*uUnZ;V%{GF~&_<6Ry^R6#)?=7sf#%YH*u
zcV6o_5A^BS-~%)LT~`806gp^)w}HIW4Ky=_c>f4k9sfz#yzIwT5BYfl^R`=|DEEl)
zov~f{2X+v9Ll7SdlPFp9C@1)$`_+~U_(Lmi9rU_6dM;L1uC0<Dk3~8CRo?n>c6acG
zse6-nzP<@?b9P(~jU)eB7hktawJ^%VLjWa3AzDdf>V<N@jGC6}Z)8fS{~oQqC!dzf
zTu@ZhS;HfHhEJdwRG@&rt=#?;2zdPMPlgKePS%2Ex^d=zM+~_1VPG@tr_kZi&Of_i
zsTeNJQe>ONg4rObab2=%?Xzh$54&vlP#PHD*ck~8z5MBFefvC|=jQ+HxXAJIa$dJZ
z5#^$x9{Adk6%D%{{l<3q`5>1J@oN!|CV~*tOCUq2NHDDebxsk2Dk*xfF@x|y6?w%}
zA%-CYVJs%4$owL~NJ0wvd+U<IPskFqEx4izxvFwU6H&}iHEC^&ozd-E1%fw2h^J=}
znv|-P&Y&M)!=5Ql&M&}Rhehp2(<!xG;-8W6?`j*KxKEroio5~n9rgRf!1O*^-`r4z
zqBO??g4UiPQkDIZlAy^ELdGL%f}^MsfRQ?IOZu-M!weq=hI_MSyKxE!Q{|9J!-c2W
z4TxPOK%`s5B+rzEL*<~q8i0`_``I0a;5jHwSxZmSY1Y!~f?k(Lez(?$$F#aX6Bi?3
z4&vxs6P^=_`%-sN>M=SazRHI%i2!Mu6dq8MW7m>c0?}VBSEhY&D?W?cL*c0(;|ww7
z<p9GUkeJh>X49%Rl?q~oEDw+X_pGn<pRxfAf)VtE<S@Ku*K5-9#J9|@bR`9)tR_&f
z<I{6>tX_a-GNh)rS1*~Nbs}86yB>pF;%2ZM?PF5oA$ro<oeSD+WPH!(h!%(G0bFs3
zfF3~5(uq;fphpfg!h+*L9A|~mqsrowLLkkux+YK`Bcd8oYUu4C-lkgC4Hg8ztWSj4
z!F>+7@?@%~<HMST8ZOIRW(m$-<w(ODE(l0qmiO4`i)tl5!;)&^IdGUnohR@+obqpl
z(Tr5tQh0M>t7Mq77v3wGK5({<*aJ~VBG{l&fX==_fb&aUm;r9W%}XC*XmfAc7%h2#
zXq|e|i}l-sxX0S`%aB_mC}fH53u7jcHx`z=a%AmYfye;>C~g5ST@|u9!RF`8R_j(Y
zwNgZr`Svs|mceB?ZXrKopArHCj9fku024Xmbxd<{CEtUVw>iw$cai8EC+D<=r<GEu
zAc1rl&FiO{P-R|Q2F<qKnVsHr9ZgZ`7{rqAGoUrn``0M%b6Uv1E3SOcXGooTdz0XG
zBf+jl6}Bs%+X1Vos#UjS@w(C{gp9gLCua2gsd+1H{|bCh&2ve+l|DA4!R7R5PKNX;
z@M}5i8+)>+{3`m}a5U;_j<5a&LrkzrR<BNdOX*iDS$L&L>2IbiFJCYzPOoAZtFLG!
zL_9e7pbwg@Zgy4=U?9g9r*bh*af@=mu3&nw+cL*p6ye~A(5{WRUh@odt#?4XHXzEY
z?ZHR36zlB0&dSNhdFaXE&&O-v*%b@EbxrJdm)Mg(8FDfK;lS9vnAD_5zh57w-z_la
zpf^+SQZqzOIX|m(cavWT=Zc?TP5e-*Ax%`NlSriZ$VV3p;FfXZeOwqEBM{XiJTx<3
zfo4ObEsw3yAVB=2G=aU?AQjJ=G^KDKVst|;P)jzjHoD7H&aV*gHx!|qQLai5jh|_I
z5)?6+8c?WtI^aYL*=J!TWlWY9>8A)fgz6v$cCv9?2b@obA*KkZsyO+n$gy3R^iMMV
zH6jAFU!lEUMmZ)TL8{t?<34h>xNeo!2$2MKC}7odIBp#dtP&FVr=$af_{u`bE=b=I
z|7ZfHtM&CVEStu7s9P{&_>yMTu(%aixPH*DSm{yMOh(wW>Q|=wF3ti8xqGooL35Qa
zzd%u3=BXATot*5gWUA9Z;RMx~x$#!P;)p5}Au68<Y&?Y;A<`dyY7#`z1CC9bf%GhA
z{p?5ndXcF5MNC3%Ix;q`aU(vh5#3cLrH#*B%BK~H!kPFIj8;T&Sd~R3zIqRjMM52Y
zq}yh`;rx|v{n;F`T>l1Dsz>U?<y=~r!lk^hx0vyX@o|xtocSj&(|J}A;3zF)51QyQ
z*AQA8|9b`8O*u*Tr12YW%fF54*kdr&m5px4pLgIH62Jx{DCx8HxcI7Q?jp+^Q@X+d
z5OsN#&d81UAnSIN*N>2mLvIhV+8d)5`EvU`^+`Bn)}G^{@M{TE_*v{HTuu0~bp=F|
zt3tvhzTN@Fe58B{LIK0V#7o)1`mW;BZREpHH4slp0SLIwvtv=13HxMSys?(DPKd7v
zJ0rztIG<Kr`Bc=*3+9U{i3+(43#ErsF3D~fEr8a({2fg{(J=;c;Al&Y$dYld<{*!-
zutmI;0Ak@)`dQt^(Yck(wpl&(*gI!RO`^Lt-BX<{c5NYs<`st0z(WYK$JT5;2fhGX
zLGBz(1sAr?jKm4Osh1G_CT&L=FnnNVl+Moy(oIU6RN{v)QzWP0fwC6wA>(WX6Xy^U
zZpN-iE^}>^M|+F$wug^H1e}8MuMTfkZ;(&?l1H(#h?_zhf*jT5rRI36!Wgfd+8{cl
zTrXkCs!4>mtklgEk_%r2sEV`HR>5XWDDWCtU;R?dL$&>&8+a)y&k3_FWad1Ml0tT<
zQfLxGXlV)qqu~URNjYAWAxC<8Cwyae4d!`M@VKAc<!^<QyIbxIvd4-6Ra~Zcnw^fa
z)>>S05_GE&G^uN9P`N7_D&;HrSbHLsn)yf6j@DA3hR+IJ@yg(iIVJ4yZ0vTqDXOAf
z0j&R^Wiw`V8D^Du2V){$e*k@Il6m#j>}c`OuGt6|WJNHGV&9??^NsChvg)Z?U_D`i
zc_BwL$En(0Erv#;jkSO2#v*`WV>z4uQj1Gbb3eC^5@cm#4WO!N_#FwTiWIV-AcgjL
zxd8tn7}<7_&Y@e=Sy*FTe={OGuy09jx}^YBweHw?Prv--J`To7tL6m)D$D-WcJm0o
z`@j4xjlbtv)1}t^(0z~_$bC`S6X8dUtmTN@t*HqlF-p35lCkvLqPa7M4DXfSAwLwh
zIOL-Of6eIycV@uKoog2Nialb03vSavX~5@JD`V%?YScQ5eG51tJZ&>u;vg0${gOf{
zZ%}qN>1bow&8RaDnpdRz=qog~F}o`D%G~8X?G~v^a&nlMm`BM_aT1KLG8Y<LWc%o=
zu6);2SKO;ptTJ-K5-fA&@oP<EkzHcSTul7XbCtb1AIGaNt0}j?8Lqly9X#=(_!;Hr
zpVU<6f0T10QiWZXU`lNBT+TH^sCl4<s|n_gpmxeLvYQ2Y>m&7rIn8>H?e%!a<mY>h
zZ`sRmC(}60K$~7fW_qT8uU;`xwv!xz%!D&cNiwsCm_U);CZqX1OE_fp=%}~*i8ZyE
zKaOAw-=cC6mkRXvJs4be<#DjNx2$e@X805#q787ATaYVU2~ho+!v{d(4Y_^ny$zsV
zX+)H%d*;z$LBcYU7;#MpW5`RSMhS=0q1UI#B0~j%>>rg)<^D2g0G@>v<5`77mjQG%
z-@c`P?SLw}2~2*JwUj?})_ZWc8efIAf$tqn|66;_Oz=S#7BmCk!+sl`-*x`XRYXyF
zQnW0I-f=MeO6m0KH_d&_(GbAo=8uCm0Y7q7;)F&?8wpi)e8AP!Kg%Y#l`~y;Cv+=z
zk(@7OO6OWHO+is>OQrCnqK`@D%JUVLQiBlrAe`6Z0wJ2x+`J$}=<t-8Eac)1m((TL
z@Ja-kOatZh<21-Vme7kotzfm1<g$|eENEEtP1YFLZO>f#lXL|ijk?gK+@{r$ednWq
zpt~oT{mpnY-Vkh!Lh1AfV>>$7cD`nuol3n~mVPZ)s%2Grz8)*z*fo^#M|2V}Rt`>n
z0OkEI|3x^3H$_9-d8j=pG}?e}OCQ7lqz4WzOgIQTZy;VPLLL5{z6f@<P%qxerI1Ck
z@SPJbEeJ<Nsn(gsggZqxVaINjsjdS_;#iROZL3k~VH>&wH~O5QC+d&NK<%{hpQrlE
zzA>dsrpB>GO_-w|<ymF7eq;F5brLjzrYpVk99q@Cfq=Y2gy+v#f(*L#<cuepY!2~r
z0D_mNPKu+|;6f$aI>?V%R)JJyRq?E9>pzL|C8r#S2pv^x%Ouo`S515fOafW1D2f3Y
z*_mHozk%faqJe36aLFMOJy`qz-Qu(%G$GDwx-43}8fvP{wX)j)C4P8rbv3AD_CJak
zZ0c#is(YtR3m^6jL79*$G{zwfjmCqZQXO6(sJ1Qp`|gq{I-FD*Nu9p|t!klsGJ4#r
zpqhHu*X*8KaqAL_)*;>7`tU0i$O-i~(z$GoFP;(nt3{SDh=`xKc=5P+3Ra<9GL%g%
zit58C>S4LBfQF^i2BSN^(U5|bBS3`F`+~HZSbFZ>$1D?!X&!G|jX2t{2jmyBA}5ko
ziYLyy>Vkru7^(3K&pX~e>}yB$3=hw`X^nnpY5SOepotD0{4>LY@%Nu{;QKdGo*$*3
zxodgwVcq1Wbv^65wcDn$bY%CSvDrJj6KjL_D>1Cyoz<@9b3(!$CtHo1!a45=ai;#%
znaahixhqM0Kzo`V?5&VYyY111oG@y$^dk}Vyg0{qxIBv>5|=cFuhYC)Ax+;VvXmp^
zl?zG|ETnwrKl1*?FHWV^)^yL4a9;YVP3f&MihF`f?$+5g(jHX>wZ2A=ygsKrk++nH
zA`=iq<RF;TkwM%>zAR^2yigk%uC+mgdXHCeF<CF=`}UkiA0rzZ7F9J!%~C{>)gu<W
zJs_2hL#iOsVS1*%nunu411P>_Iu=f8lF0v3o{cmnQ8W_f4mHrR2ZQA%ximv`$>X&)
zDmxrWb@~gfY8xu#yc|+0r0L3CA6}IQ6{o5UHwGG6;1bbgZOU+s+#8n6*Pm~+n#lL`
zS?Dv*s+ww${b{|uUEI-ALlnbR^vpGx=qCy*QkfW6zt}A&);g24zwh{uO}#d=4A^_*
zO%ZRt8zKa~i=A%V_?`<3yZ=}gv#rprg>lv3rx%dqKBhvtyCwgPu4}oqgkZLBaDo=K
zfcWi?I`1E92IViK?KH?=u};fKWy>Tlvp>!nC7FaAsE)vO{(BDvxcA_@PA(|HiZ@&K
zzHwL3@hl68Vu1Z<TuJ}5v8n6T%XI-}){?7Wc^5{4BXc^0*8zr~)1SBqg9iJD4b8TD
z$2iyGz_}YwlS5DpUaZN@wfFdx$is>Y)Zjh&N>e2>Iez{v)@&jN*XtmGQmOA=m2iI6
z>XMw59kZE>vKPoCA$P_{GNQQVso(ma?f0+zV5pB0bXbxtc!oql@g>mRLuIgQXGK@&
z(>TN|85{H?@f7u_=t;{Q`;@;0PAB!AAD$e{tz3rMjLVB^h)c|41_H^7c>e*H-~kVA
zr9(&a8Jw#%1>F%4HvtM3)t4ZdCvv~JOYX+V*&*O>+jLAJdV9WRvwYFmX;kUI;HJyw
zg+td|)c3$j-C&&3*>dqe9FDj@k=Sv+?grPd2lD@s5Y1CmY<~SMgrDtiC~0p>7}<JR
z<&s!QP#~3{!B3*d%8TT6Ijb3$#$k)7(!Xyx$mFtFU=hCNvvWJVZg1JhXA&Th5B>nr
zrF1&_x`Vj>PQmdC-uS4IlgSQ`v<uVl!XFL~JCsIwbY2Wt@Loo5ub|s`NxQrjdARtl
z{9S*yx*EJ>H_x`h!*)^V@(x_zIXsARaNU_4!|qwG`Qg5f(jx25XNL*iW@P_C)!up)
zA&zqBlt?F9b1+;A7aIMRX*NIz_K9VGyJ)3nBn<BV)+IqZx^|8{vQ_r`_}veEtj(BD
zUGKibjV|%HMZv&2>WP9Sfd^Lt;xhnTylyT>n6*=j?{iJO|B}CON);JVPrTP`a9w`J
z-~2<V5S)}t<Kf+m*j*j|#FTXf<S@T4P5@s)&rlgYBwsd?t%A!UD>K~0o7=+eY{Qwm
zu1^WUe6zYMg)W<ZTya9I(%yuRB~#}BE0yncK}Z!1MT?!6pkv;DQyb)tUKq^=%MpxX
zlbjIFEp8Efzur*R*y<W(S9TkYni4&DPiVG|;2cs36G)V72R)m@hqJqQP;pzw$I$VV
zv}dc@?bCCs^(+ncb9LbH6sYn#s<PhTt=lQnqq%9-EatNI)|Iw|S{dbS?WnKyaO1C1
z`5LSJ&BpM28khvr4UN`zfyU4$3y})zRVW#=VC~_2D0QTwP?LTI=cyctTvK^mE0Iu>
zVAf4a&$MfX<e_oWd?E0&I61CTs81B2DA;17UZn`o1302~4D=f-O`6M-v38v@D$pm4
zCtSSH5E^vZQma18UFA`Xx695(QA_cgZMh+b*c;M1&fn4+^CI5dSW0J~(VQAz4TaXN
zZkt$N#=a2OVMOHk2l#*M-@Z{Ib`%%@01npwU0C;@`nQR*xv_~WozV`imHHLiHS;%A
zT#lE#mpVuPu2KDx;Kq#_h1Jw9H)nyYMJwAxB1*g0-&e^;Zn@3=&a+uDWX)7#%~TWZ
z*;ak{q6cqI$aT3_1otH4$09fMVoeNjuO?iIoJL6o*rGp!v$eB)PjGKNLBMx5uUB<!
z-}^qneMJ*K>8Am1P1D`RIJYx$P@sZ*Ss-q^B(MRRstuJ`)3}Uq5->%ZP?2aYok&8k
z1KiNjL;?DfV~77p76Q$Y<l3MU-<|k%RW8Ou$fF{_bt6E<&g2%k?Gko`@-}&I0lFV}
zXaGl}hUa>WZ_k!EbXADVmngAT-$y~gEU-pI>#Z}O^Fp6^B&7GB-gmLjF1Beice?5s
z{!W0-9&JsBj4J2f$ZrQ4zg9vVXXeR{l7f010{LlyO(DIM!PC)9{p}X%{4M}_rGnI1
zzVz+La!hX}Fis0ys9}#`G7#~+hwZy`X94iC^Ek_SJ@=f)Y#ij@v$5m#yIsZ5MG+Nc
zUSyl2w*Q{>`08YTq!U}a-oN?lQ&rFGdC_z2LboxX4fp-+d~Tp9Ig#(PFu+6rftvFe
z^{TE?`Qw1Sf)NeT#!N7<tKjmb?+ixIebaRM`f9(7=OVws5!;v@fR}cZ9WelfM~dzB
zWZ%#;^0fhg)1=?p*4p}7h;4VkUvESrO9b1QmgmnET1NYesB7%@(p0*g8`6$oe2<*u
zQ0rRzdyKT;%k3GR{jo=IHlCndU@}14s1!bIv;HCSG?A7f(b%$)6$Y~vSjz_6NMOey
z+iOHWEUa7werYr1nO|w)-X#;X*r{QM?HrW7NWdRR9viaqHzLpf$dG;*Si)sGfAn@@
zc+TfNiWxu#&KK`Ia!{FYkH>v{1KPbHat<_Q2bpRX-+$|Mb!8-Dt>-$n`{`;**^b3L
z&lO9S#(!YT-xG{`BeUn(08u`$XG9+F2WZiW4I|><*JPRd!|^=pbt9j-eluLSHfH<%
z{+R^EWYv)K8~{l^u}EXY%%61wu$KZO8UoXGzh*RnjQKh@_j|Z88GSeKdO?Kx1hEB*
zuG3kFj9*5Z{(Kt+c}B4#D(Z2Yezlh84CDvsNN9Kqj18gjGm;F3Lu7p(@EE-r@EOgK
zd2xq_!c-u(Ip8&Q{Jv8XKva9Y4`ZC~O0U%HZMXYQb!$D}(_Ot>R&iYAPAkF;<96TP
zJiu0YV@97|_%ivl^J4=GqdHghUJj;CJ<vF|lS@iod;~RgOW|46&9LMd_S3hA_Kh>(
zeDi>RGVAK!XVyQib9?;%$!z8B6cKq4eoi*zxo`M{cSaYDUiVs{(0?;ZLf<^vodVLQ
z+%RvJjy~~3`+9HtgKfMDo@R06GFSG<0I<BAVX~P(N=VF3B@(0?V`KfjYG#T%Iv&OU
zNAV0aW9tx+Y};rN_~UhJCP<Y+Y>p@=2xYyF_fz0+lmTe9uu5!&a9Qfvwl7^^8O6)K
zX%{VN#v#>*DCWihEE5h09G5LT+@Q>M%0XEI2+=)$c4M$LXpSWen+}0C<$>pzP<kL6
z@7SwlT!ce~1`A;Ehx&fa9K6RVrjP<A4G6T8j`1SdSD8!WEKvfqn~$^T&!Qno|3ee_
zZ7j_W)z2>mV^4K`@}XwTzctr5JAi1YyJ68%+gLGt8rNQcu6bns+2Hb}w4x89no>a>
zL*A|@I-*RA?Ppk0p?+O>!Jq602Xo#VQh@Y2!C=wQ<}`*da88+jvOek$&w##jW7)i$
zI)WD5Txjlf8yJ_=YvG4ZytoG156<vXs6Pv$v&S!n_RjZ7Vj9n*T7^zm!l4_2`vSf@
zZf+Lan54e;4@m|CqRPVqb3F>e2XOva{I1(V=0%j6w2@=_k8z?W!@W$He{?~>na_k9
zxa#{rQ_1Qk_!s;fUOEdN8D|sjaPF{TJ$Z0{=6hAl(Ro8^#ptr-5{*WVNrE}e2x&;{
zHXzghm>ABy`)-l!n4R2U^*%?*feI`!Co=RC;ZnzWo=bPoUwJ-Tc<x;@;Jc~bnrtjG
z|IS%5oxMrL{LBToU|#!^I@2pGiEfdRzpu}sFSUF>(_Nk)ti!uuL3Ka9sT<zv>^2!Q
zK?DzfV0OS_f_&b~M&KU8JXJtjC!YlYof;iXLTR*P#{^SR#fEm}8V0t8-D(PkWE9E!
zH_sCbCl4$k&Ap&6!;buW5avXD84`m|q3xE#<jxm$%+%qEcwS6>Su%ueK2?nnto+?D
zVF{Z1c&=Qq0^Oe?etd-oo<zT#*Ev~51^%Uj8qtFKk+*_cL?dDT(JnIgygNAn;+d>6
zw-&2;^Gz>$)2+NWNSF@d*&je*mn@LG325i4kf|aDMvhD(NiUxefh1t7d_PBJoqkzl
z97f0o!-?6w|K9Gpj_W>nSy9FJ{Iu3x2I5N$!Hhc09yr3L{b`m-#5i^pqwUsAm{K*f
zH<(l48mz=*6P#)un#W<~A1j%;9l7*#wW~ZBdZ`-S_^YLMYdobHiCFu!o9ol2di=4C
z(Tha-<@)dI6l^Ub$o3yJsRq-_BF&rqBA6l#;@zW=0CkL)QDL^57xB6TH!iHFWgHOH
zL`SxMYu~-xSK8UI=bOm2T=%H%{D-Dnf6qUYe~rp8&t(aI<ZO{U>D!s%QfZ_bJJBO=
z=CVrXI_J9Y>J%ckiKy)xMGP*HR&{Wx&DKW!c(e@9X9z}(1M2lliE_7pS?bYcEwIW;
zcckM!Azp=$6($ser^Gj>w&SRp;V>NAw5^SY03_@QP8|Tn?>Hdpu^FRgqFJ%EpN>z(
zJIY+5o#Mh#W;=&o$9NC6;!QR-Ww{H<?ay`4Fxv;JUx#lq9ovBAFud;<u(A+h)xZyl
z6v{M=W5!o@rK(>Gh$;rx<q!@v%V@pD0~ppil?_z7!Gw`uBpDcrrG}8k#9ZI3=dm9n
z@}O;}kGjYW{g5`-P<kjLU4pcy%OB9a#C@JkVlr2-J-39?09vmlvKHXPGFUe2(W5zK
zWi1BX=1mV-V4VR9!>IAs>bG;ieJjbET)6WE1~-T6rytmrh|h<)G+kfG1CT@bQU;nh
zx>f|;ZY;}J4^Q#^qEeyVsZ>f}alW6{M$fI-KBR>P2Lr1bi0ylQQoZ8_vIMp)7!~WO
z;s<(#o#GG5qtPvC)2+t3{~6c62KPr_T9k|@bVL9e+RT|6@q<7A0V)8=C{pE;Hv%!t
z5v>=v$SVpJte_=(ExzxGkY$fawR<vj^1xT1_3{A><HDnuIa><UGeJ2{lK%a6aF~)_
z#ON`^5y+l6lJ^7yzJ3@tI?zq1V{4Bt!l`b<@TM;~HSu*n18y`aC`G<VA@T~qUk|MM
z2pi2Zs@l~uMZMOTL(i2)+)Ht@a6q!xP}H%pfl3e;VUvIjh=C&crhVG9j2Ub%x+z?>
za%j}O<Bn%GfV{o4=}9rJ@11J&x!6(*lMcBQpCj%88$*U{<zFTE59P<1elw`9g*(10
z4b<<~A4PCED67-5MmiF5QS;gC-}3js!8@HkClorPhF#aSenh;F`>cW;!DK}X9^{&w
z*~(N{NDnTt8aaP#frgi+wyb_w5eF7ZhR>DF3{{J?VmPqyC<ba-t8JT+#X<b@P(*58
zR%tvcI0f-#F{D_&0P5E9=YgH31KP?YssV=c&b~HxYX6Bo#m1SibQ;t}5VH=JLu-wF
z5z+8;C(%U3eO#?UL^as(0ff6HYfBgut@)hr@p;${S6a=*{74Cm<rrK(1jlmg<s_}1
zmk)uHD{T!u9oI>vI(4lkbZE^Sv`yb~81Gbcdte|-W+=YbTUrWQ`a@_EvL@GM5z?N;
z)kQ75JS185J$3$!qGGeK0Si;CJS*!2g_!7G7W^yB4mZV|{(~_O9FeH|?`~HZTxcYK
zXzRjD#ah7q!IjjEp;UBb$+GkXtwKlR<_-!c#Fip~dbYPDNmXXUm2m|wYRll9Xv}fz
zEh%172NEqAzXxP?q%qcx*BR|^l=Tjl)_NF)n-3w^;zNhH<wo6Kb(hrC=UoLi%*uii
zPM0s(S_lnkd<y}^eyung;49@i@_^q;q%q@!x@7iB8n*m|L@mO`3IV4Gz5;Gu3CC{T
zcB?KNh_ClG-{AwMFcmW?!Y9JL4PJHtS|>&?klk-$>DE+LgI>1!i>jKQb=8iR4t!=)
zPc`I8u<YnT3dt{VG(vpH8DEg9Oa{Q0T3uSNBz0RwT2Gm_NXrs)MQN&Bg!Y4~C7Py1
z;+v&v7!QX3HWtJaHU;yef+(Jupml>Q9c-9Ps`q|uKP8M$qib-EOJ8}?^zE_w3Dt{m
zwHn1rJKLNvGUQZ8Y9cUbwj9>Ney`u)?Z8V7ciXCXB`8c<bHj83o4Trc$hFQaDvFZb
zk;XEVXUoog<s#byJWm11$YSA#T-69!ocU&Amxgg44S6vuOuU5ci^Dm{)0w!NM$Yu4
z89&HcVf5Rca_nI1R~Wk~tOBs<JJ!)Depbtk6kc2y;D(83OY}P*c77BrPMpu^MZn8Q
zJ@!dM7S6p6@nj-ds50mYrWr1>6c-!B%Q}NXi6<QD6pOT~87Yb0s&Bz(qQ1h-@B6X5
zJQlgInuFq5od9o+%Omu7M=k>s^#@wO6>6jq0z}8IT&yF-5M)91q)nTi&F}gy+^Jdy
zM6!Qt`}6k^tGA2WNg+7HKnsQ;Rroa!<}^z0YZ1GJpOAh;?%IhIH~Z1D%RB9;X<=k+
z?F?^oJ%6vyTrIb1UMK@DxRkIxyeG#q#{2n(ZZUG>Yvc==;GoE{&j8fm)W^@$=fNRw
zZ$+WZD6ZiBJN>VoY$CfSDy<C#o(9wh{Ek*j;<8s3k8^0B_-8sO+<j*mF3Vff0J`&h
zpvL~(ajikr$xj?0bm<a+PAEU96ps%aw>>-T{A@Y&T8X2OBgBU^##Q~BbJmLPAP~UA
z2ojnVnsQFaofjbwdZB&F-xhM5VH6a<ETiwjINWlVT^lTVWF3V+K`*9+X%FcKy|nE7
zbhU*;_^~PjJ{*GWkxT+Hc6gpux&}j~5pd~ULS_E^5d~VCAnwoP%~|1vz*|VYw+{7u
zBO7S9xRk#A6bYH6eCZe9Adl^J)(%;Vx^CYz3k;FL{bY~{>clPUs3R?mC!RB{hh1c~
zdmKBdH8y{#C~DblQ(&=V%#B)H6LEM=BG@=2L*TL><Y9?zlL=X{CtCq0KEpIecdJcq
zwzMf=<kisBC1?Lo7kK@!x-T-rA3o2XOAyGWR>Y;rz3+SLoH8L4$?{WMVOC&KPQ7n7
zBPfJ{0rj$79Xr;_sEoQ@PCr)H+wMJnjvS%uWqBz%7jt_fx5t7Se65nSGk0zy4vvTa
zR7<X0-h=HA-8KBFpt&SoL-|ard=5%ne0us<kU>y&SE%aR*kh{)!j!*};tQ<caq~h~
z;H|f$)hZ*4iR$Y3RGwSDbiq$ejc2uj=;}#N9?FV+W<N9wKEdK^l?5|0h1q{UVT&=F
z7Ri!{q=P+>=NB{NJMGT(Z}AIgl`PL$Y?U`YCK@mMCnVSWyIQ`Twu`D58-<k8c@QN6
zJ4e*clC$NBhIo<UICs<$p8Kkr2jU2J(_()Gl-LzWcS5!}LRO*qbqx$=RoAowA}%Sq
zbIN_KExpF7r%<03W3OJ>ia(%mcjl0T636XP$mN?(YNd;=6jK&u%8fww#PMz8&<<D|
zRD?DHl)5kgiZmCLh9Hn7zP)gmK9KZLzByIEPC-D$KGx#KzP$^ta>qln>nYLojP$1U
z57dlJ8)$O)$qii~DHA_6J}`}vF|<O!WhI51sz`bKLp?;(1CyR9(E-9hcdYg*)oMb9
zM5j`L!djpS<W3aMm*#Cf6-w%wV@68K=IAs%E64Ywl(sEzT9J0O=AvBTV!J!Cii=Vb
z1zj<h1xs;WFH=2BJTYK&?1!}{XnBm3+-N-*IGq*&JlsQ^0IMJ^G$e@juv?-Zpq9Xj
z<pRNo3+L(Jak(_`N<bS3*Q-~7#9O>}HRnK58^ZUQ0U0K0KN7ZX&pqyB4Yh`Jk%pYA
z5X({+QBD|80(e>^q$O&(JG=dan5#VEHGz12L9*sJNa3KPu^dEHwiaBGP@xZy_MNL;
zGNJhj0jown&Dl0RBqQ{^8X_yi`~-wCIGx1RutSYI^eR=p#fdh<#p56c?2~%uv~8)j
zZ8OrH@j(jBff+~|P_sz^)Lfwh>$+$uE#9*X$oOB;LeQ#F%A$m|`~>x-dvbo7rtC<U
zo5n?@U0Yh+)71`e7w{1&+k$hTr^)c!bD-uD#V3;N^>D1xg=Nf+QJO$jCZBVP1wntb
zu4<KWS~hJ^hlf<bf=Ez65;`S_VxZ=eLb<~)2?B2WkXiHypt!#Ekaa0Nsy%nc#Ktl*
zU_*&o+dB_PA2BHHG8d(gf|tNHR_nR~!Ofj12`Kj?K!<Wo4${u9Ytpj(-(y21u2@Q@
z>uj;Y(thj0mQp6RA?Hm5``{UpMppyb2w;P%SVK7y&^8P?RN8bEJ>6Fr`>8J6lzx12
z6z5$UzwF=a9+~61;$S#ijP<<6YUVtEJ6G7R4ajoVc&hh&Fkj8ja9U3htj&sIZ7+p3
zUU-%`BtrM_QYi=D`_Cw>`p6wq@_{Cw0X9JyFcxe*!q|uxZ&E5`NmsM3x>+?|*9Yes
zCh<&1(;T*lveP7FHbsAPQM>N&`Uosl+TF$OguU%Y<g>U~!6*(w@!#}CvV%E)*O+y(
zwk56>q#41Ri}R{peSZQj{gML!yVxrNG6ZVk%1a6j;X&J6>K^ktlK~nuM}Glqx)Ad`
z;nVqC3RJU@zyh3kqB~Mn_>;q9rNORPgg0MJ723VtI&u4MdE<SSy^ha3Ph|$TJa*x~
z5$?--(J<K^#ea03&$KV6r*w$vu!5B%QI}FAZXw|`=EJ;nY514S)perP#QALH7aARi
zDED;}z4|_YLcUAFZ601#sXbA$E@o>=yVrou8jte(j!-);33*zY2Q=D;+{P#DW5yTI
zU)1);2E28d2dSmKs*)a4WR`xWvOa7HnWg;4Qm4SV@rjrL$}=AOlD64y@MHG$HxAsO
zqa;$KSR00_ghv@*+XA|_L}LwP(jiqJwZrxq>UxHnk5t}6BdLL6W;2oETP}(T<!JJl
zVmDvi6UkYJm-M_9*s-gycsg|n65rvX@+x#-*>xH$mDp)tqp7M146UuED^jubY=`gP
z{RQKNP<0)G|IC7aPMEM}98a7aGA9i(!SV$#&@IOl=GGq4Baw8{&j=Tpv`hzz)0>`;
z<1lO}0>7y}MW%?arTvn^`<6B{MW}C>ikOqnA2nbqQRGJ3HAR1!c9AMECbJ1Kw{%)+
zH7?VWAC;3PA>!2NJel?rjb)xIMJSh-s>iTOv8@Vp0OnKf-ezqg#7NXJpZ-~R>1EXH
z)l!$F763tn2sGy@RQw;0*P$?Je!;F)T!Re4(JkovkY^qv<Tfv?%6M1nCC4yqv=K8x
z9vzMODZLwcLm6A|Gr~T1`fVX>dyklVKe}Rw*8N%NPh$>`P&??bH=!uW6>%l1IwQ0W
zvP&I)QAWq=TQ@ncpF?mkr_kg2;Jv5Y)W#|^zaRd4QvEKm0MIBjbZG&fb9S+;0CGUe
zF|Xi<0VLZ5@Yhf#w}}yAROcB;_C0Q{6!E<s=f1u>opv622Ql2<F<jD+*55(3uCN+v
z5-Ph*xSRIg(?e=|H>ze)2SOQ>P=%u-KjcLmid@^?IDJI(AGQe^y$WmTmNp3UQ~e06
zTPW3O{>XSAnZD~6GcD8<>O*^1_RN>oRJrqZ=|FiIN$<+~dt$H~$YJ4sKc}qcOwJjj
z6^WQT*W3(|&-l3T;|zdsu2#MPLZ+M=Ws1N?QI&GsQ2Q+Z+e61Na2WCA2r63OcF^wn
zIFE}{v>COMucl>9$j*`(dBF`nP$T1due6yujcjI_0?XyNk-!}P_K(~p-tt!|m7&%8
z<Wp@FA1Cd**X6T4{s$N?KJxkGWX|et0|z+(+WCfI!l_<rTdN1%k>`|v{EP8%nd8~c
z3Ca5b$ck}e#vV?EfhiU7FvAqq2y%b`zSb7`*4w<XG6;oT7>jj2>ZqL<e8H}<cxTHs
zsqA<_w3?53x5YzA5hWDD;qc-$04K$aZE2sP-yPayW$mSe={Es52?cpZ5I+eV@6|{-
zrXFnR%C5aa|0fRU$`UDA6{vln?9d3?390#q5>DV#S|U8$JzJ~A?ce{1wxEDeOso8c
zzTSVGG35Ua2mJ@C?LUnL^O!_k`N98)@m<uJ?}L>x0>`lH$g1IV3NnEeGICN{K}ws7
zcfvjFZLxV%gd3nwJ^J#TSyNjYLl3+I0Y{sGNFco%LjeL<nzJH*FVCYUoT47J>)F1~
zG{(MaCPf#bW5Dl1ycgfe4uW)Lb@c(uC!HIB{9TU<hii%Z9W!SAEHaf&cb#D#TQk-i
zB*Naep5iU0+3PeBgi>VhmV3Cek~j!+c7I2~=8jh!!G-gt?YDM1$Va9^!F;XNmV@B6
z=x;G=TmoNN(FPMt^ThG%3MO!`HHKN9L*w1!UW+c?s8XtxLSFWOCeNiDHbhTs#-Ma)
zG4+MXm{vquY?P?1{9WsteA>lYToM?stILb=KQ+=7NGlVY)X*{UxbB+`BL3aWOaRGN
z9a|7$r0CZtih6FaJr#~OZ1-nZIzzI|xh!n+b8dCSwyt^8XC=VWX|xLL{on467Ep4U
z?XM@9qx}DDf~keIvx(zxzyZg~X;bW;`8$$2hhZNH{f)IIVyOYPSjOhs@S01O7b`k=
zAz7WoAZc{M?(TM1x(uCYwrXh+jAUo$cDL8=?wEdO!@jW@SM5<f)`b4LuUdQTY3IY*
zONYw$A~yXddOO3Y>D1y${VJ=orOC)=?)GlxjGO=92|p5Q8=2m|`l`f^HuU!O-J*B^
za<tXSY9kkVynG{LSm6eta(?{0+}Ii@`z|<BRQYfoM9Z|5n~E$EXN(J1hU|%uZw;1o
ztE;8-2extkkNtH3Yj!&%{Mph7L406%TimwfzWc*j(nk!wUuXK}>N?&iwjNy$X(C9n
z(`?gHtBa?jqp?fp%&LirqRG8{Tdr<vb(3$MlO?Y6=h#=-s%Diw=GyucmGsZICe6Cj
zYo^UlKtjk#+aAN3UPBBYM}7SmmB5l1K9>4>rmFXwof%)H%IM0N{NH{j^wkz@8Sp6d
zmny0)L-4sT+U(vHUjAA(l$`l6n`aM<oVeYPS$FmQThy*SBkt=7<ZK~g#64_Rc2{z^
zs^uo<gZuK9y`#<hKDzoCKKP<1pGzT+xSkpbn{u{YRrFlQJ_RGa7eC$g^Ct$bJy~=C
z34caS;#|HG&g2a_VrBI2KM3u@r+#0uh{97b$O6VVTEJVc)d}o*T18z_4FLCPL;iyB
z=9n}D;SID7ibhlT8>u+Tw7sFb{Q`54Sfd>C9RrR?fw!DLfde@kHr`{c`+IoQ(C=}1
zv3FP_hJXNr%I8Exy{y6a81BExgJ;xHH`aHt5I!DTt)5PoZolx$X!)))aq)NUvAW5d
z?UoI-7MS!0y?Wf+O4WRj#0;oEm5pojuFj9smM@;<yjS@fVl*L0!$x8f8|DB!%?`O<
zao1SDmac#hLlq53FQ9rqoYaI!BzOf;l03F9a#$4L)2<Lp%NcO!bySJ$tjAMqeSw?{
z#;;j3xh~6>Y7Qi4V~bf1e0_?J!yBl`8?ej9P)wrEL?J-FOVcDA=_6L}b+f;4X)P{H
zES306ptMGhtWTXjC>;IA+$V@$LdCy84qd%2-0AJDtoJ@|V2RTRtR#tzO1!kqrdRvl
znCR);MF%qXL6Vp^sThteKHFus3>NsAZulEY#))SsB$luRyVF6OSsG6!{h~&R3Q$I9
z)ynOf5%V<zp&Bxr0;Q0bN-#5c0p>{oWjq_fxdT+|U|7itcyKl>Fu>G{%WfrNp)C#{
z#21BrX0&0c%+THH928;v4rZ-QKtzVMpj$Fw4JegpFtr0^HUqS}s0-L-v;7#spWK;V
z)rW!Rm$0Xe<i;*%VQN)dcf8_h(7!#kwE8}xQeu^#AtEn>J)3%;u`^jv3u6E%<)AbP
zY<Bya`;`S7F)cH^Z=*S|WN1u4UGsL;+V_?R0>nsOo1<z-)%IleR<EgnCt&4whj?K4
z`d+j9^xd9D@7QiJ-Zo~5>b-ec0uHN;1%txRqA3_#eevFPP#y4RP5n8vxU<ZF%+lpT
zl^2%GSOVS}^wA)c+p&&VSkBHj;U$7oFW!8DVJ_U5(|qiVJ`!DBxDjNX5^Nle8N3s?
zugnYH3u4Hf9GiEIx<C6zn@4SXB<BKhMk-wQoYFqbN84NL93{sg=T4fWbLyn>;admQ
zEu%WW*Gy|<O{kOM%D~`~W-_NkaAbN6V1srG37w!yhBC=nWN%FVMv%eZjv|MaAOR0V
zt`<bcG?av85yn8AVh+MuvK>ArVYkntCZ>^4KoC+MK9#xVhrT9P$*DD^96Y&W>X`v|
z0dI(fKe6ozMS)aL1XL!SSE~_NYQHTZ6*inP@%?tf;ff;{VDy6#@7?IiK<47^><p@4
zYAn-);ZN~i)_o|8#zG5VkXUX}i&i~V-Vy+YVy#7Mg{V$u-XU9BrCw$dfGcod<2zS$
zgG=H2=by|qX3a|fKaQ7y3NmN_WOa~^^i=jz7NZ}&`DJf7bi6A`_7@O&&7bp3YN7!;
zBKZQ-s8;S-At0h(_~Lt-H@^)V-D_OZ6tBrs4~S+xvlF6>NOJems%*!}yD1tN;`@ZH
zGpWi*J<+%;V0$hf`|kte%V;rrOqWDrjD=lY#VpS>TSKbYSG;sul|lttObW(=almjP
z0K~!c`9BArLnq!BL12Zt<f9MWjNgP0*j1TDk^qH|WacD~Se{S=Ues1xZMciE54f{m
zD3%stj71w=0*a{wJ`JeNJ)f5gQp=3TO_c$Mc$3NHH#bARcjH|AHVs~-cX9IC^Sr=Y
zIcH$AT+SimG~St_F&A_XJk)d~TQC2bl&FG|cd-l;4H#L7A<S1%4#T-|-T_b1xDhG_
z6WKta0ot7+@;|(b;gOY|KH|2@36tl<$nvrz#bAhzdB<@m@S`2%8fq1w;O%`zRcl8F
zolKWVy&rUgt;C%h+5$!(LC@l<$@8YYJ!a!u)K)d6BWstHT3H86=n>ke_>W$IB8l*I
z#)c0w>8sf#^*KvC<v{^_$5B$~xg5zNB}opPj?1UH_K?RVY@=)iO|mSN=^aYQ{O|a*
z5{n<w;)mP9_Yw8`RB`5lVhX9K9MZY?n4O*^kh8tsB(#*uw)hl7n*mEmLz6e3GL)Ix
z$tSeMb6Q*0$%?jhVeAg&&f4TcM=ZQPNyJ11l3po_VIeo4@l3nR!*V4CqizwCnuDb%
z+3{6rW{Y7?JnDr=r~?IXmOzEUCnce?&<oF01KH9u5jLSo>Uvy*JZf-E7{UWr;RxqO
zjlElHFV-A}1n%S82`5jCGgMH0+*HF7AkoBHRs8<p>CQ#LX#rumL4^1?$OcD35d^<N
zSNR#{;JL<Q9hPU)K`2HG6jb-4e+>>``&=%Pi<3p@dxvtttyx88U~;d1P>CeKUJ1!K
z43*TwL}?$n0m1-pg+#+jDOvXEXXRzf7sA(yEgigPz!g;*(1PXOsT6X%P37RPr=D#S
z;5mSw`QOK?px|>*a||%S;9w(GVk5bTCOjQ-a|u7|rtAzYK}A<pmfqD>+AqUwR)T@Z
zz{d1ct(x?;`K^b6oAnvWOQPp$DHQ=kab8}h$#ua;NqyEgZme7JD(~AH0%ZrH-T2KP
zoHHvgqc{+CWe{MNz?~19P-+I=tIQ<Sj)b$EHQ^*mcnrK1;Dxt7o-&h~1at|yam0Xk
zmf+-aZ<87aw!eV^8EPM93@2OG87_c=yQRuGpF!Fx(-0E#(u4&o!9j;8<a1F${h_gr
z3_gAoitahYinydAyeBtN)4vIC7U<s+78DWc5F;DR3J|MZw$xGLS^yA8HWPsyo{`wU
zc0}`jL3KXqub5ZUp9soFk|R93MiQ$sva||!|JFbh3{%7x5M8uSfFyOAW#?2sibQrg
z=A^*`D8t}k<$s(=TTAD3v)0d3Gm4AH0<!kt91{j1O&5ing-Z@aHrog6rm;0EKHqHn
zgG$=xcd&ajbaswEEPU?-wK*7Gl+yM>#n7DVxu4&c=Y2?^t-3=EK#W+oAfbL~s78jr
z6Kc{dkuce+E2W(9{Fo6Z&Jbdk-Tu=Rk}~hE2$`WPUH*@W)d4$$upxZRZUm?ZC-r+d
zzH*&bg}d#7mykZPD*H5dwW(S|4MLg#X+ApKG?>LG9Re1t0Nd*y$`z+v6P^u<KlQEO
zF;?yVpc|RZE$Z0Z(I1Tu7BEf?wA(Sa4O@NV<8f>@k~!KWR{^|6_88io$$OSKfxH9l
z5m5ONi*Tzoin56-TYPVOaW}#5&B*)MX%a(`OHL0p3v%ZoS&1)w!+5#t4;)pkIPt@3
zxuxmLDY2y43q~rj0}LNkzPcuM620j?go*wqusRwV+p=O+u|*VBFBy;M;L&rzb!7*o
z5p8v9q0~xb<PoER+jiMXhNP1DK*=r*RdAocL8QwjJ%>nFJfT&3h6=BGdmF~EbXNz~
zQM~d_E%3vO8vhr|f;R%8i`ISWVRO1|x*JlR=Fk5{*E=wY0&L5gZQHi(-fi3FZriqP
z+qP}nwr!iU@5Gy#IOoRvhKj0Kl`Hd06?I!AuQOc0qQMG1LmbmOEU-iw2KLz3&(u(F
ze%d8TGm0wrcnB2tB5XAy5X@0bawg;BQq1f@M?WUPzu22BXeBjG!n`rejB<bjxYNCa
zaIqVY$In5F;moBi+y{`+fj3_2>qg&=C!iYJ+1N0nRRwwSWBxMCVk_lswP$>M*8c5I
zTHSx`H<*KjaBigK-H~v2h*a{6<=(nfey_ezG!2_|jZ@O*r@xL37+-<P=x6yMSeOaC
zVI;Vy?|Io1-=Fy(qCBip|7;X+6J~HWCO-PSOf{YR)vjhhMpFTy!{FNf4S3Io97tYz
zKtO(Qpb_r)W8Z@9b6f6N@4GZcm6;1YqE8zdqm<ae*Wli*E!9nkSS?Y_YCFb71z-oP
z481BL+c?xYi=%w^*PoFE$9CDStAGD)68hCrcdQ_P|Mh8wZmh57T;_J}{HlvX2oHJp
z4<<oss8&-J75+t3WvYW1j&8NmQbTDM(%a;%>TiRq`7`IX>BSw9Z#(tYdY-FGwfx)i
z6c5_B1NyUx!SYR22pk1`QvN$whZ!io$F<uUV8c2Ck{d~k5mmw&oi)%>nxNeSSd8o+
zq}?Gkni_jWVh-^K>`lr-0Bne0OdOS^Zh73=Jj1rGpunIivBdspcsb$y-?zOk>*nem
zQjT0A_?UYz*?sfM^|-#1!g(X!I>@E&4dK2Y^6QuRaUW(DS(WqzGkuAmp}Db%c};H2
z9-=DIEyKj_5F~1w_AM0{6E@_v<q5(9p+}927mY3IyD_q^0dlS@AlXrC60Hsg0Vd*W
zCqiu7TvvG7s>fEU<se0=4D-ulgOq`eqOyU05(#dtS_s|<fnI156dCrjSB{-NKH^iH
zEsTPAQrvh6^o4HDDUm0Z-IZvtkTcE)rCjF;1WT{x!gfLI#CP)daQJl|5)j1^>KpO}
zZKiiJE>pTFCwe>mk%Y6lZAkV&VpcTiYt<D_2{@?+srv-Y-3xpi4byBs%?$#_DywKB
z*%gD8?g>jk0-!alpCvSYpb=!#birV__7v8KzOBtONt5pf#Ly9Y!qhTHD5R-vtvhzV
z3?^~W5X?QtbsF@^(w6mCdepiicsi86`y3;KC0Jnq;Zarx+Z3IZduklW^19ztwD_Cj
z>bpA9&4b>p%XCE|oXxJkB~cL3=Y5rhw6%EvX-`97&ZN+?osJ-vnm|T9AL9~Z*k2qN
zh~J5W@Cit|J0pYj>#Y#KrHb?*-+C7u7y{ly2#yM~og?M5Gd}jBUaVKu7?dFd==dpb
zpH1B}$MC!QrV+FSLPaNGc6;n>=d8(2^VZuHYpH<hl+8<b;?9y|;U6ZIRqbP6-o2}=
zYU%mO0K#m4b`JZdOj+47!KGE*1KR)8z#iGA4$z-nlh8nhi_Kw>-B?38|A~iLBrhQd
zn~H`P(o~$&w(&SGC>wVKQr=`gzPM~nHD)aw5%k1A-F|FTO|&fQD6CqK6SImBfo_7S
zzXQ!kft6)x;*R88FQQ?Mb^WH$my;k(@UA#YxKzw=K!^Tv%snx4j#V~YQLpt6xe@P8
z#gl3&rKiPoWH}%0oqzagd(U)U;PH@kNoV%gNhCU*h!^tZ694Tl-7No3@D#dnZ{M)T
zMQlNywfk@LX)R>Sz5PG@Ny9(<$$x1?|3{uM>sGb4MG`>xnfi{9iH1Zd2(BnEAX`!-
z1u{P?UzQ|W-Ry2S@7}wnE7tt&{B#)Y1{BF018?B^oMN7xnt@Y~EkidrAoOc|<jsN!
zaq$Jh2dENTkQ7qniKENEgqEJe`&Sv~=miUn#EcazXX3ejewl_$;3axM-q=b7#}tKu
zn~UcgG{f7pd55#Tje32ZI`LC55jYNHCyfq?e~&rtzXBjA-QBRUE>pqSc)!V^KVij)
z2c1f6>{Q2mN^+xFDH!E(yJuun*gg80eOxJ{>j~<0`{t&o%Qn|C>sqN=8!5D8C2n+R
z;JYFC?yYr04F?)4JATN2)4HFVd^cOFkszjf&5HwTh2e{pKR8w6vG-WV9>VO&^pBve
zaEmenw)=r<jXXYw4Yw0}X%-rbR=URL1^DUZ0GHVR)*S&?xbqIi?GbSi^+OV`h7O($
zW`SnJj#Z7s8)}V0=f2NH*rqTxo=~W(Dnk_4GL6Fqe53twBxvem5a`Xa^e)>8Ms9wF
zLq`dp^*+4uKynwg!Vr+K*z7a8LX;2Dgs?vmGRRoBwtQVSL@~37`~-I0dL&w@PHSYp
zQ|^{PDn)TP@6Qt6b-Whte_*U;i>A|}>OVylagGX$i(i%ndhx^*;YEVcf1+)FfwPzs
zqVLOnol*!M1C955+&GTT3RqR*uUfX#HE=28iadsD57z{F+|ChK-m(Wr@vayO;qGco
zqkBc(3I1Kr^Pb_d#bOLmu{V!pXKi?&8Emas85m;>m7Fyn_u#=u9cKu`nkYhi$`V`X
z$D}4400@bCt~pDqlh*Ke8)p|jXtw;4p+}2F*+nb3(SpmZgj7K{lSebtBQO1<HLk0>
z=~U~4?zACOrAkEDo>5&oz^q<G-`~1gTl7un=!7FU^Rg`zsWr}HRX<*Ld-jHL0tO3S
zAfZiey%%!oo%|J9r=&`vZrM(DWE?HuLVu`5H;quLz9^Z5^x6bqmU72`s-k?HW;QAl
zbG$M+yts_&EB^F7KJb>BM#>ifDD*Enr;^sR+$6lf(6<^%vRNL<GNsyjL-^G^HRA0X
z-tVMJtyJc={LM_uY4*p3LNfO6M&$#!ER_V>gd9%Bm(7wV0b#7I{ywRaX=A0k98#Ke
z^5B=$VsDB=WrMh-dNkqkr!R8hP{vRG_kXFlw9=UbQvYr!CDi}dy77PP;U-qLuDXU!
z4pwxA)2f>`8~g~~Bf5<F79bFs89m7$;Ur`Ngd_)wBGm=UBRYLhYfGJA8zjFwn4aOa
z?PYS_b;sG+jwcO`%xt`WYMLRe0FB#Gtvc@5rWAh;7Q3=`CLq&}Y1Opro9~Jb+uy(6
z$>_D*nA1~hPI@KG#%jSuC!oo+eOeJv%fI>*_o(}WGMKfh@<VDX?)8!oKziiuK&H`Z
zh-3`**>oxQg=Bm&iWd+GD&||-58lu$TOP7m)e4L;io=LmY_G2@j8MaC>E$$jFQT$D
zMiUz)ZRfC8^SPS|7HHI1MfN(6WG3CHpiIe>MLyGFOLDde#b{=;1!a8ox_<HY;V^Jv
zm+m+MyrZp}PO+8SAG<)`v|0}QQKlSSiBKacWUpllaUqi%QH(T|5;yNfF&@2?tV#78
zP1#JbV_cXSjO*QEY1@5k_npm3Fc=Ey5%*R$-6*v)t$ENB_Ok27jPWRDQ5)yBazg$j
z$a{pY#}rA{5gP$@9H}~eDCHXRmSAI7`w%7vnGdTFID03}>3ZFg!x3qd%Mv#2N#`fb
z`G0$hR}hfA^f!WP;>O6B#LQ~E204>|^MX;JDe{Xy*&N-4{B<QGW)Ao1-L}X2?s(X<
z&?BQ$eGIlF{e847)<W?CeQGv#?N5?8jh#y%-6gxoVLAkx{7YR>6;y*?CoUL}+iG?U
zh>N|Q)+xOF%*0zzWR`_?EeVh+M@wvByR;<#hyPgrbf_vw1kE$E<4~FI^}?OH$K+in
zoWX!>@OU3hb*p1kHFAvdB&~0$qZPl^cu711db(5z*+X99jNed=?j-K7i;em&jyzz>
zhIJpsps;q-`hC%)Xg59?cJ^z>p{kR!e6`|SLS8lp{DG%ekl?n^SZk3*vg`>5<cVEa
z$pKE%1dr=9dJOE)Bc?ury#&nz>Yl2f>qM|anWueJ;jv1~<65RM0V)nzL%IY$<2lDF
zgJ<AvHZSDAoTZ;=b#!H^vev_JEx$PFRUK5ibW5qWM9IxP?K)K&xOqZ>gkaw)nP5g<
zx`9M!1}^k13E^}aF&}Ob-SZYh`17M9A@Zw^OP%teZrlJL!N=yjvy^7&oLA)pl$j!X
zbX76*0Tt(O9ZHY!ugMS4f9m&yHUppj|CT5C{}w6#PwCD7x_)NGQJqjgj0hr@r_?~^
zE5njfQVKL^AgE29W=w_taluAjUOka(VV2F)#{>H~4FU<KjD0t4Llx^db!)cRRe%k+
z5aFJf$?HiIiG=muedaNHPMl&j5hs~SiOKyQ!Ka<&otU$`G4C#PRjAQF6p8pma5XXp
z7qjQFDf<eO<KX%K)zq_&PtlY2k7mZo@xSs=<_`9BhR4|d_r{^|9o3Zc#!na6Ceu87
z2-n~!sT?ph^zz)0g^4qYOt8RXtw`o$v$NB+bk(20exang6x6=8wX^khQ;FBHD;#?>
ziwBy>MzaQWD%R9A`HLaegspdV6df5Hd~x2Q?Wm-?PB;CP8v=KiD@R4&Wx|TrncJrE
zMdj1;u}GgS^F-}xI`hlz5gW%>k-z~vG(lc};GPH%<emu1Jf;&Xn{0lZ<}f8ww<RNG
z2>oHIDU0d4?;oFIh$~M<-lK)N8injM<?;jhZwx_4fFAjem%fN((+Bt5tCzRiOI|ET
zzwJlf<!<oDnZb)D{uk}ZJro>p57gi{%7~t;AcvKkVOnbT&Z|l1ui-3?bBCxew}CDj
z7A&rjS26$#A_4e3=9iv}R`w+NH@0eI@1OG3jH`9#m8x1lPJFCWjl4t;V_Mc18Grd!
zW^VS{XvfAylR(70t7x#NPjbUK_D^(lA;TnH#<u)-^y>V0EkNYLOd}Btjd*7L*B(PP
zaln{YC>ZmLOdlNDgEBNx0q*d$CjsQFxa&c&?gueru^EZa0S?oF^&YR9t1E8uiIFp|
zMr-ZN!X9_$o$~#QQUm*lGzjJ|H_h@ZBk*`3hA%F>{RyEL4!9M>i;7J)`cHbm*U0-x
ztT;>f8u<Onj=R7!r&*IAr7FH{XpOeLy;M)e%N;tw7Iam_HZ=C!CB(=1bZe27c}zMs
z+|HQ9u)Q>4qPjX;4OSiHb;`<(pFp+gK?I+H^qBSgfnzBQ!&zi~2pQcddiv>?w~K|g
z^1&~-yqu95PZCvSdx)_q8rO;tHY<+<hBdG%eFEc0i#bO64&p39?>R8qO}H3qUmK1b
zUbuM?eBtDck2{tZGDHx71ByKb95*>?Tk7PYs5hQhA}4$GBhA`&>lvC^7k7Cf1O`Ov
zx|4+vr_X@UAwN|tz3de}Z(V3%Zg=XWer^OSppYr}p#Z#KsbF;P)r)5OXe@cky#8vf
zmS(>{Q!mHBwB!-x(fx((eiSw$+^uOOZ1;pQ^u#Ujd}jBjNQ?8BGGdbyy=E_cFZ;hV
zdsl$`1WODz(m=-w`vdm`Grdgrz%pWDURe||e&W3WL697R6iblRTB)lh@GOxaeoY`m
z+^RSZPijDv_3BKpWUzsMc&uq&Q>KVE3rrsvvifcLeVoY1looRNOrwuMzgYL+%LKqg
zv6Ll+f40)S?14D?8}u1E^&Mu+LYPeuJaZ1WmlcR|8#DN&u-G|Ifgc^*bZVIymV@?k
z*E#aatre~nHjk)h(5GO@VO|tP_Pb<`I{lVlwBsP~fxzE#+qR(i!$L_iJ4!~@Jjq2&
zA_z8cP$^P%GtfmXS@1A~RNp>}fez->V8F9~SSK_RlO}=)#5b0{TqIz|rb}%~ClLHe
z3sw2a9*ZYJu2;qo2ILx)y6XCEQCfsn$_k-WN$Eio0mgPuK%(APfDq|`d<O>OluHl}
z8lkX8n&+|e7ZWSYE5Pls5l*fZ)PrX@&D)3w+QEGShgIJqy4~rDS8$0AnCShRrD=@Y
zSnQ!;&A%0V=%x`8*M`zLku#9j5QneyS;t4yk7SWtY1Na%+k=M+QYrUIOf^Y0KpLh#
z3=V-qZju$=ybC?Qj-=2y9B)S7E>~Xvl3Di_aS!ARhLfNh138(IJ;Q`zM$9iDx;8t2
zfBC8v0FcwbJCQ6xQXIt?I&m4P6XIs?lk+xG3Ap>P1X8$alje3k66dy;tQ~J!aff+{
zpAoKwAX^u~{1L=s^fQH!Tbej0S>x=E^f|ElAWLY2eSjbuQo##h6DA~}Nrr-=gu)(M
z+-<FtBe(c)S{?^sFh$140UPGn0BPUT6LiU)VIXN)V2UO%s#aC|8eC&u8LJ$`fO(jJ
zINWSUW*m?vnj+(8YsOJKJE9$2q}^L6jDRUIck_}f7D&j?NJsJQpvno-%|e+_L7Zug
zCXa=~(h>_w0Ev&#JSjMP4rt`*)GSNqm4&Ti@+40N_%t-6%{!e3LR$LyRD;<!!rkuT
z$!F1+PYNm~D-UBGa|<b4DRZ288kF6S3+(V6!URwCZFoON=F=?VMFgGM#)WT_`8t6|
zwUk5j)m4Cd>G~Loe}&dyxHvu@5%gk30<8xFoX5brY!m;~X!B74Q1N-Gt&({50Rxh!
zZLl_YgMba+Ywaz^k5@e&=YZ_2_-A%gh@d2=_(=vtAYah{7`L~y_F4;i)~UCN#E96x
zv)Q*xGdHrR*Q81)WS*L6fJ|G?;JA63pWY)gpuW3|l-<38<6>_h_cwYJc!=@juG3!9
zz@j28de}=<!wsxU+xFSivt8)_amRK%#PTBC=PH_3w2_V$;WX)h1aY%PM|<xnGMQ`$
ziOAw~^_8mSJWax%A;eGYFMdX3;SLxp!nLu)nb2`fgx!GVrgW$!bD_{O?1T^&zsJ@z
zJ}z;3E-;20Ra5)!FQcngxpM&_aC2ao{p^mMnJg_H(WqK_7>(8&KO+ir=tiHdzm9FA
zn8#Ox6mL&*wR|Y|E_;dN2~Kh#h}EfdRlUQ-T{&y&;HZbGt;r~vPmFsHM@~Z^Jx#$&
zODef*Zct)^&n|w7{j>M0hCc2GBBi80)#D-g4VJUVDXxyP1vsAj>V@<!AC5+UaP(A-
z`mmhnr;GyEH`aYTLFgu!fqD*OX@F^^;7-=(|C-bxa20$HWLtxA_G#um#%<=mXPw#?
zVwL2ija0nPa~B9%joh}YQWM)#OG5Cf*`+%)ZNxsI?CQwEZ;Tyyxl+u5M>P58XK)Pz
z+r*~9TDxf^wXldt2Rkf?prlV)#ymU#*Bx0ZB*(aVxKkzx4-Ehw4rCyEh|0kwh2~R(
z_6-#gArg~;{f<$Eos{ml!~YLQk%PnrBh~#i#ry=mhMppNxwzP$R<tm35$0xssC~l@
z<h>l=rtYBqAUGl!U?K?&vW4yb7f^z}^OqE?Em;-Ozn>^KN~DBtNtO5IQ~;<K)#*a(
zlAfMqSJ!^W#DBPe6iioearOpUC6<XU6i6u|Ql<wBd?ymWMgKxVvhNj^;d)o2iuG^~
zA!&qh5{W0yHcBt<o;B-e0|`~{zseo6JoCmG0WINso5HMR8YOc!>$j(M@&RDAcav|0
z9a<!?gw1rMsB5YfzaQjs-CE_cGk_5<#(gh}>r@Rfi)93J#BPuIQ$I^P2igK%oBTl~
z4+)w#<X+jS<P_)Qi64rI_j7|v=?&*^{M2@qCXJNF=~)RSH_!mgWkh8dF7IT&2|x_%
z(X4^ga*_)Nz>TLTPlMD3|7gG*O+q*rwV>*gQEb5M{(+Fh!=NHTd??VK6UA3y?ct`N
z0u6ubu7TTXDqZ3&8-;AvX}NObX0RPi*__NNbil5u-2(bb#h5IiYQx|i&WR|j;}d@B
zbZd`zQB-jZ7)9dp6ZJm(n)#;erWc-z?qGt;UT_R)O~|Y!Qlp(D&_Y!jB3)9lx_x~E
z&3tUf?@GD$P}iQR7T4iOiELS9?YKOySuHmYid#IdXUwMh2Ls`b_a`ryudTyP3z)7{
zE~%oxRcHyGB<5?_n(_lk98SLW%nE8!(FW)XuuqvlD}6kt=VEG2F{&_^UWEg>Ac>2(
z(?wh!6#G}@shVq=X_5Y2|85N8hmdcAo#>AZ10sz)&Sr~j1lO{;T^mnbV)+E8cV+D9
z$0Y$=R|K5a0)XGXFD;Dx%3|(|Pw$b={n<BcK$vEGnV3`};zZBmZWZ^f=S18<v7F*z
zd3A4H-w;*`{--`=D5;0DU>HAJ(HtL#IfeuFPi-<62Ux=LxdnS>jUKE1VxM^GhGxC;
zxZ0`wHu1OAK$h3NL1bKk=TYaU;k}?P^J00mQR>EptibKLayXd5ZEiR1x+Wmk{c0Lz
z;OaqN99w{5J&H(xV@s~NsZW+(c-yQWC3iXku7Y^~rx_E~&cI}3EmOS??S@F-%A{&(
zYdgVI4_!Qyj5GdI9D}K7PX&U6v_zQ5eORk#o>OJN7HL8D2ZJO34{HF=A3tPn>3ZPn
z>|{>K`n#Hmp$D?9YLyl@R?_5TpCB16ulGGI4_M!^2JhZj%go=o%3-Z0(bn4JyrI&h
z0wVR2`xP%i6E&CCUb5H-U$lIALUs3yBzHxzl=Ca}w9f-!hEREe2m@epFS~+qX`QGt
zb}5nZrW_t`@3IyA1_zfMuXnmD7H1cxwHQ0cO}phOJCz406Z7W$vI7MQZ-HeKOj08o
zJ(Q;q#X!~-PsWW|!>za88(#GV--DmWQ}PpjUmptijTH(0Q2Z&W|NesQS3>t(|NDM@
z|AUwR7v151elup-%9|FO|Fn-6pW!(Eco_-OPB=<7&_HX0gtU>=gwTOJGqHOE_u?wm
zf#UOC*VTku7qd?Ij;S2S6P#%x7fQi#N|2#@C3x^cu`Us#)DjXBTxD2wFoh9Fx&=}~
zxi=g~#qzM@<*NZnf&Rm}@0ISsj>w!)88^?(T2Ec8Zx8BK)r`2OD5V%}$U=A4&j~t8
zNpo-~AcA%Xcx%vmqI*`d_}W2rS}4;|zrUxMU2NBfP3*Kx@TKr?bM8R3?nsQ7`+9%Q
zqkCL$>NhW4OJ0%9I&OGdTER2nz>#rc@^5^4I6{);adzQ8kj<<Bj$y8P?wNalNB?%X
zJb$t;zjj1t(^L=ifAmwuG;Wmnn+oqOx))js7H{(!I_mmFhr)}MR7MqR8vBEB0q5&X
z;}n$?hv-%%uwLweZ$uTUbj3a85)TCD&csq<P~Q_3;iuDjIxGl}QNW#)`8y<d6x%?X
z{Bpz)&oPn-L<mA{8}w>L9oO$s<7;u3j{QpD{G&Y~QksI6llDBIFXM&`*92uT$+$bg
z@`Y-?>p_|q_GGf4(5Dw9k~nf)$QSYK8cOL~LZP4UI<Q$Yz0)f!A?Y$tz^GU>w~SHU
z*6|6R{1uWoFm0}S{YBiI%vJKsFUW?M4@*T3*0g8udX7E(fBEMS0RO`dzM8raV1fVu
zV88(YK>SaOqqV-d%|BgC;~Z{*AK?d9uHJwlFRWET0U1@fAP=ysz(i?-*6PaG6<VjO
zleu#<5>5C!KSP|0>(z@nd!>=Pu6s!Ts3wjPUiJI=+r<AxMLS{3RGc=>aJ&rNq>F>G
z!9Bg1w1?qBc!s<~zkW}DPPcWU^cXnx&(txX<JjHt9cH&jx=y>GZ5%$|#1nOEypQAT
z2NU8*U^g(?D%_?#`H9}uoITk@R|K|N5;kZ(F_zSZ>@xyMlyyIpSV-@R?D!9U`v_2I
z=dc0CeaMG_O1VO_S<9$pSU$c+f@luuHp5FL3{6a6o#cwm^&;w!a5C0A++Qx2=RL;(
zBvm=po_FW7L|tN4L!(2YYRV$gc9|;%garOTk`@xM(<CmCKg-419=CjK*so)*Z(LeY
zXOm)voH*Thd|1vIuy28{wPK6=k(jvf=U(@EY`5|6n}|$=$NgSkWl}CYMn5$qAJ}EG
z7=_86M-7~7@!J5}9U`h}m=_MQpeeIEe7^TVym);0^XGx%VZ8V<d`M464F<X)`C@JI
zNG?+}<MR2IfHQk2Sa5zd+yZ%lT=2R)<~D*iEHZ#)aRkRocy{akfi}oE!j4B>*svL}
ziKtmf*ptK#N_p=`T~2$v?IL|@+-=1eySlw!h*j))vz*4e9SCPsNrd-iY2rus>GWw=
zNrpE9lH#N{bPvG5AQS-*CFxF#-<}2h<>Ey3%V^f~w#hTPyz1Pv$R>rOhpiKH5fV|P
zPPmtrr&(K1V^7PEr*=zmX$JI7wD6dBRrKu244svZhg3{u!@A1!Z|rl6{hKu|$J@g#
zzV+AIR(#!yIa}tl^R#o7F!(Xi+I`%sqT+N1@!`l#=7*BG*n~s_VW7!0tu5%9eT7iL
zR<BM`AdDXlOhslUESZ#Xv89K>1Ln{wi1Nbn^Vux`ZT7Wd%CEcTTth|eHB7L>L279f
zZIFjCM9!UoJZ3B|(<UcFPs4(&S~sH5xO02UK4z81g=D~M3ujI-u3@U#%ADYF(b0uo
zZNLfAWQ+u!rX}WVOhe6;6~P66v<<2s_Z2YCj=Fkw5G3zOAd&jBbdocz+ot`rbSu$7
zfI+GX6FG1q;Fn&l@9V{;v|rkT6bdA&lk73I{JUq~Qy^b)BHH*jLivlKO$RUMt$2MH
zWo+@g)1*hB)3hk}_pkb&<`=JSmfzmv=tdI%6~2U-FGZv4v1=%w{y8r1ZVB*n#|)I@
zSnB6)WyCGv#Q5b%%#VC^o|r4<QRJ(WlrdF|exVtSAc<6gF6mIv6rMVvYfVN5O?FG?
z>upf7if=OSTi11lHY2!`Ce91?!zw6L;8_&A5*l?<A!T?d{F~>fWdUT?queV<WIy2d
ztQGpOP|dGpXr<QgJoRNa$XN=oTz3RYd^$ax2Vy9+MO|~;PcnRRiF3yx^NdNH|6nce
z;BKdQF@LdvBn*E1q}6O*LnJrcWdIo2o^5hOz?}d9_>kErE|8z}i%0Ro32~K)R{<cJ
zPGZj1RE<I@h5%>@1M^prYkjGDePxUXrfF8cNnI+o-&)`Hq93v*yT-H{1jl%D@rpG&
zMpTH?{qEt|4GLN4myl1A=J4D=G<Oz19}C2-;_qK#1<*vrW!N~^z@<-^S)aN9!!?!&
zVEt~=)74Ep1t}EhxzC3yYoh`|MS&?)lQr#^WR#6n^gx=|y^llmVjWP#0_`)*6VGdU
z6Nt-Cr>}D}>5}qmAp`vcXyuk26CtcA=@Pj!tD2_D?dYuA9Of8Qvb=n!#r5FcRA1oI
z^j)Z&yRU(*ElaM6S1LjM0qUTf1V|#JO%p+!$*=`=5mU{z=NI|EulR+Oujm$9h^Sh0
z*Q~IATY;<IIp9bp4riD4f&Q*>Mc)CURiS{xIDqdYrJ=5t^`%}bi?ut*?amEvu4{2(
z3%!@rlFb3xzDnj6q7FT={fdDyzxcM=Z6G2`_gzn=?%Dwj{lKvH++jp=veV&DL!x}n
zDh>qjjfahk7z`rq4>gPIjsY3L0ju?TV`93UjXaM{FMFu^ZB_*-=ndCDH?RER?FKC}
zC0(+wbHxc#2GPgB1!y*;H@Dgs!-tC`VZKk6*9x{O=?g9*LCPVcuF#37>tt$U5f~o+
z>@j|g2j^BMycGF7Yock`H7ED}TcByJM>g%CpRAzNJszx~StedU5oJGLwySX$?qe70
zYS4X^CtGHO=O;=azV1`h3ySiB{-*MPertNm2RoOP@a&<qX6jT*J*J|UC`#OeB2*3!
z;ysPBWDH<PdI~jrSJU7xr$aMl^<>1Y8)fo_aRVYvq!ydBX6b%@&_8%uC|%G7z9hk&
z*@ZC>@qqM@62XF9{TNi6!m>|T1$1X?4-Pyra=b4>&<xk(iDB_BM8pd=MIF9z2u=#M
zR3shNn{<1;^X*YNDA45MSLSpOF6lLokc6ftDbn|I1oTn47=ZIac*5)BCDd|7+eIOD
zGZ+%7E3N3>-v_bDAV*j|R#69vcdV6bGt1qFj00G+6|4Oq%<lJyt}~=7Ye)2x@E$jb
zYfor;5kFXROj%;Q+{8WH#BJfx%`?t&XT0J#k6cyTs&dKoSiC;hm~zQ7Sw<*=&qAuA
zfJ&>9aRqovB2cB!zCs>8&co=Qfk9T1U`U+q9WfXFv6z1UI|K0Df0YXlcs;U9sCfB?
zEN(GgwQ??p%v-t-mg1`y5bSNfo_p7fq$?I<13@GH6$J+gQS>1P7LdAE?E@c4$Q6&O
z#p{<3r@g+2+4FD1S*Xv;LUHXDZ;5x7WK!`Xn>aC5^N#$V^8aaH>D$3Ss@M$ve{`q(
zPlm$Q<lp{H-{_xzU*kOCe`XAs9D^YwKn4HdwhjYSXaUN26*yZ<stf`RM266s(H{TU
zBd&KSmse>Uy+*uZRf%Pj(4x;v!;ao`7}M(E$#&B;np>RoEp^P=cdkX3s^fzNn%b=9
zk|~DLl>rv@Q=L<|S~d6Tk=nZ1+{0JP)w`Yw5Ic6O)uR$F<|)^$I@-Q2md^4l*3%eF
zL`~v4hN|*{p)EnV1D_u49*y&wTl0^1ulEUw-M_M>kHt1tUVe49EB)Qi(z8+ADx!xg
zZL20bbd9*7g=3bFgNvfi9F`4ok~JPhE>u+>PQ@Z>sI|CQtF?34-d(D!T`iZW%4SP<
zkshol8-!saa5`E}+t&xqy^>JbROZl2-dKyxB4ujkT}q_EV3gY3SWwGch)Wg}8`X)1
z){63wodT^dwFz^)(X5I#iDuDDnyANa@Fp@#AlN($Zh^P2BVYQp%stZ)*PGRZKBlP}
z-A$rbHkMAI)?eI84ukb0pBs5G0mC3(peKZI9oY5PnK95os^s1MZs;MgNoLK7C@L<!
z{hgYY+?W=HtUjnv=Fv8_d@CVcJa{jtWpNDw&;HI#i`kfHP`<_;!>&@36qNEbE$1FD
zM4IBB6ZUfuIU5KcVg*oxn#*dOfRr4g&E2VViMXTrh3c4e3QjYvfq8$@qSv)(tzS*2
z2}QvyZ{S5kjTsk4yqFd^ky!8Yv0Cq-5A5Zlz{y$DYz7^k&>b7wV0C$gP`BsQYRnrO
za46cf6aBY_Ged4J>_7{&0TnK4+R>G=x=oGWEE@ury4)OJ@R03qZ9-qc<j=Op^QK1!
zRkf%ULxRva!x`;PyN`RaP)%>Cttx;80;6EKA*gf);fA_Kf+d+!2L>D%jg5VU6%5th
zrt83UT`$yshVth`)}p-rVBK?V7}mg!4Cb(T#2Bz9YrN@N$Gur$&1Y+#Z293Rw}RbZ
zg9_~CN3!da;QIg2gAti#STUTxN{OGfpD&n*MBb$j+tk6@0D*a>IqwI^PrmDGv@yNS
z$pMy6VHKZxLdMUUW4ib+=QLuoui3Eeb3(2zSuJpp{I$({V*DB(i1YX}tDQSr&UD06
z6>T_Hl;)?7H%yx@`rg%7C(?XxJ)wNN6DSHf1%eD_6Rr$*+bU78x%h#b1f1Fe01F4U
zt$&FI`C$L4qbi@Y4HhM6A<Dp78G1p<tt|J%fc8<8h>Bs%3|v_XK(^lPrvwB`gGZIM
zHv_o@Jmo*tnB(z9)u`j{LMFOB#qZgh><lV3{u_FydxJ=;K4X2;lNJh78m$G`ZEZyB
zI{`!t0fxEI-pbTRxz|;qcTJ+8vZg<#rch|!0HlyxKZZdul2GS{XoZ`FWDaQtI$D1Q
zGLH+z3kfOmht?QKQ@}dy9VKj*@h56N3*{hD5T4Fot%Gn>af6&do{FqJ9Q7Y_s26<O
z<~MJ7pH|OpDAbMukOlfXP>TV18~6dCG{|epGsgVTh)E&*DjKeNG7kdBHW(W?R0FQ^
zL<+;(0U3a9@mo#Gb^HX!#TBLK?q9{P{tr0v{zgV<{WOuDF#2iUtrrgsA(I==isp=L
zHYc@$T;{7#`7)0@sNL!hK&1m_Z~T0O8NkPlI=+n6bRDb9x;|*bY1h9DYPkQP&nt!)
z>d919`6smIKb=>MQ58-U@z|>R8bVEO%Xlv-M}RqBH>NU>T(oPbX+MstirI?*3MMiv
zn|Td;)_xs2QGymjP%HCKVG>mexjCc0yCQ7$Jlrx<*;-H|dh<^I$O5zp#0%H_F+am_
z*{kqj^T!ROPpl~!anx~=3#Qe-_v%G;YIwmE&=z~){(wU~sGcQz*j<e+9vTArb{cvS
z3}Dl(cC!S<r=0%lCjv064fyqZQ}p+69fRBKd6kr;c@laqoN9k1*yLeb)d);7g?0bk
zm@jzXt9rlx2#>=bKzV`0#U>Nu1yme$zqmym2J3<{&CMq~XS<8C19(ip2P$0697au<
zkSY#GuWe-@Q2dBkza)LBs7Mh^FQ}CDC%Res2P*Ar7#yw#@mO7NO*as37%K=cEC{aw
zd;BOIl|tJ~a1^Raou^{|<f%@0_DK7Xmn0tf>qGhI1&GTFj|9|d&@q&0FRs}zL_h=~
zn0r7{rYZXmvABRl6|!2pnT;^r&_>E;L)M}w#qfGrS4ykBMho8;<Hiqe*7qaM*JauC
zx19AaO7lByc6Oat&&HHuDPdXlkg!kq7~v90DS%0qYXMCn*US)vdI4WsVq`Dr_swGY
z7^Q4S<*+wXyfZxhRp(KY796NCU7Z|kniZqe{OT_APY&PmkN)rJL)*C~&0iB*nzclC
zI?q~q0;a`cZ=a55r0&qi6r0g9?6(VhAV`0sfgZ1Fy_C#Zaez`Uu{HI-B(dI4t*dGm
zQH<Q-VYwV55%apUKS$>JZJP<~D~qah&N&bfyuyIKS^09Um(Zo~Xs8kriqI0~tw|EC
zJ+iGOCQ3URrP{V6Nu;;NR#zl)@-IrakNRrC*|hmR-ronm4sp7_6?nigc$}p(Jvs`$
z@7VG3O?Br^aA-{UKaN(7M6-g-z~hZ02v(Qv=jMLo*;RdsGn9VI)%pM4gbADukB+(J
zsr*W&r@S-Hjh`b`1;_|Gc-CNAs+I&L+xrF$9?n$q!Pk&(F^fW^HUA3@wIu_DJLj*{
zn%n=J35ZtMuD$;0QCgt?zcYbZccSgTU?swc?xx&4zn~r?8I^{#mxvzn0gw^QA#~zo
z5qpa`o3_>u24B}@9HmuR)KItj%Fbh_r^jl%#a=KB4-|t3i=JM_O5U<R9Y8tRra%~I
zJO~sy^LU)<8&top&KqjNdGZEcG*f}GM$?%)k*V`0W1n3|c@MjnnT=s6z=v!v@wdQk
zla?zbl&q&44#UkC9Eo6e73sISR7b-Sfd!e<qI8N&BlFc(;4;Xf8HeE}25MW`@<Y&i
zBhFw*i=meAj=an?)d`MQ>gQd6Wm-WQeSsgVjRdL?np;pA={Dknu<%S<*s<{3bX)le
z{cK~mIzir@k^xdM-g1ksgbva33E3``4yk0&*rQ4Oq~5{I_L2Y{;vnd9!YUqbtbsxT
zS}7`eal~y?0_c`^j{bM8$8wJz)geBlk9RU4BNX~kRyz><<^=3D^sxNU<xy9LX7Mp{
zeb<tC$4Ngnnu@Y@^y;s&{NK?8v8~?J_aSqB*ypFE+}{Q8|2!=-ncSTWQ~-cQ!~dlo
z+L^ikPx{i_kxJyw@eMUmy@UWM!tIl&-pyZl0)z-5TFrXDA&6GLe|wnZ<EeFX+wo@n
z?mz*r9O`OjetU6ob?WIZ1ivkEddU!czVK9fbrC-+w~OIHO~BAc-hD8)zMTCL*mi=G
zJ$p8*HFIV>{1MOdnM%Xw0WqalS7DDA&T#hr;C<<HJUxWB&uVJ+p>nlG&UuF@@H(gt
zbGwB<h!c?NA*w<+Rnbg`;n_49Mfr97ubTpz*50ssnG}4<zT6B`hu8`!MkXCVTII0^
zl9Z0=Wj6zi>bilfeT&x#y#f7H$}E0^apbah67ApFTnDIBB~xDKd?3;B(vawzj+xZ!
zowfFhXFf+8+pI{J+lozn@^Z96pIYDZD9_dH{_8aFB`Y>$*crZVCsB0vnL3^u&Q;<U
zn}(001mboMeEn+)W|eK4+TSNjQ*NtQu|vRG8I{)0nfJYqlM3jzc)=GP-ls%Yx94%L
z_vhR3o#gp8o}28(T-Bc>T$MQ2cy!y9nE`koy94(#8Nb9ty^gy9D%+-<phV;92PtEE
zk5g+jk9$h);r)^5ba^-wQx_2Y<nC^?$`h3ph=`l;F!IH5tRR2faPSM*>e$(mL^q(^
zGDm`k#5F0Gr$KxHy1+HhsCC3H`B5Rj<F}}aL$M-9U=%sD$mKwG=Yr-_8R8WK83jNB
zlXXQZgxlR(RcN;Lp~}9_(1LX%L_o^qZdOwi7}}h+#|p3LuHh=pN_}X9E9tuB>?-oz
zPiVfE?y>gQ11bcXvg*M@{RBZOBym(48EQa)m8JE3MgR*6iP{WeE@{R*u)X_R<W3WM
z#p5vWDP0wN?5Fs)?Y{kwek&1N?8#}OmvZQ{uU8#)!(M(+arn7V;DzT1ITrC>A7l@e
zAb-OlTMJ=10zf<%r(3lU7iLHXcqsn7lPm+XiSV}en?GuSQl(rtNCi5*>|}4?Yq{~f
zecGSU{E-hk9*!RT&v#S%fPHtPhmGObfigy+v@h5eTDET-t*obkKO(nCngvkO2g?ht
zyY8ESo7|9y`6<du=L0ebZ-6s@4%UMJFL!nRW~%~VUl5jG3=YM@dXpY@-5Jzsmc=`_
z+y`RRjncfN?pPoPZi5^9gb<YgHlzSzbyV=ZNGJ?PaxX?EJTrjTG+G4Mj%8n49V$(F
zUm83dSrL(Iy6tpCu%V`RkehMviVBk;W^i6iL%)hps`-7YbQwT{Z_)b21(>oS^Pti{
zh_$4qaMHR+-n{v#*F~gfsiD;Zd{VWUf_D)Xtn-y2(vpM;Z$MKrk<cn|hkK{`*VoJ)
zftmS^StQAF#u>20=4ItP!1B4SUqSrHOmg`G>{w)@07f1LxW)`hAnf;@!v8UQs9ME!
zu)D@SZIG(s4<EfH^MQC(I&{*LIQqS+Hc^q#xL(q%CuGwT_MfxNUvUO2>W1J>GsZun
z-Lv8a*n@dXR`e)Ny%UK-!#SyF(~u)Sj)1Tj6!$+&R@C03d*$qTVgpjot`_{&37B!y
zuE=XCEjK@e_iZm!@!I1R=Tk&av$nw$W8bzJZNK(i!S`AR*HBPexzvTJV9|}T1U@AP
z)`vS3Ewtx{vpJ_Fjyn{xQZSw7L`L$zC4qaJBRDDp#eTXJg2?VTl;ik;%S)bSiS9@y
zsPpx_eggltDL8imR(eSbc{BgK7&zY&40A~}t1b-jP8ud@9T&p{5jNXl(Vi}k-1*z8
zl*r4z@1!0x8E=v2p0N<9!VS(r0OrU}Ssn9=@y^J^Vf^5OTK0Gb?hiLCPJ}*ftl8m?
zQXCvO5ZDDE?iipWGhds1v!m;bI+S_0<|B2nrfP$j8${5J>CYG<XM>VJz^4zM`3&gR
z9C&^~_^0fiOr)7Lu-ZJvKEb3Cfh@Ca)a#{QHFp!+fl~fsMQg*DQo0&#5Fy;N=2l<F
zmmdzrP2{Um!JF3?8E58B-ErMcRKmcmizY-Ft2@qJ*ud@&<t@Ia0?Z0*fv`ZzUxGON
z!U%#O!R5Dk4QZk~StY&<FL+*m*D0tVG{~C(e9UJ<pCKc)nm?W#_c^HJOB&$KccvB}
zm8v-vxfd=3e3tA1<Bx_Tr2NV=Y5a1T!1ir%VSnF9OjQK6uMUM5@#*Sg>u=PnVBp=N
z9oC-II#QCN8AwiJ@f%C_+&1i~Q>edAh3T0GNcKnFX_Uu2v~~jT7D97^7s6z)6=e`2
z0{tKfw*sLS8o-=8Q#-or?YPnV{)Jfz!1-KGt16n4(k=svO1f{ykEJeP6bh4xxdAn(
z7fP=$l9tJ)MXIG5l(H!^F9F3w)}%rj@2&gXJCeNz#y6v5w!AD38eiBuk%@<9>V`ba
z&9pM)Tqq8fn^YeTz~a!@1Z~@aAai8cp!=7-YhDI#o&n?|;=<)AFKdikBcn@2-)cWl
z#YG*}=FN0+Eb{WUgnO-RF~KsR)04W)%q4__=bGSVtJ(1{y?ViN`pAq$fG7W<(5$n=
z3f+~+6+fv^WKD#;c4S3AK<$0l3VyW=-uQ@cX6<sB_kQQLAf|Q6yHxy@B>Iwi85R=#
zMi8rs9Da8X463GTSlU`%Ql_p|EwwDHnybH?8<rP-prdF~(|Wg71{@NsXp;=Pwf~}G
zYF5fjS?B=c`E6YJqS)B$8xvjJ14Zc;!6az#2NX8r)2QCS!}$=DHfEB;-jdUY;kp%z
zj<LsJy`OzB?+$Z@1GvnJ=XG>n#yhfdhMq_WXX>FV2T9rLrCTzxdQTs$3qU{3rr|>{
z{2oC9!T8?7geUUAXdK=3vjta@?h4Rn!_3_!zOGxa1(t*IjHCvm^Ph)aL7Wo!qN^7`
z;sXrF!u&8h2VwITuyPH|D#794<M#Op5)cO$#IU}gAsKQ!HnGQ&WP;0%ZtMI;zAT$N
z#Hx3w^o^K7zJ8mq39(02ZX|Rw5QJp!<1Dt@wR$q+Xmb_rtlDvJ^)&Klt{Ew=g~RT4
zwZFsZJ-QoCju%p7`>3INE}pofAPDY)7M)#RUWr3_`;nE&KKPxnIcEX9ku@nwLi8dG
z;$x2`p-m(Eo>u3D@!grsV?m|3<(;V(LyR$);X@~EUBcINuDWbhG6zgRfDaWI_!8Kn
zxpL>{HU8Rk8sbUd3_wH!hif~RrQ^oYc%=sJ?4Jq7BET`e)jgDu$?>`o(^6^TH2J~7
zfK3a|E;`6EJ9&j5cgitmB3=7$PoX4ci5b!6;-o3Kek+S~*D%YX*C#<D9z_#2^wSgJ
zNUF3I&RQ>k8=3P7bQ*0+lU84RiEDl(S(C(B0);o%Wij)_RRc>snZGH7@(D=sR|y!q
z7(Kii_?3poXf^3tPZF@dan{;6r2^*l2P|$oS$Lf~c@Up^ad#D<Dg<Z>)gv7@4?*t7
zU?pKFe%SqDhA<6RL|=L+G$m_rB$UH{KpRXtl#>unrJ7K-8aS69h8QG?U*HEKdzi{+
zM4)@Lp-_k^GJ&=wfn$&q(C=*R;Sr`_Pb{g+ZiTE%XG0~k91s~Ji`;0?fKHspyNir7
zpOoZe4JZ>uGJA(`DTp~-`opZoul<ORo-%od;`Sr+ozc$R3*b4_9Yo31H0A^1h;vu>
zD>f^KvHV<wARW)x=7Sm;rLbXb|Gg)WB&68*%Re(QUltf!1%y6mZow~BPD9T@Ld43R
zDiPqV0;6`))wN*_!UpY}7ipX9NW@Q5M976Sd25gyC<0Pa;cjD<j9BpD2Gi4UBikAW
z7n|d1=ThMfoSO&ewf=|FVp@!-3k6j|w(oH!C%l1MY`gM?L!XmmK~X1sBIi0D<^cLT
zz8IWIz<=w=KA*ueI0mke5*L4C9N0y@ri23iO@s|Bl?ow1CPm_Y4tHyv2O`9+0V#5I
zLa6)-5(Sn7R_|c7Tmj2APae_JqFJdf5kNoZwv5?f2<MzMkf1+~SRo<4;HkXE1(6L~
zP689<fgOXBo(p~Xx?SEcfpEe$kX8Ya)UgxJjX5sSJj_!td>WOq7U>pn2Bghw`U`z}
zlpP<FD5?a9l2#wCL=U!@cvSYSdiYX#ZA9CB#$*DG+z4{FEK{uH3=6VfY((FWVxnXS
z%UV7mat4JZjRd0GInxOlHJkaJs#WB1!LDbyLc%gSJG(v+D_i$VRp#JOWpgSC|BFS#
z?`fiG4Fj(A^67-cCEyjwOm6WL|6O)nnwO3{K~acHlHpIP*7YW17W6$%nd05XZ^jjE
zB;3<J!+avaNu8dK-#sOhPO>n;OWZX8c9Yr!%fC{lPGx_j`r_CZNh9^NjU>t9+Dc{x
zdu1wFG=>h=V=0tQ!1BY!8KGYsSD~fZ?@tagN)5aQ#eV}m+y|gVC4Zr>2Xx1S`K{At
zS;jTo%ZEHk?1$8yC8R92;+MotGeTj1WS1u)m#(^eV{&^XKWiFdZHD+Ki3OAjfqs<U
z8NDt2F&J&rJ{v?ADg2Wgow&pg@{+{5`z5VaI+v8EF;@AMy3d}KL(o=l2Wb$09OrR(
zYbo-+YB>~C<9yyfwnx;Hq|XYMF~P}SUCs#|u8QTTeiNaAEAe;Z5`j?Bj|QF#&Rp1R
zdT`}xQm+00mgDt99on6bs8&5QmX@nys){k(J^YkYGrTpe6KKi)<M{S51H=?qxqE#C
zlF1AKyV$<5^?6BwRj33y&U~zry&yRvW?+J(%iQ1(9^KD2bo!+MB1=eN)YlE_ZH%{P
zj*rh>g<X@x%iHY7<CqIAe&-8WBgAQt0)j>;ZT4EVbjAUDE}x=JPkG?b>ImuiHL`2F
zO34IKGSmf8bXP@@C<_7z-)~QQjp{rDW~M!Ha{ziO_fpUSXCW5QNzJ~=*GCt}bvF6<
zx`aDdHRflSBsTy0Etln)0guXL50Y1{Vm(!#`fwqAV<YO9F}e-(KG6C3nN+Mi?3#~C
za@u(nJVG`ts`f&%S3s1v#Uq0%xAPJ;nQ=T1^$wVIYOvzhzQ~yilbWf$O%EG_6nwV#
zWVF_GNr!2dwa>ttCOCtKjr385Etl^%33XTwc-2xRhjaRc%{>^e2D2*i%&|e&GQ<w2
z#U5Pe^P26m%FD|ew>7R)<0b*Su?kV2tef6uM;Y3PQ}|I+=2k%RUF<qZhEgx^n90%M
z_WW+og7QvCp)r?zSmbLxt8#S`!$EpIuE6X;B3f<QJm!}JQPr?)(2{*z{GpgN%|R~F
z?>8zsgG#-75_#(UU0&L1)rjWnY%;RN?_<5Maz;w65nffpS(v}E+><BvGjs9RvzY8i
zKbpiVFpc=Hch4g{TTD0CVU(ur1Ou{$gDpTG|4a$z6)p`CXsU9yfp~)eb5ZvKQMhXc
z*ioJcF)j^||Ja1hDxpAw40qnniE{_Srt>M&+U>?ksbh~{OPAl-S!A3;oL*q}E&~W1
z`;RYLE|)b#OAei=Th5R7gC@{K<YRzdUf4NwmRV%`EHD#gy<`O}s8p9)py<mXoi294
z1v!^M5AeT$iGMoQ=xWk&-FFw97f~Dw+Hmu)gEOXM`2nzl5ey7;_B^5h=HRjRkn+b*
zd5Vdewzn_C+;3t=?}z~kxm!%x_z3>Or;4GY>rKtnZWP9EfXqjpTHvk1FQVa=gjSWA
zMOA{dxkGBdkNn0AE^^!_aOn9cW0&O#eQZ&E7@AfyooRGh)U9%>=K0v+q5Sk6L74`y
z<MjiKb*9jf3R7WGxJw@<aTxs}xz84At{7L8b~NLQJmJ2;%@}e|(+-V0EHqORZmEz^
z?MKr5<2zRYi0cYLQ@r)i6Kp!6Dh70!Zkg;Nj5)wa!Y_xfdl*U-@x<(D_ijo}#<Ci+
zsW_X^(Vm-3Ka+ZEfm$QK+|+W8213ncg5zWx*)V5!?A8j$&Y^=-hbM^Wte(2;_V@P{
zvG?^7PP3V9xq>d}WmbvaZKf=>J_7WJK=bEneB~QZn|_TZxwPbo@}2)w=q<$c;@<FI
zp{WbY{RfJVQ%4f|&-VFaLQlec#!PPN;6C;cIN4{TE25$RR_W6~e)gD4DRWY=#zQ?B
z<J1%g)eZ^s<WkGfN^?6S)-LpKpRxjM5)emf!Ty@#X+M<D`3)qrI}nIX<osUQnQ$+{
z%Mp##&c7J>Pn+xfC0sAHjnj^gJLu|J$7${hIRA+nsZ4xGjmb}0m3c+6WL*<si=gXm
z1L_}pFTv<()qe2|owN#R6Ao^JY)PrHfM5gY0*g|YfFvg-h+tLm!p+Rgegrh}U~1q`
z%z`UejZO2wOU!5wT_^WJ#qiOU)7J6i_HQTToeG8M2@b4-4s1rD4&jO$$>rsVaV*%{
zkQU`*#9}JQYm9oE)8TPrJo<_iGYzcUGg-nDj_Q;;Ccl~+ns+iq!({@RYTO8I8>Gs3
zWwLdB4zQ(s2Ooy5;rb+&>G-O&We`VeO6tR#M>D1&ty!g%@&gd3OfLByB)_F(baG%9
z{^xjmIFjjfD^+?y8yy;omjhTjdQKM#Z4jf`+=*L{3+IlBSID7=zb8?G#s>fo$JcIF
zKQ||i4qiIcwI|L<x@YD{6ntuRje*!H9~oac{Q(EeYm2yWt&+1XP1%!K?bnhy@%#7O
z4+*8-!{93!hZ!_pD~X*#Yc@xzHq(tQ8B^D%E61&ux_kF^JF|(vOZ)hK+QB>BBykxf
z8iYl1)HUv6*r4cIHfVU}UJ_8cI??ONxcjF67hB&HoJqK@8{4*RPi%W)+jjogwl%SB
zTN6%f+qQGEtIoMt>+FlJ?yl;aukm)(!=D6EPbWm@TP96dfr2HXM#Sw;Am1j`dr)TP
zbV=V(t?Qm>!H*9KQqRjTkL9k?@`y@D*ClZ5gt=!t`X@R1^0SAvv^+H;#2{jBt0c~0
zI{8e`%@ih^N^WZjaoEZ$3$Q3lr3>%wQ@Gw8t01Hu3S8&i=gup>Q0)$1C~XFR8)5ei
zNuYy(TjTf34#1Z$>byd_c-EsQxDvZ<9qtQz4V0k)@P|(7g}4)i{tP;>OT}M0uQf)X
zfKb1eQ%A?9(h&J~O?;)S`>3}}gHKz+0Via6Dy9!tfi(T>spk1XeS-F@C*o90J;KF+
zZQVKQ_0j2d+JMT}eu>fwd`wu!S)bavAm_!!<3l$K1If<(00fknglv8EC}l3)zI&kb
z#^4*UU-L5*jypvY8~o7L!}@?~4Y$)J!N>-EPo*2x`i>kdGd^=B?wtC1m#*CE>L8HW
z!j-8pLIHp3oub`|Mp<ka8c6WL&bVWu;s;uQ8uon)Mi}YLa)n&*OKft6`0$2JH35-e
zH2fj>+vRiFV#~r@)*EUhHSLe`b!#uy903cBb}y@bsQpK+d>Hj7YHQ+(=^3So5IC+W
zAwC(kOEesokLhehJ{x=EXug6_3d)I-XjkgTy^kOrd}S6X?zyiRxX!GKEMCQNF0G92
z$|52aOin~mK+BD%4gsE8bVtQQFWCEEKG@2$QP-XyA1v#SukF87p#Ov8S~RO_+pjU9
z`Re(GH1{->W%^$QHbo_)3>FBDLT#?-Rk686Hg6<z;x9IR_ZTPPk}2x_6@lX4e$K8$
z#N3FZx<@9S!a;q%N4(2Gx&(;@3?axj!xDH1$zyv;%}bz|A=Yv*;)boJA~Wt(4`SSj
z&|n!FiLCBd{-dmRqRi$>f^J{LmvtG`DEMmGZpegaQ9BwM+B_2S^Nbn;Ft_d+8bAvE
zvQ>n=ZwbLZZ)KHL1AMG%O=8Rw0^u|Z7A{|h4lzAaXE4$^H)|v=BI1HR{A)w4{4*uH
zi9?IDBwqw6a9MMrYFW3Rp88-VnZLX_bwzhPbybyUI2i$*u)_ry3{5@(IvrZ-pZ?WF
z8!GA=@NioZf$eIDhZ`reGx;ZrHeBztS)24HHeG0n0Sr<?u#v(70M=w>AdWxZ;M{7{
zaZVWYFSE|@yUxRnUFl}xBjR_l3Mo@dT2wnrQhSfMJ;b07w(~1GjEN7idrN*TQ^%xz
z=udUx!!GEVzAH(8K09-%+`+$;)Cjg`u*B!-VmlIzH@$g{<gha;ljKfOUUIn4Sv|>S
zu25#Rd06B7!}kGU!r$K=rdjE&0O*qb=BHNbbekNgQtfiJ5-Xf@;ldRXmR(S-Bg#%H
ztDHMh2{Ui?dF!CYKh)o0$3dsDOczX?=NWK^uC^5@%^2INCF{`MDh^_Y6*qu`5tc5u
z$-7%UFGs==V3>Qs8#=>il-%tJqh%%4fG$oH8(KkIiM<RoTJq$pR4}{0O2QsTTaeL#
z$ZL^;?Vj$=$PyR8Yj2}U4i#{^LTQX*bb{5~=#Gx7IY<k%dNnvN{t~^CIg09a6XOc?
z{m@ko3qd37dkKQQx(wmr#M4aB=>{RuYGWn?=pJen__K*sBk9VAKDAVIftjOJ@?m<7
zh%%l8*%IRvpd{F+rr@!|+KNpWA%A-d8xxaVlbU#c4;A>9o@WP5WDz4v-oy{L`}?h$
zT_D2A8)ZVu#`HlKGsq-Gdfs`6X4R9$3_36F<{4a2S7Mh_q%}x7e4j1=uBcSQlvC>)
zN4Rcx=4NGe1Lg~X>M_AG=np~;FjWYWh23R77T_n4YmEc?L)9QAc@(;LBwb(Dk}B0<
z@+!u{y}#m1DJ2TEu$B*V5>8R-r7~o236gPsORmlzlX<c8T35X2c`#3Rr_!LWz7JeN
z>+NbWKRKPDw>s6f_P8&_0?i_$<a`XSMO891X0<)@?gTboTwpy9(zdu5gST|PT)?O_
z%{<clZDjw7GxqH#8TYv){7=qiWd#Nk@8=_rll=b}HV0c1eG3!gAKnI6(`jGyfrm>B
zm8O4+vp*mpaEweva#Gf+L3R~^r`U;(OKmtcn#|m=9hh>5ZX<&BCRK02YD@-&s?5lW
zCbdJq_p{r#+v0JP=`60qmuP9Ov+thrtJxiu%o0mteUN?ron;KfegijV@9W8da1C+*
z5yBAdW$)y27WX=+q_uJ1hs^hN*lzn3ZC!wY2&N&Qa0B3r0zyB489he?sRDxZx=S=t
zox1_pe~@Oe6m`Jq^5rE+VzYZMN~5;i{34PnSg|G8>aZ<*p>frcS-qz)-o}0N(QZ$&
zNeAO21ZR)2WV(_IbwokS9IPz+HPEuic5Z5@50sY{K`VORe}8p$)@cJe``U5n%7B2V
zogG`oe(pAMUMNz#{&jTlaTDY2dDHxox9S4?BfD7=Dop`(671pTyz<=KpHE$}<k-BL
z&Ucq!yLNM#e$|}dc{}U!Imm9B620{3CK@}Up^}!Gzpn|bazhRC^?5vAOd%UBks{-8
zn)~YLh}`s4^jnE4FgOVLosOIC`x4;a-x(}?K};R$V6li>u!_Xv3Ixtr3k=W&3QTD&
z?YnuUtzEQR-f=yAxoBJO3uDKMeF57;4btO;`4ISifrq%Nezyh#nGQ$P&=vC3oI8iq
zI|1!3gTR-ru`(=4phsx9hO|r_dL~B!lFpL%Lwkr!j$RV@@IQgK>!!nl)bk{K?3O}#
z4q~eYj<6I!y?8Zp258S5J?tNYNC4N|ie4ZS^^lSha6Uo}072ItAU%F;k-?(WQxh7j
z+_+im_YfzQ9TzSgW7c*}$Mp!>))Fg=S}0W2COCK766yopHaough-xj_ETp6)WXh80
z`CSz97og3;+dBdoae-C3#Z42*_i*tHiO<!qRM}g&tYrQaEZpXqNgC|04v#^9ap3DG
z_&RM-d^IN_`J565eijn@*JFmO=Y=bw?K7<%?uJFHnN?$}8S$~XKW4cHsRfxgeX;2-
zvGBsIWngd<Gz|PZ@DbFh5E<xclz4jEqph9ivv9w)$J8VC<*wD1NP)rK@NMOP6>50Z
zy|??kf<!JMi6mYgW(H4XDLQ~Tl^(Rmj@*nwDEQk`$r&{~3W`*yeGSn^ySWV|k{7yK
zi?C+yp%=oD-((+@0#BjFVG3vJJ+lwHzCgi0Xc26wjFZELFn_~N)46a16KB&5iFNE=
z>hB4y8VwOeR|hjj22Ue-UkPe^f?<XsqFv7?@*u^Flb`MRX`SXR;;){By=SSm_q~EU
zaBYeg2SyYzdlN)}MlkWWk4RpWo~(%kjS*cA;PDjACt5JAO(e@_Gpv$)MnybbaK!C7
zN=`OuT~x;+FCoNKe#vHv0VGJ)-dh@;67mn{L?W`JucfuM{}U!4Kx762i|AN?Zo|iS
zmL4976v$bDRZ4h+@u>MgpgkiRdD1<Hq`4vj){t@s5lNfxa*w<x;66DMp&wrV{c~B-
zF@U+m$k{2|L&THT9k*Q8!Z9l6(A)fQ@1kIyyMkKYLY(8E_33k+VzycP)*bh3c5nqT
z@BK1M-DKeY>RheUyF&%+DbwAo<Ku1rc!g5Xobwz?F&!wxFj9^sqj*7(H4Fn3v8f!b
zVYDj@45C$%HW%x%DP1AbAH!#Q5!U|zRJwtb*oign0J^VIB|JSE<F@tw=!IU=pg?p@
z)19PfqNvX_DqTdbYp`i*TjjTb2n)12;={|{tavDRay0-IehvaXVUpNpPu9{KDOS9B
zTrx2HU3AA9>$M+$CjH8ru7NWs&;d<!2DWwuUv2~pr$mF^$<eFd2P(W@5u21;hy~i`
zq>`Bf0LD39b(H%uS*Lp9L2T6_W7Ch4mkb8&h$70QaSW<62H12?U(j1@if7}+JBUXS
zFftZFdvT6Xl<ev5LGgMLu0S)Dqe%o%P&y+AzD~?Y%^*hx-B}y~tH)qknq!t4H42LG
z2q3ez%#kAH$$Au8bxYAbHf&&)*ea$fX=4v@!mm<SOhAn!C@V%DkIaHJ%Q}J`W|B)X
z*Rl~9l@z!c5`{HI0Yl!rE6KieR~^5$I_{~n3<O9qUFc7)%GtBQ-$kn$v#01G!1kcT
zD*4L5-=|ytVHu}jp+H6CFK|wPq9Z11Q&#R8`_-NbV3NF71@hNPs9SjeoA1e*4r}!v
zQ?OWNWo<3=jW;%tikyTk3e^w+I#4RKvN-M4&i(Qq)7Sn9urmrBrob!pbrsIqO$0~d
z{@w*lD!1wV#TO2Vo6pU;lXWTY(oUuCvB=FG_uf6iAR1frnCcl{VoZrqjiOny?Gz4b
z=)!5I&Qb3CbE0mQvf3Q4XL+`|qIhA@EA5;t;o7@2+5w1t6jm;|O>tzpQn(7BNfaxs
zv8QswU670xNSYd#gUIM!3>_aUww1a0hXdd=sMUM{fQ4Uio=={MbW#z6&QtWGzY!dx
z->IC2wZPWYW#V{jg@<IULS%dVwKxzs)>4$@szjDQn)#h@VlGdU*BXhJ^<a-7T~99a
zqzScop~d*GSSnuLf-Tm|n<Io|VdAH<sam?M0(c^1^);E9Ux>lfNFjq3%a~b}Dz#ms
zq6LLYJyw*)PPiV_`I#6Pf}L+<nJ8jJEqK<y^A-57&T=k8bN$rCB!n-^uX^I>tP$Cs
zq}Wme%yC=pe6G=R1IUo1d)$v!5WHXTafpvk!YChQPoS7kq%f&W<0CL#z!QgFuqAsf
zQ3=8~iht4|lki*r`Z`rcMIF(#`<>XS=k()5k>sqdNhF<{F1@=E7R29iLTwS?^;o(h
z!?Xh3Yiu!|ZqwIh9Hq$^2olo^_e~CNy{V+ikmcKu^x$&ARw>ZRP#7Pgi$^E~%XUk$
zxprGyTu{lqDJASdOtB%3LAK$Ic*bKw=)CUoC?KcfNovtwK`#cu1wGs&Ib}cUqNPpG
z<NlpE(OrB2KaF|0QK}EP0djfRs9G<^iR~ZC)NkZ(Jw1XA6*F;iN@Qu2%J78F(Z<Jg
zbK+Xtv>g}2n$;c(2IGj)8ZqDG;0o_t4h>Dp;!oM_Q_Q0azchhCuHgu}h!L<x9Vb)!
zC-n-kipmt40Q07~zr5jV=%n9IbyviKH}CGYvPRFjAKz&$Hp}!geb?6tQgsG`d9f9>
z1!5bR0x{KbuTL_5XE%Sxc+RZ5w=AF7E_lr0?p8{_i?tWQ1Y?=@a(IL~L)r3{%=-#J
zd1oY<seDl!#5~*2<DjfXvZk+MR`dHZCAt4kNgD&BYZT=6!viU%Pq~et6;zfVJCK>#
z^z_+MG`~BC--3!B6T7zO)yAtHjd(ZP#5F}{7&z<QA0AhXBptC>R^+BC>kjCt8?E-3
z-W6b9cGcoPk<JLDJ+Vt?2Cf<2()Yg?m65X+--Xv-`Lf-eE|+52PwZQC#9O0MIY!x&
zPzLpA`WhVXjtP66bNb(b%6}ZaX@)KS{X!6^9U3XZ?;9OzUDs7>PPg<lYB?<s)u3Se
zFAx*H45c6NnX+@Z0Zvb00sL`Z{hpJK0V&LPhI$h|E75uGtOY8EV2sPi9trwAXoShB
zPfUsV%cXmF{IGKnagDg-)^mX`qk`hSo-;jChqPRFDzqIfjgBn8e5Cf+`U><kUJTxi
zQ~8(f437osz1tAyOqUV4MgW=UqTs@we?faBs*oIbv}?q^Y9H8*ow0;_W+{>vZncE4
zbot6A#V~Rk&c4^Bzx<?$6;z(5Wl<-o+vr1i)KFOVato%y#4Fo$jeP7)FT@7mw`g1N
z<-YV>QUZ!wp*=f{J4U5NL`H9Uk>)OmFtUzTx7a7QPke{i<#Ebl1(AeqYCG0meICft
zc<6znTEnWQ0v&*~@hZVn+$j*JxFN$f_wQM+UrvftDX74cL`k9GBLQ4;j+Dl;Yx=OH
z0r!~9<nvJ*{d;nA?f2k}rwR6b(}(%e{C~;fv?kCFscmZ0FtsVLTAl_ia79{*u!Y@-
zRq2@Q@4liAB2YI_WVkUF+kv%7eqI|Lhf!g=_VurYg3_e0aRiW#K#@DTP5~X)uqFUV
z;a`wrnTvg1qD~{NkKkwZ8&T`AXqeo?qV!KO-n(6q@`n!Mf}PZywqIBZ4PITZo%w6$
z8^F(ODvfw6T)yCo+f`R~l|EJ+x(CN11cIbsTDS&X)3<i)nG$iun8P~-3_<sdelBDN
zD}k+?yUj-P1OLRu1kkJt9!9}$j(fM-FH?NGjiU~?mVUu0@(%s>%On$8=*0{=wJ^EU
z$zXFr7w0#civCAiWg=ju)Lo3b7%9u5r$whiuC`zLbk|1%3e|b!!kwWzoK;0PvU?2h
zWjfm&bms<ab`?BSGuR7}%#q2k+4BXGjN$(wKVMtIgv6xW)Kynwf$Ql8q&KR*;P@G{
zN6fQO&SzN5L%C^f&Ie85(q1j3iA4P+Bs+lX0T~)n!U_9RG*N);%DN7z278w61DW2F
zS#*tw`sG%WZmL?ll8o4t8#}%F8GMa69$W3R2s5;}Z<o)Ltw$4Y_Sh}-j?4IQ))2_`
zVxl4xF84Bs>9{9F$Y2??bm0v21p{Y|`o|R;ZEHcu|Ca5JMerS;bZy@Ya5xp32cI85
zj%wcS9KkDd&rI<g`xIxT3o>bciHdd>N~pr>uVXrB>+e=Go$aRAZ@+$y=eLeBoy=)P
zG+S^!A6c<>t_nB!&y+p<+NtWy3om$=<>?wXu+|pqA9fF|+==|0%>cN$BU?_p%dW0G
zB)s93?g=aY=6TL}@?v^kINo1f$j~PQ@)%)812w8U7n!^Zcu8#`LSMz3OHFH;{c*8v
zQ_B==m0CTPd&TCn?D~yAxEGzyq;?2klSG}3vUUV%v{QJ`D~G9pw$T$eQ5;d(xK6DO
z{)YHtlA#h=g+L!2DiRwsxg@+<gt3C|3^^&-fVl`GsuAbi&i&|Kh$(7tgF392h~B%>
zCJ4gH`3(Yb5FiMl2D5Ef%R!^>o1Gii37!B2iwI!$%0r~e%HEX5jSsQ0M}<)|mOx35
zzd@6v2gPPwm}(cCV;9CH9gX+%OO{d|5}Ep<gg~k4P+bgp$o_9UDuEVhYA|q_l_^ZZ
z^4&rxWzCh*&Y8+qs$+R}BN@u9C*y*CKA-=zIaRIwq`tyEfa2B7p`U!aRv_=A<>c@e
z_-I38I7D2_{e#WryG4gHLuHmZbhbfyA2gq<3RgpAmzRb=nG!MQN2@+`oG2zpFExUJ
zGi%RYT}%fvIqq+-^XB=$A1;_ZCZ<>Z)=4pF-A}puL7QHy&^h1PxeZ0xA!zz=5rW}G
z@etDjS=d|-vOryGlta6zc2k@4=c8G(Dm;L3V(!$;`HOkAwt=Nr;-aW7r_SVA%L9%S
zaHUI2_AD=Wyp}#RXF#lhfR0{(5sstH83@FBa=>1d*yzj+|0O?$p}#TdXh;A}(2Rs1
zV~mz;374x@M^~$^7Ht5Vm7t9iLzP^&(yUfEOGN<JYr?B+tb*)!{DM81&0GdjlE-7v
zVOUhL`}hR^Pc-2f7--A$N3VPRgC3&&r%Cw#p$QhQi82ZUf@mSjPwKKJput!KkyvWz
z5Hf%@YN|@yxr~yeKD75doP9s{5~Mo$&U3!w&f}b>Y6k_yBVwSt*nTY}$f{i+I-o)l
zR|bs)Q<XLccWBcSjyIFO0YxzPuS<c|QoDe3noAUh!bX(_95^Q8vT`m`XK4ox!*w<3
zX4Uz;TW?!=L(8=zy08}42M;k0u98C5!e1J65xjgkdxi)-jG@ar(dK}9C#Rbrg!LEn
z$$l_88wB*U{m#QK)?w!uzlGPs1MpFku7wpn=1vDX8|xe8-^e3d76z3Wq>G(Iv8Pqx
z^5eB;qK@fp%-r92f~~h0Z8snfwi{%8D(b@e{4E_8Bcj_cr`9}<ZRoS6<!td2mrlPV
z_nZNx*{f^^5*UG<rKO;YA5M{HzH5RN>JAQ|<2WZ+4v}ZX@1sVzrE^xv!#@Iy(Tn*C
zoD|)pFmwm%qc?5h6?wOPM^&z3QIeX;+&4WK=55ySo?z|8NZN)0u)~~N*#CT1G(ZJ9
z^FKcS_8-OWf1|uE4h+V_8mIr0bNFQ|Fq~YGASIPT5LtmjQw(iYm?#o2TCQSwxB}b8
z&T+>ek$JGm<F)da!vIqYo&4`s_I{SVyV+9uba&q^B|`{KmNLuHzWSh+TOE^NXyaZw
zAwoJ9dv0gbH8`4QtF6<=W!pS|PaA))mHXK0?`v8#fo(ZszpZJX8Xo+4lZ~Ss(D@l>
zx!pi7(zD<s|M_s_KhbT&05(TX(LX5TVME2rE5kCI-c0a(lx()k1cewEXSnlBTK-O6
zgt}SlfMR8q*sk(_mh)^eW9z^ALiKV*V4sHj!eg-xF)A)CV7&+vX!dTn{D!ZvNkjnh
zpx{r#As)RJD+;rwb<5}fP@-o4I7HjCeMND+igU;r>vq-Rg+<#uDbKWfcV`_!7!@44
zXL$FW1j?MVU!ijHs&u%n1TBNy;RyeN%>G5JD~)?sqhl^|;_Y?f4SMo%El4=kgl;Ho
zrks(Nii$xxPWgL=Ew>J>%O@!r$c>OQ7~&~g;alp9k&1+FW%wJT2%G&6yK2ZzG>hW7
z!=m-`=pwzI-Bvtlm2T)qpE38koVl3II43e1AYi0=w<-reHJ?UWJYOba>S9;+fF7hs
z3I!IAHpRFg@~QAOFnM2@M~kFLnK8&wj2fe<v=ZWdV)@_W%C}D2A=z+QMKuTm@Plpc
zEX2jWzMjbaArbv9RtRl8%J21mRgg6gFo8H@(P#w5Ef*A4@XgHcJ$wd&;WOD_sN$P`
za+S<n07zieh7-b|#d2TFP<}wPtnKy>#IA*w?X}@1JA)b}i5FGg&(cIiJVt%?(C>*s
zyR-}J$~Y}AdA!GJ>E4+t*`IVJC*r*vh*X4?_giu@0FwwOnCAv7R0&lL9^ALq$J5t&
z0U0zXK5WOBAOa`{$%fw_?}Tm-_z=h5f0$(3Xw2k-WCT49*ki<k9s0HC^Yo2%EyB2l
z_=0y4q%SIyPAYA4{yfNCn8BcWa)m|0h%WwWuDc`%8f%w0;<<plSaxhF%w%~V^XmP8
zH!O>&^V4H0o6PpoclZ-eTOIxB0ge&#pexK1o%?s83QWkEbz+KrZq+v}<*-A}ZBb!F
z+k6;N&W-KgLzTDiB<}u{W;TJ@xDy58CqBUIS-MPYQ|Tqu^_|*VY!V*i0V}Vy(Cb{m
z!+v3`Xv?k>d$P)7A)&Z|M<V6Tj-qY#dL==7*NMyMZQ9eV`X9inl2lp<I%MeW&<8#5
z8Bl$}P;A=E#$*-@zehNqCCjZbG*ISm_PD{sU$o*CC1mVty}vcg7QwTe?hxkpO1pd#
zg?4ipW<A(<hm~O0tW-4=BW%qf>A-)29Z#rcR?jq=$!`Q~pam1a0aWzVXHZr56)M~q
z7{7!wF*{<*5LM60653E^lOOV>uVcZPECB4Q9oV;bW-1rrGTmcB&E=KsWc!w?3N)$i
z>z~YqI{dZN7kvm@hK^6|-KyC7+r244a0-->C|tk4JQ>_KCQ=)eFX7pdYoFB8!_|2Y
zQs=ZR(rjo0E1#tvsv~6LidT^r=z6eiUHc}!oQiTQ2(Fr52njBYs`vV1tEKZ`7?~PJ
z=N)VL;J6I?ULx7$r)aYn-8#Uaz2U$?8(uuQvE@#J^8|mAx5gEeXM{f^BPc{^DKe1$
zF%H=O=K%aNE*9<m`AMn$ly?0u1JI&4t{&#cSQ2V}j~2i{BTvIH6=*C3*-FZx9@A8R
zz@W9gwIZCqrq)mH;Qre4EiX#ZjH3))$74gZHBtwT^xFbE%3fC}s=Wv$KrZ&74mwYD
z1a4$#ijoeWlqgqdUrv6IRZ~6Of_QBPQI_X#{@l)ZM?VeYU!#S==}f7|_C8U0Tsk-(
zhtdr0-))z}p6Af6el*{?Yt-s*dl8kMg0z;+$vCWP-MGH$-gM&G4V|nyX+iE3>M|RY
zF*8<4k(B?r*n5iPlqi&+g{lDo{p^5#Y_*L4H(b-v)zsDWC#e$8T6v%2#`%s`g_{W)
z4CaB^R{5j|$+HQmM#|#UnvKFO{1}DDoJX+su-Dz4Tr<^_@@Q6`(8UwUvbl4uRn2Yd
zIcU~p*VBf+=edo?efvh6G4hjXC8KI_`*~#Y@vrYr`^C%SZL7Cm+V$ma+$y18`o350
z?hEB3+9u}jJx9ijwk>*hQbLC-S%j$4W76ECu5vNkxU$7Y*$UIq9P!pblu0R+dUfEe
z04r*AMu$&Fz95wHe}7Kd@T8J6liB+y-Je$2x*{%T9e8s>W33gsbOErnCh2hltCMO6
zI~5aBUFL+hNp|Ld)oTQetK&3JTjN1JW==kVsox!s;+e5%;n0Ee&Xi+os-HdqnM}{M
zJ^Mk0@^QIOp_^aN*{n~$@v#wrLh;=dxYNu<_`CK#feCv+^z_RYLy|5Aea6Iy6O(*f
zvqX||Tab(6KMm7JCj?yN!@TX?)ASLrK3JZeeZN4V&Po~50IOG@62m~-Iyru>z-&Cn
zx<G3aLasFpKy4uTPXfT6j3v`wr^C;WoQ^1dLq;o1mM)WpoJX^t+5~iFA4?f;HA#RN
zPkQ-+VJgEp_u0x7ijR4tsBVSyVEZLM$Uekp%>(ia|I3!fiJ4``41v3ql8;WkO8j_(
z&OvpehU?dxz+QfC+dh>>-LOS>UzW_e(1B^7@6m{m_$R=3(=)r@=H>k|CZ>6=dC60C
zxmIZ83ODkqqc}7t+oU<x>j9}2yT#y*>j9Vj8ukJqAM9h;JBQUwbYnV2xYVd^r`H;&
zx?p-16ZC3w5cNQ5m}hsin!~^CyLQZ54=nlL6+`Ehgnc#d>Vf-Phg@suclW^B<4&E)
zaCOpX{^{3SsQBe$mxCG3h)1ZTUvg?8aw<3H_r|_%x9n}-V`8AGIZyIo^e^2<;8Twa
z#*PPJ2FCtr0bc_8`b{fufxA{5u)1;l)x^Bud>nDSnML{Q*fIk{87!HDkCOqfkHa`h
zqxYLy#gaf}WIq0t6#R7#a~H?13-2T_Gq!84GYxm*Ykaxghi&TonSjDyF(?9b&V2A(
zFhR@#bNWS{@F+l>ZJ^)d&>B>{nMo7CHE>8aXcUq;^<c8&K&bF#_?o|0`J=BKm3f?;
z*BVn!rh4}c+P8BF{YK*AdO)xbZN+e9Y^wpt8BkG}_a0Ef4jF`X>>!lwHSDbNZONQu
z=|ZUicD?!${57NmWUFEZHwA4PSI@&;$TG#Uw4tyIoyOz35|p^0cfjSLEo+KT{)8)-
zkSTaw(0%EisgxKm{k+0{R5Wr8VO>Mbqsl+szGicLLPf!<9q*a`ASTOt5BDAK)-v$*
zj#%~U8SkmU67-?q>-ZL67=tX)NM7_Uwf?{lh3rXO3+dGjI#o4%D$F%eTdQ%yRu{St
z9n&hrB-81DOpDaa2KuE=Dmc4zG6=_j>P}%I)1I!i0&Bz&Vq#5vlu!j|=s`>vXn#mW
zl~^#K93>qvaJEWFpz?N>AjwqRp}J9`V2t3Ptgn9vv|hjPz;S7SRdgXdb!mjO(wPN`
zE7^$gt2*hVzSXuEHgp1~FdPSJ_&6T-(#cUN4R(gN*1?lOh!$MkC01$&BneSI^R+Vd
zt6SDYECZ6>GN_!j+IY(OR+7S1tfl&~Zh5uA*a`q8h<%y|$mWm-d52$?Q{?Sj$_$aB
zi+hkoN=gf5_sQIjFa2m!q^JF)P(cGurFxZWH<I`tBjY)XDUiZ;zy}zD6+|&psKF5k
z&es3_*w$1vGtm__0BS$jeiJLuB4}R7bt5~>8!6@ZbIMUPE5U@cvnywIEi(;G;H!&^
z$=+_@>&gu8BZ69!KSC))oU;M)3J<p^_q;><V+OR*H@J#$a`~^>D^=K9id2S;P$WS&
zI$#szqL_N2nXT#@sx+vZqN{#)sgocjgL`8XU<gp9nxIX@Vdr}fg-L*7VW8AR*ZyU}
zx{rKEb5uiau#5o7XA&j1p>+7?SU<O;2XDDYdr0H_64?N*g0o30A<x$)JuK#uqp4k9
zF;8fI-|^rJ-Z#w{+xfxcG6QpW<>AY6OY-y?4{lIJ<Xcs5y5~G!pd&6%lzMcm#kDGt
zc{77xH>Quhk%$RxCvG4U8Hfm%I+Uw_27c?$Uhl5p1;8Hq4mjapE7Xn{lA@T7w!`T8
zHg}Ps5p6Z`a8ahOSv+h$A7ND1?*$0KXKBrTdRc8>n_feX(x*O&g{Z>Zk;}=<PGhP&
z#0|A=Uh2X@rq!;u)1;&Q3(l-IQqN|$LWGktn;9z<r%p04Ocl2i*%nJF8s>WravZhq
zGFcoB4|db`;1qmJt*eu}%|aXVU`8J*b!2WY$abo@2z88ok=RXsAt#Y{QT`+tL9{cl
z$dT3BOK-#;=FJe*<3$F_+8@0if(rB>7O84+qnOfR=}W;|aJd#jH9fcRMN8@oB2A#?
zF)!7BW>UEv_Z5NoP}EIDrlPl6Q`Cnl`$_dj$zR~FHgA=X7>2OvSoOCOg1m);LP8bL
zZ$N<pzwsCoBgOd>Ur(V4Xlx&r-71qyvQu)pGc^mg&7wua<ghD;F;1eE5j4e2u%-|S
z3yFt`37urNW$LPc6-{R+P_9rR$y=Tf&vX~K<CV4c=OMV<QF}NGdJF+el6+{>3h*<!
zF-S+sBEdbi4UaP_CScMTwwNHMa8Q1q{4|g`L3*$~vGFtYqwD<Dxd{GkaIM|ia~S3@
z8LAHV3-|S&EIM!UE=X+h$(>9vV$y*XvPChc^;DSE`>jXS{fA9_)zeS5o7_-Brk&oR
z#gQqG6!7Lkmx-zYp{XXuo0tw+x-q0-i||O{BbrPqPHql|D;XIMy)&y#{H;M%g|F5z
z($$}Q{n{Qn$K;eNzEBkWOvc5j_^|cn7+%4k(p&ywb+tV%dL<zmq?1RN1{$4pR2Rl&
zZagR{E$!&O_bbApn6}@}4@o{-nktkV46tG<xT?m<p<qvf7+EZtKCd?{*r0^dewh`P
z(XcPL#yEnrz;ONYXv7=3#RHQO-EN2$&#Y!>pAVvpowJ70J<-StF%S~B9)=BOg<y*4
z0@lN;SaMd?mt$ZIalU(nIaMsLGqnq1C91EAY}y6wvkz}KNZHPu>qG2T01pq*+5%=m
z+>raJ?;cNyTE6n|As47j!ZNfaCz9-7N+pOlQIbI<D2V9#_T8Vn&C;J@wZKnjVS9-7
z@N&$V=9o-_9Kc3v8g0Qpv|EdF6=`1ajxu*dZs|#Q@7!^v9>nUQQDrvfyF*zFBXo3x
z+;9pSu<ii!vV%<l9ebhJJcii@_NlLwTuOy|k&PLWBa?e#al`Y`Izg}!)N{l#0JNj(
zN78k;V}QM=Q_4V(Lrv=yN{;P|gnpGGMI#>4xLhL{_JLpmY=Dc=-;%!0-_^M#Bt>uJ
z*eI8o!3aRPD~F8f6@QxK#BvM$cuC)2K^LFGt!^(zHTDm+-2O9&|J8vCc+RkFw$+Dl
z*z*eZutF9iNj48|4Hq(q_r6Cpso?xgBZwn5SNB`Akoja6nNwB~IOv>)D5c0Ior)nS
z!-I7+<3xS#G7Hype#$~vu15C*lHHN53n!7GZj3$$BKp%^kJHXX79*9SAR+-#-Q$?%
zUet4{!>J7D$mtV4^ppZY!k%}5{?$G}zZKRKk0{pNaHs!+C#70#2$yskO}t8)*cv^W
z&E7UmmUDvVR*@UC(>8poG#);ML5c6^DeG+fyUh8aVq#j}YmeCkZ^_8|d|*%T%F%R|
zhH9TwGy28IcqF%i!l5H;&x+b(1fi+M96>_Pp-z>tLp7n7Jgd`^PG}Ox(QL$R)Hbny
z&6eD9qX1WxF3>$L9tefhn2jt?+Z*z{h0{q48@|uJ+C^LI^+n$f(TqH!IkJFv-XsfK
z-Lohk3y)9ur($UzoMJ;2`f76?oJ(XKG*{gzL<!u^Wg}d?6)`{mqO5)4K~$o)48&p&
zN5@Y6%fFy1zw(pSv0!}jxZy;6gC3ryH#6n$L9DI()ldls&Kidz(iC2-Y#k#$95BN(
zX-;d#AP<{Ej}^zclY`T;N*Y(0ldsb%&)%T#aoKtvv=B}^Wn~s1mX@Y|Ojo9a?pmB`
z6GXmGi3T*v%NZ7hUko-*V@z7+9K@|q%&cqSX4CY!K}Hq&6LxTfd$)cQ#uMQ<PMMV`
zD{P)RSBPIhU+dJpmmeSQG&`h`^$`YEfUs$}9+rH~h^peLcTI64elHwwX?^wq;i_D?
z(~ZC2E&upd18x}ryi;2RKhp;RBmEIFK$3}gVHarXYwV>xhL4L072%P)gQTd#BonWC
zYX_L_0Ug^TZ}ZHY$oiMG?VpOt#1_fL)NTBi*U?LLvup$bZ!Gg~1kaxlv-*Ocd)OET
zz~_AAeM`sj1=JMkbos)^7JLqMg+}RJ;V{fCJG5)JSsc>q49m2QS_B=yF4KXz0G(2m
zGl7&SfkCw=?x8k_29%BqOcEIYEE4YbGvsSTM{LusHTadNFB`JAFc@52HO%?Fu+jgL
zMWE5<Mqjqh|6O-_g_#B#RsG_I!BocFR9T`7kJw@*8p=JcOww_*ezP@sug(r*quWI@
z1^55my3ngBst8}nnHpix`LN1QPa>kJ$-iYx-JIgijC}D>R@Yt-5fcm{YMENEnRrC;
zmGz|QZOP^k_?H3yi2Kiogu^MTtTxn5I&6}dZ9JMn31fbr@&YFiAZk6>l2wrdN69FK
z+gBQ*1<1yImIcrs#<z2*XLw<P=L?sSWW$uIU~JNcbp45Q3Z!mo%dL}_H@B2I^ih^-
z*VI*v%hy|sOK*QFzNp=iN$S1nan`R)CCMhgt=ZhF4zig#O>J-cY6ThM>rbJh+>$CH
zzHv4(?i=e8H6r6b2ftAob+yK#EdQ)dYnbws*wWMt)fdI8<kjJX97pd%o<bfVV$%&K
z(eVrZx7-LK<GJ+#X|4>Uhp2ZS4Xhg=#=dW0rx}!5*o2Nj-^7}pk^Z8@S*82*PfV9G
zl_(;eoU{y;8T>gwr-eKbs${-t-f6hE;{q{MtR6eH$oAC2Ra>i-_7{}rMHg}nDN3H5
zZF-|(5$)Vl4;$%0=40_Lu>=M{YgZ&S%xt95NtWdMNL<ZmiR=o-xm}AhZXf|u1grES
zSBwj|QTUGWqc1N*#30W5LW0>$2>%>*t`weu0sC@yf&Ou0fw^RIb7r-6Uchs>mj4q>
zasnlOh}>#lg~pQvJ=$ez<o)FA;-80afNVaapED-<=7yy_&5h76_eBSFUds*ZIT^dF
zg0o-l;1YSIzZAcKDf4>oHk5ah6F49$T$G>+b^EMjXvX!MRTzq;;N0Inj3aEwPUHvX
zd0{h6N;LZe7y(0Lncih!Q4f;rD=KE6ojoE9<MUZBULkS;`b5ykM>%zukxSy8%6%{e
z)u5D4kyA6Hrq_XNkF=V}CuOrSq__5I(PytjqT;`XqPV((Xs#pLF~#CXe?`_tzN?R9
z?_V#<)qA>&&8%`$eQA*(QCt?_3HByvr2Di6lQ_K1RQ#x1!Hve#?qrxNb1!*4<1N2t
zQR~gdK&i@ePGh;Wj80{ue*bvV|74tZBo43H$9&}u6x>(+OC|dg7Mh`x$X?^V$a+6B
zBN`_8%`XUkX>buJPz@=p#bk~bb}#ssO!zo=Z4TCn73gzDZ{VP$gFgOxdxz+e=C|6a
z{GPZOPYIKG-6Uo-i2tkrP1@!5(gh~sqJO}cr^n$h%mn7hWu76dDv#@Kw4lH~^-VH=
z1MkOYn!2DldsQa;@&*$6vk_B^eTn|6uRnKMuTl(;#>=OEAHQF!Jr^-YW%Kl;l^zMG
zA{9O}5S!f}uikxRx_K0!Ow_!&76`Ab7r4XTgc5u39)sa5HZgzFMdq&AMxgg*vD$E(
zZ+;=!)}Q}Iin-aubv1el`o)_wSP3H*LN;|}5&v{1{ZOCtS5~4@r6Cejm&VMk?ZI_b
zT0bHHsyK%wjw@-#!4gz-b-~GiFdVrZgc*hl)4wLoe}IsRq5Ud~Q51#{p_lvYahJUi
zb#l-9=Ip_QDsUQ-P7F9#nH@yTdDq|FE87hNEbSY(2*-$L!m>VQVC;TsXT2mw)xJ;9
zZG%rb?bLmX4a8&8NBU4Z2}A#*8JlMu@xgh+$mCto*VYuyxQ{v1x-Km$DXB4P%JFFe
z-jtkjdG@*9kXR<nixDV@_ETK6V&g$3q<w#%PP2|)gBIjz_<8fI((V|`Ak_dYxLjfa
zAb%}FsxYIr=kB``>dwni8!^$d%v>2mxu0}hTGpu>Bf5DV8O{~@fh-P^BwQ3^yo52n
z&HMRZ!El$AO*srdM1|@PNcX=&R~FT3nsGn9X}(*p==gGQGSMI;voFEi3{+LLVKUKD
zrhiT~xp^R6o{99nH_vjsun;y6frS^<^>uX@KjYg2M07!zbklDv?WW|gfUppMkAifN
z{vv4Tdm;%-gfx=r*W_xg%h#!60yKCl&eo9P{H_z(j%&uQ-1|dM0h<oT$hhOgAL{5Q
z<he;B9ARjs7jIWtg@edwyxw~B0PnFxO_TDwMsz-{JM#7gr(}?k;%xizxDeaYw)4^d
z`Tq7i?#6D8Fbm4cz0xic$TVLuTdxw$<_LlURx~qx*}zC1QgflRtP<?jh|s_R&LJD<
zNtA_$8bwqWUkpx3;T>ps1SskBe76co8hV)!o{>SSQ+XVXb)kDvSDuI`xAw0lsX0Iz
z>|IKvHGhhHJJ47P7LUdzd}?~4^vZ6dVZdic(mZo)5-dB_gZa<YB(#nXUd&EMoT+82
z0WZ1jMd3$%fz?lita*%zF1C5+l)fp4@ZD%C+1RJ{K}T}hcK|?jKj0d$Gr~7&%llF+
zK>cmU#+8a$C!R5b04mk0)JfhO%?cF9e;yxW4!)z!p}qSJ^iL@y@0{pPNb&4I_#w_D
z;gV;^ONLSXMc98?48maQDdkq8U4j`X&C16^9_23%oi8Zvgs}$b8q-uF5qk7Exp*tF
z+=r)Zfm@sd#zvk6d0JcyE4hUeu+vz%#$W%E3F;Vfm_RNVsqy3!v|$Fzzw8dv%uP<3
zQjK_KH4VIsO>yAm`81bge|aqyDuqLz=u@CLx9O$1kSrZNDq8nVcF*+NIEbCcCBnh&
zpng3ZgcGPcN;WUi6}77gjKk}tk`ZWzb#;5b19Q_`wC-}fes<E?_p03K$?5#@TtI2c
zXP0}(_b8_<gE&SnZ|3nlYH-Wx6<+EytYL7ObS+i041K>qR3`R@6^Sb&H<9wf&eiz*
z%&v${_AxtLGgS^+C=>ctpD(a#al-=tR$<(n=Hrz*RyLH&{a}B|54s_5w#W7QlX^zf
zN}UaaI}I&>bkU!Qsfjw!*5bADRoUZutg|#7MmITP=|<^Bof{`S;CmpIdPDx6x+$uY
zkh2uxx5`Hse@QS33M**_OZBBX#RPe2?Me#{byUj>qpcsOq*KopJ~A9N^o_BL9>{H*
zX5%RrkY-XG_Lfob&VDw)(<qR&-&u9^d4DonGwj0YtBv3&VvBv}1;uQs7NI*o;2zox
zqq_`Ce{9F^<Rq_-<1E2{;poKjFc{K^jv-R5kG=St)>&3m(Ziu;ZrV<v0(IHaI%*z1
z5m`?CH@7wh#W}O8jpJKPW)IbY8mfu2x8K2NQsOaW(M#lbj_oeYH7Kcdi1ls}ZGO(2
zoW?T|GfX9qU9+T~3Zz|eMK_(U;$j%CxLfYvFKpkBs>GQbaPyyJu*#w~SVT<2Z{4i<
zkN={qZ|A@(+=+mI*xG=A5dRYxcLMxq-|?-L*Trvs`kOGhi`@WdD9dvD!Xv{a`Mavd
za}WI%(ey!D#+`}qr{_PQ_%-z@SvI`<e1;i?!S?0l<@uA_v%Jw^p}_*~)EmEc%+{-(
z-`w|N=lviRR$Pmnn(KAi?!vQO=iqKzt7~JLO>ZVVUG>)`=f~#S*9hRCl!qc;;HI=2
zcZ=1)#}6s`gWo!bUEgcEr8^t{n)PW}|Mn@Hv93E+@KC}2?JIj35!4{`=5=s1j+r)K
z&qfO(_O0K5oL?noHt<}x826J2FDAbopl8+FuFwj;Yk&H8_k~-!H5M{xw{2a){e$D)
z+*iDD8FzatR^oK)Mz`bAu5Z1`YHGWMS84F~B98?-H|FIMm(0hzM{!e2DuJUfU_*BA
z*(;NP_!UhP2PAo*9%ZiZsr<QO6{1U^Wo4=n^LDH8@7KB7$oYrE(w$bedNs$oZ)<lp
zv=iW|Boc|I9ZApp8!pccCXXQ#>ahyw%)2+pU=gHlX&nLOsS8OO<rs(>%Edof^eAvC
zzR!vwn2do?8YLC6ID8y3DRG@KIB|$e5C#OFh&-V^PnEo9I<lWEgCQ?a9`mP4&3t%?
zDB^PiUIpa>h9TSyJT2Jos;{Unqy)E&Yy({f=?aDc+Mb(*CS1S)QUaol5`y6f+kIAL
zKj$ASf+os-1>|bZ%fY{2mlz&<I!Ks)(GE3w*oL}3tOML$7GQ3V^H4l-t>P~p-W&td
zhwYC3QQzX6>Wr~<H9dHS*?im}oo}we*(05#AH4hoyT^AAoPDd?q&GTxgpiYm?~Z*s
zQ@;_zrBq+iUjG8S4)n4$ehy`|O2nY%oesP4hd-b)y5a2vll3|T9tzuV_QQa#xPu%G
z_}*?HtD#)NF+#bArVa7Hl@af%LUKwqp;i&CK&zpggVB+7UqjJGxP@W{^9;)D;X|mT
zv<Wzaryg`7B0c<UAhuJig6|QGN4TXQqM#q5_PmiLHcubegMS=YK7IDQ=C2lK^r=6S
zMcp<4USqt7l_1@A*MFZo26u2+W>e-E-kpn83Xo4fZcsXa3N{1yt@u3u@^IJ~hp$W$
zUobY_asitc3L*asfiaY+cN<7x9TdCYWd(}8z@BL%<Oe4j%K-gH;vC7=acrf?FO9Y-
zY;mj(+Il{cMK3|V)+w;%(tcL{ukcfgBhy1&#8!`e;Yb(0*+Wb}%j@S5^O>YF0K&9T
z$Ax_XLleGP?B|k%MMZ7E&#4ytVtbnC8nGLLyu6mgOxhV&aID*|QUya(4n_%SVe1Sv
zLjzfw;DK0U{HTZeBT5<F7BO^xm$*DBX7N%$j#$@tkcHL)mf^TjkasT_rv2UU-&bF1
zx0`6rTQ5y%&IC~U>whwPCQ&%0^EIzu;jTC7Az4@Ng-f%}3@B=R7T8>5RPy2-bZZ#w
zYFKAxb=SEL5^-7<UDvrzmP3Bajn*VL&L-4RF7g;<(Tc$C#9!S}Nz0U_+qouVz&C5F
zbY-0<iFduCYBg^atD*vRq4hQcj<I-qm~k{|vVdyEJwlp8u9gNs=r=!9E!Rq<Nb)nI
zlOlbQ?`*c)YA|CRE~z>~akF1SrC0;|!FcodEn|Zk{lb{YUI#g9uf<2$#=j)}&nP-I
zSS}4V7hR5B*x=UD6cOkT{R6867AU7fCAV*;RpCTf+Zz!z&1OYfL@A&&*^hz<5~t@r
z=MY4HKKJ!^e^m|ogB_7Y`Nx3F*LeEcH>lKS;xKr`xM=<Z^1~f;a<5Y_-na?mT*jc^
z{JX-gWH{}?+;fAcx<l*8Tjc2JssE7Wa|Rbt&VI=pm8q2#e&fk_n6(y3PbYF!aHVfH
zL~6*GRcnnd6<$|N99aZZr&8!D*;<TUFeo7wN~LKcYO_o)3ez)?<LR)dN2$$N{4}i2
zq)vJ@$2UtL`lbDw1xW$Pbddu8L*Cda$s9|etHyk_y^rZZ&361<lvo+!sUuy`VN!!p
zeLg!hQFZ>=>D`%i>-YO~QF!Ug`Aw%xRI-twQMApbgs?ySI|+DxFlhMupuzQS5wL%0
zhxefVB4c=hF1q)RX)9*?@;(0-ROsNHbOdO4>!#?1<;ztoJcit$rv3QAMAVA%2Q=XN
z@8A_1)|(AS;4dO(=9cYjx9%RoN31U(rY_T8udZj{h5dk_?e;9U(wmOl@$uXl?MkI<
zgO%>svyb=9I-Wgvn>1kPEUyl54Ql*s-+gv%eSL3bh$A)1eWdtjcYz#RBBoEhVT3<^
z)|@@hgPWJ`gtd2+l<Q&k)~XlaN)$;hnA)3p{fR~weyzk(CH9q6$|N$f)jdfYqM(tm
z*Ql}GTP8<~6{aMVV2<|0S9ZOGYA`!?Z~37~V7unjf@lylLCV4db1Gx<#NC#R8bX7y
z)2+(;_nZ2*37joR7`J=gN{~v;P^Y^ajZh=B;SChf<@4hpqBw{TALtU`H~UPnQjTUQ
z@mLh(2?hDQ>*cB@jLg9H`hl@n@YYq)w1+R}QJ~j-*1aL72baD4y_`=yc5K5bR+}Cw
zi`gkK@5)v<7D7se)p~55V&Y570VWNT*SMFcf0~*qN_;YySkAyrncF1S-qD$uP{468
zkSmLyTb1?zWefSsQA&QT&y@m?Hw3hK@q1^KqQk)cE$Qsgv7*0v+UVX?q3y9g&Jg`d
zi9>~#P?W54bG>SDj%(Y3!Q_5zo|w@_lcYAaA5TN&*75WRx7vZ!`UxPsGX=6v-H@BW
zS&rsB=XtEi%W<7R`XlfQHU8Vm=W=$dnKBUWr3Gsj_vEq1%f)XFmuiFY{_3#HU_D`7
zcNtCsN7blNTF)2A4k>(t@fL@L^`ygszwNi54+`^T%L(E8#NkYbh|Z1qaBq9)H-r`>
za@+|!8J|54>pxlRcN7@T1(RjK(FDQbOA~!e`g7$3YnmTjLbontxPG0ZzH-H|PQh2~
zSY6)E#vy1DB&%SZfGr6e61+qZknZVj`eQ=8SGlrIg@=E$=z~ZjiNVhY{-AaoT)ZY*
zN)(35We-fmv7OB?LWoD<T^pmj6FIK_6*tGmba9)MFYx%jAedtqgVA}m15(V9TtRg4
z)-ZJoS*FAVt?|o4Ln|okWM2jIE?bbUbCd>RKRyLiuuj(;_+4pM*ZllkNNnP|bO*(M
z&qM?_Z&Vo37+muZ4yvWRM8r$#*2+NgvEJ3ViiK?v0%z&kkc*qo=aOBo*#Of+==zfu
zmtMhbHEMiZ;^1iYY}FC9dtAnO@)9mbvh8j)Dp%sp)i-159zoAfGV{Jb&t0V@9RVwN
zkQ<YSMw0xH;U%qOxWcm$i60sim>BTcgZVno9rqv#n1LV6Wphq@1YlXXP|K_v^bRS8
zKk;{cDq(GN*ptwS_zlGbI+I*#@u2pUhV17Mirr*yh-E_zse>GIdcjQ%KnF?<xnz0K
z=<Mn8CMH~6OJ%g)U4oKJc=fx$soeiV{NAmZ^If-L+XftGi;q}y&>FpW5h#q<oCH9}
zd&$%i==d856Xf+BD>+bpycPItm6MwJYizT_!KRfYD|bjhk!=Z?+-UmkHhzILp83A?
z^h^avhxYijH`zaW`U0&sL?U2P)X{8yT@Jz8*N)H7oDk8iErpi>l%oYNw1NbGjBz4*
z12rbdz96|qii)YJVWI4Y#Bv+DIA~~n3dm?XcME%pN4-7P@*1MkZ*Po&1U*{7k+e9+
zUBw|W*+%635|^N?d&PA012^ZVa$>-?ELWj}1V%Lr?C@D$M%+Z=-jR<a`>9t0e^Map
z3?^Zg#f8UTbge^wk;$otclBL0>)5VSRq;MeknhTmYa3BQ8fsi7t#l0LtH-~6X4c=#
z;w-d`K|4Ac391R2AXD?w@wanw?={AkdQd-eWQttcws9gnAAX_o`Lz`MiX81c6V^GJ
z^@?o-Nw2&Gj0zKKki&P_%8W;?b5OjHy;p@m*GnO(j`Dgx_oWjxUT?cmXa7y8`}YuZ
z^#}nw)T-#2l9Gr+gK8C|V2=Z_yzJXnFxmYFBqC728;aX)riiS0vSqt0f?=})2S#t1
zldkA=itP8y{rzaQXMTxNcWQ=;z7vVl29dsrcGs{cNa&94yYkGz&2j@C!0XTul7!}-
zs+)v7HSCYttPNSBa^?v%Rorzhvyc|~SQ|<k?`j@w%gXWpA?uvFD+|<S9ox2TbHz@_
zwr$(CZL5=xZQHhOcHAdtd{=w#tMv!w8nb4-RZj&;Nl_5vJfLi;aA;GOkJcP^*bb^e
z;K9l$Ny{@j&<>7RNq|CHq(XyFbS6l~ghJ52l@=!Ha1fAbbkC!0D`NNBCmc`AH_?rP
zW~7GW*Yc+L6?a<Y=MkV%wATSq5L_))Re<oJ62p$Tx6MIruP4ZB^q)(bB~^O7!4Cg$
zY10{q$>4=Sxi2~V6>H7U1ecSXAa0?TBuO2(GItzmj;A3-8a~69`37rqx*n`B#1}^=
zed!r;&3}!@MSUhkY=}wbq-HtZV-fp_+f}=J$W~`slw*E9jUf82g#N4Bikvqnd6X8n
zc1|T41r<;E_jG@i1Wi2XG?K}QM!3h;<~?RWc1>L&hETiK6>_%h*-YOIn!~h~)Ep(2
znEH5wLqnx093;AsjerHWP<n2@w?3&OU!vEI`|%Ydi(a1OoZ0{a4@27*@dfkd!w-29
zOA;DlZvt22_R%ywIJkU=zb-hn&)Uscr;0$~THd4^Buh~=`bj(PE2NU0+zI*;(OU^b
zu*U$T_+^4{JWmY%mhDYajV`=rqWJIKE)8>{UdRwx(`rKfl_Gk*s-8JeVL#mzu5Z6r
z!+>VPVQ^!v--$;$)sPvI^6A{-J*n<X_GcRwC~~<(dDJ_P<KgMu=HHxkYXM2`t?w@X
zRny+2;-T$tO*JK$kG6t8mZwBDIAK3`cDqltM9r>l-&msQf4stWI>XfA8xu{}0y|+C
zyV)p?C3=!A8cyioz~FAZ@>GhIzx(6{*cScxedCHw-kdlaQ<SANLwvngRV{w2a@q1r
zK7YYem`H<dI0r{@tVQ6>jwt~#45Q@_$HHanu4^P88%UAQu1Pq0{_-!gCX=KeD3M;#
zYRyIoW0au5i*-5BwaEwENtX=`I$Xtmdb9LXagw(zpC6CGhmrVuu@bz5%24}rq=9HS
z(&@`m#xX(G?8TGTZwoy%_ZKjsycbiWM<R7=jGmWTO%3-L0YY_58P{pN8Vluv(E?e}
zy^UfH%8E5WNL-8NyD^KHlLboQjNxkK9aJ2@0|s9<pX?Y~9gLObZ%@woKIdEQc_8FU
zA~Ziz&DstU%rhyAq=9W_38>DVCyhtGm&mWYl5m~jJDB+X{<`m8F>IV(v;CFCL+mxZ
zdSYirKmnF$iK}7oYQGugykX9Vk|!Gg)@viM=-AmFYz}xDKdEVIY~U&{;RInEIyX$}
zfu}&e@&jnPx^gEG&55K<&a3u-Um#hLCkZ(V+1@}yQt)0v5NCT=;)oUT>K4pJzs??!
zZS9lCH##E#9b<E?ECsZ8Dbehz9swyO*<=X9%2?@4OmH=gala_hBz_p!18`-}wG!_z
zoG7+phuQ8~Gf>-x-NgUWED3>ZZvGO<>E!{OQBhMk&>~1hnH`1U$3Pym?Vz8gcBC|0
zO=y`UwF(6k{yNgXG)>!?L7x*Jxv$*0JT8Q^2!&NdjJpjImjHiF(71?XI7|@Rmm7@v
za|b$7#~a4Rs#wFjM{@eoI{A!FhI@2|_>oA7LY|G*jMR8gkz*b186>|W6hkOm%se^E
zZB6{KH?!X-edLfO63BL%l@GCEwZ0Xb+e|)r(3C{BIC8&{esAa#TQ<y|bEZ794<=ej
zRnfQ~f?2$Vx`<-APEv6pPf$pMHetiTS&zDh?#o5gj}3Nh$?uSpBhfB@b0{RZH72|}
zsyX50LT-0$Sz?(<vLB+NQ@@D$rw-%^L^>ZrIUstA@2SSxvvLpN#NFX$&y0Y4zxif%
zr5Wr%`DZ?8PjGqPm3jMoNG`0PixL%&sXu)PY^fsLQBKJJ?DqP&^6G<>Wh~|J$bB9j
zr)N@Va*<IZ?HN6l<F{0g$vs>a8&aP*cV1s#jy8uH^U>3SUb$e%m$`6RpVL-N^Qy+t
zI&h(Q{%KY}rJSDxmh%Vd?lgVZ8iHwcXx~C9Y&Q|Xg}3F9iD16trUDisI<~VhVO#+f
z5R2WnVDV3iRMV^=pqUqc7a}2i+o0|Y{!?+5(Oa+B*~_Qxmi?u-lgp-+%Z;47#hQhn
z_OB@=w^L<>(cdc1LfuN>4qZ8+N-=|ySrBaBmL0<Rr*gYEhwrwhMq28QyLJlRH_l=;
zlu0rbRbZw?Et!0nglRv#VJ9)8UM_OrE2UH`=j4<-Hu;)1_Se)Ft{c>X52?KACb`)*
zkE4#g#C)!z^+dW0J#8r-4zKLF<1LbvIh~?+wjHj3V^Lsor|s-^xMx#}QY9ALlYt^e
z=~_!!;b=C+OY}>fp8*HAcTu0}bjDQ}&UB49u(_8mSlJ&Gxvhk7M3T^iaxFIQmb_EY
zWJl0)fLq1Ww1sg-&qzwnf>oG!L;ovKjd&<&ZmF=kn{Wt;VvW+YQlnO)4q(KL_6+3$
zB~~dz3ZJ=c$GnbSrEEFI*T^FVoc@H(KFUHjMe!p7#o7%5751!oa3L)-MGVl#NN&Az
zd71856cHdQnUs*r|C{}i^bURKSdG<XQ!ZeI`t?_LE|#k)x)XgzwxlpBr9`~z5T$IK
z16KWB3$<afZfN+;scJm>KsF*#1UTm^Db#Cb+=Ow;1K$-W89MD$kQ$jtkDWh2MQo<u
zW+_{kO)@>zC9d{&!xi;s!YKt)-yF<W*WZt#^o*`YRaWLb&zDGPJZoxHW9m4%ytDm9
zAO#DPI{;;M4QJi(VC|!Ct*6(5)_gk6_<Tse2@K+JVWehLAWAp@CBms!O4EJphk!a%
zaXa}$5fK6M0gcMn1X29#cppX-1>$4#>S-dNdtgwB>(jOFR%GWYjfb}d8&@!u6tMg(
z&W-3RW6+czMskY{O{k4I1zm*9c6pco*S73kAq;z#dJkzMx_K-gKwZ)HM}v^IjvViK
zNhl}-VoHhMB*`$)XTS&-Zchc-A=;Ui$<cHtmNRyc6!TKq>}TRAMV>B3=^8vH`(Y4n
zv`N#A{wg(cl$R+`vw1Tev$ZzFuu~%k@Wc)9T;yuVAmU>hA|$&|#+d_H_l%UwBWlIY
zh;_Qop#1aG3*_VDJNUe7vpgs=*Tud5Qs3FcY>sk-1ivy6ryvNNjI|HR@yplAS$nNB
zp|}$JZiq?TeUlznL)cXDI<{)qoE?VNR5*dxZhekQz@9|!WKsKF7y0BhET8_6F!0S3
z-9t-bv+mnnR*JkyXVi)+bI4G4y&eCdNswtLQWB4yFYxQ8#|s?vjLiG-7(rO>J}w+u
z6i$EiK^$LX6gjrOVnS-)KbbNP-7NG3xSjB`;NtzH*{SWX05f5Ls8#3J4VbX?i;7Qy
zG>O6}s-Vgle#lKf{?P98T3@#J3)*+^!LTILr8hiE&*A59M4&gyO*8+?QK&heJCv#Y
zal=vGHm>!*Jl7+B(E`(C;kHtNeI9T1b}qDULcQ-cAXMYP0Y5)mB$lnHjnJy1N=&%S
z96cA}J%IAA_T8=M=FC1}cpwV?%-ol~2(sExOGuD;sK&p5==z#*?jw4z`XK`-dTiZ|
z`yM-jAz!}YK$V^K0i(Ie_z(1(AxE~FifZMTHD1;c-+V5FKjww{a+)zbk~nbXdiFR7
ziR5pY*D#WikyVgP6$HHHLo@|A^6Sa(5D7S&`khC2h%da?q#n%uj%NCgS3j!x%Htmq
z|CcY8j4wN>Dg-JTSw=(zH0sdtr~Em_)|125wRNQ#FS@G*{@Nk5h3wDRm?AfVU8p0Z
zAig%<+)F|OFkgoftN0vs{!6jsNQkoFXQ8QYwq`<oJ7q>(g^Ur;+hTBV@uK*m_qJ6(
zEcM1mQHf!m__=Q3vN+Ohg=-Pxkj8$i*nZH@XTZN^>+Em%9>mkGMSj5V*?az=CK^_>
zx1G<YBC|ec5i}x$ERVxPo1k>Iy$>9+-&IRSi#5qs&v*q?H>*$gCOAN^MSjlc1Gy+#
z9u7u$ccm6Fl}m?;<ip|rZI-L`p_12T<=ZpH_3}R>Y{VX7m`hb2xbZA{gZW(dh&U}D
zmOT(|f`tzayS$ezk4)MP9Y|1t%0(}rOLUl52OQ3bk@H#l2<nFawoKDm>HE~;-?ew)
zp}*Hd3C8irGlp?Fcz$tzI2n$roYwRQkyzQbmhV9yxr9XiRutZ%hM|@^GyA6+t*0|M
zE{RpkMrmtjiDj8g%>pckQnBLeLKtxf$~3^nP}I}+x7fL&^Bp%1><GK#iU60c-E=Oz
z^n-*a1#WQ1MG{=?+l`jsW_)y+3$59Zi*`oXoRJ8PS-!sTL7WPVOJlr79zMqYe7A|m
z2VWTE)%e#~gD95^RC_yRV%B>}lQ<1h6Qy}tAqL2=hoKAASC<*lR!(dsHiDGvWvUXY
zk<PMTqghe|79}h74Ru&j2c08u@JXIU`cd+_keqgr4j_Z~uNXoO5YO?+<%~4hag$!Z
zT9XoNZf8J`H^<V2F-))i?+`$7EtE~7LGLXjC4eVS^dTbs#I|OkOjL8vB}FCYw}ZD`
zaW|ckV)*A1xxRr+OmB?WzV$CJk{d~7pFnT2x}kN8T*rd>svtK<@_TyhNXmD*zFD%7
zm$Y$iH7gbUKGn@OZ>+uKPx>y4%+4=8mLiM#oqN!*b&Z-9iXxrDCGFz(n($J)h@N2H
zso(g#5+Ycy+m89jB1)^SU2F=>dXm@YxTc?B*%*vUtwBgi9(FYF6Ly6xpN|~A>HQpV
zI(b6i7<9+>zP~<1zHWbW>^-~Ic6l{jg58a#j(8ZWLKu|t7|D_xe5u9Jx6yqt_F;+2
z*?MJU5biHHKSjWc*vOpK{ah867F1iYyJZb#&Cwbuvmz$Q;;v^`LaV?dD0T;6E7xEw
z*=uH)BSU{7h0uij1UU)lSa706Bl;Nf?`IP`-vSTZu1pljNvS{;uAz7jqIqT$sv)u^
zs6cg)0~P@J6S|vhg0L{*{puT{p-X+2HA5Y52m%>Z(b#{v<{<_le$oICxfhINE()96
z)JU;O)6qno-Qw~h(GmOyz|WmsRyMsPp0AfKn_wD+c7*=k8yG)WPCQCBbuQ};W8UQl
zHjvg@YKH0+E6n7IM&s+Fsi7fEVSfWpX%$I3e8F^7uV}L0=C!mfVn<ug6mSyyQSEiP
zFt10_E2=QTEY_yLtUhXdE~%I)Ba_rET53sK#Ylh>3xT}T64t#rRj9;*WE|-qDrK*m
zNdr5u8|4Dw9UTMliis7hZ<3qfPY4|9u3LqkwHtb9i|@9fARcCdQa#nl#TadI-x<9k
zHW}^8p$!=uT^z@-a6dOBC=JsDQ9yx2Bx&4sE%zk2AczwjK?ExX=|L^-FtwQ!w9GuW
zwGX?YG=fVny)FhgTdq=(7&`df<w;|V{ifkKZnO7lYbbxgJl5t~<^=hZ6n*ld@s%2}
zD!spBi@@w*ZLuFsaLJ^85^;Y}T5z`_EINSoeKmm99>S<l%qT~c(}o)z*?p${lWDhS
z$(l299t?FkQ;jLBlUP43n6~~@?ATHP{>{y0)E~*Owp4w0y4#ryZ4+z5mdCQOme6}|
z(VXx(lyxeM&agg5DEJ7lCKrN9>{lBX3Dz$1R2*b-o;R}=m<*Y5$TD7eF@`wzHmbKg
z3EVYep!ugDq!)}fy5KNQxS%`(Pxwbx)9`eg%DMhwI()Zvn8`>er#SJX`9@it5=#Gb
z#3hoHqb8u|pYE&ZZ&^OVpi@V#>E)Y_)l_n<n6ShCFgo8!77-uPoiz3Wj#+Nv(2|J|
z(FA@U(W?&D#uS{^y{ntR-CSYklmJQ3VNw4LXsbu54?+HwC%=HTC;!PFRON}@HFXF6
zSfUO7B=@gZ&tXy-p?L2qQdoNb>-rNkzG^KTj`BVH#`Mn3Z~J;w+nCTj-X0;pVEq%E
zL0Skv1nu%;P0{8U%nlxBZ*aKoi?;czIa>i!cwrYF6=DtW#Z$#KJHyN#N(?N;ciGDq
zp3e2+VS9q^5vlX=vny`P_Sc9fB`$8>c_%S%SjN9PAZ$l!*UaCe*&lD8f6kN--&AvV
z_BwiWR<A@Vep3+M-*Fo~DWgHpzl0d#Abx{$&vb#AG)@_yc<kj5f7G<dq#4#_;VJ-;
zY;A^amJ%G>7E2x%2W)SVLhaiE4CG%XThWP>E-OJ~ryJL>sxMe^$zPsWC=}*_3Tt1%
z?wF0vW_?N`=NkGLt!#MEl!69;GmJ+b*!YlN%#VbCzsLY6&^@?GRIu+aawM}pt?ATF
z`&e;Ge+pQXA{Pbm*9cbSjdTq!Qr1*-7q(Ptl#wVf0eM$>t%&lw3DY*NhR*GevAvaj
zhX`sjGr=u=b$6|89zf-`6WMtFl-~iPzr5w0|6N|`J%}}EYk1Ja&B;%g9`;$H#rfS%
zI7q=3^73)N3mtd^{XuPWj<gtR=#Ou4&GEreP_HGR4-tB1)Ly1G-GL{lnsxbWY>hM5
zu31~Xu4GsP|F>LD9s!|OXz6^aeBE!bF-ujjucMeBlnl<qe=tD@B90Db|BE>R@)4$<
z0eSQW>p}A{$HI6)2}8D|?MJ%)W@gSZbpqf*<8V2qNjJmMw?7W<hf_k9ANVdF68GR=
zk@Ia2hoE<tj0U5NeKs%btpsT7IYxU-xhg{`*i^rhIMJMxX!irs$abT!xU!!%gg&Fi
zT$B=T^`7+|$A<&wS|HuzW@ow&l@lH;c{M45SG~br+e8ruJhq4?9!OZfh#PMb2s{?(
zw~_R%?oe|v^Za(Lel;<P9d^QUEqvcAnG810)1k<dpYQuhKbO1AT)GtN=wCCSGTX@R
z_X$iE_w*^xWb@tyJaW|M8^qnH6a*~D2M^5}--l9JAt}HoByZ+z7E9lqR#DvF$;&1A
z8JU!Z%+2^e8<&Eq*xV3fazTumeL*7v^Yr1mb397}`K1f#dU$?MHEr7-o@e<3{bzaa
zNaq*4G6WD%+fP;de=`ae-5Q?C*$PPiczvSOd9RVO-4?9kD`Rsl>de?AfVI83?2!m2
zBm<l>p6)u`|2m>ZoR4!+UESPXfA8G%i0%I|<$+*@E}f_pg<R}MPL)3=E*;SIu;s26
z2N936lYk(_?GIAi&h^xitj#3U`15$?R7}#F!`R!n9c}P-ogN`(^XB@xY^=lk2n_%+
zhxqE}r5R`07EqECcd@TXs=2YsO>`1%nc!Od{!T1WdLC3MD*mQE&+9qkps6?%J#}pD
z1QFu~Nj-&B)vqXe5i&2SDCzXr3jKXGA*l3%&Xa251;~^ccN?DnB2F#keEQ<|Wkgh!
zdRi9?*RggR+8}G^hou)Vak<eZ-t-ba?Vvhnyb)t_shmDrL1k{SaEDrJRsREoIQpe*
zNkrn(1ETMK4GV`txZ0IwRKVZZ49+@a7Odu+@%#>WnfO-l<nj|q-IuTowJwU7DE6zK
zZnbfxtAP|%3*4L!ph#<5+KH^LxaRR~kOH~Hb(TeT-qlwSfn~R<6D(K6^)ZW1cte=i
zrG%1ddKR@P%nKJ27P;Os%eQKHw>fp=#o@_Md*rLf2vUHh&((;F4H27d30>=6V@(kU
zPP)|%U21BM7DUskg8`1!SQ8?3vbC0@vxnUk0@X9csFSAkCgLM8YdP|3580936sB$0
z!(?hqXOzO6D`@YBaDI;u_kOjel3F+{XVinDpVSGgjdO7B6L_Kwze(KcCd6}Va(5C$
z>|+u+Y2g*V6mOfv8)T{185nbqGThk`r+oBXWo*<uY6$EEL)~Qroy$D&-QY)Min>+Z
zEtm54Va<dyLioLpZ;z?MqjUO~`Q;rRsr^&FyA2<NZA_N@7{>#&L_XAHecKigz(ha(
zHPQ$PLp=jfu%77|kE9tNJnspz_<PHEYKf40A??&jq3{1R(LcIDp5#&Xtrl1G=QzWT
zT$WIPw32LcA!YkUj#;W-WU|-~=?;`%Zf<mT#$a!+TW66Gd)el4z)(D=*+-<)b(4V>
zhVP0t+@C@l)C)e9eQNbpRh%0O&h-1+%v7DW8EEowb6|jxqWonTTL{4se84{oXH6y7
zEz0U_UN)<(#dY|qmgFz<_NkMaT)c{(i{!;-Fd6kDM8{ETMv#H3Z7Xn<_lB<@#ZYNy
z3SlM3DlgaI75Nl`Mi1P1jVbw-80$W!4b62|y-Z6UnW>S=uaIKOQ6riQBt2%TK+|B_
zip-Gnu{ZiZ#(xhIgD2%iBo4<v9DjzQ5o#+)-el`kxbX-oKHIwW?@%9!|5mE=g5Dm*
zmhu#pl$q0FHTh!p(34PBV~32D^#WC~`0#)4LmeI<MI7q0z`RC>cRoH!wD6kqZ&fn8
z`yL{bkfVx5&Ki3*2&lC`)?-Czrj4d7dHCIQxNY?4Z0;!vVlLKJpc??4YE|e9%Qxy~
zWRT1{e<6Lw6h{mASp+2d?iV^XiNdq6J&~rV?c*b-5ZqwgJD43Y+`izPPLqi#xB&Xs
zU2l(mbRQQ%RadXgE36a28xygU%|JbL{66{m9!@ju-y7^)(VZr)3Y$B2E8w=0`CI&i
z6PXO06g$25R-}Q}d-Q#$T_*;j;I6xeUIj%>XLtx2j7u;EHJb+PSAt62^4!FQVAMBv
z&pV&8&N(Cz*d9u0AFk8$hya<bfhBR>rD;6&`$WzxQ7lZ#r}dJyKJ~?p2p<Xv1qm<7
zI&n=8Xjr{u**4$?hifhIAKGqQT>X52VJwG+YZ49pDo5{ZW}-Bwkk53(>h&jF`jSY&
zCq5a^zl4(DaSiEhIn5_}#*oY<2Ydi;TYR=<XM{ZBDqH`go15q|DH-XJEfxr%keNUO
zI8?XiPd(KfGjF_112l^r5OHxg>hvq=p6SdHJ%ZbW^$FVxrCM{0SlpN~a3$(pCQ_E2
zB#3&ikwXg6>V3K`HoDqy&1ugeVS(CH?lhoBh_&{hay)=GAKdeRjW?!jU7dX$3=FLl
zKCd4ONgf^ZH%lc2HHC6$9ECy{Dz~%k2@l3isTmeO2*a(cB{*Vy_Y#ybu&jNVd8N{X
zXHV+uE?}-aU52%9smZ4EJEBSoq|1Gd7#Be-J2A_ccV}P$2yIp*Bj@g3RkF`WY@hB<
z0(#t*I6UeVm<=!cYY-d;_>=w0WY-VmKi@ywNNp^gus}cn!T(9%I=dLUIRC`tv~7RN
z<4E6l|6tfTw}$phxnWZD0uY=9y_NO;AbD|$V8_=V@2%7a(~E2nd|m5yuQkx~)}yxV
z$WFqor8UK?{oY<CUGH-6os71P*_a2zMP`<2a@$;S8kP<;1TU_aim5k8p@fpK0`<1}
zXSa38<OHf;Gi-PUX;qI2N80?vFyg2enwy}YDzZw56NgCRo%J#>5_f6ra=$;<hING)
zll<jA^3e(zOgQi6d7Qlnyg^kHv>TnTV86gSaro`y=|Qd4&D{xhpOmlPtVZedZzyol
zaD+`~7h3JF9bt&=?MPuMCfMAnMcSC^t=1}gEJhnf1Bd_#6djVbgVf2igU;2uRy2l;
zhtyLaZ0(=s6^8d;*3DY}g>GETUG{MHpjyMEJlH~OdWEhveU2nDg2*pR6E8$fn)~bp
zIbxx}QRA0qi7SAY>`N||8W{4Lxr4KLnP(u;vv5I}5RxiIm|?@1Yyl{>x(P+Wy^#rH
zk2S(1Z3I<R@+Sx25V@xMX2TfSOLrNofdEh&+mwA995N=o8vji!1QQj2;6_=%Wp;Kf
zu#h-7SP4B$+uMbzv0#ECFtBAVe0%G6tf0*iZon_GUjc<4pb3TdCVG3*&(DB6S*(l}
zQ}L4ZJ$oQJM7Ait0}e<WYC$;*1c^1u>6|s{f}x&R5vyi1CP!e3wtPr(s)A3mpS_+s
z>=Ao%q4lXI6TB)U*#}tY1|SoDd`S$Woh{*q_E5SEZAs$R1Tkk=>LZK>3p?V_E#W+>
z)&wIvV)+zpN#@oBa3zrX20>UdYGASiJVp;DpT`~iti6pe%gwq9Z!k{nPJBTiMHrnp
z+yms2SkJ=2d0%pupmCK^LuZEs(E9aPA1G&6;CJ~;kSx?oj9FVx3V!hI*dp*gAur&x
zvy>!JUbqosn^oua`L=`?%0R^4xW%bQLq&Yvb*wDhv3g0X#80_H&`lGaF7s@(*UaWc
z1?=86;|ktbE9Cl7g=emHe-U)c5{Ze6=MArbF*vcf9X8GgV|I=R<x0rqz3sP6*2DC#
z!^uI|Cf%Y^1;?zkD$Dd#%UR3zYMestB1F{{S3zPmPOdI2i+j#XEi(%YhhAce+>gFy
zSNH*{Au@!ot{2~HMeji0O;;khPyj{imVIVh8km=)M{4Y?qWV^~iFGN_MFZ?5J1SC{
zZ5(la!JoEyEu>u#OV?SBNNE52Zf1mS@8mr{IgOiiV7_j1Bu4O^i@fSF*j@ethEKEX
z9ezPnCTyp3S8LGF^C<XhFH2%$FDXT^>;9NeXaa1P5sg+v+B4Jj8NKppT}an9UpM#p
z^W*#anFd@)9X=m#80F3IAa_d0k^6PTk^MLB$!5sY-a5KPw=YMTxiz?H<O4ywkk7Tp
zl?@(rJP0zuJmev;Jc2?Au6iN-^vY<RCS|WCVD`yLVN}bYV_g+r;eEZuoe<lyrfy{0
zLS|ac?H`oIJgCYX^YjfxDx8%xir7M_*Gy6;^?7|-&O-iLq0Tf;p)B~D40*#J7a?wJ
zU(H-DV<T@(bh0G$FZh0j%oUZ64qL&^2|aFn13NO<bs}2gq4}5<X}>wAALy)|Qn9K|
zxuQaOVluu~p9C>TR_M$noALnogR!(h(kGMx-7t>Nym{a(N@RD?S@Yzi=Swnla5-)Z
zM1OGu0~z!{^E`s}kcd)1py>I-`NpbsHF&f~NH>(ZGt854W5m7PKc9al;#TdPd}o`@
z`i#R+@^o;hGhT-{gH|Bu);TwA9Gy(`ng3>3G@qI%Iz4umjOGW^O_@dyvYcyJ%|OYO
z25m_-sDtOo{ijTai${KHdUokxTP)I0nliIk9hFw^=9cEtQ6c6Wr#w1;BwZhDiae|s
zC_qFDBYMx61i5myo|j!aB%?mFRj4i&fz8sdcuNv0n(8$&q9*3haltGH$#kPvL6<Bn
zo#?Pd>MQcKGPF4h3u0|)tlG&goPcgt9MP0mKEUgx+JXj>7^524ZX?D@M}pmeRQ8T6
z*}C}ncrGn0*_tvnGj^5pB?`b84Hd^l`lA)IIb5qBc%ncNpNgu58Wc{#Q`(~}fo?;3
zu}`E4%f%h=`stu8aRv$ENJQ;q^TQqagT#2<5LZ#OHdEYNr3bpmziLyHqoaI$m890Z
zl_BXzJA)dUe3MSEo#s-5Lt$33v&dl{(~BQeMTfyf>Lqq?)lXsG+w_6~xW+RYo%mUU
z9bV;&kuB%VTWuX_hT7;Af9Q3=7$zr|wNcwUej7*TS<=lv#vNBMF?LbI`|V_HU#DE%
ze#qdf<wvxbA%ng;s;`Ac-FH?8s=x-K=rXL?^f*&gH6^ksT{7#&cwVRAQIblWK)xA3
z@|!7=vVzN_sXpm4Nc%_<Lp1X$uawOC=TZ|RJ3R+l9m7$My8*(VVWHW6!tlq2i?>Ww
zg78}WU-<!*F?UobJ)bP23K^V#*Yx{YotHZbyrBbl!ob7}Qv5;_jZ_ar*&T}D<St^m
z0Rr!cuO!Vfi5qC!E=v2wv@-Ah@?SQ9Wljk$V)-y3UG^7aFA9~DuH~V;{Q^4-mr@|C
zk3`<5{gG#>uy@$e@hkH}fIkgwWHXt417YewWlv?bRMM;?l~7qFHR^<P{X~X#BKs-t
z;^`*bptb490jqV%8ey%gfxwH`_)5E6`wYxb*0c7%uC^q@q`Afse<pC&FbuYUora!?
z_?jdL7Rd=>mu~*m4v*ftu=zn}n%5?ixp0a6xz_r{FivIvow8iKRl4C3r6q=ju?1El
zV5Z!&Wqwt5KF0GP#YLQO$cRL>jjYl{#+&K_o6sC53nFK|hL|dB`51kIW7CH5kFUqY
zf%UBE*23dXXMSAU6%=xBO^(Q-IE*x#&E!&TE@0o!R`>fQi6az+QMxjFQi@^vrl2-_
zVVjKzMWlK{=zAtBbMZ}WK<&zAyYjIp(dzo?%vR&-FT=;1X<RE_XLUXK&2rhc=$jr^
z4v!+=zW_~j?!r*a$S~_!$>xJg{4)9w$QZd3z2giHS;J;G5V%N?diK32j-E6|a%s=a
zlxJZIaLb2va~sPziwCQ`6WElle56rtypju7pXMFzW@TftrfwGjQV6nc-g>O+DGPgb
z1|Tv1%rc&Dui=qoxOh<@L28Q0%Y{;II{Z|tlmS8Q_;I8Riq6dTW2)7!nAQeEnddc3
zCc5wa*jR)e{7;kWzi_WeE~vhr8I1nV`1HFpn;lE;PZS?%Ix=X*d*Hsu%^3`#1=c0n
zYMA$-@i-YbO+dK|+D>#6-qvU>n(V(s)Ho_o+m3&jUdt*YOPClx$Enh6?6_nUKhwT;
z2(QL9)EnA3_axmd51D`Xv{n7H`nJRjFpOr{>ZVn|=0QQw-oe-64seepF5OX%lN3%w
z71=vZ{JYe|Pu7;Wx<iw?Y%u%W?|dR7g<5Ep{|rVvJFqys3#QM5-(@o`z~dv2=T;~D
z8=pOM$Zh|+!Rou|)`gvh3t*L}Q-+paRj*vWlsI3E7UAAM;4j$na>o=5B54N?0E?2$
z5|mMKegQ!CLYaB)TXv-A8%i`nCd!rT8bLwG{}HUoPUYFb^%#AR(x=EhUJzt%Nl~_=
z9GXu(A2Yo3o>P7`6jBkaU_i1r%q)$MKzdbrDNTU6yP3r~^Z@&9rQU3(ZzhTcv|twI
z<MmT8!2V@_pF6G73Y9Z-&K<DIZMslbIwT^40x_f(UI2enr=kOq?7Ml3b_eR#imH7Z
zR2sVRPA=Vf3F-B@6;xZ|4Yd*X22ydu88n0Qjo&yW+&KHeR`3Vt@E0}MC1Jk39USHS
z>c>x}wd7bN+M3iW=4sSycDs|eSsD)Rc@Fr;+-I;S|9o=To*u&1S`N?!P(p&#u!Y~q
zOfI#T#RLJAf%vt-90@bH|7G|<H0Eb=yfJ_6_pSBf<=}Vo@nbYT1<EF`(45cQPZ^ff
zf4cAouzvQ(kM!>|9s~LI4+s>7&pkG!SU)>G@^@PLar^`RPpSXM`KbK;hu|3ZV>kU*
z+3x?OzD22ut^I-m+P97W@c3&{2%RwxOE5EW;YAUhvbd4X6!c483ul_Pn|{H?=k1QR
zR#a(Bz3;&bH-PbUYtf6I07jgcD6aRNtA{`*{vNbwwSfq;DO5WM3S|&#M^aXj$E=Af
zZkOW#t0YOMzKWnSCSiJAUME2XfIfP!ScAie-y)~**4ud}0Dteq)X+D~NjM`1khio1
zfsZRst9VkOeLdu3?N&!(WL7W>V-ZPWN%g(8_fM{S=bR~f%(;I+b8*uS<eYhZ9Li65
zDeg!2yWc%LHhJpqB^d*SiapT@sdTotDh51pPOeZ<T{x=R+gSN-?BhF0YuNcA81WQm
z=O#03$_^?dG8|nsE<17Ro{5#@*A}Dw3UGrh(8c3<wwjmZ{pYC|i4l~PlG-KVketNF
zBRUiEB3%IbXI}73cyqFuFn<)jMJDu2QXESRF3ZGibkbF4M7l{Cl}-3wat##oCGE}L
zDi7iNH8kfs6B(>h0Y@e&3Th*VyYaw~Sr!i6m__=)O67(Z<R;V=I81#o-oq<sn<1}%
zd;6cq-{wJYS!XYg9+5B=Yg1aUXcRH|=tlNG!5}M#P$PpECwB0ev8`S+Vj)(yNx|nY
zMCBV#GlzhGcx&i!bP29~AX|1WCpUJ9PAf;p7<!wGuU~rJXfI~XpAF~!|M{f6Rv-Mv
z!2|+=H2)t{(&hgbr|>M5NyVOiaAI0?l_9_ThUwX1RiQLXk7!U__#9}+P*FJ3Gb8uv
zzuk7{-T#seb`TJjxUepMy?3wPUR*wR3FNp)MC;M4G>o#HPkYrS=P!D4<jA=}STFw#
z_nrUsF(|cxN~un%PeFJ?;q5&xaqxF{`=MLD?$cAX*qeEFk!%{W`{}fVlMrMkx8JbK
zYXx6Pmj|Ze8kzm~I!ufLrJ`x2>M@&U6n*Yf*jH7!Yto5^rmIQlfnzAA2h^na;s+hw
zmt`WuLJ2Q^e0g#mhkr~evPVVg%5O8_2A)lMJ&yPd&eiF(yYHS_wUyODjGDzq)cC1c
z%hfr@5<%DY#KcV`+-aV6Tb|{=nMCu}z0z`7HHSwBEN}0u_3Fb6g3WDqoXL%gvv;RW
z>ozyOPxqsGyt;W!v)Z;7a%;Rc{@?4<lSig=LAHk63%bjcgjWy8lZ}(b$w!|R&!toa
zy3UdMJ5AZE<LArjv(Y#ayJAAgUrF#t-qjAh75(bw5||q5S*44=k6tHNrajLg%s<FV
z9?&*M)un#FdA>oMN{rT=+@rqt8L{(0<BHZ4>REtereCj2qdz;BqQ8crD*YoiGjWy0
zy$1$L=@BkSU!67!I@8{bI{hFBryM^Y|6TGtd_LK7cbW9K7=3gx>E^W3h2;IQZ1tW|
z{up^CN2$rs;L~$9fGC!cpL9+)$W)kzyL>UbXeduM4v@eke6C`_c*ab&Lx{^WvHW2!
zk>Ox3GQ&i}zncFdNxNr2#SLiF`>N3j1cI2RVfWbO&w2SMwk%JN3<j;&4mW~Q(_x)W
z@?ny14CDe3Me5|{uPMD<o#3FUVfG4<7PR_8HJQ~unsCUQ=X%m03=L1OYaZr8Ohp;>
z95h6br(#0>LW4vKz{t!5vNGQqqCq!K{$P_I$sHxbjq@JgX7a~+c@V0R11rJnu_9=V
z%r&_kjK{8xF?>}b6>||M3b*8(4j9xov#ep*Nfz$v0fhO>&Bp)f6>`9uN(CnZtymnc
z4yz)Gt@F_Z-lab=;DcO)+hsNK;gKg89qbD1tEgz4oK0SgPETh)_<TBO>K2o?j<(@y
z$J`P<iZ_U6is`#!*7$9F$O$6i23<r{uukQRqG*RgN&^{Hp6Eht6-0Ypc`yE#T^w^B
zYeAIN6D!DHDqIKx2Ru#w`YaZQv&uB%no+#?+HYeV^Remlu?sJ74PzvB_~sDO8ielZ
z{*7OCRa{xHH^XGm+ofWktd=)VAB6Wwz7t2`wSaP^;-a&t&f!kz*3FjoNtuSmDkP>0
zSaVuAp}#~i)ZAbNK7gXAzML-FKf3!=SOn2q5ck&!kquDRF=&9tOW|1A_NwK~GX(=~
zh#*qb2Npl7PXSVvkcdHJVdW#5iVYk=l?7+>v(|Lg`4bN-)TdFg9;x@a>jOfcr@ulR
zs#6A{`$Lmn?ZiO8i0)$Gw?pqUC{DW9PT)%c!E5V2n%TBV1C~hac_(}U*b@{|T9*wH
zJsHrxIif+{4yAi8T<1nM=-i&+#~gKIU?~}}3_rWZr9uK3GJfsvH0`2Y;P@KOrqlwA
zF{OsCWW>%QgRzhePd(Z?v<0fH0i=u=xU7FUX}^!c&%DMbv!f47e0^QEsujVL#a9-j
zPVG`wOoI7E?>CUPdx*mhLg!Li5h>j}D6v)S>>e~g_&^+B<v@KoyFz=kO6FWjPK!XO
znBWLf^pnHp?PU5vL9>Gb^_dQXyhP%TZxyNMFUp;ve5+|wqKjcGg`KVnaj5}c)}_1j
z?<Tr~-cXgzf}s^|%jyVwOC-TmyZ8YfvGrp8VZCJ#yn(kE0Z#GsdsRsX4By2S)dmo2
z?gb;cb}W}qHqH*@DDL2cBTnvX-f}l}9zBd2-`$Sk_?bST!qCwB0ynIO!&HPQ)^@jv
zc%Y#p5AHnav@!wYAFYEXV$o{LS#>pHjl@U>8?{m|iCCSNAYAS_SlZ61=lPB%BJevt
zFCg;japvKw^y725;mOD{2eHaO6QL<AtY7;UU_PG!d|;}ng@L#T_|I)8@mJ<EDI1%;
zCh2fnmo!D5#1$BzEdL{z-PksE6biPS?;rD;!-4@BIEBub+z8u9*iHc@cL#wez(R~`
zylQ;hESfAI#NolG*Gw6F$!+tdO~M+4<e1a<g_wT2K-<;(^5l4Z{*?Z{^P(!;5k~1o
z9u@7KB)WLX*ZY289kqk!9b%=d%SWtwO+`-cgE}nFl|E|Sg~#dH2IKNt{bOA95vMF6
zUEN4PjV}F$9nwkrY8f`RR6uuXx3tm(Fc2<&prv^BBFBl-WU;x0XV)(viRPaO@%B%-
z+BUTveM1{N_-DP|!pYR;kCtcEaw=P=RcXk+>P|`&Gs{8!yy$2dJ#s5byI`M04&&u3
zNY%IFuNdCJakzo)G_2AuADHZ#SQHAWdBa>zmAhPzk2pQbI_!`EEtB&a_Cd}=MZ!_B
z@8)?2Ddr+8vHA(nKNs-<vG__4gn`;Ci9Hu;LccAF=hV?`eKr9OI^lp2+|Mz1zo%SS
zpYu^3&El(n6h+7aLFffV1<MeQWfSA^B>f=y1q5v61*O3AInKZok39HaO3Jzz;a26t
zxABS`?|@*e6Bql$prgbvi{Wxa;rU_%vJe5E=d3#jY2-hgh3bHhxvYO?QvP{m%@L?;
zlNuNFn>^mETBNxAFA)x7=tnm&BDSK!OyAMlYeaz87u|dQZF(GZXQ42SGw$x4>zTpr
zIHL?qRQ6?*?-gTY=zmv&OIC!<$;rtbOFhWq$9`Gz!%$_be3sn5_!60UuOe;zNmEBU
zH!C*V0%nK?RWbzJKrXN{z<Lb($lA@$yPJNsh%_K}3fhkew;zJ*j5h<?!y=9NbLf=d
zFSGjh8)DVg=0e0lzCYEB)T?Wg#T;cu=@pGpI$L$B+yLW#3(Oegjo=L7;<cKAhPZA?
zI5i#|92YnNllUBK;KpXL(k+R+OxfA3Kw)AOaUowKE74qx23-NS0k%>r%yrYjb9PO=
zXJyDtX1o2aSk>%vD0oN#P*a-MTOnjsN4)twxa|9N@St|f>n7kr^;{}lQ86nT=J0Dx
zoYt`$Iy{M1u|q$#;LoTIFKX65jf>&HRm8HSD#1=pKIX)UAzvD?^wh#!-2>btE>&`n
zy5*P_vc%v`IO-Vi_Fc|a*N@;^?|hbiZjapS=UH1^5_j^$CRx!gEV^5WW7^=dUdIqj
zi6cS5h#1;NNdqsjWx!%)uv|xMChTzS$kcX(ZSQs}(}8IMxG32@wtd)f_9t`!Qgtih
zk1t40062<Fwo(-e>L*cdyoQtYYdI!D3B^x>Ouo(HCD&*(9WpZ9T$boDu0fqwe8GH!
z^{{P&JNkF2qY&euTG4^VT-!6s1Slw=wgc+wqFz~Kk<ukBCLgLRgy4p6>h%HZ5?be!
zsHB$8)N{e%!*k^(bx&oYq|_gZE_>}6;fQ-3DMkM2Ai(#<@BxTKMk05#NuuX)l8Q*X
z)}ETilQF#IZ`TSON%w9393tg}vxDcRf&K6sQ_EtoYt;N6N8f7NWGdlrpu3ZUvSmP}
zBCl;6i`@k`7vw|g=)T{Tn>g1%8SfOM%CFg9pheLdJdcAw5HyVj#t2(Cp@ZSXO&5}J
z=EJ{v>-CbiUuO&-Fs4UN!{@W090VJK1s$>*blx!=?gy0dxR>#{zOB;|_>w$9*KK%4
zm1%J*BW#}=!V(_QFx9g-@C;~@naSC56c?{WBI)%=HbY3AsO>$|p;0#;W%BxX?*VjB
zvGskV>NBke;?+K!E-QU~^DsKNs}&^;-2pZ6xP1F)*zQ{`hh@4JqlM!F!ws-kVx_E9
zs@Yw^y;a8T;6N_Nf2ga?--~B_)tbiqC)eHTa8_(8g3%ivOx>0Zql}<vJTxu2nv_D@
ztenDsnKYz`<mS0PMLq1bg!+0}(hKg3I<bIXO)o~*#?1tjU?$VrqY+O}yeCaHW10QY
z3(I<2<*iKIYM^woIrit+)X!n-J&1soY&KTlS21S`2}Qh+HSI=XbltSs1uYdDvlr0~
zX=_jmp8;!xrtsvE?x<}X)+n9-bk(}cC$tSuWIw~F8ya`+$h`sjo+fnGyNu<nL)P^|
z#71ix_7e`P1E=OpD3KT}LoQeY66yDKeqbu84&&17@#T6~uv-KbeqO1&%;$cd4SM%2
z_uH-g=ed+g=IK6$bg~^b-bn2^S&K4;wx+NshOnv6(QBs(W&L=Q=E=&C=NCkyLA{%~
zI_%@uN!tA#Af_AcOTOc*H9vJ`2&w+`^Z(?kJ66sL(ln`6^<Ky7()+8jvb3A^ySsO6
zSuciGQ~5Mk?$|%)K%wb{BV`B?(&*M7{cBW3^f?7f$*og1Hz|{-^(tp-1Je2Lr=vet
zD%23fUeLf!S{~VtHhM?i=jJtK)-+*q93H_nMhoKiEOzzj=_CL0V8a?`jbbKSj(Z_C
zQ<%x(?gZkm6YVk77$nQwhZ}^Q(TsXyzEB(`I+sVqie$-1taNP}La2=kDijLbmYO+u
zS8vusL}xCKR^1q?poY-vf?}P04qBEGA!$W2kjAbnmH;KXAH9)gdpH4#9hfAVriXxd
z=A9@vv7&L_@>}MY+Ta{LrF2OJu`XP!zi9m8U_lP$vh;Tri>F9@J0HsLMLoFdEfztm
zMHhW?usY_E8peq^t--Kvtp~x4zaOqhaLgd5F+rx?yVu$fto~k)djBeYF6-TxD)IZS
z!jBZj1>sQp>uf4vi87<<@yi3S*;@8jckKA#!d}jVlApdooCvFx(?;90l!2LRmY+;I
zm(;ARDtd<wyYG)~b4YF%?AtiHjSk)QVJqgZk5}C&^4st5p#>hh-fo?O`%W~OmFa)<
z<9#U2pyS#0#KD&<>ZYQ7DKZ$LoW@9UF{!^(fTAGGGf9m5p6f1PV@CEHj9WTH%|Q&P
z1iE)vl;}dVu!s5RR_G-2N-{7ICak77*giId)eb}<;5hWVF)^nbhr}^1f>Mzk#ytjs
zU(VJhgjq$DsHaRFgOa+zgL1~s$&=RAX(8a##hPl=nvM#e((9&Cp|0qO>(m7eYVm*v
z&<5iWh%r$&!wV-R5FP*pUW3AWl}vGcpiW3!#4I~oz>YYbv{09}mSs=E=Jw+;wrhyR
z%Y|Lg#*B4OG*Ti97yUWmwRyEbbKa1K+ry>!j&NT1j`B&rCx5%a=4qYU9Fm_pOwQ+m
zbfgL?)(B2Gl#$trs)(N)5@ap2ME?3Ih+m*5Yk7=w3{`Rag<YITqm@k(V_+>gtXa3J
z6+4EbB5K>phq1tWdbw^XIm%pPUoMh+lKOCqL8G1G((XTkV(Np0bR=7%QS6gmBtggd
zC@kQY=8hY^gn*5P^Jdo5&4y8WVp?4f+KF%6*~5lGlfVhLlxFu2rWJG&cUXvxl%z|U
zpnop|N(Q`{`A2z>fkTkXIH@`xv<(S@^2HAaHBOxjKWu-u<I#wKt?kTsya3y|i|5AJ
zkqTSa=vajnh{X;}DO!ABCQEdaRlU$281U!p%U&YBU>ap8vZ&hgNOAjCg_(~y)J;jG
z+$oHbo+!l7#7YygWP|v6F7j9HBJh78-w-yaNgam=!aIlS@UEySIXjgA0duu|QF4g*
zn7N^o=$Vlv2|hXe!!is&PpTokzEY+MK6!0aqepV>gH8kD{)ATDvnHwvP~s>biDkMF
zjNSc%JRR)(P(2VM&j_|Wd4o8)SM`U8L~TT&P9!FnGGF_*_K46L{F2-L3946?u`M?h
zqG042aR|Rgr9vk;M#uqmrSwgJz|z>Kj4Oqv``1LT!JY#tLid~&5+_6@&>1flNtv$%
zTLgSBJX7j=`db(4cmQXjNU|nmt!eaAJ*)yYhE&C6S{o=91vI9RyQnu%;u-ME*RTWJ
zv=cPfuTuvkfg72QbA*%>B@t8EzYvpdLl1o?L^^4B7cf;mpND`X!5d<9j4v2+bvpKR
zUi~n_-1{`L;rw_H%yw)!qY=PYB{5<O<vD2H?NDvUa9oY?aGORuBNv;Zgxb9iBICaq
zotjM)8qVb1DR356d}nyky*0Vo3PZWE0hqYOgbr{3$sbkL(k@oK`REQ%MK`nN;aSr%
z2knd4KOG7xr|y>y`RU_KT#IJFh_ep`7Su633<Gk5${A@kBlyTuV9V3!<r`?&Ko#1p
z<ywMQFEtPAq<_#lHLDoPGjNGdtYQ|}p0zKxrQu0#m(Xc`NoMu<TEW2YslF|ol+iUl
zA|26!Be{{QDrYQ<T$-n557<V4W`~+mljHR~9W(eSG`)H1W(9Yh9LL+d<Uns+K2g+H
zZuQ<A31TC9#Ont1Y9GDhvT=Bguxd&{llz|S-tFDs3@M<O=?gIb%X&Y5?b?=S`<6{N
zo=WAwch$0r>t>f0>{GZyviZ0skKa$QRr+;3rNl;@00Ou#plX^d!&c*6da{kodRLO?
z!@846<$dx@UC8NHO2|_`m{V8s`CZSN!Qh2)Q{V!jCQPpHPFybR%@*6Xvo%2<Fb@TV
z+Q2Ez*z?@(O<(tc*oi4|O?uy{?DziAwyg1@Dtag(r?RIzN4_giveT~f#!CcsX)@B^
z=-=qY3CAWKaz~q~)$T3Ym9f?eFV1KjvoX12l6kpcu2U7}?Kz=so@k9>(v|o+SrWs9
zJ%wASUa9HB-rT2aDT9>9adG*2gP?O~9y<tk8SD5-C$VUV$XQ3-ITxec+3Z?HhgDO8
zbftxElqyQ|;yQiMnUxxJnT1Y&EzRcqv%E$x_GT4^R;=+Bw$=_CR=4#Fctg&Ch~nsI
zFd0iV6FP*P#+Jgatt51kG}N(>B`^eCa21OTN5T*y6IB(PK0S0&^_~ST!>>O1R3r&T
zx838A>q%y*;dJZAf4F(G7ByueW<?l$$&+C-k0_3d7AJH`{k8+GLG|bm4R2BA@Zw%a
z$XRe6S0d2#jgnw^9JSIkuMX>x{5ryEKT`<3QRkiT0SU<>t{vWerFu?`pGX#X*k7;J
z5&&-#CXFR>2>=os=Tbrvk1CE}W_ds6$T=WS|4{j}yme;|@}2qX@j-facN@C4>(9O%
z3pQO>Bu>D%B9EvhlQ#mWG3~UoPU$!wu7+puBus=m3(Y~xFCY8i{8gt!OjKA7Sz*J_
zM`sAQ&ayJfREJnwr@K$D&qA+9smuj;PFt*YMjKg3f#c&U3Ns)7XQk3l>ERC5^K1JN
zOwjmOBl@*vuGTWMPCy=dyxFhO28Sej7&PEP7mu%GH>Dy>UgLCWU>?87>Gtclzt=ue
zQInjjTbJ4p+3kAcIa$|sfD}b<3~0Y-*Y!Bg;eZ>mRso@~+pfsa^s*8eArT!4&ZA1=
zck1+>&g21dZIK~xl{al(UY@6hSujO8f3koc82nwbW3G{H#!nY%j|4On5J`DaaD#^~
zx!*u%m8W+RYPXc@creco!n6&BzMtV`NMMrkud}d<j~#VA1pPs!bk~2!<UM4T)Yl>$
zwPC@$I}dBHVa%zWC^Q0qBhZ)L3H|5ajeN2p75_XT`StlWr1@@?4492nAi2H$?La)#
z*MXAnpt*~+kxDwI@mqZJMkrJuPdhq%fo;Y?<?r-LQ_f#IBfLM8mO^FV;!W84jYv33
zlH3K*=ivaxi4xZak*1SS7rfdbGtfo*5FUY{B!zd#h1leXC-g|pUrUt_NY01LCgL;Z
zGPNE07pAl%$@DVR+g8NUD@h{eZXZ7BqU;>rEQotD7ly1vCZB{w5j)t#DG|5E{MdhU
z^4$E~ZP(tG<{?UG2H<6HqkOB(BPRiARlYlPpTy&tSobxV8`*~)(bg#L7<590@mEN*
zO?=A#PaC}hqGp@w=ipNOvqJJ;+1?);-6A$g)_On?DQxzpR%1#m7_Dk9x^zyd5VOz%
zrXAkaZG}#k(@|h3e|Nn^%QowPob2&_>QNYlJXUZVc;yYG0Z$J|Lm1uku3-$t6b2vc
z!TS=qg-}RXQL~GhaK=l-U2qK?PKB@m)iRZS&-HEVE-l{EgulCH1v_6BdsAI{j=l&*
z0Tu)KTkuxAO16&6X(w#pkt?7FEYLe|!5CmJgYDlkvAL`Ug!irxzgA%GJ1}^SFCa_H
zY#6LjKVf50@j@;;_5bL42Ov>`AX{{7-LY-kw(Xfawr$(C?U_5aZQHi(`LplG?!LGG
z{fg*_is*{ytgMReuFP{zmTamFk-ay|$xc5=;p0SAaDTbr&J->wh$x3_GIwVV7msD&
z#x{c&iC{AD9-F^!@iV6NSB?)#{}8_?^Wm=lX_^7gT3^N_s_j^DF1+a>CoX+zf_WLp
z!y6U2jP|Jd&6-F&{|`~O#lK45X;=UNTfzSyr@EN@zoKqLBDYMR5r3d51;TbKk>RM}
z0z`#+iFXgUTLkO<m*NJez&Ln5uUuXpJ6&AZp!ds*lW;p8POr~aoMtBBY{i5aAVTNZ
zvBRd>o_0?!Pbc!;1nji=PIFo=aN^(JeB8~gr*7uV*<|TbQg>p8zvjFUB9hnsqQjH~
z<_2ug=ZGZQUwI5I@<t}oLnK}ZxM_84zf26Q(se`HzAvcNZ^lWF?rcCcw*<luU98x=
zJ(H<_D?rU2d7(;w#0MQBA;auLm`ztM{~d@I8)l8ycc$_?6?~vzHZ#drnNk5eEUXPa
z5MY*;kb)cyql-GY8>CCA{k+#_mIU~x=JOS)%bKA*^*KLg$DZ*B<}-AKFz%-_W;5TO
z=wdg=Z3apPo=rEjyv6Wi)N;lRpTpMPcXnrAx{Y$va}sFL*l_|rwR`#4*oIKFr0QS7
z3*wgYm{3Mna0k&bnI{g1-+J~GxUGi(uC~V><td@Ap_4&!*O?2kTm!I5x%{RtG}*$3
zIBUz@Q%?KS+{yf-t}d_B*zuIN{nfb*2Nx5wF3_;;(N+lvBx$yk8NfZr$wy5`a|H?3
z-NtuH!t;ve473PZ#*m%9ZN{Ay=IoO<4P)l`!wUI6_E+&h@-(7RG_Ygq=%{#)7!}10
z)ziRIZFhhP7p$x6L007xY<Ds3VB3wpEuTT9>Q2QqKbrXsikpU^0u5l#ScYYN7xQGx
z6sEr&e0-lieZ(-&;!S7b%moovT#&vba||EmJ@JmdJ!1rV5SY-N(?<7)aCz^Cr>;c9
z>kHc9>FZ^1i5cqcPZb(On^fGyIR>W$gxQ7(vYNhP+czkf4Q9<%_>Mv#AD0#QlWlt+
zDEZZyLN+rR0U;qUO@yzB2o9*dTQT6c3)regAzU0YzRHh>-069u>;aZwu)0Z)u*8q1
zvbtoS!k6GQ4$9D~=D%D&U?Q5c7n6K>8NPQv>6(Uy&LT09uulO2t-L`Z(2DBy3HHmc
z#!SKcZ_lIn&L15(#F{CQ`Y#1{;tjkb{5{R+pk!_-EED2R*lK?AVJnV{k}!k}ib8_K
zxoK)Zt*C_YOh)*tC~Vutl#3W#T>RBln*#Qn!6=1pWP+~vrVj4%lzPhrkhfS`G67ii
z&{SkpasuYrF==oJz)e8$5HD&O*n^4P6L)ra1tU{3F@ZwR#Q8h-3;NS$c(@Ccgd@R1
zLpMZNF5tHSIR2Ip(%!uY3$W}WIoli`R4?yY8_=297%Hj*$k`5p-4%9_kizk>4WnYo
zcWwz%17zuA$jWL&8hjD&_ooszOquu7cePd%(5?^aca|&|s%{76Ax=l|Olt~66m{Zy
z=WQ#KS;!mAQ2eSqKcJEviBCxw&$%*EiImIjsuo$fb21IXIGRQdaXGZ!!{x|ff86Pe
zlJfSpndCYCIcR*C@TXtZ5o<GHO~2T15fV=&NYUNN4nY|KaRol)N?;?*cpFD=^w11j
z;dde!u-CvEAU{n~!KxY5#H9+&rQu&F=XcIYFbQ3f4Y1=}DXpp@_CpSv?XEm%$ii=_
zhG#JjaXvQ?MO=S@46ye*B0ox~;@Jofq5?h4;XWrujC?r;xPn?d2(H#Hj87O!cd;$v
zOjK7vZvQh?KwA|RZXLDJqo@Qd_{vc=k**FC&W^UuM91kTI%OCidq+O(7g{j_fQ+xA
z<6)13B4B^<!ne;ytfj5k%P2f8QX$L67`f!^YVkv^N?>EvIXzQn*=uOYVf|>U@z)G)
zPaV>g+QI?ZSJ2>^Cw*%~#dyF5OLw8OCKqe`^tp>95`JoTGm|^z2PXw|%Lyn0F%C<>
zKgdp_R`~_i4!U0IC?D*VUX;y4G4$1RL{U6o1kESQmb3O++@2xctV7M!EO$8epq^Wg
zC4gMi5VpIzftWkg_W((O<?UKfIzyNf@cg0`QQ{5eQaNeOhZJUsnkm_M{)XsJ!&yhE
zij6%gZFy;@S7b!xJb&QiQPPD&B@@(?5}DFf$1Am&wL-UCm|EFugsdwHA^)kgR7eDJ
zK%&kNXC2tgdT+yUM&F#vkC)d1nyqd`9&A_crm`0p1CfjrhkZANR4Qo^u~QYY(B4E3
zox2+_(uV4;8lGVk9u_rre4+)HW~jI`RG@N;eBY6u!vB-Md=?kK@8)|pWyQ*{;>eIL
zN$vWU!)8M?(z7&c8lq5Qy^ihIk(R-qpDJfPj;(O@L0{C`dTtT8_yi#xwVkhVpd~ZR
zw1C{f(QV^knbA#C1!L+<>FZ?=55VYzTPr~B@Pk?c{CiuAN4#Rs-jNdpwmC1|(pEx`
z_tW60x*}L$AK>_vXyj--@SEtg7NTCs2&&xR>vhAq7-AkA6qBVr8L}ny%Y;TTu9l6?
zHmn-bKvbLXi-9Z@i$3o&#}yQ#%)uW6mM;gf9g#Q<%c#FIOmSAXT)4Ap)~g}6#51-c
zvW;mKCW$iX0~aORDu#0bqHu_B+%F!QpNOD!;ODp$I5(lGbFT~r_15pTQKJvr_DZ@`
zkA#d~9ujb5(?Th@v$jQs1@Q-7>Vz>&4+}GMpql-HLce37V!AC-g)}r{ER!UmI1E%J
z(jCfveFKAt31G+6#fZsFH?|zWVo%u}ZAheILB$T+fR|zh!(50MZ<d4^3Qo@UE>6~o
zC_JqH$(;4IRiHbYIgDh0)#ds@smSg4qzQekGtV+6O<HT&=-V^DXuVq-m^_N!0Er^s
zp^P%2l|D1xeVmz-Maq3mWZOR}l1h;I!`QeW?buwb?BjwIwOyFPk8P?+Rw}dF%Eeb1
zxuV+l5Qwx$2+exMufpESQ*v4L@Cn12?g{|(<T5s<(UlSM;4#A-=XOYJU1xzQXH%fz
z%bPwk8gltv#t2ZbpNE30XjUR>`JQ&yB)0{qMAqPxRsf#jTMQSVVo$smJF2%bs?roW
zqLcrKg$dPmc3;nhXl4w0Pa$y;1RhGgEN*aXh@=N*b1Pq{%n(TnovLwR2j*w1(|zO=
zfZN<%VZvw+m>5<<*_mlh(j#Ym$3;avpn$DC5aMz@9@g~voQidsyEZMSbwWsBxjaRl
z3^Vfk7M!jtggWRm!E#XbU+zX#fx7JRKIaPZa@MPW0TRBd>AE|YxsfcLuZZR~x8n*5
z1oK8Xx#Ucn>Gy}?fAD4!CpSfz3g@(WnrBNGe@8^4fQc~tGI4R{z?+{?vWS)Z1ndN8
zzh!&+`|prh^Pi;TcW1~@R~nO$t|I~-CQD#kbJP!GG>ZhH3VAjoU=k$8G~D`dDUqqZ
zP6<1~>6FPFij~#ag9(VV2N~B7gEPHN)e}Tn#sM|Ph>A_<wK>Xy=UzApoX_SH#!w2$
z#xf)X6B8Pw`Ab5g@4J4|WCVj15CYK0byZkm;K1%{5tSuq8TC~J=NMRDYEM23Y+=GN
zl4|DdxevzogF&9`OWp>%^08nCZ<qO_Jd*-zz^5A{m%e5Z<Za;Q`8^f}7ll5tWimID
zJ3;8}yvh{~*RH8i=ErTGtY!s!E_@Zw4=FplCCG*rmqzhMdhv~MN;iD5WCPoaeiamv
ze6ommTV4*elYq1@FqA7tn-Fz?e_+CLH=fkd#G&B1UQ}+R3E)<{R#yWvmuaUwEec0*
zU-v*R9EBV{0{h<-g`J-M2|iX)a^>k%74QIz@<m@S7S)8E=}9tnv*hHT$&DEU9z)As
z-Of!JCRJKxxc5~0jRkDj+vNpg#;IXyuzFUUmrc>SuE3zNJtM1BVPK!YR(#PAIlaPT
zmF{FtPNS+viA(!B|0C0>bqKH+z+O!X2n*h9*?nPcKzigfZDU0Cn52st%)j=O)y%-{
zkJt1zz>s;Z6O!v!x!jFCuzqV`_h_D?FIIbP$08O=QrGm4m0O?KySBdiM~9VpA2PF?
z)=RF8mN9U-e9H0;w7uo-s0Z)5t5^de;~_+qhcRftM+8VkG9Jt%<nJm5{O0AxaQpj*
zr&KJ6JFBCfMuk9Zq`AiYBo-JZ<!iY$8=GpO65<(*L^Ha&ng$1`q8z+%?V)Z#!>h6$
z#+9`n;6AQ~p6-lO@Xd|F?r5IC#i!%3#piVP0y~rRRY`Hi9U_;`1<gqmbX!zEN<{>v
z)yFNP826RE8Om^|(S4z$vXtS`(Feh_^%s~YzG$%SRX_aM>^cUeSsREkmw0ExF*7M1
znqy}!KB8SlyYQuoWw$V&lCy7}_M0kq%&KbG5ii_|vF9V;CsVZkngc{s5yvf$V5vfB
z)FX%eOa0dT;}<Mv71c@NxIAQsZJ4Tj)p?gzInz@`q@hJKmC@Y8lQ^JEf%tvQc&BW*
ze6!_Vgej7>x5AGmvZ6lj1>;O7a7s3(I-QFbHobF_z;$OgP{<gG+d4>}yCv@#su*2+
znUMFNs^#dm?q{9O+vRvFMfp@PEMBh^UZDNQi9KcISF&WU_1`qvsR}39Ub27WmXV2#
zI-d3*kfrkV@CE@29BM1-VZ2J12ylSxWoIlg;jOz%2##@Ryy`f8!}&oHgg*YF>sdow
zH#cx@#H~~A-BiVZB^}18`~sh4O0rX|j!v;Wb3;v*<*My{Nm<6Eq_b+6<;w#;4=pRI
zZkhfZ$6LkHC5&lTnM58BF|@hm=vQfbtL38H!n+w;V>9RxSp_1Y%1;WWa){_nvnqry
zbZ6*<6s&)<yPx1Vb$I9!R7%+FOvyO?3U_yLBDKu<;o85bmc^X9hU=AY*~Ji;rrGT~
zg8EeVmd~K5bHgRt4IM<wsl5eZdlYiv4R>YU%9;P{RDZ*dPd>#J=U7sW+U#crOtG9x
z?9Yqr<J5v;yk_exZse}O$9!H{xn0Z{h7=}PhyUS2L-@2;!hz%L3y{ywqA#|!rA!pH
ztUX;*GweTbXeF0&n=geZ6}lt3WRW`*XAi-<B_kpgY7PC_@kVpy69t0%Cv&W}Ldxyf
zFFtTajc9gp`vk-pK6~lhyeSOwdR7D2+iT--dr>MJB}l7Pmw@Vnnz4@2l<&IKCnxK5
zHipTB%NAX?+Q(Afmog<cYbilnn*J%B_Vkh{DpjA}nuQo*E;ic3&gbq!>8xV^r5~Az
zI}!ema{PE#^I)VR;evLOJM(@234Mar-_0>Y`Gt+^WmGtpLG_2NH~5POiy(<R0S7gX
zx;crb)Zgv{F!v7x*j&}F9{1<N@P6sY)m{efIhJmN!cQ3NN`Lh%RulBQ#pXY5r|+-G
zwidg0nhbBHMB7}^Q6<}*!8{)<+v_iNepnFpG&B*zMs&B*F}0M+Nzj1V#6?J3=ka6G
zGks3muBn<reFIAf@ZuvUkQvg6Sc>~5ywz&u-qqrc^l9LYr|zU(s0Z&L@I8@7#hMLr
zom#@Z(kTY&KK`#7Z^l*KRXyz2BE1?oorIq^pUWzP(UA9pNNIzL<wwD*tKLl$`t&5<
z64IHn$y7Wz^oHEb2Z~CVX8kV16o?dv@$A_4Q<aPg$9vN{^b~-QcV#Li;!ofm+p$wg
z8<2^n`idGIN6L+-naA<X1XWr7_|U#k{})?w^o&#hNeeB(o$<+1VE2QN{W1)=F7(Bs
zhMfDPCES>Z*MW+zrMD12`1(_g8#T-$#`U4|<Z9jE!*O=9Vly|F3yG&opSuJgTv4L9
zSlRM#9@@XLDx6kt7Mtxmv{$QBYL@pRYvv(pJ_*)CFPFqZ@f~gd&ISy0ns~WSZ<mlv
z^Vp-O^D|`^<z_TXx2E6LGz4e`GS9g-ZtUqx=h)2$y(T|Y(>>AGoKoOk@Vc}#kt^Ht
zDPR`(xZ*N+>xGjrtR(l+;7mr@Q;qBhe=g8=iNQwixa094d!=Ww=Gt&*0$^h*P5DHW
zs_fOS8*1N}wIb5y1TOL`l`Q0(B&MW1uk*XB8uIQ(YV=%5VZ*BW!&s3nAg#KX70tZO
zjvzFHF7nnU`CFb4khx7t3qVYoe`9Z0mj0sfAfdKJc&+)9>-keidHL>`ThlURxx@8(
zR*D+QGCZ<8*%)=wMi)aC>r|ztYDb+D#KQGRd$+EjjWFoU!f?!Hn7m0VRYyOMmsukb
zN7Qr)XTrGkA}lTSO+UTJd@W4%?k>x$Gvn)@H*SjpYR)ju)8^3L{kADLJCv^6&U0^z
zB%rubculD4UO>tk<vC53?qvKpf~b?5&x_z+0BviP<c_kd*vPC&%Ex%XYSfgqs@|c%
zavWloU%*Yg#KB}w@uI^B;sC`l!<Ow{<W*88DwH>eRSL4iDx`dZd|hskc79oC4r6(L
ze<<bqp>_>~#cT64jnwA-(6aQ=Ua)BGmcBNY34mv6l6t>X@{#%1%^oONP(madMl^)=
z9;&Vbn2s<tJyyyfw%zCaX1V}BVpzcGMQW?xO-r|i1zM)#94%1HejXd)o43sFj2_dE
z1!ZXhpJE$J^fTNB?N}R9^h*{-v^BYKIGgw5&ebf+zT2QguD)!BDQ(iBoen#n>o_!R
z$Q%}X22rxw9<TtGQ}^hh?r1kDBvmXh`jo2$mvs54<^i3uTB`&UOD8Q&mAQ+-6t|o?
z4R%2ySx`7q>RVRa)b`|+PT-;m@HDUn6U$Na+hoL?=^VKmHR)Gh&B3tvY-&9;{m=pH
z{rafB@~Z<TTTCCx9_$TPcZEz6B(>jwSFD1NnN!}Xt`17Sj|+kXM;buFvSM@~-_cH5
zJk9AZxP>H~pp`ehiU+dCh;+Lp`!SqT0r`s)usqAJJ+hUouCdUUMttsyh)wv<9P!u2
zgt4E6I^Ts{h6V$K!x;P|NE&aMxbeanw+6k|PFqG{3P0hK+)lss&=t#JLAMfu{ss!n
z!Ag@8dNP&`Z2p$RkvM6%ZlTY_!}e*b?0Dh`E|@luqanp(%lNyxIrNNR)t2itRx{nA
zeavx!L~EF0w~-$;LQ2njp+h?!UttM?1)XNF*<XH(6QQhDUBZ4;?&$A$n>?rnC(BtJ
zbj0E+-pE~jU<zaas@e_e0Pj!imZ)ewG`E@*lsEXrD18Eg(dqh`@S}qM%h!Ao&kE>P
z*fC1r`!?(LWr_<;^iM$3q8rLkP-(N?i_(LtHhx46Y9V;NGlwsbeV-TRz0MdC4C%~8
z)6K`Iq4=D!LNd>CJ`bgbXT|H({pTvsxLovnPSXB39xYo;jC88dJgL*ca8NC=Kihvx
zN!(U~wMeJefTY$5ckxvCx0Hu~$v;7<9AzF6T=(g^X~3H{3)<M?-)c?K83*>1MALd|
zj$p$V8Ove>wEnGG(28F^_+@B=MsTd+242K?-$D+Zwxa}#Wyqtn5AC1;xdTHh1eEu<
zwmKBQg=LL%etlzM+|(Wg$wS5dt5)RkwecHD_`R1R0`=C4=QQ(dzdL^<#SBk_FDd>l
z*8-i?>rzGW&Kg$z@W%W+Wjur&0$V~TP9XklV2#~IhPKrdBTm`a4j28e<m0K7;?thj
zCYetfk_l~Uo%kh6W-6z%i4z~)%cItDapHBqGVdLK11kUxLh8Xw`Un0xmqgg}=Wyq+
zMoin(<a2i7euu`%*h{J?s;z)`JZb=<g&Rpy(=0SqXbJd6oZ*o`3EIWDn~~}4JA$J=
zbs@=smd&_kRC#LKr*usa)!10r_Sb5sohXB_sSXc|ksk$hHzxWj7&EimDmczjU=kZ@
zh8}wmk>pqyD(Fy3D;;|~?u)HCyHMCfuJ$DWwtAk?cLOSHcz4n9dpCm;JbI?f7+kkz
z%;%GIk<>0=_nu^WquZ5+Ez3=}JJo(pof>8u7ZmsdMdrV%inj0aQ_cP;fb_<wg57&U
z0xX5luv#XhHhE%*$NUj8eK2h+s@Gcx&#|6=3+O?&Ti3afJPv=0eXU0{?Zfs4(Lhtb
z#-ZmAa+mhcusTj28Crrh)VwY9E7I=EzCS;iZX&?YF^n4KmQ>I~fkQ0+0uVq?Hci~t
z`~dvn<2%r)*v)p8RxeWfW6o0Ui}UPT;LLN($?_Pr8hxqfK8O%z<gOq&EEu6`PL_5T
z4&^(IKkldeoCCcKI)<7GpMZ53@Q9-D4GOvXhGyaXR3z|S<h@<Rm00*Dg^-3!4~qdM
zVuCv(Mv%8KgM8xmTmmo55hw*T$d<cj?(7Y-fE^Y$LJ+rrepP=$WRKfK$69m~xep>Y
zdAp9Zn@WrX`+W!D=wWUjH%)~2acK{e%IRjLmS}MeMpL9rvJHbuEjVwMoB7i7=VbXY
zOJrzYU%hM85V1JAFyha=kXXS?Krrx-_4qtfH{qN(F7r<9^O%f4c>ZK{1lAd5$3OKJ
zojhYnW3mIqoaY(0wRzv>vV<4c$VUqMs2ke3Jn6nsWUog+@A1-_RW`QEoqL9-(Y0R5
ze}V3yopTDaPXJ|hjh+)!e)lGz&ZnzI)u^RMWN9oi@XChF&9G=V2RmY6>TmNqI=^Yv
zXi`Y(uNd%UfnDuYdN}9jWiPk3BEBxK5^kxGj1K~o?<xf4H|L^uQ&3qA;$0B%!LfBH
zwnh{k99UYhvgVDXqx->nBUbc$f@C6t7bKN+xLp*7C<`ka41^FY6p%eHxQ*V}qvd7s
zgj*u5LofPB*$m8ixjBf+h#gTR-5ZRy4JUh>Fa9^-Q+^WOWFbe`dD}=>!F>cj2d__N
zhlLJD>ZBo{;Gbx@<H8=PbsN~!<xak|aDbvABI1s#Z=XKhX%HdRK!p*_qcWY^9ZwBM
z86`ow+P^`s{5yJCf%kBcTS6(m>hy?XUl~+1v|Ch*N)p9mT+smxR>dZQ<r=nmCOpLw
zt%U4pV|=jr-F8e$uI^0^8RUK{V!lzu?6kgP8Gmp|o~rpMHiqW!f(sEmKk1D4vIgfb
zQ#VJ9V_?3S{OTJ-c^_`FsfychTw36P(Y-WTu{3ET(F&a`M(?QgfA}8ngmUAtUT(F<
ztH(uSgyVCE2dJx$zG?L9bA!KLUeLb%_l81FdnX4@YIw9rb=S99$k#U2OvKI{k}+8{
z3#Q(b3ZXEc1;~G#t-sPpDw^qA?o+J3h&N7r#|6$hD<<Fb?28i!o5sk@499jnZx?2U
zG)!2X4PZ|YWckd&;)OM%=03=OaWqI7S&LICx(aY+&*@c?vBzVrmm=+cqV{5i5?k3?
zUX<&%glg*Y`V7QJT+=hCcgNEAC6ml|2-n<cmk$jlg-;~%Ti3MsZ`k5KFd>Bt|Fn$l
zu_ds()h@_5rYD1k|9v0Aqa+ATKx0>7gq#dJFI^MjLsh~HExmHR<ya@aXTT0GVQjLl
zjVJ-LxU+{5QTR6ivT6wM?vuFh{BGrC=tZhm!Z2EzNo!+pt)^r}8_9ddhwi+7FMgGe
z2ZNN=@oyds&w_}7{_=61jn7nRZeN3#xtFH2C&38gZ^(wIH=<$7;FP5Aj^VfXkS{V`
z>uh>o6MiY5_^WtS|GKHsb?m~?6|wJu@$(hD|1&c>F;j7G3JL&#{pbId#50Rl^R&&7
zNB-&XipZqVXta-X`MWS>9gfr<Pr>O`wM9gROq+~6`<Hh5`f1Ho*AHktL%HqbbNV3%
zd<hra=TaXJP?{DtO}7^u;EJITgkhV6(d;s$Jan$mD6mh<y`^@T7&7IqVW=?RBSRi0
zjh4I9r`c@+7QcL2=w1B>g3S8VX%}?;;jIMc>rE-)6e2tn+DXU3zac?x#T<x=4MBE~
zzE8_)2q^ZqDo=P=S1%oS*Xx+j)Wbjn6SK9aQ6LW%IuiOb?VNcypF8}r)v9=aZfm%c
zBiyr@$r(Ksv?6piD;3Z}WgTGGx7gx#R?AT-s)59$dB~W+{kqiw((3SF8l!ZFk};#}
z$LxYL7V>csT%tft74Hc5zrIOu9;yX<%>lfq=%}xP=`oG5VGW!zHr77I?bs3%TMN5I
zck9lQCtT?Z1CX2IzSJg(IT@^y8!(K~a$p(K+qu?p{1uisA6v%YJ@J`B$BO{A>R3JX
zE>~co^L8sw7J5f+24T!r+)lJSEMU0X7xqm&4YctCS*^;hdq}$U$jEg{R25^*hwa`)
zECv421b{BBy%LMYVcNT1F~~f!Q(Yn+Th^$v(8<xw#?ppufZ@V9zoxz#fS)|RGCwAe
z8GNA)C9pQ|s#snvj!)cL@(q*n50DP=kV!R`n$RI72yXfk!CilQ6d;zmx>>~Y*<KL=
z?0bNP>Ga`|)%8&J=v}Y{Q9+4w#Uc~d+WhF4B_XAcY8(_VUVIJ?I|feG8R|;gIG$wl
zg=h|GG5YJwXlWs}g&2quitzXZw|7<t)=B@O%C9Bn$xZ&v!c!06Y06PlD1#IcjqVO)
z?;Z0(wz4hCKU~KB66v>{((Kme9mo?l=Dr<TIjypS1ezY~5BX*emPQ!;v5bc7SkhBa
zO>F3kOvBG+X1{-KwUV{%DfrK+n|2;Cgd?CM52I?yx&tXf4tnZ49WZkoLh;bhWEx({
zS5sn5`Z8Bk-L|ikh_w$epymNQ5@Uu*V#fl4Yu8@EoCyfT{Iv&LC`m?5+V8uxHh?43
z&aJSd5<q;4($_BP?hw2hqP6!ex9ojJG<q{C*xQT_0q8(-*=Jy3Lcy*xatM{tRkX;)
zIGhZv3@Rgwpe7OsZjx_Q;J(EQENxx+Ee$ke8aa1cqNA<XjL#bJV)tj3Tm3)N(G@Sj
zTNf}8GTCubik<oAnrgh3Lt3{B^hRe@uyrNGzs`P<w-fU$h&;4CHfp;pI0Pf_;ds>Y
z7=SS7byQoev!8^R+*`F~Ri`~C1^cidpdHVz+78CNu5U4SiTvJA=Egn8$3nQp96=VZ
z-w%=EHk_7BciuFc)D6-MeO>keux;wrKlNPwd=GNnP@=u(mnA;h87Gh>R$SmvVi;qb
zdMJvqnyo8+V!Rc`Y}d+gHNiemVWf%GLaHUq9yhO*d&e$*iHS@;P+3d}+Vbn5t*jc^
zQ*l$f@nEQy?KVYT{tysnSgWLuB~c~%DH~>ZUP;cXpxV#0|50rMzl$b+|9ac^46}MA
zq@H%Vklxx)cg{)-K`zrm5+fYG@DU2|jJ$b$9NnQLJ4O>SI=@7QMDY<}%5`X4wrH!f
zvW*?c^JuvQOXyhT$s<BkV&W=;GHi%or{$?tf#T|*df+!dNwd)3xiIei7TO}`sTiiz
ze7$<BQh<MpGfZM83g=c*rRdvJBLF*zu9oOj2kVoOycG&!Br^X*V49?QiW!!=z?BM?
zZl*CRuDBo%YT_~M2&2xF2Yl;0=Xe&BhHLk*n~{u-U=f~25DGZ+;nNIdrRUt{Y!p_4
zjP_p<ymZH4vbB&w^jamSWM)9I;4|U--|4}Inot^I=+7zUzg6<Qey?AA1%RWQF`a{x
zv96u9k?wC_*TvYu(cIRC)>_~0Z&WpuA3d^Y!wW?aOJLX>NgwDP_@BV5{bPFe^izHT
zFV|9Pjh>sVL>3Kydt}jX(Cs2-Jmvks7L4!<#(f`2W-`iZHQq1`SM$>wlu`H=DjFT0
zJUb<T{`Zh3_n~uI8tWV{Z*T-D?*IO3nEYCpS8-aD|7+QwUrfz^5}5zPG9w2UT_<-t
zV_Gxk=qacGe&pbnU%37j+BZFfDD)fzRFXx3D%v;|ER3OEK$rJH93Pfi>O;2Yw0=#M
zlLQKWjx9z>DV5H(NQw#9CK2O`y5d5>M(9V?Q^ccQqfbRMv0X;Y?Sr>cg9pdKaA6I~
zMLCXiR6V%~?U|zLaal-7LUcm=)$&iq)h!Ea<DW3{oT3o{H!m<CJmK9L0C<60I}jI~
z1a7`zE3^OU@?IBD7yI7{QU6ZpKLJAjo12>^E1^Q@(ZO~vT~YeicYLgAA!)<Fg<+cE
z*+Toqgu$WqyF8^$s&Don>+uCoZ0Yn^Bp)Q*S=9sMS6x|R8ibh&=i5(`&Fqsxcs2dK
z+8v9B?0*#0DSAP<bo;J9==F_<@9Fi;_eQwnWPfnuyzBmpFAe~3Z4IUIYuJA^@Za8l
zYzP+<y8m92tKUK5k^LVrzvlA-0|3DN_n_agzi1pYYsC%80e%E;*=_^#K5)>!i9q>Z
zVIRy2MNE{t{@QEVNrU?EObIN6pDmer;^%t!iOKYjbc_mlDHsg?+e@CmeXxF9jehg3
zA#Fm%BHH3cCm{%o`EZ4izNh$f_J0J+3<Mp~VVkMv9MCEzUufCMWZjKVJ<K`N-Q-dq
zEL)#$O6j}rhLu>ulD~oe<iFN$bEFF*SSHwwF>Szn*k^x6ndj_YSX~ZST?^gPhxYp3
ztpaj+;I+xx4<qUXQ0#T|0(k~;4w9NDv>eo$ah&(sU?P;0Qim@wCO4ingtc8~Q5u)3
z^$$f%EU!3c;ciBqNdOlQBt01{xi!(tnimX?F(6c86prGj{vPv_zZ=Wz!PaUM6JRlT
zrc{5#7gm?W8&sPIz3mYH+_KAxe9_H`u3s2T0U0+5q)n14wekd0-Q4flUxQj5bhS-y
zZ@UQZ?#R4LLVxN4HYw|G6^uF3i(^MfJ5)sV6h0-Im5#UWNjRfX8Hyk%f*_(<?fCHE
zxKJA+V@gd(Rx~sl|NIZ-Nxm8(_(dcD04GHNfM4;+|6JbxwQ{<~PG&~NF0_VA*p`k5
z%(spoRKn<ziu3b=?tj@$=83yN?#W}c#x1#RE*o(X$zJD<cXvZ?wO-{uL0w2R>Zv*G
z_w;s;Ynon)m#!_IG<Uu;`?B1=xgoq`rENWAU+O=CnYbNkJ*+>*&t)X`W?Vl!!NEC4
z7BQu3UIaARwTg1&c%KT%wQ+WQQ-DGB6FRj&>Y$>LW}<&fuOVF2s{BC))%RzC1<n&r
z16kgWzHzg>0YB-B-Pc3G-C0SBS)lE7-OtM5#EVn=r~G<G2Ju6`BAWC%ZH>uwp%KSI
z`^U%O<_$gu4Rlr=L$mfBS|(blLNvxYQdlLUCf|zm+0iT}f$&Gs(&rTP;Y+acR|}vG
zYT~_mz}F4nW`r~AZ8<%(?pSJlbX~?_n|jy$_H1_uN^?GMPBf5b5*B|B6sIBTMtXP2
zt6RM-2lC1AytEroe|cDf-04RKm4f}UhAt~-%awG6r*g;A_3d;K+;XelQ+%x<%V)(Q
zq>I5lR9t=J9KT8dVe6;hrHRR6hdt1~#Z~CMluQK5lGB+K4Q)W8<DxNi;3`__yzl5K
zeJ}%Klq$g#Lv9#N^#jz{`EvSb0llWZ1H<ojpr>L!R8Hk=V4RYpEAyPvxB_4S>G$5~
zEIF7;i|brB=6a0tfwp>Aj^T?w;%VZ`4pnK#Av0cT;-hP#*N}wlICDLf1&C8BirKq{
z=Mk~)6=_|`2{2cwqQ3((I@hw^Cy;sxjtmZGKk@tJVIq7a(E31MBm;-{KQs1=k_o#F
z!IqrVJKFHT+|Q-XoFj}8N2HazPq*Wu=>6N~pPJ7ia$QTmXP<RXf$8Semx;=4l^G1(
z=uvZxQ!4_&!#5OXU=AMlo5`@D&(QWbIbI;9o00HL5GF>gV0%G5R7q3FJtXvtNka7m
zGoqZAmz>1hmzU?~m;o)*Gn-MzM90hkLI@5}l?nbm+5j#ABAn<~T}_lHsc)7q;XDYh
z%bhwGX1NK`1ShWa?S-{2tazd4Ny1A&<U4G2o+2qe>3!RtQXZ5!`>kdj_JO`v=+cE7
z=WervI}~Xpa#xF%D}m5KdhB%KnbJsvlWo|pVBMV^N9;y3%#ykrimRX99+0bhgjvF;
z%tL(?x5L>T;`ocas$t$$Zl13=?1FOQ&ZZvltAtTG{&*hskEL>e`%jLAo25!@doXKx
za3J_F&U4t|l`DN{fPpdSQd142N*!oH3s}JwMQ8PIZqEtJ#f(*1V2K;mu?Xrcm5kmE
zx>-h*N;oK1{GbGtc0CVzDwZ)wJB2GhxTMyMXPV5IKk6R~Lnwlts-B+(rQ{?0zG@t#
zmmt)_`7`9i(Sjz;x{UCOg=pHXE`{hkqu^jd?KvH?UUsOy0BGa>iFFA=Zo4%~w|sVw
z@bJ*9nvH2%rZ3-@p_9SA>UL;sy?R~@_R}3`Pnha3WoeKW$2v-Hv-WFcm(#em`=?Bw
z;lP9JV2uA56a?|`O~EFSMg6Rdnk5H7^tC}brKLg?fbH>LkbUs5$ty%F)A2@>r*X$5
z)~T=(uq`nIv7Py+HudOc&mk?71@FF+(la`qP8&oJzIW-V@zejRhygg$ID*6<&TLCs
z$}|*#VbZM3xF3FO_WCITBL^rI(WL8K<JGy!9*+1Zp7<y0kM4aDO`uRy3DX%L=dMr6
zvNs&vixRXJviE&~_xZ;C_?TZo_~5tua=%CQ2%H#|9vC%&G24AWpsf>i!g)yU-n$%t
z=*Q8Q5f^78&yIZP?Ipfr{UX5I84C$P4-uCQkA>geoPvaaS$UyfjK2cGJmNwxv5@}}
zV_A?&Gs0DMdo(-F;lg<SC3!RA-KkV~=JWZvs!rlpB|B@ESax){879^y3968>NSfCQ
z?iN?t31(r%c1o3Z(}1Ib@imKJ5eQx>bDo9P8!=qED15*=jj{H=aw1hT)WT$2C&r?`
z$u@A^bnE8>pHX4_Gl^ME9nE;X3Qr){+hg|Zw+AE&2wDs69Mw563<z+SY{}s5|F`Oa
z1ZNjBn!3$T-7au#9A1OZA9zv>)#vrHI#;pYDRAh@Vt|G_w51?8xduQllT?d3mxNtq
zn|Dg%Da&0?jQDUvSNA!K^^kSBy0|B+T6g{%J1oD>$#j}qy|;l$igk>kiU=pDEhlsV
zGULv$)&NIqYB$=8Z5^j{%NXmHx=JdTu0buIdp*&g$xLd(YVy<7>=Q%(6NF+8KCdQ|
z7T{pI4Bnz_rvX!-iB^=gJl?%gvp<a8<hrZ^E5YQiXTT69?7^l^2Cwbbs;T`y8aKRA
z0$=jU4L9lFUA^N(q3@`YNtb?hJdWqVwo91v^q{|OiT95Cg)?@(z<hVYFmMx_gJLc`
zT?6Pzs%^eW>D*F4^kwGobIeuYoLaK1{MO6JgTy7vDwtEBwsPHkqLK(s9Os-G2zhVd
z2mV{aFAg|8-c(;Xy#NG~NHaT{N_c3T^*SQqNN#>}O+XX!d09e(p8_A9yF{XoOiroH
z?85MAWHC(ac+*hcf!KXf+mz{Xi+~OOUZZexvwB+1LDfK7iqj@kR)oIij;~*av}g%(
zV~{e~{a8HEBLR=YI<Nr%t?m|smnWW1Zy%pD|C&kC|8O^p%!x$$!Sv#X7nES@S=HKs
zpCoM+lOKa0lJ(9oR~MYr)KRaT>G|AZ`4x@}z2)MBhoLa+^-}V!eUk4gz~KUB(R=|%
zMZZxJKKWfMiu3c*R3L6c<<}k!ELq-8b4|X^0?a9+4eo&L0Cz1$;s%Vw3NUtr)><O&
zng}u7vdVDMpIRwO6qX4Wjut5xEt1c(L`Y-a!44nXH^bQm@Nlt8;MeL&^t0mNgZLgD
zi32o)abFzbCIVkvEHAM>>r467?IKdND)#?FH7FB@Ik4#_482c>iW4CFgF=*a<c6I$
zJ+#z%u&UazP{~$NO17X_T|!z|4_mH%-f@B7VWpoQd;#Ir#sD%x_^?YG?X`(uNht~%
zKshmHCIkYnpc*~w#H{sdGuW?s)<4h>+t3tNW5mFKf*|l{2$}fk9>@OpI_STYs;z-1
zZd=HN!Qzlw<miaGrS|?LeNH7xKJcIfWPArk+(HYZ%-*0?W%77Zc*CMOBpjWZU(2g>
z&6Rve6N+k~%~}5{Qiz=>r#Cn;Xkp&7?&kIG*KelGZOAza*rWj1?%4VC6^EaHlN^;l
zAFg!;49&g6gfz&+28huOZWyL?xt_~6xp&IrgE4)Mju-_x`;W}cfhaIL1fW!VHq$`D
z$O;vIjY?W&^G+_ZZ_NPZ!<PcW)j`qt@Y8SM?8h}`H23N4nvkoYkgLTbu(R)3Fm<^I
zm=CP5`Kbx>j`7b3ZxR1Ys3R2R02vdT7Z<@|)~c)Ft6FX$<&^w~rv;<COdGo^-VFT9
z<7aM2Ej^?`?W=CA^D9Z=-<}|Z=JHmX_~zu|-!M;hwrlm<nMbZXmJ*!FAtmqORhlM+
zEzwH9((_lZ<kt|6hCy-(<UpwUv$8#x0%OR}dA=a3Xgcb7GfT-YE6lobG%@LcSTqM;
zSij{AyJhXL%>`qf(X8p4z<+^LvT7$972_5RUR)~Z@@9-rEW{wWfVs}v7>AliP}2Uy
zuHzQxGKCbx<j_+E(e{1ZhU@FO@upX$tBwi&0iAPTi-MnXZT@3U<DV*Y9xB|=?POb)
zbcoW@LON9gBEIQy&0l#6EHM!;7=dZABuY^<{F|G*UQsjvvP`Z=`3-TGvJ7P<l+kg}
z(~R@Lz;^g+wQ|S!E$s-W(FvH_)pk8<yk&0gT{&c!#t(5ao>srGU|bC>4~E1FbUrT7
z$OBe@AT&f~v07$N7*x4yg4mtJ%XRPUu2j|OTF<)=hjyQw>$<FKDv1}VM0251=KT3(
z!rZ8VNL9<wk^m&AahPGip0ugNq;#^eOtNw-+>QeG%ZTGT*kcK(<26nDBzxhyyd52Q
z<vzDIPDK3Cu5v@E{PC`)MU@B7n?0z*FK#NU)VJh{25omApPK!NY9G%{oad~$B50|;
zXD80q>u~1??J}%VGtH4Y<Dq$CHytqHf962j%}^$6Wm0H7P^M#k!xZN@1DA!Vdi)3C
zpXF7NMa6VPA5irFbR^0z3E!C{#E=$F)CwOUg(W=~2j1#CHHB#Hj1@qbVNz#Cnn-z*
zb<NUWBHe=DMaN|S*vY?w`<^@J^Q{73zqiWNgkIGd3K{P^-NGu{dkvQ*mmCTPkSr55
z>%aEn?yUnvo6C?EiWVxSo{0*U0cDdq$wfH=YpeAw+3neh#hHllbflK%Rqi^hvIed<
zjcX(pXf)rI6(y|O{v9bz%|MF<r}q~$8UEu%t)>a5Wx<nrM*&{r!^R%SY<EILqhH6*
z0~KN5x`1H=6KhTv+*nGxJVdd!j3W&wMiwpfIo8}4lJipy+uDbiEiD(T4X%*T4($*h
z(f}gMrojxh&D3wp{rFKAGKs0R_4u1Jh8OO~@rGog{cEKSvq6WC*b+HPqx`HB-Uv%r
z-vkv=K&AD^gdhls1RHMp4h`!pm0kq4<m!O{z{=7tTxF*KeszD&)FTnR1gz)uiQILK
zqwfin592lfUG2Q+2L;f9G1%gp<??%cBM>Y%QMZLT+}lXYLI**SF0oO{HSk{1p3kdd
zdpBdtqCuhU!cJz>xx0DB0j#iQ{X9_Ds@kb}1zM(o`C{iTkm@d`@_jaTQ$#i7jY(zy
zy}(dQUd~POXPLN6)F+>VnP}i3uCg3|5;(F*ou8C~M4G6x_Oh?c6jBjXlNeXvZuad^
z6oE=U`qgU@3veq`#v!B;QN5_q2H-V$sk>;lOX*0hnx80LF5}Y{R!_e1-N?50M2X(9
z_@d}Eb~}%VhESm1959hJ1wY%WC^1^v2q(L$FtJH2KsvyQaib!aIpdJW#0Qc_5w2z<
z>OM*Vsw^B4&3i$%Eth!8@=>YrF2!a}O!cLcr^v$>rk~A}oG&zfN=mOMsq<gr0AZ6j
zEd!@}x>cZ2N6_N(h)9c}AzpegpGi*~6;m7WLSnG7ioPgg_M3x+eqU-B6KKT|rln#r
ziRC&&<Kk7L{_kICi<Tcg3shYtw0_Y)2L41D22Sfg*`}a;8x~HI|LZC7k*7(A98t15
zq#O8av;<8oiY%Nn%z-Ckk1pMuwrYB!j7*L_n2tX;?Qisa-eQ$b*=?G+%XK&hp8*gw
z`6&KlA%d%D=!$L1R(M5F1(@EE$A<_x>e;-f0_t<no+X=)f0yxq8C2`AiDI^u=nGtP
zkV1aZa50u+BnZA@#?hJ~X9+om`%^}$JyR(9Gm7zBfAtw%e`VoqEY4g?%j9c1oC*G9
zoNzZL!WQ3{3`m)%KiW{xM|rIj9uqYnj1MU4u#QVx)gFBFYD%^cjhAD=yJZBSk{&FJ
zqq_}DuM_wAPSra}F%+htbj^NHRAvVezq|N#eWI@Rl%mMWF?c0*@pDVk0f6}?gV7}I
zU`Ced0WiQ5pcq$q+<^J*pDx<Y1@omp88qK~X?1*@lLgwTI;8H#TbHX04&!bp^c?B0
zL;bnTCqany7u&lxA@o%S&7^8#?5c?XK44zuC=Ah)<8Gfc1P6zrgrULAKsHr62<Pc)
z2hqEZQ4Oc61#li`5|kqyTMs76Ger(Mkw!PKa?tjGI}am?+OF(%%L|H%&JZ!4tQZB*
zv_yr8yisiRGIYqSXgZ=w4f`s^p~F>8FYUi9aBGTj7e2(lPn0-Pmja0eGH!v}msdrA
z6wlFXLVbnloqhBybb{qcof1d44O*h#EPuI+@nbBAmL(D3;AmXsS5fN?t}0bD*zQ%l
zZk=TXmV_0iiOEAk5XfW8w6%lXp#r(1{@dqk+&687koD(6Jv$Od;mit==52S-D4iBR
zVw0VLS&cvQO&S*UE`h0|Ux6oGKt*c!3=^JgZ`C)tB`b+!Cn8)_QZJRg9`-$lz_B<H
zN&!v>Kjv<fx-wJsiF{vW`qH3zHbz+d-i!#e$U(e(7s&ijC|`?qHv&R5Cyk_1Pgaw(
zEsUHTsKLr8i~x9h&^&@Ln*TP!HvTqLo*rI#jQ{B9T);&n*wh>X>@8v7j!9hLudvbi
z54nMAgAYLl5|XuIk&2V*0b@sW%4k#;ojH54_C7r|Dm?N%9S!Pe9|=JPQkU{6aaufJ
z6lxK&G3qWQL$-26pqfy`>3}Vy`I|{v<RVaK`d<XoA&JBZip{6~D!#fPjTEY~Sxu2e
zJ?S{0g6Ml8gTL5a{&_z_0A3nwRvSE&DZ0i3^57Is?cxVs|D@fv>u|D2JC~&{Ld+Ml
zqhR0Vp^uEuSF3tH)t-OJWz~7qL6xsE8QP#fM+Wu2r_fElCHT(p-NDGl1WeC4lXm%C
zP2ZYjQ>psoYFe31t2`^y=*az1@vS@vS{%<tTpf^G>zL7sjRd4E!A6w`2^<oiysxam
zT2A^bAo>OwJY`?;MYCCwof=?})EoXOa&6ULJ=(&PgZ2ITY#tJCjWhF)7hVs(C$(yr
z^TYpOIJlxI-RC-e!ii$Q97=?kD&K(>4H^EG@4fZ^o+jLiV-~0F7_S6Aqw{8i(0+J>
zyS$C%Xu$>ue!%X)=z2=hXNF{D--ul<)O_lx^%WH(0g2FxXWuuWbXUaj(w_mPV^=g1
z{@x7`eF{of!8q*X&666T7W|`zEFaneC9=r9Rub&z`_OCdpKGm7(a0s6zdd6bvZTA1
zY5vx2s;>LIcGU9btH`$c;+DSNU_?sEB#AuY`>!Z31i*g;aM{bHXOn-!fAGIS-2cBY
zpjmWu7hFF*Opxh4a(sJIL^+NI<T+f$YE>9JA25YvR?-NWLdTr{SZT+HSC#!b)gt!p
zGr|JOpm%2&q$t2L@{apvq&l_-Dqk84zD6)cmg}M@)R_b~f+|+TOot5i443@<kDmGI
zTLdWcAL+COEH4j#0gH!AWtydr1xm$=6r5#$nr7&mONad@>r&#e2)5JYg-huFGZ39O
zyefb4TgX6>_5U&t6LTvkV~1aMD3+z;y6AtfL$O@<1z7FBEY=z7xK_i0DD>cmZRQsX
z<M(P=K*LW3#0_3rTwicGyZ*S4UWHl=qi<dPHqV=wbNJ<;sI$TPvo!7Qh3I9G3${W}
zv&N_6TZ=s1guw^Xruw#YTzihwEL}CgOr8udLrI+uUY0UnzLLo7k3=Uf=Y=UIM^0za
zJC_+(nvdVAnFk>)MZ=@%Q!+d1@+%G4o+my;b8>%IT;`{gPNt=rXh|_-h6gj=#ZB5x
zkGG6jd|}M^8#p&f8vB9RSK`c9tP5ATnKG)EduMlG!KWGsn6<w}EjlJ#I;xNQbh@t^
zLyMl3XnXm6kqF;8@Mg@#rdq)DAX_K)@*Ms7yN-kH%i%3wobMG3?`LL1>x8-!p778$
zAmN_FS#%PbaIw`nNLljCszakIKgBI_o`N|OHLr9$-2<Rl*=%ThO{V!Sx6O<qv%2xl
zqlCUHbCw~_K+x-#AGkSnxr3*f$?aZ3xUmy??%!Ex^W!;oh~AuRWhlyo>S(i^pN{=3
z2Y%XXr+)|c6w_MaCiYk=Vl@sgIPXQ@LObtU{9N>7&ODrYWzhPC=?z;&vvcTMt`d7j
z3;w<JT4)h9+EYKMfUqjzaWO$?6$|0O?HI;sDz|)ZRITo1?i(f3Z#4xlk_xa``nM!`
zdmd+p9Bt7=*rAg{uicXCecl)0cfCczV|55{Y45k8<__?Iy}O&I2e*sRIto}TgV((T
zF8cCWwH7SCcJZjuIhda%-y1B|pPCV(G{0u{-PW8Q{<>Qt%lg*J$uWzLdJbN*RmKa%
z1^>)}h4Mqaac^lqjS(jhPjeS+FZ0-tPd}OUfs9Vot*p=h@;H%Lcb0i8fi}F^go>Xg
zYVi6+RuAesVw4yQ{(i$pkr&4u4<oQ_{NZ$#NHa%jsytBG`#mqFtw&UAl?3E~FOa3(
zLB{?OZ0gU#AGx+5iqjnwus0^9c2h`V1zGy|?0RrMIr}lt=eg)MYPr3a_ZJzSxTWb6
zGFIZJVq<Ak4d5oHhvBhr7u%sw70V>_3^0Md+87z|Wi-++D<Rxm#x<_Ssy~84sDLsH
zi|*;$UG7GIAp3yJ$bud4{-SRDOw>j_=PtNib}-1oDnw?;B<Y_bLG(Y9Vt3Shd1BJV
zg+g8|kDu^rUiiKkM!>-#RNz;1V{=iWQZyvQDDhF-v{1)3FpbZ-$hzUysW8DHf*w-Z
z;@5BT!<Dm^ndH|)i||@)iS0SCKJL~nEXFEEvJe51l_E%jAup_TF!lfoK*NjG^wQ|l
zjydjvz`*#u%oZh!ya6$;i4`jJ@y8SKc64SAvqNc4gTeZLZN~r&CmW<KNut;sxA=(A
zs7O1^oinT%HvZ6+iCQD+h$G*wUTX#x>*JAoSdL1NWT?#pw|;H^xPS#oI>Y_&+y|T^
z|1m9mNO_6C00bdsv(uJC!c0OUzTIn%KD6^Nhm`(3sDB8Ze$*1QvUM!%<Ipn`M=gz`
z8UY9NB$9#k53VK<ptphVV8`L_C;-+0!2$mO2!OX#Vj3V)1tK~mq<8(4AlmpNs=T;p
zlz1m_@QS<wn81Nxhnv9VtPISH8492yaY58_K4LZ~O^gp^&s^9?;eZ@Rp<DYBQOk(~
z2_C2|hcQ91|ISI;0tq9VA=bQkG!ha9Owe|Cm?pe`$%q|?)kp2AFDu{z1BG->6sG6I
zQBdm)48e*XcS_<y-%(7<AGV7+J+WO+HVvy2GZj9*CI^A0t2VD^lRR54wl>;%o>(m%
zJ<`G+!MtG*&|oWc-{lZxC|+eB<a(VT?A{XK%#moy1P>x-@ECH7x5{U8ZclrFpD&+S
z?tZdY#GQSWt<x0U%;09K?V8EW;XZo0Yt=og1sgt@E?pACVN#}S5p{d=%f#<oi->|g
zN)dq`BP4djq?T5jR;n%`{1TP!us7-F21;W&griR1?e-tD0!{!VEFm!*DIu>98uY8y
z=cc*w>y%63Yp1j0KSFwS@FAt!&5vzx=#o5rkPoNzpxGr!inyx$PWS(a^N@iKOc@#o
z645!qKyZo>S14*lRZ=ghyQLGlA9^3UEmQ{JtHOK3N0D`wmUc@W%R7uE=s)}`Eldrf
zLHG*bq(JWf6jW<`5U-9I;ur3W*4jvp?@nmTKc$Cl5HJ6GH21e&HBhIS=k86fVnR9$
zzjPpRt~iV`Viw0PkSp<GY%{#q2MO9eWVfYRg7U@P2e^`TErW-`w;R#EX+Z~(Z3=io
zk-+wXEgz>FKB-}9=v3GR$q}#~tsSQ~Ym)E{<W5<c%d%6+j+UNb`+q2V$0*CXC0#ge
z+s;bcwpnT0wr$(CDs8(Gm04+2+P0l}_uGB@^m$Kr-~PTe#u|IAA3I`4thr-F#C+y6
z&89Mb-NH5Vz~!6&micomV1F|5bGv)FgEv`>$pAnY8IeddQoL~xZR*CB#sho?dhX~f
zGzk05+uHqwRqXgOldNO%)m<kcme?@_v<5yxoAHXli~`RNsoAu4q3ar+D;_AASvQ!@
z9E55^)`T4Cz(L$S2NGloNWK)bK*Y=OkVVt##G0zhwc^oKv8i}}`|QIp-HefyRHuA;
zyWRW#RM8SwXh=ydiV$56qDAT(Q(~nJ!EXAsT(~93K257p3R(@;C+aTYm*@>59Z>29
zq&6-S;2b^JOmj`NGTlTMnL`n-6tnV>wwh5HY3M;Ph!#zOn~Ndfx-81vh<yd8untUE
z%@;`nj8D*QJ&scAVK0$lxYMEAtw$dT1Y3<3<|3v#X~%#STXA$#CDlKkMYSx>4Es$F
zKV`GNGPy-|U-J-oMTm&P`m&>5uR)in4S69A)()Q$sv!`F2NDQtg-ye~sZ_LbNPNP{
z+_eQ2c)F%_NE-KBjLI7Dj|x?xk9(un6E?IuKjoRrBvV7}cL8@>M|?x)i|fz#(+-tH
z@m}#?x^XvD==LilIo{t?qv~i!Xs}g1I${ab^kLiXu8=g%vL$VaqWKUie^MqaZFj~+
z#W3#^OzGbG=*x!0;+|Wr{Vce<TqFkB!!BUdX&FMhB;Aq6+|q@4P}KyP0{v3<N>B|d
zlhJIRnCsLZjB!9BtZ#IQ3&5yHh76BE;zeJxv+}0<?v%VS9JP}%8A8j>ORe2->a+}N
zl5~qdC^qF-DXq$4PmnV1BWPYdZcc(#G9^h27jl#V$CwE9bDEvSJpQ1KN?qUYGS)(=
z-byK>nkY0)<a&?m77}Wxf2tpWH=_F-Y1)P9Wc)O1DDL=4HPeZMmN8e`wXcX81imYZ
zIh&;>(b##qKJz%$2L*rEFR}3(d?XNJyjE2$gUjpkZ#z<L+o7SykR+u$V*`1i;0!f6
z&{EZ1!JlsvS>d+NHzuj_`_j-f@F_)9q#OQerDbVwY9R2_ER)aFb7oqMH=Sl3+@6TP
z=Sx}4zh@*j;QBmf(DzC7eqK~trW)^v)rOj-&yl9tTrI7r0{FUM*rma4VkBB`?;{Qr
zlIDgV5P|D^Y7}kle~6-%b78<yAR>|--6r5gF?!G`KF2J62JLgfk^oq{i6)IBOc22h
zR|0tprC1oe8Y~2s`3}OUTb(Ecg_n^L6?OFYAO^yQX+h0@cV`a?+M~D!%(cK_{7@~d
zD08oyG_~s_r~&Vz<b#nxpOIh2dvTd$ZNiW~9wP3_@PtYVTEYpiGF(>Rz)`yi=gkPi
z+P1QvwTCT|(vcD9@gi+%*C8AngV^*$`p_)PxsaUEr=l(b=C_dz@r)O)qVQm8=`Jtz
zO1yFP69s=5p(Xt?&NafO(q}S))Ob!t0P)`<z6PaR_usCQ+@8J`m|i>)RQR0P^ZlwO
zHt1)!h0|e~5KpXBCE2nLP(U@FcZZ*c?0_-eKgYl&7YLm!0NY!Jx_!u8H2%&+M$5L@
z(UXfD%4Zc1kNUYj3u4+Y(x8L6GRIzstp5{+VQYJLAKc=2K7-t8Ww$q5i$H5^lZG{)
zvRJh@#HIfTG%FpeFOwK7wtqSoO;B?5`U4yfl5N^=zS%-8B)_k5n)Ega*f^^n%75*=
zt&_XAY{)CoQhLs_hP$+W`j(u?C(&a7_|@|Qn`r^R0E?$Zz}BqkI|f>lvas{}^T<q;
zy`NCtHR#w}mdZb)Z2qcn(oxK&`T!I>6;S_kWnfXNdS<uEi1x{sH&~Q~hIAUBC%$Z{
z#9bgtR{>)wv8*aV?56gkqyA6iT#fe=7p5?ZaHs3L;Ga*sBhGNv$BGjJBa(m@BNvvi
z2B*qcl?))($6<a~$;HG1uCN2+pe$Ol0QF~NkX@!z?l;8WzfUCMQHkCIJ>C%WNf=Tr
zj99yI^aT50dkF9fkB)ZzuE<seHkSs-fX-)$7D+KkxfH?s2NrBhQpsFp*|5b~<a$C^
z9?;5eK^|U%Q4X~XzTNtOm`7oVAaqfTVjmtV<qg2ur5HMXovY!B#Ykeo|LSFu!5;J}
zieV$eZ*j}{QS5BU)hDRL;Ulma7-hq(qENv6N97Z4uQDvlzU$+9sJ6}wm;o`o3awRf
z<4Aiimh1&|!4BHwtk!!==*3l`x(y0kyl<Ej>5Ti`5Zd2Ge`U6#j9UFWPuVT{WNKZp
z(=iy&3&^UlZa~%K)H0^NF1c4YnYcg7a>{$KA8ZTk(<(}h+4>!P+;A~r29ic6XxNzp
ziqrj%8*vB4?$L)*vCFv<WBBH#3C^t#OAr)l7jT(hw!W@yT?O!l#W3G{=SQyQl`yn9
z`b#h<^!UHSjGH05#PBgWGV$w)LV%i>n|wBtR94#CXhP8c?ATo<J5{7l-4K)|vnm(R
z`o0<LN4jfvNY#+mdeKoiSF+_B-))nl&e(x)KF)NiiQnv^3)>-|#ba8>yIeMTt(y;A
zCd+!nu>Rv@_(i?dPP5hC(dsy+Z-j5!Kh(GPDdsx;_2?-sxjbz*E&v95xw7-O37X&Y
zFa=v<rU~46fr>p(fpu2ek3Fx-5c{+9h6<uXVNCpA1k;!*ZUC6`Z-%9=#D|Crh>}*7
z#K@>N=L`s1DpCC}R9PaRB9W~d?16^da_`;R!K&iaj8G#aanbZSS`5>=Go`pKsZYn8
z11;4*rFGW=f3JmI;xpqoV1R%|Y5o%^2S9H9JG95r$q`Tp<5<?s3I0_Gqmxo8_y(z~
zEP$0#BJYeBj*w<cV_Jq_cz1^%*}I#?6WByQMB(_(O048&=H_~r)p@OIB4yLzrLUOW
zE|hE(UxISC%X*ucbK_>CHs0M8X;m(}#*DWMc<F#!UHYJ}#4H5S6+e#niNt3Ta^`ur
zSCe;fd?b9gHhno8I=IL+#h|x!tFGg*8}kuZ4GHXRYgMDMudTB`jMI?49P8-$Yb8aG
zFu&hqZCmZ$>mojNT~UDYoMww!mrX;en_6^!s_^2>?R4@zNZM+e_QE56f;9$PJnpg^
zMweEm_gO6Z<th0JBuH&JLtXcBV5?X6m}XJj7rVVDe)p8NA8td$f$g)l2KZ$t=j;-}
zlZF3#1j6eO<o%_^pR+AeahDULA&bOpj`7kF`s-&Ze7UP^<G6=<7EeJq{9{9Zr*wYy
zt2sY)><+uTy-KOKp_2?yewI;E?$d{FhbjVzBI_rzlS#uP3grs05t+ULz=F>2?JNaN
zhW^sws7?)@FIofzU}ec~ADbyUa|dBaN-)1|QT@GotYbgS<zpx<EHD}2dVK`71VeQG
zjMfTPtaodUS&>Qzm*fq#mUT$5IGfE)R_Zn_XTq25OE+oO5O~MwrhYpK<8R6B_i8Z`
zM|z#*C1YMygyPs2<Y%oStU~W#V5D1H4Ny)5eE~}E5F@*PS!+ety3WR*hi@-IZ+t{}
zy$!+Z|I#j@znR|B%OewP7z)r1y0Q~->hB9#OCU@!s623M(c<#@Q|g;yZ_nN>Q@%H)
zJ8!WqE=%8gb{k?(d4AWH344ac9L&+Lipt@OA>O-I>o+GhvhFW#bXzr%>?}50tdZp5
zguE0N5<CNw%LPrZi{=Dj^a*23n#-{A{+p24Dxy-fA(ZnP-HwO>WSb}E?atV$gFUw{
z;{dlGbPUN4SMspnyShXBH<8PI0{kfgoWQ$0PgoIPRvdUvc$5m@QC29Ov@=`APRP!)
z*aW9i9zP!h0qY1+pve9c6GJW<BZ-(0f<WnQ_)14C9e!4#!VOMi4!w4^xX9+U>+4k!
z1H~@*Eq>LP0VWzpAnw!IuKAmQCSLa05HRBb_9etzk(ZlaS$)7v-p&|cIo~@XC2j-k
z<*$_Q8n|+9XAql-w(oxL*f3f&yI(~;k+F4WPvb2^-&BP=goYWU-T8`Or_CVHDwO3~
ze%bwMSBrc5#zb#m6YrTzTE}ZIhQKM`{9<q0x4as5*lf*TwlSMrxGhf=whn?&KyPz(
zfQ7B9vt?m7Z4)_B@J$S7_)&gc<fkDmqDhRwfK&p<pViSk%0V-Ujj4r&KcF@KEbiF{
zUH!lcD0t$yv3*Ku`M>nf)U^qg(+y~84sqMX5N4zY8lSj9+RgxoctMxu$($--aLbMw
z2{(KqsRn2-L*c{p0Yyc2u`vXw^jt1d$ZNbUc`>r)`jZHPjSC965@-jEQYg=&)M@G+
z;xZ_UC!V3pe%5{BSQKHU{p9D9re%2~=F7!#O$+c2>Ywnr1|)M}&9k2niPqR&_?+6+
z>=;vLW77;_ezN!a$ljKT=}^l87*(pq-?G?o5<nkf9J^gpBV0$*^y61}?|ll2TeMKN
zQ_<_YZ8I@K#)n=;m#C%Si|ka_>x~b?)@hfPASO6Gt%~W2&nq<w9>chSEw&QWh(Xc|
zJP5?B<3{f*ST1>G#TaxZAs9-;2HkPp_GCrZ)l=wfJcP|mzeTJQt=jN%E-#@3alXFW
z4o}lU342NnMGvaXMc4<6#G{^u${g?S;R2_jbfVR6W)>M>02xx+ItK0zkm+?~YQQvN
zobiqg=JL^)RJ2Ns3>uXj!F-Yz(cl*M?)3te{o*pGs(W|^mxs{L7}Wv$PpBJ%Q_St5
z1U~GO8Hn{?Op6BDhFdG<0mq}D=ZCDKtvKlv=9S>s!La3!Yw{)El`KluPvxcJ$ZPX<
zs4k>{?k;O>bM8}7QMg$450*Gs?bhA1b0ctY^G$qXLPpw6(C8F+AF0<MAx%Y$#j;JX
z<j^`x6h-4biD@o8vq0z0Mpv!{e|zwf@0j@M5Mi-9zSRq^c%7&$sN)gNiz#nSb*3m$
zNLS%QHlJ&x98$jVOa`ehC=sm~sj6;8MK!KydItpV@=^>N<z{2I<|P}&B|dNwLR_!(
zj2SH(U8CukyZWHuSiYg!PKe^g0MYSI&(=cW5pz&0L6#vBIhQvL!sWv9DEv{P4aP&i
zy+?y`-$wGwu|dkbHj-|NyhP6F(AOordS&%)3t8`zwCSzm;4KwyqTi;-%$d{6f}b{}
z3v7Z;cE56^^>A9&M36}v3&x}-My6Fd<@X8^!<FVXtaR5eVk;=DGwo{eneEUe>K3B@
zkm15;C+r9#brkOw#I>xPq|}y{gLImxelLOg>y<4e-zRFFmr4Q(q-g!`TXsic$UCbR
zM&8*KW2&(uTjbn}6r~J6XuT~mQ3pvS&ZY&LqZ9T?FC~<^XI}vYy(DDsO1(Sp4;#}@
z3Iz?>D{JtPZNiMGA-7ZKI1M&y-aq_$i&8&5+5B&00^-{uJp8{UC_A$MsqbiVIIO(!
z&|yDBomi#GunHR*ewAd|`b@&Q*#i2)2Cd#A$@gyr9}!QBaaQ6IjKz6z(SbX9%)}w2
zC8cZ}Vl+H+b_&_qB4!i;{yMWQ)cQX9bCF0tU~LdwuPLGLB^eZnG_1xM)o*?1<N|D@
zE%tr3k-tHW6g0d|b~v#25yq$DlTdH1uG(Gv$2iSZQg^BeeS8<3x0EAZ4(ASlc2rV6
zPPS<xa;lJGxy|B-=M{vaj3p1dDVR*b<xE}$D|MZZS&t>AGOgG(?lD`oDHMA)j*y3|
zosf(+h&Q_rV@Ml<@WrZ*e(ZMinJw1<Ma30mz^T^nTjO*~8bZ2jMTDbvSi~CBe0$+{
zVv01>xtTrxKaEDjYQjA9Kl)apD1Zd^D+9VH3RL#b`a%%jF`|i13S&2L4U$(=@SBml
ze1&taV6V1WnO|wuEQ-f6v3v$cYpY<lw5i(f-&vo0NteG%pHwk?cs<qhIPgHy_6%{@
z(Ub8mM&x^Z;^tL8|CkMGFMw+;xKBM_DVViyVYQQ7ZzfA@6$j#Yu9Y1i=d&N}xbC^*
zBEgfXkD59N2OMm6QjEd<g?jkJQ;COMY<m4yKD_~m;qMMRsbuuw&oA$6#UL^svIStl
z&jhIm5+W?1RuV+97xX{ha)sjE%q2kYEFo5-gr^TGchIQ$>@(6BkqS`BTu`MRUKM`Z
zaZv>I-L45VXoELyEJy-@TRwU^QaL)(ff281_SIc+`<+|QA@?!=*a1)X;KHFNE*XMj
zAoAPqWZWRHb$5b+7jpkoXYJ>vBAl5ao5GyTn!U~_U-few{=F{6v*oh1QH^-^uuaK!
zb3!W2v_ToW!R2*N4<_Mg8vtv*;2InsQl?9VXnY|akIsB1z7JFOMpisq|G^YfDRA@t
zFMCJtYyZyV0!$kj0I>i6u^!`ZX{JSPlAOZ7QJkLF{pkmfCB`6$9Pkh>zS)&9>XxJC
zfkjQTUCFH>+x2(<!KNXE#2|O)mS?_ctJW;39jXPw3)mq~Z}8y*Uk5ls4Q_l_ivXC1
zS*XzF6+GzIA{vZK9`jlzt4!`YSt{Npf_;HEoD>L!Q%CjDxMk*(dMC>$)EH_gIHknw
zN@h+p22T7|7(1MX*sH{k@9Y!GVe2K;EF3h^(9`rW8Y9!85KecTCfJy?`eLN_ohY`i
zHb`cb`3OrWV|cOSWoAU15W;BKRNpgcc@+02X5CtfxuJ*4ZtK0c{sa_$(6m(HOYQ1L
zT)B!}6l9GITy#nCtYbni?PTy4TSt#vCZV5TFd|~rKeG5E5056oN$IPFtd<GC`6m<e
zgeLq*X@l7%cu7J5H78u2A~Htv-I#Fydp^Gg2FmFOaIy!Wf(0P<{|?o+HMF#&H_laA
zv&#mov<R!i`d->dv)r-~y-*e@3g`IU2lbm;X{(l@Mvf{|d%cDw>X)}UrcI0^VEJma
zJH7c-PNtCW0LZmvKgsFP`46d>N=K2ftFe|!p0-x5;Nrfks@q#rK4UZO<c=tdL`*aO
z&}k|Kjb@uIU4m7=a(hng?}mJg6gepQb~9ckaC@d3?T;F4`Z8Bc{uf_lxLGB%ajWjD
ziES<_uc?#X_;VLYPuI^HE$#v^Z{fwK(X@0}cwCfyLj)1|jI@s_-uco)II*=QjtP%&
zM(8zD^|Z-zpzb@=#9u#fVtp`F9@2}Y&pN+;S%^ns)~QjHHA2g0z!c5|qdNr}thSA<
z<Kd!aYd~ajb@ByI`98gNw!tEMbM}1;<HAIKoji>W03a+DQ@V0I6M8QyJp)})7%W6o
z9fVlogA=M3!1NCKI0PfEO}?io^{GaSM@<%E<$l>!A!6OOLP8%LRaI3xK6wU+r3^j1
zxutzt0OOR9#GoD?eY3@Ems}s~qS?d1hV><p7yr{Mp#@9Ypq?_6r<r_nxso3vo)+A5
z1*hK1f9b!{XIE1wglBESKK*kWM5TsTcc{A)ze1WxEGSpWT})%x19ZrJPpzJ<J-{ek
zY+fmXJl0~cyQfOJFoH6-QGgWzC)B0mwf3r(OD=2BpVL7Tae~SN6P|Vvu%Bs092|2F
zT!iHsh<3wBT5x8ER=wU!z)HZ$HnF8Vv4iFxyBV?<mqU(HtugIg>WPd|q$JM0=hdoH
z1l}vcou}hf&8((WbQAOKQDj@v?cRaq^cRF?Qg^Y?YLwUH1uB^$AJkTgM~1>59wNA3
zt%}o8eZjQjiwnPtjD4Ak6y1zCSkG017SKX;ddpXDcPS4X7GUq3U-p9;2JHn@4nPR5
zx7=idmT3_dPqUp$enp<yNl9FkW!I<XL7v#zJFx7Ds&oa!x!4#+s~`^!Aoh>HrA-H|
zOOuhPW$#3GL?`Sv4R(d{zoR7(xujEZKb>E2VP}^KiNCs7#X^$E+ekqZ=?~J1(Ouj|
zA_yrLM4~tU4rFtd)fVI+`Q_|YLp^`^GVZZ^tS(Y$oT+ZRqq6zi!l<Bn-YV-6=8uyQ
zGsY094G2b`IMjuS?#7Y?Cnc_SozXc(*%~Nw^9V=D*U$5<0JoctyFWc>7F}24`hWk2
z$`rPd<^X1&9XbD-P-yFH4yd@byzS>1k-oe?(99y{>`bW<iAAw1s_9f*E*8?#fy9XA
zmLpiHq`0}UeB3Ul+;YsbLH3B5EVXU&r=OlTUhv9Y7PA^7xmxg?2gU1Fwa!W&WY0Ec
zxi1c%U+VdcBJ<lybDg;Oj9JeS^il${jsHwUj=O!Nda^Gbex3~2aBS9O<{h=4*sbwB
z4C(zOEtbjl4bszyjSf`mKDMC9-J3<dXy@`8IsGADuBQ;wwwil*x2b_kX!&t8dTv(f
za;TM@gmnKQ-GoO|@f_I33i55)(7Q=taV~vs>U=&nT+jsw$FlXLUS#0m`Gy95Nh~ck
ziR2EH?;4SJ4^D0y)U_uO?_SP4NuUasuo05?&Uh+-Fe1<aECTe3SICwt@Fe%dO*Yif
zo7bnt^=<F*TC54vw2*=JV)EW+B9>-@_r!|Bl1anlge!J#SqYlF0j{;%m&Qnanm1J6
zH_2L?$ed_qE(Cm-b)#{si-sS*!U0>}(Q&NAWGtQAk#$jXbp=?9zfxAvcb3b}%{v~l
z;3Dy{oky+Fxv>J`N_RP~;(8N?)z}P&kh(M%j?M~BHRJ3Pat@M$WgG5P)^!bXL<&9C
z)QDP`RxM;W2bXf-r9X>`4g7%kEvy;YcAM~o8HZzZqqu?|7DlR2I*cT9S7yBNH?V*K
z&?_Owt&}+{f$9%9PBTFd|Gq>;yCZ8Mcp_H6bco-@=9xGFP~dD4(Fjbh!A1Hjl-}fl
zH?!?BALl`(;#!NcErQ)aR?wr*Br~eRtF<?Vwgs@l$f&$Rr7W|v+n)Y#tn~r)Bk=Nb
z1PGDU9GqTdu7aejGv%44+10|6cI(?&k*-U_*+m_kG1*Xgd3zo@Eed()8H_F8TnjS9
zt`bNa(+3irxPIq_q`1{`T)*&5z62=fC(_xtxBBTxnO#`@BAjhTzqF7(EmdA8FC|e!
zB}?&Ou2vhf(0QFiI>NTvS#_(OH0)dLD5qZwrby_Tq)P+Cx?lH@j17Zh2EPH5@hxKL
zU$s%-zvh!@CJE?DnyRuOQ=`9g?MMcv#r;)DCOcWo{EKwnE0`kUaX`^~{S;}?&GP=U
z1$t;Ta3G7;VFd(+bD7Ed{DZl_{0w-F+$sq&OeihX*YA31rBt+#7MMmN8zqAJ4Q=XI
z0^%1}df;T=fT^RxqQ<fC0Ue6)Gh8>g)1oLWjS?$pekko}-)1bcIVI8AREYK{yK7yB
z496Ow?fiv1#2jnaa4uaxf0{E)D>2Q$oCVNA5{j5vK;htu-6XxlB0aga9i4;WWv7uH
zV#W5!q3$>8&l&^TRyqLY)(G06>(Rvf9A&CK9Q}{dnvp+a*hbTe*Tz^F38hnCZ)dAf
zfn0`YSwiZHi6uxo*fEH;UFXDh0mu<6(GThVfqRYz_j^iNB~%_P;Ohl&gjNU^(g9)x
z8AMOUXfxSQ0Ko-C9A4sd87);y2eT}v=jzfrOaWa8f-4m@`?#_sBE5OzFJzBBD-h`8
z=FmmQ-gTr<h&8Bu8f!nSiDRI5D%*Dl<GKcJxafy^f+`|IF4yL71j@_C=+XVZX&(i}
zp;#ZE8n(Z$FJ9U&CY;AU&=V{<pv}-;6=QfTom5hlO!84(U4_pgZMi$>$*bhRdGXay
zMg6oTA@x@NxkxE;&_y}VcJl7desD&HafAy_F03;l8WN5R3hgS0Z-Vnp4iwc9?-P0;
zMF)lp<N>-k?BmoTn4f6nsvP$B@oXOH80Q*);viJ1=!!je!ZJ#vbF@pBE;l@8NMSdc
z>MF88ywQi@(+ARnKt74jSyLnen9K;Xh?%hRoOvmT1N9Cae#uTVqpT;dnK>wI#xEDa
zR6l0qeg_{!c?m}})dDT70kQ*b6YN0qFR*62%@4$yJ_4<A5vvMiwJOmQOl2zX3?|bm
z5xjTi{+-tGw#kVgf~j)w&!Ox1p@erJFy{G&Uu`i-6ir2y%k7w7IGhxSJVtTs59=@S
zxo(^`ZQ=P<Gl~Ji=r|hQx}o3Ve-~kh#O;nz)?oDAQ7ZoWaT~fGi2>E3o&ce<$k7)s
zq_Sf&c*l-nsuJ3Gt}Qyn-zm~S`s#r`Mq)~oGE34e%9KG3GH<^<-(%#}a2tOrf$#UT
z8sxe&J(xbSYum?A+$gd3rWG>Z#p$&>|JFHi0i9tw-NfG{+tr5WdDa;hrjV9MKYZxH
z+FRB$rZ2ctvyK{kp6T-kt7yRoPK(m9_)w^UeC-(n{LuZ!I;bo?tujYPB9DyUk1k+Q
zCSJ2$EO%oepS_)PRHR53>-IQ8Mh~VjgISLDWgjWMq)iBXBLS%(?QAGJJCX`}qGQCB
z<uFcHIkgiMjZF9*_`XDrvI<ODuhCk(vUVp~Lkxsjng~vzw{e4Y+qgA&`P6pVv>ZpK
zz|-#aqRS5I6vZmj<1+S|Z*|)=8W!!!G34z#R-$(1*K9TI(Urpop)gb;M(u2{T=Dw(
z%{S;EeXPond=k0CrU)!M@U;1m#cyv5nW%(CWP)ljEF)t94r4kj51q=kAJ@{U(oz=-
z>k)pxCcOg-xhA-(L*Snsg!NYgF|d$uLP3R*seosE+xZL)d{j#r)8CD8*oW-&|LjZQ
z(gSe}f_yI&1g0&_jD^Pfq1N^*Jf%hlF&Vv2By!)!If+)=_|-=Ksq`7lLJyd%jB%8{
z5iVI>8h(wUXBnsG`$ek>)k7oKxw*wrJ8-wGZie`^a_2To&EwvYf-q9oHAcVFHYh3>
za4{&nor22;)3Ul&y>JO|ttWV}u3k3{qlq#wvX+ne_8^t`ECIt(u#E~$&`H>>5cw3`
zybylddNgbro);xAlOLN;=p~WM0P;@iwRn;;0T(tsT|hcRKwB=5o+EI3CfL)n4;l%x
zPsR&2di^RaG+GO*!}?_+d5}XSXXJZ;CBqVW`M?e11C*x#z1(~2w?bi3A*_%PXd&i!
zOJX0?iVv264UGiv-n!o|{$)Z^%q{Y;rk@V$u-jeT24S)m{A<Yy0-eB6mL@`pQ(r5l
zLk_G#@dB^8gwkIssQc+)>Dix#dIKp^i3+H#l3#=LedKSL>S9IvBO*Sp2D`8nIkB~)
zW~5Tg*)TAhV{p7!Mq>fUD8Qq)7K*UbaJ=WkSx<E@aD?SDkVR7?G)NFpB@YqOVYcKF
zg;3dN5Fs<epkxp$qq=(1bdfd}mBI<bMaFN%c4lBLs|-ZrgqFY6tSe$EqYe+(HF9Jo
zBU@&H5+$;#zdf${){mEh8%U|_Tf^Kof2+*uCd4>6<$fvQ<K1-m-NuSl<@>d|n<2s8
zZRa>B)8hgZ67~xwh>ls>MYkU*w~+%fUqc(*NO;Xi;)<7ecl2`qcIPw{1k8<PeW5Im
zpMyzq$&U_~R#`hb3m-8i-vd1po1`b1T?~RSEqahs+O?wsa;m63y0{k)GnQXcTG0kf
z;L}&s|4O12Jn7i7Y3sD5Au}AY0|7F!8QLDDn@imJt||u@AEnGX!O86G+RCgEnk=YV
zd@JiFE&GNX0bZ64*T=7;-*eXbk#r1scc9+GitJDqPr7g=81k3D$@A>Ioq0~j7ab-l
zAEV`Aw)KQNo0rjj+)fsQ4iN?%q2WF98Xw1puES^T-lZBtxz{RLb^B2#d*lR_P+~Lf
z2A%q~n0;KJgE+UF5v}M>$$Ii8r60A*Z5_PpbB_0={L&#e>a;ktSD1eiquO42&PQBU
zaf2JLg_v^z%0)uRo30e&!B|K{-)8n1Dj_vPUV*+!W*Oorqn}tCG4k%YsDFbGrJLq^
z6Fs5U)UeJCiF0lF4_RJ5d)Oou-P3#?ag>01_AgmgUgzX}lJk!06}_i;uzpD=qJpm(
ziS>dW%z=0~*GilY4fMK*7Vn^YqUwkQ4OXY9Ke=`3pk{U<%?Oq$&eDS%Cm-6#au|@4
zRaJ<5vhERhTZ7reryPMf!S!eyd`ArsXX-a1Tb3t0|B}lGVthn{0Rf!=C>j1sTjQ@>
z-l9fr!!DZ<t=BGZaL%ridTwmGQ+ADB!$C_$n3+hotG1Dn2_A<WIe~to*?p^lsc<07
zc8;V4B>&3i=qm&Bdj_p#1e9al^+Aa228)xt<PkWYICSb1k7k}-FrF#TD;A3XiM>-+
z@7ehcNKJgC9Tq}EeXRW1+|LYyHRsyMo(zzCn1|+0KYXScGE~C43}v`LuX32eVC+Ck
z?>`+2boW~GI6`|XgO7^(^JFw}XNfml1o{p<zSsuak-hcZ)T@PfzlNLpp?7tsnO=CE
z;aC$U4KTeW*DU!WKO3dX`tQ#$q;FdI%x;C`{47u0Dz%@cvi6kjwWyV2)vJp6IU-}H
z&GIU}Gl<2Nbjo}o$TKF#G!6G_@Hz~It@yRb2h*vylVE*xqpqxW8xYX5reb`tg!qK#
zdjrho1hm?>+vn2QBDma{;!{tQOK18qP{Ux3Mp(Q_y3eF8m>?1PMvZc{JQ_1_6KLZW
zwd&a)HBMqz%yH{|Yf}FVl-V-B4Y{qWxHAAfh#Rg9<V)X}fH;Ze9Cdn6(d=aDGjbDH
zF_ab_-Id(UvNc4<d4GI|cL5@T$cDhg|Lk>(i^GM|A1LXoemm4nRDkUUa;&qnW;j`P
z#oKUuF(?S+oe9OjS)y&gyTx7_rBxuUd1ebZ2VtZA&Kxrx>TM^_3nfEGZ$;wgl%>%y
z^Y%pj%PZ@bu4j&zz_E-;M*47Ijk%!6Z81Gg5R6<aqEU;$`b9)v<QS=3lSOVpes1x{
z>VERp{0wA?Yw2E|&q8`U#O-YTT4nx<eB0gGMxcIR{=vRsPf4o8B0{}6`=Q==5tWUl
zvSY2bTC&CEWLX)H8U4a#Ps&Te1`B3uFn^Fll7Q_o&&?^M0}K6p_NT2UN&;h0v{C_e
z?~qBG13yiuut02*HwyVqXQUyR{DovEX~NhkpZ_C(CyRwOt}oU(2@*zojilX72E>8X
zALiQSUVe5&|JSgpsrFp`DCIrz_=++L+UmMusctt2iRA6A*szeh=PBe%?CZpyT56yG
zPSkd`6@?BxkI<nZIh68OA>B5gc;Oe;8}z*@T<qdt|Mx`d9V%V%nI!ic=L_>J*75lW
zGmhBoH#M;=Q<Kyen%h|ZN)HK(19Kg(d51RE2Id~GoUdW8W<Gq9DKu(X(d?MpG>h?r
zZ)BN><0dWS`*DNcTO(m02mka<M~%vLPAwOq3x5$u<%9Lwgd28XWenBX<MwFIwg1c>
zOcj1{#HY)rOQwvx-2)w5ba!cd{ZtZMl<8GG!>jDQnT_-?KyyEHr5(;T!iAv0@v&~q
z2y99f!VtsZW#(b-j@B@t2neKL(qNK9&}7b{jYRYp96J6CvMMp7R%cT@w8E(MCUMJB
zOl!U5`!HUG$upW5)E&~RVx#dm`%O@t%-H!w^Vq}to2u|7D$&8$MT!5Td{x~K!V~8!
z+^Ve03bl1HP(iSlPX%M`Lct4eNvzwpV+6<NPm_weLiWd0;w??c2ak)?J<Qwku9Bk!
znl5o?J$X;}T5!hei2%hbIGhZcn;+=kLsvpOVeSQ>K^PB!yI}llqtM>W%*Ni(gx<I`
zUg)o`Z0Pi4-Jh9gFQNs?$XO371-+PU&p8+Ki;+fccXQmB=Pg@g8Z;Crox^LhL+>lG
z)qT?hvM@g(N}88AHm<rFY;=)`W|X89b~3Ts9NwJ=tUWCIpH3Y$qwY2Qfe26A842wP
zD#b3HhH+665f%5%j?G8?k~KiB#+s$l*;11iKD{tYYj@^ieiF5dTgF{Mj2(gci*(nd
zipT8)TM-Nf+bf|tKe@Ts$A?u}=yzZt;y5`;k#I#JpApkkS77GywuaoNYZ~LGnXJF!
zZ+B6%dM1@}%2<4I%f_=-$g!SY%-wcGbp8H?^kks-XN^0+F)V<#_5bJ~i_%yo7!W2j
zaBJr%L7Wy0TpFR^(E3m!H$<i@Ye5t_zcjvH?daH5Sr#~BofI@m49=WZiTJ5E-?mKy
zNGy)1jOW?RR-HfG^_VHZ(<9<0iQfF-Q8cbr-&AF~D(IvY?V-*ue1>fvFa(@t^R+(A
ztM%jYl_QMw{{AeDvXye-033O_fQ~QPzdlPxS5sG0dgC5-Pe4-%K$Q1_Ru(~B(;4X~
zrI-QMEP`sBy=o^6Vq)4dFS?(#=Yns%m21x9IHd>ry|U&mZp-_#=ZJK~cv(DT)lLvP
z%LjjF=jED(-!m{?{Hh#tZx&3Ie=An4T=^*@CVrY8UJ%wyPngncg7c?(TTWPvV(S&2
zno0^!2>q$k7j?r8t%*v7lP-^*k}SzBDg$No2IC+1RdU-fjdbrF-dY)vwMXIT1@vWT
zIZBi5H+Ufow_qq94ml-(JdOx-zvLjg>=GU6*yk6P5T-p6mUyhpa8c!wpMP$<9n92J
zm`)oGT$kHfv_OKt^E1X^ycfCyL5=*T6#uapuz;>u=|2b(>effqg%v}Jy6c=a$tnFK
zzEGnAQouI_uUq)n0{KMmEXs7q2n!w=zeVt$+-N;#(70WP$7H8^&?Zf+VIrzRVHKn`
zqd5(@*V1|b>kQ+);&glfa*fi9)^HT3X+=6hK#)7s?HUgmM%#8ey~FT&=+Bda-P82~
z0-voy`O_}<<Mn-Un@Y|Z(E9E!!W3$!Pt%Bah4}L4<K27GytplYyv(H2Npk^Xu$-oy
zf`TQmAiel#h%9c#nZ2SwhTGfXBFP2GBt<l4?oo9$*+8a2!He~C{4^7>y=DZ1w76f=
z4!bN8Y`O0}g`z0=EGsrDC%aZ{u8ml}U~C1t5ioB#&;+_Jkiha&{5!KAKI|ieVG>1a
z+P1AK-NdRSYfe4B$;^+3&?l{LgJ9j@GaqTN0awjH%+`x*!h`Du2;#`@{ktzszk8<V
zH;<xDc8lyUHO8A)n0_b8KlQ!ecbeUi9J{549zWQ{O)Kv{2&zPJ0AR1~ZiZ8H1)FPP
zc_HIvmxo<0HSC9-*7M&>H>z4Lu3Ij?sk^T_HLis1O?>Ke=-%w7nO`@RKW_u|iT3%Y
zA3)ZHzNa=o={B0>n=KRzhEYx4Di=3)+lW_ExsZgSM|5K+rXzVme?Nv;fm=MUz?`>L
z-n@0tWF!8bESzlK`K*2TY+QbFnFp_*3}0uiTz1L?A7MRlZUxp6h`-Bquy(jmw<1(S
z4s)s--N$<pMg&41F8qtQkHG}bYWGt&O~<_QGkV=l7FGRrvh|)<4h2Hdsltwk!)b9E
zyM8sd(8&PhI_yW<x|u068*OaiN81+9Gm2D(?o+If8EqVsBwdc^jP)5#Th+L(W&g^>
z56lXRyhfgM$~2$24Sf}sBKDb{*^?C)C&%8$C6>InYZKO=n8~v<3u6V;)-G@}l&b`)
z2tP8k6PQ;7P0^U+@{sl^9IEubr+5v_gsCHgC8y2{*;>~8QDVg$ghpzph9sIU^YZzY
zcSwww7(Nv8BPs=T*0b3a4;{FJ2}YPREg7m_jNgz**@EHRsHMpWmw7nb76w6X)b2~6
zCx7V_8rUjz!J;z>2ayj1NwYs^X@25~BG?sO_mAfy*jRS?2T8gmIXOI{4rd%X53_MY
z`MGpyXN74oj<NrpRg$g>4zuf#Wo-?bq%5b5(GMPGMe3OZVUd7uPXyAekz2tzBVJeb
zE5#%upd6)alZW2ZZgiEAnteuMjflDp;5b#~#<Ikm4S(oT*mDN34{A>vUFr{LY5?;R
zTHfS(@jC|kiXUaAILv%<>L!cQLO&<jyW63Y<++6Yay7jt=9ZZ~y&BnK6Q_&w1YcHF
zM{P>yHFkJ495ba17r^br^gQ>I*es~i<Ikm^Apagc&DpL5a{zC_1nB=UdRp|T>D#X|
zBKh0#hpOsP3eOkd&JA1h$gNJ4S<Y`(v-Spv89}}Y4=o_}HOYYlf=RQ<NlN+jzHMB6
zu84wWQUg=%y^mgv>gT^a<;UOe>2t<Ikfp<PwvNe?Yf=SltirTY4oBJdYg8_F9bC*_
zc7~vM6y0@OW43{7P{ggL7b{T*8T!@jl8j`))=RmFB+0EN-8gM(XtJ6HZBDAZSIR0g
zPG+AkuTSxGXCN>fHeyx_n+!-b!STy;wY2;WbThdsPcaFmm&)W^8jI{Ya7;Tf(U@P$
zZc40Na>o>+HPB3WiAz)l)lu@mWiJ}dNKM9utx{6iJ07X=y{?+HRM+fKB+5`hi<4Ii
zD3P#=R7)eHQ@NGiU0~X~G+uYg7SXfFZTPNtKR5EjVtF+%F&V28qeNvpiS@EP9eXd1
z31Y<EnFDk3p%3<TXKhHG-*=h<^)s=jyZInlL7qRFT`MYBvZz=R&ICyti<|9s&w9z&
zxYH(!8<D8V!Dig6yQgs*K7Ag-i)R-oC7}|MUnrS+(FgchtNkXDz;vj%es;CT4Vg9a
z**&h_5pC0T{t}RmQ<|pWA850o{rHCT%D^T_4YSrdq9sj(&ul7W{<~v=;ObmXsBmS9
zzdliWasCkG)|%!^uuWlf%H}J3!qg^RZ*g%~tIm-1tj_M5<FFpycR>_GlG3Q@AaoWQ
zUH|d>mT$tx!_kNkZEBxg46P#WmYN*TVlZl!YB?2t%p3CA&w4ClONK(x=G9D08W&Z0
zc6@fZ?Hy?&($zbNuG@LBopk&NRr*8$nO`Vezg=h_ot+>Ew66&we30G;o^xR_yM`|K
zc+M1S^39xE)AQER4?oq1GiZkn&hw{<R&`ZGI!O)w1WRDJQhoi?O#5F{e2khZ`4xa%
zfE`ee0w{C;J^}3HL2vw9{VZ-y5baYg-)LA8rgKEs74;{%5LIU)I&J$V3)KaH1x}lX
zE_&JgDJRns))sU57*Wb?Z7OHN{gP$Xb($Q(xhH_j<aMMVczl}+f1_BWSisv9M_K9B
zcLt|ME!Eg6FK(JG0ppL~DdXHeu6XX)i|8`#p&r({9((kLgyUcZeyv*j=Gp*J3I0Z5
zc=$d4%crmV$2D**VLDiY$w@t%s}Z6LwCCiJO>{*VkppiVT>4K(Bs0!v({Wwhry=mV
z+7Vn+4O8ojCE8hZ{RKLIdCgx|?1KE&AWkQkaQ!a{S@y?Gfl}q=S8z@)UmEisM&x;N
zONB=kQSr{)PbfGgG(cGGn<TNBwQL?J<!-`cv$sJmX_#%2ULLZ$v0wv<r8U0uI42vV
zC^wuhC=In`Yzs78#IFc+Cl6zW+SU<U#S$>PCRS6M@^DtYIWfogD<=+N%vFClWn2Oh
zsH{j;CzDtaI#=VhhS-117_z$P3N+_%uc^c-Mv$CTVAkI}69~<4ozYqD@VZhx^)fSd
z&<dT9;J<*ODoDye^B!}l84dWiVqn?HWYwabjx0q%H|&d|{Z=*>4>$XoL=u`$MKYvA
z3q~j+{}NIt+$#~A%oK`UVS#({;KYl41LgLe%l{3BdR(aH;wv*P=OoJg>*c8U6PEi-
z0R7i$i8|KGH-l0|&l(e`wi+@+Cg*-Dsl|*O_+yLrl-|p11{dV*Ku<rJZq`(5wAFBT
zc8;vUnMlvq!{@vbVjAmUa1>wZis_(m4yCyi9&7>~qd>MN?0v%TNLdv{r$`-I8nk`R
z6P?#Dpr4N!;0>H$jfA1|?kFI=XFl{jT)CO$sP{7HSi8@Djrq!zHD>I6-X$nJG*v&!
z?%f8Zq0@}T3}^r@N~~vl!4WcUMp>%4fY>fc+Z8ybV+g1Ki#O);x%|vF0=|Rh1mCQO
z^sMqB((LS7gbr0YdRl^hdEx2nGL&^eoa~$@C-78plVqHh+tY<3zp>k#1o_BR!OKAH
zd*UbIGXN(c3Z-Mv0J2P>HRU)*3L)t9x)eeKMq4kdtnihE+NSsP09SLidhxS3Bf_FI
z9S3QslC72=VjcbF%ZqiYDfa7Ayr4!{w-RxBz@0dn)md0^Tezq_?PzPs5P`pYr&)F7
z!ZDugdLMKE%AanKjEo<CUNo&B4YM=@9cj50fh$NC2tJuVV#tA963FK!*-q!TC0dxD
z_Sz~Z(gB+%Kacmjm1I8lM<5qLz7*l%rt;^Jo1U-|?<A&&LCLT(Wu|tH&;X}n16mXK
zLynwg1w2>wGkPLAe}iJ?biLMd@wVeGV-amarytLs+X^(0sM_{5FfpJ<0${S|o<MPp
zP{?)GdQ{H2o$Adz9g;)Drm}AnSj|^1&B9k-Gvl8&tgyGLNgx{-^T3SbVJ!R{vXjF_
zv9__!&GUN+W0<sI8FQo>>sQXr$XRGA9-Ej5qTGh1^HPLFL1kHU_Fe<4w|wC#-2JxI
z$6G%=+x((e+S3u;yvf|@WM6#FwhAe1j9J>D0$kRuV6=n+visQN&X<L&**e!F5{1C{
zcnXY2h|Fih&uWC)gAz@SaFG-(n^S|~PWw>mZ;^#Mwy9@+AxwqZ49TpiJJ$@p?ai?V
zNVo}CcE7yDsjbOdbqy}^44Q6i=WjSEOhae|D+MSDApn_(0J`szhiXO)2gev+3>m>>
zHMUu76HlwUo}M2R(t6!Ev4REN=a+cwU+*J58^b>(V|yr}jZZ^><B*Wo<<pRFL7-8g
ziA45qRmN<qU@Z2#uWLDhEg?RYK0K`CU-XF+NFEJl1k+UF%rWGR&FZXK4=ZU9ZAw7B
zgLcw5Cyi1gr(yDht*}Lj3lbe=L=~%ei8<rBVXzN*6pAUqJ`iHBFDFlgOD4(LR0kY8
z;Qk<aH97n<fa<TXJ}^zQvGG?p2UMf~U0Ap9O^~w&VFGLsJyi#FX}T-|>vdk)yae+z
zSolWKBk{K=XqF8}^3TU*cdb?!l0&r3c=NEr!7hUpY=f4(-adnl5gNfzEB@=!>KnzY
zCQ}w!i4pU|$%?%lq@fKv$@108dtO~WLoWTv>u2_Sy2+n!1wxf8o827a{?0C?jXdFx
zP+f`ewB<NTs-l+6W&R4I5Jznt2h390)=V{w8g5xvlNDboh@v>MMz<z5;{FHv_*5Kn
z3t?iU`tTGsn$kErA`cn#UO%1`WhAaHJdx8y==dSX#MXh(@T%SK|EYEHmoLr{)f6=V
zUDr|oi|l_Xcb#1fU7YESf2!NY&j})Zu75^BkH8*q`ow;pm7#Z6WpK&?_K~`8taO3J
zm62JcD+>F%JU>rsRoq@cOzL-Y-!#w5EPcdB4wwc-b|ehT*KxbK+@(9?Gpk!~ntco8
z35lJ?T;t|mO~XQ<uM_xFL5CBbIlUuB!nNmgdM9S?rA5LzM~;$27R56^GUAA(Fjq4#
zrszDZbRxA|114C@>oG#WYoJioTOdeM1PxoH^5PsW(F>)hA)|z6&)=vpG`nIkHuYA_
zyMA$;UHLq|S5Wz(kwi+1#CxCX1>6qFBgVhg=ZLqaMqvcE$0<!-P`-kNX+(dssuuem
zYugi|-)gGaSCB}%LJRvnIbE9lX=JNi7S~p+d6!7CLzK|MT-8Rl&zi+n1FXrD+0CFd
zL`_$k9sn|7aSA8ZJ~+C?Ym`8|h+^53Y=ln#Ek^4z3RsIn-$b@LWxcFdlbnC3g{IgH
zTQWSg!GdK3MPiW|?v|+qB^-@m+J%#I<n26;YLN*EcTSF_OHtlS!S{Xu%dRjynB1+6
z&Y>4c88JKfnwf%!9ZOLTPK?Q*1WWgxmRY`)2rU9Bi;QyVZmX=J+#pR-ed4YfRXiA^
zRG^7R9=UCQ3fAod{qfY3Ct@+vsjxor8Q!FaiMXk&Tfg~WeTV=778pGM2_h~cdM7)$
zF4K)|Ax08P2|FY|^gSq9V+5&@Gr3*6P|S#%3mrn=qHL;TiX?+5r4#&XnK0>UFK%M}
zhOT*nS-?`l{pmQ}82uXBCd*>LV4vCjegwb%Zh-5o!*`N4^xX&EL<gzUbdq|;LK|8E
zvGDP059PpGi_V@J9{*=cc}B?62HuSj0__ShemJL@wZQFM#nGZy2*!gDF^;@!JU<M_
zP%Ek*?!xNakw#{$TXTXmDsG$h9nBTj&Je88+=p38q3G`zj8D1n;|1aVp_KG&G~0?+
zldCBRb9G2B*PT!c&FASWv;K7BDPMT=3kG>ZGMD=?On$n3(w0jCT81yL=OnV`W{})J
zv~ml+*I&f`UEgIh7->3n;mn=W)Tde#v=Ad*f6~9z)n-3-8o^7$E{ZRL&vf$>>l{0U
zVk`DI6sOT$oKe}&Mb!^&&84WX^;C>2O?D)zwL^Pm1?6}phRLI|g~jJN)^vb;zI19`
zcGq2A#TG*q!XIc`YxcB8ak{<6gBjCBpq`tq1_WTP{rtJNXMC}D*+8=DAgXPvz;lmX
zv+h!F$igP6C3j05Qw-glW7d?(uWIdEhPPjeh&P1LGsr+4KXG%@`O})oD;xGVT;N#t
zO?O^QCI2)@NCKj(v*D905#J0ojjcW9WRZ&O!j>EI_}l^`ohHsmnVy(+ZJ~<A7RW-a
z-iFl@h@n>QrTpn?H$zIV94&fN_gwB8`;V`&*Jbs~MLI_Wvu*dR=fV5;+AEcTo>8VZ
z#6r4c2i`9szZ55{`Z9uXwT%rmlI*q|FYq{JL{;7u=&5(P9ghDj9si0%v}0|yAAqQA
z1t=f?he%|Rn;;|uAhr&j-q2@a#e^RS)(gq;(x`;w%ODa<EHp9C4&@F#x1)>r+OTme
zZV?4jV42K*>M;~GW2Ko3(5TtpgI9Yfs&$B@Z1l6q_?EIc{CkNesDp1t&-l%4b}F>K
z_AWM<v&rs))=ESh{L09zE^xW-{#{Wf<pD(<*}bnlS!08LJY8w>eUbdUxpiOq=DYkY
z+?j4axn)c|b&Oo&Q+vB$t=uPkM-zklkwpU91+jvhW7Rho{**go8x4g&ap(Hm`Ab86
z)7Uu)5`w@x=9_MR>a%5`6bPM>K!=;!N(`v)pZWt)mHG;QLeDg@;a=hnT|Sq@XkgRv
z{}X}CU+((xDTD+Ga8*1Yc>kA|m<ymb@l{^4S_42+m;8QiD;m?ZS|-nhb2G92fi4Y9
zX{QQprEPRKp=%*GWoWfH?sHdLR&k=PcXu_L;$mr{aU&5y!uo{;PTNQK{C@Y2+UGk_
zhe_DWyUvg{<K@R#w{^vw_C$Eq%d8Z=tWASNpCT0Z4W&I_w8=a$Zz{n)IW|wU2g8cF
zSKh6Q>@wtW2RTAOtJ#B0P#D4VO%x`LG}Hy}SF9@MjQC1RV56`+d62$c*m<O<X}19c
zi6lJr^#uG~w+v;|C<}5%z^CK1GWk>BfMqlRet-EE`Mu1i2L?W6CG`)BAnR+nN6B{I
zJA@ML>1l)Ln-nGnSu{8@sqVa>kmy#=?kZDOWIn|GlM|#o3z?)Ds#f_=djC!zr$RDm
z{OOb!j+Csd2nR2pnOlx;I|a2%Qi@@E&O9#r)QSebBk%>dV=NqBU$S_rL1I~z3UMTN
z!ekQRb$?tmWJngqwTmlLyIH^|V2r~fwFHU)w-0_LtXyos=QGq2*gY@4ubw=~LUb?;
zyFGoTf_v3zgCq`fo^Z8%;R$Yr3LB&2<7Xb<u{=)4q{zw@wX+8{WXW#egZJ&m_viYf
zge@@X{*;E{q)1FB>&}vUOUlkpR<q3eWX>|@v3wR%x=Cr4192SwTq$qRUP1DSIynFC
z$mUF(C12i-NxOQ$W~E22y5JSmkU;$QPjo$hjhcL>JmylspX?lP{&%CsLN|^HF^CZ<
z)ci^vfN~4|1fnR8APW<UF=XsWc%+sqiESny*0+Z=*e;v!zUuNuqNxOz3e#9zxeJQN
zsf$2@l(W>DAg+BGg*8gVhwjVcCFamIzpsro7{EBrQKjg{G>se8PE5C9WB5Zx%#-PD
zwiq8HJ9r^fg!`SfR!AtVSV1{pfD*U~`_rm&v$uS>bJKa$OG2_1zQ4b9K8Zk>W=>e@
z8IZyNg9!XvkN^Vu1F#BU%s&n=AhQ2^jr4y%0qZXrV1S7J?G6|S3&>k-xbFCL6_EV>
z{rFF*P(aAx(3%|G^+=Wg=yx5?U%vVesYHM;kpRxWq|zJP7}}ZBnb|wp8oK<m3RjIn
zDDMA7#b2901Z*zunF0{z`0zkLaQ{#d01)2^Q2dt)IYVnx0F9FAKT8kD0z+8%e@X}X
zH;yTQn13A5|B(KdTcqIrUAmE>^WS~5)Kxq3zxn2`AO59)I~fQN6ahFf{~<a6aOd#P
zqD}0LtxcW&V-Jh}e>VI(PZs?*q;)Vrm<s^3VbK4fhzOwQ8^C9OdFbD409@!B+uPa#
zHp(#mXEi=uO8=`pf9-KW_Mqrw4-nc1IR8^k0N_p+07v-m)L8tpngHSq<p1{*`FCq-
zSH(zR00Daq;3R^-S@ZSZsBtiK(RZ*hbopz1(f>_fo{7f)YSv$C@X0-!mRbNKselPE
z#{Q;H4DgEmSF`@o_aFOU{7v19y3YS6b#R*GslETFvojCNIg8@>j|mN$>1piDRF+hf
zEZIh5G?s|5L>fv{@7u=P;;oeBp)r%0Av6_<Au)rghIq0}#gJY0Eg5S(b`M3eX3p*H
z?Y-yze$|=b36DSS=lz~@?z!jw-ut@;Y?cNg-4@{-KNWE5F!eNL*wa?tBv|<*=R7xv
zjC-K38G?2FOn~`mwIQL{g4#GA62<ngU&LB11i4RrAqi}?5Ga9BIQBYST}VO{^xfEN
zv!x`3W{P<xCQ*(+j<H#iNJ2=xOJ0>h++F#Cy~)C-GOz6dLK7Mgj!m_HKr#uk_*Gl8
z3y@XWRK>yeeU1RB`c7PN$_DtPkZ=p*eecYMU~39Uj7!X`9fwS~BX$)Dm$dW9BgoOx
z&~9RE58o}|D!(OG)RuNAl>{@klCDI88PCFlXFQSSAFU0BlqD3dBf&N-cA8=i*B0bL
zT(m;L;c!ZTX`>Zo-R!b~L`w3~9`6Y5>IBmklVXc%Bq2?%wwp*KuSEY2CxBE2nFy1v
zUlvFa%9=)xNGFk8Ui1oH0bzI@elV%{N{u9>X;yM3iIlQn-~cd_)Ca7G6J8cP%|gkX
zwPg}!F=b_EOGc*0=Xw9Y;&3KFOclA;FlC0nx0v$U1`H=jNpPMp+Ya(}tM($~@G~U{
zvX{TlC=#S%-qf$1!T)PuX*I0r7GS|nZ)FgzpP2TfurYOFehfDL#%YMj>nI2{OK{6a
zX!@*tufyo-LwjFbBL+|E_I|?s+R9LQ6=$dsSrzA0uU@6wgF;lANHs3x;u`O&^V2hT
zKey6MSq@{}h97LC0AY02?yK?`CSUlpxsg>129gT=V)bI>KdSy{QEj9c_#cP*ScKNk
zM;+{E9}$fT>i<*nnpSg|vD5;G%@<zD#T`d#QNkWb$lYO>g^ZA(KnM=dAj++Y1$9t#
zh2q%|Tc>FdRP~?(7%mhK)Ib3}-eIBI2)uSvdAHhc+xH7BIOeb^Y+Y?P3s5!92>XZG
zjj>dkAdFf(D~BX4ZPOo3JwRFz%omL6*;pABs8(TA{=qyFYW9KY4*NiZE#UAnn%_hj
z6%?w9&|uiT{5>St*xf7UY9ZaY!_fz<D-TK(W;Xpi)tgW@n-)3lBcT#}1LpnE4$7W*
zNos?<<!7pllBc)2<6Ret*||p!Z-E<%6V>eK9$z|I+R0c|{Q&uSW%l=WlBTeqhRsU@
zKb-~>V)Ge&CFdw^Qz$u)ZT6P;d@?ozP&r1d|DRq`3<S!R!8%m1UpVX}eItW3YF<m-
z+dO%WXjBbgIU;+g4iM&#(t_1U=fibvyIHn=I_Ox=Ll`DSRss|m2Ijg);z)LwbAsfG
zGZlk&dw?74;LTdYiemxX7sIMQJ_Ioy{nsw1NuwX!Tx!}J@@dIbNiw8pHhQ!xKf1SC
z<%>5|eIJ}B5te?tuq``^+aHoACggg{2&lBU<XK1}%w04%)VdYCcdQZXmuO!^&^Pqs
zqv+?z8ze|GBkSYiAxvL_6ufCWGfN4AloX{DlOU7oCnbkKWUh7-A?|Y!M1PkzucXxZ
z9tpDL!yP5;B86=bP*_P>oDu{nDYJP<g4m5*W#I}C8>l*PqDqd(5KU;5w<<!b!O_)t
z^<xspG9>@acMz@S;2O+nkyHZ*>AAP}35io=ZoOwMY&aeJiBVjDIh8ld@ou?1@i~cb
zGo#6#$DraJ<Uu@7QXmtvRSMD+{N>FENP|0EEl0^$kB&bW)C1~#2Ux7|Bh>8mh@iVo
zgBKA#<24C!ckjlR#W2xNnv1)l=9vgleGZ?eY&-pqL}`Dq_hXn3(qyQS3{@XXS~l5=
z6lAV9&)<_MfumBdt%SAqoJw>^;dVp`3!I6AcYNoDqy;47!I8*4FiCnsU}2Z17RU%l
zL&tr_B*LXjja#;ZgtF8{wA7+gm;eoqS!IWw*LsXGAyLZfN!K6=N>4xw_D9imMGDgL
zI{#x5#a3G9c@8GWU*HpLt=OBG5)p=VSoSs}5h_%_4`$Dt-66+g!h$<80@AQ)QZo|a
znN`xVMerEO)SbOnr!RI`<c8lxgsOKN_}S6lqz#GjI3s(FJ=kdi_#HFsUn?+>j_jlD
zNQ{!R4br3FWB1y{7i~;OTm@pVbD6vdqYE8LfZLv~CT<NSX%{%#P!%$OgL!>E8v+0+
zSe4V61h{Itt4A)ZfCIi1t&{~1#JF@@b?k;0v2<)#5@U1t?Tuh^X$c6yo><jgfq`^5
z9k3@cUS<|&v}q|xjo=9pHq^nMh{3*v=0&({awGwA8vc3J8bqvzNr3@PT}1#=;MJrr
z32@%atzrc<TfZ7B0(?dy05sD^3O-N|BmoLerI!Z4Qqc~!(^!F3pa?(;C>sIxJOy`+
z*U#!02-cin9e~>fRDaI`J>+?R)csJ!Sduq@=V6{9Iv6Vw4DH|%gYeB56NG!R^9;LC
z9Yeq6`_Cej+N$dTY)@982jD$f^1_)UU*lHn`hhKA;HQRx!9c7rJ8zl!frClo<A&k<
z`03NDETg|;SZTM>-E0I2gBud@n60tO7^EiMYa|JS`vvisnF|Dr(vG9Ncr*!vFCXVI
zi3!RWq{hyD90`MOeC9E+%LI(9N-rGZPGaEe{&>ctR0Rgo;dtGH#K1S*@r(&+0z+P@
z+m4$|g5b-#cu4tgB7|Mt#ZQ(e7pKq|=n?^*u|G?JfsA!O*@wiyZ~O9$x!VPX;+~^V
zo{Gf5FU|6t)EzZ&ke+yBe-a13@yc^X>=ZZ(HDc@Ifg}okGmNMFl&45RT4wrAtCPac
zH{mySc#1ppB49OE^-Y;#omkW%goMDS$9aU^K{*1cnCcQnLf}K$Jfhzr0ijSUjyR<y
zQSgCFp0fX#A_Zw#{Vak+!RHitN@;;W5$nZI21bz(_)r^<us$hAAQd$)@(rMZmLwne
N?Q11TZ($%w`WIpSXy^a{

literal 0
HcmV?d00001

diff --git a/drivers/net/sxe/Makefile b/drivers/net/sxe/Makefile
index 5e2870fdc4..fd8213701d 100644
--- a/drivers/net/sxe/Makefile
+++ b/drivers/net/sxe/Makefile
@@ -13,6 +13,10 @@ CFLAGS += -DSXE_DPDK
 CFLAGS += -DSXE_HOST_DRIVER
 CFLAGS += -DSXE_DPDK_L4_FEATURES
 CFLAGS += -DSXE_DPDK_SRIOV
+CFLAGS += -DSXE_DPDK_FILTER_CTRL
+CFLAGS += -DSXE_DPDK_MACSEC
+CFLAGS += -DSXE_DPDK_TM
+CFLAGS += -DSXE_DPDK_SIMD
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 
@@ -86,6 +90,16 @@ SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx.c
 SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vf.c
 
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vec_neon.c
+else
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vec_sse.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_filter_ctrl.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_fnav.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_macsec.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tm.c
+
 SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_main.c
 SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_filter.c
 SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_msg.c
diff --git a/drivers/net/sxe/base/docker_version b/drivers/net/sxe/base/docker_version
deleted file mode 100644
index 33ecb22479..0000000000
--- a/drivers/net/sxe/base/docker_version
+++ /dev/null
@@ -1,4 +0,0 @@
-dpdk_images_v0.1:
-只包含dodk源码,源码目录:/usr/src/dpdk
-dpdk_images_v0.2:
-包含dpdk源码和build下的原生编译产物
diff --git a/drivers/net/sxe/base/sxe_common.c b/drivers/net/sxe/base/sxe_common.c
index 62f76ccf3f..0a3c53e3ca 100644
--- a/drivers/net/sxe/base/sxe_common.c
+++ b/drivers/net/sxe/base/sxe_common.c
@@ -11,9 +11,9 @@
 #include "sxe_common.h"
 
 #define SXE_TRACE_ID_COUNT_MASK  0x00000000000000FFLLU
-#define SXE_TRACE_ID_TID_MASK    0x0000000000FFFF00LLU
+#define SXE_TRACE_ID_TID_MASK	0x0000000000FFFF00LLU
 #define SXE_TRACE_ID_TIME_MASK   0x00FFFFFFFF000000LLU
-#define SXE_TRACE_ID_FLAG        0xFF00000000000000LLU
+#define SXE_TRACE_ID_FLAG		0xFF00000000000000LLU
 
 #define SXE_TRACE_ID_COUNT_SHIFT 0
 #define SXE_TRACE_ID_TID_SHIFT   8
@@ -22,18 +22,17 @@
 #define SXE_SEC_TO_MS(sec) (sec * 1000ULL)
 #define SXE_SEC_TO_NS(sec) (sec * 1000000000ULL)
 
-#define SXE_USEC_PER_MS          1000
+#define SXE_USEC_PER_MS		  1000
 
-static u64 sxe_trace_id      = 0;
+u64 sxe_trace_id;
 
 u64 sxe_time_get_real_ms(void)
 {
 	u64 ms = 0;
-	struct timeval      tv = { 0 };
+	struct timeval	  tv = { 0 };
 	s32 ret = gettimeofday(&tv, NULL);
-	if(ret < 0) {
+	if (ret < 0)
 		goto l_end;
-	}
 
 	ms = SXE_SEC_TO_MS(tv.tv_sec) + tv.tv_usec / SXE_USEC_PER_MS;
 
@@ -43,8 +42,8 @@ u64 sxe_time_get_real_ms(void)
 
 u64 sxe_trace_id_gen(void)
 {
-	u64 tid       = getpid() + (pthread_self() << 20);
-	u64 index     = 0;
+	u64 tid	   = getpid() + (pthread_self() << 20);
+	u64 index	 = 0;
 	u64 timestamp = sxe_time_get_real_ms();
 
 	sxe_trace_id = (SXE_TRACE_ID_FLAG)
@@ -57,7 +56,6 @@ u64 sxe_trace_id_gen(void)
 void sxe_trace_id_clean(void)
 {
 	sxe_trace_id = 0;
-	return;
 }
 
 u64 sxe_trace_id_get(void)
diff --git a/drivers/net/sxe/base/sxe_compat_platform.h b/drivers/net/sxe/base/sxe_compat_platform.h
index 8509f3cf0c..a870585567 100644
--- a/drivers/net/sxe/base/sxe_compat_platform.h
+++ b/drivers/net/sxe/base/sxe_compat_platform.h
@@ -19,27 +19,27 @@
 #ifdef SXE_TEST
 #define STATIC
 #else
-#define STATIC static
+#define static static
 #endif
 
 #ifndef DIV_ROUND_UP
-#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(n, d)	  (((n) + (d) - 1) / (d))
 #endif
 
 #define __iomem
 #define __force
 
-#define min(a,b)	RTE_MIN(a,b)
+#define min(a, b)	RTE_MIN(a, b)
 
 #ifdef __has_attribute
 #if __has_attribute(__fallthrough__)
 # define fallthrough __attribute__((__fallthrough__))
 #else
-# define fallthrough do {} while (0)  
-#endif 
+# define fallthrough do {} while (0)
+#endif
 #else
-# define fallthrough do {} while (0)  
-#endif 
+# define fallthrough do {} while (0)
+#endif
 
 #define __swab32(_value) \
 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
@@ -74,7 +74,7 @@
 #define mdelay rte_delay_ms
 #define udelay rte_delay_us
 #define usleep_range(min, max) rte_delay_us(min)
-#define msleep(x)             rte_delay_us(x*1000)
+#define msleep(x)			 rte_delay_us(x*1000)
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 #define BIT(x)	(1UL << (x))
@@ -82,15 +82,15 @@
 
 #define NSEC_PER_SEC	1000000000L
 
-#define ETH_P_1588	0x88F7		
+#define ETH_P_1588	0x88F7
 
 #define VLAN_PRIO_SHIFT		13
 
 static inline void
 set_bit(unsigned long nr, void *addr)
 {
-        int *m = ((int *)addr) + (nr >> 5);
-        *m |= 1 << (nr & 31);
+	int *m = ((int *)addr) + (nr >> 5);
+	*m |= 1 << (nr & 31);
 }
 
 static inline int
@@ -137,7 +137,6 @@ static inline u32 sxe_read_addr(const volatile void *addr)
 static inline void  sxe_write_addr(u32 value, volatile void *addr)
 {
 	rte_write32((rte_cpu_to_le_32(value)), addr);
-	return;
 }
 
 #endif
diff --git a/drivers/net/sxe/base/sxe_compat_version.h b/drivers/net/sxe/base/sxe_compat_version.h
index 32d1a0862a..cf253309d8 100644
--- a/drivers/net/sxe/base/sxe_compat_version.h
+++ b/drivers/net/sxe/base/sxe_compat_version.h
@@ -19,7 +19,7 @@ int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
 
 #define __rte_cold __attribute__((cold))
 
-#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX 
+#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX
 #ifdef RTE_ARCH_ARM64
 #define RTE_ARCH_ARM
 #endif
@@ -38,131 +38,129 @@ int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
 
-#define    RTE_ETH_RSS_IPV4                ETH_RSS_IPV4
-#define    RTE_ETH_RSS_NONFRAG_IPV4_TCP    ETH_RSS_NONFRAG_IPV4_TCP
-#define    RTE_ETH_RSS_NONFRAG_IPV4_UDP    ETH_RSS_NONFRAG_IPV4_UDP
-#define    RTE_ETH_RSS_IPV6                ETH_RSS_IPV6
-#define    RTE_ETH_RSS_NONFRAG_IPV6_TCP    ETH_RSS_NONFRAG_IPV6_TCP
-#define    RTE_ETH_RSS_NONFRAG_IPV6_UDP    ETH_RSS_NONFRAG_IPV6_UDP
-#define    RTE_ETH_RSS_IPV6_EX             ETH_RSS_IPV6_EX
-#define    RTE_ETH_RSS_IPV6_TCP_EX         ETH_RSS_IPV6_TCP_EX
-#define    RTE_ETH_RSS_IPV6_UDP_EX         ETH_RSS_IPV6_UDP_EX
+#define	RTE_ETH_RSS_IPV4				ETH_RSS_IPV4
+#define	RTE_ETH_RSS_NONFRAG_IPV4_TCP	ETH_RSS_NONFRAG_IPV4_TCP
+#define	RTE_ETH_RSS_NONFRAG_IPV4_UDP	ETH_RSS_NONFRAG_IPV4_UDP
+#define	RTE_ETH_RSS_IPV6				ETH_RSS_IPV6
+#define	RTE_ETH_RSS_NONFRAG_IPV6_TCP	ETH_RSS_NONFRAG_IPV6_TCP
+#define	RTE_ETH_RSS_NONFRAG_IPV6_UDP	ETH_RSS_NONFRAG_IPV6_UDP
+#define	RTE_ETH_RSS_IPV6_EX			 ETH_RSS_IPV6_EX
+#define	RTE_ETH_RSS_IPV6_TCP_EX		 ETH_RSS_IPV6_TCP_EX
+#define	RTE_ETH_RSS_IPV6_UDP_EX		 ETH_RSS_IPV6_UDP_EX
 
 
-#define    RTE_ETH_VLAN_TYPE_UNKNOWN       ETH_VLAN_TYPE_UNKNOWN
-#define    RTE_ETH_VLAN_TYPE_INNER         ETH_VLAN_TYPE_INNER
-#define    RTE_ETH_VLAN_TYPE_OUTER         ETH_VLAN_TYPE_OUTER
-#define    RTE_ETH_VLAN_TYPE_MAX           ETH_VLAN_TYPE_MAX
+#define	RTE_ETH_VLAN_TYPE_UNKNOWN	   ETH_VLAN_TYPE_UNKNOWN
+#define	RTE_ETH_VLAN_TYPE_INNER		 ETH_VLAN_TYPE_INNER
+#define	RTE_ETH_VLAN_TYPE_OUTER		 ETH_VLAN_TYPE_OUTER
+#define	RTE_ETH_VLAN_TYPE_MAX		   ETH_VLAN_TYPE_MAX
 
 
-#define    RTE_ETH_8_POOLS        ETH_8_POOLS
-#define    RTE_ETH_16_POOLS       ETH_16_POOLS
-#define    RTE_ETH_32_POOLS       ETH_32_POOLS
-#define    RTE_ETH_64_POOLS       ETH_64_POOLS
+#define	RTE_ETH_8_POOLS		ETH_8_POOLS
+#define	RTE_ETH_16_POOLS	   ETH_16_POOLS
+#define	RTE_ETH_32_POOLS	   ETH_32_POOLS
+#define	RTE_ETH_64_POOLS	   ETH_64_POOLS
 
 
-#define RTE_ETH_4_TCS       ETH_4_TCS
-#define RTE_ETH_8_TCS       ETH_8_TCS
+#define RTE_ETH_4_TCS	   ETH_4_TCS
+#define RTE_ETH_8_TCS	   ETH_8_TCS
 
 
-#define RTE_ETH_MQ_RX_NONE          ETH_MQ_RX_NONE
-#define RTE_ETH_MQ_RX_RSS           ETH_MQ_RX_RSS
-#define RTE_ETH_MQ_RX_DCB           ETH_MQ_RX_DCB
-#define RTE_ETH_MQ_RX_DCB_RSS       ETH_MQ_RX_DCB_RSS
-#define RTE_ETH_MQ_RX_VMDQ_ONLY     ETH_MQ_RX_VMDQ_ONLY
-#define RTE_ETH_MQ_RX_VMDQ_RSS      ETH_MQ_RX_VMDQ_RSS
-#define RTE_ETH_MQ_RX_VMDQ_DCB      ETH_MQ_RX_VMDQ_DCB
+#define RTE_ETH_MQ_RX_NONE		  ETH_MQ_RX_NONE
+#define RTE_ETH_MQ_RX_RSS		   ETH_MQ_RX_RSS
+#define RTE_ETH_MQ_RX_DCB		   ETH_MQ_RX_DCB
+#define RTE_ETH_MQ_RX_DCB_RSS	   ETH_MQ_RX_DCB_RSS
+#define RTE_ETH_MQ_RX_VMDQ_ONLY	 ETH_MQ_RX_VMDQ_ONLY
+#define RTE_ETH_MQ_RX_VMDQ_RSS	  ETH_MQ_RX_VMDQ_RSS
+#define RTE_ETH_MQ_RX_VMDQ_DCB	  ETH_MQ_RX_VMDQ_DCB
 #define RTE_ETH_MQ_RX_VMDQ_DCB_RSS  ETH_MQ_RX_VMDQ_DCB_RSS
 
 
-#define RTE_ETH_MQ_TX_NONE          ETH_MQ_TX_NONE
-#define RTE_ETH_MQ_TX_DCB           ETH_MQ_TX_DCB
-#define RTE_ETH_MQ_TX_VMDQ_DCB      ETH_MQ_TX_VMDQ_DCB
-#define RTE_ETH_MQ_TX_VMDQ_ONLY     ETH_MQ_TX_VMDQ_ONLY
+#define RTE_ETH_MQ_TX_NONE		  ETH_MQ_TX_NONE
+#define RTE_ETH_MQ_TX_DCB		   ETH_MQ_TX_DCB
+#define RTE_ETH_MQ_TX_VMDQ_DCB	  ETH_MQ_TX_VMDQ_DCB
+#define RTE_ETH_MQ_TX_VMDQ_ONLY	 ETH_MQ_TX_VMDQ_ONLY
 
 
-#define RTE_ETH_FC_NONE         RTE_FC_NONE
-#define RTE_ETH_FC_RX_PAUSE     RTE_FC_RX_PAUSE
-#define RTE_ETH_FC_TX_PAUSE     RTE_FC_TX_PAUSE
-#define RTE_ETH_FC_FULL         RTE_FC_FULL
+#define RTE_ETH_FC_NONE		 RTE_FC_NONE
+#define RTE_ETH_FC_RX_PAUSE	 RTE_FC_RX_PAUSE
+#define RTE_ETH_FC_TX_PAUSE	 RTE_FC_TX_PAUSE
+#define RTE_ETH_FC_FULL		 RTE_FC_FULL
 
 
-#define RTE_ETH_MQ_RX_RSS_FLAG      ETH_MQ_RX_RSS_FLAG
-#define RTE_ETH_MQ_RX_DCB_FLAG      ETH_MQ_RX_DCB_FLAG
-#define RTE_ETH_MQ_RX_VMDQ_FLAG     ETH_MQ_RX_VMDQ_FLAG
+#define RTE_ETH_MQ_RX_RSS_FLAG	  ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG	  ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG	 ETH_MQ_RX_VMDQ_FLAG
 
 
-#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       DEV_RX_OFFLOAD_VLAN_STRIP
-#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       DEV_RX_OFFLOAD_IPV4_CKSUM
-#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        DEV_RX_OFFLOAD_UDP_CKSUM
-#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        DEV_RX_OFFLOAD_TCP_CKSUM
-#define RTE_ETH_RX_OFFLOAD_TCP_LRO          DEV_RX_OFFLOAD_TCP_LRO
-#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       DEV_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP	   DEV_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM	   DEV_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM		DEV_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM		DEV_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO		  DEV_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP	   DEV_RX_OFFLOAD_QINQ_STRIP
 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
-#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     DEV_RX_OFFLOAD_MACSEC_STRIP
-#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      DEV_RX_OFFLOAD_VLAN_FILTER
-#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      DEV_RX_OFFLOAD_VLAN_EXTEND
-#define RTE_ETH_RX_OFFLOAD_SCATTER          DEV_RX_OFFLOAD_SCATTER
-#define RTE_ETH_RX_OFFLOAD_TIMESTAMP        DEV_RX_OFFLOAD_TIMESTAMP
-#define RTE_ETH_RX_OFFLOAD_SECURITY         DEV_RX_OFFLOAD_SECURITY
-#define RTE_ETH_RX_OFFLOAD_KEEP_CRC         DEV_RX_OFFLOAD_KEEP_CRC
-#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       DEV_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP	 DEV_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	  DEV_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	  DEV_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER		  DEV_RX_OFFLOAD_SCATTER
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP		DEV_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY		 DEV_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC		 DEV_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM	   DEV_RX_OFFLOAD_SCTP_CKSUM
 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
-#define RTE_ETH_RX_OFFLOAD_RSS_HASH         DEV_RX_OFFLOAD_RSS_HASH
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH		 DEV_RX_OFFLOAD_RSS_HASH
 
 
-#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      DEV_TX_OFFLOAD_VLAN_INSERT
-#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       DEV_TX_OFFLOAD_IPV4_CKSUM
-#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        DEV_TX_OFFLOAD_UDP_CKSUM
-#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        DEV_TX_OFFLOAD_TCP_CKSUM
-#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       DEV_TX_OFFLOAD_SCTP_CKSUM
-#define RTE_ETH_TX_OFFLOAD_TCP_TSO          DEV_TX_OFFLOAD_TCP_TSO
-#define RTE_ETH_TX_OFFLOAD_UDP_TSO          DEV_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT	  DEV_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM	   DEV_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM		DEV_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM		DEV_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	   DEV_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO		  DEV_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO		  DEV_TX_OFFLOAD_UDP_TSO
 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM
-#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      DEV_TX_OFFLOAD_QINQ_INSERT
-#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    DEV_TX_OFFLOAD_VXLAN_TNL_TSO
-#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      DEV_TX_OFFLOAD_GRE_TNL_TSO
-#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     DEV_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT	  DEV_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO	DEV_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO	  DEV_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO	 DEV_TX_OFFLOAD_IPIP_TNL_TSO
 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   DEV_TX_OFFLOAD_GENEVE_TNL_TSO
-#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    DEV_TX_OFFLOAD_MACSEC_INSERT
-#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      DEV_TX_OFFLOAD_MT_LOCKFREE
-#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       DEV_TX_OFFLOAD_MULTI_SEGS
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT	DEV_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE	  DEV_TX_OFFLOAD_MT_LOCKFREE
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS	   DEV_TX_OFFLOAD_MULTI_SEGS
 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   DEV_TX_OFFLOAD_MBUF_FAST_FREE
-#define RTE_ETH_TX_OFFLOAD_SECURITY         DEV_TX_OFFLOAD_SECURITY
-#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      DEV_TX_OFFLOAD_UDP_TNL_TSO
-#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       DEV_TX_OFFLOAD_IP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_SECURITY		 DEV_TX_OFFLOAD_SECURITY
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO	  DEV_TX_OFFLOAD_UDP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO	   DEV_TX_OFFLOAD_IP_TNL_TSO
 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM
 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP
 
 
-#define RTE_ETH_LINK_SPEED_AUTONEG      ETH_LINK_SPEED_AUTONEG
-#define RTE_ETH_LINK_SPEED_FIXED        ETH_LINK_SPEED_FIXED
-#define RTE_ETH_LINK_SPEED_1G           ETH_LINK_SPEED_1G
-#define RTE_ETH_LINK_SPEED_10G          ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_AUTONEG	  ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED		ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_1G		   ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_10G		  ETH_LINK_SPEED_10G
 
-#define RTE_ETH_SPEED_NUM_NONE          ETH_SPEED_NUM_NONE
-#define RTE_ETH_SPEED_NUM_1G            ETH_SPEED_NUM_1G  
-#define RTE_ETH_SPEED_NUM_10G           ETH_SPEED_NUM_10G
-#define RTE_ETH_SPEED_NUM_UNKNOWN       ETH_SPEED_NUM_UNKNOWN
+#define RTE_ETH_SPEED_NUM_NONE		  ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_1G			ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_10G		   ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_UNKNOWN	   ETH_SPEED_NUM_UNKNOWN
 
 
-#define RTE_ETH_LINK_HALF_DUPLEX        ETH_LINK_HALF_DUPLEX
-#define RTE_ETH_LINK_FULL_DUPLEX        ETH_LINK_FULL_DUPLEX
-#define RTE_ETH_LINK_DOWN               ETH_LINK_DOWN       
-#define RTE_ETH_LINK_UP                 ETH_LINK_UP 
+#define RTE_ETH_LINK_HALF_DUPLEX		ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX		ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN			   ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP				 ETH_LINK_UP
 
 
-#define RTE_ETH_RSS_RETA_SIZE_128       ETH_RSS_RETA_SIZE_128
-#define RTE_ETH_RETA_GROUP_SIZE         RTE_RETA_GROUP_SIZE
-
+#define RTE_ETH_RSS_RETA_SIZE_128	   ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RETA_GROUP_SIZE		 RTE_RETA_GROUP_SIZE
 
 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   ETH_VMDQ_MAX_VLAN_FILTERS
 #define RTE_ETH_DCB_NUM_USER_PRIORITIES ETH_DCB_NUM_USER_PRIORITIES
-#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     ETH_VMDQ_DCB_NUM_QUEUES
-#define RTE_ETH_DCB_NUM_QUEUES          ETH_DCB_NUM_QUEUES
-
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES	 ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES		  ETH_DCB_NUM_QUEUES
 
-#define RTE_ETH_DCB_PFC_SUPPORT     ETH_DCB_PFC_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT	 ETH_DCB_PFC_SUPPORT
 
 
 #define RTE_ETH_VLAN_STRIP_OFFLOAD   ETH_VLAN_STRIP_OFFLOAD
@@ -170,74 +168,73 @@ int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
 #define RTE_ETH_VLAN_EXTEND_OFFLOAD  ETH_VLAN_EXTEND_OFFLOAD
 #define RTE_ETH_QINQ_STRIP_OFFLOAD   ETH_QINQ_STRIP_OFFLOAD
 
-#define RTE_ETH_VLAN_STRIP_MASK      ETH_VLAN_STRIP_MASK
-#define RTE_ETH_VLAN_FILTER_MASK     ETH_VLAN_FILTER_MASK
-#define RTE_ETH_VLAN_EXTEND_MASK     ETH_VLAN_EXTEND_MASK
-#define RTE_ETH_QINQ_STRIP_MASK      ETH_QINQ_STRIP_MASK
-#define RTE_ETH_VLAN_ID_MAX          ETH_VLAN_ID_MAX
+#define RTE_ETH_VLAN_STRIP_MASK	  ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK	 ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK	 ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK	  ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX		  ETH_VLAN_ID_MAX
 
 
 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR   ETH_NUM_RECEIVE_MAC_ADDR
 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY ETH_VMDQ_NUM_UC_HASH_ARRAY
 
-#define RTE_ETH_VMDQ_ACCEPT_UNTAG      ETH_VMDQ_ACCEPT_UNTAG
-#define RTE_ETH_VMDQ_ACCEPT_HASH_MC    ETH_VMDQ_ACCEPT_HASH_MC
-#define RTE_ETH_VMDQ_ACCEPT_HASH_UC    ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG	  ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC	ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC	ETH_VMDQ_ACCEPT_HASH_UC
 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST  ETH_VMDQ_ACCEPT_BROADCAST
 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST  ETH_VMDQ_ACCEPT_MULTICAST
 
-#define RTE_VLAN_HLEN       4  
-
-
-#define RTE_MBUF_F_RX_VLAN                  PKT_RX_VLAN
-#define RTE_MBUF_F_RX_RSS_HASH              PKT_RX_RSS_HASH
-#define RTE_MBUF_F_RX_FDIR                  PKT_RX_FDIR
-#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD    PKT_RX_EIP_CKSUM_BAD
-#define RTE_MBUF_F_RX_VLAN_STRIPPED         PKT_RX_VLAN_STRIPPED
-#define RTE_MBUF_F_RX_IP_CKSUM_MASK         PKT_RX_IP_CKSUM_MASK
-#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN      PKT_RX_IP_CKSUM_UNKNOWN
-#define RTE_MBUF_F_RX_IP_CKSUM_BAD          PKT_RX_IP_CKSUM_BAD
-#define RTE_MBUF_F_RX_IP_CKSUM_GOOD         PKT_RX_IP_CKSUM_GOOD
-#define RTE_MBUF_F_RX_IP_CKSUM_NONE         PKT_RX_IP_CKSUM_NONE
-#define RTE_MBUF_F_RX_L4_CKSUM_MASK         PKT_RX_L4_CKSUM_MASK
-#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN      PKT_RX_L4_CKSUM_UNKNOWN
-#define RTE_MBUF_F_RX_L4_CKSUM_BAD          PKT_RX_L4_CKSUM_BAD
-#define RTE_MBUF_F_RX_L4_CKSUM_GOOD         PKT_RX_L4_CKSUM_GOOD
-#define RTE_MBUF_F_RX_L4_CKSUM_NONE         PKT_RX_L4_CKSUM_NONE
-#define RTE_MBUF_F_RX_IEEE1588_PTP          PKT_RX_IEEE1588_PTP
-#define RTE_MBUF_F_RX_IEEE1588_TMST         PKT_RX_IEEE1588_TMST
-#define RTE_MBUF_F_RX_FDIR_ID               PKT_RX_FDIR_ID
-#define RTE_MBUF_F_RX_FDIR_FLX              PKT_RX_FDIR_FLX
-#define RTE_MBUF_F_RX_QINQ_STRIPPED         PKT_RX_QINQ_STRIPPED
-#define RTE_MBUF_F_RX_LRO                   PKT_RX_LRO
-#define RTE_MBUF_F_RX_SEC_OFFLOAD	        PKT_RX_SEC_OFFLOAD
+#define RTE_VLAN_HLEN	   4
+
+#define RTE_MBUF_F_RX_VLAN				  PKT_RX_VLAN
+#define RTE_MBUF_F_RX_RSS_HASH			  PKT_RX_RSS_HASH
+#define RTE_MBUF_F_RX_FDIR				  PKT_RX_FDIR
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD	PKT_RX_EIP_CKSUM_BAD
+#define RTE_MBUF_F_RX_VLAN_STRIPPED		 PKT_RX_VLAN_STRIPPED
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK		 PKT_RX_IP_CKSUM_MASK
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN	  PKT_RX_IP_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD		  PKT_RX_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD		 PKT_RX_IP_CKSUM_GOOD
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE		 PKT_RX_IP_CKSUM_NONE
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK		 PKT_RX_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN	  PKT_RX_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD		  PKT_RX_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD		 PKT_RX_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE		 PKT_RX_L4_CKSUM_NONE
+#define RTE_MBUF_F_RX_IEEE1588_PTP		  PKT_RX_IEEE1588_PTP
+#define RTE_MBUF_F_RX_IEEE1588_TMST		 PKT_RX_IEEE1588_TMST
+#define RTE_MBUF_F_RX_FDIR_ID			   PKT_RX_FDIR_ID
+#define RTE_MBUF_F_RX_FDIR_FLX			  PKT_RX_FDIR_FLX
+#define RTE_MBUF_F_RX_QINQ_STRIPPED		 PKT_RX_QINQ_STRIPPED
+#define RTE_MBUF_F_RX_LRO				   PKT_RX_LRO
+#define RTE_MBUF_F_RX_SEC_OFFLOAD			PKT_RX_SEC_OFFLOAD
 #define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED	PKT_RX_SEC_OFFLOAD_FAILED
-#define RTE_MBUF_F_RX_QINQ                  PKT_RX_QINQ
-
-#define RTE_MBUF_F_TX_SEC_OFFLOAD	        PKT_TX_SEC_OFFLOAD
-#define RTE_MBUF_F_TX_MACSEC                PKT_TX_MACSEC
-#define RTE_MBUF_F_TX_QINQ                  PKT_TX_QINQ
-#define RTE_MBUF_F_TX_TCP_SEG               PKT_TX_TCP_SEG
-#define RTE_MBUF_F_TX_IEEE1588_TMST         PKT_TX_IEEE1588_TMST
-#define RTE_MBUF_F_TX_L4_NO_CKSUM           PKT_TX_L4_NO_CKSUM
-#define RTE_MBUF_F_TX_TCP_CKSUM             PKT_TX_TCP_CKSUM
-#define RTE_MBUF_F_TX_SCTP_CKSUM            PKT_TX_SCTP_CKSUM
-#define RTE_MBUF_F_TX_UDP_CKSUM             PKT_TX_UDP_CKSUM
-#define RTE_MBUF_F_TX_L4_MASK               PKT_TX_L4_MASK
-#define RTE_MBUF_F_TX_IP_CKSUM              PKT_TX_IP_CKSUM
-#define RTE_MBUF_F_TX_IPV4                  PKT_TX_IPV4
-#define RTE_MBUF_F_TX_IPV6                  PKT_TX_IPV6
-#define RTE_MBUF_F_TX_VLAN                  PKT_TX_VLAN
-#define RTE_MBUF_F_TX_OUTER_IP_CKSUM        PKT_TX_OUTER_IP_CKSUM
-#define RTE_MBUF_F_TX_OUTER_IPV4            PKT_TX_OUTER_IPV4
-#define RTE_MBUF_F_TX_OUTER_IPV6            PKT_TX_OUTER_IPV6
-
-#define RTE_MBUF_F_TX_OFFLOAD_MASK          PKT_TX_OFFLOAD_MASK
-
-#define RTE_ETH_8_POOLS                     ETH_8_POOLS
-#define RTE_ETH_16_POOLS                    ETH_16_POOLS
-#define RTE_ETH_32_POOLS                    ETH_32_POOLS
-#define RTE_ETH_64_POOLS                    ETH_64_POOLS
+#define RTE_MBUF_F_RX_QINQ				  PKT_RX_QINQ
+
+#define RTE_MBUF_F_TX_SEC_OFFLOAD			PKT_TX_SEC_OFFLOAD
+#define RTE_MBUF_F_TX_MACSEC				PKT_TX_MACSEC
+#define RTE_MBUF_F_TX_QINQ				  PKT_TX_QINQ
+#define RTE_MBUF_F_TX_TCP_SEG			   PKT_TX_TCP_SEG
+#define RTE_MBUF_F_TX_IEEE1588_TMST		 PKT_TX_IEEE1588_TMST
+#define RTE_MBUF_F_TX_L4_NO_CKSUM		   PKT_TX_L4_NO_CKSUM
+#define RTE_MBUF_F_TX_TCP_CKSUM			 PKT_TX_TCP_CKSUM
+#define RTE_MBUF_F_TX_SCTP_CKSUM			PKT_TX_SCTP_CKSUM
+#define RTE_MBUF_F_TX_UDP_CKSUM			 PKT_TX_UDP_CKSUM
+#define RTE_MBUF_F_TX_L4_MASK			   PKT_TX_L4_MASK
+#define RTE_MBUF_F_TX_IP_CKSUM			  PKT_TX_IP_CKSUM
+#define RTE_MBUF_F_TX_IPV4				  PKT_TX_IPV4
+#define RTE_MBUF_F_TX_IPV6				  PKT_TX_IPV6
+#define RTE_MBUF_F_TX_VLAN				  PKT_TX_VLAN
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM		PKT_TX_OUTER_IP_CKSUM
+#define RTE_MBUF_F_TX_OUTER_IPV4			PKT_TX_OUTER_IPV4
+#define RTE_MBUF_F_TX_OUTER_IPV6			PKT_TX_OUTER_IPV6
+
+#define RTE_MBUF_F_TX_OFFLOAD_MASK		  PKT_TX_OFFLOAD_MASK
+
+#define RTE_ETH_8_POOLS					 ETH_8_POOLS
+#define RTE_ETH_16_POOLS					ETH_16_POOLS
+#define RTE_ETH_32_POOLS					ETH_32_POOLS
+#define RTE_ETH_64_POOLS					ETH_64_POOLS
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 #define RTE_ETHDEV_DEBUG_RX
@@ -248,7 +245,7 @@ int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
 #define rte_eth_fdir_pballoc_type   rte_fdir_pballoc_type
-#define rte_eth_fdir_conf 			rte_fdir_conf
+#define rte_eth_fdir_conf			rte_fdir_conf
 
 #define RTE_ETH_FDIR_PBALLOC_64K   RTE_FDIR_PBALLOC_64K
 #define RTE_ETH_FDIR_PBALLOC_128K  RTE_FDIR_PBALLOC_128K
@@ -261,15 +258,15 @@ int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
 	(&((pci_dev)->intr_handle))
 
 #define SXE_DEV_FNAV_CONF(dev) \
-	(&((dev)->data->dev_conf.fdir_conf)) 
+	(&((dev)->data->dev_conf.fdir_conf))
 #define SXE_GET_FRAME_SIZE(dev) \
 	(dev->data->dev_conf.rxmode.max_rx_pkt_len)
-	
+
 #elif defined DPDK_21_11_5
 #define SXE_PCI_INTR_HANDLE(pci_dev) \
 	((pci_dev)->intr_handle)
 #define SXE_DEV_FNAV_CONF(dev) \
-	(&((dev)->data->dev_conf.fdir_conf)) 
+	(&((dev)->data->dev_conf.fdir_conf))
 #define SXE_GET_FRAME_SIZE(dev) \
 	(dev->data->mtu + SXE_ETH_OVERHEAD)
 
@@ -277,7 +274,7 @@ int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
 #define SXE_PCI_INTR_HANDLE(pci_dev) \
 	((pci_dev)->intr_handle)
 #define SXE_DEV_FNAV_CONF(dev) \
-	(&((struct sxe_adapter *)(dev)->data->dev_private)->fnav_conf) 
+	(&((struct sxe_adapter *)(dev)->data->dev_private)->fnav_conf)
 #define RTE_ADAPTER_HAVE_FNAV_CONF
 #define SXE_GET_FRAME_SIZE(dev) \
 	(dev->data->mtu + SXE_ETH_OVERHEAD)
diff --git a/drivers/net/sxe/base/sxe_errno.h b/drivers/net/sxe/base/sxe_errno.h
index e4de8bef29..3d14e0794c 100644
--- a/drivers/net/sxe/base/sxe_errno.h
+++ b/drivers/net/sxe/base/sxe_errno.h
@@ -17,45 +17,45 @@
 #define SXE_ERR_VF(errcode)		SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode)
 #define SXE_ERR_HDC(errcode)	SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode)
 
-#define SXE_ERR_CONFIG                        EINVAL
-#define SXE_ERR_PARAM                         EINVAL
-#define SXE_ERR_RESET_FAILED                  EPERM
-#define SXE_ERR_NO_SPACE                      ENOSPC
-#define SXE_ERR_FNAV_CMD_INCOMPLETE           EBUSY
-#define SXE_ERR_MBX_LOCK_FAIL                 EBUSY
-#define SXE_ERR_OPRATION_NOT_PERM             EPERM
-#define SXE_ERR_LINK_STATUS_INVALID           EINVAL
-#define SXE_ERR_LINK_SPEED_INVALID            EINVAL
-#define SXE_ERR_DEVICE_NOT_SUPPORTED          EOPNOTSUPP
-#define SXE_ERR_HDC_LOCK_BUSY                 EBUSY
-#define SXE_ERR_HDC_FW_OV_TIMEOUT             ETIMEDOUT
-#define SXE_ERR_MDIO_CMD_TIMEOUT              ETIMEDOUT
-#define SXE_ERR_INVALID_LINK_SETTINGS         EINVAL
-#define SXE_ERR_FNAV_REINIT_FAILED            EIO
-#define SXE_ERR_CLI_FAILED                    EIO
-#define SXE_ERR_MASTER_REQUESTS_PENDING       SXE_ERR_PF(1)
-#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT       SXE_ERR_PF(2)
-#define SXE_ERR_ENABLE_SRIOV_FAIL             SXE_ERR_PF(3)
-#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT      SXE_ERR_PF(4)
-#define SXE_ERR_SFP_NOT_PERSENT               SXE_ERR_PF(5)
-#define SXE_ERR_PHY_NOT_PERSENT               SXE_ERR_PF(6)
-#define SXE_ERR_PHY_RESET_FAIL                SXE_ERR_PF(7)
-#define SXE_ERR_FC_NOT_NEGOTIATED             SXE_ERR_PF(8)
-#define SXE_ERR_SFF_NOT_SUPPORTED             SXE_ERR_PF(9)
+#define SXE_ERR_CONFIG						EINVAL
+#define SXE_ERR_PARAM						 EINVAL
+#define SXE_ERR_RESET_FAILED				  EPERM
+#define SXE_ERR_NO_SPACE					  ENOSPC
+#define SXE_ERR_FNAV_CMD_INCOMPLETE		   EBUSY
+#define SXE_ERR_MBX_LOCK_FAIL				 EBUSY
+#define SXE_ERR_OPRATION_NOT_PERM			 EPERM
+#define SXE_ERR_LINK_STATUS_INVALID		   EINVAL
+#define SXE_ERR_LINK_SPEED_INVALID			EINVAL
+#define SXE_ERR_DEVICE_NOT_SUPPORTED		  EOPNOTSUPP
+#define SXE_ERR_HDC_LOCK_BUSY				 EBUSY
+#define SXE_ERR_HDC_FW_OV_TIMEOUT			 ETIMEDOUT
+#define SXE_ERR_MDIO_CMD_TIMEOUT			  ETIMEDOUT
+#define SXE_ERR_INVALID_LINK_SETTINGS		 EINVAL
+#define SXE_ERR_FNAV_REINIT_FAILED			EIO
+#define SXE_ERR_CLI_FAILED					EIO
+#define SXE_ERR_MASTER_REQUESTS_PENDING	   SXE_ERR_PF(1)
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT	   SXE_ERR_PF(2)
+#define SXE_ERR_ENABLE_SRIOV_FAIL			 SXE_ERR_PF(3)
+#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT	  SXE_ERR_PF(4)
+#define SXE_ERR_SFP_NOT_PERSENT			   SXE_ERR_PF(5)
+#define SXE_ERR_PHY_NOT_PERSENT			   SXE_ERR_PF(6)
+#define SXE_ERR_PHY_RESET_FAIL				SXE_ERR_PF(7)
+#define SXE_ERR_FC_NOT_NEGOTIATED			 SXE_ERR_PF(8)
+#define SXE_ERR_SFF_NOT_SUPPORTED			 SXE_ERR_PF(9)
 
-#define SXEVF_ERR_MAC_ADDR_INVALID              EINVAL
-#define SXEVF_ERR_RESET_FAILED                  EIO
-#define SXEVF_ERR_ARGUMENT_INVALID              EINVAL
-#define SXEVF_ERR_NOT_READY                     EBUSY
-#define SXEVF_ERR_POLL_ACK_FAIL                 EIO
-#define SXEVF_ERR_POLL_MSG_FAIL                 EIO
-#define SXEVF_ERR_MBX_LOCK_FAIL                 EBUSY
-#define SXEVF_ERR_REPLY_INVALID                 EINVAL
-#define SXEVF_ERR_IRQ_NUM_INVALID               EINVAL
-#define SXEVF_ERR_PARAM                         EINVAL
-#define SXEVF_ERR_MAILBOX_FAIL                  SXE_ERR_VF(1)
-#define SXEVF_ERR_MSG_HANDLE_ERR                SXE_ERR_VF(2)
-#define SXEVF_ERR_DEVICE_NOT_SUPPORTED          SXE_ERR_VF(3)
-#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT      SXE_ERR_VF(4)
+#define SXEVF_ERR_MAC_ADDR_INVALID			  EINVAL
+#define SXEVF_ERR_RESET_FAILED				  EIO
+#define SXEVF_ERR_ARGUMENT_INVALID			  EINVAL
+#define SXEVF_ERR_NOT_READY					 EBUSY
+#define SXEVF_ERR_POLL_ACK_FAIL				 EIO
+#define SXEVF_ERR_POLL_MSG_FAIL				 EIO
+#define SXEVF_ERR_MBX_LOCK_FAIL				 EBUSY
+#define SXEVF_ERR_REPLY_INVALID				 EINVAL
+#define SXEVF_ERR_IRQ_NUM_INVALID			   EINVAL
+#define SXEVF_ERR_PARAM						 EINVAL
+#define SXEVF_ERR_MAILBOX_FAIL				  SXE_ERR_VF(1)
+#define SXEVF_ERR_MSG_HANDLE_ERR				SXE_ERR_VF(2)
+#define SXEVF_ERR_DEVICE_NOT_SUPPORTED		  SXE_ERR_VF(3)
+#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT	  SXE_ERR_VF(4)
 
 #endif
diff --git a/drivers/net/sxe/base/sxe_hw.c b/drivers/net/sxe/base/sxe_hw.c
index 14d1d67456..78c56d2bd9 100644
--- a/drivers/net/sxe/base/sxe_hw.c
+++ b/drivers/net/sxe/base/sxe_hw.c
@@ -4,7 +4,7 @@
 #ifdef SXE_PHY_CONFIGURE
 #include <linux/mdio.h>
 #endif
-#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
 #include "sxe_pci.h"
 #include "sxe_log.h"
 #include "sxe_debug.h"
@@ -24,25 +24,25 @@
 
 #define SXE_MSGID_MASK  (0xFFFFFFFF)
 
-#define SXE_CTRL_MSG_MASK          (0x700)
+#define SXE_CTRL_MSG_MASK		  (0x700)
 
-#define SXE_RING_WAIT_LOOP        10
-#define SXE_REG_NAME_LEN          16
+#define SXE_RING_WAIT_LOOP		10
+#define SXE_REG_NAME_LEN		  16
 #define SXE_DUMP_REG_STRING_LEN   73
-#define SXE_DUMP_REGS_NUM         64
-#define SXE_MAX_RX_DESC_POLL      10
-#define SXE_LPBK_EN               0x00000001
-#define SXE_MACADDR_LOW_4_BYTE    4
+#define SXE_DUMP_REGS_NUM		 64
+#define SXE_MAX_RX_DESC_POLL	  10
+#define SXE_LPBK_EN			   0x00000001
+#define SXE_MACADDR_LOW_4_BYTE	4
 #define SXE_MACADDR_HIGH_2_BYTE   2
-#define SXE_RSS_FIELD_MASK        0xffff0000
-#define SXE_MRQE_MASK             0x0000000f
+#define SXE_RSS_FIELD_MASK		0xffff0000
+#define SXE_MRQE_MASK			 0x0000000f
 
-#define SXE_HDC_DATA_LEN_MAX         256
+#define SXE_HDC_DATA_LEN_MAX		 256
 
 #define SXE_8_TC_MSB				(0x11111111)
 
-STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg);
-STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value);
+static u32 sxe_read_reg(struct sxe_hw *hw, u32 reg);
+static void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value);
 static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value);
 
 #define SXE_WRITE_REG_ARRAY_32(a, reg, offset, value) \
@@ -50,7 +50,7 @@ static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value);
 #define SXE_READ_REG_ARRAY_32(a, reg, offset) \
 	sxe_read_reg(a, reg + (offset << 2))
 
-#define SXE_REG_READ(hw, addr)        sxe_read_reg(hw, addr)
+#define SXE_REG_READ(hw, addr)		sxe_read_reg(hw, addr)
 #define SXE_REG_WRITE(hw, reg, value) sxe_write_reg(hw, reg, value)
 #define SXE_WRITE_FLUSH(a) sxe_read_reg(a, SXE_STATUS)
 #define SXE_REG_WRITE_ARRAY(hw, reg, offset, value) \
@@ -90,26 +90,21 @@ u16 sxe_mac_reg_num_get(void)
 }
 
 
-#ifndef SXE_DPDK 
+#ifndef SXE_DPDK
 
 void sxe_hw_fault_handle(struct sxe_hw *hw)
 {
 	struct sxe_adapter *adapter = hw->adapter;
 
-	if (test_bit(SXE_HW_FAULT, &hw->state)) {
-		goto l_ret;
-	}
+	if (test_bit(SXE_HW_FAULT, &hw->state))
+		return;
 
 	set_bit(SXE_HW_FAULT, &hw->state);
 
 	LOG_DEV_ERR("sxe nic hw fault\n");
 
-	if ((hw->fault_handle != NULL) && (hw->priv != NULL) ) {
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL))
 		hw->fault_handle(hw->priv);
-	}
-
-l_ret:
-	return;
 }
 
 static u32 sxe_hw_fault_check(struct sxe_hw *hw, u32 reg)
@@ -118,32 +113,29 @@ static u32 sxe_hw_fault_check(struct sxe_hw *hw, u32 reg)
 	u8  __iomem *base_addr = hw->reg_base_addr;
 	struct sxe_adapter *adapter = hw->adapter;
 
-	if (sxe_is_hw_fault(hw)) {
+	if (sxe_is_hw_fault(hw))
 		goto l_out;
-	}
 
 	for (i = 0; i < SXE_REG_READ_RETRY; i++) {
 		value = hw->reg_read(base_addr + SXE_STATUS);
-		if (value != SXE_REG_READ_FAIL) {
+		if (value != SXE_REG_READ_FAIL)
 			break;
-		}
 
 		mdelay(3);
 	}
 
-	if (SXE_REG_READ_FAIL == value) {
+	if (value == SXE_REG_READ_FAIL) {
 		LOG_ERROR_BDF("read registers multiple times failed, ret=%#x\n", value);
 		sxe_hw_fault_handle(hw);
-	} else {
+	} else
 		value = hw->reg_read(base_addr + reg);
-	}
 
 	return value;
 l_out:
 	return SXE_REG_READ_FAIL;
 }
 
-STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+static u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
 {
 	u32 value;
 	u8  __iomem *base_addr = hw->reg_base_addr;
@@ -155,7 +147,7 @@ STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
 	}
 
 	value = hw->reg_read(base_addr + reg);
-	if (unlikely(SXE_REG_READ_FAIL == value)) {
+	if (unlikely(value == SXE_REG_READ_FAIL)) {
 		LOG_ERROR_BDF("reg[0x%x] read failed, ret=%#x\n", reg, value);
 		value = sxe_hw_fault_check(hw, reg);
 	}
@@ -164,32 +156,29 @@ STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
 	return value;
 }
 
-STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+static void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *base_addr = hw->reg_base_addr;
 
-	if (sxe_is_hw_fault(hw)) {
-		goto l_ret;
-	}
+	if (sxe_is_hw_fault(hw))
+		return;
 
 	hw->reg_write(value, base_addr + reg);
 
-l_ret:
-	return;
 }
 
-#else 
+#else
 
-STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+static u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
 {
 	u32 i, value;
 	u8  __iomem *base_addr = hw->reg_base_addr;
 
 	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
-	if (unlikely(SXE_REG_READ_FAIL == value)) {
+	if (unlikely(value == SXE_REG_READ_FAIL)) {
 
 		value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
-		if (unlikely(SXE_REG_READ_FAIL != value)) {
+		if (unlikely(value != SXE_REG_READ_FAIL)) {
 
 			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
 		} else {
@@ -198,16 +187,15 @@ STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
 			for (i = 0; i < SXE_REG_READ_RETRY; i++) {
 
 				value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
-				if (unlikely(SXE_REG_READ_FAIL != value)) {
+				if (unlikely(value != SXE_REG_READ_FAIL)) {
 
 					value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
 					LOG_INFO("reg[0x%x] read ok, value=%#x\n",
 									reg, value);
 					break;
-				} else {
-					LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
-							reg, SXE_STATUS, value);
 				}
+				LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
+						reg, SXE_STATUS, value);
 
 				mdelay(3);
 			}
@@ -217,13 +205,12 @@ STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
 	return value;
 }
 
-STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+static void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *base_addr = hw->reg_base_addr;
 
 	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
 
-	return;
 }
 #endif
 
@@ -231,14 +218,11 @@ static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value)
 {
 	u8 __iomem *reg_addr = hw->reg_base_addr;
 
-	if (sxe_is_hw_fault(hw)) {
-		goto l_ret;
-	}
+	if (sxe_is_hw_fault(hw))
+		return;
 
 	writeq(value, reg_addr + reg);
 
-l_ret:
-	return;
 }
 
 
@@ -251,7 +235,6 @@ void sxe_hw_no_snoop_disable(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_CTRL_EXT, ctrl_ext);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
@@ -291,21 +274,19 @@ static s32 sxe_hw_uc_addr_pool_disable(struct sxe_hw *hw, u8 rar_idx)
 	hi = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
 	low = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
 
-	if (sxe_is_hw_fault(hw)) {
+	if (sxe_is_hw_fault(hw))
 		goto l_end;
-	}
 
 	if (!hi & !low) {
 		LOG_DEBUG_BDF("no need clear rar-pool relation register.\n");
 		goto l_end;
 	}
 
-	if (low) {
+	if (low)
 		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
-	}
-	if (hi) {
+
+	if (hi)
 		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
-	}
 
 
 l_end:
@@ -328,9 +309,9 @@ s32 sxe_hw_nic_reset(struct sxe_hw *hw)
 
 	for (i = 0; i < 10; i++) {
 		ctrl = SXE_REG_READ(hw, SXE_CTRL);
-		if (!(ctrl & SXE_CTRL_RST_MASK)) {
+		if (!(ctrl & SXE_CTRL_RST_MASK))
 			break;
-		}
+
 		udelay(1);
 	}
 
@@ -350,13 +331,11 @@ void sxe_hw_pf_rst_done_set(struct sxe_hw *hw)
 	value |= SXE_CTRL_EXT_PFRSTD;
 	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
 
-	return;
 }
 
 static void sxe_hw_regs_flush(struct sxe_hw *hw)
 {
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 static const struct sxe_reg_info sxe_reg_info_tbl[] = {
@@ -397,74 +376,74 @@ static void sxe_hw_reg_print(struct sxe_hw *hw,
 
 	switch (reginfo->addr) {
 	case SXE_SRRCTL(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_SRRCTL(i));
-		}
+
 		break;
 	case SXE_RDLEN(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_RDLEN(i));
-		}
+
 		break;
 	case SXE_RDH(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_RDH(i));
-		}
+
 		break;
 	case SXE_RDT(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_RDT(i));
-		}
+
 		break;
 	case SXE_RXDCTL(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_RXDCTL(i));
-		}
+
 		break;
 	case SXE_RDBAL(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_RDBAL(i));
-		}
+
 		break;
 	case SXE_RDBAH(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_RDBAH(i));
-		}
+
 		break;
 	case SXE_TDBAL(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_TDBAL(i));
-		}
+
 		break;
 	case SXE_TDBAH(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_TDBAH(i));
-		}
+
 		break;
 	case SXE_TDLEN(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_TDLEN(i));
-		}
+
 		break;
 	case SXE_TDH(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_TDH(i));
-		}
+
 		break;
 	case SXE_TDT(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_TDT(i));
-		}
+
 		break;
 	case SXE_TXDCTL(0):
-		for (i = 0; i < SXE_DUMP_REGS_NUM; i++) {
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
 			regs[i] = SXE_REG_READ(hw, SXE_TXDCTL(i));
-		}
+
 		break;
 	default:
 		LOG_DEV_INFO("%-15s %08x\n",
 			reginfo->name, SXE_REG_READ(hw, reginfo->addr));
-		goto l_end;
+		return;
 	}
 
 	while (first_reg_idx < SXE_DUMP_REGS_NUM) {
@@ -473,15 +452,12 @@ static void sxe_hw_reg_print(struct sxe_hw *hw,
 			"%s[%d-%d]", reginfo->name,
 			first_reg_idx, (first_reg_idx + 7));
 
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < 8; j++)
 			value += sprintf(value, " %08x", regs[first_reg_idx++]);
-		}
 
 		LOG_DEV_ERR("%-15s%s\n", reg_name, buf);
 	}
 
-l_end:
-	return;
 }
 
 static void sxe_hw_reg_dump(struct sxe_hw *hw)
@@ -489,11 +465,10 @@ static void sxe_hw_reg_dump(struct sxe_hw *hw)
 	const struct sxe_reg_info *reginfo;
 
 	for (reginfo = (const struct sxe_reg_info *)sxe_reg_info_tbl;
-	     reginfo->name; reginfo++) {
+		 reginfo->name; reginfo++) {
 		sxe_hw_reg_print(hw, reginfo);
 	}
 
-	return;
 }
 
 static s32 sxe_hw_status_reg_test(struct sxe_hw *hw)
@@ -539,22 +514,22 @@ struct sxe_self_test_reg {
 static const struct sxe_self_test_reg self_test_reg[] = {
 	{ SXE_FCRTL(0),  1,   PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
 	{ SXE_FCRTH(0),  1,   PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
-	{ SXE_PFCTOP,    1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_PFCTOP,	1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ SXE_FCTTV(0),  1,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ SXE_VLNCTRL,   1,   PATTERN_TEST, 0x00000000, 0x00000000 },
 	{ SXE_RDBAL(0),  4,   PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
 	{ SXE_RDBAH(0),  4,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ SXE_RDLEN(0),  4,   PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
 	{ SXE_RXDCTL(0), 4,   WRITE_NO_TEST, 0, SXE_RXDCTL_ENABLE },
-	{ SXE_RDT(0),    4,   PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_RDT(0),	4,   PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
 	{ SXE_RXDCTL(0), 4,   WRITE_NO_TEST, 0, 0 },
 	{ SXE_TDBAL(0),  4,   PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
 	{ SXE_TDBAH(0),  4,   PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ SXE_TDLEN(0),  4,   PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
-	{ SXE_RXCTRL,    1,   SET_READ_TEST, 0x00000001, 0x00000001 },
-	{ SXE_RAL(0),    16,  TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
-	{ SXE_RAL(0),    16,  TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
-	{ SXE_MTA(0),    128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RXCTRL,	1,   SET_READ_TEST, 0x00000001, 0x00000001 },
+	{ SXE_RAL(0),	16,  TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RAL(0),	16,  TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	{ SXE_MTA(0),	128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ .reg = 0 }
 };
 
@@ -625,7 +600,7 @@ static s32 sxe_hw_reg_set_and_check(struct sxe_hw *hw, int reg,
 	return ret;
 }
 
-STATIC s32 sxe_hw_regs_test(struct sxe_hw *hw)
+static s32 sxe_hw_regs_test(struct sxe_hw *hw)
 {
 	u32 i;
 	s32 ret = 0;
@@ -676,9 +651,8 @@ STATIC s32 sxe_hw_regs_test(struct sxe_hw *hw)
 				break;
 			}
 
-			if (ret) {
+			if (ret)
 				goto l_end;
-			}
 
 		}
 		test++;
@@ -716,7 +690,6 @@ static void sxe_hw_ring_irq_enable(struct sxe_hw *hw, u64 qmask)
 		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), mask1);
 	}
 
-	return;
 }
 
 u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw)
@@ -727,7 +700,6 @@ u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw)
 void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_EICR, value);
-	return;
 }
 
 u32 sxe_hw_irq_cause_get(struct sxe_hw *hw)
@@ -739,7 +711,6 @@ static void sxe_hw_event_irq_trigger(struct sxe_hw *hw)
 {
 	SXE_REG_WRITE(hw, SXE_EICS, (SXE_EICS_TCP_TIMER | SXE_EICS_OTHER));
 
-	return;
 }
 
 static void sxe_hw_ring_irq_trigger(struct sxe_hw *hw, u64 eics)
@@ -750,7 +721,6 @@ static void sxe_hw_ring_irq_trigger(struct sxe_hw *hw, u64 eics)
 	SXE_REG_WRITE(hw, SXE_EICS_EX(0), mask);
 	mask = (eics >> 32);
 	SXE_REG_WRITE(hw, SXE_EICS_EX(1), mask);
-	return;
 }
 
 void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
@@ -763,14 +733,12 @@ void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
 		SXE_REG_WRITE(hw, SXE_EIAM, SXE_EICS_RTX_QUEUE);
 	}
 
-	return;
 }
 
 void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_GPIE, value);
 
-	return;
 }
 
 u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw)
@@ -782,7 +750,6 @@ static void sxe_hw_set_eitrsel(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_EITRSEL, value);
 
-	return;
 }
 
 void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx)
@@ -800,7 +767,6 @@ void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx)
 
 	SXE_REG_WRITE(hw, SXE_IVAR_MISC, ivar);
 
-	return;
 }
 
 void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
@@ -819,7 +785,6 @@ void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
 
 	SXE_REG_WRITE(hw, SXE_IVAR(reg_idx >> 1), ivar);
 
-	return;
 }
 
 void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
@@ -831,7 +796,6 @@ void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
 
 	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), eitr);
 
-	return;
 }
 
 static void sxe_hw_event_irq_interval_set(struct sxe_hw *hw,
@@ -839,28 +803,24 @@ static void sxe_hw_event_irq_interval_set(struct sxe_hw *hw,
 {
 	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), value);
 
-	return;
 }
 
 void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_EIAC, value);
 
-	return;
 }
 
 void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_EIMC, value);
 
-	return;
 }
 
 void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_EIMS, value);
 
-	return;
 }
 
 void sxe_hw_all_irq_disable(struct sxe_hw *hw)
@@ -872,7 +832,6 @@ void sxe_hw_all_irq_disable(struct sxe_hw *hw)
 
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 static void sxe_hw_spp_configure(struct sxe_hw *hw, u32 hw_spp_proc_delay_us)
@@ -882,7 +841,6 @@ static void sxe_hw_spp_configure(struct sxe_hw *hw, u32 hw_spp_proc_delay_us)
 			~SXE_SPP_PROC_DELAY_US_MASK) |
 			hw_spp_proc_delay_us);
 
-	return;
 }
 
 static s32 sxe_hw_irq_test(struct sxe_hw *hw, u32 *icr, bool shared)
@@ -985,13 +943,12 @@ u32 sxe_hw_link_speed_get(struct sxe_hw *hw)
 	struct sxe_adapter *adapter = hw->adapter;
 	value = SXE_REG_READ(hw, SXE_COMCTRL);
 
-	if ((value & SXE_COMCTRL_SPEED_10G) == SXE_COMCTRL_SPEED_10G) {
+	if ((value & SXE_COMCTRL_SPEED_10G) == SXE_COMCTRL_SPEED_10G)
 		speed = SXE_LINK_SPEED_10GB_FULL;
-	} else if ((value & SXE_COMCTRL_SPEED_1G) == SXE_COMCTRL_SPEED_1G) {
+	else if ((value & SXE_COMCTRL_SPEED_1G) == SXE_COMCTRL_SPEED_1G)
 		speed = SXE_LINK_SPEED_1GB_FULL;
-	} else {
+	else
 		speed = SXE_LINK_SPEED_UNKNOWN;
-	}
 
 	LOG_DEBUG_BDF("hw link speed=%x, (0x80=10G, 0x20=1G)\n, reg=%x",
 			speed, value);
@@ -1005,18 +962,17 @@ void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed)
 
 	ctrl = SXE_REG_READ(hw, SXE_COMCTRL);
 
-	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+	if (speed == SXE_LINK_SPEED_1GB_FULL) {
 		ctrl |= SXE_COMCTRL_SPEED_1G;
-	} else if (SXE_LINK_SPEED_10GB_FULL == speed) {
+	} else if (speed == SXE_LINK_SPEED_10GB_FULL) {
 		ctrl |= SXE_COMCTRL_SPEED_10G;
 	}
 
 	SXE_REG_WRITE(hw, SXE_COMCTRL, ctrl);
 
-	return;
 }
 
-STATIC bool sxe_hw_1g_link_up_check(struct sxe_hw *hw)
+static bool sxe_hw_1g_link_up_check(struct sxe_hw *hw)
 {
 	return (SXE_REG_READ(hw, SXE_LINKS) & SXE_LINKS_UP) ? true : false;
 }
@@ -1036,9 +992,9 @@ bool sxe_hw_is_link_state_up(struct sxe_hw *hw)
 
 		link_speed = sxe_hw_link_speed_get(hw);
 		if ((link_speed == SXE_LINK_SPEED_10GB_FULL) &&
-		    (links_reg & SXE_10G_LINKS_DOWN)) {
+			(links_reg & SXE_10G_LINKS_DOWN))
 			ret = false;
-		}
+
 	}
 
 	return ret;
@@ -1052,7 +1008,6 @@ void sxe_hw_mac_pad_enable(struct sxe_hw *hw)
 	ctl |= SXE_MACCFG_PAD_EN;
 	SXE_REG_WRITE(hw, SXE_MACCFG, ctl);
 
-	return;
 }
 
 s32 sxe_hw_fc_enable(struct sxe_hw *hw)
@@ -1066,7 +1021,7 @@ s32 sxe_hw_fc_enable(struct sxe_hw *hw)
 
 	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
 	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
-		       SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+			   SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
 
 	switch (hw->fc.current_mode) {
 	case SXE_FC_NONE:
@@ -1089,7 +1044,7 @@ s32 sxe_hw_fc_enable(struct sxe_hw *hw)
 
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
-		    hw->fc.high_water[i]) {
+			hw->fc.high_water[i]) {
 			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
 			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
 			fcrth = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
@@ -1103,9 +1058,8 @@ s32 sxe_hw_fc_enable(struct sxe_hw *hw)
 
 	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
 
-	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) {
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE))
 		flctrl_val |= (SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
-	}
 
 	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
 
@@ -1116,9 +1070,8 @@ s32 sxe_hw_fc_enable(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
 
 	reg = hw->fc.pause_time * 0x00010001U;
-	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
 		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
-	}
 
 	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
 
@@ -1130,9 +1083,8 @@ void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw)
 {
 	u32 reg = 0;
 
-	if (hw->fc.requested_mode == SXE_FC_DEFAULT) {
+	if (hw->fc.requested_mode == SXE_FC_DEFAULT)
 		hw->fc.requested_mode = SXE_FC_FULL;
-	}
 
 	reg = SXE_REG_READ(hw, SXE_PCS1GANA);
 
@@ -1154,7 +1106,6 @@ void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw)
 	}
 
 	SXE_REG_WRITE(hw, SXE_PCS1GANA, reg);
-	return;
 }
 
 s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
@@ -1169,20 +1120,18 @@ s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
 
 	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
 	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
-		       SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+			   SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
 
 	switch (hw->fc.current_mode) {
 	case SXE_FC_NONE:
 		rx_en_num = 0;
 		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
-			if (reg & SXE_FCRTH_FCEN) {
+			if (reg & SXE_FCRTH_FCEN)
 				rx_en_num++;
-			}
 		}
-		if (rx_en_num > 1) {
+		if (rx_en_num > 1)
 			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
-		}
 
 		break;
 
@@ -1192,14 +1141,12 @@ s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
 		rx_en_num = 0;
 		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
-			if (reg & SXE_FCRTH_FCEN) {
+			if (reg & SXE_FCRTH_FCEN)
 				rx_en_num++;
-			}
 		}
 
-		if (rx_en_num > 1) {
+		if (rx_en_num > 1)
 			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
-		}
 
 		break;
 	case SXE_FC_TX_PAUSE:
@@ -1216,7 +1163,7 @@ s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
 	}
 
 	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
-	    hw->fc.high_water[tc_idx]) {
+		hw->fc.high_water[tc_idx]) {
 		fcrtl = (hw->fc.low_water[tc_idx] << 9) | SXE_FCRTL_XONE;
 		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), fcrtl);
 		fcrth = (hw->fc.high_water[tc_idx] << 9) | SXE_FCRTH_FCEN;
@@ -1243,9 +1190,8 @@ s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
 	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
 
 	reg = hw->fc.pause_time * 0x00010001U;
-	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
 		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
-	}
 
 	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
 
@@ -1260,7 +1206,6 @@ void sxe_hw_crc_configure(struct sxe_hw *hw)
 	ctrl |=  SXE_PCCTRL_TXCE | SXE_PCCTRL_RXCE | SXE_PCCTRL_PCSC_ALL;
 	SXE_REG_WRITE(hw, SXE_PCCTRL, ctrl);
 
-	return;
 }
 
 void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable)
@@ -1271,7 +1216,6 @@ void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable)
 
 	SXE_REG_WRITE(hw, SXE_LPBKCTRL, value);
 
-	return;
 }
 
 void sxe_hw_mac_txrx_enable(struct sxe_hw *hw)
@@ -1282,7 +1226,6 @@ void sxe_hw_mac_txrx_enable(struct sxe_hw *hw)
 	ctl |= SXE_COMCTRL_TXEN | SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
 	SXE_REG_WRITE(hw, SXE_COMCTRL, ctl);
 
-	return;
 }
 
 void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame)
@@ -1297,7 +1240,6 @@ void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame)
 	maxfs |=  SXE_MAXFS_RFSEL | SXE_MAXFS_TFSEL;
 	SXE_REG_WRITE(hw, SXE_MAXFS, maxfs);
 
-	return;
 }
 
 u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw)
@@ -1324,14 +1266,13 @@ bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw)
 	return supported;
 }
 
-STATIC void sxe_hw_fc_param_init(struct sxe_hw *hw)
+static void sxe_hw_fc_param_init(struct sxe_hw *hw)
 {
 	hw->fc.requested_mode = SXE_FC_FULL;
-	hw->fc.current_mode = SXE_FC_FULL;	
+	hw->fc.current_mode = SXE_FC_FULL;
 	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
 
 	hw->fc.disable_fc_autoneg = true;
-	return;
 }
 
 void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
@@ -1339,7 +1280,6 @@ void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
 {
 	hw->fc.high_water[tc_idx] = mark;
 
-	return;
 }
 
 void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
@@ -1347,7 +1287,6 @@ void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
 {
 	hw->fc.low_water[tc_idx] = mark;
 
-	return;
 }
 
 bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw)
@@ -1359,7 +1298,6 @@ void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
 							bool is_disabled)
 {
 	hw->fc.disable_fc_autoneg = is_disabled;
-	return;
 }
 
 static enum sxe_fc_mode sxe_hw_fc_current_mode_get(struct sxe_hw *hw)
@@ -1376,7 +1314,6 @@ void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
 						enum sxe_fc_mode mode)
 {
 	hw->fc.requested_mode = mode;
-	return;
 }
 
 static const struct sxe_mac_operations sxe_mac_ops = {
@@ -1415,14 +1352,12 @@ u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx)
 void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl)
 {
 	SXE_REG_WRITE(hw, SXE_FCTRL, filter_ctrl);
-	return;
 }
 
 void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
 						u32 vmolr, u16 pool_idx)
 {
 	SXE_REG_WRITE(hw, SXE_VMOLR(pool_idx), vmolr);
-	return;
 }
 
 void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable)
@@ -1430,12 +1365,10 @@ void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable)
 	u32 rfctl = SXE_REG_READ(hw, SXE_RFCTL);
 	rfctl &= ~SXE_RFCTL_LRO_DIS;
 
-	if (!is_enable) {
+	if (!is_enable)
 		rfctl |= SXE_RFCTL_LRO_DIS;
-	}
 
 	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
-	return;
 }
 
 void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw)
@@ -1444,7 +1377,6 @@ void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw)
 
 	rfctl |= (SXE_RFCTL_NFSW_DIS | SXE_RFCTL_NFSR_DIS);
 	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
-	return;
 }
 
 void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw)
@@ -1454,7 +1386,6 @@ void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw)
 	rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
 	rxcsum |= SXE_RXCSUM_PCSD;
 	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
-	return;
 }
 
 void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr)
@@ -1462,16 +1393,15 @@ void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr)
 	u32 mac_addr_h, mac_addr_l;
 
 	mac_addr_l = ((u32)mac_addr[5] |
-		    ((u32)mac_addr[4] << 8) |
-		    ((u32)mac_addr[3] << 16) |
-		    ((u32)mac_addr[2] << 24));
+			((u32)mac_addr[4] << 8) |
+			((u32)mac_addr[3] << 16) |
+			((u32)mac_addr[2] << 24));
 	mac_addr_h = (((u32)mac_addr[1] << 16) |
-		    ((u32)mac_addr[0] << 24));
+			((u32)mac_addr[0] << 24));
 
 	SXE_REG_WRITE(hw, SXE_SACONH, mac_addr_h);
 	SXE_REG_WRITE(hw, SXE_SACONL, mac_addr_l);
 
-	return;
 }
 
 s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
@@ -1542,7 +1472,6 @@ void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
 						u8 index, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_MTA(index), value);
-	return;
 }
 
 void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
@@ -1555,7 +1484,6 @@ void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
 	LOG_INFO("mta update value:0x%x.\n", value);
 	SXE_REG_WRITE(hw, SXE_MTA(reg_idx), value);
 
-	return;
 }
 
 void sxe_hw_mc_filter_enable(struct sxe_hw *hw)
@@ -1564,7 +1492,6 @@ void sxe_hw_mc_filter_enable(struct sxe_hw *hw)
 
 	SXE_REG_WRITE(hw, SXE_MCSTCTRL, value);
 
-	return;
 }
 
 static void sxe_hw_mc_filter_disable(struct sxe_hw *hw)
@@ -1575,7 +1502,6 @@ static void sxe_hw_mc_filter_disable(struct sxe_hw *hw)
 
 	SXE_REG_WRITE(hw, SXE_MCSTCTRL, value);
 
-	return;
 }
 
 void sxe_hw_uc_addr_clear(struct sxe_hw *hw)
@@ -1594,26 +1520,22 @@ void sxe_hw_uc_addr_clear(struct sxe_hw *hw)
 
 	LOG_DEV_DEBUG("clear %u uta filter addr register\n",
 			SXE_UTA_ENTRY_NUM_MAX);
-	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) {
+	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++)
 		SXE_REG_WRITE(hw, SXE_UTA(i), 0);
-	}
 
 	SXE_REG_WRITE(hw, SXE_MCSTCTRL, SXE_MC_FILTER_TYPE0);
 
 	LOG_DEV_DEBUG("clear %u mta filter addr register\n",
 			SXE_MTA_ENTRY_NUM_MAX);
-	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++)
 		SXE_REG_WRITE(hw, SXE_MTA(i), 0);
-	}
 
-	return;
 }
 
 static void sxe_hw_ethertype_filter_set(struct sxe_hw *hw,
 						u8 filter_type, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), value);
-	return;
 }
 
 void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 default_pool)
@@ -1622,14 +1544,13 @@ void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 default_pool)
 
 	ctrl = SXE_REG_READ(hw, SXE_VT_CTL);
 
-	ctrl |= SXE_VT_CTL_VT_ENABLE; 
+	ctrl |= SXE_VT_CTL_VT_ENABLE;
 	ctrl &= ~SXE_VT_CTL_POOL_MASK;
 	ctrl |= default_pool << SXE_VT_CTL_POOL_SHIFT;
-	ctrl |= SXE_VT_CTL_REPLEN; 
+	ctrl |= SXE_VT_CTL_REPLEN;
 
 	SXE_REG_WRITE(hw, SXE_VT_CTL, ctrl);
 
-	return;
 }
 
 void sxe_hw_vt_disable(struct sxe_hw *hw)
@@ -1640,7 +1561,6 @@ void sxe_hw_vt_disable(struct sxe_hw *hw)
 	vmdctl &= ~SXE_VMD_CTL_POOL_EN;
 	SXE_REG_WRITE(hw, SXE_VT_CTL, vmdctl);
 
-	return;
 }
 
 #ifdef SXE_WOL_CONFIGURE
@@ -1649,7 +1569,6 @@ static void sxe_hw_wol_status_set(struct sxe_hw *hw)
 {
 	SXE_REG_WRITE(hw, SXE_WUS, ~0);
 
-	return;
 }
 
 static void sxe_hw_wol_mode_set(struct sxe_hw *hw, u32 wol_status)
@@ -1660,16 +1579,14 @@ static void sxe_hw_wol_mode_set(struct sxe_hw *hw, u32 wol_status)
 
 	fctrl = SXE_REG_READ(hw, SXE_FCTRL);
 	fctrl |= SXE_FCTRL_BAM;
-	if (wol_status & SXE_WUFC_MC) {
+	if (wol_status & SXE_WUFC_MC)
 		fctrl |= SXE_FCTRL_MPE;
-	}
 
 	SXE_REG_WRITE(hw, SXE_FCTRL, fctrl);
 
 	SXE_REG_WRITE(hw, SXE_WUFC, wol_status);
 	sxe_hw_wol_status_set(hw);
 
-	return;
 }
 
 static void sxe_hw_wol_mode_clean(struct sxe_hw *hw)
@@ -1677,7 +1594,6 @@ static void sxe_hw_wol_mode_clean(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_WUC, 0);
 	SXE_REG_WRITE(hw, SXE_WUFC, 0);
 
-	return;
 }
 #endif
 
@@ -1708,7 +1624,7 @@ static const struct sxe_filter_mac_operations sxe_filter_mac_ops = {
 	.wol_status_set			= sxe_hw_wol_status_set,
 #endif
 
-	.vt_disable                     = sxe_hw_vt_disable,
+	.vt_disable			= sxe_hw_vt_disable,
 };
 
 u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index)
@@ -1720,7 +1636,6 @@ static void sxe_hw_vlan_pool_filter_write(struct sxe_hw *hw,
 						u16 reg_index, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_VLVF(reg_index), value);
-	return;
 }
 
 static u32 sxe_hw_vlan_pool_filter_bitmap_read(struct sxe_hw *hw,
@@ -1733,14 +1648,12 @@ static void sxe_hw_vlan_pool_filter_bitmap_write(struct sxe_hw *hw,
 						u16 reg_index, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_VLVFB(reg_index), value);
-	return;
 }
 
 void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
 					u16 reg_index, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_VFTA(reg_index), value);
-	return;
 }
 
 u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index)
@@ -1753,14 +1666,12 @@ void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable)
 	u32 vlnctrl;
 
 	vlnctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
-	if (is_enable) {
+	if (is_enable)
 		vlnctrl |= SXE_VLNCTRL_VFE;
-	} else {
+	else
 		vlnctrl &= ~SXE_VLNCTRL_VFE;
-	}
 
 	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlnctrl);
-	return;
 }
 
 static void sxe_hw_vlan_untagged_pkts_rcv_switch(struct sxe_hw *hw,
@@ -1768,15 +1679,13 @@ static void sxe_hw_vlan_untagged_pkts_rcv_switch(struct sxe_hw *hw,
 {
 	u32 vmolr = SXE_REG_READ(hw, SXE_VMOLR(vf));
 	vmolr |= SXE_VMOLR_BAM;
-	if (accept) {
+	if (accept)
 		vmolr |= SXE_VMOLR_AUPE;
-	} else {
+	else
 		vmolr &= ~SXE_VMOLR_AUPE;
-	}
 
 	LOG_WARN("vf:%u value:0x%x.\n", vf, vmolr);
 	SXE_REG_WRITE(hw, SXE_VMOLR(vf), vmolr);
-	return;
 }
 
 s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass)
@@ -1801,14 +1710,12 @@ s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass)
 			goto l_end;
 		}
 
-		if (!first_empty_slot && !bits) {
+		if (!first_empty_slot && !bits)
 			first_empty_slot = regindex;
-		}
 	}
 
-	if (!first_empty_slot) {
+	if (!first_empty_slot)
 		LOG_DEV_WARN("no space in VLVF.\n");
-	}
 
 	ret = first_empty_slot ? : -SXE_ERR_NO_SPACE;
 l_end:
@@ -1839,15 +1746,13 @@ s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
 	vfta_delta &= vlan_on ? ~vfta : vfta;
 	vfta ^= vfta_delta;
 
-	if (!(SXE_REG_READ(hw, SXE_VT_CTL) & SXE_VT_CTL_VT_ENABLE)) {
+	if (!(SXE_REG_READ(hw, SXE_VT_CTL) & SXE_VT_CTL_VT_ENABLE))
 		goto vfta_update;
-	}
 
 	vlvf_index = sxe_hw_vlvf_slot_find(hw, vid, vlvf_bypass);
 	if (vlvf_index < 0) {
-		if (vlvf_bypass) {
+		if (vlvf_bypass)
 			goto vfta_update;
-		}
 
 		ret = vlvf_index;
 		goto l_end;
@@ -1856,17 +1761,15 @@ s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
 	bits = SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32));
 
 	bits |= BIT(pool % 32);
-	if (vlan_on) {
+	if (vlan_on)
 		goto vlvf_update;
-	}
 
 	bits ^= BIT(pool % 32);
 
 	if (!bits &&
-	    !SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + 1 - pool / 32))) {
-		if (vfta_delta) {
+		!SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + 1 - pool / 32))) {
+		if (vfta_delta)
 			SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
-		}
 
 		SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), 0);
 		SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), 0);
@@ -1881,9 +1784,8 @@ s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
 	SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), SXE_VLVF_VIEN | vid);
 
 vfta_update:
-	if (vfta_delta) {
+	if (vfta_delta)
 		SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
-	}
 
 l_end:
 	return ret;
@@ -1893,9 +1795,8 @@ void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw)
 {
 	u32 offset;
 
-	for (offset = 0; offset < SXE_VFT_TBL_SIZE; offset++) {
+	for (offset = 0; offset < SXE_VFT_TBL_SIZE; offset++)
 		SXE_REG_WRITE(hw, SXE_VFTA(offset), 0);
-	}
 
 	for (offset = 0; offset < SXE_VLVF_ENTRIES; offset++) {
 		SXE_REG_WRITE(hw, SXE_VLVF(offset), 0);
@@ -1903,7 +1804,6 @@ void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw)
 		SXE_REG_WRITE(hw, SXE_VLVFB(offset * 2 + 1), 0);
 	}
 
-	return;
 }
 
 static const struct sxe_filter_vlan_operations sxe_filter_vlan_ops = {
@@ -1924,56 +1824,51 @@ static void sxe_hw_rx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
 {
 	u32 dbucfg = SXE_REG_READ(hw, SXE_DRXCFG);
 
-	if (is_on) {
+	if (is_on)
 		dbucfg |= SXE_DRXCFG_DBURX_START;
-	} else {
+	else
 		dbucfg &= ~SXE_DRXCFG_DBURX_START;
-	}
 
 	SXE_REG_WRITE(hw, SXE_DRXCFG, dbucfg);
 
-	return;
 }
 
 static void sxe_hw_rx_pkt_buf_size_configure(struct sxe_hw *hw,
-			     u8 num_pb,
-			     u32 headroom,
-			     u16 strategy)
+				 u8 num_pb,
+				 u32 headroom,
+				 u16 strategy)
 {
 	u16 total_buf_size = (SXE_RX_PKT_BUF_SIZE - headroom);
 	u32 rx_buf_size;
 	u16 i = 0;
 
-	if (!num_pb) {
+	if (!num_pb)
 		num_pb = 1;
-	}
 
 	switch (strategy) {
 	case (PBA_STRATEGY_WEIGHTED):
 		rx_buf_size = ((total_buf_size * 5 * 2) / (num_pb * 8));
 		total_buf_size -= rx_buf_size * (num_pb / 2);
 		rx_buf_size <<= SXE_RX_PKT_BUF_SIZE_SHIFT;
-		for (i = 0; i < (num_pb / 2); i++) {
+		for (i = 0; i < (num_pb / 2); i++)
 			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
-		}
+
 		fallthrough;
 	case (PBA_STRATEGY_EQUAL):
 		rx_buf_size = (total_buf_size / (num_pb - i))
 				<< SXE_RX_PKT_BUF_SIZE_SHIFT;
-		for (; i < num_pb; i++) {
+		for (; i < num_pb; i++)
 			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
-		}
+
 		break;
 
 	default:
 		break;
 	}
 
-	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++)
 		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
-	}
 
-	return;
 }
 
 u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb)
@@ -1990,28 +1885,27 @@ void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
 	mrqc &= ~SXE_MRQE_MASK;
 
 	if (sriov_enable) {
-		if (tcs > 4) {
-			mrqc |= SXE_MRQC_VMDQRT8TCEN;	
-		} else if (tcs > 1) {
-			mrqc |= SXE_MRQC_VMDQRT4TCEN;	
-		} else if (is_4q_per_pool == true) {
+		if (tcs > 4)
+			mrqc |= SXE_MRQC_VMDQRT8TCEN;
+		else if (tcs > 1)
+			mrqc |= SXE_MRQC_VMDQRT4TCEN;
+		else if (is_4q_per_pool)
 			mrqc |= SXE_MRQC_VMDQRSS32EN;
-		} else {
+		else
 			mrqc |= SXE_MRQC_VMDQRSS64EN;
-		}
+
 	} else {
-		if (tcs > 4) {
+		if (tcs > 4)
 			mrqc |= SXE_MRQC_RTRSS8TCEN;
-		} else if (tcs > 1) {
+		else if (tcs > 1)
 			mrqc |= SXE_MRQC_RTRSS4TCEN;
-		} else {
+		else
 			mrqc |= SXE_MRQC_RSSEN;
-		}
+
 	}
 
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 static void sxe_hw_rss_hash_pkt_type_set(struct sxe_hw *hw, u32 version)
@@ -2020,21 +1914,19 @@ static void sxe_hw_rss_hash_pkt_type_set(struct sxe_hw *hw, u32 version)
 	u32 rss_field = 0;
 
 	rss_field |= SXE_MRQC_RSS_FIELD_IPV4 |
-		     SXE_MRQC_RSS_FIELD_IPV4_TCP |
-		     SXE_MRQC_RSS_FIELD_IPV6 |
-		     SXE_MRQC_RSS_FIELD_IPV6_TCP;
+			 SXE_MRQC_RSS_FIELD_IPV4_TCP |
+			 SXE_MRQC_RSS_FIELD_IPV6 |
+			 SXE_MRQC_RSS_FIELD_IPV6_TCP;
 
-	if (version == SXE_RSS_IP_VER_4) {
+	if (version == SXE_RSS_IP_VER_4)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
-	}
-	if (version == SXE_RSS_IP_VER_6) {
+
+	if (version == SXE_RSS_IP_VER_6)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
-	}
 
 	mrqc |= rss_field;
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 static void sxe_hw_rss_hash_pkt_type_update(struct sxe_hw *hw,
@@ -2045,23 +1937,21 @@ static void sxe_hw_rss_hash_pkt_type_update(struct sxe_hw *hw,
 	mrqc = SXE_REG_READ(hw, SXE_MRQC);
 
 	mrqc |= SXE_MRQC_RSS_FIELD_IPV4
-	      | SXE_MRQC_RSS_FIELD_IPV4_TCP
-	      | SXE_MRQC_RSS_FIELD_IPV6
-	      | SXE_MRQC_RSS_FIELD_IPV6_TCP;
+		  | SXE_MRQC_RSS_FIELD_IPV4_TCP
+		  | SXE_MRQC_RSS_FIELD_IPV6
+		  | SXE_MRQC_RSS_FIELD_IPV6_TCP;
 
 	mrqc &= ~(SXE_MRQC_RSS_FIELD_IPV4_UDP |
 		  SXE_MRQC_RSS_FIELD_IPV6_UDP);
 
-	if (version == SXE_RSS_IP_VER_4) {
+	if (version == SXE_RSS_IP_VER_4)
 		mrqc |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
-	}
-	if (version == SXE_RSS_IP_VER_6) {
+
+	if (version == SXE_RSS_IP_VER_6)
 		mrqc |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
-	}
 
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 static void sxe_hw_rss_rings_used_set(struct sxe_hw *hw, u32 rss_num,
@@ -2069,35 +1959,29 @@ static void sxe_hw_rss_rings_used_set(struct sxe_hw *hw, u32 rss_num,
 {
 	u32 psrtype = 0;
 
-	if (rss_num > 3) {
+	if (rss_num > 3)
 		psrtype |= 2u << 29;
-	} else if (rss_num > 1) {
+	else if (rss_num > 1)
 		psrtype |= 1u << 29;
-	}
 
-	while (pool--) {
+	while (pool--)
 		SXE_REG_WRITE(hw, SXE_PSRTYPE(pf_offset + pool), psrtype);
-	}
 
-	return;
 }
 
 void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key)
 {
 	u32 i;
 
-	for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) {
+	for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++)
 		SXE_REG_WRITE(hw, SXE_RSSRK(i), rss_key[i]);
-	}
 
-	return;
 }
 
 void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
 						u16 reg_idx, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_RETA(reg_idx >> 2), value);
-	return;
 }
 
 void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl)
@@ -2114,7 +1998,6 @@ void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl)
 			tbl = 0;
 		}
 	}
-	return;
 }
 
 void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw)
@@ -2132,7 +2015,6 @@ void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw)
 	rxctrl |= SXE_RXCTRL_RXEN;
 	SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl);
 
-	return;
 }
 
 void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw)
@@ -2153,7 +2035,6 @@ void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw)
 		SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl);
 	}
 
-	return;
 }
 
 static void sxe_hw_rx_func_switch_on(struct sxe_hw *hw)
@@ -2164,7 +2045,6 @@ static void sxe_hw_rx_func_switch_on(struct sxe_hw *hw)
 	rxctrl |= SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
 	SXE_REG_WRITE(hw, SXE_COMCTRL, rxctrl);
 
-	return;
 }
 
 void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
@@ -2182,27 +2062,22 @@ void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
 		SXE_REG_WRITE(hw, SXE_DTXCFG, dbucfg);
 	}
 
-	return;
 }
 
 void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb)
 {
 	u32 i, tx_pkt_size;
 
-	if (!num_pb){
+	if (!num_pb)
 		num_pb = 1;
-	}
 
 	tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb;
-	for (i = 0; i < num_pb; i++) {
+	for (i = 0; i < num_pb; i++)
 		SXE_REG_WRITE(hw, SXE_TXPBSIZE(i), tx_pkt_size);
-	}
 
-	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++)
 		SXE_REG_WRITE(hw, SXE_TXPBSIZE(i), 0);
-	}
 
-	return;
 }
 
 void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on)
@@ -2217,7 +2092,6 @@ void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on)
 
 	SXE_REG_WRITE(hw, SXE_LRODBU, lro_dbu);
 
-	return;
 }
 
 static void sxe_hw_vf_rx_switch(struct sxe_hw *hw,
@@ -2232,10 +2106,9 @@ static void sxe_hw_vf_rx_switch(struct sxe_hw *hw,
 
 	SXE_REG_WRITE(hw, SXE_VFRE(reg_offset), vfre);
 
-	return;
 }
 
-STATIC s32 sxe_hw_fnav_wait_init_done(struct sxe_hw *hw)
+static s32 sxe_hw_fnav_wait_init_done(struct sxe_hw *hw)
 {
 	u32 i;
 	s32 ret = 0;
@@ -2266,16 +2139,15 @@ void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl)
 	SXE_REG_WRITE(hw, SXE_FNAVSKEY, SXE_FNAV_SAMPLE_HASH_KEY);
 
 	fnavctrl_ori = SXE_REG_READ(hw, SXE_FNAVCTRL);
-	if((fnavctrl_ori & 0x13) != (fnavctrl & 0x13)) {
+	if ((fnavctrl_ori & 0x13) != (fnavctrl & 0x13))
 		is_clear_stat = true;
-	}
 
 	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
 	SXE_WRITE_FLUSH(hw);
 
 	sxe_hw_fnav_wait_init_done(hw);
 
-	if(is_clear_stat) {
+	if (is_clear_stat) {
 		SXE_REG_READ(hw, SXE_FNAVUSTAT);
 		SXE_REG_READ(hw, SXE_FNAVFSTAT);
 		SXE_REG_READ(hw, SXE_FNAVMATCH);
@@ -2283,7 +2155,6 @@ void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl)
 		SXE_REG_READ(hw, SXE_FNAVLEN);
 	}
 
-	return;
 }
 
 static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw,
@@ -2295,8 +2166,8 @@ static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw,
 
 	if ((sxe_fnav_mode != SXE_FNAV_SAMPLE_MODE) &&
 		(sxe_fnav_mode != SXE_FNAV_SPECIFIC_MODE)) {
-		LOG_ERROR_BDF("mode[%u] a error fnav mode, fnav do not work. please use"
-			"SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE\n",
+		LOG_ERROR_BDF("mode[%u] is an invalid fnav mode, fnav will not work. "
+			"Use SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE\n",
 			sxe_fnav_mode);
 		goto l_end;
 	}
@@ -2307,8 +2178,8 @@ static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw,
 	}
 
 	fnavctrl |= (0x6 << SXE_FNAVCTRL_FLEX_SHIFT) |
-		    (0xA << SXE_FNAVCTRL_MAX_LENGTH_SHIFT) |
-		    (4 << SXE_FNAVCTRL_FULL_THRESH_SHIFT);
+			(0xA << SXE_FNAVCTRL_MAX_LENGTH_SHIFT) |
+			(4 << SXE_FNAVCTRL_FULL_THRESH_SHIFT);
 
 	sxe_hw_fnav_enable(hw, fnavctrl);
 
@@ -2359,7 +2230,7 @@ static s32 sxe_hw_fnav_flow_type_mask_get(struct sxe_hw *hw,
 	case 0x0:
 		*fnavm |= SXE_FNAVM_L4P;
 		if (input_mask->ntuple.dst_port ||
-		    input_mask->ntuple.src_port) {
+			input_mask->ntuple.src_port) {
 			LOG_DEV_ERR("error on src/dst port mask\n");
 			ret = -SXE_ERR_CONFIG;
 			goto l_ret;
@@ -2423,7 +2294,7 @@ static s32 sxe_hw_fnav_flex_bytes_mask_get(struct sxe_hw *hw,
 }
 
 s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
-				    union sxe_fnav_rule_info *input_mask)
+					union sxe_fnav_rule_info *input_mask)
 {
 	s32 ret;
 	u32 fnavm = SXE_FNAVM_DIPv6;
@@ -2431,44 +2302,39 @@ s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
 	struct sxe_adapter *adapter = hw->adapter;
 
 
-	if (input_mask->ntuple.bkt_hash) {
+	if (input_mask->ntuple.bkt_hash)
 		LOG_DEV_ERR("bucket hash should always be 0 in mask\n");
-	}
 
 	ret = sxe_hw_fnav_vm_pool_mask_get(hw, input_mask->ntuple.vm_pool, &fnavm);
-	if (ret) {
+	if (ret)
 		goto l_err_config;
-	}
 
 	ret = sxe_hw_fnav_flow_type_mask_get(hw, input_mask,  &fnavm);
-	if (ret) {
+	if (ret)
 		goto l_err_config;
-	}
 
 	ret = sxe_hw_fnav_vlan_mask_get(hw, input_mask->ntuple.vlan_id, &fnavm);
-	if (ret) {
+	if (ret)
 		goto l_err_config;
-	}
 
 	ret = sxe_hw_fnav_flex_bytes_mask_get(hw, input_mask->ntuple.flex_bytes, &fnavm);
-	if (ret) {
+	if (ret)
 		goto l_err_config;
-	}
 
 	LOG_DEBUG_BDF("fnavm = 0x%x\n", fnavm);
 	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
 
 	fnavtcpm = sxe_hw_fnav_port_mask_get(input_mask->ntuple.src_port,
-					     input_mask->ntuple.dst_port);
+						 input_mask->ntuple.dst_port);
 
 	LOG_DEBUG_BDF("fnavtcpm = 0x%x\n", fnavtcpm);
 	SXE_REG_WRITE(hw, SXE_FNAVTCPM, ~fnavtcpm);
 	SXE_REG_WRITE(hw, SXE_FNAVUDPM, ~fnavtcpm);
 
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIP4M,
-			     ~input_mask->ntuple.src_ip[0]);
+				 ~input_mask->ntuple.src_ip[0]);
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVDIP4M,
-			     ~input_mask->ntuple.dst_ip[0]);
+				 ~input_mask->ntuple.dst_ip[0]);
 
 	return 0;
 
@@ -2476,16 +2342,15 @@ s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
 	return -SXE_ERR_CONFIG;
 }
 
-STATIC s32 sxe_hw_fnav_cmd_complete_check(struct sxe_hw *hw,
+static s32 sxe_hw_fnav_cmd_complete_check(struct sxe_hw *hw,
 							u32 *fnavcmd)
 {
 	u32 i;
 
 	for (i = 0; i < SXE_FNAVCMD_CMD_POLL * 10; i++) {
 		*fnavcmd = SXE_REG_READ(hw, SXE_FNAVCMD);
-		if (!(*fnavcmd & SXE_FNAVCMD_CMD_MASK)) {
+		if (!(*fnavcmd & SXE_FNAVCMD_CMD_MASK))
 			return 0;
-		}
 
 		udelay(10);
 	}
@@ -2497,17 +2362,16 @@ static void sxe_hw_fnav_filter_ip_set(struct sxe_hw *hw,
 					union sxe_fnav_rule_info *input)
 {
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(0),
-			     input->ntuple.src_ip[0]);
+				 input->ntuple.src_ip[0]);
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(1),
-			     input->ntuple.src_ip[1]);
+				 input->ntuple.src_ip[1]);
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(2),
-			     input->ntuple.src_ip[2]);
+				 input->ntuple.src_ip[2]);
 
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPSA, input->ntuple.src_ip[0]);
 
 	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPDA, input->ntuple.dst_ip[0]);
 
-	return;
 }
 
 static void sxe_hw_fnav_filter_port_set(struct sxe_hw *hw,
@@ -2520,7 +2384,6 @@ static void sxe_hw_fnav_filter_port_set(struct sxe_hw *hw,
 	fnavport |= be16_to_cpu(input->ntuple.src_port);
 	SXE_REG_WRITE(hw, SXE_FNAVPORT, fnavport);
 
-	return;
 }
 
 static void sxe_hw_fnav_filter_vlan_set(struct sxe_hw *hw,
@@ -2533,7 +2396,6 @@ static void sxe_hw_fnav_filter_vlan_set(struct sxe_hw *hw,
 	fnavvlan |= ntohs(input->ntuple.vlan_id);
 	SXE_REG_WRITE(hw, SXE_FNAVVLAN, fnavvlan);
 
-	return;
 }
 
 static void sxe_hw_fnav_filter_bkt_hash_set(struct sxe_hw *hw,
@@ -2546,7 +2408,6 @@ static void sxe_hw_fnav_filter_bkt_hash_set(struct sxe_hw *hw,
 	fnavhash |= soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT;
 	SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash);
 
-	return;
 }
 
 static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw,
@@ -2561,9 +2422,8 @@ static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw,
 		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
 
 #ifndef SXE_DPDK
-	if (queue == SXE_FNAV_DROP_QUEUE) {
+	if (queue == SXE_FNAV_DROP_QUEUE)
 		fnavcmd |= SXE_FNAVCMD_DROP;
-	}
 #endif
 
 	fnavcmd |= input->ntuple.flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
@@ -2572,9 +2432,8 @@ static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw,
 
 	SXE_REG_WRITE(hw, SXE_FNAVCMD, fnavcmd);
 	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
-	if (ret) {
+	if (ret)
 		LOG_DEV_ERR("flow navigator command did not complete!\n");
-	}
 
 	return ret;
 }
@@ -2597,9 +2456,8 @@ s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
 	SXE_WRITE_FLUSH(hw);
 
 	ret = sxe_hw_fnav_filter_cmd_set(hw, input, queue);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("set fnav filter cmd error. ret=%d\n", ret);
-	}
 
 	return ret;
 }
@@ -2656,7 +2514,6 @@ void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
 
 	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)fnavhashcmd);
 
-	return;
 }
 
 static u64 sxe_hw_fnav_sample_rule_hash_get(struct sxe_hw *hw,
@@ -2787,7 +2644,6 @@ static void sxe_hw_fnav_sample_stats_reinit(struct sxe_hw *hw)
 	SXE_REG_READ(hw, SXE_FNAVMISS);
 	SXE_REG_READ(hw, SXE_FNAVLEN);
 
-	return;
 }
 
 static void sxe_hw_ptp_freq_adjust(struct sxe_hw *hw, u32 adj_freq)
@@ -2796,7 +2652,6 @@ static void sxe_hw_ptp_freq_adjust(struct sxe_hw *hw, u32 adj_freq)
 	SXE_REG_WRITE(hw, SXE_TIMADJH, adj_freq);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw)
@@ -2822,7 +2677,6 @@ void sxe_hw_ptp_systime_init(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_SYSTIMH, 0);
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 void sxe_hw_ptp_init(struct sxe_hw *hw)
@@ -2834,21 +2688,19 @@ void sxe_hw_ptp_init(struct sxe_hw *hw)
 	SXE_TSCTRL_L4_UNICAST;
 
 	regval = SXE_REG_READ(hw, SXE_TSCTRL);
-	regval &= ~SXE_TSCTRL_ONESTEP;	
-	regval &= ~SXE_TSCTRL_CSEN;	
+	regval &= ~SXE_TSCTRL_ONESTEP;
+	regval &= ~SXE_TSCTRL_CSEN;
 	regval |= tsctl;
 	SXE_REG_WRITE(hw, SXE_TSCTRL, regval);
 
 	SXE_REG_WRITE(hw, SXE_TIMINC,
 			SXE_TIMINC_SET(SXE_INCPD, SXE_IV_NS, SXE_IV_SNS));
 
-	return;
 }
 
 void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw)
 {
 	SXE_REG_READ(hw, SXE_RXSTMPH);
-	return;
 }
 
 void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
@@ -2878,13 +2730,11 @@ void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
 	*ts_ns  = (sec_8bit << 24) | ((reg_ns & 0xFFFFFF00) >> 8);
 
 	if (unlikely((sec_24bit - systimm_24bit) >= 0x00FFFFF0)) {
-		if (systimm_8bit >= 1) {
+		if (systimm_8bit >= 1)
 			systimm_8bit -= 1;
-		}
 	}
 
 	*ts_sec = systimm_8bit | sec_24bit;
-	return;
 }
 
 u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw)
@@ -2909,9 +2759,8 @@ bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw)
 	u32 tsyncrxctl;
 
 	tsyncrxctl = SXE_REG_READ(hw, SXE_TSYNCRXCTL);
-	if (tsyncrxctl & SXE_TSYNCRXCTL_RXTT) {
+	if (tsyncrxctl & SXE_TSYNCRXCTL_RXTT)
 		rx_tmstamp_valid = true;
-	}
 
 	return rx_tmstamp_valid;
 }
@@ -2923,9 +2772,9 @@ void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
 
 	if (is_l2) {
 		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588),
-			(SXE_ETQF_FILTER_EN |   
-			 SXE_ETQF_1588 |	
-			 ETH_P_1588));		
+			(SXE_ETQF_FILTER_EN |
+			 SXE_ETQF_1588 |
+			 ETH_P_1588));
 	} else {
 		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588), 0);
 	}
@@ -2940,7 +2789,6 @@ void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
 
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw)
@@ -2954,7 +2802,6 @@ void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw)
 			SXE_TSYNCRXCTL_REN));
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 static void sxe_hw_dcb_tc_rss_configure(struct sxe_hw *hw, u16 rss)
@@ -2991,9 +2838,8 @@ static void sxe_hw_tx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
 		wait_delay += delay_interval * 2;
 		txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
 
-		if (!(txdctl & SXE_TXDCTL_ENABLE)) {
+		if (!(txdctl & SXE_TXDCTL_ENABLE))
 			return;
-		}
 	}
 
 	LOG_MSG_ERR(drv, "register TXDCTL.ENABLE not cleared within the polling period\n");
@@ -3037,7 +2883,6 @@ static u32 sxe_hw_tx_dbu_fc_status_get(struct sxe_hw *hw)
 static void sxe_hw_fnav_sample_hash_set(struct sxe_hw *hw, u64 hash)
 {
 	SXE_REG64_WRITE(hw, SXE_FNAVHASH, hash);
-	return;
 }
 
 static const struct sxe_dbu_operations sxe_dbu_ops = {
@@ -3088,19 +2933,12 @@ static const struct sxe_dbu_operations sxe_dbu_ops = {
 };
 
 
-void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw, bool crc_strip_on)
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw)
 {
 	u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
 
-	if (crc_strip_on) {
-		rx_dma_ctrl |= SXE_RDRXCTL_CRCSTRIP;
-	} else {
-		rx_dma_ctrl &= ~SXE_RDRXCTL_CRCSTRIP;
-	}
-
 	rx_dma_ctrl &= ~SXE_RDRXCTL_LROFRSTSIZE;
 	SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl);
-	return;
 }
 
 void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw)
@@ -3109,7 +2947,6 @@ void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw)
 
 	rx_dma_ctrl |= SXE_RDRXCTL_LROACKC;
 	SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl);
-	return;
 }
 
 void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx)
@@ -3121,7 +2958,6 @@ void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx)
 	rxdctl |= 0x10;
 	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
 
-	return;
 }
 
 void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
@@ -3156,7 +2992,6 @@ void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
 			  "the polling period\n", reg_idx, is_on);
 	}
 
-	return;
 }
 
 void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
@@ -3172,7 +3007,6 @@ void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on
 
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
@@ -3181,21 +3015,18 @@ void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
 {
 	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), rdh_value);
 	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), rdt_value);
-	return;
 }
 
 static void sxe_hw_rx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
 {
 	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), 0);
 
-	return;
 }
 
 static void sxe_hw_rx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
 {
 	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), 0);
 
-	return;
 }
 
 void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
@@ -3212,12 +3043,10 @@ void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
 	sxe_hw_rx_ring_head_init(hw, reg_idx);
 	sxe_hw_rx_ring_tail_init(hw, reg_idx);
 
-	return;
 }
 
 void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
-				   u32 header_buf_len, u32 pkg_buf_len
-				   )
+				   u32 header_buf_len, u32 pkg_buf_len)
 {
 	u32 srrctl;
 
@@ -3228,7 +3057,6 @@ void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
 
 	SXE_REG_WRITE(hw, SXE_SRRCTL(reg_idx), srrctl);
 
-	return;
 }
 
 void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
@@ -3240,7 +3068,6 @@ void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
 	lroctrl |= max_desc;
 	SXE_REG_WRITE(hw, SXE_LROCTL(reg_idx), lroctrl);
 
-	return;
 }
 
 static u32 sxe_hw_rx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
@@ -3264,7 +3091,6 @@ static void sxe_hw_dcb_arbiter_set(struct sxe_hw *hw, bool is_enable)
 		SXE_REG_WRITE(hw, SXE_RTTDCS, rttdcs);
 	}
 
-	return;
 }
 
 
@@ -3291,11 +3117,10 @@ static void sxe_hw_tx_multi_ring_configure(struct sxe_hw *hw, u8 tcs,
 		} else if (tcs > SXE_DCB_1_TC) {
 			mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
 		} else {
-			if (max_txq > 63) {
+			if (max_txq > 63)
 				mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
-			} else {
+			else
 				mtqc = SXE_MTQC_64Q_1PB;
-			}
 		}
 	}
 
@@ -3303,21 +3128,18 @@ static void sxe_hw_tx_multi_ring_configure(struct sxe_hw *hw, u8 tcs,
 
 	sxe_hw_dcb_arbiter_set(hw, true);
 
-	return;
 }
 
 void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
 {
 	SXE_REG_WRITE(hw, SXE_TDH(reg_idx), 0);
 
-	return;
 }
 
 void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
 {
 	SXE_REG_WRITE(hw, SXE_TDT(reg_idx), 0);
 
-	return;
 }
 
 void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
@@ -3328,14 +3150,12 @@ void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
 
 	SXE_WRITE_FLUSH(hw);
 
-	SXE_REG_WRITE(hw, SXE_TDBAL(reg_idx), (desc_dma_addr & \
-						DMA_BIT_MASK(32)));
+	SXE_REG_WRITE(hw, SXE_TDBAL(reg_idx), (desc_dma_addr & DMA_BIT_MASK(32)));
 	SXE_REG_WRITE(hw, SXE_TDBAH(reg_idx), (desc_dma_addr >> 32));
 	SXE_REG_WRITE(hw, SXE_TDLEN(reg_idx), desc_mem_len);
 	sxe_hw_tx_ring_head_init(hw, reg_idx);
 	sxe_hw_tx_ring_tail_init(hw, reg_idx);
 
-	return;
 }
 
 void sxe_hw_tx_desc_thresh_set(
@@ -3352,7 +3172,6 @@ void sxe_hw_tx_desc_thresh_set(
 
 	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
 
-	return;
 }
 
 void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max)
@@ -3372,7 +3191,6 @@ void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max)
 	SXE_WRITE_FLUSH(hw);
 	usleep_range(1000, 2000);
 
-	return;
 }
 
 void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
@@ -3404,7 +3222,6 @@ void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
 			  "the polling period\n", reg_idx, is_on);
 	}
 
-	return;
 }
 
 void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
@@ -3418,7 +3235,6 @@ void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on
 		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
 	}
 
-	return;
 }
 
 void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
@@ -3426,26 +3242,21 @@ void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
 {
 	u32 i, tx_pkt_size, tx_pb_thresh;
 
-	if (!num_pb){
+	if (!num_pb)
 		num_pb = 1;
-	}
 
 	tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb;
-	if (true == dcb_enable) {
+	if (true == dcb_enable)
 		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_TX_PKT_SIZE_MAX;
-	} else {
+	else
 		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_NODCB_TX_PKT_SIZE_MAX;
-	}
 
-	for (i = 0; i < num_pb; i++) {
+	for (i = 0; i < num_pb; i++)
 		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), tx_pb_thresh);
-	}
 
-	for (; i < SXE_PKG_BUF_NUM_MAX; i++) {
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++)
 		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), 0);
-	}
 
-	return;
 }
 
 void sxe_hw_tx_enable(struct sxe_hw *hw)
@@ -3456,7 +3267,6 @@ void sxe_hw_tx_enable(struct sxe_hw *hw)
 	ctl |= SXE_DMATXCTL_TE;
 	SXE_REG_WRITE(hw, SXE_DMATXCTL, ctl);
 
-	return;
 }
 
 static u32 sxe_hw_tx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
@@ -3476,7 +3286,6 @@ static void sxe_hw_tx_desc_wb_thresh_clear(struct sxe_hw *hw, u8 reg_idx)
 	reg_data |= SXE_TXDCTL_ENABLE;
 	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
 
-	return;
 }
 
 void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
@@ -3486,15 +3295,13 @@ void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
 
 	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_index));
 
-	if (is_enable) {
+	if (is_enable)
 		rxdctl |= SXE_RXDCTL_VME;
-	} else {
+	else
 		rxdctl &= ~SXE_RXDCTL_VME;
-	}
 
 	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_index), rxdctl);
 
-	return;
 }
 
 static void sxe_hw_tx_vlan_tag_set(struct sxe_hw *hw,
@@ -3503,13 +3310,11 @@ static void sxe_hw_tx_vlan_tag_set(struct sxe_hw *hw,
 	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | SXE_VMVIR_VLANA_DEFAULT;
 
 	SXE_REG_WRITE(hw, SXE_VMVIR(vf), vmvir);
-	return;
 }
 
 void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf)
 {
 	SXE_REG_WRITE(hw, SXE_VMVIR(vf), 0);
-	return;
 }
 
 u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf)
@@ -3523,42 +3328,39 @@ void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
 	*head = SXE_REG_READ(hw, SXE_TDH(idx));
 	*tail = SXE_REG_READ(hw, SXE_TDT(idx));
 
-	return;
 }
 
 void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
-				      u16 *refill,
-				      u16 *max,
-				      u8 *bwg_id,
-				      u8 *prio_type,
-				      u8 *prio_tc,
-				      u8 max_priority)
-{
-	u32    reg;
-	u32    credit_refill;
-	u32    credit_max;
-	u8     i;
+					  u16 *refill,
+					  u16 *max,
+					  u8 *bwg_id,
+					  u8 *prio_type,
+					  u8 *prio_tc,
+					  u8 max_priority)
+{
+	u32	reg;
+	u32	credit_refill;
+	u32	credit_max;
+	u8	 i;
 
 	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
 	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
 
 	reg = 0;
-	for (i = 0; i < max_priority; i++) {
+	for (i = 0; i < max_priority; i++)
 		reg |= (prio_tc[i] << (i * SXE_RTRUP2TC_UP_SHIFT));
-	}
 
 	SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
 
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		credit_refill = refill[i];
-		credit_max    = max[i];
+		credit_max	= max[i];
 		reg = credit_refill | (credit_max << SXE_RTRPT4C_MCL_SHIFT);
 
 		reg |= (u32)(bwg_id[i]) << SXE_RTRPT4C_BWG_SHIFT;
 
-		if (prio_type[i] == PRIO_LINK) {
+		if (prio_type[i] == PRIO_LINK)
 			reg |= SXE_RTRPT4C_LSP;
-		}
 
 		SXE_REG_WRITE(hw, SXE_RTRPT4C(i), reg);
 	}
@@ -3566,7 +3368,6 @@ void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
 	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
 	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
 
-	return;
 }
 
 void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
@@ -3575,8 +3376,8 @@ void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
 					   u8 *bwg_id,
 					   u8 *prio_type)
 {
-	u32    reg, max_credits;
-	u8     i;
+	u32	reg, max_credits;
+	u8	 i;
 
 	for (i = 0; i < 128; i++) {
 		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
@@ -3589,13 +3390,11 @@ void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
 		reg |= refill[i];
 		reg |= (u32)(bwg_id[i]) << SXE_RTTDT2C_BWG_SHIFT;
 
-		if (prio_type[i] == PRIO_GROUP) {
+		if (prio_type[i] == PRIO_GROUP)
 			reg |= SXE_RTTDT2C_GSP;
-		}
 
-		if (prio_type[i] == PRIO_LINK) {
+		if (prio_type[i] == PRIO_LINK)
 			reg |= SXE_RTTDT2C_LSP;
-		}
 
 		SXE_REG_WRITE(hw, SXE_RTTDT2C(i), reg);
 	}
@@ -3603,7 +3402,6 @@ void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
 	reg = SXE_RTTDCS_TDPAC | SXE_RTTDCS_TDRM;
 	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
 
-	return;
 }
 
 void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
@@ -3618,14 +3416,13 @@ void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
 	u8 i;
 
 	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
-	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT) |
-	      SXE_RTTPCS_ARBDIS;
+		  (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT) |
+		  SXE_RTTPCS_ARBDIS;
 	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
 
 	reg = 0;
-	for (i = 0; i < max_priority; i++) {
+	for (i = 0; i < max_priority; i++)
 		reg |= (prio_tc[i] << (i * SXE_RTTUP2TC_UP_SHIFT));
-	}
 
 	SXE_REG_WRITE(hw, SXE_RTTUP2TC, reg);
 
@@ -3634,22 +3431,19 @@ void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
 		reg |= (u32)(max[i]) << SXE_RTTPT2C_MCL_SHIFT;
 		reg |= (u32)(bwg_id[i]) << SXE_RTTPT2C_BWG_SHIFT;
 
-		if (prio_type[i] == PRIO_GROUP) {
+		if (prio_type[i] == PRIO_GROUP)
 			reg |= SXE_RTTPT2C_GSP;
-		}
 
-		if (prio_type[i] == PRIO_LINK) {
+		if (prio_type[i] == PRIO_LINK)
 			reg |= SXE_RTTPT2C_LSP;
-		}
 
 		SXE_REG_WRITE(hw, SXE_RTTPT2C(i), reg);
 	}
 
 	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
-	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT);
+		  (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT);
 	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
 
-	return;
 }
 
 void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
@@ -3682,9 +3476,8 @@ void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
 	SXE_REG_WRITE(hw, SXE_PFCTOP, reg_val);
 
 	for (i = 0; i < max_priority; i++) {
-		if (prio_tc[i] > max_tc) {
+		if (prio_tc[i] > max_tc)
 			max_tc = prio_tc[i];
-		}
 	}
 
 	for (i = 0; i <= max_tc; i++) {
@@ -3716,13 +3509,11 @@ void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
 	}
 
 	reg = hw->fc.pause_time * 0x00010001;
-	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) {
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
 		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
-	}
 
 	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
 
-	return;
 }
 
 static void sxe_hw_dcb_8tc_vmdq_off_stats_configure(struct sxe_hw *hw)
@@ -3736,28 +3527,26 @@ static void sxe_hw_dcb_8tc_vmdq_off_stats_configure(struct sxe_hw *hw)
 	}
 
 	for (i = 0; i < 32; i++) {
-		if (i < 8) {
+		if (i < 8)
 			reg = 0x00000000;
-		} else if (i < 16) {
+		else if (i < 16)
 			reg = 0x01010101;
-		} else if (i < 20) {
+		else if (i < 20)
 			reg = 0x02020202;
-		} else if (i < 24) {
+		else if (i < 24)
 			reg = 0x03030303;
-		} else if (i < 26) {
+		else if (i < 26)
 			reg = 0x04040404;
-		} else if (i < 28) {
+		else if (i < 28)
 			reg = 0x05050505;
-		} else if (i < 30) {
+		else if (i < 30)
 			reg = 0x06060606;
-		} else {
+		else
 			reg = 0x07070707;
-		}
 
 		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
 	}
 
-	return;
 }
 
 static void sxe_hw_dcb_rx_up_tc_map_set(struct sxe_hw *hw, u8 tc)
@@ -3771,28 +3560,23 @@ static void sxe_hw_dcb_rx_up_tc_map_set(struct sxe_hw *hw, u8 tc)
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		u8 up2tc = reg >> (i * SXE_RTRUP2TC_UP_SHIFT);
 
-		if (up2tc > tc) {
+		if (up2tc > tc)
 			reg &= ~(0x7 << SXE_RTRUP2TC_UP_MASK);
-		}
 	}
 
-	if (reg != rsave) {
+	if (reg != rsave)
 		SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
-	}
 
-	return;
 }
 
 void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
 							bool is_enable)
 {
-	if (true == is_enable) {
+	if (true == is_enable)
 		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN);
-	} else {
+	else
 		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, 0);
-	}
 
-	return;
 }
 
 void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
@@ -3801,12 +3585,10 @@ void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
 	u32 qde = SXE_QDE_ENABLE;
 	u8 i;
 
-	if (pf_vlan) {
+	if (pf_vlan)
 		qde |= SXE_QDE_HIDE_VLAN;
-	}
 
-	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++)
-	{
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) {
 		u32 value;
 
 		SXE_WRITE_FLUSH(hw);
@@ -3817,7 +3599,6 @@ void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
 		SXE_REG_WRITE(hw, SXE_QDE, value);
 	}
 
-	return;
 }
 
 u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
@@ -3830,7 +3611,6 @@ void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
 {
 	SXE_REG_WRITE(hw, SXE_VFRE(reg_idx), bitmap);
 
-	return;
 }
 
 u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
@@ -3843,14 +3623,12 @@ void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
 {
 	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), bitmap);
 
-	return;
 }
 
 void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_RTTBCNRM, value);
 
-	return;
 }
 
 void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
@@ -3859,7 +3637,6 @@ void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
 	SXE_REG_WRITE(hw, SXE_RTTDQSEL, ring_idx);
 	SXE_REG_WRITE(hw, SXE_RTTBCNRC, rate);
 
-	return;
 }
 
 void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
@@ -3871,7 +3648,6 @@ void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
 
 	SXE_REG_WRITE(hw, SXE_VMECM(reg_idx), value);
 
-	return;
 }
 
 void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
@@ -3883,15 +3659,13 @@ void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
 
 	value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
 
-	if (status) {
+	if (status)
 		value |= BIT(bit_index);
-	} else {
+	else
 		value &= ~BIT(bit_index);
-	}
 
 	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
 
-	return;
 }
 
 static void sxe_hw_dcb_rx_up_tc_map_get(struct sxe_hw *hw, u8 *map)
@@ -3904,22 +3678,19 @@ static void sxe_hw_dcb_rx_up_tc_map_get(struct sxe_hw *hw, u8 *map)
 			(reg >> (i * SXE_RTRUP2TC_UP_SHIFT));
 	}
 
-	return;
 }
 
 void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable)
 {
 	u32 srrctl = SXE_REG_READ(hw, SXE_SRRCTL(idx));
 
-	if (true == is_enable) {
+	if (true == is_enable)
 		srrctl |= SXE_SRRCTL_DROP_EN;
-	} else {
+	else
 		srrctl &= ~SXE_SRRCTL_DROP_EN;
-	}
 
 	SXE_REG_WRITE(hw, SXE_SRRCTL(idx), srrctl);
 
-	return;
 }
 
 static void sxe_hw_pool_vlan_anti_spoof_set(struct sxe_hw *hw,
@@ -3931,15 +3702,13 @@ static void sxe_hw_pool_vlan_anti_spoof_set(struct sxe_hw *hw,
 
 	value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
 
-	if (status) {
+	if (status)
 		value |= BIT(bit_index);
-	} else {
+	else
 		value &= ~BIT(bit_index);
-	}
 
 	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
 
-	return;
 }
 
 static void sxe_hw_vf_tx_desc_addr_clear(struct sxe_hw *hw,
@@ -3952,7 +3721,6 @@ static void sxe_hw_vf_tx_desc_addr_clear(struct sxe_hw *hw,
 		SXE_REG_WRITE(hw, SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, i), 0);
 	}
 
-	return;
 }
 
 static void sxe_hw_vf_tx_ring_disable(struct sxe_hw *hw,
@@ -3974,7 +3742,6 @@ static void sxe_hw_vf_tx_ring_disable(struct sxe_hw *hw,
 
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max)
@@ -3987,7 +3754,6 @@ void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max)
 	}
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 static void sxe_hw_tx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
@@ -3996,12 +3762,11 @@ static void sxe_hw_tx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
 
 	value <<= SXE_TPH_TXCTRL_CPUID_SHIFT;
 
-	value |= SXE_TPH_TXCTRL_DESC_RRO_EN | \
-		 SXE_TPH_TXCTRL_DATA_RRO_EN | \
-		 SXE_TPH_TXCTRL_DESC_TPH_EN;
+	value |= (SXE_TPH_TXCTRL_DESC_RRO_EN |
+		 SXE_TPH_TXCTRL_DATA_RRO_EN |
+		 SXE_TPH_TXCTRL_DESC_TPH_EN);
 
 	SXE_REG_WRITE(hw, SXE_TPH_TXCTRL(ring_idx), value);
-	return;
 }
 
 static void sxe_hw_rx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
@@ -4010,29 +3775,26 @@ static void sxe_hw_rx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
 
 	value <<= SXE_TPH_RXCTRL_CPUID_SHIFT;
 
-	value |= SXE_TPH_RXCTRL_DESC_RRO_EN | \
-		 SXE_TPH_RXCTRL_DATA_TPH_EN | \
-		 SXE_TPH_RXCTRL_DESC_TPH_EN;
+	value |= (SXE_TPH_RXCTRL_DESC_RRO_EN |
+		 SXE_TPH_RXCTRL_DATA_TPH_EN |
+		 SXE_TPH_RXCTRL_DESC_TPH_EN);
 
 	SXE_REG_WRITE(hw, SXE_TPH_RXCTRL(ring_idx), value);
-	return;
 }
 
 static void sxe_hw_tph_switch(struct sxe_hw *hw, bool is_enable)
 {
-	if (is_enable == true) {
+	if (is_enable == true)
 		SXE_REG_WRITE(hw, SXE_TPH_CTRL, SXE_TPH_CTRL_MODE_CB2);
-	} else {
+	else
 		SXE_REG_WRITE(hw, SXE_TPH_CTRL, SXE_TPH_CTRL_DISABLE);
-	}
 
-	return;
 }
 
 static const struct sxe_dma_operations sxe_dma_ops = {
-	.rx_dma_ctrl_init		= sxe_hw_rx_dma_ctrl_init,
-	.rx_ring_switch			= sxe_hw_rx_ring_switch,
-	.rx_ring_switch_not_polling	= sxe_hw_rx_ring_switch_not_polling,
+	.rx_dma_ctrl_init	   = sxe_hw_rx_dma_ctrl_init,
+	.rx_ring_switch		 = sxe_hw_rx_ring_switch,
+	.rx_ring_switch_not_polling = sxe_hw_rx_ring_switch_not_polling,
 	.rx_ring_desc_configure		= sxe_hw_rx_ring_desc_configure,
 	.rx_desc_thresh_set		= sxe_hw_rx_desc_thresh_set,
 	.rx_rcv_ctl_configure		= sxe_hw_rx_rcv_ctl_configure,
@@ -4049,7 +3811,7 @@ static const struct sxe_dma_operations sxe_dma_ops = {
 	.tx_desc_thresh_set		= sxe_hw_tx_desc_thresh_set,
 	.tx_desc_wb_thresh_clear	= sxe_hw_tx_desc_wb_thresh_clear,
 	.tx_ring_switch			= sxe_hw_tx_ring_switch,
-	.tx_ring_switch_not_polling     = sxe_hw_tx_ring_switch_not_polling,
+	.tx_ring_switch_not_polling	 = sxe_hw_tx_ring_switch_not_polling,
 	.tx_pkt_buf_thresh_configure	= sxe_hw_tx_pkt_buf_thresh_configure,
 	.tx_desc_ctrl_get		= sxe_hw_tx_desc_ctrl_get,
 	.tx_ring_info_get		= sxe_hw_tx_ring_info_get,
@@ -4084,9 +3846,9 @@ static const struct sxe_dma_operations sxe_dma_ops = {
 	.max_dcb_memory_window_set	= sxe_hw_dcb_max_mem_window_set,
 	.spoof_count_enable		= sxe_hw_spoof_count_enable,
 
-	.vf_tx_ring_disable	        = sxe_hw_vf_tx_ring_disable,
-	.all_ring_disable               = sxe_hw_all_ring_disable,
-	.tx_ring_tail_init 	        = sxe_hw_tx_ring_tail_init,
+	.vf_tx_ring_disable		 = sxe_hw_vf_tx_ring_disable,
+	.all_ring_disable		   = sxe_hw_all_ring_disable,
+	.tx_ring_tail_init		  = sxe_hw_tx_ring_tail_init,
 };
 
 
@@ -4099,16 +3861,15 @@ static void sxe_hw_ipsec_rx_sa_load(struct sxe_hw *hw, u16 idx,
 
 	reg &= SXE_RXTXIDX_IPS_EN;
 	reg |= type << SXE_RXIDX_TBL_SHIFT |
-	       idx << SXE_RXTXIDX_IDX_SHIFT |
-	       SXE_RXTXIDX_WRITE;
+		   idx << SXE_RXTXIDX_IDX_SHIFT |
+		   SXE_RXTXIDX_WRITE;
 	SXE_REG_WRITE(hw, SXE_IPSRXIDX, reg);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 static void sxe_hw_ipsec_rx_ip_store(struct sxe_hw *hw,
-					     __be32 *ip_addr, u8 ip_len, u8 ip_idx)
+						 __be32 *ip_addr, u8 ip_len, u8 ip_idx)
 {
 	u8 i;
 
@@ -4119,11 +3880,10 @@ static void sxe_hw_ipsec_rx_ip_store(struct sxe_hw *hw,
 	SXE_WRITE_FLUSH(hw);
 	sxe_hw_ipsec_rx_sa_load(hw, ip_idx, SXE_IPSEC_IP_TABLE);
 
-	return;
 }
 
 static void sxe_hw_ipsec_rx_spi_store(struct sxe_hw *hw,
-					     __be32 spi, u8 ip_idx, u16 sa_idx)
+						 __be32 spi, u8 ip_idx, u16 sa_idx)
 {
 	SXE_REG_WRITE(hw, SXE_IPSRXSPI, (__force u32)cpu_to_le32((__force u32)spi));
 
@@ -4133,7 +3893,6 @@ static void sxe_hw_ipsec_rx_spi_store(struct sxe_hw *hw,
 
 	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_SPI_TABLE);
 
-	return;
 }
 
 static void sxe_hw_ipsec_rx_key_store(struct sxe_hw *hw,
@@ -4152,7 +3911,6 @@ static void sxe_hw_ipsec_rx_key_store(struct sxe_hw *hw,
 
 	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_KEY_TABLE);
 
-	return;
 }
 
 static void sxe_hw_ipsec_tx_sa_load(struct sxe_hw *hw, u16 idx)
@@ -4164,7 +3922,6 @@ static void sxe_hw_ipsec_tx_sa_load(struct sxe_hw *hw, u16 idx)
 	SXE_REG_WRITE(hw, SXE_IPSTXIDX, reg);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 static void sxe_hw_ipsec_tx_key_store(struct sxe_hw *hw, u32 *key,
@@ -4181,7 +3938,6 @@ static void sxe_hw_ipsec_tx_key_store(struct sxe_hw *hw, u32 *key,
 
 	sxe_hw_ipsec_tx_sa_load(hw, sa_idx);
 
-	return;
 }
 
 static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
@@ -4200,9 +3956,8 @@ static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
 
 	tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) & SXE_SECTXSTAT_SECTX_RDY;
 	rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) & SXE_SECRXSTAT_SECRX_RDY;
-	if (tx_empty && rx_empty) {
-		goto l_out;
-	}
+	if (tx_empty && rx_empty)
+		return;
 
 	if (!is_linkup) {
 		SXE_REG_WRITE(hw, SXE_LPBKCTRL, SXE_LPBKCTRL_EN);
@@ -4213,11 +3968,11 @@ static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
 
 	limit = 20;
 	do {
-	   mdelay(10);
-	   tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) &
-		   SXE_SECTXSTAT_SECTX_RDY;
+		mdelay(10);
+		tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+			SXE_SECTXSTAT_SECTX_RDY;
 	   rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) &
-		   SXE_SECRXSTAT_SECRX_RDY;
+			SXE_SECRXSTAT_SECRX_RDY;
 	} while (!(tx_empty && rx_empty) && limit--);
 
 	if (!is_linkup) {
@@ -4226,8 +3981,6 @@ static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
 		SXE_WRITE_FLUSH(hw);
 	}
 
-l_out:
-	return;
 }
 
 static void sxe_hw_ipsec_engine_start(struct sxe_hw *hw, bool is_linkup)
@@ -4252,7 +4005,6 @@ static void sxe_hw_ipsec_engine_start(struct sxe_hw *hw, bool is_linkup)
 
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 static void sxe_hw_ipsec_engine_stop(struct sxe_hw *hw, bool is_linkup)
@@ -4273,18 +4025,17 @@ static void sxe_hw_ipsec_engine_stop(struct sxe_hw *hw, bool is_linkup)
 	reg |= SXE_SECRXCTRL_SECRX_DIS;
 	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
 
-	 SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
 
-	 reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
-	 reg = (reg & 0xfffffff0) | 0x1;
-	 SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
 
-	 SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
-	 SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
 
-	 SXE_WRITE_FLUSH(hw);
+	SXE_WRITE_FLUSH(hw);
 
-	 return;
 }
 
 bool sxe_hw_ipsec_offload_is_disable(struct sxe_hw *hw)
@@ -4294,7 +4045,7 @@ bool sxe_hw_ipsec_offload_is_disable(struct sxe_hw *hw)
 	bool ret = false;
 
 	if ((tx_dis & SXE_SECTXSTAT_SECTX_OFF_DIS) ||
-	    (rx_dis & SXE_SECRXSTAT_SECRX_OFF_DIS)) {
+		(rx_dis & SXE_SECRXSTAT_SECRX_OFF_DIS)) {
 		ret = true;
 	}
 
@@ -4306,7 +4057,6 @@ void sxe_hw_ipsec_sa_disable(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
 	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
 
-	return;
 }
 
 static const struct sxe_sec_operations sxe_sec_ops = {
@@ -4344,18 +4094,18 @@ void sxe_hw_stats_regs_clean(struct sxe_hw *hw)
 	SXE_REG_READ(hw, SXE_TXDGPC);
 	SXE_REG_READ(hw, SXE_TXDGBCH);
 	SXE_REG_READ(hw, SXE_TXDGBCL);
-	SXE_REG_READ(hw,SXE_RXDDGPC);
+	SXE_REG_READ(hw, SXE_RXDDGPC);
 	SXE_REG_READ(hw, SXE_RXDDGBCH);
-	SXE_REG_READ(hw,SXE_RXDDGBCL);
-	SXE_REG_READ(hw,SXE_RXLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXDDGBCL);
+	SXE_REG_READ(hw, SXE_RXLPBKGPC);
 	SXE_REG_READ(hw, SXE_RXLPBKGBCH);
-	SXE_REG_READ(hw,SXE_RXLPBKGBCL);
-	SXE_REG_READ(hw,SXE_RXDLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXLPBKGBCL);
+	SXE_REG_READ(hw, SXE_RXDLPBKGPC);
 	SXE_REG_READ(hw, SXE_RXDLPBKGBCH);
-	SXE_REG_READ(hw,SXE_RXDLPBKGBCL);
-	SXE_REG_READ(hw,SXE_RXTPCIN);
-	SXE_REG_READ(hw,SXE_RXTPCOUT);
-	SXE_REG_READ(hw,SXE_RXPRDDC);
+	SXE_REG_READ(hw, SXE_RXDLPBKGBCL);
+	SXE_REG_READ(hw, SXE_RXTPCIN);
+	SXE_REG_READ(hw, SXE_RXTPCOUT);
+	SXE_REG_READ(hw, SXE_RXPRDDC);
 	SXE_REG_READ(hw, SXE_TXSWERR);
 	SXE_REG_READ(hw, SXE_TXSWITCH);
 	SXE_REG_READ(hw, SXE_TXREPEAT);
@@ -4382,9 +4132,9 @@ void sxe_hw_stats_regs_clean(struct sxe_hw *hw)
 	SXE_REG_READ(hw, SXE_RFC);
 	SXE_REG_READ(hw, SXE_ROC);
 	SXE_REG_READ(hw, SXE_RJC);
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++)
 		SXE_REG_READ(hw, SXE_PRCPF(i));
-	}
+
 	SXE_REG_READ(hw, SXE_TORL);
 	SXE_REG_READ(hw, SXE_TORH);
 	SXE_REG_READ(hw, SXE_TPR);
@@ -4397,11 +4147,9 @@ void sxe_hw_stats_regs_clean(struct sxe_hw *hw)
 	SXE_REG_READ(hw, SXE_PTC1522);
 	SXE_REG_READ(hw, SXE_MPTC);
 	SXE_REG_READ(hw, SXE_BPTC);
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++)
 		SXE_REG_READ(hw, SXE_PFCT(i));
-	}
 
-	return;
 }
 
 static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
@@ -4414,8 +4162,8 @@ static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
 #endif
 
 	for (i = 0; i < 8; i++) {
-		stats->prcpf[i] += SXE_REG_READ(hw,SXE_PRCPF(i));
-		tx_pfc_num = SXE_REG_READ(hw,SXE_PFCT(i));
+		stats->prcpf[i] += SXE_REG_READ(hw, SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw, SXE_PFCT(i));
 		stats->pfct[i] += tx_pfc_num;
 		stats->total_tx_pause += tx_pfc_num;
 	}
@@ -4433,7 +4181,6 @@ static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
 	}
 #endif
 
-	return;
 }
 
 void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats)
@@ -4453,15 +4200,13 @@ void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats)
 	if (gotch != 0) {
 		LOG_INFO("GOTCH is not clear!\n");
 	}
-	
 	for (i = 0; i < 8; i++) {
-		stats->prcpf[i] += SXE_REG_READ(hw,SXE_PRCPF(i));
-		tx_pfc_num = SXE_REG_READ(hw,SXE_PFCT(i));
+		stats->prcpf[i] += SXE_REG_READ(hw, SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw, SXE_PFCT(i));
 		stats->pfct[i] += tx_pfc_num;
 		stats->total_tx_pause += tx_pfc_num;
 	}
 
-	return;
 }
 
 void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
@@ -4497,20 +4242,20 @@ void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
 	stats->txdgbc += (((u64)SXE_REG_READ(hw, SXE_TXDGBCH) << 32) +
 				SXE_REG_READ(hw, SXE_TXDGBCL));
 
-	stats->rxddpc += SXE_REG_READ(hw,SXE_RXDDGPC);
+	stats->rxddpc += SXE_REG_READ(hw, SXE_RXDDGPC);
 	stats->rxddbc += ((u64)SXE_REG_READ(hw, SXE_RXDDGBCH) << 32) +
-				(SXE_REG_READ(hw,SXE_RXDDGBCL));
+				(SXE_REG_READ(hw, SXE_RXDDGBCL));
 
-	stats->rxlpbkpc += SXE_REG_READ(hw,SXE_RXLPBKGPC);
+	stats->rxlpbkpc += SXE_REG_READ(hw, SXE_RXLPBKGPC);
 	stats->rxlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXLPBKGBCH) << 32) +
-			(SXE_REG_READ(hw,SXE_RXLPBKGBCL));
+			(SXE_REG_READ(hw, SXE_RXLPBKGBCL));
 
-	stats->rxdlpbkpc += SXE_REG_READ(hw,SXE_RXDLPBKGPC);
+	stats->rxdlpbkpc += SXE_REG_READ(hw, SXE_RXDLPBKGPC);
 	stats->rxdlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXDLPBKGBCH) << 32) +
-				(SXE_REG_READ(hw,SXE_RXDLPBKGBCL));
-	stats->rxtpcing += SXE_REG_READ(hw,SXE_RXTPCIN);
-	stats->rxtpceng += SXE_REG_READ(hw,SXE_RXTPCOUT);
-	stats->prddc += SXE_REG_READ(hw,SXE_RXPRDDC);
+				(SXE_REG_READ(hw, SXE_RXDLPBKGBCL));
+	stats->rxtpcing += SXE_REG_READ(hw, SXE_RXTPCIN);
+	stats->rxtpceng += SXE_REG_READ(hw, SXE_RXTPCOUT);
+	stats->prddc += SXE_REG_READ(hw, SXE_RXPRDDC);
 	stats->txswerr += SXE_REG_READ(hw, SXE_TXSWERR);
 	stats->txswitch += SXE_REG_READ(hw, SXE_TXSWITCH);
 	stats->txrepeat += SXE_REG_READ(hw, SXE_TXREPEAT);
@@ -4522,8 +4267,8 @@ void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
 		stats->dburxgdreecnt[i] += SXE_REG_READ(hw, SXE_DBUDREECNT(i));
 		rx_dbu_drop = SXE_REG_READ(hw, SXE_DBUDROFPCNT(i));
 		stats->dburxdrofpcnt[i] += rx_dbu_drop;
-		stats->dbutxtcin[i] += SXE_REG_READ(hw,SXE_DBUDTTCICNT(i));
-		stats->dbutxtcout[i] += SXE_REG_READ(hw,SXE_DBUDTTCOCNT(i));
+		stats->dbutxtcin[i] += SXE_REG_READ(hw, SXE_DBUDTTCICNT(i));
+		stats->dbutxtcout[i] += SXE_REG_READ(hw, SXE_DBUDTTCOCNT(i));
 	}
 
 	stats->fnavadd += (SXE_REG_READ(hw, SXE_FNAVUSTAT) & 0xFFFF);
@@ -4602,7 +4347,6 @@ void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
 	stats->gotc = stats->total_gotc;
 #endif
 
-	return;
 }
 
 static u32 sxe_hw_tx_packets_num_get(struct sxe_hw *hw)
@@ -4620,9 +4364,8 @@ static u32 sxe_hw_mac_stats_dump(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size
 	u32 i;
 	u32 regs_num = buf_size / sizeof(u32);
 
-	for (i = 0; i < regs_num; i++) {
+	for (i = 0; i < regs_num; i++)
 		regs_buff[i] = SXE_REG_READ(hw, mac_regs[i]);
-	}
 
 	return i;
 }
@@ -4649,11 +4392,10 @@ void sxe_hw_mbx_init(struct sxe_hw *hw)
 
 	hw->mbx.stats.rcv_msgs   = 0;
 	hw->mbx.stats.send_msgs  = 0;
-	hw->mbx.stats.acks       = 0;
-	hw->mbx.stats.reqs       = 0;
-	hw->mbx.stats.rsts       = 0;
+	hw->mbx.stats.acks	   = 0;
+	hw->mbx.stats.reqs	   = 0;
+	hw->mbx.stats.rsts	   = 0;
 
-	return;
 }
 
 static bool sxe_hw_vf_irq_check(struct sxe_hw *hw, u32 mask, u32 index)
@@ -4752,7 +4494,7 @@ s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
 	for (i = 0; i < msg_entry; i++) {
 		msg[i] = SXE_REG_READ(hw, (SXE_PFMBMEM(index) + (i << 2)));
 		LOG_DEBUG_BDF("vf_idx:%u read mbx mem[%u]:0x%x.\n",
-			      index, i, msg[i]);
+				  index, i, msg[i]);
 	}
 
 	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_ACK);
@@ -4788,16 +4530,15 @@ s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
 
 	old = SXE_REG_READ(hw, (SXE_PFMBMEM(index)));
 	LOG_DEBUG_BDF("original send msg:0x%x. mbx mem[0]:0x%x\n", *msg, old);
-	if (msg[0] & SXE_CTRL_MSG_MASK) {
+	if (msg[0] & SXE_CTRL_MSG_MASK)
 		msg[0] |= (old & SXE_MSGID_MASK);
-	} else {
+	else
 		msg[0] |= (old & SXE_PFMSG_MASK);
-	}
 
 	for (i = 0; i < msg_len; i++) {
 		SXE_REG_WRITE(hw, (SXE_PFMBMEM(index) + (i << 2)), msg[i]);
 		LOG_DEBUG_BDF("vf_idx:%u write mbx mem[%u]:0x%x.\n",
-			      index, i, msg[i]);
+				  index, i, msg[i]);
 	}
 
 	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_STS);
@@ -4811,14 +4552,12 @@ void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx)
 {
 	u8 msg_idx;
 	struct sxe_adapter *adapter = hw->adapter;
-	for (msg_idx = 0; msg_idx < hw->mbx.msg_len; msg_idx++) {
+	for (msg_idx = 0; msg_idx < hw->mbx.msg_len; msg_idx++)
 		SXE_REG_WRITE_ARRAY(hw, SXE_PFMBMEM(vf_idx), msg_idx, 0);
-	}
 
 	SXE_WRITE_FLUSH(hw);
 
 	LOG_INFO_BDF("vf_idx:%u clear mbx mem.\n", vf_idx);
-	return;
 }
 
 static const struct sxe_mbx_operations sxe_mbx_ops = {
@@ -4838,7 +4577,6 @@ void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_GCR_EXT, value);
 
-	return;
 }
 
 static const struct sxe_pcie_operations sxe_pcie_ops = {
@@ -4857,9 +4595,8 @@ s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock)
 
 	for (i = 0; i < trylock; i++) {
 		val = SXE_REG_READ(hw, SXE_HDC_SW_LK) & SXE_HDC_SW_LK_BIT;
-		if (!val) {
+		if (!val)
 			break;
-		}
 
 		udelay(10);
 	}
@@ -4898,9 +4635,8 @@ void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt)
 			hw->hdc.pf_lock_val = 0;
 			break;
 		}
-	} while((retry_cnt--) > 0);
+	} while ((retry_cnt--) > 0);
 
-	return;
 }
 
 void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw)
@@ -4912,9 +4648,8 @@ bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw)
 {
 	bool fw_ov = false;
 
-	if (SXE_REG_READ(hw, SXE_HDC_FW_OV) & SXE_HDC_FW_OV_BIT) {
+	if (SXE_REG_READ(hw, SXE_HDC_FW_OV) & SXE_HDC_FW_OV_BIT)
 		fw_ov = true;
-	}
 
 	return fw_ov;
 }
@@ -4924,21 +4659,18 @@ void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_HDC_SW_OV, SXE_HDC_SW_OV_BIT);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, value);
 
-	return;
 }
 
 void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
 						u16 dword_index, u32 value)
 {
 	SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index, value);
-	return;
 }
 
 u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw)
@@ -4965,7 +4697,6 @@ u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw)
 void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_DRV_STATUS_REG, value);
-	return;
 }
 
 u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw)
@@ -4979,7 +4710,7 @@ u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw)
 	return state;
 }
 
-STATIC u32 sxe_hw_hdc_irq_event_get(struct sxe_hw *hw)
+static u32 sxe_hw_hdc_irq_event_get(struct sxe_hw *hw)
 {
 	u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
 	struct sxe_adapter *adapter = hw->adapter;
@@ -4999,7 +4730,6 @@ static void sxe_hw_hdc_irq_event_clear(struct sxe_hw *hw, u32 event)
 	status &= ~event;
 	SXE_REG_WRITE(hw, SXE_HDC_MSI_STATUS_REG, status);
 
-	return;
 }
 
 static void sxe_hw_hdc_resource_clean(struct sxe_hw *hw)
@@ -5008,33 +4738,31 @@ static void sxe_hw_hdc_resource_clean(struct sxe_hw *hw)
 
 	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, 0x0);
 	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, 0x0);
-	for (i = 0; i < SXE_HDC_DATA_LEN_MAX; i++) {
+	for (i = 0; i < SXE_HDC_DATA_LEN_MAX; i++)
 		SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, i, 0x0);
-	}
 
-	return;
 }
 
 static const struct sxe_hdc_operations sxe_hdc_ops = {
-	.pf_lock_get            = sxe_hw_hdc_lock_get,
-	.pf_lock_release        = sxe_hw_hdc_lock_release,
-	.is_fw_over_set         = sxe_hw_hdc_is_fw_over_set,
-	.fw_ack_header_rcv      = sxe_hw_hdc_fw_ack_header_get,
-	.packet_send_done       = sxe_hw_hdc_packet_send_done,
-	.packet_header_send     = sxe_hw_hdc_packet_header_send,
+	.pf_lock_get		= sxe_hw_hdc_lock_get,
+	.pf_lock_release	= sxe_hw_hdc_lock_release,
+	.is_fw_over_set		= sxe_hw_hdc_is_fw_over_set,
+	.fw_ack_header_rcv	= sxe_hw_hdc_fw_ack_header_get,
+	.packet_send_done	= sxe_hw_hdc_packet_send_done,
+	.packet_header_send	= sxe_hw_hdc_packet_header_send,
 	.packet_data_dword_send = sxe_hw_hdc_packet_data_dword_send,
 	.packet_data_dword_rcv  = sxe_hw_hdc_packet_data_dword_rcv,
-	.fw_status_get          = sxe_hw_hdc_fw_status_get,
-	.drv_status_set         = sxe_hw_hdc_drv_status_set,
-	.irq_event_get          = sxe_hw_hdc_irq_event_get,
-	.irq_event_clear        = sxe_hw_hdc_irq_event_clear,
-	.fw_ov_clear            = sxe_hw_hdc_fw_ov_clear,
-	.channel_state_get      = sxe_hw_hdc_channel_state_get,
-	.resource_clean         = sxe_hw_hdc_resource_clean,
+	.fw_status_get		= sxe_hw_hdc_fw_status_get,
+	.drv_status_set		= sxe_hw_hdc_drv_status_set,
+	.irq_event_get		= sxe_hw_hdc_irq_event_get,
+	.irq_event_clear	= sxe_hw_hdc_irq_event_clear,
+	.fw_ov_clear		= sxe_hw_hdc_fw_ov_clear,
+	.channel_state_get	= sxe_hw_hdc_channel_state_get,
+	.resource_clean		= sxe_hw_hdc_resource_clean,
 };
 
 #ifdef SXE_PHY_CONFIGURE
-#define SXE_MDIO_COMMAND_TIMEOUT 100 
+#define SXE_MDIO_COMMAND_TIMEOUT 100
 
 static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
 				u32 device_type, u16 phy_data)
@@ -5056,9 +4784,9 @@ static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
 		udelay(10);
 
 		command = SXE_REG_READ(hw, SXE_MSCA);
-		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
 			break;
-		}
+
 	}
 
 	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
@@ -5079,9 +4807,8 @@ static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
 		udelay(10);
 
 		command = SXE_REG_READ(hw, SXE_MSCA);
-		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
 			break;
-		}
 	}
 
 	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
@@ -5112,9 +4839,8 @@ static s32 sxe_hw_phy_reg_read(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
 		udelay(10);
 
 		command = SXE_REG_READ(hw, SXE_MSCA);
-		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) {
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
 			break;
-		}
 	}
 
 	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
@@ -5166,7 +4892,7 @@ static s32 sxe_hw_phy_id_get(struct sxe_hw *hw, u32 prtad, u32 *id)
 
 
 	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID1, MDIO_MMD_PMAPMD,
-				      &phy_id_low);
+					  &phy_id_low);
 
 	if (ret) {
 		LOG_ERROR("get phy id upper 16 bits failed, prtad=%d\n", prtad);
@@ -5193,7 +4919,7 @@ s32 sxe_hw_phy_link_cap_get(struct sxe_hw *hw, u32 prtad, u32 *speed)
 	u16 speed_ability;
 
 	ret = hw->phy.ops->reg_read(hw, prtad, MDIO_SPEED, MDIO_MMD_PMAPMD,
-				      &speed_ability);
+					  &speed_ability);
 	if (ret) {
 		*speed = 0;
 		LOG_ERROR("get phy link cap failed, ret=%d, prtad=%d\n",
@@ -5201,17 +4927,14 @@ s32 sxe_hw_phy_link_cap_get(struct sxe_hw *hw, u32 prtad, u32 *speed)
 		goto l_end;
 	}
 
-	if (speed_ability & MDIO_SPEED_10G) {
+	if (speed_ability & MDIO_SPEED_10G)
 		*speed |= SXE_LINK_SPEED_10GB_FULL;
-	}
 
-	if (speed_ability & MDIO_PMA_SPEED_1000) {
+	if (speed_ability & MDIO_PMA_SPEED_1000)
 		*speed |= SXE_LINK_SPEED_1GB_FULL;
-	}
 
-	if (speed_ability & MDIO_PMA_SPEED_100) {
+	if (speed_ability & MDIO_PMA_SPEED_100)
 		*speed |= SXE_LINK_SPEED_100_FULL;
-	}
 
 l_end:
 	return ret;
@@ -5234,9 +4957,8 @@ static s32 sxe_hw_phy_ctrl_reset(struct sxe_hw *hw, u32 prtad)
 		msleep(100);
 		ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_CTRL1,
 					MDIO_MMD_PHYXS, &ctrl);
-		if (ret) {
+		if (ret)
 			goto l_end;
-		}
 
 		if (!(ctrl & MDIO_CTRL1_RESET)) {
 			udelay(2);
@@ -5275,23 +4997,21 @@ void sxe_hw_ops_init(struct sxe_hw *hw)
 	hw->pcie.ops	= &sxe_pcie_ops;
 	hw->hdc.ops	= &sxe_hdc_ops;
 #ifdef SXE_PHY_CONFIGURE
-	hw->phy.ops     = &sxe_phy_hw_ops;
+	hw->phy.ops	= &sxe_phy_hw_ops;
 #endif
 
 	hw->filter.mac.ops	= &sxe_filter_mac_ops;
 	hw->filter.vlan.ops	= &sxe_filter_vlan_ops;
-	return;
 }
 
 u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx)
 {
 	u32 rss_key;
 
-	if (reg_idx >= SXE_MAX_RSS_KEY_ENTRIES) {
+	if (reg_idx >= SXE_MAX_RSS_KEY_ENTRIES)
 		rss_key = 0;
-	} else {
+	else
 		rss_key = SXE_REG_READ(hw, SXE_RSSRK(reg_idx));
-	}
 
 	return rss_key;
 }
@@ -5300,9 +5020,8 @@ bool sxe_hw_is_rss_enabled(struct sxe_hw *hw)
 {
 	bool rss_enable = false;
 	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
-	if (mrqc & SXE_MRQC_RSSEN) {
+	if (mrqc & SXE_MRQC_RSSEN)
 		rss_enable = true;
-	}
 
 	return rss_enable;
 }
@@ -5318,15 +5037,27 @@ u32 sxe_hw_rss_field_get(struct sxe_hw *hw)
 	return (mrqc & SXE_RSS_FIELD_MASK);
 }
 
-#ifdef SXE_DPDK 
+#ifdef SXE_DPDK
 
 #define SXE_TRAFFIC_CLASS_MAX  8
 
-#define SXE_MR_VLAN_MSB_REG_OFFSET         4
+#define SXE_MR_VLAN_MSB_REG_OFFSET	4
 #define SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET 4
 
-#define SXE_MR_TYPE_MASK                   0x0F
-#define SXE_MR_DST_POOL_OFFSET             8
+#define SXE_MR_TYPE_MASK	0x0F
+#define SXE_MR_DST_POOL_OFFSET	8
+
+void sxe_hw_crc_strip_config(struct sxe_hw *hw, bool keep_crc)
+{
+	u32 crcflag = SXE_REG_READ(hw, SXE_CRC_STRIP_REG);
+
+	if (keep_crc)
+		crcflag |= SXE_KEEP_CRC_EN;
+	else
+		crcflag &= ~SXE_KEEP_CRC_EN;
+
+	SXE_REG_WRITE(hw, SXE_CRC_STRIP_REG, crcflag);
+}
 
 void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize)
 {
@@ -5336,7 +5068,6 @@ void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize)
 	SXE_REG_WRITE(hw, SXE_RXPBSIZE(tc_idx), rxpbsize);
 	sxe_hw_rx_pkt_buf_switch(hw, true);
 
-	return;
 }
 
 void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools)
@@ -5349,13 +5080,11 @@ void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools)
 
 	pbsize = (u8)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
 
-	for (i = 0; i < nb_tcs; i++) {
+	for (i = 0; i < nb_tcs; i++)
 		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
-	}
 
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
-	}
 
 	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
 		SXE_MRQC_VMDQRT8TCEN : SXE_MRQC_VMDQRT4TCEN;
@@ -5363,7 +5092,6 @@ void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools)
 
 	SXE_REG_WRITE(hw, SXE_RTRPCS, SXE_RTRPCS_RRM);
 
-	return;
 }
 
 static const struct sxe_reg_info sxe_regs_general_group[] = {
@@ -5468,9 +5196,8 @@ static u32 sxe_regs_group_count(const struct sxe_reg_info *regs)
 	int i = 0;
 	int count = 0;
 
-	while (regs[i].count) {
+	while (regs[i].count)
 		count += regs[i++].count;
-	}
 
 	return count;
 };
@@ -5487,7 +5214,7 @@ static u32 sxe_hw_regs_group_read(struct sxe_hw *hw,
 			reg_buf[count + j] = SXE_REG_READ(hw,
 					regs[i].addr + j * regs[i].stride);
 			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
-				regs[i].name , regs[i].addr, reg_buf[count + j]);
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
 		}
 
 		i++;
@@ -5504,9 +5231,8 @@ u32 sxe_hw_all_regs_group_num_get(void)
 	const struct sxe_reg_info *reg_group;
 	const struct sxe_reg_info **reg_set = sxe_regs_group;
 
-	while ((reg_group = reg_set[i++])) {
+	while ((reg_group = reg_set[i++]))
 		count += sxe_regs_group_count(reg_group);
-	}
 
 	return count;
 }
@@ -5517,14 +5243,12 @@ void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data)
 	const struct sxe_reg_info *reg_group;
 	const struct sxe_reg_info **reg_set = sxe_regs_group;
 
-	while ((reg_group = reg_set[i++])) {
+	while ((reg_group = reg_set[i++]))
 		count += sxe_hw_regs_group_read(hw, reg_group, &data[count]);
-	}
 
 	LOG_INFO("read regs cnt=%u, regs num=%u\n",
 				count, sxe_hw_all_regs_group_num_get());
 
-	return;
 }
 
 static void sxe_hw_default_pool_configure(struct sxe_hw *hw,
@@ -5534,14 +5258,12 @@ static void sxe_hw_default_pool_configure(struct sxe_hw *hw,
 	u32 vt_ctl;
 
 	vt_ctl = SXE_VT_CTL_VT_ENABLE | SXE_VT_CTL_REPLEN;
-	if (default_pool_enabled) {
+	if (default_pool_enabled)
 		vt_ctl |= (default_pool_idx << SXE_VT_CTL_POOL_SHIFT);
-	} else {
+	else
 		vt_ctl |= SXE_VT_CTL_DIS_DEFPL;
-	}
 
 	SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl);
-	return;
 }
 
 void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
@@ -5549,31 +5271,27 @@ void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
 						u8 default_pool_idx)
 {
 	sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx);
-	return;
 }
 
 u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx)
 {
 	u32 mask;
 
-	if (idx == 0) {
+	if (idx == 0)
 		mask = SXE_REG_READ(hw, SXE_EIMS_EX(0));
-	} else {
+	else
 		mask = SXE_REG_READ(hw, SXE_EIMS_EX(1));
-	}
 
 	return mask;
 }
 
 void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value)
 {
-	if (idx == 0) {
+	if (idx == 0)
 		SXE_REG_WRITE(hw, SXE_EIMS_EX(0), value);
-	} else {
+	else
 		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), value);
-	}
 
-	return;
 }
 
 void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
@@ -5583,13 +5301,11 @@ void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
 	u8 i;
 
 	up2tc = 0;
-	for (i = 0; i < MAX_USER_PRIORITY; i++) {
+	for (i = 0; i < MAX_USER_PRIORITY; i++)
 		up2tc |= ((tc_arr[i] & 0x07) << (i * 3));
-	}
 
 	SXE_REG_WRITE(hw, SXE_RTRUP2TC, up2tc);
 
-	return;
 }
 
 u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx)
@@ -5602,7 +5318,6 @@ void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
 {
 	SXE_REG_WRITE(hw, SXE_UTA(reg_idx), value);
 
-	return;
 }
 
 u32 sxe_hw_vlan_type_get(struct sxe_hw *hw)
@@ -5613,7 +5328,6 @@ u32 sxe_hw_vlan_type_get(struct sxe_hw *hw)
 void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_VLNCTRL, value);
-	return;
 }
 
 void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
@@ -5626,9 +5340,8 @@ void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
 	vlanctrl |= SXE_VLNCTRL_VFE;
 	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
 
-	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
 		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
-	}
 
 	SXE_REG_WRITE(hw, SXE_VFRE(0),
 			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
@@ -5636,13 +5349,11 @@ void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
 	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
 	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
 
-	return;
 }
 
 void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_EXVET, value);
-	return;
 }
 
 u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw)
@@ -5653,7 +5364,6 @@ u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw)
 void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_DMATXCTL, value);
-	return;
 }
 
 u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw)
@@ -5664,13 +5374,11 @@ u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw)
 void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
-	return;
 }
 
 void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_RQSMR(idx), value);
-	return;
 }
 
 void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
@@ -5682,13 +5390,11 @@ void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
 
 	SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2), pools_map);
 
-	return;
 }
 
 void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
 {
 	SXE_REG_WRITE(hw, SXE_TQSM(idx), value);
-	return;
 }
 
 void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
@@ -5746,14 +5452,12 @@ void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
 	vlanctrl |= SXE_VLNCTRL_VFE;
 	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
 
-	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
 		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
-	}
 
 	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
 	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
 
-	return;
 }
 
 void sxe_hw_fc_status_get(struct sxe_hw *hw,
@@ -5762,19 +5466,16 @@ void sxe_hw_fc_status_get(struct sxe_hw *hw,
 	u32 flctrl;
 
 	flctrl = SXE_REG_READ(hw, SXE_FLCTRL);
-	if (flctrl & (SXE_FCTRL_RFCE_PFC_EN | SXE_FCTRL_RFCE_LFC_EN)) {
+	if (flctrl & (SXE_FCTRL_RFCE_PFC_EN | SXE_FCTRL_RFCE_LFC_EN))
 		*rx_pause_on = true;
-	} else {
+	else
 		*rx_pause_on = false;
-	}
 
-	if (flctrl & (SXE_FCTRL_TFCE_PFC_EN | SXE_FCTRL_TFCE_LFC_EN)) {
+	if (flctrl & (SXE_FCTRL_TFCE_PFC_EN | SXE_FCTRL_TFCE_LFC_EN))
 		*tx_pause_on = true;
-	} else {
+	else
 		*tx_pause_on = false;
-	}
 
-	return;
 }
 
 void sxe_hw_fc_base_init(struct sxe_hw *hw)
@@ -5792,7 +5493,6 @@ void sxe_hw_fc_base_init(struct sxe_hw *hw)
 	}
 
 	hw->fc.send_xon = 1;
-	return;
 }
 
 u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
@@ -5813,7 +5513,6 @@ u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw)
 void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon)
 {
 	hw->fc.send_xon = send_xon;
-	return;
 }
 
 u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw)
@@ -5824,7 +5523,6 @@ u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw)
 void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time)
 {
 	hw->fc.pause_time = pause_time;
-	return;
 }
 
 void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num)
@@ -5835,15 +5533,13 @@ void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num)
 	reg |= SXE_RTTDCS_ARBDIS;
 	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
 
-	if (tc_num == 8) {
+	if (tc_num == 8)
 		reg = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
-	} else {
+	else
 		reg = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
-	}
 
-	if (is_vt_on) {
+	if (is_vt_on)
 		reg |= SXE_MTQC_VT_ENA;
-	}
 
 	SXE_REG_WRITE(hw, SXE_MTQC, reg);
 
@@ -5852,7 +5548,6 @@ void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num)
 	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
 
 
-	return;
 }
 
 void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
@@ -5861,36 +5556,31 @@ void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
 	u32 rxcsum;
 
 	rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
-	if (is_on) {
+	if (is_on)
 		rxcsum |= SXE_RXCSUM_IPPCSE;
-	} else {
+	else
 		rxcsum &= ~SXE_RXCSUM_IPPCSE;
-	}
 
 	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
 
-	return;
 }
 
 void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on)
 {
 	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
-	if (is_on) {
+	if (is_on)
 		mrqc |= SXE_MRQC_RSSEN;
-	} else {
+	else
 		mrqc &= ~SXE_MRQC_RSSEN;
-	}
 
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num)
 {
 	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx),
 		pool_num == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
-	return;
 }
 
 void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field)
@@ -5901,7 +5591,6 @@ void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field)
 	mrqc |= rss_field;
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 static void sxe_hw_dcb_4tc_vmdq_off_stats_configure(struct sxe_hw *hw)
@@ -5910,44 +5599,38 @@ static void sxe_hw_dcb_4tc_vmdq_off_stats_configure(struct sxe_hw *hw)
 	u8  i;
 
 	for (i = 0; i < 32; i++) {
-		if (i % 8 > 3) {
+		if (i % 8 > 3)
 			continue;
-		}
 
 		reg = 0x01010101 * (i / 8);
 		SXE_REG_WRITE(hw, SXE_RQSMR(i), reg);
 	}
 	for (i = 0; i < 32; i++) {
-		if (i < 16) {
+		if (i < 16)
 			reg = 0x00000000;
-		} else if (i < 24) {
+		else if (i < 24)
 			reg = 0x01010101;
-		} else if (i < 28) {
+		else if (i < 28)
 			reg = 0x02020202;
-		} else {
+		else
 			reg = 0x03030303;
-		}
 
 		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
 	}
 
-	return;
 }
 
 static void sxe_hw_dcb_4tc_vmdq_on_stats_configure(struct sxe_hw *hw)
 {
 	u8  i;
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < 32; i++)
 		SXE_REG_WRITE(hw, SXE_RQSMR(i), 0x03020100);
-	}
 
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < 32; i++)
 		SXE_REG_WRITE(hw, SXE_TQSM(i), 0x03020100);
-	}
 
-	return;
 }
 
 void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
@@ -5969,21 +5652,18 @@ u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx)
 void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw)
 {
 	SXE_REG_WRITE(hw, SXE_TIMINC, 0);
-	return;
 }
 
 void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
 					u8 tc_num, bool vmdq_active)
 {
-	if (tc_num == 8 && vmdq_active == false) {
+	if (tc_num == 8 && vmdq_active == false)
 		sxe_hw_dcb_8tc_vmdq_off_stats_configure(hw);
-	} else if (tc_num == 4 && vmdq_active == false) {
+	else if (tc_num == 4 && vmdq_active == false)
 		sxe_hw_dcb_4tc_vmdq_off_stats_configure(hw);
-	} else if (tc_num == 4 && vmdq_active == true) {
+	else if (tc_num == 4 && vmdq_active == true)
 		sxe_hw_dcb_4tc_vmdq_on_stats_configure(hw);
-	}
 
-	return;
 }
 
 void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw)
@@ -5997,7 +5677,6 @@ void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw)
 			~SXE_TSYNCRXCTL_REN));
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx)
@@ -6007,14 +5686,12 @@ void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx)
 	if (rar_idx > SXE_UC_ENTRY_NUM_MAX) {
 		LOG_ERROR_BDF("rar_idx:%d invalid.(err:%d)\n",
 			  rar_idx, SXE_ERR_PARAM);
-		goto l_end;
+		return;
 	}
 
 	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
 	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
 
-l_end:
-	return;
 }
 
 void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw)
@@ -6024,7 +5701,6 @@ void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw)
 	mrqc = SXE_MRQC_VMDQEN;
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
@@ -6032,7 +5708,6 @@ void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
 						u8 default_pool_idx)
 {
 	sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx);
-	return;
 }
 
 void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
@@ -6045,24 +5720,20 @@ void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
 	vlanctrl |= SXE_VLNCTRL_VFE;
 	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
 
-	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
 		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
-	}
 
 	SXE_REG_WRITE(hw, SXE_VFRE(0), 0xFFFFFFFF);
-	if (num_pools == RTE_ETH_64_POOLS) {
+	if (num_pools == RTE_ETH_64_POOLS)
 		SXE_REG_WRITE(hw, SXE_VFRE(1), 0xFFFFFFFF);
-	}
 
-	for (i = 0; i < num_pools; i++) {
+	for (i = 0; i < num_pools; i++)
 		SXE_REG_WRITE(hw, SXE_VMOLR(i), rx_mode);
-	}
 
 	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
 	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw)
@@ -6082,7 +5753,6 @@ void sxe_rx_fc_threshold_set(struct sxe_hw *hw)
 		SXE_REG_WRITE(hw, SXE_FCRTH(i), high);
 	}
 
-	return;
 }
 
 void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
@@ -6101,19 +5771,16 @@ void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
 	}
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw)
 {
 	u8 i;
 	SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN);
-	for (i = 0; i < SXE_VMTXSW_REGISTER_COUNT; i++) {
+	for (i = 0; i < SXE_VMTXSW_REGISTER_COUNT; i++)
 		SXE_REG_WRITE(hw, SXE_VMTXSW(i), 0xFFFFFFFF);
-	}
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
@@ -6145,7 +5812,7 @@ void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
 			SXE_REG_WRITE(hw, SXE_VFTE(1), UINT32_MAX);
 
 			for (queue_idx = 0; queue_idx < SXE_HW_TXRX_RING_NUM_MAX;
-			    queue_idx++) {
+				queue_idx++) {
 				SXE_REG_WRITE(hw, SXE_QDE,
 					(SXE_QDE_WRITE |
 					(queue_idx << SXE_QDE_IDX_SHIFT)));
@@ -6161,7 +5828,6 @@ void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
 
 	sxe_hw_dcb_arbiter_set(hw, true);
 
-	return;
 }
 
 void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
@@ -6170,8 +5836,7 @@ void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
 	u32 value;
 	u8 i;
 
-	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++)
-	{
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) {
 		value = SXE_QDE_ENABLE | SXE_QDE_WRITE;
 		SXE_WRITE_FLUSH(hw);
 
@@ -6180,7 +5845,6 @@ void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
 		SXE_REG_WRITE(hw, SXE_QDE, value);
 	}
 
-	return;
 }
 
 bool sxe_hw_vt_status(struct sxe_hw *hw)
@@ -6188,17 +5852,16 @@ bool sxe_hw_vt_status(struct sxe_hw *hw)
 	bool ret;
 	u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL);
 
-	if (vt_ctl & SXE_VMD_CTL_POOL_EN) {
+	if (vt_ctl & SXE_VMD_CTL_POOL_EN)
 		ret = true;
-	} else {
+	else
 		ret = false;
-	}
 
 	return ret;
 }
 
 void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
-				    u8 mirror_type, u8 dst_pool, bool on)
+					u8 mirror_type, u8 dst_pool, bool on)
 {
 	u32 mr_ctl;
 
@@ -6214,23 +5877,20 @@ void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
 
 	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), mr_ctl);
 
-	return;
 }
 
-void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 rule_id,u32 lsb, u32 msb)
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
 {
 	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), lsb);
 	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id  + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), msb);
 
-	return;
 }
 
-void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 rule_id,u32 lsb, u32 msb)
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
 {
 	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), lsb);
 	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id  + SXE_MR_VLAN_MSB_REG_OFFSET), msb);
 
-	return;
 }
 
 void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id)
@@ -6243,7 +5903,6 @@ void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id)
 	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), 0);
 	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id  + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), 0);
 
-	return;
 }
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
@@ -6266,28 +5925,29 @@ void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
 	ftqf |= (u32)((filter->filter_info.priority &
 			SXE_FTQF_PRIORITY_MASK) << SXE_FTQF_PRIORITY_SHIFT);
 
-	if (filter->filter_info.src_ip_mask == 0) {
+	if (filter->filter_info.src_ip_mask == 0)
 		mask &= SXE_FTQF_SOURCE_ADDR_MASK;
-	}
-	if (filter->filter_info.dst_ip_mask == 0) {
+
+	if (filter->filter_info.dst_ip_mask == 0)
 		mask &= SXE_FTQF_DEST_ADDR_MASK;
-	}
-	if (filter->filter_info.src_port_mask == 0) {
+
+	if (filter->filter_info.src_port_mask == 0)
 		mask &= SXE_FTQF_SOURCE_PORT_MASK;
-	}
-	if (filter->filter_info.dst_port_mask == 0) {
+
+	if (filter->filter_info.dst_port_mask == 0)
 		mask &= SXE_FTQF_DEST_PORT_MASK;
-	}
-	if (filter->filter_info.proto_mask == 0) {
+
+	if (filter->filter_info.proto_mask == 0)
 		mask &= SXE_FTQF_PROTOCOL_COMP_MASK;
-	}
+
 	ftqf |= mask << SXE_FTQF_5TUPLE_MASK_SHIFT;
 	ftqf |= SXE_FTQF_POOL_MASK_EN;
 	ftqf |= SXE_FTQF_QUEUE_ENABLE;
 
 	LOG_DEBUG("add fivetuple filter, index[%u], src_ip[0x%x], dst_ip[0x%x]"
-		"src_port[%u], dst_port[%u], ftqf[0x%x], queue[%u]", i, filter->filter_info.src_ip,
-		filter->filter_info.dst_ip, filter->filter_info.src_port, filter->filter_info.dst_port,
+		"src_port[%u], dst_port[%u], ftqf[0x%x], queue[%u]", i,
+		filter->filter_info.src_ip, filter->filter_info.dst_ip,
+		filter->filter_info.src_port, filter->filter_info.dst_port,
 		ftqf, filter->queue);
 
 	SXE_REG_WRITE(hw, SXE_DAQF(i), filter->filter_info.dst_ip);
@@ -6299,7 +5959,6 @@ void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
 	l34timir |= (u32)(filter->queue << SXE_L34T_IMIR_QUEUE_SHIFT);
 	SXE_REG_WRITE(hw, SXE_L34T_IMIR(i), l34timir);
 
-	return;
 }
 
 void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index)
@@ -6310,7 +5969,6 @@ void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index)
 	SXE_REG_WRITE(hw, SXE_FTQF(reg_index), 0);
 	SXE_REG_WRITE(hw, SXE_L34T_IMIR(reg_index), 0);
 
-	return;
 }
 
 void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
@@ -6329,7 +5987,6 @@ void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
 	SXE_REG_WRITE(hw, SXE_ETQS(reg_index), etqs);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type)
@@ -6338,7 +5995,6 @@ void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type)
 	SXE_REG_WRITE(hw, SXE_ETQS(filter_type), 0);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority)
@@ -6348,16 +6004,14 @@ void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority)
 	synqf = (u32)(((queue << SXE_SYN_FILTER_QUEUE_SHIFT) &
 			SXE_SYN_FILTER_QUEUE) | SXE_SYN_FILTER_ENABLE);
 
-	if (priority) {
+	if (priority)
 		synqf |= SXE_SYN_FILTER_SYNQFP;
-	} else {
+	else
 		synqf &= ~SXE_SYN_FILTER_SYNQFP;
-	}
 
 	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_syn_filter_del(struct sxe_hw *hw)
@@ -6370,7 +6024,6 @@ void sxe_hw_syn_filter_del(struct sxe_hw *hw)
 	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize)
@@ -6378,11 +6031,9 @@ void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize)
 	S32 i;
 
 	SXE_REG_WRITE(hw, SXE_RXPBSIZE(0), (SXE_REG_READ(hw, SXE_RXPBSIZE(0)) - pbsize));
-	for (i = 1; i < 8; i++) {
+	for (i = 1; i < 8; i++)
 		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
-	}
 
-	return;
 }
 
 void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask)
@@ -6390,12 +6041,10 @@ void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask)
 	u32 fnavm;
 
 	fnavm = SXE_REG_READ(hw, SXE_FNAVM);
-	if (flex_mask == UINT16_MAX) {
+	if (flex_mask == UINT16_MAX)
 		fnavm &= ~SXE_FNAVM_FLEX;
-	}
 
 	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
-	return;
 }
 
 void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask)
@@ -6405,7 +6054,6 @@ void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask)
 	fnavipv6m = (dst_mask << 16) | src_mask;
 	SXE_REG_WRITE(hw, SXE_FNAVIP6M, ~fnavipv6m);
 
-	return;
 }
 
 s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset)
@@ -6425,7 +6073,6 @@ s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset)
 	if (ret) {
 		LOG_ERROR("flow director signature poll time exceeded!\n");
 	}
-
 	return ret;
 }
 #endif
@@ -6450,9 +6097,9 @@ static void sxe_macsec_stop_data(struct sxe_hw *hw, bool link)
 		SXE_SECTXSTAT_SECTX_RDY;
 	r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
 		SXE_SECRXSTAT_SECRX_RDY;
 	if (t_rdy && r_rdy)
 		return;
 
 	if (!link) {
 		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x1);
 
@@ -6474,13 +6121,11 @@ static void sxe_macsec_stop_data(struct sxe_hw *hw, bool link)
 		SXE_WRITE_FLUSH(hw);
 	}
 
-	return;
 }
 void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc)
 {
 	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
 
-	return;
 }
 
 void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
@@ -6527,7 +6172,6 @@ void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
 
 	SXE_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up)
@@ -6555,7 +6199,6 @@ void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up)
 	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch)
@@ -6564,7 +6207,6 @@ void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch)
 	SXE_REG_WRITE(hw, SXE_LSECTXSCH, sch);
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi)
@@ -6577,7 +6219,6 @@ void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi)
 	SXE_REG_WRITE(hw, SXE_LSECRXSCH, reg);
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 
 }
 
@@ -6594,9 +6235,9 @@ void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
 	SXE_WRITE_FLUSH(hw);
 
 	SXE_REG_WRITE(hw, SXE_LSECTXPN(sa_idx), pn);
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < 4; i++)
 		SXE_REG_WRITE(hw, SXE_LSECTXKEY(sa_idx, i), keys[i]);
-	}
+
 	SXE_WRITE_FLUSH(hw);
 
 	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
@@ -6613,7 +6254,6 @@ void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
 	}
 
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
 
 void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
@@ -6632,16 +6272,15 @@ void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
 
 	SXE_REG_WRITE(hw, SXE_LSECRXPN(sa_idx), pn);
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < 4; i++)
 		SXE_REG_WRITE(hw, SXE_LSECRXKEY(sa_idx, i), keys[i]);
-	}
+
 	SXE_WRITE_FLUSH(hw);
 
 	reg = ((an << SXE_LSECRXSA_AN_SHIFT) & SXE_LSECRXSA_AN_MASK) | SXE_LSECRXSA_SAV;
 	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
 	SXE_WRITE_FLUSH(hw);
-	return;
 }
-				
-#endif 
-#endif 
+
+#endif
+#endif
diff --git a/drivers/net/sxe/base/sxe_hw.h b/drivers/net/sxe/base/sxe_hw.h
index 8adc9fc15b..aa19817d96 100644
--- a/drivers/net/sxe/base/sxe_hw.h
+++ b/drivers/net/sxe/base/sxe_hw.h
@@ -5,7 +5,7 @@
 #ifndef __SXE_HW_H__
 #define __SXE_HW_H__
 
-#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
 #include <linux/types.h>
 #include <linux/kernel.h>
 #else
@@ -20,17 +20,17 @@
 
 #include "sxe_regs.h"
 
-#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
 #define SXE_PRIU64  "llu"
 #define SXE_PRIX64  "llx"
 #define SXE_PRID64  "lld"
-#define SXE_RMB()     rmb()
+#define SXE_RMB()	rmb() /* ensure prior register reads complete before later checks */
 
 #else
 #define SXE_PRIU64  PRIu64
 #define SXE_PRIX64  PRIx64
 #define SXE_PRID64  PRId64
-#define SXE_RMB()     rte_rmb()
+#define SXE_RMB()	rte_rmb()
 #endif
 
 struct sxe_hw;
@@ -40,29 +40,29 @@ struct sxe_fc_info;
 #define SXE_MAC_ADDR_LEN 6
 #define SXE_QUEUE_STATS_MAP_REG_NUM 32
 
-#define SXE_FC_DEFAULT_HIGH_WATER_MARK    0x80
-#define SXE_FC_DEFAULT_LOW_WATER_MARK     0x40
+#define SXE_FC_DEFAULT_HIGH_WATER_MARK	0x80
+#define SXE_FC_DEFAULT_LOW_WATER_MARK	0x40
 
 #define  SXE_MC_ADDR_EXTRACT_MASK  (0xFFF)
-#define  SXE_MC_ADDR_SHIFT         (5)    
-#define  SXE_MC_ADDR_REG_MASK      (0x7F) 
-#define  SXE_MC_ADDR_BIT_MASK      (0x1F) 
+#define  SXE_MC_ADDR_SHIFT	(5)
+#define  SXE_MC_ADDR_REG_MASK	(0x7F)
+#define  SXE_MC_ADDR_BIT_MASK	(0x1F)
 
 #define SXE_TXTS_POLL_CHECK		3
 #define SXE_TXTS_POLL			5
 #define SXE_TIME_TO_NS(ns, sec)	(((u64)(ns)) + (u64)(((u64)(sec)) * NSEC_PER_SEC))
 
 enum sxe_strict_prio_type {
-	PRIO_NONE = 0, 
-	PRIO_GROUP,    
-	PRIO_LINK      
+	PRIO_NONE = 0,
+	PRIO_GROUP,
+	PRIO_LINK
 };
 
 enum sxe_mc_filter_type {
-	SXE_MC_FILTER_TYPE0 = 0,  
-	SXE_MC_FILTER_TYPE1,      
-	SXE_MC_FILTER_TYPE2,      
-	SXE_MC_FILTER_TYPE3       
+	SXE_MC_FILTER_TYPE0 = 0,
+	SXE_MC_FILTER_TYPE1,
+	SXE_MC_FILTER_TYPE2,
+	SXE_MC_FILTER_TYPE3
 };
 
 #define SXE_POOLS_NUM_MAX 64
@@ -84,57 +84,57 @@ enum sxe_mc_filter_type {
 #define SXE_VF_NUM_16		16
 #define SXE_VF_NUM_32		32
 
-#define SXE_TX_DESC_EOP_MASK  0x01000000   
-#define SXE_TX_DESC_RS_MASK   0x08000000   
-#define SXE_TX_DESC_STAT_DD   0x00000001   
-#define SXE_TX_DESC_CMD       (SXE_TX_DESC_EOP_MASK | SXE_TX_DESC_RS_MASK)
-#define SXE_TX_DESC_TYPE_DATA 0x00300000   
-#define SXE_TX_DESC_DEXT      0x20000000   
-#define SXE_TX_DESC_IFCS      0x02000000   
-#define SXE_TX_DESC_VLE       0x40000000 
-#define SXE_TX_DESC_TSTAMP    0x00080000 
-#define SXE_TX_DESC_FLAGS     (SXE_TX_DESC_TYPE_DATA | \
+#define SXE_TX_DESC_EOP_MASK  0x01000000
+#define SXE_TX_DESC_RS_MASK   0x08000000
+#define SXE_TX_DESC_STAT_DD   0x00000001
+#define SXE_TX_DESC_CMD	   (SXE_TX_DESC_EOP_MASK | SXE_TX_DESC_RS_MASK)
+#define SXE_TX_DESC_TYPE_DATA 0x00300000
+#define SXE_TX_DESC_DEXT	  0x20000000
+#define SXE_TX_DESC_IFCS	  0x02000000
+#define SXE_TX_DESC_VLE	   0x40000000
+#define SXE_TX_DESC_TSTAMP	0x00080000
+#define SXE_TX_DESC_FLAGS	 (SXE_TX_DESC_TYPE_DATA | \
 				SXE_TX_DESC_IFCS | \
 				SXE_TX_DESC_DEXT| \
 				SXE_TX_DESC_EOP_MASK)
-#define SXE_TXD_DTYP_CTXT     0x00200000 
-#define SXE_TXD_DCMD_TSE      0x80000000 
-#define SXE_TXD_MAC_LINKSEC   0x00040000 
-#define SXE_TXD_MAC_1588      0x00080000 
-#define SXE_TX_DESC_PAYLEN_SHIFT     14
-#define SXE_TX_OUTERIPCS_SHIFT	17 
+#define SXE_TXD_DTYP_CTXT	 0x00200000
+#define SXE_TXD_DCMD_TSE	  0x80000000
+#define SXE_TXD_MAC_LINKSEC   0x00040000
+#define SXE_TXD_MAC_1588	  0x00080000
+#define SXE_TX_DESC_PAYLEN_SHIFT	 14
+#define SXE_TX_OUTERIPCS_SHIFT	17
 
 #define SXE_TX_POPTS_IXSM   0x01
 #define SXE_TX_POPTS_TXSM   0x02
-#define SXE_TXD_POPTS_SHIFT 8  
+#define SXE_TXD_POPTS_SHIFT 8
 #define SXE_TXD_POPTS_IXSM  (SXE_TX_POPTS_IXSM << SXE_TXD_POPTS_SHIFT)
 #define SXE_TXD_POPTS_TXSM  (SXE_TX_POPTS_TXSM << SXE_TXD_POPTS_SHIFT)
 #define SXE_TXD_POPTS_IPSEC (0x00000400)
 
-#define SXE_TX_CTXTD_DTYP_CTXT      0x00200000 
-#define SXE_TX_CTXTD_TUCMD_IPV6     0x00000000 
-#define SXE_TX_CTXTD_TUCMD_IPV4     0x00000400 
-#define SXE_TX_CTXTD_TUCMD_L4T_UDP  0x00000000 
-#define SXE_TX_CTXTD_TUCMD_L4T_TCP  0x00000800 
-#define SXE_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000 
-#define SXE_TX_CTXTD_TUCMD_L4T_RSV  0x00001800 
-#define SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP   0x00002000 
-#define SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 
-
-#define SXE_TX_CTXTD_L4LEN_SHIFT          8  
-#define SXE_TX_CTXTD_MSS_SHIFT            16 
-#define SXE_TX_CTXTD_MACLEN_SHIFT         9  
-#define SXE_TX_CTXTD_VLAN_SHIFT           16
-#define SXE_TX_CTXTD_VLAN_MASK            0xffff0000
-#define SXE_TX_CTXTD_MACLEN_MASK          0x0000fE00
-#define SXE_TX_CTXTD_OUTER_IPLEN_SHIFT    16 
-#define SXE_TX_CTXTD_TUNNEL_LEN_SHIFT     24 
-
-#define SXE_VLAN_TAG_SIZE     4
-
-#define SXE_RSS_KEY_SIZE                (40)  
-#define SXE_MAX_RSS_KEY_ENTRIES		(10)  
-#define SXE_MAX_RETA_ENTRIES            (128) 
+#define SXE_TX_CTXTD_DTYP_CTXT	  0x00200000
+#define SXE_TX_CTXTD_TUCMD_IPV6	 0x00000000
+#define SXE_TX_CTXTD_TUCMD_IPV4	 0x00000400
+#define SXE_TX_CTXTD_TUCMD_L4T_UDP  0x00000000
+#define SXE_TX_CTXTD_TUCMD_L4T_TCP  0x00000800
+#define SXE_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000
+#define SXE_TX_CTXTD_TUCMD_L4T_RSV  0x00001800
+#define SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP   0x00002000
+#define SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+
+#define SXE_TX_CTXTD_L4LEN_SHIFT		  8
+#define SXE_TX_CTXTD_MSS_SHIFT			16
+#define SXE_TX_CTXTD_MACLEN_SHIFT		 9
+#define SXE_TX_CTXTD_VLAN_SHIFT		   16
+#define SXE_TX_CTXTD_VLAN_MASK			0xffff0000
+#define SXE_TX_CTXTD_MACLEN_MASK		  0x0000fE00
+#define SXE_TX_CTXTD_OUTER_IPLEN_SHIFT	16
+#define SXE_TX_CTXTD_TUNNEL_LEN_SHIFT	 24
+
+#define SXE_VLAN_TAG_SIZE	 4
+
+#define SXE_RSS_KEY_SIZE				(40)
+#define SXE_MAX_RSS_KEY_ENTRIES		(10)
+#define SXE_MAX_RETA_ENTRIES			(128)
 
 #define SXE_TIMINC_IV_NS_SHIFT  8
 #define SXE_TIMINC_INCPD_SHIFT  24
@@ -142,49 +142,49 @@ enum sxe_mc_filter_type {
 	(((incpd) << SXE_TIMINC_INCPD_SHIFT) | \
 	((iv_ns) << SXE_TIMINC_IV_NS_SHIFT) | (iv_sns))
 
-#define PBA_STRATEGY_EQUAL       (0)    
-#define PBA_STRATEGY_WEIGHTED    (1)	
-#define SXE_PKG_BUF_NUM_MAX               (8)
+#define PBA_STRATEGY_EQUAL	   (0)
+#define PBA_STRATEGY_WEIGHTED	(1)
+#define SXE_PKG_BUF_NUM_MAX			   (8)
 #define SXE_HW_TXRX_RING_NUM_MAX 128
 #define SXE_VMDQ_DCB_NUM_QUEUES  SXE_HW_TXRX_RING_NUM_MAX
-#define SXE_RX_PKT_BUF_SIZE 			(512)
+#define SXE_RX_PKT_BUF_SIZE				(512)
 
 #define SXE_UC_ENTRY_NUM_MAX   128
 #define SXE_HW_TX_NONE_MODE_Q_NUM 64
 
-#define SXE_MBX_MSG_NUM    16
+#define SXE_MBX_MSG_NUM	16
 #define SXE_MBX_RETRY_INTERVAL   500
-#define SXE_MBX_RETRY_COUNT      2000
+#define SXE_MBX_RETRY_COUNT	  2000
 
 #define SXE_VF_UC_ENTRY_NUM_MAX 10
 #define SXE_VF_MC_ENTRY_NUM_MAX 30
 
 #define SXE_UTA_ENTRY_NUM_MAX   128
 #define SXE_MTA_ENTRY_NUM_MAX   128
-#define SXE_HASH_UC_NUM_MAX   4096 
+#define SXE_HASH_UC_NUM_MAX   4096
 
-#define  SXE_MAC_ADDR_EXTRACT_MASK  (0xFFF) 
-#define  SXE_MAC_ADDR_SHIFT         (5)     
-#define  SXE_MAC_ADDR_REG_MASK      (0x7F)  
-#define  SXE_MAC_ADDR_BIT_MASK      (0x1F)  
+#define  SXE_MAC_ADDR_EXTRACT_MASK  (0xFFF)
+#define  SXE_MAC_ADDR_SHIFT		 (5)
+#define  SXE_MAC_ADDR_REG_MASK	  (0x7F)
+#define  SXE_MAC_ADDR_BIT_MASK	  (0x1F)
 
-#define  SXE_VFT_TBL_SIZE          (128)   
-#define  SXE_VLAN_ID_SHIFT         (5)     
-#define  SXE_VLAN_ID_REG_MASK      (0x7F)  
-#define  SXE_VLAN_ID_BIT_MASK      (0x1F)  
+#define  SXE_VFT_TBL_SIZE		  (128)
+#define  SXE_VLAN_ID_SHIFT		 (5)
+#define  SXE_VLAN_ID_REG_MASK	  (0x7F)
+#define  SXE_VLAN_ID_BIT_MASK	  (0x1F)
 
-#define SXE_TX_PBSIZE_MAX    0x00028000 
-#define SXE_TX_PKT_SIZE_MAX  0xA        
-#define SXE_NODCB_TX_PKT_SIZE_MAX 0x14 
+#define SXE_TX_PBSIZE_MAX	0x00028000
+#define SXE_TX_PKT_SIZE_MAX  0xA
+#define SXE_NODCB_TX_PKT_SIZE_MAX 0x14
 #define SXE_RING_ENABLE_WAIT_LOOP 10
 
-#define VFTA_BLOCK_SIZE 		8
-#define VF_BLOCK_BITS 			(32)
+#define VFTA_BLOCK_SIZE		 8
+#define VF_BLOCK_BITS		   (32)
 #define SXE_MAX_MAC_HDR_LEN		127
 #define SXE_MAX_NETWORK_HDR_LEN		511
 #define SXE_MAC_ADDR_LEN		6
 
-#define SXE_FNAV_BUCKET_HASH_KEY    0x3DAD14E2
+#define SXE_FNAV_BUCKET_HASH_KEY	0x3DAD14E2
 #define SXE_FNAV_SAMPLE_HASH_KEY 0x174D3614
 #define SXE_SAMPLE_COMMON_HASH_KEY \
 		(SXE_FNAV_BUCKET_HASH_KEY & SXE_FNAV_SAMPLE_HASH_KEY)
@@ -202,11 +202,11 @@ enum sxe_mc_filter_type {
 #define SXE_SAMPLE_VLAN_MASK		0xEFFF
 #define SXE_SAMPLE_FLEX_BYTES_MASK	0xFFFF
 
-#define SXE_FNAV_INIT_DONE_POLL               10
-#define SXE_FNAV_DROP_QUEUE                   127
+#define SXE_FNAV_INIT_DONE_POLL			   10
+#define SXE_FNAV_DROP_QUEUE				   127
 
-#define MAX_TRAFFIC_CLASS        8
-#define DEF_TRAFFIC_CLASS        1
+#define MAX_TRAFFIC_CLASS		8
+#define DEF_TRAFFIC_CLASS		1
 
 #define SXE_LINK_SPEED_UNKNOWN   0
 #define SXE_LINK_SPEED_10_FULL   0x0002
@@ -245,176 +245,176 @@ enum sxe_sample_type {
 };
 
 enum {
-	SXE_DIAG_TEST_PASSED                = 0,
-	SXE_DIAG_TEST_BLOCKED               = 1,
-	SXE_DIAG_STATS_REG_TEST_ERR         = 2,
-	SXE_DIAG_REG_PATTERN_TEST_ERR       = 3,
-	SXE_DIAG_CHECK_REG_TEST_ERR         = 4,
-	SXE_DIAG_DISABLE_IRQ_TEST_ERR       = 5,
-	SXE_DIAG_ENABLE_IRQ_TEST_ERR        = 6,
+	SXE_DIAG_TEST_PASSED				= 0,
+	SXE_DIAG_TEST_BLOCKED			   = 1,
+	SXE_DIAG_STATS_REG_TEST_ERR		 = 2,
+	SXE_DIAG_REG_PATTERN_TEST_ERR	   = 3,
+	SXE_DIAG_CHECK_REG_TEST_ERR		 = 4,
+	SXE_DIAG_DISABLE_IRQ_TEST_ERR	   = 5,
+	SXE_DIAG_ENABLE_IRQ_TEST_ERR		= 6,
 	SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR = 7,
-	SXE_DIAG_TX_RING_CONFIGURE_ERR      = 8,
-	SXE_DIAG_RX_RING_CONFIGURE_ERR      = 9,
-	SXE_DIAG_ALLOC_SKB_ERR              = 10,
-	SXE_DIAG_LOOPBACK_SEND_TEST_ERR     = 11,
-	SXE_DIAG_LOOPBACK_RECV_TEST_ERR     = 12,
+	SXE_DIAG_TX_RING_CONFIGURE_ERR	  = 8,
+	SXE_DIAG_RX_RING_CONFIGURE_ERR	  = 9,
+	SXE_DIAG_ALLOC_SKB_ERR			  = 10,
+	SXE_DIAG_LOOPBACK_SEND_TEST_ERR	 = 11,
+	SXE_DIAG_LOOPBACK_RECV_TEST_ERR	 = 12,
 };
 
-#define SXE_RXD_STAT_DD       0x01    
-#define SXE_RXD_STAT_EOP      0x02    
-#define SXE_RXD_STAT_FLM      0x04    
-#define SXE_RXD_STAT_VP       0x08    
-#define SXE_RXDADV_NEXTP_MASK   0x000FFFF0 
+#define SXE_RXD_STAT_DD	   0x01
+#define SXE_RXD_STAT_EOP	  0x02
+#define SXE_RXD_STAT_FLM	  0x04
+#define SXE_RXD_STAT_VP	   0x08
+#define SXE_RXDADV_NEXTP_MASK   0x000FFFF0
 #define SXE_RXDADV_NEXTP_SHIFT  0x00000004
-#define SXE_RXD_STAT_UDPCS    0x10    
-#define SXE_RXD_STAT_L4CS     0x20    
-#define SXE_RXD_STAT_IPCS     0x40    
-#define SXE_RXD_STAT_PIF      0x80    
-#define SXE_RXD_STAT_CRCV     0x100   
-#define SXE_RXD_STAT_OUTERIPCS  0x100 
-#define SXE_RXD_STAT_VEXT     0x200   
-#define SXE_RXD_STAT_UDPV     0x400   
-#define SXE_RXD_STAT_DYNINT   0x800   
-#define SXE_RXD_STAT_LLINT    0x800   
-#define SXE_RXD_STAT_TSIP     0x08000 
-#define SXE_RXD_STAT_TS       0x10000 
-#define SXE_RXD_STAT_SECP     0x20000 
-#define SXE_RXD_STAT_LB       0x40000 
-#define SXE_RXD_STAT_ACK      0x8000  
-#define SXE_RXD_ERR_CE        0x01    
-#define SXE_RXD_ERR_LE        0x02    
-#define SXE_RXD_ERR_PE        0x08    
-#define SXE_RXD_ERR_OSE       0x10    
-#define SXE_RXD_ERR_USE       0x20    
-#define SXE_RXD_ERR_TCPE      0x40    
-#define SXE_RXD_ERR_IPE       0x80    
-#define SXE_RXDADV_ERR_MASK           0xfff00000 
-#define SXE_RXDADV_ERR_SHIFT          20         
-#define SXE_RXDADV_ERR_OUTERIPER	0x04000000 
-#define SXE_RXDADV_ERR_FCEOFE         0x80000000 
-#define SXE_RXDADV_ERR_FCERR          0x00700000 
-#define SXE_RXDADV_ERR_FNAV_LEN       0x00100000 
-#define SXE_RXDADV_ERR_FNAV_DROP      0x00200000 
-#define SXE_RXDADV_ERR_FNAV_COLL      0x00400000 
-#define SXE_RXDADV_ERR_HBO    0x00800000 
-#define SXE_RXDADV_ERR_CE     0x01000000 
-#define SXE_RXDADV_ERR_LE     0x02000000 
-#define SXE_RXDADV_ERR_PE     0x08000000 
-#define SXE_RXDADV_ERR_OSE    0x10000000 
-#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL  0x08000000 
-#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH    0x10000000 
+#define SXE_RXD_STAT_UDPCS	0x10
+#define SXE_RXD_STAT_L4CS	 0x20
+#define SXE_RXD_STAT_IPCS	 0x40
+#define SXE_RXD_STAT_PIF	  0x80
+#define SXE_RXD_STAT_CRCV	 0x100
+#define SXE_RXD_STAT_OUTERIPCS  0x100
+#define SXE_RXD_STAT_VEXT	 0x200
+#define SXE_RXD_STAT_UDPV	 0x400
+#define SXE_RXD_STAT_DYNINT   0x800
+#define SXE_RXD_STAT_LLINT	0x800
+#define SXE_RXD_STAT_TSIP	 0x08000
+#define SXE_RXD_STAT_TS	   0x10000
+#define SXE_RXD_STAT_SECP	 0x20000
+#define SXE_RXD_STAT_LB	   0x40000
+#define SXE_RXD_STAT_ACK	  0x8000
+#define SXE_RXD_ERR_CE		0x01
+#define SXE_RXD_ERR_LE		0x02
+#define SXE_RXD_ERR_PE		0x08
+#define SXE_RXD_ERR_OSE	   0x10
+#define SXE_RXD_ERR_USE	   0x20
+#define SXE_RXD_ERR_TCPE	  0x40
+#define SXE_RXD_ERR_IPE	   0x80
+#define SXE_RXDADV_ERR_MASK		   0xfff00000
+#define SXE_RXDADV_ERR_SHIFT		  20
+#define SXE_RXDADV_ERR_OUTERIPER	0x04000000
+#define SXE_RXDADV_ERR_FCEOFE		 0x80000000
+#define SXE_RXDADV_ERR_FCERR		  0x00700000
+#define SXE_RXDADV_ERR_FNAV_LEN	   0x00100000
+#define SXE_RXDADV_ERR_FNAV_DROP	  0x00200000
+#define SXE_RXDADV_ERR_FNAV_COLL	  0x00400000
+#define SXE_RXDADV_ERR_HBO	0x00800000
+#define SXE_RXDADV_ERR_CE	 0x01000000
+#define SXE_RXDADV_ERR_LE	 0x02000000
+#define SXE_RXDADV_ERR_PE	 0x08000000
+#define SXE_RXDADV_ERR_OSE	0x10000000
+#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL  0x08000000
+#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH	0x10000000
 #define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED   0x18000000
-#define SXE_RXDADV_ERR_USE    0x20000000 
-#define SXE_RXDADV_ERR_L4E    0x40000000 
-#define SXE_RXDADV_ERR_IPE    0x80000000 
-#define SXE_RXD_VLAN_ID_MASK  0x0FFF  
-#define SXE_RXD_PRI_MASK      0xE000  
-#define SXE_RXD_PRI_SHIFT     13
-#define SXE_RXD_CFI_MASK      0x1000  
-#define SXE_RXD_CFI_SHIFT     12
-#define SXE_RXDADV_LROCNT_MASK        0x001E0000
-#define SXE_RXDADV_LROCNT_SHIFT       17
-
-#define SXE_RXDADV_STAT_DD            SXE_RXD_STAT_DD  
-#define SXE_RXDADV_STAT_EOP           SXE_RXD_STAT_EOP 
-#define SXE_RXDADV_STAT_FLM           SXE_RXD_STAT_FLM 
-#define SXE_RXDADV_STAT_VP            SXE_RXD_STAT_VP  
-#define SXE_RXDADV_STAT_MASK          0x000fffff 
-#define SXE_RXDADV_STAT_TS		0x00010000 
-#define SXE_RXDADV_STAT_SECP		0x00020000 
-
-#define SXE_RXDADV_PKTTYPE_NONE       0x00000000
-#define SXE_RXDADV_PKTTYPE_IPV4       0x00000010 
-#define SXE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 
-#define SXE_RXDADV_PKTTYPE_IPV6       0x00000040 
-#define SXE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 
-#define SXE_RXDADV_PKTTYPE_TCP        0x00000100 
-#define SXE_RXDADV_PKTTYPE_UDP        0x00000200 
-#define SXE_RXDADV_PKTTYPE_SCTP       0x00000400 
-#define SXE_RXDADV_PKTTYPE_NFS        0x00000800 
-#define SXE_RXDADV_PKTTYPE_VXLAN      0x00000800 
-#define SXE_RXDADV_PKTTYPE_TUNNEL     0x00010000 
-#define SXE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 
-#define SXE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 
-#define SXE_RXDADV_PKTTYPE_LINKSEC    0x00004000 
-#define SXE_RXDADV_PKTTYPE_ETQF       0x00008000 
-#define SXE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 
-#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4          
+#define SXE_RXDADV_ERR_USE	0x20000000
+#define SXE_RXDADV_ERR_L4E	0x40000000
+#define SXE_RXDADV_ERR_IPE	0x80000000
+#define SXE_RXD_VLAN_ID_MASK  0x0FFF
+#define SXE_RXD_PRI_MASK	  0xE000
+#define SXE_RXD_PRI_SHIFT	 13
+#define SXE_RXD_CFI_MASK	  0x1000
+#define SXE_RXD_CFI_SHIFT	 12
+#define SXE_RXDADV_LROCNT_MASK		0x001E0000
+#define SXE_RXDADV_LROCNT_SHIFT	   17
+
+#define SXE_RXDADV_STAT_DD			SXE_RXD_STAT_DD
+#define SXE_RXDADV_STAT_EOP		   SXE_RXD_STAT_EOP
+#define SXE_RXDADV_STAT_FLM		   SXE_RXD_STAT_FLM
+#define SXE_RXDADV_STAT_VP			SXE_RXD_STAT_VP
+#define SXE_RXDADV_STAT_MASK		  0x000fffff
+#define SXE_RXDADV_STAT_TS		0x00010000
+#define SXE_RXDADV_STAT_SECP		0x00020000
+
+#define SXE_RXDADV_PKTTYPE_NONE	   0x00000000
+#define SXE_RXDADV_PKTTYPE_IPV4	   0x00000010
+#define SXE_RXDADV_PKTTYPE_IPV4_EX	0x00000020
+#define SXE_RXDADV_PKTTYPE_IPV6	   0x00000040
+#define SXE_RXDADV_PKTTYPE_IPV6_EX	0x00000080
+#define SXE_RXDADV_PKTTYPE_TCP		0x00000100
+#define SXE_RXDADV_PKTTYPE_UDP		0x00000200
+#define SXE_RXDADV_PKTTYPE_SCTP	   0x00000400
+#define SXE_RXDADV_PKTTYPE_NFS		0x00000800
+#define SXE_RXDADV_PKTTYPE_VXLAN	  0x00000800
+#define SXE_RXDADV_PKTTYPE_TUNNEL	 0x00010000
+#define SXE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000
+#define SXE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000
+#define SXE_RXDADV_PKTTYPE_LINKSEC	0x00004000
+#define SXE_RXDADV_PKTTYPE_ETQF	   0x00008000
+#define SXE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070
+#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4
 
 struct sxe_mac_stats {
-	u64 crcerrs;           
-	u64 errbc;             
-	u64 rlec;              
-	u64 prc64;             
-	u64 prc127;            
-	u64 prc255;            
-	u64 prc511;            
-	u64 prc1023;           
-	u64 prc1522;           
-	u64 gprc;              
-	u64 bprc;              
-	u64 mprc;              
-	u64 gptc;              
-	u64 gorc;              
-	u64 gotc;              
-	u64 ruc;               
-	u64 rfc;               
-	u64 roc;               
-	u64 rjc;               
-	u64 tor;               
-	u64 tpr;               
-	u64 tpt;               
-	u64 ptc64;             
-	u64 ptc127;            
-	u64 ptc255;            
-	u64 ptc511;            
-	u64 ptc1023;           
-	u64 ptc1522;           
-	u64 mptc;              
-	u64 bptc;              
-	u64 qprc[16];          
-	u64 qptc[16];          
-	u64 qbrc[16];          
-	u64 qbtc[16];          
-	u64 qprdc[16];         
-	u64 dburxtcin[8];      
-	u64 dburxtcout[8];     
-	u64 dburxgdreecnt[8];  
-	u64 dburxdrofpcnt[8];  
-	u64 dbutxtcin[8];      
-	u64 dbutxtcout[8];     
-	u64 rxdgpc;            
-	u64 rxdgbc;            
-	u64 rxddpc;            
-	u64 rxddbc;            
-	u64 rxtpcing;          
-	u64 rxtpceng;          
-	u64 rxlpbkpc;          
-	u64 rxlpbkbc;          
-	u64 rxdlpbkpc;         
-	u64 rxdlpbkbc;         
-	u64 prddc;             
-	u64 txdgpc;            
-	u64 txdgbc;            
-	u64 txswerr;           
-	u64 txswitch;          
-	u64 txrepeat;          
-	u64 txdescerr;         
-
-	u64 fnavadd;           
-	u64 fnavrmv;           
-	u64 fnavadderr;        
-	u64 fnavrmverr;        
-	u64 fnavmatch;         
-	u64 fnavmiss;          
-	u64 hw_rx_no_dma_resources; 
-	u64 prcpf[8];          
-	u64 pfct[8];           
-	u64 mpc[8];            
-
-	u64 total_tx_pause;    
-	u64 total_gptc;        
-	u64 total_gotc;        
+	u64 crcerrs;
+	u64 errbc;
+	u64 rlec;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 tor;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 qprc[16];
+	u64 qptc[16];
+	u64 qbrc[16];
+	u64 qbtc[16];
+	u64 qprdc[16];
+	u64 dburxtcin[8];
+	u64 dburxtcout[8];
+	u64 dburxgdreecnt[8];
+	u64 dburxdrofpcnt[8];
+	u64 dbutxtcin[8];
+	u64 dbutxtcout[8];
+	u64 rxdgpc;
+	u64 rxdgbc;
+	u64 rxddpc;
+	u64 rxddbc;
+	u64 rxtpcing;
+	u64 rxtpceng;
+	u64 rxlpbkpc;
+	u64 rxlpbkbc;
+	u64 rxdlpbkpc;
+	u64 rxdlpbkbc;
+	u64 prddc;
+	u64 txdgpc;
+	u64 txdgbc;
+	u64 txswerr;
+	u64 txswitch;
+	u64 txrepeat;
+	u64 txdescerr;
+
+	u64 fnavadd;
+	u64 fnavrmv;
+	u64 fnavadderr;
+	u64 fnavrmverr;
+	u64 fnavmatch;
+	u64 fnavmiss;
+	u64 hw_rx_no_dma_resources;
+	u64 prcpf[8];
+	u64 pfct[8];
+	u64 mpc[8];
+
+	u64 total_tx_pause;
+	u64 total_gptc;
+	u64 total_gotc;
 };
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
@@ -440,16 +440,16 @@ struct sxe_fivetuple_filter_info {
 };
 
 struct sxe_fivetuple_node_info {
-	u16 index;  
-	u16 queue;  
+	u16 index;
+	u16 queue;
 	struct sxe_fivetuple_filter_info filter_info;
 };
 #endif
 
 union sxe_fnav_rule_info {
 	struct {
-		u8     vm_pool;
-		u8     flow_type;
+		u8	 vm_pool;
+		u8	 flow_type;
 		__be16 vlan_id;
 		__be32 dst_ip[4];
 		__be32 src_ip[4];
@@ -480,21 +480,21 @@ void sxe_hw_ops_init(struct sxe_hw *hw);
 
 
 struct sxe_reg_info {
-	u32 addr;        
-	u32 count;       
-	u32 stride;      
-	const s8 *name;  
+	u32 addr;
+	u32 count;
+	u32 stride;
+	const s8 *name;
 };
 
 struct sxe_setup_operations {
-	s32  (*reset)(struct sxe_hw *);
-	void (*pf_rst_done_set)(struct sxe_hw *);
-	void (*no_snoop_disable)(struct sxe_hw *);
-	u32  (*reg_read)(struct sxe_hw *, u32);
-	void (*reg_write)(struct sxe_hw *, u32, u32);
-	void (*regs_dump)(struct sxe_hw *);
-	void (*regs_flush)(struct sxe_hw *);
-	s32  (*regs_test)(struct sxe_hw *);
+	s32  (*reset)(struct sxe_hw *hw);
+	void (*pf_rst_done_set)(struct sxe_hw *hw);
+	void (*no_snoop_disable)(struct sxe_hw *hw);
+	u32  (*reg_read)(struct sxe_hw *hw, u32 reg);
+	void (*reg_write)(struct sxe_hw *hw, u32 reg, u32 val);
+	void (*regs_dump)(struct sxe_hw *hw);
+	void (*regs_flush)(struct sxe_hw *hw);
+	s32  (*regs_test)(struct sxe_hw *hw);
 };
 
 struct sxe_hw_setup {
@@ -503,20 +503,22 @@ struct sxe_hw_setup {
 
 struct sxe_irq_operations {
 	u32  (*pending_irq_read_clear)(struct sxe_hw *hw);
-	void (*pending_irq_write_clear)(struct sxe_hw * hw, u32 value);
+	void (*pending_irq_write_clear)(struct sxe_hw *hw, u32 value);
 	void (*irq_general_reg_set)(struct sxe_hw *hw, u32 value);
 	u32  (*irq_general_reg_get)(struct sxe_hw *hw);
 	void (*ring_irq_auto_disable)(struct sxe_hw *hw, bool is_misx);
 	void (*set_eitrsel)(struct sxe_hw *hw, u32 value);
-	void (*ring_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx, u32 interval);
-	void (*event_irq_interval_set)(struct sxe_hw * hw, u16 irq_idx, u32 value);
+	void (*ring_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx,
+			u32 interval);
+	void (*event_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx,
+			u32 value);
 	void (*event_irq_auto_clear_set)(struct sxe_hw *hw, u32 value);
 	void (*ring_irq_map)(struct sxe_hw *hw, bool is_tx,
-                                u16 reg_idx, u16 irq_idx);
+							u16 reg_idx, u16 irq_idx);
 	void (*event_irq_map)(struct sxe_hw *hw, u8 offset, u16 irq_idx);
-	void (*ring_irq_enable)(struct sxe_hw * hw, u64 qmask);
-	u32  (*irq_cause_get)(struct sxe_hw * hw);
-	void (*event_irq_trigger)(struct sxe_hw * hw);
+	void (*ring_irq_enable)(struct sxe_hw *hw, u64 qmask);
+	u32  (*irq_cause_get)(struct sxe_hw *hw);
+	void (*event_irq_trigger)(struct sxe_hw *hw);
 	void (*ring_irq_trigger)(struct sxe_hw *hw, u64 eics);
 	void (*specific_irq_disable)(struct sxe_hw *hw, u32 value);
 	void (*specific_irq_enable)(struct sxe_hw *hw, u32 value);
@@ -530,26 +532,26 @@ struct sxe_irq_info {
 };
 
 struct sxe_mac_operations {
-	bool (*link_up_1g_check)(struct sxe_hw *);
-	bool (*link_state_is_up)(struct sxe_hw *);
-	u32  (*link_speed_get)(struct sxe_hw *);
-	void (*link_speed_set)(struct sxe_hw *, u32 speed);
-	void (*pad_enable)(struct sxe_hw *);
-	s32  (*fc_enable)(struct sxe_hw *);
-	void (*crc_configure)(struct sxe_hw *);
-	void (*loopback_switch)(struct sxe_hw *, bool);
+	bool (*link_up_1g_check)(struct sxe_hw *hw);
+	bool (*link_state_is_up)(struct sxe_hw *hw);
+	u32  (*link_speed_get)(struct sxe_hw *hw);
+	void (*link_speed_set)(struct sxe_hw *hw, u32 speed);
+	void (*pad_enable)(struct sxe_hw *hw);
+	s32  (*fc_enable)(struct sxe_hw *hw);
+	void (*crc_configure)(struct sxe_hw *hw);
+	void (*loopback_switch)(struct sxe_hw *hw, bool val);
 	void (*txrx_enable)(struct sxe_hw *hw);
-	void (*max_frame_set)(struct sxe_hw *, u32);
-	u32  (*max_frame_get)(struct sxe_hw *);
-	void (*fc_autoneg_localcap_set)(struct sxe_hw *);
-	void (*fc_tc_high_water_mark_set)(struct sxe_hw *, u8, u32);
-	void (*fc_tc_low_water_mark_set)(struct sxe_hw *, u8, u32);
-	void (*fc_param_init)(struct sxe_hw *);
-	enum sxe_fc_mode (*fc_current_mode_get)(struct sxe_hw *);
-	enum sxe_fc_mode (*fc_requested_mode_get)(struct sxe_hw *);
-	void (*fc_requested_mode_set)(struct sxe_hw *, enum sxe_fc_mode);
-	bool (*is_fc_autoneg_disabled)(struct sxe_hw *);
-	void (*fc_autoneg_disable_set)(struct sxe_hw *, bool);
+	void (*max_frame_set)(struct sxe_hw *hw, u32 val);
+	u32  (*max_frame_get)(struct sxe_hw *hw);
+	void (*fc_autoneg_localcap_set)(struct sxe_hw *hw);
+	void (*fc_tc_high_water_mark_set)(struct sxe_hw *hw, u8 tc_idx, u32 val);
+	void (*fc_tc_low_water_mark_set)(struct sxe_hw *hw, u8 tc_idx, u32 val);
+	void (*fc_param_init)(struct sxe_hw *hw);
+	enum sxe_fc_mode (*fc_current_mode_get)(struct sxe_hw *hw);
+	enum sxe_fc_mode (*fc_requested_mode_get)(struct sxe_hw *hw);
+	void (*fc_requested_mode_set)(struct sxe_hw *hw, enum sxe_fc_mode e);
+	bool (*is_fc_autoneg_disabled)(struct sxe_hw *hw);
+	void (*fc_autoneg_disable_set)(struct sxe_hw *hw, bool val);
 };
 
 #define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
@@ -562,25 +564,26 @@ struct sxe_mac_info {
 };
 
 struct sxe_filter_mac_operations {
-	u32 (*rx_mode_get)(struct sxe_hw *);
-	void (*rx_mode_set)(struct sxe_hw *, u32);
-	u32 (*pool_rx_mode_get)(struct sxe_hw *, u16);
-	void (*pool_rx_mode_set)(struct sxe_hw *, u32, u16);
-	void (*rx_lro_enable) (struct sxe_hw *, bool);
-	void (*rx_udp_frag_checksum_disable) (struct sxe_hw *);
-	s32  (*uc_addr_add)(struct sxe_hw *, u32, u8 *, u32);
-	s32  (*uc_addr_del)(struct sxe_hw *, u32);
-	void (*uc_addr_clear)(struct sxe_hw *);
+	u32 (*rx_mode_get)(struct sxe_hw *hw);
+	void (*rx_mode_set)(struct sxe_hw *hw, u32 filter_ctrl);
+	u32 (*pool_rx_mode_get)(struct sxe_hw *hw, u16 idx);
+	void (*pool_rx_mode_set)(struct sxe_hw *hw, u32 vmolr, u16 idx);
+	void (*rx_lro_enable)(struct sxe_hw *hw, bool is_enable);
+	void (*rx_udp_frag_checksum_disable)(struct sxe_hw *hw);
+	s32  (*uc_addr_add)(struct sxe_hw *hw, u32 rar_idx,
+							u8 *addr, u32 pool_idx);
+	s32  (*uc_addr_del)(struct sxe_hw *hw, u32 idx);
+	void (*uc_addr_clear)(struct sxe_hw *hw);
 	void (*mta_hash_table_set)(struct sxe_hw *hw, u8 index, u32 value);
 	void (*mta_hash_table_update)(struct sxe_hw *hw, u8 reg_idx, u8 bit_idx);
 	void (*fc_mac_addr_set)(struct sxe_hw *hw, u8 *mac_addr);
 
-	void (*mc_filter_enable)(struct sxe_hw *);
+	void (*mc_filter_enable)(struct sxe_hw *hw);
 
 	void (*mc_filter_disable)(struct sxe_hw *hw);
 
-	void (*rx_nfs_filter_disable)(struct sxe_hw *);
-	void (*ethertype_filter_set)(struct sxe_hw *, u8, u32);
+	void (*rx_nfs_filter_disable)(struct sxe_hw *hw);
+	void (*ethertype_filter_set)(struct sxe_hw *hw, u8 filter_type, u32 val);
 
 	void (*vt_ctrl_configure)(struct sxe_hw *hw, u8 num_vfs);
 
@@ -600,16 +603,17 @@ struct sxe_filter_mac {
 };
 
 struct sxe_filter_vlan_operations {
-	u32 (*pool_filter_read)(struct sxe_hw *, u16);
-	void (*pool_filter_write)(struct sxe_hw *, u16, u32);
-	u32 (*pool_filter_bitmap_read)(struct sxe_hw *, u16);
-	void (*pool_filter_bitmap_write)(struct sxe_hw *, u16, u32);
-	void (*filter_array_write)(struct sxe_hw *, u16, u32);
-	u32  (*filter_array_read)(struct sxe_hw *, u16);
-	void (*filter_array_clear)(struct sxe_hw *);
-	void (*filter_switch)(struct sxe_hw *,bool);
-	void (*untagged_pkts_rcv_switch)(struct sxe_hw *, u32, bool);
-	s32  (*filter_configure)(struct sxe_hw *, u32, u32, bool, bool);
+	u32 (*pool_filter_read)(struct sxe_hw *hw, u16 reg_idx);
+	void (*pool_filter_write)(struct sxe_hw *hw, u16 reg_idx, u32 val);
+	u32 (*pool_filter_bitmap_read)(struct sxe_hw *hw, u16 reg_idx);
+	void (*pool_filter_bitmap_write)(struct sxe_hw *hw, u16 reg_idx, u32 val);
+	void (*filter_array_write)(struct sxe_hw *hw, u16 reg_idx, u32 val);
+	u32  (*filter_array_read)(struct sxe_hw *hw, u16 reg_idx);
+	void (*filter_array_clear)(struct sxe_hw *hw);
+	void (*filter_switch)(struct sxe_hw *hw, bool enable);
+	void (*untagged_pkts_rcv_switch)(struct sxe_hw *hw, u32 vf, bool accept);
+	s32  (*filter_configure)(struct sxe_hw *hw, u32 vid, u32 pool,
+								bool vlan_on, bool vlvf_bypass);
 };
 
 struct sxe_filter_vlan {
@@ -622,55 +626,63 @@ struct sxe_filter_info {
 };
 
 struct sxe_dbu_operations {
-	void (*rx_pkt_buf_size_configure)(struct sxe_hw *, u8, u32, u16);
-	void (*rx_pkt_buf_switch)(struct sxe_hw *, bool);
-	void (*rx_multi_ring_configure)(struct sxe_hw *, u8, bool, bool);
-	void (*rss_key_set_all)(struct sxe_hw *, u32 *);
-	void (*rss_redir_tbl_set_all)(struct sxe_hw *, u8 *);
-	void (*rx_cap_switch_on)(struct sxe_hw *);
-	void (*rss_hash_pkt_type_set)(struct sxe_hw *, u32);
-	void (*rss_hash_pkt_type_update)(struct sxe_hw *, u32);
-	void (*rss_rings_used_set)(struct sxe_hw *, u32, u16, u16);
-	void (*lro_ack_switch)(struct sxe_hw *, bool);
-	void (*vf_rx_switch)(struct sxe_hw *, u32, u32, bool);
-
-	s32  (*fnav_mode_init)(struct sxe_hw *, u32, u32);
-	s32  (*fnav_specific_rule_mask_set)(struct sxe_hw *,
-						union sxe_fnav_rule_info *);
-	s32  (*fnav_specific_rule_add)(struct sxe_hw *,
-						union sxe_fnav_rule_info *,
-						u16, u8);
-	s32  (*fnav_specific_rule_del)(struct sxe_hw *,
-					  union sxe_fnav_rule_info *, u16);
-	s32  (*fnav_sample_hash_cmd_get)(struct sxe_hw *,
-						u8, u32, u8, u64 *);
+	void (*rx_pkt_buf_size_configure)(struct sxe_hw *hw, u8 num_pb,
+			u32 headroom, u16 strategy);
+	void (*rx_pkt_buf_switch)(struct sxe_hw *hw, bool is_on);
+	void (*rx_multi_ring_configure)(struct sxe_hw *hw, u8 tcs,
+			bool is_4q, bool sriov_enable);
+	void (*rss_key_set_all)(struct sxe_hw *hw, u32 *rss_key);
+	void (*rss_redir_tbl_set_all)(struct sxe_hw *hw, u8 *redir_tbl);
+	void (*rx_cap_switch_on)(struct sxe_hw *hw);
+	void (*rss_hash_pkt_type_set)(struct sxe_hw *hw, u32 version);
+	void (*rss_hash_pkt_type_update)(struct sxe_hw *hw, u32 version);
+	void (*rss_rings_used_set)(struct sxe_hw *hw, u32 rss_num,
+			u16 pool, u16 pf_offset);
+	void (*lro_ack_switch)(struct sxe_hw *hw, bool is_on);
+	void (*vf_rx_switch)(struct sxe_hw *hw, u32 reg_offset,
+			u32 vf_index, bool is_off);
+
+	s32  (*fnav_mode_init)(struct sxe_hw *hw, u32 fnavctrl, u32 fnav_mode);
+	s32  (*fnav_specific_rule_mask_set)(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *mask);
+	s32  (*fnav_specific_rule_add)(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input,
+						u16 soft_id, u8 queue);
+	s32  (*fnav_specific_rule_del)(struct sxe_hw *hw,
+					  union sxe_fnav_rule_info *input, u16 soft_id);
+	s32  (*fnav_sample_hash_cmd_get)(struct sxe_hw *hw,
+						u8 flow_type, u32 hash_value,
+						u8 queue, u64 *hash_cmd);
 	void (*fnav_sample_stats_reinit)(struct sxe_hw *hw);
 	void (*fnav_sample_hash_set)(struct sxe_hw *hw, u64 hash);
-	s32  (*fnav_single_sample_rule_del)(struct sxe_hw *,u32);
-
-	void (*ptp_init)(struct sxe_hw *);
-	void (*ptp_freq_adjust)(struct sxe_hw *, u32);
-	void (*ptp_systime_init)(struct sxe_hw *);
-	u64  (*ptp_systime_get)(struct sxe_hw *);
-	void (*ptp_tx_timestamp_get)(struct sxe_hw *, u32 *ts_sec, u32 *ts_ns);
-	void (*ptp_timestamp_mode_set)(struct sxe_hw *, bool, u32, u32);
-	void (*ptp_rx_timestamp_clear)(struct sxe_hw *);
-	u64  (*ptp_rx_timestamp_get)(struct sxe_hw *);
-	bool (*ptp_is_rx_timestamp_valid)(struct sxe_hw *);
-	void (*ptp_timestamp_enable)(struct sxe_hw *);
-
-	void (*tx_pkt_buf_switch)(struct sxe_hw *, bool);
+	s32  (*fnav_single_sample_rule_del)(struct sxe_hw *hw, u32 hash);
+
+	void (*ptp_init)(struct sxe_hw *hw);
+	void (*ptp_freq_adjust)(struct sxe_hw *hw, u32 adj_freq);
+	void (*ptp_systime_init)(struct sxe_hw *hw);
+	u64  (*ptp_systime_get)(struct sxe_hw *hw);
+	void (*ptp_tx_timestamp_get)(struct sxe_hw *hw, u32 *ts_sec, u32 *ts_ns);
+	void (*ptp_timestamp_mode_set)(struct sxe_hw *hw, bool is_l2,
+			u32 tsctl, u32 tses);
+	void (*ptp_rx_timestamp_clear)(struct sxe_hw *hw);
+	u64  (*ptp_rx_timestamp_get)(struct sxe_hw *hw);
+	bool (*ptp_is_rx_timestamp_valid)(struct sxe_hw *hw);
+	void (*ptp_timestamp_enable)(struct sxe_hw *hw);
+
+	void (*tx_pkt_buf_switch)(struct sxe_hw *hw, bool is_on);
 
 	void (*dcb_tc_rss_configure)(struct sxe_hw *hw, u16 rss_i);
 
-	void (*tx_pkt_buf_size_configure)(struct sxe_hw *, u8);
+	void (*tx_pkt_buf_size_configure)(struct sxe_hw *hw, u8 num_pb);
 
-	void (*rx_cap_switch_off)(struct sxe_hw *);
-	u32  (*rx_pkt_buf_size_get)(struct sxe_hw *, u8);
+	void (*rx_cap_switch_off)(struct sxe_hw *hw);
+	u32  (*rx_pkt_buf_size_get)(struct sxe_hw *hw, u8 pb);
 	void (*rx_func_switch_on)(struct sxe_hw *hw);
 
-	void (*tx_ring_disable)(struct sxe_hw *, u8, unsigned long);
-	void (*rx_ring_disable)(struct sxe_hw *, u8, unsigned long);
+	void (*tx_ring_disable)(struct sxe_hw *hw, u8 reg_idx,
+			unsigned long timeout);
+	void (*rx_ring_disable)(struct sxe_hw *hw, u8 reg_idx,
+			unsigned long timeout);
 
 	u32  (*tx_dbu_fc_status_get)(struct sxe_hw *hw);
 };
@@ -681,44 +693,50 @@ struct sxe_dbu_info {
 
 
 struct sxe_dma_operations {
-	void (*rx_dma_ctrl_init)(struct sxe_hw *, bool);
-	void (*rx_ring_disable)(struct sxe_hw *, u8);
-	void (*rx_ring_switch)(struct sxe_hw *, u8, bool);
-	void (*rx_ring_switch_not_polling)(struct sxe_hw *, u8, bool);
-	void (*rx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8);
-	void (*rx_desc_thresh_set)(struct sxe_hw *, u8);
-	void (*rx_rcv_ctl_configure)(struct sxe_hw *, u8, u32, u32);
-	void (*rx_lro_ctl_configure)(struct sxe_hw *, u8, u32);
-	u32  (*rx_desc_ctrl_get)(struct sxe_hw *, u8);
-	void (*rx_dma_lro_ctl_set)(struct sxe_hw *);
-	void (*rx_drop_switch)(struct sxe_hw *, u8, bool);
+	void (*rx_dma_ctrl_init)(struct sxe_hw *hw);
+	void (*rx_ring_disable)(struct sxe_hw *hw, u8 ring_idx);
+	void (*rx_ring_switch)(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+	void (*rx_ring_switch_not_polling)(struct sxe_hw *hw, u8 reg_idx,
+										bool is_on);
+	void (*rx_ring_desc_configure)(struct sxe_hw *hw, u32 desc_mem_len,
+			u64 desc_dma_addr, u8 reg_idx);
+	void (*rx_desc_thresh_set)(struct sxe_hw *hw, u8 reg_idx);
+	void (*rx_rcv_ctl_configure)(struct sxe_hw *hw, u8 reg_idx,
+			u32 header_buf_len, u32 pkg_buf_len);
+	void (*rx_lro_ctl_configure)(struct sxe_hw *hw, u8 reg_idx, u32 max_desc);
+	u32  (*rx_desc_ctrl_get)(struct sxe_hw *hw, u8 reg_idx);
+	void (*rx_dma_lro_ctl_set)(struct sxe_hw *hw);
+	void (*rx_drop_switch)(struct sxe_hw *hw, u8 idx, bool is_enable);
 	void (*rx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);
 
-	void (*tx_enable)(struct sxe_hw *);
-	void (*tx_multi_ring_configure)(struct sxe_hw *, u8, u16, bool, u16);
-	void (*tx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8);
-	void (*tx_desc_thresh_set)(struct sxe_hw *, u8, u32, u32, u32);
-	void (*tx_ring_switch)(struct sxe_hw *, u8, bool);
-	void (*tx_ring_switch_not_polling)(struct sxe_hw *, u8, bool);
-	void (*tx_pkt_buf_thresh_configure)(struct sxe_hw *, u8, bool);
-	u32  (*tx_desc_ctrl_get)(struct sxe_hw *, u8);
-	void (*tx_ring_info_get)(struct sxe_hw *, u8, u32 *, u32 *);
-	void (*tx_desc_wb_thresh_clear)(struct sxe_hw *, u8);
-
-	void (*vlan_tag_strip_switch)(struct sxe_hw *, u16, bool);
-	void (*tx_vlan_tag_set)(struct sxe_hw *, u16, u16, u32);
-	void (*tx_vlan_tag_clear)(struct sxe_hw *, u32);
+	void (*tx_enable)(struct sxe_hw *hw);
+	void (*tx_multi_ring_configure)(struct sxe_hw *hw, u8 tcs, u16 pool_mask,
+			bool sriov_enable, u16 max_txq);
+	void (*tx_ring_desc_configure)(struct sxe_hw *hw, u32 desc_mem_len,
+			u64 desc_dma_addr, u8 reg_idx);
+	void (*tx_desc_thresh_set)(struct sxe_hw *hw, u8 reg_idx, u32 wb_thresh,
+			u32 host_thresh, u32 prefech_thresh);
+	void (*tx_ring_switch)(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+	void (*tx_ring_switch_not_polling)(struct sxe_hw *hw, u8 reg_idx, bool is_on);
+	void (*tx_pkt_buf_thresh_configure)(struct sxe_hw *hw, u8 num_pb, bool dcb_enable);
+	u32  (*tx_desc_ctrl_get)(struct sxe_hw *hw, u8 reg_idx);
+	void (*tx_ring_info_get)(struct sxe_hw *hw, u8 idx, u32 *head, u32 *tail);
+	void (*tx_desc_wb_thresh_clear)(struct sxe_hw *hw, u8 reg_idx);
+
+	void (*vlan_tag_strip_switch)(struct sxe_hw *hw, u16 reg_index, bool is_enable);
+	void (*tx_vlan_tag_set)(struct sxe_hw *hw, u16 vid, u16 qos, u32 vf);
+	void (*tx_vlan_tag_clear)(struct sxe_hw *hw, u32 vf);
 	void (*tx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);
 
 	void (*tph_switch)(struct sxe_hw *hw, bool is_enable);
 
 	void  (*dcb_rx_bw_alloc_configure)(struct sxe_hw *hw,
-				      u16 *refill,
-				      u16 *max,
-				      u8 *bwg_id,
-				      u8 *prio_type,
-				      u8 *prio_tc,
-				      u8 max_priority);
+					  u16 *refill,
+					  u16 *max,
+					  u8 *bwg_id,
+					  u8 *prio_type,
+					  u8 *prio_tc,
+					  u8 max_priority);
 	void  (*dcb_tx_desc_bw_alloc_configure)(struct sxe_hw *hw,
 					   u16 *refill,
 					   u16 *max,
@@ -766,7 +784,8 @@ struct sxe_dma_info {
 struct sxe_sec_operations {
 	void (*ipsec_rx_ip_store)(struct sxe_hw *hw, __be32 *ip_addr, u8 ip_len, u8 ip_idx);
 	void (*ipsec_rx_spi_store)(struct sxe_hw *hw, __be32 spi, u8 ip_idx, u16 idx);
-	void (*ipsec_rx_key_store)(struct sxe_hw *hw, u32 *key,  u8 key_len, u32 salt, u32 mode, u16 idx);
+	void (*ipsec_rx_key_store)(struct sxe_hw *hw, u32 *key,  u8 key_len,
+			u32 salt, u32 mode, u16 idx);
 	void (*ipsec_tx_key_store)(struct sxe_hw *hw, u32 *key,  u8 key_len, u32 salt, u16 idx);
 	void (*ipsec_sec_data_stop)(struct sxe_hw *hw, bool is_linkup);
 	void (*ipsec_engine_start)(struct sxe_hw *hw, bool is_linkup);
@@ -780,12 +799,12 @@ struct sxe_sec_info {
 };
 
 struct sxe_stat_operations {
-	void (*stats_clear)(struct sxe_hw *);
-	void (*stats_get)(struct sxe_hw *, struct sxe_mac_stats *);
+	void (*stats_clear)(struct sxe_hw *hw);
+	void (*stats_get)(struct sxe_hw *hw, struct sxe_mac_stats *st);
 
 	u32 (*tx_packets_num_get)(struct sxe_hw *hw);
 	u32 (*unsecurity_packets_num_get)(struct sxe_hw *hw);
-	u32  (*mac_stats_dump)(struct sxe_hw *, u32 *, u32);
+	u32  (*mac_stats_dump)(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size);
 	u32  (*tx_dbu_to_mac_stats)(struct sxe_hw *hw);
 };
 
@@ -807,20 +826,20 @@ struct sxe_mbx_operations {
 };
 
 struct sxe_mbx_stats {
-	u32 send_msgs; 
-	u32 rcv_msgs;  
+	u32 send_msgs;
+	u32 rcv_msgs;
 
-	u32 reqs;      
-	u32 acks;      
-	u32 rsts;      
+	u32 reqs;
+	u32 acks;
+	u32 rsts;
 };
 
 struct sxe_mbx_info {
-	const struct sxe_mbx_operations *ops; 
-	struct sxe_mbx_stats stats; 
-	u32 retry;    
-	u32 interval; 
-	u32 msg_len; 
+	const struct sxe_mbx_operations *ops;
+	struct sxe_mbx_stats stats;
+	u32 retry;
+	u32 interval;
+	u32 msg_len;
 };
 
 struct sxe_pcie_operations {
@@ -828,7 +847,7 @@ struct sxe_pcie_operations {
 };
 
 struct sxe_pcie_info {
-	const struct sxe_pcie_operations *ops; 
+	const struct sxe_pcie_operations *ops;
 };
 
 enum sxe_hw_state {
@@ -845,40 +864,41 @@ enum sxe_fc_mode {
 };
 
 struct sxe_fc_info {
-	u32 high_water[MAX_TRAFFIC_CLASS]; 
-	u32 low_water[MAX_TRAFFIC_CLASS]; 
-	u16 pause_time; 
-	bool strict_ieee; 
-	bool disable_fc_autoneg; 
-	u16 send_xon; 
-	enum sxe_fc_mode current_mode; 
-	enum sxe_fc_mode requested_mode; 
+	u32 high_water[MAX_TRAFFIC_CLASS];
+	u32 low_water[MAX_TRAFFIC_CLASS];
+	u16 pause_time;
+	bool strict_ieee;
+	bool disable_fc_autoneg;
+	u16 send_xon;
+	enum sxe_fc_mode current_mode;
+	enum sxe_fc_mode requested_mode;
 };
 
 struct sxe_fc_nego_mode {
-	u32 adv_sym; 
-	u32 adv_asm; 
-	u32 lp_sym;  
-	u32 lp_asm;  
+	u32 adv_sym;
+	u32 adv_asm;
+	u32 lp_sym;
+	u32 lp_asm;
 
 };
 
 struct sxe_hdc_operations {
-	s32 (*pf_lock_get)(struct sxe_hw *, u32);
-	void (*pf_lock_release)(struct sxe_hw *, u32);
-	bool (*is_fw_over_set)(struct sxe_hw *);
-	u32 (*fw_ack_header_rcv)(struct sxe_hw *);
-	void (*packet_send_done)(struct sxe_hw *);
-	void (*packet_header_send)(struct sxe_hw *, u32);
-	void (*packet_data_dword_send)(struct sxe_hw *, u16, u32);
-	u32  (*packet_data_dword_rcv)(struct sxe_hw *, u16);
-	u32 (*fw_status_get)(struct sxe_hw *);
-	void (*drv_status_set)(struct sxe_hw *, u32);
-	u32 (*irq_event_get)(struct sxe_hw *);
-	void (*irq_event_clear)(struct sxe_hw *, u32);
-	void (*fw_ov_clear)(struct sxe_hw *);
-	u32 (*channel_state_get)(struct sxe_hw *);
-	void (*resource_clean)(struct sxe_hw *);
+	s32 (*pf_lock_get)(struct sxe_hw *hw, u32 trylock);
+	void (*pf_lock_release)(struct sxe_hw *hw, u32 retry_cnt);
+	bool (*is_fw_over_set)(struct sxe_hw *hw);
+	u32 (*fw_ack_header_rcv)(struct sxe_hw *hw);
+	void (*packet_send_done)(struct sxe_hw *hw);
+	void (*packet_header_send)(struct sxe_hw *hw, u32 value);
+	void (*packet_data_dword_send)(struct sxe_hw *hw,
+									u16 dword_index, u32 value);
+	u32  (*packet_data_dword_rcv)(struct sxe_hw *hw, u16 dword_index);
+	u32 (*fw_status_get)(struct sxe_hw *hw);
+	void (*drv_status_set)(struct sxe_hw *hw, u32 value);
+	u32 (*irq_event_get)(struct sxe_hw *hw);
+	void (*irq_event_clear)(struct sxe_hw *hw, u32 event);
+	void (*fw_ov_clear)(struct sxe_hw *hw);
+	u32 (*channel_state_get)(struct sxe_hw *hw);
+	void (*resource_clean)(struct sxe_hw *hw);
 };
 
 struct sxe_hdc_info {
@@ -901,29 +921,29 @@ struct sxe_phy_reg_info {
 };
 
 struct sxe_hw {
-	u8 __iomem *reg_base_addr;            
+	u8 __iomem *reg_base_addr;
 
 	void *adapter;
 	void *priv;
-	unsigned long state;   
+	unsigned long state;
 	void (*fault_handle)(void *priv);
 	u32 (*reg_read)(const volatile void *reg);
 	void (*reg_write)(u32 value, volatile void *reg);
 
-	struct sxe_hw_setup  setup;           
-	struct sxe_irq_info  irq;             
-	struct sxe_mac_info  mac;             
-	struct sxe_filter_info filter;        
-	struct sxe_dbu_info  dbu;             
-	struct sxe_dma_info  dma;             
-	struct sxe_sec_info  sec;             
-	struct sxe_stat_info stat;            
+	struct sxe_hw_setup  setup;
+	struct sxe_irq_info  irq;
+	struct sxe_mac_info  mac;
+	struct sxe_filter_info filter;
+	struct sxe_dbu_info  dbu;
+	struct sxe_dma_info  dma;
+	struct sxe_sec_info  sec;
+	struct sxe_stat_info stat;
 	struct sxe_fc_info   fc;
 
-	struct sxe_mbx_info mbx;              
-	struct sxe_pcie_info pcie;            
-	struct sxe_hdc_info  hdc;             
-	struct sxe_phy_reg_info phy;          
+	struct sxe_mbx_info mbx;
+	struct sxe_pcie_info pcie;
+	struct sxe_hdc_info  hdc;
+	struct sxe_phy_reg_info phy;
 };
 
 u16 sxe_mac_reg_num_get(void);
@@ -951,7 +971,6 @@ static inline void sxe_hw_fault_handle_init(struct sxe_hw *hw,
 	hw->priv = priv;
 	hw->fault_handle = handle;
 
-	return;
 }
 
 static inline void sxe_hw_reg_handle_init(struct sxe_hw *hw,
@@ -961,10 +980,11 @@ static inline void sxe_hw_reg_handle_init(struct sxe_hw *hw,
 	hw->reg_read  = read;
 	hw->reg_write = write;
 
-	return;
 }
 
-#ifdef SXE_DPDK 
+#ifdef SXE_DPDK
+
+void sxe_hw_crc_strip_config(struct sxe_hw *hw, bool keep_crc);
 
 void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats);
 
@@ -1117,12 +1137,12 @@ void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field);
 
 void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl);
 
-u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16);
+u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx);
 
 void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
 						u16 reg_idx, u32 value);
 
-void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw, bool crc_strip_on);
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw);
 
 void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame);
 
@@ -1291,12 +1311,12 @@ void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
 					u8 tc_count, bool vmdq_active);
 
 void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
-				      u16 *refill,
-				      u16 *max,
-				      u8 *bwg_id,
-				      u8 *prio_type,
-				      u8 *prio_tc,
-				      u8 max_priority);
+					  u16 *refill,
+					  u16 *max,
+					  u8 *bwg_id,
+					  u8 *prio_type,
+					  u8 *prio_tc,
+					  u8 max_priority);
 
 void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
 					   u16 *refill,
@@ -1411,12 +1431,12 @@ s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass);
 
 u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index);
 
-void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 idx,u32 lsb, u32 msb);
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
 
-void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 idx,u32 lsb, u32 msb);
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
 
 void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
-				    u8 mirror_type, u8 dst_pool, bool on);
+					u8 mirror_type, u8 dst_pool, bool on);
 
 void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id);
 
@@ -1470,7 +1490,7 @@ void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority);
 void sxe_hw_syn_filter_del(struct sxe_hw *hw);
 
 void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key);
-#endif 
+#endif
 
 void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl);
 
@@ -1493,7 +1513,7 @@ void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
 u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask);
 
 s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
-				    union sxe_fnav_rule_info *input_mask);
+					union sxe_fnav_rule_info *input_mask);
 
 s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
 					u32 vid, u32 pool,
@@ -1501,5 +1521,5 @@ s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
 
 void sxe_hw_ptp_systime_init(struct sxe_hw *hw);
 
-#endif 
+#endif
 #endif
diff --git a/drivers/net/sxe/base/sxe_logs.h b/drivers/net/sxe/base/sxe_logs.h
index 510d7aae5c..81088c2fc8 100644
--- a/drivers/net/sxe/base/sxe_logs.h
+++ b/drivers/net/sxe/base/sxe_logs.h
@@ -11,9 +11,9 @@
 
 #include "sxe_types.h"
 
-#define LOG_FILE_NAME_LEN     256
-#define LOG_FILE_PATH         "/var/log/"
-#define LOG_FILE_PREFIX       "sxepmd.log"
+#define LOG_FILE_NAME_LEN	 256
+#define LOG_FILE_PATH		 "/var/log/"
+#define LOG_FILE_PREFIX	   "sxepmd.log"
 
 extern s32 sxe_log_init;
 extern s32 sxe_log_rx;
@@ -36,9 +36,9 @@ extern s32 sxe_log_hw;
 		gettimeofday(&tv, NULL); \
 		td = localtime(&tv.tv_sec); \
 		strftime(log_time, sizeof(log_time), "%Y-%m-%d-%H:%M:%S", td); \
-	} while(0)
+	} while (0)
 
-#define filename_printf(x) strrchr((x),'/')?strrchr((x),'/')+1:(x)
+#define filename_printf(x) (strrchr((x), '/')?strrchr((x), '/')+1:(x))
 
 #ifdef SXE_DPDK_DEBUG
 #define PMD_LOG_DEBUG(logtype, fmt, ...) \
@@ -50,7 +50,7 @@ extern s32 sxe_log_hw;
 			"DEBUG", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_INFO(logtype, fmt, ...) \
 	do { \
@@ -61,7 +61,7 @@ extern s32 sxe_log_hw;
 			"INFO", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_NOTICE(logtype, fmt, ...) \
 	do { \
@@ -72,7 +72,7 @@ extern s32 sxe_log_hw;
 			"NOTICE", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_WARN(logtype, fmt, ...) \
 	do { \
@@ -83,7 +83,7 @@ extern s32 sxe_log_hw;
 			"WARN", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_ERR(logtype, fmt, ...) \
 	do { \
@@ -94,7 +94,7 @@ extern s32 sxe_log_hw;
 			"ERR", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_CRIT(logtype, fmt, ...) \
 	do { \
@@ -105,7 +105,7 @@ extern s32 sxe_log_hw;
 			"CRIT", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_ALERT(logtype, fmt, ...) \
 	do { \
@@ -116,7 +116,7 @@ extern s32 sxe_log_hw;
 			"ALERT", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define PMD_LOG_EMERG(logtype, fmt, ...) \
 	do { \
@@ -127,56 +127,40 @@ extern s32 sxe_log_hw;
 			"EMERG", log_time, pthread_self(), \
 			filename_printf(__FILE__), __LINE__, \
 			__func__, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #else
 #define PMD_LOG_DEBUG(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_DEBUG, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_INFO(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_INFO, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_NOTICE(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_NOTICE, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_WARN(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_WARNING, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_ERR(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_ERR, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_CRIT(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_CRIT, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_ALERT(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_ALERT, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #define PMD_LOG_EMERG(logtype, fmt, ...) \
-	do { \
 		rte_log(RTE_LOG_EMERG, logtype, "%s(): " \
-			fmt "\n", __func__, ##__VA_ARGS__); \
-	} while(0)
+			fmt "\n", __func__, ##__VA_ARGS__)
 
 #endif
 
@@ -184,54 +168,38 @@ extern s32 sxe_log_hw;
 
 #ifdef SXE_DPDK_DEBUG
 #define LOG_DEBUG(fmt, ...) \
-	do { \
-		PMD_LOG_DEBUG(DRV, fmt, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_DEBUG(DRV, fmt, ##__VA_ARGS__)
 
 #define LOG_INFO(fmt, ...) \
-	do { \
-		PMD_LOG_INFO(DRV, fmt, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_INFO(DRV, fmt, ##__VA_ARGS__)
 
 #define LOG_WARN(fmt, ...) \
-	do { \
-		PMD_LOG_WARN(DRV, fmt, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_WARN(DRV, fmt, ##__VA_ARGS__)
 
 #define LOG_ERROR(fmt, ...) \
-	do { \
-		PMD_LOG_ERR(DRV, fmt, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_ERR(DRV, fmt, ##__VA_ARGS__)
 
 #define LOG_DEBUG_BDF(fmt, ...) \
-	do { \
-		PMD_LOG_DEBUG(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_DEBUG(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
 
 #define LOG_INFO_BDF(fmt, ...) \
-	do { \
-		PMD_LOG_INFO(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_INFO(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
 
 #define LOG_WARN_BDF(fmt, ...) \
-	do { \
-		PMD_LOG_WARN(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_WARN(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
 
 #define LOG_ERROR_BDF(fmt, ...) \
-	do { \
-		PMD_LOG_ERR(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__); \
-	   } while(0)
+		PMD_LOG_ERR(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
 
 #else
 #define LOG_DEBUG(fmt, ...)
 #define LOG_INFO(fmt, ...)
 #define LOG_WARN(fmt, ...)
 #define LOG_ERROR(fmt, ...)
-#define LOG_DEBUG_BDF(fmt, ...) do { UNUSED(adapter); } while(0)
-#define LOG_INFO_BDF(fmt, ...)  do { UNUSED(adapter); } while(0)
-#define LOG_WARN_BDF(fmt, ...)  do { UNUSED(adapter); } while(0)
-#define LOG_ERROR_BDF(fmt, ...) do { UNUSED(adapter); } while(0)
+#define LOG_DEBUG_BDF(fmt, ...) UNUSED(adapter)
+#define LOG_INFO_BDF(fmt, ...) UNUSED(adapter)
+#define LOG_WARN_BDF(fmt, ...) UNUSED(adapter)
+#define LOG_ERROR_BDF(fmt, ...) UNUSED(adapter)
 #endif
 
 #ifdef SXE_DPDK_DEBUG
@@ -239,61 +207,61 @@ extern s32 sxe_log_hw;
 	do { \
 		UNUSED(adapter); \
 		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_DEV_INFO(fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_DEV_WARN(fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_DEV_ERR(fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_MSG_DEBUG(msglvl, fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_MSG_INFO(msglvl, fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_MSG_WARN(msglvl, fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #define LOG_MSG_ERR(msglvl, fmt, ...) \
 	do { \
 		UNUSED(adapter); \
 		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
-	} while(0)
+	} while (0)
 
 #else
-#define LOG_DEV_DEBUG(fmt, ...) do { UNUSED(adapter); } while(0)
-#define LOG_DEV_INFO(fmt, ...)  do { UNUSED(adapter); } while(0)
-#define LOG_DEV_WARN(fmt, ...)  do { UNUSED(adapter); } while(0)
-#define LOG_DEV_ERR(fmt, ...)   do { UNUSED(adapter); } while(0)
-#define LOG_MSG_DEBUG(msglvl, fmt, ...) do { UNUSED(adapter); } while(0)
-#define LOG_MSG_INFO(msglvl, fmt, ...)  do { UNUSED(adapter); } while(0)
-#define LOG_MSG_WARN(msglvl, fmt, ...)  do { UNUSED(adapter); } while(0)
-#define LOG_MSG_ERR(msglvl, fmt, ...)   do { UNUSED(adapter); } while(0)
+#define LOG_DEV_DEBUG(fmt, ...) UNUSED(adapter)
+#define LOG_DEV_INFO(fmt, ...) UNUSED(adapter)
+#define LOG_DEV_WARN(fmt, ...) UNUSED(adapter)
+#define LOG_DEV_ERR(fmt, ...) UNUSED(adapter)
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) UNUSED(adapter)
+#define LOG_MSG_INFO(msglvl, fmt, ...) UNUSED(adapter)
+#define LOG_MSG_WARN(msglvl, fmt, ...) UNUSED(adapter)
+#define LOG_MSG_ERR(msglvl, fmt, ...) UNUSED(adapter)
 #endif
 
 void sxe_log_stream_init(void);
 
-#endif 
+#endif
diff --git a/drivers/net/sxe/base/sxe_offload_common.c b/drivers/net/sxe/base/sxe_offload_common.c
index a7075b4669..b8d7597b84 100644
--- a/drivers/net/sxe/base/sxe_offload_common.c
+++ b/drivers/net/sxe/base/sxe_offload_common.c
@@ -30,18 +30,17 @@ u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev)
 	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
 		   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
 		   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
-		   RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+		   RTE_ETH_RX_OFFLOAD_KEEP_CRC	|
 #ifdef DEV_RX_JUMBO_FRAME
 		   DEV_RX_OFFLOAD_JUMBO_FRAME |
 #endif
 		   RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
-		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | 
+		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
 		   RTE_ETH_RX_OFFLOAD_SCATTER |
 		   RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-	if (!RTE_ETH_DEV_SRIOV(dev).active) {
+	if (!RTE_ETH_DEV_SRIOV(dev).active)
 		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
-	}
 
 	return rx_offload_capa;
 }
@@ -57,7 +56,7 @@ u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev)
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
-		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO	 |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
 		RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
 
diff --git a/drivers/net/sxe/base/sxe_queue_common.c b/drivers/net/sxe/base/sxe_queue_common.c
index eda73c3f79..6f6ba98dbe 100644
--- a/drivers/net/sxe/base/sxe_queue_common.c
+++ b/drivers/net/sxe/base/sxe_queue_common.c
@@ -46,7 +46,6 @@ static void sxe_tx_queues_clear(struct rte_eth_dev *dev)
 		}
 	}
 
-	return;
 }
 
 static void sxe_rx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
@@ -62,7 +61,6 @@ static void sxe_rx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_all
 		}
 	}
 
-	return;
 }
 
 s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
@@ -90,7 +88,7 @@ s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
 	if (desc_num % SXE_RX_DESC_RING_ALIGN != 0 ||
 			(desc_num > SXE_MAX_RING_DESC) ||
 			(desc_num < SXE_MIN_RING_DESC)) {
-		PMD_LOG_ERR(INIT, "desc_num %u error",desc_num);
+		PMD_LOG_ERR(INIT, "desc_num %u error", desc_num);
 		ret = -EINVAL;
 		goto l_end;
 	}
@@ -115,11 +113,10 @@ s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
 	rxq->reg_idx = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = RTE_ETHER_CRC_LEN;
-	} else {
+	else
 		rxq->crc_len = 0;
-	}
 
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->deferred_start = rx_conf->rx_deferred_start;
@@ -140,13 +137,12 @@ s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
 
 	memset(rx_mz->addr, 0, SXE_RX_RING_SIZE);
 
-	if (is_vf) {
+	if (is_vf)
 		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
 			SXE_VFRDT(rxq->reg_idx));
-	} else {
+	else
 		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
 			SXE_RDT(rxq->reg_idx));
-	}
 
 	rxq->base_addr = rx_mz->iova;
 
@@ -160,9 +156,8 @@ s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
 	}
 
 	len = desc_num;
-	if (*rx_setup->rx_batch_alloc_allowed) {
+	if (*rx_setup->rx_batch_alloc_allowed)
 		len += RTE_PMD_SXE_MAX_RX_BURST;
-	}
 
 	rxq->buffer_ring = rte_zmalloc_socket("rxq->sw_ring",
 					  sizeof(struct sxe_rx_buffer) * len,
@@ -186,21 +181,21 @@ s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
 	}
 
 	PMD_LOG_DEBUG(INIT, "buffer_ring=%p sc_buffer_ring=%p desc_ring=%p "
-			    "dma_addr=0x%"SXE_PRIX64,
-		     rxq->buffer_ring, rxq->sc_buffer_ring, rxq->desc_ring,
-		     rxq->base_addr);
+				"dma_addr=0x%"SXE_PRIX64,
+			 rxq->buffer_ring, rxq->sc_buffer_ring, rxq->desc_ring,
+			 rxq->base_addr);
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 	if (!rte_is_power_of_2(desc_num)) {
 		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't meet Vector Rx "
-				    "preconditions - canceling the feature for "
-				    "the whole port[%d]",
-			     rxq->queue_id, rxq->port_id);
-		if (is_vf) {
+					"preconditions - canceling the feature for "
+					"the whole port[%d]",
+				 rxq->queue_id, rxq->port_id);
+		if (is_vf)
 			vf_adapter->rx_vec_allowed = false;
-		} else {
+		else
 			pf_adapter->rx_vec_allowed = false;
-		}
+
 	} else {
 		sxe_rxq_vec_setup(rxq);
 	}
@@ -245,29 +240,29 @@ int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf)
 		goto l_end;
 	}
 
-	txq->ops               = sxe_tx_default_ops_get();
-	txq->ring_depth        = ring_depth;
-	txq->queue_idx         = tx_queue_id;
-	txq->port_id           = dev->data->port_id;
-	txq->pthresh           = tx_conf->tx_thresh.pthresh;
-	txq->hthresh           = tx_conf->tx_thresh.hthresh;
-	txq->wthresh           = tx_conf->tx_thresh.wthresh;
-	txq->rs_thresh         = rs_thresh;
-	txq->free_thresh       = free_thresh;
+	txq->ops			   = sxe_tx_default_ops_get();
+	txq->ring_depth		= ring_depth;
+	txq->queue_idx		 = tx_queue_id;
+	txq->port_id		   = dev->data->port_id;
+	txq->pthresh		   = tx_conf->tx_thresh.pthresh;
+	txq->hthresh		   = tx_conf->tx_thresh.hthresh;
+	txq->wthresh		   = tx_conf->tx_thresh.wthresh;
+	txq->rs_thresh		 = rs_thresh;
+	txq->free_thresh	   = free_thresh;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
-	txq->reg_idx           = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+	txq->reg_idx		   = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		tx_queue_id : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + tx_queue_id);
-	txq->offloads          = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+	txq->offloads		  = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
-	if (is_vf) {
-		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr + SXE_VFTDT(txq->reg_idx));
-	} else {
-		txq->tdt_reg_addr = (u32 *)(tx_setup->reg_base_addr + SXE_TDT(txq->reg_idx));
-	}
+	if (is_vf)
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+								SXE_VFTDT(txq->reg_idx));
+	else
+		txq->tdt_reg_addr = (u32 *)(tx_setup->reg_base_addr +
+								SXE_TDT(txq->reg_idx));
 
 	PMD_LOG_INFO(INIT, "buffer_ring=%p desc_ring=%p dma_addr=0x%"PRIx64,
-		     txq->buffer_ring, txq->desc_ring,
-		     (long unsigned int)txq->base_addr);
+			 txq->buffer_ring, txq->desc_ring, (u64)txq->base_addr);
 	sxe_tx_function_set(dev, txq);
 
 	txq->ops->init(txq);
@@ -294,7 +289,6 @@ void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
 	qinfo->conf.rx_deferred_start = rxq->deferred_start;
 	qinfo->conf.offloads = rxq->offloads;
 
-	return;
 }
 
 void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
@@ -313,23 +307,22 @@ void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
 	q_info->conf.offloads = txq->offloads;
 	q_info->conf.tx_deferred_start = txq->tx_deferred_start;
 
-	return;
 }
 
 s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
 {
 	int ret;
 	struct sxe_tx_queue *txq = (struct sxe_tx_queue *)tx_queue;
-	if (txq->offloads == 0 && \
+	if (txq->offloads == 0 &&
 		txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST) {
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
 #ifndef DPDK_19_11_6
-		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
 #endif
-		    (rte_eal_process_type() != RTE_PROC_PRIMARY ||
-		    txq->buffer_ring_vec != NULL)) {
-		    ret = sxe_tx_done_cleanup_vec(txq, free_cnt);
+			(rte_eal_process_type() != RTE_PROC_PRIMARY ||
+			txq->buffer_ring_vec != NULL)) {
+			ret = sxe_tx_done_cleanup_vec(txq, free_cnt);
 		} else{
 			ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
 		}
@@ -357,7 +350,7 @@ s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_LOG_ERR(DRV, "rx mbuf alloc failed queue_id=%u",
-					(unsigned) rxq->queue_id);
+					(u16)rxq->queue_id);
 			ret = -ENOMEM;
 			goto l_end;
 		}
@@ -386,7 +379,6 @@ void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq)
 		rte_memzone_free(rxq->mz);
 		rte_free(rxq);
 	}
-	return;
 }
 
 void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq)
@@ -398,7 +390,6 @@ void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq)
 		rte_free(txq);
 	}
 
-	return;
 }
 
 void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
@@ -409,12 +400,11 @@ void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_a
 
 	sxe_rx_queues_clear(dev, rx_batch_alloc_allowed);
 
-	return;
 }
 
 void __sxe_queues_free(struct rte_eth_dev *dev)
 {
-	unsigned i;
+	unsigned int i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -430,10 +420,10 @@ void __sxe_queues_free(struct rte_eth_dev *dev)
 	}
 	dev->data->nb_tx_queues = 0;
 
-	return;
 }
 
-void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
 {
 	struct sxe_tx_queue *txq;
 	if (eth_dev->data->tx_queues) {
@@ -441,10 +431,9 @@ void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_
 		sxe_tx_function_set(eth_dev, txq);
 	} else {
 		PMD_LOG_NOTICE(INIT, "No TX queues configured yet. "
-			     "Using default TX function.");
+				 "Using default TX function.");
 	}
 
 	sxe_rx_function_set(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
-	return;
 }
 
diff --git a/drivers/net/sxe/base/sxe_queue_common.h b/drivers/net/sxe/base/sxe_queue_common.h
index a38113b643..40867449db 100644
--- a/drivers/net/sxe/base/sxe_queue_common.h
+++ b/drivers/net/sxe/base/sxe_queue_common.h
@@ -15,9 +15,9 @@
 #define RTE_PMD_SXE_MAX_RX_BURST 32
 
 enum sxe_ctxt_num {
-	SXE_CTXT_DESC_0    = 0, 
-	SXE_CTXT_DESC_1    = 1, 
-	SXE_CTXT_DESC_NUM  = 2, 
+	SXE_CTXT_DESC_0	= 0,
+	SXE_CTXT_DESC_1	= 1,
+	SXE_CTXT_DESC_NUM  = 2,
 };
 
 struct rx_setup {
@@ -42,7 +42,7 @@ struct tx_setup {
 
 union sxe_tx_data_desc {
 	struct {
-		__le64 buffer_addr; 
+		__le64 buffer_addr;
 		__le32 cmd_type_len;
 		__le32 olinfo_status;
 	} read;
@@ -63,142 +63,142 @@ struct sxe_rx_queue_stats {
 
 union sxe_rx_data_desc {
 	struct {
-		__le64 pkt_addr; 
-		__le64 hdr_addr; 
+		__le64 pkt_addr;
+		__le64 hdr_addr;
 	} read;
 	struct {
 		struct {
 			union {
 				__le32 data;
 				struct {
-					__le16 pkt_info; 
-					__le16 hdr_info; 
+					__le16 pkt_info;
+					__le16 hdr_info;
 				} hs_rss;
 			} lo_dword;
 			union {
-				__le32 rss; 
+				__le32 rss;
 				struct {
-					__le16 ip_id; 
-					__le16 csum; 
+					__le16 ip_id;
+					__le16 csum;
 				} csum_ip;
 			} hi_dword;
 		} lower;
 		struct {
-			__le32 status_error; 
-			__le16 length; 
-			__le16 vlan; 
+			__le32 status_error;
+			__le16 length;
+			__le16 vlan;
 		} upper;
 	} wb;
- };
+};
 
 struct sxe_tx_buffer {
-	struct rte_mbuf *mbuf; 
-	u16 next_id;             
-	u16 last_id;             
+	struct rte_mbuf *mbuf;
+	u16 next_id;
+	u16 last_id;
 };
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 struct sxe_tx_buffer_vec {
-	struct rte_mbuf *mbuf; 
+	struct rte_mbuf *mbuf;
 };
 #endif
 
 union sxe_tx_offload {
 	u64 data[2];
 	struct {
-		u64 l2_len:7;     
-		u64 l3_len:9;     
-		u64 l4_len:8;     
-		u64 tso_segsz:16; 
-		u64 vlan_tci:16;  
-
-		u64 outer_l3_len:8; 
-		u64 outer_l2_len:8; 
+		u64 l2_len:7;
+		u64 l3_len:9;
+		u64 l4_len:8;
+		u64 tso_segsz:16;
+		u64 vlan_tci:16;
+
+		u64 outer_l3_len:8;
+		u64 outer_l2_len:8;
 	};
 };
 
 struct sxe_ctxt_info {
-	u64 flags;  
+	u64 flags;
 	union sxe_tx_offload tx_offload;
 	union sxe_tx_offload tx_offload_mask;
 };
 
 struct sxe_tx_queue {
 	volatile union sxe_tx_data_desc *desc_ring;
-	u64             base_addr;          
+	u64			 base_addr;
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 	union {
-		struct sxe_tx_buffer *buffer_ring;          
-		struct sxe_tx_buffer_vec *buffer_ring_vec;  
+		struct sxe_tx_buffer *buffer_ring;
+		struct sxe_tx_buffer_vec *buffer_ring_vec;
 	};
 #else
-	struct sxe_tx_buffer *buffer_ring;	
+	struct sxe_tx_buffer *buffer_ring;
 #endif
-	volatile u32   *tdt_reg_addr;       
-	u16            ring_depth;          
-	u16            next_to_use;         
-	u16            free_thresh;         
-
-	u16            rs_thresh;
-
-	u16            desc_used_num;
-	u16            next_to_clean;  
-	u16            desc_free_num;   
-	u16            next_dd;        
-	u16            next_rs;        
-	u16            queue_idx;      
-	u16            reg_idx;        
-	u16            port_id;        
-	u8             pthresh;        
-	u8             hthresh;        
-
-	u8             wthresh;
-	u64            offloads;       
-	u32            ctx_curr;       
-	struct sxe_ctxt_info ctx_cache[SXE_CTXT_DESC_NUM]; 
-	const struct sxe_txq_ops *ops; 
-	u8     tx_deferred_start;      
+	volatile u32   *tdt_reg_addr;
+	u16			ring_depth;
+	u16			next_to_use;
+	u16			free_thresh;
+
+	u16			rs_thresh;
+
+	u16			desc_used_num;
+	u16			next_to_clean;
+	u16			desc_free_num;
+	u16			next_dd;
+	u16			next_rs;
+	u16			queue_idx;
+	u16			reg_idx;
+	u16			port_id;
+	u8			 pthresh;
+	u8			 hthresh;
+
+	u8			 wthresh;
+	u64			offloads;
+	u32			ctx_curr;
+	struct sxe_ctxt_info ctx_cache[SXE_CTXT_DESC_NUM];
+	const struct sxe_txq_ops *ops;
+	u8	 tx_deferred_start;
 	const struct rte_memzone *mz;
 };
 
 struct sxe_rx_queue {
-	struct rte_mempool  *mb_pool;   
-	volatile union sxe_rx_data_desc *desc_ring; 
-	u64  base_addr;                 
-	volatile u32   *rdt_reg_addr;   
-	struct sxe_rx_buffer *buffer_ring; 
-	struct sxe_rx_buffer *sc_buffer_ring; 
+	struct rte_mempool  *mb_pool;
+	volatile union sxe_rx_data_desc *desc_ring;
+	u64  base_addr;
+	volatile u32   *rdt_reg_addr;
+	struct sxe_rx_buffer *buffer_ring;
+	struct sxe_rx_buffer *sc_buffer_ring;
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
-	struct rte_mbuf *pkt_first_seg; 
-	struct rte_mbuf *pkt_last_seg;  
-	u64    mbuf_init_value;		
-	u8     is_using_sse;		
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
+	u64	mbuf_init_value;
+	u8	 is_using_sse;
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
-	u16    realloc_num; 		
-	u16    realloc_start;		
+	u16	realloc_num;
+	u16	realloc_start;
 #endif
 #endif
-	u16    ring_depth;           
-	u16    processing_idx;     
-	u16    hold_num;            
-	u16    completed_pkts_num;     
-	u16    next_ret_pkg;         
-	u16    batch_alloc_trigger;  
-
-	u16    batch_alloc_size;
-	u16    queue_id;            
-	u16    reg_idx;              
-	u16    pkt_type_mask;        
-	u16    port_id;              
-	u8     crc_len;              
-	u8     drop_en;              
-	u8     deferred_start;       
-	u64    vlan_flags;           
-	u64    offloads;             
-	struct rte_mbuf fake_mbuf;   
+	u16	ring_depth;
+	u16	processing_idx;
+	u16	hold_num;
+	u16	completed_pkts_num;
+	u16	next_ret_pkg;
+	u16	batch_alloc_trigger;
+
+	u16	batch_alloc_size;
+	u16	queue_id;
+	u16	reg_idx;
+	u16	pkt_type_mask;
+	u16	port_id;
+	u8	 crc_len;
+	u8	 drop_en;
+	u8	 deferred_start;
+	u64	vlan_flags;
+	u64	offloads;
+	struct rte_mbuf fake_mbuf;
 	struct rte_mbuf *completed_ring[RTE_PMD_SXE_MAX_RX_BURST * 2];
 	const struct rte_memzone *mz;
-	struct sxe_rx_queue_stats rx_stats;  
+	struct sxe_rx_queue_stats rx_stats;
 };
 
 struct sxe_txq_ops {
@@ -231,6 +231,7 @@ void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_a
 
 void __sxe_queues_free(struct rte_eth_dev *dev);
 
-void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
 
 #endif
diff --git a/drivers/net/sxe/base/sxe_rx_common.c b/drivers/net/sxe/base/sxe_rx_common.c
index 4472058a29..b6ca690ec8 100644
--- a/drivers/net/sxe/base/sxe_rx_common.c
+++ b/drivers/net/sxe/base/sxe_rx_common.c
@@ -39,23 +39,23 @@ static inline void sxe_rx_resource_prefetch(u16 next_idx,
 		rte_sxe_prefetch(&buf_ring[next_idx]);
 	}
 
-	return;
 }
 
-void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
 {
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 	u16  i, is_using_sse;
 
 	if (sxe_rx_vec_condition_check(dev) ||
-	    !rx_batch_alloc_allowed 
+		!rx_batch_alloc_allowed
 #ifndef DPDK_19_11_6
 		|| rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128
 #endif
 		) {
 		PMD_LOG_DEBUG(INIT, "Port[%d] doesn't meet Vector Rx "
-				    "preconditions", dev->data->port_id);
+					"preconditions", dev->data->port_id);
 		*rx_vec_allowed = false;
 	}
 #else
@@ -76,24 +76,31 @@ void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_all
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 		if (*rx_vec_allowed) {
 			PMD_LOG_DEBUG(INIT, "Using Vector Scattered Rx "
-					    "callback (port=%d).",
-				     dev->data->port_id);
+						"callback (port=%d).",
+					 dev->data->port_id);
 
 			dev->rx_pkt_burst = sxe_scattered_pkts_vec_recv;
-		} else
+
 #endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+
+		} else if (rx_batch_alloc_allowed) {
+#else
 		if (rx_batch_alloc_allowed) {
+#endif
+
 			PMD_LOG_DEBUG(INIT, "Using a Scattered with bulk "
 					   "allocation callback (port=%d).",
-				     dev->data->port_id);
+					 dev->data->port_id);
 
 			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
 		} else {
 			PMD_LOG_DEBUG(INIT, "Using Regular (non-vector, "
-					    "single allocation) "
-					    "Scattered Rx callback "
-					    "(port=%d).",
-				     dev->data->port_id);
+						"single allocation) "
+						"Scattered Rx callback "
+						"(port=%d).",
+					 dev->data->port_id);
 
 			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
 		}
@@ -101,17 +108,17 @@ void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_all
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 	else if (*rx_vec_allowed) {
 		PMD_LOG_DEBUG(INIT, "Vector rx enabled, please make sure RX "
-				    "burst size no less than %d (port=%d).",
-			     SXE_DESCS_PER_LOOP,
-			     dev->data->port_id);
+					"burst size no less than %d (port=%d).",
+				 SXE_DESCS_PER_LOOP,
+				 dev->data->port_id);
 
 		dev->rx_pkt_burst = sxe_pkts_vec_recv;
 	}
 #endif
 	else if (rx_batch_alloc_allowed) {
 		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are "
-				    "satisfied. Rx Burst Bulk Alloc function "
-				    "will be used on port=%d.",
+					"satisfied. Rx Burst Bulk Alloc function "
+					"will be used on port=%d.",
 				dev->data->port_id);
 
 		dev->rx_pkt_burst = sxe_batch_alloc_pkts_recv;
@@ -136,7 +143,6 @@ void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_all
 	}
 #endif
 
-	return;
 }
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
@@ -156,9 +162,8 @@ s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset)
 	}
 
 	index = rxq->processing_idx + offset;
-	if (index >= rxq->ring_depth) {
+	if (index >= rxq->ring_depth)
 		index -= rxq->ring_depth;
-	}
 
 	desc = &rxq->desc_ring[index];
 	is_done = !!(desc->wb.upper.status_error &
@@ -198,17 +203,15 @@ s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset)
 	}
 
 	desc = rxq->processing_idx + offset;
-	if (desc >= rxq->ring_depth) {
+	if (desc >= rxq->ring_depth)
 		desc -= rxq->ring_depth;
-	}
 
 	status = &rxq->desc_ring[desc].wb.upper.status_error;
-	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)) {
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))
 		ret =  RTE_ETH_RX_DESC_DONE;
-	}
 
 l_end:
-	LOG_DEBUG("rx queue[%u] get desc status=%d\n",rxq->queue_id, ret);
+	LOG_DEBUG("rx queue[%u] get desc status=%d\n", rxq->queue_id, ret);
 	return ret;
 }
 
@@ -234,23 +237,22 @@ u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
 	while (done_num < pkts_num) {
 		cur_desc = &desc_ring[processing_idx];
 		staterr = cur_desc->wb.upper.status_error;
-		if (!(staterr & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+		if (!(staterr & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)))
 			break;
-		}
 
 		rxd = *cur_desc;
 
 		LOG_DEBUG("port_id=%u queue_id=%u processing_idx=%u "
 			   "staterr=0x%08x pkt_len=%u",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) processing_idx, (unsigned) staterr,
-			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+			   (unsigned int)rxq->port_id, (unsigned int) rxq->queue_id,
+			   (unsigned int)processing_idx, (unsigned int) staterr,
+			   (unsigned int)rte_le_to_cpu_16(rxd.wb.upper.length));
 
 		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (new_mb == NULL) {
 			LOG_ERROR("RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u", (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
+				   "queue_id=%u", (unsigned int) rxq->port_id,
+				   (unsigned int) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -258,9 +260,8 @@ u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
 		hold_num++;
 		cur_buf = &buff_ring[processing_idx];
 		processing_idx++;
-		if (processing_idx == rxq->ring_depth) {
+		if (processing_idx == rxq->ring_depth)
 			processing_idx = 0;
-		}
 
 		sxe_rx_resource_prefetch(processing_idx, buff_ring, desc_ring);
 
@@ -293,9 +294,9 @@ u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (hold_num > rxq->batch_alloc_size) {
 		LOG_DEBUG("port_id=%u queue_id=%u rx_tail=%u "
 			   "num_hold=%u num_done=%u",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) processing_idx, (unsigned) hold_num,
-			   (unsigned) done_num);
+			   (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
+			   (unsigned int)processing_idx, (unsigned int)hold_num,
+			   (unsigned int)done_num);
 		processing_idx = (u16)((processing_idx == 0) ?
 				(rxq->ring_depth - 1) : (processing_idx - 1));
 		SXE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, processing_idx);
@@ -308,7 +309,7 @@ u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
-	const u32 * ptypes = NULL;
+	const u32 *ptypes = NULL;
 	static const u32 ptypes_arr[] = {
 		RTE_PTYPE_L2_ETHER,
 		RTE_PTYPE_L3_IPV4,
@@ -337,7 +338,7 @@ const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 #if defined(RTE_ARCH_X86)
 	if (dev->rx_pkt_burst == sxe_pkts_vec_recv ||
-	    dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv) {
+		dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv) {
 		ptypes = ptypes_arr;
 	}
 #endif
diff --git a/drivers/net/sxe/base/sxe_rx_common.h b/drivers/net/sxe/base/sxe_rx_common.h
index b7eb37f54a..93d2314968 100644
--- a/drivers/net/sxe/base/sxe_rx_common.h
+++ b/drivers/net/sxe/base/sxe_rx_common.h
@@ -7,7 +7,8 @@
 
 #include "sxe_dpdk_version.h"
 
-void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
 s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset);
diff --git a/drivers/net/sxe/base/sxe_tx_common.c b/drivers/net/sxe/base/sxe_tx_common.c
index a47f90109a..e74556866f 100644
--- a/drivers/net/sxe/base/sxe_tx_common.c
+++ b/drivers/net/sxe/base/sxe_tx_common.c
@@ -33,15 +33,13 @@ int __sxe_tx_descriptor_status(void *tx_queue, u16 offset)
 	desc_idx = ((desc_idx + txq->rs_thresh - 1) / txq->rs_thresh) * txq->rs_thresh;
 	if (desc_idx >= txq->ring_depth) {
 		desc_idx -= txq->ring_depth;
-		if (desc_idx >= txq->ring_depth) {
+		if (desc_idx >= txq->ring_depth)
 			desc_idx -= txq->ring_depth;
-		}
 	}
 
 	status = &txq->desc_ring[desc_idx].wb.status;
-	if (*status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD)) {
+	if (*status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD))
 		ret = RTE_ETH_TX_DESC_DONE;
-	}
 
 l_end:
 	return ret;
diff --git a/drivers/net/sxe/base/sxe_types.h b/drivers/net/sxe/base/sxe_types.h
index 966ee230b3..a36a3cfbf6 100644
--- a/drivers/net/sxe/base/sxe_types.h
+++ b/drivers/net/sxe/base/sxe_types.h
@@ -18,7 +18,7 @@
 typedef uint8_t		u8;
 typedef uint16_t	u16;
 typedef uint32_t	u32;
-typedef uint64_t	u64; 
+typedef uint64_t	u64;
 
 typedef char		s8;
 typedef int16_t		s16;
diff --git a/drivers/net/sxe/base/sxevf_hw.c b/drivers/net/sxe/base/sxevf_hw.c
index 75ac9dd25b..5786e28f92 100644
--- a/drivers/net/sxe/base/sxevf_hw.c
+++ b/drivers/net/sxe/base/sxevf_hw.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C), 2022, Linkdata Technology Co., Ltd.
  */
-#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) 
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
 #include <linux/etherdevice.h>
 
 #include "sxevf_hw.h"
@@ -12,7 +12,7 @@
 #include "sxevf_ring.h"
 #include "sxevf.h"
 #include "sxevf_rx_proc.h"
-#else 
+#else
 #include "sxe_errno.h"
 #include "sxe_logs.h"
 #include "sxe_dpdk_version.h"
@@ -28,37 +28,33 @@ struct sxevf_adapter;
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 #define DMA_MASK_NONE	0x0ULL
 
-#define  SXEVF_REG_READ_CNT    5
+#define  SXEVF_REG_READ_CNT	5
 
-#define SXE_REG_READ_FAIL       0xffffffffU
+#define SXE_REG_READ_FAIL	   0xffffffffU
 
-#define SXEVF_RING_WAIT_LOOP                   (100)
-#define SXEVF_MAX_RX_DESC_POLL                 (10)
+#define SXEVF_RING_WAIT_LOOP				   (100)
+#define SXEVF_MAX_RX_DESC_POLL				 (10)
 
 
-#define SXEVF_REG_READ(hw, addr)        sxevf_reg_read(hw, addr)
+#define SXEVF_REG_READ(hw, addr)		sxevf_reg_read(hw, addr)
 #define SXEVF_REG_WRITE(hw, reg, value) sxevf_reg_write(hw, reg, value)
 #define SXEVF_WRITE_FLUSH(a) sxevf_reg_read(a, SXE_VFSTATUS)
 
-#ifndef SXE_DPDK 
+#ifndef SXE_DPDK
 void sxevf_hw_fault_handle(struct sxevf_hw *hw)
 {
 	struct sxevf_adapter *adapter = hw->adapter;
 
-	if (test_bit(SXEVF_HW_FAULT, &hw->state)) {
-		goto l_ret;
-	}
+	if (test_bit(SXEVF_HW_FAULT, &hw->state))
+		return;
 
 	set_bit(SXEVF_HW_FAULT, &hw->state);
 
 	LOG_DEV_ERR("sxe nic hw fault\n");
 
-	if ((hw->fault_handle != NULL) && (hw->priv != NULL) ) {
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL))
 		hw->fault_handle(hw->priv);
-	}
 
-l_ret:
-	return;
 }
 
 static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg)
@@ -68,32 +64,27 @@ static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg)
 	struct sxevf_adapter *adapter = hw->adapter;
 	u8 i;
 
-	if (reg == SXE_VFSTATUS) {
+	if (reg == SXE_VFSTATUS)
 		sxevf_hw_fault_handle(hw);
-		return;
-	}
 
 
 	for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
 		value = hw->reg_read(base_addr + SXE_VFSTATUS);
 
-		if (value != SXEVF_REG_READ_FAIL) {
+		if (value != SXEVF_REG_READ_FAIL)
 			break;
-		}
 
 		mdelay(20);
 	}
 
 	LOG_INFO_BDF("retry done i:%d value:0x%x\n", i, value);
 
-	if (value == SXEVF_REG_READ_FAIL) {
+	if (value == SXEVF_REG_READ_FAIL)
 		sxevf_hw_fault_handle(hw);
-	}
 
-	return;
 }
 
-STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+static u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
 {
 	u32 value;
 	u8  __iomem *base_addr = hw->reg_base_addr;
@@ -105,7 +96,7 @@ STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
 	}
 
 	value = hw->reg_read(base_addr + reg);
-	if (unlikely(SXEVF_REG_READ_FAIL == value)) {
+	if (unlikely(value == SXEVF_REG_READ_FAIL)) {
 		LOG_ERROR_BDF("reg[0x%x] read failed, value=%#x\n", reg, value);
 		sxevf_hw_fault_check(hw, reg);
 	}
@@ -114,29 +105,26 @@ STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
 	return value;
 }
 
-STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+static void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *base_addr = hw->reg_base_addr;
 
-	if (sxevf_is_hw_fault(hw)) {
-		goto l_ret;
-	}
+	if (sxevf_is_hw_fault(hw))
+		return;
 
 	hw->reg_write(value, base_addr + reg);
 
-l_ret:
-	return;
 }
 
-#else 
+#else
 
-STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+static u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
 {
 	u32 i, value;
 	u8  __iomem *base_addr = hw->reg_base_addr;
 
 	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
-	if (unlikely(SXEVF_REG_READ_FAIL == value)) {
+	if (unlikely(value == SXEVF_REG_READ_FAIL)) {
 		for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
 			LOG_ERROR("reg[0x%x] read failed, value=%#x\n",
 							reg, value);
@@ -154,13 +142,12 @@ STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
 	return value;
 }
 
-STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+static void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
 {
 	u8 __iomem *base_addr = hw->reg_base_addr;
 
 	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
 
-	return;
 }
 #endif
 
@@ -190,7 +177,6 @@ void sxevf_hw_stop(struct sxevf_hw *hw)
 		}
 	}
 
-	return;
 }
 
 void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg)
@@ -201,7 +187,6 @@ void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg)
 
 	LOG_DEBUG_BDF("index:%u write mbx mem:0x%x.\n", index, msg);
 
-	return;
 }
 
 u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index)
@@ -222,21 +207,18 @@ u32 sxevf_mailbox_read(struct sxevf_hw *hw)
 void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, value);
-	return;
 }
 
 void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_REQ);
 
-	return;
 }
 
 void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_ACK);
 
-	return;
 }
 
 void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector)
@@ -252,14 +234,12 @@ void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector)
 
 	SXEVF_REG_WRITE(hw, SXE_VFIVAR_MISC, ivar);
 
-	return;
 }
 
 void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFEIMS, value);
 
-	return;
 }
 
 void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask)
@@ -267,7 +247,6 @@ void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask)
 	SXEVF_REG_WRITE(hw, SXE_VFEIAM, mask);
 	SXEVF_REG_WRITE(hw, SXE_VFEIMS, mask);
 
-	return;
 }
 
 void sxevf_irq_disable(struct sxevf_hw *hw)
@@ -277,7 +256,6 @@ void sxevf_irq_disable(struct sxevf_hw *hw)
 
 	SXEVF_WRITE_FLUSH(hw);
 
-	return;
 }
 
 void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector)
@@ -295,7 +273,6 @@ void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16
 
 	SXEVF_REG_WRITE(hw, SXE_VFIVAR(hw_ring_idx >> 1), ivar);
 
-	return;
 }
 
 void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval)
@@ -306,40 +283,36 @@ void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval)
 
 	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), eitr);
 
-	return;
 }
 
 static void sxevf_event_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 value)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), value);
 
-	return;
 }
 
 static void sxevf_pending_irq_clear(struct sxevf_hw *hw)
 {
 	SXEVF_REG_READ(hw, SXE_VFEICR);
 
-	return;
 }
 
 static void sxevf_ring_irq_trigger(struct sxevf_hw *hw, u64 eics)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFEICS, eics);
 
-	return;
 }
 
 static const struct sxevf_irq_operations sxevf_irq_ops = {
 	.ring_irq_interval_set   = sxevf_ring_irq_interval_set,
 	.event_irq_interval_set  = sxevf_event_irq_interval_set,
-	.ring_irq_map         = sxevf_hw_ring_irq_map,
-	.event_irq_map           = sxevf_event_irq_map,
-	.pending_irq_clear       = sxevf_pending_irq_clear,
-	.ring_irq_trigger        = sxevf_ring_irq_trigger,
-	.specific_irq_enable     = sxevf_specific_irq_enable,
-	.irq_enable              = sxevf_irq_enable,
-	.irq_disable             = sxevf_irq_disable,
+	.ring_irq_map		 = sxevf_hw_ring_irq_map,
+	.event_irq_map		   = sxevf_event_irq_map,
+	.pending_irq_clear	   = sxevf_pending_irq_clear,
+	.ring_irq_trigger		= sxevf_ring_irq_trigger,
+	.specific_irq_enable	 = sxevf_specific_irq_enable,
+	.irq_enable			  = sxevf_irq_enable,
+	.irq_disable			 = sxevf_irq_disable,
 };
 
 void sxevf_hw_reset(struct sxevf_hw *hw)
@@ -347,10 +320,9 @@ void sxevf_hw_reset(struct sxevf_hw *hw)
 	SXEVF_REG_WRITE(hw, SXE_VFCTRL, SXE_VFCTRL_RST);
 	SXEVF_WRITE_FLUSH(hw);
 
-	return;
 }
 
-STATIC bool sxevf_hw_rst_done(struct sxevf_hw *hw)
+static bool sxevf_hw_rst_done(struct sxevf_hw *hw)
 {
 	return !(SXEVF_REG_READ(hw, SXE_VFCTRL) & SXE_VFCTRL_RST);
 }
@@ -374,9 +346,8 @@ static u32 sxevf_reg_dump(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size)
 	u32 i;
 	u32 regs_num = buf_size / sizeof(u32);
 
-	for (i = 0; i < regs_num; i++) {
+	for (i = 0; i < regs_num; i++)
 		regs_buff[i] = SXEVF_REG_READ(hw, dump_regs[i]);
-	}
 
 	return i;
 }
@@ -476,7 +447,7 @@ static s32 sxevf_reg_set_and_check(struct sxevf_hw *hw, int reg,
 	return ret;
 }
 
-STATIC s32 sxevf_regs_test(struct sxevf_hw *hw)
+static s32 sxevf_regs_test(struct sxevf_hw *hw)
 {
 	u32 i;
 	s32 ret = 0;
@@ -521,9 +492,8 @@ STATIC s32 sxevf_regs_test(struct sxevf_hw *hw)
 				break;
 			}
 
-			if (ret) {
+			if (ret)
 				goto l_end;
-			}
 
 		}
 		test++;
@@ -545,14 +515,13 @@ static const struct sxevf_setup_operations sxevf_setup_ops = {
 static void sxevf_tx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
 				u64 desc_dma_addr, u8 reg_idx)
 {
-	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr & \
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr &
 						DMA_BIT_MASK(32)));
 	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
 	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
 	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
 	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
 
-	return;
 }
 
 static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx)
@@ -560,7 +529,6 @@ static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx)
 	SXEVF_REG_WRITE(hw, SXEVF_TDWBAH(reg_idx), 0);
 	SXEVF_REG_WRITE(hw, SXEVF_TDWBAL(reg_idx), 0);
 
-	return;
 }
 
 static void sxevf_tx_desc_thresh_set(
@@ -578,7 +546,6 @@ static void sxevf_tx_desc_thresh_set(
 
 	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
 
-	return;
 }
 
 void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
@@ -610,7 +577,6 @@ void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
 			  "the polling period\n", reg_idx, is_on);
 	}
 
-	return;
 }
 
 static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
@@ -619,9 +585,8 @@ static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
 	u32 wait_loop = SXEVF_RX_RING_POLL_MAX;
 	struct sxevf_adapter *adapter = hw->adapter;
 
-	if (!hw->reg_base_addr) {
-		goto l_end;
-	}
+	if (!hw->reg_base_addr)
+		return;
 
 	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
 	rxdctl &= ~SXE_VFRXDCTL_ENABLE;
@@ -637,8 +602,6 @@ static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
 				reg_idx);
 	}
 
-l_end:
-	return;
 }
 
 void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
@@ -673,7 +636,6 @@ void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
 			  "the polling period\n", reg_idx, is_on);
 	}
 
-	return;
 }
 
 void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
@@ -689,7 +651,6 @@ void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
 	SXEVF_REG_WRITE(hw, SXE_VFRDH(reg_idx), 0);
 	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), 0);
 
-	return;
 }
 
 void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
@@ -697,9 +658,8 @@ void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
 {
 	u32 srrctl = 0;
 
-	if (drop_en) {
+	if (drop_en)
 		srrctl = SXEVF_SRRCTL_DROP_EN;
-	}
 
 	srrctl |= ((header_buf_len << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 			SXEVF_SRRCTL_BSIZEHDR_MASK);
@@ -708,7 +668,6 @@ void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
 
 	SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(reg_idx), srrctl);
 
-	return;
 }
 
 static void sxevf_tx_ring_info_get(struct sxevf_hw *hw,
@@ -717,48 +676,47 @@ static void sxevf_tx_ring_info_get(struct sxevf_hw *hw,
 	*head = SXEVF_REG_READ(hw, SXE_VFTDH(idx));
 	*tail = SXEVF_REG_READ(hw, SXE_VFTDT(idx));
 
-	return;
 }
 
 static const struct sxevf_dma_operations sxevf_dma_ops = {
 	.tx_ring_desc_configure  = sxevf_tx_ring_desc_configure,
-	.tx_writeback_off      = sxevf_tx_writeback_off,
-	.tx_desc_thresh_set    = sxevf_tx_desc_thresh_set,
-	.tx_ring_switch        = sxevf_tx_ring_switch,
-	.tx_ring_info_get      = sxevf_tx_ring_info_get,
-
-	.rx_disable          = sxevf_rx_disable,
-	.rx_ring_switch      = sxevf_rx_ring_switch,
-	.rx_ring_desc_configure= sxevf_rx_ring_desc_configure,
+	.tx_writeback_off	  = sxevf_tx_writeback_off,
+	.tx_desc_thresh_set	= sxevf_tx_desc_thresh_set,
+	.tx_ring_switch		= sxevf_tx_ring_switch,
+	.tx_ring_info_get	  = sxevf_tx_ring_info_get,
+
+	.rx_disable		  = sxevf_rx_disable,
+	.rx_ring_switch	  = sxevf_rx_ring_switch,
+	.rx_ring_desc_configure = sxevf_rx_ring_desc_configure,
 	.rx_rcv_ctl_configure  = sxevf_rx_rcv_ctl_configure,
 };
 
 #ifdef SXE_DPDK
-#define SXEVF_32BIT_COUNTER_UPDATE(reg, last, cur)                          \
-	{																\
-		u32 latest = SXEVF_REG_READ(hw, reg);				\
-		cur += (latest - last) & UINT_MAX;						\
-		last = latest;											\
-	}
-	
-#define SXEVF_36BIT_COUNTER_UPDATE(lsb, msb, last, cur)                \
-	{																 \
-		u64 new_lsb = SXEVF_REG_READ(hw, lsb);					 \
-		u64 new_msb = SXEVF_REG_READ(hw, msb);					 \
-		u64 latest = ((new_msb << 32) | new_lsb);				 \
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last, cur) \
+	{ \
+		u32 latest = SXEVF_REG_READ(hw, reg); \
+		cur += (latest - last) & UINT_MAX; \
+		last = latest; \
+	}
+
+#define SXEVF_36BIT_COUNTER_UPDATE(lsb, msb, last, cur) \
+	{ \
+		u64 new_lsb = SXEVF_REG_READ(hw, lsb); \
+		u64 new_msb = SXEVF_REG_READ(hw, msb); \
+		u64 latest = ((new_msb << 32) | new_lsb); \
 		cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
-		last = latest;											 \
+		last = latest; \
 	}
 
 #else
 #define SXEVF_32BIT_COUNTER_UPDATE(reg, last_counter, counter)	\
-	{							\
-		u32 current_counter = SXEVF_REG_READ(hw, reg);	\
-		if (current_counter < last_counter)		\
-			counter += 0x100000000LL;		\
-		last_counter = current_counter;			\
-		counter &= 0xFFFFFFFF00000000LL;		\
-		counter |= current_counter;			\
+	{ \
+		u32 current_counter = SXEVF_REG_READ(hw, reg); \
+		if (current_counter < last_counter) \
+			counter += 0x100000000LL; \
+		last_counter = current_counter; \
+		counter &= 0xFFFFFFFF00000000LL; \
+		counter |= current_counter; \
 	}
 
 #define SXEVF_36BIT_COUNTER_UPDATE(reg_lsb, reg_msb, last_counter, counter) \
@@ -791,7 +749,6 @@ void sxevf_packet_stats_get(struct sxevf_hw *hw,
 	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFMPRC, stats->last_vfmprc,
 				stats->vfmprc);
 
-	return;
 }
 
 void sxevf_stats_init_value_get(struct sxevf_hw *hw,
@@ -805,7 +762,6 @@ void sxevf_stats_init_value_get(struct sxevf_hw *hw,
 	stats->last_vfgotc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGOTC_MSB))) << 32);
 	stats->last_vfmprc = SXEVF_REG_READ(hw, SXE_VFMPRC);
 
-	return;
 }
 static const struct sxevf_stat_operations sxevf_stat_ops = {
 	.packet_stats_get	= sxevf_packet_stats_get,
@@ -816,13 +772,11 @@ static void sxevf_rx_max_used_ring_set(struct sxevf_hw *hw, u16 max_rx_ring)
 {
 	u32 rqpl = 0;
 
-	if (max_rx_ring > 1) {
+	if (max_rx_ring > 1)
 		rqpl |= BIT(29);
-	}
 
 	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, rqpl);
 
-	return;
 }
 
 static const struct sxevf_dbu_operations sxevf_dbu_ops = {
@@ -844,23 +798,22 @@ static const struct sxevf_mbx_operations sxevf_mbx_ops = {
 void sxevf_hw_ops_init(struct sxevf_hw *hw)
 {
 	hw->setup.ops   = &sxevf_setup_ops;
-	hw->irq.ops     = &sxevf_irq_ops;
-	hw->mbx.ops     = &sxevf_mbx_ops;
-	hw->dma.ops     = &sxevf_dma_ops;
-	hw->stat.ops    = &sxevf_stat_ops;
-	hw->dbu.ops     = &sxevf_dbu_ops;
+	hw->irq.ops	 = &sxevf_irq_ops;
+	hw->mbx.ops	 = &sxevf_mbx_ops;
+	hw->dma.ops	 = &sxevf_dma_ops;
+	hw->stat.ops	= &sxevf_stat_ops;
+	hw->dbu.ops	 = &sxevf_dbu_ops;
 
-	return;
 }
 
-#ifdef SXE_DPDK 
+#ifdef SXE_DPDK
 
-#define SXEVF_RSS_FIELD_MASK        0xffff0000
-#define SXEVF_MRQC_RSSEN            0x00000001 
+#define SXEVF_RSS_FIELD_MASK		0xffff0000
+#define SXEVF_MRQC_RSSEN			0x00000001
 
-#define SXEVF_RSS_KEY_SIZE                (40)  
-#define SXEVF_MAX_RSS_KEY_ENTRIES	  (10)  
-#define SXEVF_MAX_RETA_ENTRIES            (128) 
+#define SXEVF_RSS_KEY_SIZE				(40)
+#define SXEVF_MAX_RSS_KEY_ENTRIES	  (10)
+#define SXEVF_MAX_RETA_ENTRIES			(128)
 
 void sxevf_rxtx_reg_init(struct sxevf_hw *hw)
 {
@@ -886,7 +839,6 @@ void sxevf_rxtx_reg_init(struct sxevf_hw *hw)
 
 	SXEVF_WRITE_FLUSH(hw);
 
-	return;
 }
 
 u32 sxevf_irq_cause_get(struct sxevf_hw *hw)
@@ -898,21 +850,19 @@ void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
 				u64 desc_dma_addr, u8 reg_idx)
 {
 
-	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr & \
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr &
 						DMA_BIT_MASK(32)));
 	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
 	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
 	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
 	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
 
-	return;
 }
 
 void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, value);
 
-	return;
 }
 
 void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
@@ -922,15 +872,13 @@ void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
 
 	vlnctrl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_index));
 
-	if (is_enable) {
+	if (is_enable)
 		vlnctrl |= SXEVF_RXDCTL_VME;
-	} else {
+	else
 		vlnctrl &= ~SXEVF_RXDCTL_VME;
-	}
 
 	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_index), vlnctrl);
 
-	return;
 }
 
 void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
@@ -944,14 +892,12 @@ void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
 
 	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
 
-	return;
 }
 
 void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), value);
 
-	return;
 }
 
 u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx)
@@ -963,18 +909,16 @@ void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
 						u16 reg_idx, u32 value)
 {
 	SXEVF_REG_WRITE(hw, SXE_VFRETA(reg_idx >> 2), value);
-	return;
 }
 
 u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx)
 {
 	u32 rss_key;
 
-	if (reg_idx >= SXEVF_MAX_RSS_KEY_ENTRIES) {
+	if (reg_idx >= SXEVF_MAX_RSS_KEY_ENTRIES)
 		rss_key = 0;
-	} else {
+	else
 		rss_key = SXEVF_REG_READ(hw, SXE_VFRSSRK(reg_idx));
-	}
 
 	return rss_key;
 }
@@ -989,9 +933,8 @@ bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw)
 {
 	bool rss_enable = false;
 	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
-	if (mrqc & SXEVF_MRQC_RSSEN) {
+	if (mrqc & SXEVF_MRQC_RSSEN)
 		rss_enable = true;
-	}
 
 	return rss_enable;
 }
@@ -1000,25 +943,21 @@ void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key)
 {
 	u32 i;
 
-	for (i = 0; i < SXEVF_MAX_RSS_KEY_ENTRIES; i++) {
+	for (i = 0; i < SXEVF_MAX_RSS_KEY_ENTRIES; i++)
 		SXEVF_REG_WRITE(hw, SXE_VFRSSRK(i), rss_key[i]);
-	}
 
-	return;
 }
 
 void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on)
 {
 	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
-	if (is_on) {
+	if (is_on)
 		mrqc |= SXEVF_MRQC_RSSEN;
-	} else {
+	else
 		mrqc &= ~SXEVF_MRQC_RSSEN;
-	}
 
 	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
 
-	return;
 }
 
 void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field)
@@ -1029,7 +968,6 @@ void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field)
 	mrqc |= rss_field;
 	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
 
-	return;
 }
 
 u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
@@ -1044,7 +982,7 @@ u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
 			reg_buf[count + j] = SXEVF_REG_READ(hw,
 					regs[i].addr + j * regs[i].stride);
 			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
-				regs[i].name , regs[i].addr, reg_buf[count + j]);
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
 		}
 
 		i++;
diff --git a/drivers/net/sxe/base/sxevf_hw.h b/drivers/net/sxe/base/sxevf_hw.h
index 67d711d5b8..1530a3949f 100644
--- a/drivers/net/sxe/base/sxevf_hw.h
+++ b/drivers/net/sxe/base/sxevf_hw.h
@@ -5,7 +5,7 @@
 #ifndef __SXEVF_HW_H__
 #define __SXEVF_HW_H__
 
-#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/if_ether.h>
@@ -18,7 +18,7 @@
 
 #include "sxevf_regs.h"
 
-#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST)
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
 #define SXE_PRIU64  "llu"
 #define SXE_PRIX64  "llx"
 #define SXE_PRID64  "lld"
@@ -28,30 +28,30 @@
 #define SXE_PRID64  PRId64
 #endif
 
-#define SXEVF_TXRX_RING_NUM_MAX           8  
-#define SXEVF_MAX_TXRX_DESC_POLL          (10)
+#define SXEVF_TXRX_RING_NUM_MAX		   8
+#define SXEVF_MAX_TXRX_DESC_POLL		  (10)
 #define SXEVF_TX_DESC_PREFETCH_THRESH_32  (32)
-#define SXEVF_TX_DESC_HOST_THRESH_1       (1)
+#define SXEVF_TX_DESC_HOST_THRESH_1	   (1)
 #define SXEVF_TX_DESC_WRITEBACK_THRESH_8  (8)
-#define SXEVF_TXDCTL_HTHRESH_SHIFT        (8)
-#define SXEVF_TXDCTL_WTHRESH_SHIFT        (16)
+#define SXEVF_TXDCTL_HTHRESH_SHIFT		(8)
+#define SXEVF_TXDCTL_WTHRESH_SHIFT		(16)
 
-#define SXEVF_TXDCTL_THRESH_MASK          (0x7F)
+#define SXEVF_TXDCTL_THRESH_MASK		  (0x7F)
 
-#define SXEVF_RX_RING_POLL_MAX           (10)
+#define SXEVF_RX_RING_POLL_MAX		   (10)
 
-#define SXEVF_MAC_HDR_LEN_MAX           (127)
-#define SXEVF_NETWORK_HDR_LEN_MAX       (511)
+#define SXEVF_MAC_HDR_LEN_MAX		   (127)
+#define SXEVF_NETWORK_HDR_LEN_MAX	   (511)
 
-#define SXEVF_LINK_SPEED_UNKNOWN        0
+#define SXEVF_LINK_SPEED_UNKNOWN		0
 #define SXEVF_LINK_SPEED_1GB_FULL	0x0020
 #define SXEVF_LINK_SPEED_10GB_FULL	0x0080
 #define SXEVF_LINK_SPEED_100_FULL	0x0008
 
-#define SXEVF_VFT_TBL_SIZE           (128)   
-#define SXEVF_HW_TXRX_RING_NUM_MAX   (128)   
+#define SXEVF_VFT_TBL_SIZE		   (128)
+#define SXEVF_HW_TXRX_RING_NUM_MAX   (128)
 
-#define SXEVF_VLAN_TAG_SIZE          (4)
+#define SXEVF_VLAN_TAG_SIZE		  (4)
 
 #define SXEVF_HW_UC_ENTRY_NUM_MAX   128
 
@@ -65,7 +65,7 @@ enum {
 	SXEVF_DIAG_TEST_PASSED			= 0,
 	SXEVF_DIAG_TEST_BLOCKED			= 1,
 	SXEVF_DIAG_REG_PATTERN_TEST_ERR		= 2,
-	SXEVF_DIAG_CHECK_REG_TEST_ERR           = 3,
+	SXEVF_DIAG_CHECK_REG_TEST_ERR		   = 3,
 };
 
 struct sxevf_hw;
@@ -83,11 +83,11 @@ struct sxevf_hw_stats {
 	u64 last_vfgotc;
 	u64 last_vfmprc;
 
-	u64 vfgprc;      
-	u64 vfgptc;      
-	u64 vfgorc;      
-	u64 vfgotc;      
-	u64 vfmprc;      
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
 
 	u64 saved_reset_vfgprc;
 	u64 saved_reset_vfgptc;
@@ -100,12 +100,12 @@ void sxevf_hw_ops_init(struct sxevf_hw *hw);
 
 
 struct sxevf_setup_operations {
-	void (*reset)(struct sxevf_hw *);
+	void (*reset)(struct sxevf_hw *hw);
 	void (*hw_stop)(struct sxevf_hw *hw);
 	s32  (*regs_test)(struct sxevf_hw *hw);
 	u32  (*link_state_get)(struct sxevf_hw *hw);
 	u32  (*regs_dump)(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size);
-	bool (*reset_done)(struct sxevf_hw *);
+	bool (*reset_done)(struct sxevf_hw *hw);
 };
 
 struct sxevf_hw_setup {
@@ -115,12 +115,12 @@ struct sxevf_hw_setup {
 struct sxevf_irq_operations {
 	void (*pending_irq_clear)(struct sxevf_hw *hw);
 	void (*ring_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
-	void (*event_irq_interval_set)(struct sxevf_hw * hw, u16 irq_idx, u32 value);
+	void (*event_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 value);
 	void (*ring_irq_map)(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 irq_idx);
 	void (*event_irq_map)(struct sxevf_hw *hw, u16 irq_idx);
 	void (*ring_irq_trigger)(struct sxevf_hw *hw, u64 eics);
-	void (*irq_enable)(struct sxevf_hw * hw, u32 mask);
-	void (*specific_irq_enable)(struct sxevf_hw * hw, u32 value);
+	void (*irq_enable)(struct sxevf_hw *hw, u32 mask);
+	void (*specific_irq_enable)(struct sxevf_hw *hw, u32 value);
 	void (*irq_disable)(struct sxevf_hw *hw);
 	void (*irq_off)(struct sxevf_hw *hw);
 };
@@ -142,37 +142,40 @@ struct sxevf_mbx_operations {
 };
 
 struct sxevf_mbx_stats {
-	u32 send_msgs; 
-	u32 rcv_msgs;  
+	u32 send_msgs;
+	u32 rcv_msgs;
 
-	u32 reqs;      
-	u32 acks;      
-	u32 rsts;      
+	u32 reqs;
+	u32 acks;
+	u32 rsts;
 };
 
 struct sxevf_mbx_info {
-	const struct sxevf_mbx_operations *ops; 
-
-	struct sxevf_mbx_stats stats; 
-	u32 msg_len;  
-	u32 retry;    
-	u32 interval; 
-	u32 reg_value; 
-	u32 api_version; 
+	const struct sxevf_mbx_operations *ops;
+
+	struct sxevf_mbx_stats stats;
+	u32 msg_len;
+	u32 retry;
+	u32 interval;
+	u32 reg_value;
+	u32 api_version;
 };
 
 struct sxevf_dma_operations {
-	void (* tx_ring_desc_configure)(struct sxevf_hw *, u32, u64, u8);
-	void (* tx_writeback_off)(struct sxevf_hw *, u8);
-	void (* tx_desc_thresh_set)(struct sxevf_hw *, u8, u32, u32, u32);
-	void (* tx_ring_switch)(struct sxevf_hw *, u8, bool);
-	void (* tx_desc_wb_flush)(struct sxevf_hw *, u8);
-	void (* tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx,
+	void (*tx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx);
+	void (*tx_writeback_off)(struct sxevf_hw *hw, u8 reg_idx);
+	void (*tx_desc_thresh_set)(struct sxevf_hw *hw, u8 reg_idx,
+				u32 wb_thresh, u32 host_thresh, u32 prefetch_thresh);
+	void (*tx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+	void (*tx_desc_wb_flush)(struct sxevf_hw *hw, u8 val);
+	void (*tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx,
 					u32 *head, u32 *tail);
-	void (* rx_disable)(struct sxevf_hw *, u8);
-	void (* rx_ring_switch)(struct sxevf_hw *, u8, bool);
-	void (* rx_ring_desc_configure)(struct sxevf_hw *, u32, u64, u8);
-	void (* rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx,
+	void (*rx_disable)(struct sxevf_hw *hw, u8 reg_idx);
+	void (*rx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+	void (*rx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
+				u64 desc_dma_addr, u8 reg_idx);
+	void (*rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx,
 				   u32 header_buf_len, u32 pkg_buf_len, bool drop_en);
 };
 
@@ -181,8 +184,8 @@ struct sxevf_dma_info {
 };
 
 struct sxevf_stat_operations {
-	void (*packet_stats_get)(struct sxevf_hw *,
-			struct sxevf_hw_stats *);
+	void (*packet_stats_get)(struct sxevf_hw *hw,
+			struct sxevf_hw_stats *stats);
 	void (*stats_init_value_get)(struct sxevf_hw *hw,
 				struct sxevf_hw_stats *stats);
 };
@@ -192,7 +195,7 @@ struct sxevf_stat_info {
 };
 
 struct sxevf_dbu_operations {
-	void (*rx_max_used_ring_set)(struct sxevf_hw *, u16);
+	void (*rx_max_used_ring_set)(struct sxevf_hw *hw, u16 max_rx_ring);
 
 };
 
@@ -206,30 +209,30 @@ enum sxevf_hw_state {
 };
 
 struct sxevf_hw {
-	u8 __iomem *reg_base_addr;      
+	u8 __iomem *reg_base_addr;
 	void *adapter;
 
 	void *priv;
-	unsigned long state;   
+	unsigned long state;
 	void (*fault_handle)(void *priv);
 	u32 (*reg_read)(const volatile void *reg);
 	void (*reg_write)(u32 value, volatile void *reg);
-	s32	board_type;		
+	s32	board_type;
 
-	struct sxevf_hw_setup   setup;   
-	struct sxevf_irq_info   irq;     
-	struct sxevf_mbx_info   mbx;     
+	struct sxevf_hw_setup   setup;
+	struct sxevf_irq_info   irq;
+	struct sxevf_mbx_info   mbx;
 
-	struct sxevf_dma_info    dma;    
-	struct sxevf_stat_info   stat;   
-	struct sxevf_dbu_info    dbu;
+	struct sxevf_dma_info	dma;
+	struct sxevf_stat_info   stat;
+	struct sxevf_dbu_info	dbu;
 };
 
 struct sxevf_reg_info {
-	u32 addr;        
-	u32 count;       
-	u32 stride;      
-	const s8 *name;  
+	u32 addr;
+	u32 count;
+	u32 stride;
+	const s8 *name;
 };
 
 u16 sxevf_reg_dump_num_get(void);
@@ -247,7 +250,6 @@ static inline void sxevf_hw_fault_handle_init(struct sxevf_hw *hw,
 	hw->priv = priv;
 	hw->fault_handle = handle;
 
-	return;
 }
 
 static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw,
@@ -257,10 +259,9 @@ static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw,
 	hw->reg_read  = read;
 	hw->reg_write = write;
 
-	return;
 }
 
-#ifdef SXE_DPDK 
+#ifdef SXE_DPDK
 
 void sxevf_irq_disable(struct sxevf_hw *hw);
 
@@ -347,5 +348,5 @@ u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
 				const struct sxevf_reg_info *regs,
 				u32 *reg_buf);
 
-#endif 
+#endif
 #endif
diff --git a/drivers/net/sxe/base/sxevf_regs.h b/drivers/net/sxe/base/sxevf_regs.h
index 43486db526..50a22f559c 100644
--- a/drivers/net/sxe/base/sxevf_regs.h
+++ b/drivers/net/sxe/base/sxevf_regs.h
@@ -14,106 +14,106 @@
 #define SXE_VFLINKS_SPEED_1G	0x00000004
 #define SXE_VFLINKS_SPEED_100	0x00000002
 
-#define SXE_VFCTRL        0x00000
-#define SXE_VFSTATUS      0x00008
-#define SXE_VFLINKS       0x00018
-#define SXE_VFFRTIMER     0x00048
+#define SXE_VFCTRL		0x00000
+#define SXE_VFSTATUS	  0x00008
+#define SXE_VFLINKS	   0x00018
+#define SXE_VFFRTIMER	 0x00048
 #define SXE_VFRXMEMWRAP   0x03190
-#define SXE_VFEICR        0x00100
-#define SXE_VFEICS        0x00104
-#define SXE_VFEIMS        0x00108
-#define SXE_VFEIMC        0x0010C
-#define SXE_VFEIAM        0x00114
-#define SXE_VFEITR(x)     (0x00820 + (4 * (x)))
-#define SXE_VFIVAR(x)     (0x00120 + (4 * (x)))
-#define SXE_VFIVAR_MISC    0x00140
-#define SXE_VFRDBAL(x)    (0x01000 + (0x40 * (x)))
-#define SXE_VFRDBAH(x)    (0x01004 + (0x40 * (x)))
-#define SXE_VFRDLEN(x)    (0x01008 + (0x40 * (x)))
-#define SXE_VFRDH(x)      (0x01010 + (0x40 * (x)))
-#define SXE_VFRDT(x)      (0x01018 + (0x40 * (x)))
+#define SXE_VFEICR		0x00100
+#define SXE_VFEICS		0x00104
+#define SXE_VFEIMS		0x00108
+#define SXE_VFEIMC		0x0010C
+#define SXE_VFEIAM		0x00114
+#define SXE_VFEITR(x)	 (0x00820 + (4 * (x)))
+#define SXE_VFIVAR(x)	 (0x00120 + (4 * (x)))
+#define SXE_VFIVAR_MISC	0x00140
+#define SXE_VFRDBAL(x)	(0x01000 + (0x40 * (x)))
+#define SXE_VFRDBAH(x)	(0x01004 + (0x40 * (x)))
+#define SXE_VFRDLEN(x)	(0x01008 + (0x40 * (x)))
+#define SXE_VFRDH(x)	  (0x01010 + (0x40 * (x)))
+#define SXE_VFRDT(x)	  (0x01018 + (0x40 * (x)))
 #define SXE_VFRXDCTL(x)   (0x01028 + (0x40 * (x)))
 #define SXE_VFSRRCTL(x)   (0x01014 + (0x40 * (x)))
 #define SXE_VFLROCTL(x)   (0x0102C + (0x40 * (x)))
-#define SXE_VFPSRTYPE     0x00300
-#define SXE_VFTDBAL(x)    (0x02000 + (0x40 * (x)))
-#define SXE_VFTDBAH(x)    (0x02004 + (0x40 * (x)))
-#define SXE_VFTDLEN(x)    (0x02008 + (0x40 * (x)))
-#define SXE_VFTDH(x)      (0x02010 + (0x40 * (x)))
-#define SXE_VFTDT(x)      (0x02018 + (0x40 * (x)))
+#define SXE_VFPSRTYPE	 0x00300
+#define SXE_VFTDBAL(x)	(0x02000 + (0x40 * (x)))
+#define SXE_VFTDBAH(x)	(0x02004 + (0x40 * (x)))
+#define SXE_VFTDLEN(x)	(0x02008 + (0x40 * (x)))
+#define SXE_VFTDH(x)	  (0x02010 + (0x40 * (x)))
+#define SXE_VFTDT(x)	  (0x02018 + (0x40 * (x)))
 #define SXE_VFTXDCTL(x)   (0x02028 + (0x40 * (x)))
 #define SXE_VFTDWBAL(x)   (0x02038 + (0x40 * (x)))
 #define SXE_VFTDWBAH(x)   (0x0203C + (0x40 * (x)))
-#define SXE_VFDCA_RXCTRL(x)    (0x0100C + (0x40 * (x)))
-#define SXE_VFDCA_TXCTRL(x)    (0x0200c + (0x40 * (x)))
-#define SXE_VFGPRC        0x0101C
-#define SXE_VFGPTC        0x0201C
-#define SXE_VFGORC_LSB    0x01020
-#define SXE_VFGORC_MSB    0x01024
-#define SXE_VFGOTC_LSB    0x02020
-#define SXE_VFGOTC_MSB    0x02024
-#define SXE_VFMPRC        0x01034
-#define SXE_VFMRQC        0x3000
-#define SXE_VFRSSRK(x)    (0x3100 + ((x) * 4))
-#define SXE_VFRETA(x)     (0x3200 + ((x) * 4))
-
-#define SXEVF_VFEIMC_IRQ_MASK            (7)
-#define SXEVF_IVAR_ALLOC_VALID    (0x80)
-
-#define SXEVF_EITR_CNT_WDIS       (0x80000000)
-#define SXEVF_EITR_ITR_MASK       (0x00000FF8)
-#define SXEVF_EITR_ITR_SHIFT      (2)
-#define SXEVF_EITR_ITR_MAX        (SXEVF_EITR_ITR_MASK >> SXEVF_EITR_ITR_SHIFT)
+#define SXE_VFDCA_RXCTRL(x)	(0x0100C + (0x40 * (x)))
+#define SXE_VFDCA_TXCTRL(x)	(0x0200c + (0x40 * (x)))
+#define SXE_VFGPRC		0x0101C
+#define SXE_VFGPTC		0x0201C
+#define SXE_VFGORC_LSB	0x01020
+#define SXE_VFGORC_MSB	0x01024
+#define SXE_VFGOTC_LSB	0x02020
+#define SXE_VFGOTC_MSB	0x02024
+#define SXE_VFMPRC		0x01034
+#define SXE_VFMRQC		0x3000
+#define SXE_VFRSSRK(x)	(0x3100 + ((x) * 4))
+#define SXE_VFRETA(x)	 (0x3200 + ((x) * 4))
+
+#define SXEVF_VFEIMC_IRQ_MASK			(7)
+#define SXEVF_IVAR_ALLOC_VALID	(0x80)
+
+#define SXEVF_EITR_CNT_WDIS	   (0x80000000)
+#define SXEVF_EITR_ITR_MASK	   (0x00000FF8)
+#define SXEVF_EITR_ITR_SHIFT	  (2)
+#define SXEVF_EITR_ITR_MAX		(SXEVF_EITR_ITR_MASK >> SXEVF_EITR_ITR_SHIFT)
 
 #define SXE_VFRXDCTL_ENABLE  0x02000000
 #define SXE_VFTXDCTL_ENABLE  0x02000000
-#define SXE_VFCTRL_RST       0x04000000
-
-#define SXEVF_RXDCTL_ENABLE     0x02000000  
-#define SXEVF_RXDCTL_VME	0x40000000  
-
-#define SXEVF_PSRTYPE_RQPL_SHIFT               29 
-
-#define SXEVF_SRRCTL_DROP_EN                   0x10000000
-#define SXEVF_SRRCTL_DESCTYPE_DATA_ONEBUF      0x02000000
-#define SXEVF_SRRCTL_BSIZEPKT_SHIFT            (10)
-#define SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT        (2)
-#define SXEVF_SRRCTL_BSIZEPKT_MASK	       0x0000007F
-#define SXEVF_SRRCTL_BSIZEHDR_MASK	       0x00003F00
-
-#define SXE_VFMAILBOX       0x002FC
-#define SXE_VFMBMEM         0x00200
-
-#define SXE_VFMAILBOX_REQ     0x00000001 
-#define SXE_VFMAILBOX_ACK     0x00000002 
-#define SXE_VFMAILBOX_VFU     0x00000004 
-#define SXE_VFMAILBOX_PFU     0x00000008 
-#define SXE_VFMAILBOX_PFSTS   0x00000010 
-#define SXE_VFMAILBOX_PFACK   0x00000020 
-#define SXE_VFMAILBOX_RSTI    0x00000040 
-#define SXE_VFMAILBOX_RSTD    0x00000080 
-#define SXE_VFMAILBOX_RC_BIT  0x000000B0  
-
-#define SXEVF_TDBAL(_i)      (0x02000 + ((_i) * 0x40))
-#define SXEVF_TDBAH(_i)      (0x02004 + ((_i) * 0x40))
-#define SXEVF_TDLEN(_i)      (0x02008 + ((_i) * 0x40))
-#define SXEVF_TDH(_i)        (0x02010 + ((_i) * 0x40))
-#define SXEVF_TDT(_i)        (0x02018 + ((_i) * 0x40))
-#define SXEVF_TXDCTL(_i)     (0x02028 + ((_i) * 0x40))
-#define SXEVF_TDWBAL(_i)     (0x02038 + ((_i) * 0x40))
-#define SXEVF_TDWBAH(_i)     (0x0203C + ((_i) * 0x40))
-
-#define SXEVF_TXDCTL_SWFLSH  (0x02000000)  
-#define SXEVF_TXDCTL_ENABLE  (0x02000000) 
-
-#define SXEVF_VFGPRC          0x0101C
-#define SXEVF_VFGPTC          0x0201C
-#define SXEVF_VFGORC_LSB      0x01020
-#define SXEVF_VFGORC_MSB      0x01024
-#define SXEVF_VFGOTC_LSB      0x02020
-#define SXEVF_VFGOTC_MSB      0x02024
-#define SXEVF_VFMPRC          0x01034
-
-#define SXEVF_EICR_MASK       0x07
+#define SXE_VFCTRL_RST	   0x04000000
+
+#define SXEVF_RXDCTL_ENABLE	 0x02000000
+#define SXEVF_RXDCTL_VME	0x40000000
+
+#define SXEVF_PSRTYPE_RQPL_SHIFT			   29
+
+#define SXEVF_SRRCTL_DROP_EN				   0x10000000
+#define SXEVF_SRRCTL_DESCTYPE_DATA_ONEBUF	  0x02000000
+#define SXEVF_SRRCTL_BSIZEPKT_SHIFT			(10)
+#define SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT		(2)
+#define SXEVF_SRRCTL_BSIZEPKT_MASK		   0x0000007F
+#define SXEVF_SRRCTL_BSIZEHDR_MASK		   0x00003F00
+
+#define SXE_VFMAILBOX	   0x002FC
+#define SXE_VFMBMEM		 0x00200
+
+#define SXE_VFMAILBOX_REQ	 0x00000001
+#define SXE_VFMAILBOX_ACK	 0x00000002
+#define SXE_VFMAILBOX_VFU	 0x00000004
+#define SXE_VFMAILBOX_PFU	 0x00000008
+#define SXE_VFMAILBOX_PFSTS   0x00000010
+#define SXE_VFMAILBOX_PFACK   0x00000020
+#define SXE_VFMAILBOX_RSTI	0x00000040
+#define SXE_VFMAILBOX_RSTD	0x00000080
+#define SXE_VFMAILBOX_RC_BIT  0x000000B0
+
+#define SXEVF_TDBAL(_i)	  (0x02000 + ((_i) * 0x40))
+#define SXEVF_TDBAH(_i)	  (0x02004 + ((_i) * 0x40))
+#define SXEVF_TDLEN(_i)	  (0x02008 + ((_i) * 0x40))
+#define SXEVF_TDH(_i)		(0x02010 + ((_i) * 0x40))
+#define SXEVF_TDT(_i)		(0x02018 + ((_i) * 0x40))
+#define SXEVF_TXDCTL(_i)	 (0x02028 + ((_i) * 0x40))
+#define SXEVF_TDWBAL(_i)	 (0x02038 + ((_i) * 0x40))
+#define SXEVF_TDWBAH(_i)	 (0x0203C + ((_i) * 0x40))
+
+#define SXEVF_TXDCTL_SWFLSH  (0x02000000)
+#define SXEVF_TXDCTL_ENABLE  (0x02000000)
+
+#define SXEVF_VFGPRC		  0x0101C
+#define SXEVF_VFGPTC		  0x0201C
+#define SXEVF_VFGORC_LSB	  0x01020
+#define SXEVF_VFGORC_MSB	  0x01024
+#define SXEVF_VFGOTC_LSB	  0x02020
+#define SXEVF_VFGOTC_MSB	  0x02024
+#define SXEVF_VFMPRC		  0x01034
+
+#define SXEVF_EICR_MASK	   0x07
 
 #endif
diff --git a/drivers/net/sxe/include/drv_msg.h b/drivers/net/sxe/include/drv_msg.h
index 9f06624cc3..e441f9d371 100644
--- a/drivers/net/sxe/include/drv_msg.h
+++ b/drivers/net/sxe/include/drv_msg.h
@@ -16,7 +16,7 @@
 
 
 typedef struct sxe_version_resp {
-    U8 fw_version[SXE_VERSION_LEN];
-}sxe_version_resp_s;
+	U8 fw_version[SXE_VERSION_LEN];
+} sxe_version_resp_s;
 
-#endif 
+#endif
diff --git a/drivers/net/sxe/include/sxe/mgl/sxe_port.h b/drivers/net/sxe/include/sxe/mgl/sxe_port.h
index e41cb9e87b..642b9eb045 100644
--- a/drivers/net/sxe/include/sxe/mgl/sxe_port.h
+++ b/drivers/net/sxe/include/sxe/mgl/sxe_port.h
@@ -11,26 +11,27 @@ extern "C" {
 #include "mgc_types.h"
 #include "ps3_types.h"
 
-typedef enum MglPortCmdSetCode{
-    MGL_CMD_PORT_SET_BASE      = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0),
-    MGL_CMD_PORT_SET_REG       = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1),
-    MGL_CMD_PORT_SET_LED       = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2),
-    MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3),
-    MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4),
-    MGL_CMD_SXE_SOC_RST        = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5),
-    MGL_CMD_SXE_SET_MFGINFO    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6),
-    MGL_CMD_SXE_SET_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7),
-    MGL_CMD_SXE_OPT_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8),
+typedef enum MglPortCmdSetCode {
+	MGL_CMD_PORT_SET_BASE	  = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0),
+	MGL_CMD_PORT_SET_REG	   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1),
+	MGL_CMD_PORT_SET_LED	   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2),
+	MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3),
+	MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4),
+	MGL_CMD_SXE_SOC_RST		= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5),
+	MGL_CMD_SXE_SET_MFGINFO	= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6),
+	MGL_CMD_SXE_SET_INSIGHT	= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7),
+	MGL_CMD_SXE_OPT_INSIGHT	= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8),
+	MGL_CMD_SXE_SET_LLDPSTATE  = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 9),
 } MglPortCmdSetCode_e;
 
-typedef enum MglPortCmdGetCode{
-    MGL_CMD_SXE_GET_REG        = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0),
-    MGL_CMD_SXE_GET_SOC_INFO   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1),
-    MGL_CMD_SXE_LOG_EXPORT     = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2),
-    MGL_CMD_SXE_REGS_DUMP      = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3),
-    MGL_CMD_SXE_GET_MFGINFO    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4),
-    MGL_CMD_SXE_MAC_ADDR_GET   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5),
-    MGL_CMD_SXE_GET_INSIGHT    = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6),
+typedef enum MglPortCmdGetCode {
+	MGL_CMD_SXE_GET_REG		= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0),
+	MGL_CMD_SXE_GET_SOC_INFO   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1),
+	MGL_CMD_SXE_LOG_EXPORT	 = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2),
+	MGL_CMD_SXE_REGS_DUMP	  = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3),
+	MGL_CMD_SXE_GET_MFGINFO	= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4),
+	MGL_CMD_SXE_MAC_ADDR_GET   = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5),
+	MGL_CMD_SXE_GET_INSIGHT	= MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6),
 } MglPortCmdGetCode_e;
 
 #if defined(__cplusplus)
diff --git a/drivers/net/sxe/include/sxe/sxe_cli.h b/drivers/net/sxe/include/sxe/sxe_cli.h
index 206cc48542..a03e51b08e 100644
--- a/drivers/net/sxe/include/sxe/sxe_cli.h
+++ b/drivers/net/sxe/include/sxe/sxe_cli.h
@@ -9,160 +9,165 @@
 #include "sxe_drv_type.h"
 #endif
 
-#define SXE_VERION_LEN                  (32)
-#define SXE_MAC_NUM                     (128)
-#define SXE_PORT_TRANSCEIVER_LEN        (32)
-#define SXE_PORT_VENDOR_LEN             (32)
-#define SXE_CHIP_TYPE_LEN               (32)
-#define SXE_VPD_SN_LEN                  (16)
-#define SXE_SOC_RST_TIME                (0x93A80)  
-#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)        
-#define MGC_TERMLOG_INFO_MAX_LEN        (12 * 1024)
-#define SXE_REGS_DUMP_MAX_LEN           (12 * 1024)
-#define SXE_PRODUCT_NAME_LEN        (32)       
+#define SXE_VERION_LEN				  (32)
+#define SXE_MAC_NUM					 (128)
+#define SXE_PORT_TRANSCEIVER_LEN		(32)
+#define SXE_PORT_VENDOR_LEN			 (32)
+#define SXE_CHIP_TYPE_LEN			   (32)
+#define SXE_VPD_SN_LEN				  (16)
+#define SXE_SOC_RST_TIME				(0x93A80)
+#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)
+#define MGC_TERMLOG_INFO_MAX_LEN		(12 * 1024)
+#define SXE_REGS_DUMP_MAX_LEN		   (12 * 1024)
+#define SXE_PRODUCT_NAME_LEN		(32)
 
 typedef enum sxe_led_mode {
-    SXE_IDENTIFY_LED_BLINK_ON   = 0,    
-    SXE_IDENTIFY_LED_BLINK_OFF,         
-    SXE_IDENTIFY_LED_ON,                
-    SXE_IDENTIFY_LED_OFF,               
-    SXE_IDENTIFY_LED_RESET,             
+	SXE_IDENTIFY_LED_BLINK_ON   = 0,
+	SXE_IDENTIFY_LED_BLINK_OFF,
+	SXE_IDENTIFY_LED_ON,
+	SXE_IDENTIFY_LED_OFF,
+	SXE_IDENTIFY_LED_RESET,
 } sxe_led_mode_s;
 
 typedef struct sxe_led_ctrl {
-    U32    mode;      
-    U32    duration;  
+	U32	mode;
+	U32	duration;
 
 } sxe_led_ctrl_s;
 
 typedef struct sxe_led_ctrl_resp {
-    U32    ack;       
+	U32	ack;
 } sxe_led_ctrl_resp_s;
 
 typedef enum PortLinkSpeed {
-    PORT_LINK_NO            = 0,     
-    PORT_LINK_100M          = 1,     
-    PORT_LINK_1G            = 2,     
-    PORT_LINK_10G           = 3,     
+	PORT_LINK_NO			= 0,
+	PORT_LINK_100M		  = 1,
+	PORT_LINK_1G			= 2,
+	PORT_LINK_10G		   = 3,
 } PortLinkSpeed_e;
 
 typedef struct SysSocInfo {
-    S8     fwVer[SXE_VERION_LEN];        
-    S8     optVer[SXE_VERION_LEN];       
-    U8     socStatus;                    
-    U8     pad[3];
-    S32    socTemp;                      
-    U64    chipId;                       
-    S8     chipType[SXE_CHIP_TYPE_LEN];  
-    S8     pba[SXE_VPD_SN_LEN];          
-    S8     productName[SXE_PRODUCT_NAME_LEN];   
+	S8	 fwVer[SXE_VERION_LEN];
+	S8	 optVer[SXE_VERION_LEN];
+	U8	 socStatus;
+	U8	 pad[3];
+	S32	socTemp;
+	U64	chipId;
+	S8	 chipType[SXE_CHIP_TYPE_LEN];
+	S8	 pba[SXE_VPD_SN_LEN];
+	S8	 productName[SXE_PRODUCT_NAME_LEN];
 } SysSocInfo_s;
 
 typedef struct SysPortInfo {
-    U64    mac[SXE_MAC_NUM];         
-    U8     isPortAbs;                
-    U8     linkStat;                 
-    U8     linkSpeed;                
-
-
-    U8     isSfp:1;                                     
-    U8     isGetInfo:1;                                 
-    U8     rvd:6;                                       
-    S8     opticalModTemp;                              
-    U8     pad[3];
-    S8     transceiverType[SXE_PORT_TRANSCEIVER_LEN];   
-    S8     vendorName[SXE_PORT_VENDOR_LEN];             
-    S8     vendorPn[SXE_PORT_VENDOR_LEN];               
+	U64	mac[SXE_MAC_NUM];
+	U8	 isPortAbs;
+	U8	 linkStat;
+	U8	 linkSpeed;
+
+
+	U8	 isSfp:1;
+	U8	 isGetInfo:1;
+	U8	 rvd:6;
+	S8	 opticalModTemp;
+	U8	 pad[3];
+	S8	 transceiverType[SXE_PORT_TRANSCEIVER_LEN];
+	S8	 vendorName[SXE_PORT_VENDOR_LEN];
+	S8	 vendorPn[SXE_PORT_VENDOR_LEN];
 } SysPortInfo_s;
 
 typedef struct SysInfoResp {
-    SysSocInfo_s     socInfo;        
-    SysPortInfo_s    portInfo;       
+	SysSocInfo_s	 socInfo;
+	SysPortInfo_s	portInfo;
 } SysInfoResp_s;
 
 typedef enum SfpTempTdMode {
-    SFP_TEMP_THRESHOLD_MODE_ALARM   = 0,
-    SFP_TEMP_THRESHOLD_MODE_WARN,
+	SFP_TEMP_THRESHOLD_MODE_ALARM   = 0,
+	SFP_TEMP_THRESHOLD_MODE_WARN,
 } SfpTempTdMode_e;
 
-typedef struct SfpTempTdSet{
-    U8     mode;             
-    U8     pad[3];
-    S8     hthreshold;       
-    S8     lthreshold;       
+typedef struct SfpTempTdSet {
+	U8	 mode;
+	U8	 pad[3];
+	S8	 hthreshold;
+	S8	 lthreshold;
 } SfpTempTdSet_s;
 
 typedef struct SxeLogExportResp {
-    U16    curLogLen;       
-    U8     isEnd;
-    U8     pad;
-    S32    sessionId;       
-    S8     data[0];
+	U16	curLogLen;
+	U8	 isEnd;
+	U8	 pad;
+	S32	sessionId;
+	S8	 data[0];
 } SxeLogExportResp_s;
 
 typedef enum SxeLogExportType  {
-    SXE_LOG_EXPORT_REQ    = 0,     
-    SXE_LOG_EXPORT_FIN,            
-    SXE_LOG_EXPORT_ABORT,          
+	SXE_LOG_EXPORT_REQ	= 0,
+	SXE_LOG_EXPORT_FIN,
+	SXE_LOG_EXPORT_ABORT,
 } SxeLogExportType_e;
 
 typedef struct SxeLogExportReq {
-    U8     isALLlog;       
-    U8     cmdtype;        
-    U8     isBegin;        
-    U8     pad;
-    S32    sessionId;      
-    U32    logLen;         
+	U8	 isALLlog;
+	U8	 cmdtype;
+	U8	 isBegin;
+	U8	 pad;
+	S32	sessionId;
+	U32	logLen;
 } SxeLogExportReq_s;
 
 typedef struct SocRstReq {
-    U32    time;        
+	U32	time;
 } SocRstReq_s;
 
 typedef struct RegsDumpResp {
-    U32    curdwLen;    
-    U8     data[0];
+	U32	curdwLen;
+	U8	 data[0];
 } RegsDumpResp_s;
 
 enum {
-    SXE_MFG_PART_NUMBER_LEN   = 8,
-    SXE_MFG_SERIAL_NUMBER_LEN = 16,
-    SXE_MFG_REVISION_LEN      = 4,
-    SXE_MFG_OEM_STR_LEN       = 64,
-    SXE_MFG_SXE_BOARD_ASSEMBLY_LEN  = 32,
-    SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
-    SXE_MFG_SXE_MAC_ADDR_CNT        = 2,
+	SXE_MFG_PART_NUMBER_LEN   = 8,
+	SXE_MFG_SERIAL_NUMBER_LEN = 16,
+	SXE_MFG_REVISION_LEN	  = 4,
+	SXE_MFG_OEM_STR_LEN	   = 64,
+	SXE_MFG_SXE_BOARD_ASSEMBLY_LEN  = 32,
+	SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
+	SXE_MFG_SXE_MAC_ADDR_CNT		= 2,
 };
 
 typedef struct sxeMfgInfo {
-    U8 partNumber[SXE_MFG_PART_NUMBER_LEN];      
-    U8 serialNumber [SXE_MFG_SERIAL_NUMBER_LEN]; 
-    U32 mfgDate;                               
-    U8 revision[SXE_MFG_REVISION_LEN];         
-    U32 reworkDate;                            
-    U8 pad[4];
-    U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT];             
-    U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN]; 
-    U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];  
-    U8 extra1[SXE_MFG_OEM_STR_LEN];                    
-    U8 extra2[SXE_MFG_OEM_STR_LEN];                    
+	U8 partNumber[SXE_MFG_PART_NUMBER_LEN];
+	U8 serialNumber[SXE_MFG_SERIAL_NUMBER_LEN];
+	U32 mfgDate;
+	U8 revision[SXE_MFG_REVISION_LEN];
+	U32 reworkDate;
+	U8 pad[4];
+	U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT];
+	U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN];
+	U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];
+	U8 extra1[SXE_MFG_OEM_STR_LEN];
+	U8 extra2[SXE_MFG_OEM_STR_LEN];
 } sxeMfgInfo_t;
 
+typedef struct SxeLldpInfo {
+	U8	  lldpState;
+	U8	  pad[3];
+} SxeLldpInfo_t;
+
 typedef struct RegsDumpReq {
-    U32    baseAddr;    
-    U32    dwLen;       
+	U32	baseAddr;
+	U32	dwLen;
 } RegsDumpReq_s;
 
 typedef enum sxe_pcs_mode {
-    SXE_PCS_MODE_1000BASE_KX_WO = 0, 
-    SXE_PCS_MODE_1000BASE_KX_W,      
-    SXE_PCS_MODE_SGMII,              
-    SXE_PCS_MODE_10GBASE_KR_WO,      
-    SXE_PCS_MODE_AUTO_NEGT_73,       
-    SXE_PCS_MODE_LPBK_PHY_TX2RX,     
-    SXE_PCS_MODE_LPBK_PHY_RX2TX,     
-    SXE_PCS_MODE_LPBK_PCS_RX2TX,     
-    SXE_PCS_MODE_BUTT,               
+	SXE_PCS_MODE_1000BASE_KX_WO = 0,
+	SXE_PCS_MODE_1000BASE_KX_W,
+	SXE_PCS_MODE_SGMII,
+	SXE_PCS_MODE_10GBASE_KR_WO,
+	SXE_PCS_MODE_AUTO_NEGT_73,
+	SXE_PCS_MODE_LPBK_PHY_TX2RX,
+	SXE_PCS_MODE_LPBK_PHY_RX2TX,
+	SXE_PCS_MODE_LPBK_PCS_RX2TX,
+	SXE_PCS_MODE_BUTT,
 } sxe_pcs_mode_e;
 
 typedef enum sxe_remote_fault_mode {
@@ -174,40 +179,40 @@ typedef enum sxe_remote_fault_mode {
 } sxe_remote_fault_e;
 
 typedef struct sxe_phy_cfg {
-    sxe_pcs_mode_e mode;          
-    U32 mtu;
+	sxe_pcs_mode_e mode;
+	U32 mtu;
 } sxe_pcs_cfg_s;
 
 typedef enum sxe_an_speed {
-    SXE_AN_SPEED_NO_LINK = 0,
-    SXE_AN_SPEED_100M,
-    SXE_AN_SPEED_1G,      
-    SXE_AN_SPEED_10G,     
-    SXE_AN_SPEED_UNKNOWN,
+	SXE_AN_SPEED_NO_LINK = 0,
+	SXE_AN_SPEED_100M,
+	SXE_AN_SPEED_1G,
+	SXE_AN_SPEED_10G,
+	SXE_AN_SPEED_UNKNOWN,
 } sxe_an_speed_e;
 
 typedef enum sxe_phy_pause_cap {
-    SXE_PAUSE_CAP_NO_PAUSE    = 0,   
-    SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,  
-    SXE_PAUSE_CAP_SYMMETRIC_PAUSE,   
-    SXE_PAUSE_CAP_BOTH_PAUSE,        
-    SXE_PAUSE_CAP_UNKNOWN,
+	SXE_PAUSE_CAP_NO_PAUSE	= 0,
+	SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,
+	SXE_PAUSE_CAP_SYMMETRIC_PAUSE,
+	SXE_PAUSE_CAP_BOTH_PAUSE,
+	SXE_PAUSE_CAP_UNKNOWN,
 } sxe_phy_pause_cap_e;
 
 typedef enum sxe_phy_duplex_type {
-    SXE_FULL_DUPLEX	= 0,	  
-    SXE_HALF_DUPLEX	= 1,	  
-    SXE_UNKNOWN_DUPLEX,
+	SXE_FULL_DUPLEX	= 0,
+	SXE_HALF_DUPLEX	= 1,
+	SXE_UNKNOWN_DUPLEX,
 } sxe_phy_duplex_type_e;
 
 typedef struct sxe_phy_an_cap {
-    sxe_remote_fault_e   remote_fault; 
-    sxe_phy_pause_cap_e  pause_cap;    
-    sxe_phy_duplex_type_e duplex_cap;  
+	sxe_remote_fault_e   remote_fault;
+	sxe_phy_pause_cap_e  pause_cap;
+	sxe_phy_duplex_type_e duplex_cap;
 } sxe_phy_an_cap_s;
 
 typedef struct sxe_an_cap {
-    sxe_phy_an_cap_s local;     
-    sxe_phy_an_cap_s peer;      
+	sxe_phy_an_cap_s local;
+	sxe_phy_an_cap_s peer;
 } sxe_an_cap_s;
 #endif
diff --git a/drivers/net/sxe/include/sxe/sxe_hdc.h b/drivers/net/sxe/include/sxe/sxe_hdc.h
index bbdc273bf9..ee69d9afb0 100644
--- a/drivers/net/sxe/include/sxe/sxe_hdc.h
+++ b/drivers/net/sxe/include/sxe/sxe_hdc.h
@@ -9,35 +9,35 @@
 #include "sxe_drv_type.h"
 #endif
 
-#define HDC_CACHE_TOTAL_LEN     (16 *1024)    
-#define ONE_PACKET_LEN_MAX      (1024)        
-#define DWORD_NUM               (256)         
-#define HDC_TRANS_RETRY_COUNT   (3)           
+#define HDC_CACHE_TOTAL_LEN	(16 * 1024)
+#define ONE_PACKET_LEN_MAX	(1024)
+#define DWORD_NUM		(256)
+#define HDC_TRANS_RETRY_COUNT	(3)
 
 
 typedef enum SxeHdcErrnoCode {
-    PKG_OK            = 0,     
-    PKG_ERR_REQ_LEN,           
-    PKG_ERR_RESP_LEN,          
-    PKG_ERR_PKG_SKIP,          
-    PKG_ERR_NODATA,            
-    PKG_ERR_PF_LK,             
-    PKG_ERR_OTHER,
+	PKG_OK			= 0,
+	PKG_ERR_REQ_LEN,
+	PKG_ERR_RESP_LEN,
+	PKG_ERR_PKG_SKIP,
+	PKG_ERR_NODATA,
+	PKG_ERR_PF_LK,
+	PKG_ERR_OTHER,
 } SxeHdcErrnoCode_e;
 
 typedef union HdcHeader {
-    struct {
-        U8 pid:4;          
-        U8 errCode:4;      
-        U8 len;            
-        U16 startPkg:1;    
-        U16 endPkg:1;      
-        U16 isRd:1;        
-        U16 msi:1;         
-        U16 totalLen:12;   
-    } head;
-    U32 dw0;
+	struct {
+		U8 pid:4;
+		U8 errCode:4;
+		U8 len;
+		U16 startPkg:1;
+		U16 endPkg:1;
+		U16 isRd:1;
+		U16 msi:1;
+		U16 totalLen:12;
+	} head;
+	U32 dw0;
 } HdcHeader_u;
 
-#endif 
+#endif
 
diff --git a/drivers/net/sxe/include/sxe/sxe_ioctl.h b/drivers/net/sxe/include/sxe/sxe_ioctl.h
index 4f39b0f92c..0a796add40 100644
--- a/drivers/net/sxe/include/sxe/sxe_ioctl.h
+++ b/drivers/net/sxe/include/sxe/sxe_ioctl.h
@@ -9,11 +9,11 @@
 #endif
 
 struct SxeIoctlSyncCmd {
-    U64   traceid;
-    void *inData;
-    U32   inLen;
-    void *outData;
-    U32   outLen;
+	U64   traceid;
+	void *inData;
+	U32   inLen;
+	void *outData;
+	U32   outLen;
 };
 
 #define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct SxeIoctlSyncCmd)
diff --git a/drivers/net/sxe/include/sxe/sxe_msg.h b/drivers/net/sxe/include/sxe/sxe_msg.h
index 3db4e60ce5..cea8915aa6 100644
--- a/drivers/net/sxe/include/sxe/sxe_msg.h
+++ b/drivers/net/sxe/include/sxe/sxe_msg.h
@@ -15,125 +15,125 @@
 #define SXE_HDC_MSG_HDR_SIZE  sizeof(struct sxe_hdc_drv_cmd_msg)
 
 enum sxe_cmd_type {
-    SXE_CMD_TYPE_CLI,
-    SXE_CMD_TYPE_DRV,
-    SXE_CMD_TYPE_UNKOWN,
+	SXE_CMD_TYPE_CLI,
+	SXE_CMD_TYPE_DRV,
+	SXE_CMD_TYPE_UNKNOWN,
 };
 
 typedef struct sxe_hdc_cmd_hdr {
-    U8 cmd_type;       
-    U8 cmd_sub_type;   
-    U8 reserve[6];
+	U8 cmd_type;
+	U8 cmd_sub_type;
+	U8 reserve[6];
 }sxe_hdc_cmd_hdr_s;
 
 
 
 typedef enum SxeFWState {
-    SXE_FW_START_STATE_UNDEFINED    = 0x00,   
-    SXE_FW_START_STATE_INIT_BASE    = 0x10,   
-    SXE_FW_START_STATE_SCAN_DEVICE  = 0x20,   
-    SXE_FW_START_STATE_FINISHED     = 0x30,   
-    SXE_FW_START_STATE_UPGRADE      = 0x31,   
-    SXE_FW_RUNNING_STATE_ABNOMAL    = 0x40,   
-    SXE_FW_START_STATE_MASK         = 0xF0,
+	SXE_FW_START_STATE_UNDEFINED	= 0x00,
+	SXE_FW_START_STATE_INIT_BASE	= 0x10,
+	SXE_FW_START_STATE_SCAN_DEVICE  = 0x20,
+	SXE_FW_START_STATE_FINISHED	 = 0x30,
+	SXE_FW_START_STATE_UPGRADE	  = 0x31,
+	SXE_FW_RUNNING_STATE_ABNOMAL	= 0x40,
+	SXE_FW_START_STATE_MASK		 = 0xF0,
 }SxeFWState_e;
 
 typedef struct SxeFWStateInfo {
-    U8 socStatus;          
-    char statBuff[32];       
+	U8 socStatus;
+	char statBuff[32];
 } SxeFWStateInfo_s;
 
 
 typedef enum MsiEvt {
-    MSI_EVT_SOC_STATUS          = 0x1,
-    MSI_EVT_HDC_FWOV            = 0x2,
-    MSI_EVT_HDC_TIME_SYNC       = 0x4,
+	MSI_EVT_SOC_STATUS		  = 0x1,
+	MSI_EVT_HDC_FWOV			= 0x2,
+	MSI_EVT_HDC_TIME_SYNC	   = 0x4,
 
-    MSI_EVT_MAX                 = 0x80000000,
+	MSI_EVT_MAX				 = 0x80000000,
 } MsiEvt_u;
 
 
 typedef enum SxeFwHdcState {
-    SXE_FW_HDC_TRANSACTION_IDLE = 0x01,
-    SXE_FW_HDC_TRANSACTION_BUSY,
+	SXE_FW_HDC_TRANSACTION_IDLE = 0x01,
+	SXE_FW_HDC_TRANSACTION_BUSY,
 
-    SXE_FW_HDC_TRANSACTION_ERR,
+	SXE_FW_HDC_TRANSACTION_ERR,
 } SxeFwHdcState_e;
 
 enum sxe_hdc_cmd_opcode {
-    SXE_CMD_SET_WOL         = 1,
-    SXE_CMD_LED_CTRL,
-    SXE_CMD_SFP_READ,
-    SXE_CMD_SFP_WRITE,
-    SXE_CMD_TX_DIS_CTRL     = 5,
-    SXE_CMD_TINE_SYNC,
-    SXE_CMD_RATE_SELECT,
-    SXE_CMD_R0_MAC_GET,
-    SXE_CMD_LOG_EXPORT,
-    SXE_CMD_FW_VER_GET  = 10,
-    SXE_CMD_PCS_SDS_INIT,         
-    SXE_CMD_AN_SPEED_GET,         
-    SXE_CMD_AN_CAP_GET,           
-    SXE_CMD_GET_SOC_INFO,         
-    SXE_CMD_MNG_RST = 15,         
-
-    SXE_CMD_MAX,
+	SXE_CMD_SET_WOL		 = 1,
+	SXE_CMD_LED_CTRL,
+	SXE_CMD_SFP_READ,
+	SXE_CMD_SFP_WRITE,
+	SXE_CMD_TX_DIS_CTRL	 = 5,
+	SXE_CMD_TINE_SYNC,
+	SXE_CMD_RATE_SELECT,
+	SXE_CMD_R0_MAC_GET,
+	SXE_CMD_LOG_EXPORT,
+	SXE_CMD_FW_VER_GET  = 10,
+	SXE_CMD_PCS_SDS_INIT,
+	SXE_CMD_AN_SPEED_GET,
+	SXE_CMD_AN_CAP_GET,
+	SXE_CMD_GET_SOC_INFO,
+	SXE_CMD_MNG_RST = 15,
+
+	SXE_CMD_MAX,
 };
 
 enum sxe_hdc_cmd_errcode {
-    SXE_ERR_INVALID_PARAM = 1,
+	SXE_ERR_INVALID_PARAM = 1,
 };
 
 typedef struct sxe_hdc_drv_cmd_msg {
 
-    U16 opcode;
-    U16 errcode;
-    union dataLength {
-        U16 req_len;
-        U16 ack_len;
-    } length;
-    U8 reserve[8];
-    U64 traceid;
-    U8 body[0];
+	U16 opcode;
+	U16 errcode;
+	union dataLength {
+		U16 req_len;
+		U16 ack_len;
+	} length;
+	U8 reserve[8];
+	U64 traceid;
+	U8 body[];
 } sxe_hdc_drv_cmd_msg_s;
 
 
 typedef struct sxe_sfp_rw_req {
-    U16 offset;       
-    U16 len;          
-    U8  write_data[0];
+	U16 offset;
+	U16 len;
+	U8  write_data[];
 } sxe_sfp_rw_req_s;
 
 
 typedef struct sxe_sfp_read_resp {
-    U16 len;     
-    U8  resp[0]; 
+	U16 len;
+	U8  resp[];
 } sxe_sfp_read_resp_s;
 
-typedef enum sxe_sfp_rate{
-    SXE_SFP_RATE_1G     = 0,
-    SXE_SFP_RATE_10G    = 1,
+typedef enum sxe_sfp_rate {
+	SXE_SFP_RATE_1G	 = 0,
+	SXE_SFP_RATE_10G	= 1,
 } sxe_sfp_rate_e;
 
 
 typedef struct sxe_sfp_rate_able {
-    sxe_sfp_rate_e rate;       
+	sxe_sfp_rate_e rate;
 } sxe_sfp_rate_able_s;
 
 
 typedef struct sxe_spp_tx_able {
-    BOOL isDisable;       
+	BOOL isDisable;
 } sxe_spp_tx_able_s;
 
 
 typedef struct sxe_default_mac_addr_resp {
-    U8  addr[SXE_MAC_ADDR_LEN]; 
+	U8  addr[SXE_MAC_ADDR_LEN];
 } sxe_default_mac_addr_resp_s;
 
 
 typedef struct sxe_mng_rst {
-    BOOL enable;       
+	BOOL enable;
 } sxe_mng_rst_s;
 
-#endif 
+#endif
 
diff --git a/drivers/net/sxe/include/sxe/sxe_regs.h b/drivers/net/sxe/include/sxe/sxe_regs.h
index 0652cd4906..aa41f5aa18 100644
--- a/drivers/net/sxe/include/sxe/sxe_regs.h
+++ b/drivers/net/sxe/include/sxe/sxe_regs.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C), 2022, Linkdata Technology Co., Ltd.
  */
- 
+
 #ifndef __SXE_REGS_H__
 #define __SXE_REGS_H__
 
@@ -20,9 +20,9 @@
 #endif
 
 
-#define SXE_CTRL	0x00000 
-#define SXE_STATUS	0x00008 
-#define SXE_CTRL_EXT	0x00018 
+#define SXE_CTRL	0x00000
+#define SXE_STATUS	0x00008
+#define SXE_CTRL_EXT	0x00018
 
 
 #define SXE_CTRL_LNK_RST	0x00000008
@@ -57,7 +57,7 @@
 #define SXE_FCCFG_TFCE_PRIORITY	0x00000010
 
 
-#define SXE_GCR_EXT           0x11050 
+#define SXE_GCR_EXT		   0x11050
 
 
 #define SXE_GCR_CMPL_TMOUT_MASK		0x0000F000
@@ -117,7 +117,7 @@
 #define SXE_EIMC_EX(i)	(0x00AB0 + (i) * 4)
 #define SXE_EIAM_EX(i)	(0x00AD0 + (i) * 4)
 #define SXE_EITR(i)	(((i) <= 23) ? (0x00820 + ((i) * 4)) : \
-                        	(0x012300 + (((i) - 24) * 4)))
+							(0x012300 + (((i) - 24) * 4)))
 
 #define SXE_SPP_PROC	0x00AD8
 #define SXE_SPP_STATE	0x00AF4
@@ -136,13 +136,13 @@
 
 
 #define SXE_EICS_RTX_QUEUE	SXE_EICR_RTX_QUEUE
-#define SXE_EICS_FLOW_NAV	SXE_EICR_FLOW_NAV 
-#define SXE_EICS_MAILBOX	SXE_EICR_MAILBOX  
-#define SXE_EICS_LSC		SXE_EICR_LSC      
-#define SXE_EICS_ECC		SXE_EICR_ECC      
-#define SXE_EICS_HDC		SXE_EICR_HDC      
+#define SXE_EICS_FLOW_NAV	SXE_EICR_FLOW_NAV
+#define SXE_EICS_MAILBOX	SXE_EICR_MAILBOX
+#define SXE_EICS_LSC		SXE_EICR_LSC
+#define SXE_EICS_ECC		SXE_EICR_ECC
+#define SXE_EICS_HDC		SXE_EICR_HDC
 #define SXE_EICS_TCP_TIMER	SXE_EICR_TCP_TIMER
-#define SXE_EICS_OTHER		SXE_EICR_OTHER    
+#define SXE_EICS_OTHER		SXE_EICR_OTHER
 
 
 #define SXE_EIMS_RTX_QUEUE	SXE_EICR_RTX_QUEUE
@@ -156,9 +156,9 @@
 #define SXE_EIMS_ENABLE_MASK	(SXE_EIMS_RTX_QUEUE | SXE_EIMS_LSC | \
 					SXE_EIMS_TCP_TIMER | SXE_EIMS_OTHER)
 
-#define SXE_EIMC_FLOW_NAV	SXE_EICR_FLOW_NAV 
-#define SXE_EIMC_LSC		SXE_EICR_LSC      
-#define SXE_EIMC_HDC		SXE_EICR_HDC      
+#define SXE_EIMC_FLOW_NAV	SXE_EICR_FLOW_NAV
+#define SXE_EIMC_LSC		SXE_EICR_LSC
+#define SXE_EIMC_HDC		SXE_EICR_HDC
 
 
 #define SXE_GPIE_SPP0_EN	0x00000001
@@ -204,7 +204,7 @@
 #define SXE_RXCSUM		0x05000
 #define SXE_RFCTL		0x05008
 #define SXE_FCTRL		0x05080
-#define SXE_EXVET               0x05078
+#define SXE_EXVET			   0x05078
 #define SXE_VLNCTRL		0x05088
 #define SXE_MCSTCTRL		0x05090
 #define SXE_ETQF(_i)		(0x05128 + ((_i) * 4))
@@ -218,8 +218,8 @@
 #define SXE_MPSAR_LOW(_i)	(0x0A600 + ((_i) * 8))
 #define SXE_MPSAR_HIGH(_i)	(0x0A604 + ((_i) * 8))
 #define SXE_PSRTYPE(_i)		(0x0EA00 + ((_i) * 4))
-#define SXE_RETA(_i)		(0x0EB00 + ((_i) * 4)) 
-#define SXE_RSSRK(_i)		(0x0EB80 + ((_i) * 4)) 
+#define SXE_RETA(_i)		(0x0EB00 + ((_i) * 4))
+#define SXE_RSSRK(_i)		(0x0EB80 + ((_i) * 4))
 #define SXE_RQTC		0x0EC70
 #define SXE_MRQC		0x0EC80
 #define SXE_IEOI		0x0F654
@@ -242,8 +242,8 @@
 #define SXE_LPL_DEFAULT			0x26000000
 
 
-#define SXE_RXCSUM_IPPCSE	0x00001000  
-#define SXE_RXCSUM_PCSD		0x00002000  
+#define SXE_RXCSUM_IPPCSE	0x00001000
+#define SXE_RXCSUM_PCSD		0x00002000
 
 
 #define SXE_RFCTL_LRO_DIS	0x00000020
@@ -259,14 +259,14 @@
 #define SXE_FCTRL_DPF		0x00002000
 
 
-#define SXE_VLNCTRL_VET		0x0000FFFF 
-#define SXE_VLNCTRL_CFI		0x10000000 
-#define SXE_VLNCTRL_CFIEN	0x20000000 
-#define SXE_VLNCTRL_VFE		0x40000000 
-#define SXE_VLNCTRL_VME		0x80000000 
+#define SXE_VLNCTRL_VET		0x0000FFFF
+#define SXE_VLNCTRL_CFI		0x10000000
+#define SXE_VLNCTRL_CFIEN	0x20000000
+#define SXE_VLNCTRL_VFE		0x40000000
+#define SXE_VLNCTRL_VME		0x80000000
 
-#define SXE_EXVET_VET_EXT_SHIFT              16
-#define SXE_EXTENDED_VLAN	             (1 << 26)
+#define SXE_EXVET_VET_EXT_SHIFT			  16
+#define SXE_EXTENDED_VLAN				 (1 << 26)
 
 
 #define SXE_MCSTCTRL_MFE	4
@@ -290,10 +290,10 @@
 #define SXE_ETQS_QUEUE_EN	0x80000000
 
 
-#define SXE_SYN_FILTER_ENABLE         0x00000001
-#define SXE_SYN_FILTER_QUEUE          0x000000FE
-#define SXE_SYN_FILTER_QUEUE_SHIFT    1
-#define SXE_SYN_FILTER_SYNQFP         0x80000000
+#define SXE_SYN_FILTER_ENABLE		 0x00000001
+#define SXE_SYN_FILTER_QUEUE		  0x000000FE
+#define SXE_SYN_FILTER_QUEUE_SHIFT	1
+#define SXE_SYN_FILTER_SYNQFP		 0x80000000
 
 
 #define SXE_RAH_VIND_MASK	0x003C0000
@@ -309,26 +309,26 @@
 #define SXE_PSRTYPE_L2HDR	0x00001000
 
 
-#define SXE_MRQC_RSSEN                 0x00000001 
-#define SXE_MRQC_MRQE_MASK                    0xF
-#define SXE_MRQC_RT8TCEN               0x00000002
-#define SXE_MRQC_RT4TCEN               0x00000003
-#define SXE_MRQC_RTRSS8TCEN            0x00000004
-#define SXE_MRQC_RTRSS4TCEN            0x00000005
-#define SXE_MRQC_VMDQEN                0x00000008
-#define SXE_MRQC_VMDQRSS32EN           0x0000000A
-#define SXE_MRQC_VMDQRSS64EN           0x0000000B
-#define SXE_MRQC_VMDQRT8TCEN           0x0000000C
-#define SXE_MRQC_VMDQRT4TCEN           0x0000000D
-#define SXE_MRQC_RSS_FIELD_MASK        0xFFFF0000
-#define SXE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
-#define SXE_MRQC_RSS_FIELD_IPV4        0x00020000
+#define SXE_MRQC_RSSEN				 0x00000001
+#define SXE_MRQC_MRQE_MASK					0xF
+#define SXE_MRQC_RT8TCEN			   0x00000002
+#define SXE_MRQC_RT4TCEN			   0x00000003
+#define SXE_MRQC_RTRSS8TCEN			0x00000004
+#define SXE_MRQC_RTRSS4TCEN			0x00000005
+#define SXE_MRQC_VMDQEN				0x00000008
+#define SXE_MRQC_VMDQRSS32EN		   0x0000000A
+#define SXE_MRQC_VMDQRSS64EN		   0x0000000B
+#define SXE_MRQC_VMDQRT8TCEN		   0x0000000C
+#define SXE_MRQC_VMDQRT4TCEN		   0x0000000D
+#define SXE_MRQC_RSS_FIELD_MASK		0xFFFF0000
+#define SXE_MRQC_RSS_FIELD_IPV4_TCP	0x00010000
+#define SXE_MRQC_RSS_FIELD_IPV4		0x00020000
 #define SXE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
-#define SXE_MRQC_RSS_FIELD_IPV6_EX     0x00080000
-#define SXE_MRQC_RSS_FIELD_IPV6        0x00100000
-#define SXE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000
-#define SXE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
-#define SXE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX	 0x00080000
+#define SXE_MRQC_RSS_FIELD_IPV6		0x00100000
+#define SXE_MRQC_RSS_FIELD_IPV6_TCP	0x00200000
+#define SXE_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
+#define SXE_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
 #define SXE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
 
 
@@ -348,9 +348,9 @@
 					(0x0D028 + (((_i) - 64) * 0x40)))
 #define SXE_LROCTL(_i)		(((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
 					(0x0D02C + (((_i) - 64) * 0x40)))
-#define SXE_RDRXCTL		0x02F00  
-#define SXE_RXCTRL		0x03000 
-#define SXE_LRODBU 		0x03028  
+#define SXE_RDRXCTL		0x02F00
+#define SXE_RXCTRL		0x03000
+#define SXE_LRODBU		0x03028
 #define SXE_RXPBSIZE(_i)	(0x03C00 + ((_i) * 4))
 
 #define SXE_DRXCFG		(0x03C20)
@@ -370,9 +370,9 @@
 #define SXE_SRRCTL_BSIZEHDR_MASK	0x00003F00
 
 
-#define SXE_RXDCTL_ENABLE	0x02000000 
-#define SXE_RXDCTL_SWFLSH	0x04000000 
-#define SXE_RXDCTL_VME		0x40000000 
+#define SXE_RXDCTL_ENABLE	0x02000000
+#define SXE_RXDCTL_SWFLSH	0x04000000
+#define SXE_RXDCTL_VME		0x40000000
 #define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT	8
 #define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT	16
 
@@ -409,11 +409,11 @@
 #define SXE_LRODBU_LROACKDIS	0x00000080
 
 
-#define SXE_DRXCFG_GSP_ZERO    0x00000002
+#define SXE_DRXCFG_GSP_ZERO	0x00000002
 #define SXE_DRXCFG_DBURX_START 0x00000001
 
 
-#define SXE_DMATXCTL		0x04A80   
+#define SXE_DMATXCTL		0x04A80
 #define SXE_TDBAL(_i)		(0x06000 + ((_i) * 0x40))
 #define SXE_TDBAH(_i)		(0x06004 + ((_i) * 0x40))
 #define SXE_TDLEN(_i)		(0x06008 + ((_i) * 0x40))
@@ -424,25 +424,25 @@
 #define SXE_PVFTDWBAH(p)	(0x0603C + (0x40 * (p)))
 #define SXE_TXPBSIZE(_i)	(0x0CC00 + ((_i) * 4))
 #define SXE_TXPBTHRESH(_i)	(0x04950 + ((_i) * 4))
-#define SXE_MTQC		0x08120               
-#define SXE_TXPBFCS		0x0CE00               
-#define SXE_DTXCFG		0x0CE08               
-#define SXE_DTMPCNT		0x0CE98               
+#define SXE_MTQC		0x08120
+#define SXE_TXPBFCS		0x0CE00
+#define SXE_DTXCFG		0x0CE08
+#define SXE_DTMPCNT		0x0CE98
 
 
 #define SXE_DMATXCTL_DEFAULT		0x81000000
 
 
-#define SXE_DMATXCTL_TE		0x1       
-#define SXE_DMATXCTL_GDV	0x8       
-#define SXE_DMATXCTL_VT_SHIFT	16        
-#define SXE_DMATXCTL_VT_MASK    0xFFFF0000
+#define SXE_DMATXCTL_TE		0x1
+#define SXE_DMATXCTL_GDV	0x8
+#define SXE_DMATXCTL_VT_SHIFT	16
+#define SXE_DMATXCTL_VT_MASK	0xFFFF0000
 
 
 #define SXE_TXDCTL_HTHRESH_SHIFT 8
 #define SXE_TXDCTL_WTHRESH_SHIFT 16
-#define SXE_TXDCTL_ENABLE     0x02000000
-#define SXE_TXDCTL_SWFLSH     0x04000000
+#define SXE_TXDCTL_ENABLE	 0x02000000
+#define SXE_TXDCTL_SWFLSH	 0x04000000
 
 #define SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, vf_ring_idx) \
 		SXE_PVFTDWBAL((ring_per_pool) * (vf_idx) + vf_ring_idx)
@@ -470,7 +470,7 @@
 #define SXE_TFCS_PB_MASK	0xff
 
 
-#define SXE_DTXCFG_DBUTX_START	0x00000001   
+#define SXE_DTXCFG_DBUTX_START	0x00000001
 #define SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG	0x20
 
 
@@ -494,7 +494,7 @@
 
 
 #define SXE_RTRPT4C_MCL_SHIFT	12
-#define SXE_RTRPT4C_BWG_SHIFT	9 
+#define SXE_RTRPT4C_BWG_SHIFT	9
 #define SXE_RTRPT4C_GSP		0x40000000
 #define SXE_RTRPT4C_LSP		0x80000000
 
@@ -537,40 +537,40 @@
 #define SXE_RTTPCS_ARBDIS	0x00000040
 #define SXE_RTTPCS_TPRM		0x00000100
 #define SXE_RTTPCS_ARBD_SHIFT	22
-#define SXE_RTTPCS_ARBD_DCB	0x4       
+#define SXE_RTTPCS_ARBD_DCB	0x4
 
 
 #define SXE_RTTPT2C_MCL_SHIFT 12
 #define SXE_RTTPT2C_BWG_SHIFT 9
-#define SXE_RTTPT2C_GSP       0x40000000
-#define SXE_RTTPT2C_LSP       0x80000000
+#define SXE_RTTPT2C_GSP	   0x40000000
+#define SXE_RTTPT2C_LSP	   0x80000000
 
 
 #define SXE_TPH_CTRL		0x11074
-#define SXE_TPH_TXCTRL(_i)      (0x0600C + ((_i) * 0x40))
+#define SXE_TPH_TXCTRL(_i)	  (0x0600C + ((_i) * 0x40))
 #define SXE_TPH_RXCTRL(_i)	(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
 				 (0x0D00C + (((_i) - 64) * 0x40)))
 
 
 #define SXE_TPH_CTRL_ENABLE		0x00000000
 #define SXE_TPH_CTRL_DISABLE		0x00000001
-#define SXE_TPH_CTRL_MODE_CB1		0x00      
-#define SXE_TPH_CTRL_MODE_CB2		0x02      
+#define SXE_TPH_CTRL_MODE_CB1		0x00
+#define SXE_TPH_CTRL_MODE_CB2		0x02
 
 
-#define SXE_TPH_RXCTRL_DESC_TPH_EN	BIT(5) 
-#define SXE_TPH_RXCTRL_HEAD_TPH_EN	BIT(6) 
-#define SXE_TPH_RXCTRL_DATA_TPH_EN	BIT(7) 
-#define SXE_TPH_RXCTRL_DESC_RRO_EN	BIT(9) 
+#define SXE_TPH_RXCTRL_DESC_TPH_EN	BIT(5)
+#define SXE_TPH_RXCTRL_HEAD_TPH_EN	BIT(6)
+#define SXE_TPH_RXCTRL_DATA_TPH_EN	BIT(7)
+#define SXE_TPH_RXCTRL_DESC_RRO_EN	BIT(9)
 #define SXE_TPH_RXCTRL_DATA_WRO_EN	BIT(13)
 #define SXE_TPH_RXCTRL_HEAD_WRO_EN	BIT(15)
-#define SXE_TPH_RXCTRL_CPUID_SHIFT	24     
+#define SXE_TPH_RXCTRL_CPUID_SHIFT	24
 
-#define SXE_TPH_TXCTRL_DESC_TPH_EN	BIT(5) 
-#define SXE_TPH_TXCTRL_DESC_RRO_EN	BIT(9) 
+#define SXE_TPH_TXCTRL_DESC_TPH_EN	BIT(5)
+#define SXE_TPH_TXCTRL_DESC_RRO_EN	BIT(9)
 #define SXE_TPH_TXCTRL_DESC_WRO_EN	BIT(11)
 #define SXE_TPH_TXCTRL_DATA_RRO_EN	BIT(13)
-#define SXE_TPH_TXCTRL_CPUID_SHIFT	24     
+#define SXE_TPH_TXCTRL_CPUID_SHIFT	24
 
 
 #define SXE_SECTXCTRL		0x08800
@@ -579,18 +579,18 @@
 #define SXE_SECTXMINIFG		0x08810
 #define SXE_SECRXCTRL		0x08D00
 #define SXE_SECRXSTAT		0x08D04
-#define SXE_LSECTXCTRL            0x08A04
-#define SXE_LSECTXSCL             0x08A08
-#define SXE_LSECTXSCH             0x08A0C
-#define SXE_LSECTXSA              0x08A10
-#define SXE_LSECTXPN(_n)          (0x08A14 + (4 * (_n)))
-#define SXE_LSECTXKEY(_n, _m)     (0x08A1C + ((0x10 * (_n)) + (4 * (_m))))
-#define SXE_LSECRXCTRL            0x08B04
-#define SXE_LSECRXSCL             0x08B08
-#define SXE_LSECRXSCH             0x08B0C
-#define SXE_LSECRXSA(_i)          (0x08B10 + (4 * (_i)))
-#define SXE_LSECRXPN(_i)          (0x08B18 + (4 * (_i)))
-#define SXE_LSECRXKEY(_n, _m)     (0x08B20 + ((0x10 * (_n)) + (4 * (_m))))  
+#define SXE_LSECTXCTRL			0x08A04
+#define SXE_LSECTXSCL			 0x08A08
+#define SXE_LSECTXSCH			 0x08A0C
+#define SXE_LSECTXSA			  0x08A10
+#define SXE_LSECTXPN(_n)		  (0x08A14 + (4 * (_n)))
+#define SXE_LSECTXKEY(_n, _m)	 (0x08A1C + ((0x10 * (_n)) + (4 * (_m))))
+#define SXE_LSECRXCTRL			0x08B04
+#define SXE_LSECRXSCL			 0x08B08
+#define SXE_LSECRXSCH			 0x08B0C
+#define SXE_LSECRXSA(_i)		  (0x08B10 + (4 * (_i)))
+#define SXE_LSECRXPN(_i)		  (0x08B18 + (4 * (_i)))
+#define SXE_LSECRXKEY(_n, _m)	 (0x08B20 + ((0x10 * (_n)) + (4 * (_m))))
 
 
 #define SXE_SECTXCTRL_SECTX_DIS		0x00000001
@@ -605,7 +605,7 @@
 
 #define SXE_SECRXCTRL_SECRX_DIS		0x00000001
 #define SXE_SECRXCTRL_RX_DIS		0x00000002
-#define SXE_SECRXCTRL_RP              0x00000080
+#define SXE_SECRXCTRL_RP			  0x00000080
 
 
 #define SXE_SECRXSTAT_SECRX_RDY		0x00000001
@@ -614,41 +614,41 @@
 
 #define SXE_SECTX_DCB_ENABLE_MASK	0x00001F00
 
-#define SXE_LSECTXCTRL_EN_MASK        0x00000003
-#define SXE_LSECTXCTRL_EN_SHIFT       0
-#define SXE_LSECTXCTRL_ES             0x00000010
-#define SXE_LSECTXCTRL_AISCI          0x00000020
+#define SXE_LSECTXCTRL_EN_MASK		0x00000003
+#define SXE_LSECTXCTRL_EN_SHIFT	   0
+#define SXE_LSECTXCTRL_ES			 0x00000010
+#define SXE_LSECTXCTRL_AISCI		  0x00000020
 #define SXE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
 #define SXE_LSECTXCTRL_PNTHRSH_SHIFT  8
-#define SXE_LSECTXCTRL_RSV_MASK       0x000000D8
+#define SXE_LSECTXCTRL_RSV_MASK	   0x000000D8
 
-#define SXE_LSECRXCTRL_EN_MASK        0x0000000C
-#define SXE_LSECRXCTRL_EN_SHIFT       2
-#define SXE_LSECRXCTRL_DROP_EN        0x00000010
+#define SXE_LSECRXCTRL_EN_MASK		0x0000000C
+#define SXE_LSECRXCTRL_EN_SHIFT	   2
+#define SXE_LSECRXCTRL_DROP_EN		0x00000010
 #define SXE_LSECRXCTRL_DROP_EN_SHIFT  4
-#define SXE_LSECRXCTRL_PLSH           0x00000040
-#define SXE_LSECRXCTRL_PLSH_SHIFT     6
-#define SXE_LSECRXCTRL_RP             0x00000080
-#define SXE_LSECRXCTRL_RP_SHIFT       7
-#define SXE_LSECRXCTRL_RSV_MASK       0xFFFFFF33
-
-#define SXE_LSECTXSA_AN0_MASK         0x00000003
-#define SXE_LSECTXSA_AN0_SHIFT        0
-#define SXE_LSECTXSA_AN1_MASK         0x0000000C
-#define SXE_LSECTXSA_AN1_SHIFT        2
-#define SXE_LSECTXSA_SELSA            0x00000010
-#define SXE_LSECTXSA_SELSA_SHIFT      4
-#define SXE_LSECTXSA_ACTSA            0x00000020
-
-#define SXE_LSECRXSA_AN_MASK          0x00000003
-#define SXE_LSECRXSA_AN_SHIFT         0
-#define SXE_LSECRXSA_SAV              0x00000004
-#define SXE_LSECRXSA_SAV_SHIFT        2
-#define SXE_LSECRXSA_RETIRED          0x00000010
-#define SXE_LSECRXSA_RETIRED_SHIFT    4
-
-#define SXE_LSECRXSCH_PI_MASK         0xFFFF0000
-#define SXE_LSECRXSCH_PI_SHIFT        16
+#define SXE_LSECRXCTRL_PLSH		   0x00000040
+#define SXE_LSECRXCTRL_PLSH_SHIFT	 6
+#define SXE_LSECRXCTRL_RP			 0x00000080
+#define SXE_LSECRXCTRL_RP_SHIFT	   7
+#define SXE_LSECRXCTRL_RSV_MASK	   0xFFFFFF33
+
+#define SXE_LSECTXSA_AN0_MASK		 0x00000003
+#define SXE_LSECTXSA_AN0_SHIFT		0
+#define SXE_LSECTXSA_AN1_MASK		 0x0000000C
+#define SXE_LSECTXSA_AN1_SHIFT		2
+#define SXE_LSECTXSA_SELSA			0x00000010
+#define SXE_LSECTXSA_SELSA_SHIFT	  4
+#define SXE_LSECTXSA_ACTSA			0x00000020
+
+#define SXE_LSECRXSA_AN_MASK		  0x00000003
+#define SXE_LSECRXSA_AN_SHIFT		 0
+#define SXE_LSECRXSA_SAV			  0x00000004
+#define SXE_LSECRXSA_SAV_SHIFT		2
+#define SXE_LSECRXSA_RETIRED		  0x00000010
+#define SXE_LSECRXSA_RETIRED_SHIFT	4
+
+#define SXE_LSECRXSCH_PI_MASK		 0xFFFF0000
+#define SXE_LSECRXSCH_PI_SHIFT		16
 
 #define SXE_LSECTXCTRL_DISABLE	0x0
 #define SXE_LSECTXCTRL_AUTH		0x1
@@ -658,7 +658,7 @@
 #define SXE_LSECRXCTRL_CHECK		0x1
 #define SXE_LSECRXCTRL_STRICT		0x2
 #define SXE_LSECRXCTRL_DROP		0x3
-#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
+#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE	0x4
 
 
 
@@ -757,34 +757,34 @@
 #define SXE_PXOFFRXCNT(_i)	(0x04160 + ((_i) * 4))
 
 #define SXE_EPC_GPRC		0x050E0
-#define SXE_RXDGPC              0x02F50
-#define SXE_RXDGBCL             0x02F54
-#define SXE_RXDGBCH             0x02F58
-#define SXE_RXDDGPC             0x02F5C
-#define SXE_RXDDGBCL            0x02F60
-#define SXE_RXDDGBCH            0x02F64
-#define SXE_RXLPBKGPC           0x02F68
-#define SXE_RXLPBKGBCL          0x02F6C
-#define SXE_RXLPBKGBCH          0x02F70
-#define SXE_RXDLPBKGPC          0x02F74
-#define SXE_RXDLPBKGBCL         0x02F78
-#define SXE_RXDLPBKGBCH         0x02F7C
-
-#define SXE_RXTPCIN             0x02F88
-#define SXE_RXTPCOUT            0x02F8C
-#define SXE_RXPRDDC             0x02F9C
+#define SXE_RXDGPC			  0x02F50
+#define SXE_RXDGBCL			 0x02F54
+#define SXE_RXDGBCH			 0x02F58
+#define SXE_RXDDGPC			 0x02F5C
+#define SXE_RXDDGBCL			0x02F60
+#define SXE_RXDDGBCH			0x02F64
+#define SXE_RXLPBKGPC		   0x02F68
+#define SXE_RXLPBKGBCL		  0x02F6C
+#define SXE_RXLPBKGBCH		  0x02F70
+#define SXE_RXDLPBKGPC		  0x02F74
+#define SXE_RXDLPBKGBCL		 0x02F78
+#define SXE_RXDLPBKGBCH		 0x02F7C
+
+#define SXE_RXTPCIN			 0x02F88
+#define SXE_RXTPCOUT			0x02F8C
+#define SXE_RXPRDDC			 0x02F9C
 
 #define SXE_TXDGPC		0x087A0
-#define SXE_TXDGBCL             0x087A4
-#define SXE_TXDGBCH             0x087A8
-#define SXE_TXSWERR             0x087B0
-#define SXE_TXSWITCH            0x087B4
-#define SXE_TXREPEAT            0x087B8
-#define SXE_TXDESCERR           0x087BC
+#define SXE_TXDGBCL			 0x087A4
+#define SXE_TXDGBCH			 0x087A8
+#define SXE_TXSWERR			 0x087B0
+#define SXE_TXSWITCH			0x087B4
+#define SXE_TXREPEAT			0x087B8
+#define SXE_TXDESCERR		   0x087BC
 #define SXE_MNGPRC		0x040B4
 #define SXE_MNGPDC		0x040B8
-#define SXE_RQSMR(_i)		(0x02300 + ((_i) * 4))   
-#define SXE_TQSM(_i)		(0x08600 + ((_i) * 4))   
+#define SXE_RQSMR(_i)		(0x02300 + ((_i) * 4))
+#define SXE_TQSM(_i)		(0x08600 + ((_i) * 4))
 #define SXE_QPRC(_i)		(0x01030 + ((_i) * 0x40))
 #define SXE_QBRC_L(_i)		(0x01034 + ((_i) * 0x40))
 #define SXE_QBRC_H(_i)		(0x01038 + ((_i) * 0x40))
@@ -792,9 +792,9 @@
 
 #define SXE_QPRDC(_i)		(0x01430 + ((_i) * 0x40))
 #define SXE_QPTC(_i)		(0x08680 + ((_i) * 0x4))
-#define SXE_QBTC_L(_i)		(0x08700 + ((_i) * 0x8)) 
-#define SXE_QBTC_H(_i)		(0x08704 + ((_i) * 0x8)) 
-#define SXE_SSVPC		0x08780                  
+#define SXE_QBTC_L(_i)		(0x08700 + ((_i) * 0x8))
+#define SXE_QBTC_H(_i)		(0x08704 + ((_i) * 0x8))
+#define SXE_SSVPC		0x08780
 #define SXE_MNGPTC		0x0CF90
 #define SXE_MPC(_i)		(0x03FA0 + ((_i) * 4))
 
@@ -808,91 +808,91 @@
 
 
 
-#define SXE_WUC                       0x05800
-#define SXE_WUFC                      0x05808
-#define SXE_WUS                       0x05810
-#define SXE_IP6AT(_i)                 (0x05880 + ((_i) * 4))   
+#define SXE_WUC					   0x05800
+#define SXE_WUFC					  0x05808
+#define SXE_WUS					   0x05810
+#define SXE_IP6AT(_i)				 (0x05880 + ((_i) * 4))
 
 
-#define SXE_IP6AT_CNT                 4
+#define SXE_IP6AT_CNT				 4
 
 
-#define SXE_WUC_PME_EN                0x00000002
-#define SXE_WUC_PME_STATUS            0x00000004
-#define SXE_WUC_WKEN                  0x00000010
-#define SXE_WUC_APME                  0x00000020
+#define SXE_WUC_PME_EN				0x00000002
+#define SXE_WUC_PME_STATUS			0x00000004
+#define SXE_WUC_WKEN				  0x00000010
+#define SXE_WUC_APME				  0x00000020
 
 
-#define SXE_WUFC_LNKC                 0x00000001
-#define SXE_WUFC_MAG                  0x00000002
-#define SXE_WUFC_EX                   0x00000004
-#define SXE_WUFC_MC                   0x00000008
-#define SXE_WUFC_BC                   0x00000010
-#define SXE_WUFC_ARP                  0x00000020
-#define SXE_WUFC_IPV4                 0x00000040
-#define SXE_WUFC_IPV6                 0x00000080
-#define SXE_WUFC_MNG                  0x00000100
+#define SXE_WUFC_LNKC				 0x00000001
+#define SXE_WUFC_MAG				  0x00000002
+#define SXE_WUFC_EX				   0x00000004
+#define SXE_WUFC_MC				   0x00000008
+#define SXE_WUFC_BC				   0x00000010
+#define SXE_WUFC_ARP				  0x00000020
+#define SXE_WUFC_IPV4				 0x00000040
+#define SXE_WUFC_IPV6				 0x00000080
+#define SXE_WUFC_MNG				  0x00000100
 
 
 
 
-#define SXE_TSCTRL              0x14800
-#define SXE_TSES                0x14804
-#define SXE_TSYNCTXCTL          0x14810
-#define SXE_TSYNCRXCTL          0x14820
-#define SXE_RXSTMPL             0x14824
-#define SXE_RXSTMPH             0x14828
-#define SXE_SYSTIML             0x14840
-#define SXE_SYSTIMM             0x14844
-#define SXE_SYSTIMH             0x14848
-#define SXE_TIMADJL             0x14850
-#define SXE_TIMADJH             0x14854
-#define SXE_TIMINC              0x14860
+#define SXE_TSCTRL			  0x14800
+#define SXE_TSES				0x14804
+#define SXE_TSYNCTXCTL		  0x14810
+#define SXE_TSYNCRXCTL		  0x14820
+#define SXE_RXSTMPL			 0x14824
+#define SXE_RXSTMPH			 0x14828
+#define SXE_SYSTIML			 0x14840
+#define SXE_SYSTIMM			 0x14844
+#define SXE_SYSTIMH			 0x14848
+#define SXE_TIMADJL			 0x14850
+#define SXE_TIMADJH			 0x14854
+#define SXE_TIMINC			  0x14860
 
 
-#define SXE_TSYNCTXCTL_TXTT     0x0001
-#define SXE_TSYNCTXCTL_TEN      0x0010
+#define SXE_TSYNCTXCTL_TXTT	 0x0001
+#define SXE_TSYNCTXCTL_TEN	  0x0010
 
 
-#define SXE_TSYNCRXCTL_RXTT     0x0001
-#define SXE_TSYNCRXCTL_REN      0x0010
+#define SXE_TSYNCRXCTL_RXTT	 0x0001
+#define SXE_TSYNCRXCTL_REN	  0x0010
 
 
-#define SXE_TSCTRL_TSSEL        0x00001
-#define SXE_TSCTRL_TSEN         0x00002
-#define SXE_TSCTRL_VER_2        0x00010
-#define SXE_TSCTRL_ONESTEP      0x00100
-#define SXE_TSCTRL_CSEN         0x01000
-#define SXE_TSCTRL_PTYP_ALL     0x00C00
+#define SXE_TSCTRL_TSSEL		0x00001
+#define SXE_TSCTRL_TSEN		 0x00002
+#define SXE_TSCTRL_VER_2		0x00010
+#define SXE_TSCTRL_ONESTEP	  0x00100
+#define SXE_TSCTRL_CSEN		 0x01000
+#define SXE_TSCTRL_PTYP_ALL	 0x00C00
 #define SXE_TSCTRL_L4_UNICAST   0x08000
 
 
-#define SXE_TSES_TXES                   0x00200
-#define SXE_TSES_RXES                   0x00800
-#define SXE_TSES_TXES_V1_SYNC           0x00000
-#define SXE_TSES_TXES_V1_DELAY_REQ      0x00100
-#define SXE_TSES_TXES_V1_ALL            0x00200
-#define SXE_TSES_RXES_V1_SYNC           0x00000
-#define SXE_TSES_RXES_V1_DELAY_REQ      0x00400
-#define SXE_TSES_RXES_V1_ALL            0x00800
-#define SXE_TSES_TXES_V2_ALL            0x00200
-#define SXE_TSES_RXES_V2_ALL            0x00800
+#define SXE_TSES_TXES				   0x00200
+#define SXE_TSES_RXES				   0x00800
+#define SXE_TSES_TXES_V1_SYNC		   0x00000
+#define SXE_TSES_TXES_V1_DELAY_REQ	  0x00100
+#define SXE_TSES_TXES_V1_ALL			0x00200
+#define SXE_TSES_RXES_V1_SYNC		   0x00000
+#define SXE_TSES_RXES_V1_DELAY_REQ	  0x00400
+#define SXE_TSES_RXES_V1_ALL			0x00800
+#define SXE_TSES_TXES_V2_ALL			0x00200
+#define SXE_TSES_RXES_V2_ALL			0x00800
 
-#define SXE_IV_SNS              0
-#define SXE_IV_NS               8
-#define SXE_INCPD               0
-#define SXE_BASE_INCVAL         8
+#define SXE_IV_SNS			  0
+#define SXE_IV_NS			   8
+#define SXE_INCPD			   0
+#define SXE_BASE_INCVAL		 8
 
 
 #define SXE_VT_CTL		0x051B0
 #define SXE_PFMAILBOX(_i)	(0x04B00 + (4 * (_i)))
 
 #define SXE_PFMBICR(_i)		(0x00710 + (4 * (_i)))
-#define SXE_VFLRE(i)		((i & 1)? 0x001C0 : 0x00600)
+#define SXE_VFLRE(i)		(((i) & 1) ? 0x001C0 : 0x00600)
 #define SXE_VFLREC(i)		(0x00700 + (i * 4))
 #define SXE_VFRE(_i)		(0x051E0 + ((_i) * 4))
 #define SXE_VFTE(_i)		(0x08110 + ((_i) * 4))
-#define SXE_QDE			(0x02F04)             
+#define SXE_QDE			(0x02F04)
 #define SXE_SPOOF(_i)		(0x08200 + (_i) * 4)
 #define SXE_PFDTXGSWC		0x08220
 #define SXE_VMVIR(_i)		(0x08000 + ((_i) * 4))
@@ -900,7 +900,7 @@
 #define SXE_VLVF(_i)		(0x0F100 + ((_i) * 4))
 #define SXE_VLVFB(_i)		(0x0F200 + ((_i) * 4))
 #define SXE_MRCTL(_i)		(0x0F600 + ((_i) * 4))
-#define SXE_VMRVLAN(_i)	        (0x0F610 + ((_i) * 4))
+#define SXE_VMRVLAN(_i)			(0x0F610 + ((_i) * 4))
 #define SXE_VMRVM(_i)		(0x0F630 + ((_i) * 4))
 #define SXE_VMECM(_i)		(0x08790 + ((_i) * 4))
 #define SXE_PFMBMEM(_i)		(0x13000 + (64 * (_i)))
@@ -932,23 +932,23 @@
 
 
 #define SXE_VT_CTL_DIS_DEFPL  0x20000000
-#define SXE_VT_CTL_REPLEN     0x40000000
-#define SXE_VT_CTL_VT_ENABLE  0x00000001 
+#define SXE_VT_CTL_REPLEN	 0x40000000
+#define SXE_VT_CTL_VT_ENABLE  0x00000001
 #define SXE_VT_CTL_POOL_SHIFT 7
 #define SXE_VT_CTL_POOL_MASK  (0x3F << SXE_VT_CTL_POOL_SHIFT)
 
 
-#define SXE_PFMAILBOX_STS         0x00000001
-#define SXE_PFMAILBOX_ACK         0x00000002
-#define SXE_PFMAILBOX_VFU         0x00000004
-#define SXE_PFMAILBOX_PFU         0x00000008
-#define SXE_PFMAILBOX_RVFU        0x00000010
+#define SXE_PFMAILBOX_STS		 0x00000001
+#define SXE_PFMAILBOX_ACK		 0x00000002
+#define SXE_PFMAILBOX_VFU		 0x00000004
+#define SXE_PFMAILBOX_PFU		 0x00000008
+#define SXE_PFMAILBOX_RVFU		0x00000010
 
 
-#define SXE_PFMBICR_VFREQ         0x00000001
-#define SXE_PFMBICR_VFACK         0x00010000
-#define SXE_PFMBICR_VFREQ_MASK    0x0000FFFF
-#define SXE_PFMBICR_VFACK_MASK    0xFFFF0000
+#define SXE_PFMBICR_VFREQ		 0x00000001
+#define SXE_PFMBICR_VFACK		 0x00010000
+#define SXE_PFMBICR_VFREQ_MASK	0x0000FFFF
+#define SXE_PFMBICR_VFACK_MASK	0xFFFF0000
 
 
 #define SXE_QDE_ENABLE		(0x00000001)
@@ -962,7 +962,7 @@
 #define SXE_SPOOF_VLAN_SHIFT  (8)
 
 
-#define SXE_PFDTXGSWC_VT_LBEN	0x1 
+#define SXE_PFDTXGSWC_VT_LBEN	0x1
 
 
 #define SXE_VMVIR_VLANA_DEFAULT 0x40000000
@@ -978,51 +978,51 @@
 #define SXE_VMOLR_MPE		0x10000000
 
 
-#define SXE_VLVF_VIEN         0x80000000 
-#define SXE_VLVF_ENTRIES      64
+#define SXE_VLVF_VIEN		 0x80000000
+#define SXE_VLVF_ENTRIES	  64
 #define SXE_VLVF_VLANID_MASK  0x00000FFF
 
 
-#define SXE_HDC_HOST_BASE       0x16000
-#define SXE_HDC_SW_LK           (SXE_HDC_HOST_BASE + 0x00)
-#define SXE_HDC_PF_LK           (SXE_HDC_HOST_BASE + 0x04)
-#define SXE_HDC_SW_OV           (SXE_HDC_HOST_BASE + 0x08)
-#define SXE_HDC_FW_OV           (SXE_HDC_HOST_BASE + 0x0C)
-#define SXE_HDC_PACKET_HEAD0    (SXE_HDC_HOST_BASE + 0x10)
+#define SXE_HDC_HOST_BASE	   0x16000
+#define SXE_HDC_SW_LK		   (SXE_HDC_HOST_BASE + 0x00)
+#define SXE_HDC_PF_LK		   (SXE_HDC_HOST_BASE + 0x04)
+#define SXE_HDC_SW_OV		   (SXE_HDC_HOST_BASE + 0x08)
+#define SXE_HDC_FW_OV		   (SXE_HDC_HOST_BASE + 0x0C)
+#define SXE_HDC_PACKET_HEAD0	(SXE_HDC_HOST_BASE + 0x10)
 
-#define SXE_HDC_PACKET_DATA0    (SXE_HDC_HOST_BASE + 0x20)
+#define SXE_HDC_PACKET_DATA0	(SXE_HDC_HOST_BASE + 0x20)
 
 
 #define SXE_HDC_MSI_STATUS_REG  0x17000
-#define SXE_FW_STATUS_REG       0x17004
-#define SXE_DRV_STATUS_REG      0x17008
-#define SXE_FW_HDC_STATE_REG    0x1700C
-#define SXE_R0_MAC_ADDR_RAL     0x17010
-#define SXE_R0_MAC_ADDR_RAH     0x17014
+#define SXE_FW_STATUS_REG	   0x17004
+#define SXE_DRV_STATUS_REG	  0x17008
+#define SXE_FW_HDC_STATE_REG	0x1700C
+#define SXE_R0_MAC_ADDR_RAL	 0x17010
+#define SXE_R0_MAC_ADDR_RAH	 0x17014
 #define SXE_CRC_STRIP_REG		0x17018
 
 
-#define SXE_HDC_SW_LK_BIT       0x0001
-#define SXE_HDC_PF_LK_BIT       0x0003
-#define SXE_HDC_SW_OV_BIT       0x0001
-#define SXE_HDC_FW_OV_BIT       0x0001
+#define SXE_HDC_SW_LK_BIT	   0x0001
+#define SXE_HDC_PF_LK_BIT	   0x0003
+#define SXE_HDC_SW_OV_BIT	   0x0001
+#define SXE_HDC_FW_OV_BIT	   0x0001
 #define SXE_HDC_RELEASE_SW_LK   0x0000
 
-#define SXE_HDC_LEN_TO_REG(n)        (n - 1)
-#define SXE_HDC_LEN_FROM_REG(n)      (n + 1)
+#define SXE_HDC_LEN_TO_REG(n)		(n - 1)
+#define SXE_HDC_LEN_FROM_REG(n)	  (n + 1)
 
 
-#define SXE_RX_PKT_BUF_SIZE_SHIFT    10
-#define SXE_TX_PKT_BUF_SIZE_SHIFT    10
+#define SXE_RX_PKT_BUF_SIZE_SHIFT	10
+#define SXE_TX_PKT_BUF_SIZE_SHIFT	10
 
-#define SXE_RXIDX_TBL_SHIFT           1
-#define SXE_RXTXIDX_IPS_EN            0x00000001
-#define SXE_RXTXIDX_IDX_SHIFT         3
-#define SXE_RXTXIDX_READ              0x40000000
-#define SXE_RXTXIDX_WRITE             0x80000000
+#define SXE_RXIDX_TBL_SHIFT		   1
+#define SXE_RXTXIDX_IPS_EN			0x00000001
+#define SXE_RXTXIDX_IDX_SHIFT		 3
+#define SXE_RXTXIDX_READ			  0x40000000
+#define SXE_RXTXIDX_WRITE			 0x80000000
 
 
-#define SXE_KEEP_CRC_EN		      0x00000001
+#define SXE_KEEP_CRC_EN			  0x00000001
 
 
 #define SXE_VMD_CTL			0x0581C
@@ -1032,208 +1032,208 @@
 #define SXE_VMD_CTL_POOL_FILTER		0x00000002
 
 
-#define SXE_FLCTRL                    0x14300
-#define SXE_PFCTOP                    0x14304
-#define SXE_FCTTV0                    0x14310
-#define SXE_FCTTV(_i)                (SXE_FCTTV0 + ((_i) * 4))
-#define SXE_FCRTV                     0x14320
-#define SXE_TFCS                      0x14324
+#define SXE_FLCTRL					0x14300
+#define SXE_PFCTOP					0x14304
+#define SXE_FCTTV0					0x14310
+#define SXE_FCTTV(_i)				(SXE_FCTTV0 + ((_i) * 4))
+#define SXE_FCRTV					 0x14320
+#define SXE_TFCS					  0x14324
 
 
-#define SXE_FCTRL_TFCE_MASK           0x0018
-#define SXE_FCTRL_TFCE_LFC_EN         0x0008
-#define SXE_FCTRL_TFCE_PFC_EN         0x0010
-#define SXE_FCTRL_TFCE_DPF_EN         0x0020
-#define SXE_FCTRL_RFCE_MASK           0x0300
-#define SXE_FCTRL_RFCE_LFC_EN         0x0100
-#define SXE_FCTRL_RFCE_PFC_EN         0x0200
+#define SXE_FCTRL_TFCE_MASK		   0x0018
+#define SXE_FCTRL_TFCE_LFC_EN		 0x0008
+#define SXE_FCTRL_TFCE_PFC_EN		 0x0010
+#define SXE_FCTRL_TFCE_DPF_EN		 0x0020
+#define SXE_FCTRL_RFCE_MASK		   0x0300
+#define SXE_FCTRL_RFCE_LFC_EN		 0x0100
+#define SXE_FCTRL_RFCE_PFC_EN		 0x0200
 
-#define SXE_FCTRL_TFCE_FCEN_MASK      0x00FF0000
-#define SXE_FCTRL_TFCE_XONE_MASK      0xFF000000
+#define SXE_FCTRL_TFCE_FCEN_MASK	  0x00FF0000
+#define SXE_FCTRL_TFCE_XONE_MASK	  0xFF000000
 
 
-#define SXE_PFCTOP_FCT               0x8808
-#define SXE_PFCTOP_FCOP_MASK         0xFFFF0000
-#define SXE_PFCTOP_FCOP_PFC          0x01010000
-#define SXE_PFCTOP_FCOP_LFC          0x00010000
+#define SXE_PFCTOP_FCT			   0x8808
+#define SXE_PFCTOP_FCOP_MASK		 0xFFFF0000
+#define SXE_PFCTOP_FCOP_PFC		  0x01010000
+#define SXE_PFCTOP_FCOP_LFC		  0x00010000
 
 
-#define SXE_COMCTRL                   0x14400
-#define SXE_PCCTRL                    0x14404
-#define SXE_LPBKCTRL                  0x1440C
-#define SXE_MAXFS                     0x14410
-#define SXE_SACONH                    0x14420
-#define SXE_SACONL                    0x14424
-#define SXE_VLANCTRL                  0x14430
-#define SXE_VLANID                    0x14434
-#define SXE_LINKS                     0x14454
-#define SXE_FPGA_SDS_STS	      0x14704
-#define SXE_MSCA                      0x14500
-#define SXE_MSCD                      0x14504
+#define SXE_COMCTRL				   0x14400
+#define SXE_PCCTRL					0x14404
+#define SXE_LPBKCTRL				  0x1440C
+#define SXE_MAXFS					 0x14410
+#define SXE_SACONH					0x14420
+#define SXE_SACONL					0x14424
+#define SXE_VLANCTRL				  0x14430
+#define SXE_VLANID					0x14434
+#define SXE_LINKS					 0x14454
+#define SXE_FPGA_SDS_STS		  0x14704
+#define SXE_MSCA					  0x14500
+#define SXE_MSCD					  0x14504
 
-#define SXE_HLREG0                    0x04240
-#define SXE_MFLCN                     0x04294
-#define SXE_MACC                      0x04330
+#define SXE_HLREG0					0x04240
+#define SXE_MFLCN					 0x04294
+#define SXE_MACC					  0x04330
 
-#define SXE_PCS1GLSTA                 0x0420C
-#define SXE_MFLCN                     0x04294
-#define SXE_PCS1GANA                  0x04850
-#define SXE_PCS1GANLP                 0x04854
+#define SXE_PCS1GLSTA				 0x0420C
+#define SXE_MFLCN					 0x04294
+#define SXE_PCS1GANA				  0x04850
+#define SXE_PCS1GANLP				 0x04854
 
 
-#define SXE_LPBKCTRL_EN               0x00000001
+#define SXE_LPBKCTRL_EN			   0x00000001
 
 
-#define SXE_MAC_ADDR_SACONH_SHIFT     32
-#define SXE_MAC_ADDR_SACONL_MASK      0xFFFFFFFF
+#define SXE_MAC_ADDR_SACONH_SHIFT	 32
+#define SXE_MAC_ADDR_SACONL_MASK	  0xFFFFFFFF
 
 
-#define SXE_PCS1GLSTA_AN_COMPLETE     0x10000
-#define SXE_PCS1GLSTA_AN_PAGE_RX      0x20000
-#define SXE_PCS1GLSTA_AN_TIMED_OUT    0x40000
+#define SXE_PCS1GLSTA_AN_COMPLETE	 0x10000
+#define SXE_PCS1GLSTA_AN_PAGE_RX	  0x20000
+#define SXE_PCS1GLSTA_AN_TIMED_OUT	0x40000
 #define SXE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
-#define SXE_PCS1GLSTA_AN_ERROR_RWS    0x100000
-
-#define SXE_PCS1GANA_SYM_PAUSE        0x100
-#define SXE_PCS1GANA_ASM_PAUSE        0x80 
-
-
-#define SXE_LKSTS_PCS_LKSTS_UP        0x00000001
-#define SXE_LINK_UP_TIME              90
-#define SXE_AUTO_NEG_TIME             45
-
-
-#define SXE_MSCA_NP_ADDR_MASK      0x0000FFFF
-#define SXE_MSCA_NP_ADDR_SHIFT     0
-#define SXE_MSCA_DEV_TYPE_MASK     0x001F0000
-#define SXE_MSCA_DEV_TYPE_SHIFT    16        
-#define SXE_MSCA_PHY_ADDR_MASK     0x03E00000
-#define SXE_MSCA_PHY_ADDR_SHIFT    21        
-#define SXE_MSCA_OP_CODE_MASK      0x0C000000
-#define SXE_MSCA_OP_CODE_SHIFT     26        
-#define SXE_MSCA_ADDR_CYCLE        0x00000000
-#define SXE_MSCA_WRITE             0x04000000
-#define SXE_MSCA_READ              0x0C000000
-#define SXE_MSCA_READ_AUTOINC      0x08000000
-#define SXE_MSCA_ST_CODE_MASK      0x30000000
-#define SXE_MSCA_ST_CODE_SHIFT     28        
-#define SXE_MSCA_NEW_PROTOCOL      0x00000000
-#define SXE_MSCA_OLD_PROTOCOL      0x10000000
-#define SXE_MSCA_BYPASSRA_C45      0x40000000
+#define SXE_PCS1GLSTA_AN_ERROR_RWS	0x100000
+
+#define SXE_PCS1GANA_SYM_PAUSE		0x100
+#define SXE_PCS1GANA_ASM_PAUSE		0x80
+
+
+#define SXE_LKSTS_PCS_LKSTS_UP		0x00000001
+#define SXE_LINK_UP_TIME			  90
+#define SXE_AUTO_NEG_TIME			 45
+
+
+#define SXE_MSCA_NP_ADDR_MASK	  0x0000FFFF
+#define SXE_MSCA_NP_ADDR_SHIFT	 0
+#define SXE_MSCA_DEV_TYPE_MASK	 0x001F0000
+#define SXE_MSCA_DEV_TYPE_SHIFT	16
+#define SXE_MSCA_PHY_ADDR_MASK	 0x03E00000
+#define SXE_MSCA_PHY_ADDR_SHIFT	21
+#define SXE_MSCA_OP_CODE_MASK	  0x0C000000
+#define SXE_MSCA_OP_CODE_SHIFT	 26
+#define SXE_MSCA_ADDR_CYCLE		0x00000000
+#define SXE_MSCA_WRITE			 0x04000000
+#define SXE_MSCA_READ			  0x0C000000
+#define SXE_MSCA_READ_AUTOINC	  0x08000000
+#define SXE_MSCA_ST_CODE_MASK	  0x30000000
+#define SXE_MSCA_ST_CODE_SHIFT	 28
+#define SXE_MSCA_NEW_PROTOCOL	  0x00000000
+#define SXE_MSCA_OLD_PROTOCOL	  0x10000000
+#define SXE_MSCA_BYPASSRA_C45	  0x40000000
 #define SXE_MSCA_MDI_CMD_ON_PROG   0x80000000
 
 
-#define MDIO_MSCD_RDATA_LEN        16
-#define MDIO_MSCD_RDATA_SHIFT      16
-
-
-#define SXE_CRCERRS                   0x14A04
-#define SXE_ERRBC                     0x14A10
-#define SXE_RLEC                      0x14A14
-#define SXE_PRC64                     0x14A18
-#define SXE_PRC127                    0x14A1C
-#define SXE_PRC255                    0x14A20
-#define SXE_PRC511                    0x14A24
-#define SXE_PRC1023                   0x14A28
-#define SXE_PRC1522                   0x14A2C
-#define SXE_BPRC                      0x14A30
-#define SXE_MPRC                      0x14A34
-#define SXE_GPRC                      0x14A38
-#define SXE_GORCL                     0x14A3C
-#define SXE_GORCH                     0x14A40
-#define SXE_RUC                       0x14A44
-#define SXE_RFC                       0x14A48
-#define SXE_ROC                       0x14A4C
-#define SXE_RJC                       0x14A50
-#define SXE_TORL                      0x14A54
-#define SXE_TORH                      0x14A58
-#define SXE_TPR                       0x14A5C
-#define SXE_PRCPF(_i)                 (0x14A60 + ((_i) * 4))
-#define SXE_GPTC                      0x14B00
-#define SXE_GOTCL                     0x14B04
-#define SXE_GOTCH                     0x14B08
-#define SXE_TPT                       0x14B0C
-#define SXE_PTC64                     0x14B10
-#define SXE_PTC127                    0x14B14
-#define SXE_PTC255                    0x14B18
-#define SXE_PTC511                    0x14B1C
-#define SXE_PTC1023                   0x14B20
-#define SXE_PTC1522                   0x14B24
-#define SXE_MPTC                      0x14B28
-#define SXE_BPTC                      0x14B2C
-#define SXE_PFCT(_i)                  (0x14B30 + ((_i) * 4))
-
-#define SXE_MACCFG                    0x0CE04
-#define SXE_MACCFG_PAD_EN             0x00000001
-
-
-#define SXE_COMCTRL_TXEN	      0x0001        
-#define SXE_COMCTRL_RXEN	      0x0002        
-#define SXE_COMCTRL_EDSEL	      0x0004        
-#define SXE_COMCTRL_SPEED_1G	      0x0200        
-#define SXE_COMCTRL_SPEED_10G	      0x0300        
-
-
-#define SXE_PCCTRL_TXCE		      0x0001        
-#define SXE_PCCTRL_RXCE		      0x0002        
-#define SXE_PCCTRL_PEN		      0x0100        
-#define SXE_PCCTRL_PCSC_ALL	      0x30000       
-
-
-#define SXE_MAXFS_TFSEL		      0x0001        
-#define SXE_MAXFS_RFSEL		      0x0002        
-#define SXE_MAXFS_MFS_MASK	      0xFFFF0000    
-#define SXE_MAXFS_MFS		      0x40000000    
-#define SXE_MAXFS_MFS_SHIFT	      16            
-
-
-#define SXE_LINKS_UP 	              0x00000001    
-
-#define SXE_10G_LINKS_DOWN            0x00000006
-
-
-#define SXE_LINK_SPEED_UNKNOWN        0             
-#define SXE_LINK_SPEED_10_FULL        0x0002        
-#define SXE_LINK_SPEED_100_FULL       0x0008        
-#define SXE_LINK_SPEED_1GB_FULL       0x0020        
-#define SXE_LINK_SPEED_10GB_FULL      0x0080        
-
-
-#define SXE_HLREG0_TXCRCEN            0x00000001  
-#define SXE_HLREG0_RXCRCSTRP          0x00000002  
-#define SXE_HLREG0_JUMBOEN            0x00000004  
-#define SXE_HLREG0_TXPADEN            0x00000400  
-#define SXE_HLREG0_TXPAUSEEN          0x00001000  
-#define SXE_HLREG0_RXPAUSEEN          0x00004000  
-#define SXE_HLREG0_LPBK               0x00008000  
-#define SXE_HLREG0_MDCSPD             0x00010000  
-#define SXE_HLREG0_CONTMDC            0x00020000  
-#define SXE_HLREG0_CTRLFLTR           0x00040000  
-#define SXE_HLREG0_PREPEND            0x00F00000  
-#define SXE_HLREG0_PRIPAUSEEN         0x01000000  
-#define SXE_HLREG0_RXPAUSERECDA       0x06000000  
-#define SXE_HLREG0_RXLNGTHERREN       0x08000000  
-#define SXE_HLREG0_RXPADSTRIPEN       0x10000000  
-
-#define SXE_MFLCN_PMCF                0x00000001  
-#define SXE_MFLCN_DPF                 0x00000002  
-#define SXE_MFLCN_RPFCE               0x00000004  
-#define SXE_MFLCN_RFCE                0x00000008  
-#define SXE_MFLCN_RPFCE_MASK	      0x00000FF4  
-#define SXE_MFLCN_RPFCE_SHIFT         4
-
-#define SXE_MACC_FLU                  0x00000001
-#define SXE_MACC_FSV_10G              0x00030000
-#define SXE_MACC_FS                   0x00040000
-
-#define SXE_DEFAULT_FCPAUSE           0xFFFF
-
-
-#define SXE_SAQF(_i)		(0x0E000 + ((_i) * 4)) 
-#define SXE_DAQF(_i)		(0x0E200 + ((_i) * 4)) 
-#define SXE_SDPQF(_i)		(0x0E400 + ((_i) * 4)) 
-#define SXE_FTQF(_i)		(0x0E600 + ((_i) * 4)) 
-#define SXE_L34T_IMIR(_i)	(0x0E800 + ((_i) * 4)) 
+#define MDIO_MSCD_RDATA_LEN		16
+#define MDIO_MSCD_RDATA_SHIFT	  16
+
+
+#define SXE_CRCERRS				   0x14A04
+#define SXE_ERRBC					 0x14A10
+#define SXE_RLEC					  0x14A14
+#define SXE_PRC64					 0x14A18
+#define SXE_PRC127					0x14A1C
+#define SXE_PRC255					0x14A20
+#define SXE_PRC511					0x14A24
+#define SXE_PRC1023				   0x14A28
+#define SXE_PRC1522				   0x14A2C
+#define SXE_BPRC					  0x14A30
+#define SXE_MPRC					  0x14A34
+#define SXE_GPRC					  0x14A38
+#define SXE_GORCL					 0x14A3C
+#define SXE_GORCH					 0x14A40
+#define SXE_RUC					   0x14A44
+#define SXE_RFC					   0x14A48
+#define SXE_ROC					   0x14A4C
+#define SXE_RJC					   0x14A50
+#define SXE_TORL					  0x14A54
+#define SXE_TORH					  0x14A58
+#define SXE_TPR					   0x14A5C
+#define SXE_PRCPF(_i)				 (0x14A60 + ((_i) * 4))
+#define SXE_GPTC					  0x14B00
+#define SXE_GOTCL					 0x14B04
+#define SXE_GOTCH					 0x14B08
+#define SXE_TPT					   0x14B0C
+#define SXE_PTC64					 0x14B10
+#define SXE_PTC127					0x14B14
+#define SXE_PTC255					0x14B18
+#define SXE_PTC511					0x14B1C
+#define SXE_PTC1023				   0x14B20
+#define SXE_PTC1522				   0x14B24
+#define SXE_MPTC					  0x14B28
+#define SXE_BPTC					  0x14B2C
+#define SXE_PFCT(_i)				  (0x14B30 + ((_i) * 4))
+
+#define SXE_MACCFG					0x0CE04
+#define SXE_MACCFG_PAD_EN			 0x00000001
+
+
+#define SXE_COMCTRL_TXEN		  0x0001
+#define SXE_COMCTRL_RXEN		  0x0002
+#define SXE_COMCTRL_EDSEL		  0x0004
+#define SXE_COMCTRL_SPEED_1G		  0x0200
+#define SXE_COMCTRL_SPEED_10G		  0x0300
+
+
+#define SXE_PCCTRL_TXCE			  0x0001
+#define SXE_PCCTRL_RXCE			  0x0002
+#define SXE_PCCTRL_PEN			  0x0100
+#define SXE_PCCTRL_PCSC_ALL		  0x30000
+
+
+#define SXE_MAXFS_TFSEL			  0x0001
+#define SXE_MAXFS_RFSEL			  0x0002
+#define SXE_MAXFS_MFS_MASK		  0xFFFF0000
+#define SXE_MAXFS_MFS			  0x40000000
+#define SXE_MAXFS_MFS_SHIFT		  16
+
+
+#define SXE_LINKS_UP			0x00000001
+
+#define SXE_10G_LINKS_DOWN		0x00000006
+
+
+#define SXE_LINK_SPEED_UNKNOWN		0
+#define SXE_LINK_SPEED_10_FULL		0x0002
+#define SXE_LINK_SPEED_100_FULL	   0x0008
+#define SXE_LINK_SPEED_1GB_FULL	   0x0020
+#define SXE_LINK_SPEED_10GB_FULL	  0x0080
+
+
+#define SXE_HLREG0_TXCRCEN			0x00000001
+#define SXE_HLREG0_RXCRCSTRP		  0x00000002
+#define SXE_HLREG0_JUMBOEN			0x00000004
+#define SXE_HLREG0_TXPADEN			0x00000400
+#define SXE_HLREG0_TXPAUSEEN		  0x00001000
+#define SXE_HLREG0_RXPAUSEEN		  0x00004000
+#define SXE_HLREG0_LPBK			   0x00008000
+#define SXE_HLREG0_MDCSPD			 0x00010000
+#define SXE_HLREG0_CONTMDC			0x00020000
+#define SXE_HLREG0_CTRLFLTR		   0x00040000
+#define SXE_HLREG0_PREPEND			0x00F00000
+#define SXE_HLREG0_PRIPAUSEEN		 0x01000000
+#define SXE_HLREG0_RXPAUSERECDA	   0x06000000
+#define SXE_HLREG0_RXLNGTHERREN	   0x08000000
+#define SXE_HLREG0_RXPADSTRIPEN	   0x10000000
+
+#define SXE_MFLCN_PMCF				0x00000001
+#define SXE_MFLCN_DPF				 0x00000002
+#define SXE_MFLCN_RPFCE			   0x00000004
+#define SXE_MFLCN_RFCE				0x00000008
+#define SXE_MFLCN_RPFCE_MASK		  0x00000FF4
+#define SXE_MFLCN_RPFCE_SHIFT		 4
+
+#define SXE_MACC_FLU				  0x00000001
+#define SXE_MACC_FSV_10G			  0x00030000
+#define SXE_MACC_FS				   0x00040000
+
+#define SXE_DEFAULT_FCPAUSE		   0xFFFF
+
+
+#define SXE_SAQF(_i)		(0x0E000 + ((_i) * 4))
+#define SXE_DAQF(_i)		(0x0E200 + ((_i) * 4))
+#define SXE_SDPQF(_i)		(0x0E400 + ((_i) * 4))
+#define SXE_FTQF(_i)		(0x0E600 + ((_i) * 4))
+#define SXE_L34T_IMIR(_i)	(0x0E800 + ((_i) * 4))
 
 #define SXE_MAX_FTQF_FILTERS		128
 #define SXE_FTQF_PROTOCOL_MASK		0x00000003
@@ -1264,11 +1264,11 @@
 #define SXE_L34T_IMIR_QUEUE			0x0FE00000
 #define SXE_L34T_IMIR_QUEUE_SHIFT	21
 
-#define SXE_VMTXSW(_i)                (0x05180 + ((_i) * 4))   
-#define SXE_VMTXSW_REGISTER_COUNT     2
+#define SXE_VMTXSW(_i)				(0x05180 + ((_i) * 4))
+#define SXE_VMTXSW_REGISTER_COUNT	 2
 
-#define SXE_TXSTMP_SEL		0x14510  
-#define SXE_TXSTMP_VAL		0x1451c  
+#define SXE_TXSTMP_SEL		0x14510
+#define SXE_TXSTMP_VAL		0x1451c
 
 #define SXE_TXTS_MAGIC0		0x005a005900580057
 #define SXE_TXTS_MAGIC1		0x005e005d005c005b
diff --git a/drivers/net/sxe/include/sxe_type.h b/drivers/net/sxe/include/sxe_type.h
index 433385a0c9..d416632c3f 100644
--- a/drivers/net/sxe/include/sxe_type.h
+++ b/drivers/net/sxe/include/sxe_type.h
@@ -5,69 +5,69 @@
 #ifndef __SXE_TYPE_H__
 #define __SXE_TYPE_H__
 
-#define SXE_TXD_CMD_EOP   0x01000000  
-#define SXE_TXD_CMD_RS    0x08000000  
-#define SXE_TXD_STAT_DD   0x00000001  
+#define SXE_TXD_CMD_EOP   0x01000000
+#define SXE_TXD_CMD_RS	0x08000000
+#define SXE_TXD_STAT_DD   0x00000001
 
-#define SXE_TXD_CMD       (SXE_TXD_CMD_EOP | SXE_TXD_CMD_RS)
+#define SXE_TXD_CMD	   (SXE_TXD_CMD_EOP | SXE_TXD_CMD_RS)
 
 
 typedef union sxe_adv_tx_desc {
-    struct {
-        U64 buffer_addr;
-        U32 cmd_type_len;
-        U32 olinfo_status;
-    } read;
-    struct {
-        U64 rsvd;
-        U32 nxtseq_seed;
-        U32 status;
-    } wb;
-}sxe_adv_tx_desc_u;
+	struct {
+		U64 buffer_addr;
+		U32 cmd_type_len;
+		U32 olinfo_status;
+	} read;
+	struct {
+		U64 rsvd;
+		U32 nxtseq_seed;
+		U32 status;
+	} wb;
+} sxe_adv_tx_desc_u;
 
 typedef union sxe_adv_rx_desc {
-    struct {
-        U64 pkt_addr;
-        U64 hdr_addr;
-    } read;
-    struct {
-        struct {
-            union {
-                U32 data;
-                struct {
-                    U16 pkt_info;
-                    U16 hdr_info;
-                } hs_rss;
-            } lo_dword;
-            union {
-                U32 rss;
-                struct {
-                    U16 ip_id;
-                    U16 csum;
-                } csum_ip;
-            }hi_dword;
-        } lower;
-        struct {
-            U32 status_error;
-            U16 length;
-            U16 vlan;
-        } upper;
-    } wb;
-}sxe_adv_rx_desc_u;
-
-#define SXE_RXD_STAT_DD    0x01  
-#define SXE_RXD_STAT_EOP   0x02  
+	struct {
+		U64 pkt_addr;
+		U64 hdr_addr;
+	} read;
+	struct {
+		struct {
+			union {
+				U32 data;
+				struct {
+					U16 pkt_info;
+					U16 hdr_info;
+				} hs_rss;
+			} lo_dword;
+			union {
+				U32 rss;
+				struct {
+					U16 ip_id;
+					U16 csum;
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			U32 status_error;
+			U16 length;
+			U16 vlan;
+		} upper;
+	} wb;
+} sxe_adv_rx_desc_u;
+
+#define SXE_RXD_STAT_DD	0x01
+#define SXE_RXD_STAT_EOP   0x02
 
 
 #define PCI_VENDOR_ID_STARS		 0x1FF2
 #define SXE_DEV_ID_FPGA			 0x1160
 
 
-#define SXE_CTRL      0x00000
-#define SXE_STATUS    0x00008
+#define SXE_CTRL	  0x00000
+#define SXE_STATUS	0x00008
 #define SXE_CTRL_EXT  0x00018
-#define SXE_ESDP      0x00020
-#define SXE_EODSDP    0x00028
+#define SXE_ESDP	  0x00020
+#define SXE_EODSDP	0x00028
 
 #define SXE_I2CCTL_8259X	0x00028
 #define SXE_I2CCTL_X540	SXE_I2CCTL_8259X
@@ -76,19 +76,19 @@ typedef union sxe_adv_rx_desc {
 #define SXE_I2CCTL_X550EM_a	SXE_I2CCTL_X550
 #define SXE_I2CCTL(_hw)	SXE_BY_MAC((_hw), I2CCTL)
 
-#define SXE_LEDCTL    0x00200
+#define SXE_LEDCTL	0x00200
 #define SXE_FRTIMER   0x00048
 #define SXE_TCPTIMER  0x0004C
 #define SXE_CORESPARE 0x00600
-#define SXE_EXVET     0x05078
+#define SXE_EXVET	 0x05078
 
 
-#define SXE_EICR      0x00800
-#define SXE_EICS      0x00808
-#define SXE_EIMS      0x00880
-#define SXE_EIMC      0x00888
-#define SXE_EIAC      0x00810
-#define SXE_EIAM      0x00890
+#define SXE_EICR	  0x00800
+#define SXE_EICS	  0x00808
+#define SXE_EIMS	  0x00880
+#define SXE_EIMC	  0x00888
+#define SXE_EIAC	  0x00810
+#define SXE_EIAM	  0x00890
 #define SXE_EICR_EX(_i)   (0x00A80 + (_i) * 4)
 #define SXE_EICS_EX(_i)   (0x00A90 + (_i) * 4)
 #define SXE_EIMS_EX(_i)   (0x00AA0 + (_i) * 4)
@@ -110,26 +110,28 @@ typedef union sxe_adv_rx_desc {
 			 (0x0D028 + (((_i) - 64) * 0x40)))
 #define SXE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
 			 (0x0D02C + (((_i) - 64) * 0x40)))
-#define SXE_RSCDBU     0x03028
-#define SXE_RDDCC      0x02F20
+#define SXE_RSCDBU	 0x03028
+#define SXE_RDDCC	  0x02F20
 #define SXE_RXMEMWRAP  0x03190
 #define SXE_STARCTRL   0x03024
 
 #define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : (0x0D014 + (((_i) - 64) * 0x40)))
 
-#define SXE_DCA_RXCTRL(_i)    (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : (0x0D00C + (((_i) - 64) * 0x40)))
-#define SXE_RDRXCTL           0x02F00
-#define SXE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))    
+#define SXE_DCA_RXCTRL(_i)	(((_i) < 64) ? \
+		(0x0100C + ((_i) * 0x40)) : \
+		(0x0D00C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL		   0x02F00
+#define SXE_RXPBSIZE(_i)	  (0x03C00 + ((_i) * 4))
 #define SXE_DRXCFG	0x03C20
-#define SXE_RXCTRL    0x03000
-#define SXE_DROPEN    0x03D04
+#define SXE_RXCTRL	0x03000
+#define SXE_DROPEN	0x03D04
 #define SXE_RXPBSIZE_SHIFT 10
-#define SXE_DRXCFG_GSP_ZERO    0x00000002
+#define SXE_DRXCFG_GSP_ZERO	0x00000002
 #define SXE_DRXCFG_DBURX_START 0x00000001
 
 
-#define SXE_RXCSUM    0x05000
-#define SXE_RFCTL     0x05008
+#define SXE_RXCSUM	0x05000
+#define SXE_RFCTL	 0x05008
 #define SXE_DRECCCTL  0x02F08
 #define SXE_DRECCCTL_DISABLE 0
 
@@ -141,61 +143,61 @@ typedef union sxe_adv_rx_desc {
 #define SXE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
 
 
-#define SXE_PSRTYPE(_i)    (0x0EA00 + ((_i) * 4))
+#define SXE_PSRTYPE(_i)	(0x0EA00 + ((_i) * 4))
 
 
 #define SXE_VFTA(_i)  (0x0A000 + ((_i) * 4))
 
 
 #define SXE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-#define SXE_FCTRL     0x05080
+#define SXE_FCTRL	 0x05080
 #define SXE_VLNCTRL   0x05088
 #define SXE_MCSTCTRL  0x05090
-#define SXE_MRQC      0x0EC80
-#define SXE_SAQF(_i)  (0x0E000 + ((_i) * 4)) 
-#define SXE_DAQF(_i)  (0x0E200 + ((_i) * 4)) 
-#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4)) 
-#define SXE_FTQF(_i)  (0x0E600 + ((_i) * 4)) 
-#define SXE_ETQF(_i)  (0x05128 + ((_i) * 4)) 
-#define SXE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) 
-#define SXE_SYNQF     0x0EC30                 
-#define SXE_RQTC      0x0EC70
-#define SXE_MTQC      0x08120
-#define SXE_VLVF(_i)  (0x0F100 + ((_i) * 4)) 
-#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4)) 
-#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4)) 
-#define SXE_PFFLPL     0x050B0
-#define SXE_PFFLPH     0x050B4
-#define SXE_VT_CTL         0x051B0
-#define SXE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i)))   
-#define SXE_PFMBMEM(_i)    (0x13000 + (64 * (_i)))  
-#define SXE_PFMBICR(_i)    (0x00710 + (4 * (_i)))   
-#define SXE_PFMBIMR(_i)    (0x00720 + (4 * (_i)))   
-#define SXE_VFRE(_i)       (0x051E0 + ((_i) * 4))
-#define SXE_VFTE(_i)       (0x08110 + ((_i) * 4))
-#define SXE_VMECM(_i)      (0x08790 + ((_i) * 4))
-#define SXE_QDE            0x2F04
-#define SXE_VMTXSW(_i)     (0x05180 + ((_i) * 4))   
-#define SXE_VMOLR(_i)      (0x0F000 + ((_i) * 4))    
-#define SXE_UTA(_i)        (0x0F400 + ((_i) * 4))
-#define SXE_MRCTL(_i)      (0x0F600 + ((_i) * 4))
-#define SXE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4))
-#define SXE_VMRVM(_i)      (0x0F630 + ((_i) * 4))
-#define SXE_WQBR_RX(_i)    (0x2FB0 + ((_i) * 4))    
-#define SXE_WQBR_TX(_i)    (0x8130 + ((_i) * 4))    
-#define SXE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4))   
-#define SXE_RXFECCERR0         0x051B8
+#define SXE_MRQC	  0x0EC80
+#define SXE_SAQF(_i)  (0x0E000 + ((_i) * 4))
+#define SXE_DAQF(_i)  (0x0E200 + ((_i) * 4))
+#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4))
+#define SXE_FTQF(_i)  (0x0E600 + ((_i) * 4))
+#define SXE_ETQF(_i)  (0x05128 + ((_i) * 4))
+#define SXE_ETQS(_i)  (0x0EC00 + ((_i) * 4))
+#define SXE_SYNQF	 0x0EC30
+#define SXE_RQTC	  0x0EC70
+#define SXE_MTQC	  0x08120
+#define SXE_VLVF(_i)  (0x0F100 + ((_i) * 4))
+#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4))
+#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4))
+#define SXE_PFFLPL	 0x050B0
+#define SXE_PFFLPH	 0x050B4
+#define SXE_VT_CTL		 0x051B0
+#define SXE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i)))
+#define SXE_PFMBMEM(_i)	(0x13000 + (64 * (_i)))
+#define SXE_PFMBICR(_i)	(0x00710 + (4 * (_i)))
+#define SXE_PFMBIMR(_i)	(0x00720 + (4 * (_i)))
+#define SXE_VFRE(_i)	   (0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i)	   (0x08110 + ((_i) * 4))
+#define SXE_VMECM(_i)	  (0x08790 + ((_i) * 4))
+#define SXE_QDE			0x2F04
+#define SXE_VMTXSW(_i)	 (0x05180 + ((_i) * 4))
+#define SXE_VMOLR(_i)	  (0x0F000 + ((_i) * 4))
+#define SXE_UTA(_i)		(0x0F400 + ((_i) * 4))
+#define SXE_MRCTL(_i)	  (0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i)	(0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i)	  (0x0F630 + ((_i) * 4))
+#define SXE_WQBR_RX(_i)	(0x2FB0 + ((_i) * 4))
+#define SXE_WQBR_TX(_i)	(0x8130 + ((_i) * 4))
+#define SXE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4))
+#define SXE_RXFECCERR0		 0x051B8
 #define SXE_LLITHRESH 0x0EC90
-#define SXE_IMIR(_i)  (0x05A80 + ((_i) * 4))         
-#define SXE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))
-#define SXE_IMIRVP    0x0EC60
+#define SXE_IMIR(_i)  (0x05A80 + ((_i) * 4))
+#define SXE_IMIREXT(_i)	   (0x05AA0 + ((_i) * 4))
+#define SXE_IMIRVP	0x0EC60
 #define SXE_VMD_CTL   0x0581C
-#define SXE_RETA(_i)  (0x0EB00 + ((_i) * 4))        
-#define SXE_ERETA(_i)	(0x0EE80 + ((_i) * 4))     
-#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4))       
+#define SXE_RETA(_i)  (0x0EB00 + ((_i) * 4))
+#define SXE_ERETA(_i)	(0x0EE80 + ((_i) * 4))
+#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4))
 
 
-#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40))  
+#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40))
 #define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
 #define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
 #define SXE_TDH(_i)   (0x06010 + ((_i) * 0x40))
@@ -203,82 +205,82 @@ typedef union sxe_adv_rx_desc {
 #define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
 #define SXE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
 #define SXE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
-#define SXE_DTXCTL    0x07E00
+#define SXE_DTXCTL	0x07E00
 
-#define SXE_DMATXCTL      0x04A80
-#define SXE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4))  
-#define SXE_PFDTXGSWC     0x08220
-#define SXE_DTXMXSZRQ     0x08100
-#define SXE_DTXTCPFLGL    0x04A88
-#define SXE_DTXTCPFLGH    0x04A8C
-#define SXE_LBDRPEN       0x0CA00
-#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4))  
+#define SXE_DMATXCTL	  0x04A80
+#define SXE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4))
+#define SXE_PFDTXGSWC	 0x08220
+#define SXE_DTXMXSZRQ	 0x08100
+#define SXE_DTXTCPFLGL	0x04A88
+#define SXE_DTXTCPFLGH	0x04A8C
+#define SXE_LBDRPEN	   0x0CA00
+#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4))
 
-#define SXE_DMATXCTL_TE       0x1   
-#define SXE_DMATXCTL_NS       0x2   
-#define SXE_DMATXCTL_GDV      0x8   
-#define SXE_DMATXCTL_MDP_EN   0x20  
-#define SXE_DMATXCTL_MBINTEN  0x40  
-#define SXE_DMATXCTL_VT_SHIFT 16    
+#define SXE_DMATXCTL_TE	   0x1
+#define SXE_DMATXCTL_NS	   0x2
+#define SXE_DMATXCTL_GDV	  0x8
+#define SXE_DMATXCTL_MDP_EN   0x20
+#define SXE_DMATXCTL_MBINTEN  0x40
+#define SXE_DMATXCTL_VT_SHIFT 16
 
-#define SXE_PFDTXGSWC_VT_LBEN 0x1   
+#define SXE_PFDTXGSWC_VT_LBEN 0x1
 
 
 #define SXE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
-#define SXE_TIPG      0x0CB00
-#define SXE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4))  
+#define SXE_TIPG	  0x0CB00
+#define SXE_TXPBSIZE(_i)	  (0x0CC00 + ((_i) * 4))
 #define SXE_DTXCFG	0x0CE08
 #define SXE_MNGTXMAP  0x0CD10
 #define SXE_TIPG_FIBER_DEFAULT 3
-#define SXE_TXPBSIZE_SHIFT    10
+#define SXE_TXPBSIZE_SHIFT	10
 #define SXE_DTXCFG_DBUTX_START  0x00000001
 
 
-#define SXE_RTRPCS      0x02430
-#define SXE_RTTDCS      0x04900
-#define SXE_RTTDCS_ARBDIS     0x00000040   
-#define SXE_RTTPCS      0x0CD00
-#define SXE_RTRUP2TC    0x03020
-#define SXE_RTTUP2TC    0x0C800
-#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4))  
-#define SXE_TXLLQ(_i)   (0x082E0 + ((_i) * 4))  
-#define SXE_RTRPT4S(_i) (0x02160 + ((_i) * 4))  
-#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4))  
-#define SXE_RTTDT2S(_i) (0x04930 + ((_i) * 4))  
-#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4))  
-#define SXE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4))  
-#define SXE_RTTDQSEL    0x04904
-#define SXE_RTTDT1C     0x04908
-#define SXE_RTTDT1S     0x0490C
-
-
-#define SXE_RTTQCNCR                0x08B00
-#define SXE_RTTQCNTG                0x04A90
-#define SXE_RTTBCNRD                0x0498C
-#define SXE_RTTQCNRR                0x0498C
-#define SXE_RTTDTECC                0x04990
-#define SXE_RTTDTECC_NO_BCN         0x00000100
-#define SXE_RTTBCNRC                0x04984
-#define SXE_RTTBCNRC_RS_ENA         0x80000000
-#define SXE_RTTBCNRC_RF_DEC_MASK    0x00003FFF
+#define SXE_RTRPCS	  0x02430
+#define SXE_RTTDCS	  0x04900
+#define SXE_RTTDCS_ARBDIS	 0x00000040
+#define SXE_RTTPCS	  0x0CD00
+#define SXE_RTRUP2TC	0x03020
+#define SXE_RTTUP2TC	0x0C800
+#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4))
+#define SXE_TXLLQ(_i)   (0x082E0 + ((_i) * 4))
+#define SXE_RTRPT4S(_i) (0x02160 + ((_i) * 4))
+#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4))
+#define SXE_RTTDT2S(_i) (0x04930 + ((_i) * 4))
+#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4))
+#define SXE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4))
+#define SXE_RTTDQSEL	0x04904
+#define SXE_RTTDT1C	 0x04908
+#define SXE_RTTDT1S	 0x0490C
+
+
+#define SXE_RTTQCNCR				0x08B00
+#define SXE_RTTQCNTG				0x04A90
+#define SXE_RTTBCNRD				0x0498C
+#define SXE_RTTQCNRR				0x0498C
+#define SXE_RTTDTECC				0x04990
+#define SXE_RTTDTECC_NO_BCN		 0x00000100
+#define SXE_RTTBCNRC				0x04984
+#define SXE_RTTBCNRC_RS_ENA		 0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK	0x00003FFF
 #define SXE_RTTBCNRC_RF_INT_SHIFT   14
-#define SXE_RTTBCNRC_RF_INT_MASK    (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
-#define SXE_RTTBCNRM                0x04980
-#define SXE_RTTQCNRM                0x04980
+#define SXE_RTTBCNRC_RF_INT_MASK	(SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+#define SXE_RTTBCNRM				0x04980
+#define SXE_RTTQCNRM				0x04980
 
 
-#define SXE_MACCFG      0x0CE04
+#define SXE_MACCFG	  0x0CE04
 
 
-#define SXE_GCR_EXT           0x11050
-#define SXE_GSCL_5_82599      0x11030
-#define SXE_GSCL_6_82599      0x11034
-#define SXE_GSCL_7_82599      0x11038
-#define SXE_GSCL_8_82599      0x1103C
-#define SXE_PHYADR_82599      0x11040
-#define SXE_PHYDAT_82599      0x11044
-#define SXE_PHYCTL_82599      0x11048
-#define SXE_PBACLR_82599      0x11068
+#define SXE_GCR_EXT		   0x11050
+#define SXE_GSCL_5_82599	  0x11030
+#define SXE_GSCL_6_82599	  0x11034
+#define SXE_GSCL_7_82599	  0x11038
+#define SXE_GSCL_8_82599	  0x1103C
+#define SXE_PHYADR_82599	  0x11040
+#define SXE_PHYDAT_82599	  0x11044
+#define SXE_PHYCTL_82599	  0x11048
+#define SXE_PBACLR_82599	  0x11068
 
 #define SXE_CIAA_8259X	0x11088
 
@@ -286,28 +288,28 @@ typedef union sxe_adv_rx_desc {
 #define SXE_CIAD_8259X	0x1108C
 
 
-#define SXE_PICAUSE           0x110B0
-#define SXE_PIENA             0x110B8
-#define SXE_CDQ_MBR_82599     0x110B4
-#define SXE_PCIESPARE         0x110BC
-#define SXE_MISC_REG_82599    0x110F0
+#define SXE_PICAUSE		   0x110B0
+#define SXE_PIENA			 0x110B8
+#define SXE_CDQ_MBR_82599	 0x110B4
+#define SXE_PCIESPARE		 0x110BC
+#define SXE_MISC_REG_82599	0x110F0
 #define SXE_ECC_CTRL_0_82599  0x11100
 #define SXE_ECC_CTRL_1_82599  0x11104
 #define SXE_ECC_STATUS_82599  0x110E0
-#define SXE_BAR_CTRL_82599    0x110F4
+#define SXE_BAR_CTRL_82599	0x110F4
 
 
-#define SXE_GCR_CMPL_TMOUT_MASK       0x0000F000
-#define SXE_GCR_CMPL_TMOUT_10ms       0x00001000
-#define SXE_GCR_CMPL_TMOUT_RESEND     0x00010000
-#define SXE_GCR_CAP_VER2              0x00040000
+#define SXE_GCR_CMPL_TMOUT_MASK	   0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms	   0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND	 0x00010000
+#define SXE_GCR_CAP_VER2			  0x00040000
 
-#define SXE_GCR_EXT_MSIX_EN           0x80000000
-#define SXE_GCR_EXT_BUFFERS_CLEAR     0x40000000
-#define SXE_GCR_EXT_VT_MODE_16        0x00000001
-#define SXE_GCR_EXT_VT_MODE_32        0x00000002
-#define SXE_GCR_EXT_VT_MODE_64        0x00000003
-#define SXE_GCR_EXT_SRIOV             (SXE_GCR_EXT_MSIX_EN | \
+#define SXE_GCR_EXT_MSIX_EN		   0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR	 0x40000000
+#define SXE_GCR_EXT_VT_MODE_16		0x00000001
+#define SXE_GCR_EXT_VT_MODE_32		0x00000002
+#define SXE_GCR_EXT_VT_MODE_64		0x00000003
+#define SXE_GCR_EXT_SRIOV			 (SXE_GCR_EXT_MSIX_EN | \
 					 SXE_GCR_EXT_VT_MODE_64)
 
 
@@ -320,108 +322,108 @@ typedef union sxe_adv_rx_desc {
 #define SXE_PCS1GANLP 0x0421C
 #define SXE_PCS1GANNP 0x04220
 #define SXE_PCS1GANLPNP 0x04224
-#define SXE_HLREG0    0x04240
-#define SXE_HLREG1    0x04244
-#define SXE_PAP       0x04248
-#define SXE_MACA      0x0424C
-#define SXE_APAE      0x04250
-#define SXE_ARD       0x04254
-#define SXE_AIS       0x04258
-#define SXE_MSCA      0x0425C
-#define SXE_MSRWD     0x04260
-#define SXE_MLADD     0x04264
-#define SXE_MHADD     0x04268
-#define SXE_MAXFRS    0x04268
-#define SXE_TREG      0x0426C
-#define SXE_PCSS1     0x04288
-#define SXE_PCSS2     0x0428C
-#define SXE_XPCSS     0x04290
-#define SXE_MFLCN     0x04294
+#define SXE_HLREG0	0x04240
+#define SXE_HLREG1	0x04244
+#define SXE_PAP	   0x04248
+#define SXE_MACA	  0x0424C
+#define SXE_APAE	  0x04250
+#define SXE_ARD	   0x04254
+#define SXE_AIS	   0x04258
+#define SXE_MSCA	  0x0425C
+#define SXE_MSRWD	 0x04260
+#define SXE_MLADD	 0x04264
+#define SXE_MHADD	 0x04268
+#define SXE_MAXFRS	0x04268
+#define SXE_TREG	  0x0426C
+#define SXE_PCSS1	 0x04288
+#define SXE_PCSS2	 0x0428C
+#define SXE_XPCSS	 0x04290
+#define SXE_MFLCN	 0x04294
 #define SXE_SERDESC   0x04298
 #define SXE_MAC_SGMII_BUSY 0x04298
-#define SXE_MACS      0x0429C
-#define SXE_AUTOC     0x042A0
-#define SXE_LINKS     0x042A4
-#define SXE_LINKS2    0x04324
-#define SXE_AUTOC2    0x042A8
-#define SXE_AUTOC3    0x042AC
-#define SXE_ANLP1     0x042B0
-#define SXE_ANLP2     0x042B4
-#define SXE_MACC      0x04330
+#define SXE_MACS	  0x0429C
+#define SXE_AUTOC	 0x042A0
+#define SXE_LINKS	 0x042A4
+#define SXE_LINKS2	0x04324
+#define SXE_AUTOC2	0x042A8
+#define SXE_AUTOC3	0x042AC
+#define SXE_ANLP1	 0x042B0
+#define SXE_ANLP2	 0x042B4
+#define SXE_MACC	  0x04330
 #define SXE_ATLASCTL  0x04800
-#define SXE_MMNGC     0x042D0
+#define SXE_MMNGC	 0x042D0
 #define SXE_ANLPNP1   0x042D4
 #define SXE_ANLPNP2   0x042D8
 #define SXE_KRPCSFC   0x042E0
-#define SXE_KRPCSS    0x042E4
-#define SXE_FECS1     0x042E8
-#define SXE_FECS2     0x042EC
+#define SXE_KRPCSS	0x042E4
+#define SXE_FECS1	 0x042E8
+#define SXE_FECS2	 0x042EC
 #define SXE_SMADARCTL 0x14F10
-#define SXE_MPVC      0x04318
-#define SXE_SGMIIC    0x04314
+#define SXE_MPVC	  0x04318
+#define SXE_SGMIIC	0x04314
 
 
-#define SXE_COMCTRL             0x14400
-#define SXE_PCCTRL              0x14404
-#define SXE_LPBKCTRL            0x1440C
-#define SXE_MAXFS               0x14410
-#define SXE_SACONH              0x14420
-#define SXE_VLANCTRL            0x14430
-#define SXE_VLANID              0x14434
-#define SXE_VLANCTRL            0x14430
-#define SXE_FPAG_SDS_CON        0x14700
+#define SXE_COMCTRL			 0x14400
+#define SXE_PCCTRL			  0x14404
+#define SXE_LPBKCTRL			0x1440C
+#define SXE_MAXFS			   0x14410
+#define SXE_SACONH			  0x14420
+#define SXE_VLANCTRL			0x14430
+#define SXE_VLANID			  0x14434
+#define SXE_VLANCTRL			0x14430
+#define SXE_FPAG_SDS_CON		0x14700
 
 
-#define SXE_COMCTRL_TXEN        0x0001
-#define SXE_COMCTRL_RXEN        0x0002
-#define SXE_COMCTRL_EDSEL       0x0004
-#define SXE_COMCTRL_SPEED_1G    0x0200
+#define SXE_COMCTRL_TXEN		0x0001
+#define SXE_COMCTRL_RXEN		0x0002
+#define SXE_COMCTRL_EDSEL	   0x0004
+#define SXE_COMCTRL_SPEED_1G	0x0200
 #define SXE_COMCTRL_SPEED_10G   0x0300
 
 
-#define SXE_PCCTRL_TXCE         0x0001
-#define SXE_PCCTRL_RXCE         0x0002
-#define SXE_PCCTRL_PEN          0x0100
-#define SXE_PCCTRL_PCSC_ALL     0x30000
+#define SXE_PCCTRL_TXCE		 0x0001
+#define SXE_PCCTRL_RXCE		 0x0002
+#define SXE_PCCTRL_PEN		  0x0100
+#define SXE_PCCTRL_PCSC_ALL	 0x30000
 
 
-#define SXE_MAXFS_TFSEL         0x0001
-#define SXE_MAXFS_RFSEL         0x0002
-#define SXE_MAXFS_MFS_MASK      0xFFFF0000
-#define SXE_MAXFS_MFS           0x40000000
-#define SXE_MAXFS_MFS_SHIFT     16
+#define SXE_MAXFS_TFSEL		 0x0001
+#define SXE_MAXFS_RFSEL		 0x0002
+#define SXE_MAXFS_MFS_MASK	  0xFFFF0000
+#define SXE_MAXFS_MFS		   0x40000000
+#define SXE_MAXFS_MFS_SHIFT	 16
 
 
-#define SXE_FPGA_SDS_CON_FULL_DUPLEX_MODE    0x00200000
-#define SXE_FPGA_SDS_CON_ANRESTART           0x00008000
-#define SXE_FPGA_SDS_CON_AN_ENABLE           0x00001000
+#define SXE_FPGA_SDS_CON_FULL_DUPLEX_MODE	0x00200000
+#define SXE_FPGA_SDS_CON_ANRESTART		   0x00008000
+#define SXE_FPGA_SDS_CON_AN_ENABLE		   0x00001000
 
 
-#define SXE_RSCDBU_RSCSMALDIS_MASK    0x0000007F
-#define SXE_RSCDBU_RSCACKDIS          0x00000080
+#define SXE_RSCDBU_RSCSMALDIS_MASK	0x0000007F
+#define SXE_RSCDBU_RSCACKDIS		  0x00000080
 
 
-#define SXE_RDRXCTL_RDMTS_1_2     0x00000000  
-#define SXE_RDRXCTL_CRCSTRIP      0x00000002  
-#define SXE_RDRXCTL_PSP           0x00000004  
-#define SXE_RDRXCTL_MVMEN         0x00000020
-#define SXE_RDRXCTL_DMAIDONE      0x00000008  
-#define SXE_RDRXCTL_AGGDIS        0x00010000  
-#define SXE_RDRXCTL_RSCFRSTSIZE   0x003E0000  
-#define SXE_RDRXCTL_RSCLLIDIS     0x00800000  
-#define SXE_RDRXCTL_RSCACKC       0x02000000  
-#define SXE_RDRXCTL_FCOE_WRFIX    0x04000000  
-#define SXE_RDRXCTL_MBINTEN       0x10000000
-#define SXE_RDRXCTL_MDP_EN        0x20000000
+#define SXE_RDRXCTL_RDMTS_1_2	 0x00000000
+#define SXE_RDRXCTL_CRCSTRIP	  0x00000002
+#define SXE_RDRXCTL_PSP		   0x00000004
+#define SXE_RDRXCTL_MVMEN		 0x00000020
+#define SXE_RDRXCTL_DMAIDONE	  0x00000008
+#define SXE_RDRXCTL_AGGDIS		0x00010000
+#define SXE_RDRXCTL_RSCFRSTSIZE   0x003E0000
+#define SXE_RDRXCTL_RSCLLIDIS	 0x00800000
+#define SXE_RDRXCTL_RSCACKC	   0x02000000
+#define SXE_RDRXCTL_FCOE_WRFIX	0x04000000
+#define SXE_RDRXCTL_MBINTEN	   0x10000000
+#define SXE_RDRXCTL_MDP_EN		0x20000000
 
 
-#define SXE_CTRL_GIO_DIS      0x00000004
-#define SXE_CTRL_LNK_RST      0x00000008
-#define SXE_CTRL_RST          0x04000000
-#define SXE_CTRL_RST_MASK     (SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+#define SXE_CTRL_GIO_DIS	  0x00000004
+#define SXE_CTRL_LNK_RST	  0x00000008
+#define SXE_CTRL_RST		  0x04000000
+#define SXE_CTRL_RST_MASK	 (SXE_CTRL_LNK_RST | SXE_CTRL_RST)
 
 
-#define SXE_MHADD_MFS_MASK    0xFFFF0000
+#define SXE_MHADD_MFS_MASK	0xFFFF0000
 #define SXE_MHADD_MFS_SHIFT   16
 
 
@@ -431,92 +433,92 @@ typedef union sxe_adv_rx_desc {
 #define SXE_CTRL_EXT_DRV_LOAD 0x10000000
 
 
-#define SXE_TXPBSIZE_20KB     0x00005000  
-#define SXE_TXPBSIZE_40KB     0x0000A000  
-#define SXE_RXPBSIZE_48KB     0x0000C000  
-#define SXE_RXPBSIZE_64KB     0x00010000  
-#define SXE_RXPBSIZE_80KB     0x00014000  
-#define SXE_RXPBSIZE_128KB    0x00020000  
-#define SXE_RXPBSIZE_MAX      0x00080000  
-#define SXE_TXPBSIZE_MAX      0x00028000  
+#define SXE_TXPBSIZE_20KB	 0x00005000
+#define SXE_TXPBSIZE_40KB	 0x0000A000
+#define SXE_RXPBSIZE_48KB	 0x0000C000
+#define SXE_RXPBSIZE_64KB	 0x00010000
+#define SXE_RXPBSIZE_80KB	 0x00014000
+#define SXE_RXPBSIZE_128KB	0x00020000
+#define SXE_RXPBSIZE_MAX	  0x00080000
+#define SXE_TXPBSIZE_MAX	  0x00028000
 
-#define SXE_TXPKT_SIZE_MAX    0xA         
+#define SXE_TXPKT_SIZE_MAX	0xA
 #define SXE_MAX_PB		8
 
 
-#define SXE_HLREG0_TXCRCEN      0x00000001  
-#define SXE_HLREG0_RXCRCSTRP    0x00000002  
-#define SXE_HLREG0_JUMBOEN      0x00000004  
-#define SXE_HLREG0_TXPADEN      0x00000400  
-#define SXE_HLREG0_TXPAUSEEN    0x00001000  
-#define SXE_HLREG0_RXPAUSEEN    0x00004000  
-#define SXE_HLREG0_LPBK         0x00008000  
-#define SXE_HLREG0_MDCSPD       0x00010000  
-#define SXE_HLREG0_CONTMDC      0x00020000  
-#define SXE_HLREG0_CTRLFLTR     0x00040000  
-#define SXE_HLREG0_PREPEND      0x00F00000  
-#define SXE_HLREG0_PRIPAUSEEN   0x01000000  
-#define SXE_HLREG0_RXPAUSERECDA 0x06000000  
-#define SXE_HLREG0_RXLNGTHERREN 0x08000000  
-#define SXE_HLREG0_RXPADSTRIPEN 0x10000000  
+#define SXE_HLREG0_TXCRCEN	  0x00000001
+#define SXE_HLREG0_RXCRCSTRP	0x00000002
+#define SXE_HLREG0_JUMBOEN	  0x00000004
+#define SXE_HLREG0_TXPADEN	  0x00000400
+#define SXE_HLREG0_TXPAUSEEN	0x00001000
+#define SXE_HLREG0_RXPAUSEEN	0x00004000
+#define SXE_HLREG0_LPBK		 0x00008000
+#define SXE_HLREG0_MDCSPD	   0x00010000
+#define SXE_HLREG0_CONTMDC	  0x00020000
+#define SXE_HLREG0_CTRLFLTR	 0x00040000
+#define SXE_HLREG0_PREPEND	  0x00F00000
+#define SXE_HLREG0_PRIPAUSEEN   0x01000000
+#define SXE_HLREG0_RXPAUSERECDA 0x06000000
+#define SXE_HLREG0_RXLNGTHERREN 0x08000000
+#define SXE_HLREG0_RXPADSTRIPEN 0x10000000
 
 
 #define SXE_VMOLR_UPE		  0x00400000
 #define SXE_VMOLR_VPE		  0x00800000
-#define SXE_VMOLR_AUPE        0x01000000
-#define SXE_VMOLR_ROMPE       0x02000000
-#define SXE_VMOLR_ROPE        0x04000000
-#define SXE_VMOLR_BAM         0x08000000
-#define SXE_VMOLR_MPE         0x10000000
+#define SXE_VMOLR_AUPE		0x01000000
+#define SXE_VMOLR_ROMPE	   0x02000000
+#define SXE_VMOLR_ROPE		0x04000000
+#define SXE_VMOLR_BAM		 0x08000000
+#define SXE_VMOLR_MPE		 0x10000000
 
 
-#define SXE_RXCSUM_IPPCSE     0x00001000  
-#define SXE_RXCSUM_PCSD       0x00002000  
+#define SXE_RXCSUM_IPPCSE	 0x00001000
+#define SXE_RXCSUM_PCSD	   0x00002000
 
 
-#define SXE_VMD_CTL_VMDQ_EN     0x00000001
+#define SXE_VMD_CTL_VMDQ_EN	 0x00000001
 #define SXE_VMD_CTL_VMDQ_FILTER 0x00000002
 
 
-#define	SXE_MACCFG_PAD_EN       0x00000001
+#define	SXE_MACCFG_PAD_EN	   0x00000001
 
 
-#define SXE_IRQ_CLEAR_MASK    0xFFFFFFFF
+#define SXE_IRQ_CLEAR_MASK	0xFFFFFFFF
 
 
-#define SXE_STATUS_LAN_ID         0x0000000C
-#define SXE_STATUS_LAN_ID_SHIFT   2         
-#define SXE_STATUS_GIO            0x00080000
+#define SXE_STATUS_LAN_ID		 0x0000000C
+#define SXE_STATUS_LAN_ID_SHIFT   2
+#define SXE_STATUS_GIO			0x00080000
 
 
 #define SXE_LINKS_KX_AN_COMP  0x80000000
-#define SXE_LINKS_UP          0x40000000
-#define SXE_LINKS_SPEED       0x20000000
-#define SXE_LINKS_MODE        0x18000000
-#define SXE_LINKS_RX_MODE     0x06000000
-#define SXE_LINKS_TX_MODE     0x01800000
-#define SXE_LINKS_XGXS_EN     0x00400000
-#define SXE_LINKS_SGMII_EN    0x02000000
+#define SXE_LINKS_UP		  0x40000000
+#define SXE_LINKS_SPEED	   0x20000000
+#define SXE_LINKS_MODE		0x18000000
+#define SXE_LINKS_RX_MODE	 0x06000000
+#define SXE_LINKS_TX_MODE	 0x01800000
+#define SXE_LINKS_XGXS_EN	 0x00400000
+#define SXE_LINKS_SGMII_EN	0x02000000
 #define SXE_LINKS_PCS_1G_EN   0x00200000
-#define SXE_LINKS_1G_AN_EN    0x00100000
+#define SXE_LINKS_1G_AN_EN	0x00100000
 #define SXE_LINKS_KX_AN_IDLE  0x00080000
-#define SXE_LINKS_1G_SYNC     0x00040000
+#define SXE_LINKS_1G_SYNC	 0x00040000
 #define SXE_LINKS_10G_ALIGN   0x00020000
 #define SXE_LINKS_10G_LANE_SYNC 0x00017000
-#define SXE_LINKS_TL_FAULT    0x00001000
-#define SXE_LINKS_SIGNAL      0x00000F00
+#define SXE_LINKS_TL_FAULT	0x00001000
+#define SXE_LINKS_SIGNAL	  0x00000F00
 
 
-#define SXE_PCI_DEVICE_STATUS   0x7A 
+#define SXE_PCI_DEVICE_STATUS   0x7A
 #define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
-#define SXE_PCI_LINK_STATUS     0x82 
-#define SXE_PCI_DEVICE_CONTROL2 0x98 
-#define SXE_PCI_LINK_WIDTH      0x3F0
-#define SXE_PCI_LINK_WIDTH_1    0x10
-#define SXE_PCI_LINK_WIDTH_2    0x20
-#define SXE_PCI_LINK_WIDTH_4    0x40
-#define SXE_PCI_LINK_WIDTH_8    0x80
-#define SXE_PCI_LINK_SPEED      0xF
+#define SXE_PCI_LINK_STATUS	 0x82
+#define SXE_PCI_DEVICE_CONTROL2 0x98
+#define SXE_PCI_LINK_WIDTH	  0x3F0
+#define SXE_PCI_LINK_WIDTH_1	0x10
+#define SXE_PCI_LINK_WIDTH_2	0x20
+#define SXE_PCI_LINK_WIDTH_4	0x40
+#define SXE_PCI_LINK_WIDTH_8	0x80
+#define SXE_PCI_LINK_SPEED	  0xF
 #define SXE_PCI_LINK_SPEED_2500 0x1
 #define SXE_PCI_LINK_SPEED_5000 0x2
 #define SXE_PCI_LINK_SPEED_8000 0x3
@@ -539,39 +541,39 @@ typedef union sxe_adv_rx_desc {
 #define SXE_PCI_MASTER_DISABLE_TIMEOUT	800
 
 
-#define SXE_RAH_VIND_MASK     0x003C0000
-#define SXE_RAH_VIND_SHIFT    18
-#define SXE_RAH_AV            0x80000000
-#define SXE_CLEAR_VMDQ_ALL    0xFFFFFFFF
+#define SXE_RAH_VIND_MASK	 0x003C0000
+#define SXE_RAH_VIND_SHIFT	18
+#define SXE_RAH_AV			0x80000000
+#define SXE_CLEAR_VMDQ_ALL	0xFFFFFFFF
 
 
-#define SXE_RFCTL_ISCSI_DIS       0x00000001
+#define SXE_RFCTL_ISCSI_DIS	   0x00000001
 #define SXE_RFCTL_ISCSI_DWC_MASK  0x0000003E
 #define SXE_RFCTL_ISCSI_DWC_SHIFT 1
 #define SXE_RFCTL_RSC_DIS		0x00000020
-#define SXE_RFCTL_NFSW_DIS        0x00000040
-#define SXE_RFCTL_NFSR_DIS        0x00000080
-#define SXE_RFCTL_NFS_VER_MASK    0x00000300
+#define SXE_RFCTL_NFSW_DIS		0x00000040
+#define SXE_RFCTL_NFSR_DIS		0x00000080
+#define SXE_RFCTL_NFS_VER_MASK	0x00000300
 #define SXE_RFCTL_NFS_VER_SHIFT   8
-#define SXE_RFCTL_NFS_VER_2       0
-#define SXE_RFCTL_NFS_VER_3       1
-#define SXE_RFCTL_NFS_VER_4       2
-#define SXE_RFCTL_IPV6_DIS        0x00000400
+#define SXE_RFCTL_NFS_VER_2	   0
+#define SXE_RFCTL_NFS_VER_3	   1
+#define SXE_RFCTL_NFS_VER_4	   2
+#define SXE_RFCTL_IPV6_DIS		0x00000400
 #define SXE_RFCTL_IPV6_XSUM_DIS   0x00000800
-#define SXE_RFCTL_IPFRSP_DIS      0x00004000
-#define SXE_RFCTL_IPV6_EX_DIS     0x00010000
+#define SXE_RFCTL_IPFRSP_DIS	  0x00004000
+#define SXE_RFCTL_IPV6_EX_DIS	 0x00010000
 #define SXE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
 
 
-#define SXE_TXDCTL_ENABLE     0x02000000   
-#define SXE_TXDCTL_SWFLSH     0x04000000   
-#define SXE_TXDCTL_WTHRESH_SHIFT      16   
+#define SXE_TXDCTL_ENABLE	 0x02000000
+#define SXE_TXDCTL_SWFLSH	 0x04000000
+#define SXE_TXDCTL_WTHRESH_SHIFT	  16
 
 
-#define SXE_RXCTRL_RXEN       0x00000001 
-#define SXE_RXCTRL_DMBYPS     0x00000002 
-#define SXE_RXDCTL_ENABLE     0x02000000 
-#define SXE_RXDCTL_SWFLSH     0x04000000 
+#define SXE_RXCTRL_RXEN	   0x00000001
+#define SXE_RXCTRL_DMBYPS	 0x00000002
+#define SXE_RXDCTL_ENABLE	 0x02000000
+#define SXE_RXDCTL_SWFLSH	 0x04000000
 
 
 #define SXE_RXDCTL_DESC_FIFO_AFUL_TH_MASK 0x0000001F
@@ -585,12 +587,12 @@ typedef union sxe_adv_rx_desc {
 #define SXE_PCI_MASTER_DISABLE_TIMEOUT	800
 
 
-#define SXE_FCTRL_SBP 0x00000002  
-#define SXE_FCTRL_MPE 0x00000100  
-#define SXE_FCTRL_UPE 0x00000200  
-#define SXE_FCTRL_BAM 0x00000400  
-#define SXE_FCTRL_PMCF 0x00001000 
-#define SXE_FCTRL_DPF 0x00002000  
+#define SXE_FCTRL_SBP 0x00000002
+#define SXE_FCTRL_MPE 0x00000100
+#define SXE_FCTRL_UPE 0x00000200
+#define SXE_FCTRL_BAM 0x00000400
+#define SXE_FCTRL_PMCF 0x00001000
+#define SXE_FCTRL_DPF 0x00002000
 
 
 #define SXE_QDE_ENABLE	0x00000001
@@ -599,89 +601,89 @@ typedef union sxe_adv_rx_desc {
 #define SXE_QDE_IDX_SHIFT	8
 #define SXE_QDE_WRITE		0x00010000
 
-#define SXE_TXD_POPTS_IXSM 0x01      
-#define SXE_TXD_POPTS_TXSM 0x02      
-#define SXE_TXD_CMD_EOP    0x01000000
+#define SXE_TXD_POPTS_IXSM 0x01
+#define SXE_TXD_POPTS_TXSM 0x02
+#define SXE_TXD_CMD_EOP	0x01000000
 #define SXE_TXD_CMD_IFCS   0x02000000
-#define SXE_TXD_CMD_IC     0x04000000
-#define SXE_TXD_CMD_RS     0x08000000
+#define SXE_TXD_CMD_IC	 0x04000000
+#define SXE_TXD_CMD_RS	 0x08000000
 #define SXE_TXD_CMD_DEXT   0x20000000
-#define SXE_TXD_CMD_VLE    0x40000000
-#define SXE_TXD_STAT_DD    0x00000001
+#define SXE_TXD_CMD_VLE	0x40000000
+#define SXE_TXD_STAT_DD	0x00000001
 
 
-#define SXE_SRRCTL_BSIZEPKT_SHIFT     10          
-#define SXE_SRRCTL_RDMTS_SHIFT        22
-#define SXE_SRRCTL_RDMTS_MASK         0x01C00000
-#define SXE_SRRCTL_DROP_EN            0x10000000
-#define SXE_SRRCTL_BSIZEPKT_MASK      0x0000007F
-#define SXE_SRRCTL_BSIZEHDR_MASK      0x00003F00
-#define SXE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define SXE_SRRCTL_BSIZEPKT_SHIFT	 10
+#define SXE_SRRCTL_RDMTS_SHIFT		22
+#define SXE_SRRCTL_RDMTS_MASK		 0x01C00000
+#define SXE_SRRCTL_DROP_EN			0x10000000
+#define SXE_SRRCTL_BSIZEPKT_MASK	  0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK	  0x00003F00
+#define SXE_SRRCTL_DESCTYPE_LEGACY	0x00000000
 #define SXE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
 #define SXE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
 #define SXE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
 #define SXE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define SXE_SRRCTL_DESCTYPE_MASK      0x0E000000
+#define SXE_SRRCTL_DESCTYPE_MASK	  0x0E000000
 
-#define SXE_RXDPS_HDRSTAT_HDRSP       0x00008000
+#define SXE_RXDPS_HDRSTAT_HDRSP	   0x00008000
 #define SXE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
 
-#define SXE_RXDADV_RSSTYPE_MASK       0x0000000F
-#define SXE_RXDADV_PKTTYPE_MASK       0x0000FFF0
-#define SXE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
-#define SXE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
-#define SXE_RXDADV_RSCCNT_MASK        0x001E0000
-#define SXE_RXDADV_RSCCNT_SHIFT       17
-#define SXE_RXDADV_HDRBUFLEN_SHIFT    5
-#define SXE_RXDADV_SPLITHEADER_EN     0x00001000
-#define SXE_RXDADV_SPH                0x8000
+#define SXE_RXDADV_RSSTYPE_MASK	   0x0000000F
+#define SXE_RXDADV_PKTTYPE_MASK	   0x0000FFF0
+#define SXE_RXDADV_PKTTYPE_MASK_EX	0x0001FFF0
+#define SXE_RXDADV_HDRBUFLEN_MASK	 0x00007FE0
+#define SXE_RXDADV_RSCCNT_MASK		0x001E0000
+#define SXE_RXDADV_RSCCNT_SHIFT	   17
+#define SXE_RXDADV_HDRBUFLEN_SHIFT	5
+#define SXE_RXDADV_SPLITHEADER_EN	 0x00001000
+#define SXE_RXDADV_SPH				0x8000
 
 
-#define SXE_ADVTXD_DTYP_DATA  0x00300000        
-#define SXE_ADVTXD_DCMD_IFCS  SXE_TXD_CMD_IFCS  
-#define SXE_ADVTXD_DCMD_DEXT  SXE_TXD_CMD_DEXT  
-#define SXE_ADVTXD_PAYLEN_SHIFT    14           
+#define SXE_ADVTXD_DTYP_DATA  0x00300000
+#define SXE_ADVTXD_DCMD_IFCS  SXE_TXD_CMD_IFCS
+#define SXE_ADVTXD_DCMD_DEXT  SXE_TXD_CMD_DEXT
+#define SXE_ADVTXD_PAYLEN_SHIFT	14
 
 
 #define SXE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
 
 
-#define SXE_ERR_EEPROM                        -1
-#define SXE_ERR_EEPROM_CHECKSUM               -2
-#define SXE_ERR_PHY                           -3
-#define SXE_ERR_CONFIG                        -4
-#define SXE_ERR_PARAM                         -5
-#define SXE_ERR_MAC_TYPE                      -6
-#define SXE_ERR_UNKNOWN_PHY                   -7
-#define SXE_ERR_LINK_SETUP                    -8
-#define SXE_ERR_ADAPTER_STOPPED               -9
-#define SXE_ERR_INVALID_MAC_ADDR              -10
-#define SXE_ERR_DEVICE_NOT_SUPPORTED          -11
-#define SXE_ERR_MASTER_REQUESTS_PENDING       -12
-#define SXE_ERR_INVALID_LINK_SETTINGS         -13
-#define SXE_ERR_AUTONEG_NOT_COMPLETE          -14
-#define SXE_ERR_RESET_FAILED                  -15
-#define SXE_ERR_SWFW_SYNC                     -16
-#define SXE_ERR_PHY_ADDR_INVALID              -17
-#define SXE_ERR_I2C                           -18
-#define SXE_ERR_SFP_NOT_SUPPORTED             -19
-#define SXE_ERR_SFP_NOT_PRESENT               -20
-#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
-#define SXE_ERR_NO_SAN_ADDR_PTR               -22
-#define SXE_ERR_FDIR_REINIT_FAILED            -23
-#define SXE_ERR_EEPROM_VERSION                -24
-#define SXE_ERR_NO_SPACE                      -25
-#define SXE_ERR_OVERTEMP                      -26
-#define SXE_ERR_FC_NOT_NEGOTIATED             -27
-#define SXE_ERR_FC_NOT_SUPPORTED              -28
-#define SXE_ERR_SFP_SETUP_NOT_COMPLETE        -30
-#define SXE_ERR_PBA_SECTION                   -31
-#define SXE_ERR_INVALID_ARGUMENT              -32
-#define SXE_ERR_HOST_INTERFACE_COMMAND        -33
+#define SXE_ERR_EEPROM						-1
+#define SXE_ERR_EEPROM_CHECKSUM			   -2
+#define SXE_ERR_PHY						   -3
+#define SXE_ERR_CONFIG						-4
+#define SXE_ERR_PARAM						 -5
+#define SXE_ERR_MAC_TYPE					  -6
+#define SXE_ERR_UNKNOWN_PHY				   -7
+#define SXE_ERR_LINK_SETUP					-8
+#define SXE_ERR_ADAPTER_STOPPED			   -9
+#define SXE_ERR_INVALID_MAC_ADDR			  -10
+#define SXE_ERR_DEVICE_NOT_SUPPORTED		  -11
+#define SXE_ERR_MASTER_REQUESTS_PENDING	   -12
+#define SXE_ERR_INVALID_LINK_SETTINGS		 -13
+#define SXE_ERR_AUTONEG_NOT_COMPLETE		  -14
+#define SXE_ERR_RESET_FAILED				  -15
+#define SXE_ERR_SWFW_SYNC					 -16
+#define SXE_ERR_PHY_ADDR_INVALID			  -17
+#define SXE_ERR_I2C						   -18
+#define SXE_ERR_SFP_NOT_SUPPORTED			 -19
+#define SXE_ERR_SFP_NOT_PRESENT			   -20
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT	   -21
+#define SXE_ERR_NO_SAN_ADDR_PTR			   -22
+#define SXE_ERR_FDIR_REINIT_FAILED			-23
+#define SXE_ERR_EEPROM_VERSION				-24
+#define SXE_ERR_NO_SPACE					  -25
+#define SXE_ERR_OVERTEMP					  -26
+#define SXE_ERR_FC_NOT_NEGOTIATED			 -27
+#define SXE_ERR_FC_NOT_SUPPORTED			  -28
+#define SXE_ERR_SFP_SETUP_NOT_COMPLETE		-30
+#define SXE_ERR_PBA_SECTION				   -31
+#define SXE_ERR_INVALID_ARGUMENT			  -32
+#define SXE_ERR_HOST_INTERFACE_COMMAND		-33
 #define SXE_ERR_FDIR_CMD_INCOMPLETE		-38
 #define SXE_ERR_FW_RESP_INVALID		-39
 #define SXE_ERR_TOKEN_RETRY			-40
-#define SXE_NOT_IMPLEMENTED                   0x7FFFFFFF
+#define SXE_NOT_IMPLEMENTED				   0x7FFFFFFF
 
 #define SXE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
 #define SXE_FUSES0_300MHZ		BIT(5)
@@ -790,5 +792,5 @@ typedef union sxe_adv_rx_desc {
 #define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD	\
 				(0x1F << SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
 
-#endif 
+#endif
 
diff --git a/drivers/net/sxe/include/sxe_version.h b/drivers/net/sxe/include/sxe_version.h
index 50afd69a63..6b5e4caef1 100644
--- a/drivers/net/sxe/include/sxe_version.h
+++ b/drivers/net/sxe/include/sxe_version.h
@@ -4,29 +4,28 @@
 #ifndef __SXE_VER_H__
 #define __SXE_VER_H__
 
-#define SXE_VERSION                "0.0.0.0"
-#define SXE_COMMIT_ID              "13cf402"
-#define SXE_BRANCH                 "feature/sagitta-1.3.0-P3-dpdk_patch_rwy"
-#define SXE_BUILD_TIME             "2024-08-24 11:02:12"
+#define SXE_VERSION				"0.0.0.0"
+#define SXE_COMMIT_ID			  "51935d6"
+#define SXE_BRANCH				 "feature/sagitta-1.3.0-P3-dpdk_patch_rwy"
+#define SXE_BUILD_TIME			 "2024-09-05 21:49:55"
 
 
-#define SXE_DRV_NAME                   "sxe"
-#define SXEVF_DRV_NAME                 "sxevf"
-#define SXE_DRV_LICENSE                "GPL v2"
-#define SXE_DRV_COPYRIGHT              "Copyright (C), 2022, Linkdata Technology Co., Ltd."
-#define SXE_DRV_AUTHOR                 "Linkdata Technology Corporation"
-#define SXE_DRV_DESCRIPTION            "LD 1160-2X 2-port 10G SFP+ NIC"
-#define SXEVF_DRV_DESCRIPTION          "LD 1160-2X Virtual Function"
-#define SXE_DRV_CONNECTION             "Linkdata Technology 10G Network Connection"
+#define SXE_DRV_NAME				   "sxe"
+#define SXEVF_DRV_NAME				 "sxevf"
+#define SXE_DRV_LICENSE				"GPL v2"
+#define SXE_DRV_AUTHOR				 "sxe"
+#define SXEVF_DRV_AUTHOR			   "sxevf"
+#define SXE_DRV_DESCRIPTION			"sxe driver"
+#define SXEVF_DRV_DESCRIPTION		  "sxevf driver"
 
 
-#define SXE_FW_NAME                     "soc"
-#define SXE_FW_ARCH                     "arm32"
+#define SXE_FW_NAME					 "soc"
+#define SXE_FW_ARCH					 "arm32"
 
 #ifndef PS3_CFG_RELEASE
-#define PS3_SXE_FW_BUILD_MODE             "debug"
+#define PS3_SXE_FW_BUILD_MODE			 "debug"
 #else
-#define PS3_SXE_FW_BUILD_MODE             "release"
+#define PS3_SXE_FW_BUILD_MODE			 "release"
 #endif
 
 #endif
diff --git a/drivers/net/sxe/meson.build b/drivers/net/sxe/meson.build
index 5e7b49dcf6..50611c27fe 100644
--- a/drivers/net/sxe/meson.build
+++ b/drivers/net/sxe/meson.build
@@ -5,6 +5,10 @@ cflags += ['-DSXE_DPDK']
 cflags += ['-DSXE_HOST_DRIVER']
 cflags += ['-DSXE_DPDK_L4_FEATURES']
 cflags += ['-DSXE_DPDK_SRIOV']
+cflags += ['-DSXE_DPDK_FILTER_CTRL']
+cflags += ['-DSXE_DPDK_MACSEC']
+cflags += ['-DSXE_DPDK_TM']
+cflags += ['-DSXE_DPDK_SIMD']
 
 #subdir('base')
 #objs = [base_objs]
@@ -26,6 +30,10 @@ sources = files(
 	'pf/sxe_ptp.c',
 	'pf/sxe_vf.c',
 	'pf/sxe_dcb.c',
+	'pf/sxe_filter_ctrl.c',
+	'pf/sxe_fnav.c',
+	'pf/sxe_tm.c',
+	'pf/sxe_macsec.c',
 	'vf/sxevf_main.c',
 	'vf/sxevf_filter.c',
 	'vf/sxevf_irq.c',
@@ -47,6 +55,12 @@ sources = files(
 
 testpmd_sources = files('sxe_testpmd.c')
 
+if arch_subdir == 'x86'
+	sources += files('pf/sxe_vec_sse.c')
+elif arch_subdir == 'arm'
+	sources += files('pf/sxe_vec_neon.c')
+endif
+
 includes += include_directories('base')
 includes += include_directories('pf')
 includes += include_directories('vf')
diff --git a/drivers/net/sxe/pf/sxe.h b/drivers/net/sxe/pf/sxe.h
index 139480e90d..4d7e03adee 100644
--- a/drivers/net/sxe/pf/sxe.h
+++ b/drivers/net/sxe/pf/sxe.h
@@ -28,21 +28,23 @@
 struct sxe_hw;
 struct sxe_vlan_context;
 
-#define SXE_LPBK_DISABLED   0x0 
-#define SXE_LPBK_ENABLED    0x1 
+#define SXE_LPBK_DISABLED   0x0
+#define SXE_LPBK_ENABLED	0x1
 
-#define PCI_VENDOR_ID_STARS      0x1FF2
-#define SXE_DEV_ID_ASIC          0x10a1
+#define PCI_VENDOR_ID_STARS	  0x1FF2
+#define SXE_DEV_ID_ASIC		  0x10a1
 
 #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ADDR(x) ((u8*)(x))[0],((u8*)(x))[1], \
-	           ((u8*)(x))[2],((u8*)(x))[3], \
-	           ((u8*)(x))[4],((u8*)(x))[5]
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+			   ((u8 *)(x))[2], ((u8 *)(x))[3], \
+			   ((u8 *)(x))[4], ((u8 *)(x))[5]
 
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
 #else
-#define rte_packet_prefetch(p)  do {} while(0)
+#define rte_packet_prefetch(p) \
+	do { \
+	} while (0)
 #endif
 
 #if 1
@@ -56,9 +58,9 @@ struct sxe_vlan_context;
 #endif
 
 struct sxe_ptp_context {
-	struct rte_timecounter      systime_tc;
-	struct rte_timecounter      rx_tstamp_tc;
-	struct rte_timecounter      tx_tstamp_tc;
+	struct rte_timecounter	  systime_tc;
+	struct rte_timecounter	  rx_tstamp_tc;
+	struct rte_timecounter	  tx_tstamp_tc;
 	u32 tx_hwtstamp_sec;
 	u32 tx_hwtstamp_nsec;
 };
@@ -75,7 +77,7 @@ struct sxe_adapter {
 #endif
 	struct sxe_ptp_context ptp_ctxt;
 	struct sxe_phy_context phy_ctxt;
-	struct sxe_virtual_context vt_ctxt; 
+	struct sxe_virtual_context vt_ctxt;
 
 	struct sxe_stats_info stats_info;
 	struct sxe_dcb_context dcb_ctxt;
@@ -97,7 +99,7 @@ struct sxe_adapter {
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 	bool rx_vec_allowed;
 #endif
-	s8 name[PCI_PRI_STR_SIZE+1]; 
+	s8 name[PCI_PRI_STR_SIZE+1];
 
 	u32 mtu;
 
@@ -114,4 +116,4 @@ void sxe_hw_start(struct sxe_hw *hw);
 
 bool is_sxe_supported(struct rte_eth_dev *dev);
 
-#endif 
+#endif
diff --git a/drivers/net/sxe/pf/sxe_dcb.c b/drivers/net/sxe/pf/sxe_dcb.c
index 5217cc655f..dad5b29e23 100644
--- a/drivers/net/sxe/pf/sxe_dcb.c
+++ b/drivers/net/sxe/pf/sxe_dcb.c
@@ -15,9 +15,9 @@
 #define DCB_RX_CONFIG  1
 #define DCB_TX_CONFIG  1
 
-#define DCB_CREDIT_QUANTUM	64   
-#define MAX_CREDIT_REFILL       511  
-#define MAX_CREDIT              4095 
+#define DCB_CREDIT_QUANTUM	64
+#define MAX_CREDIT_REFILL	   511
+#define MAX_CREDIT			  4095
 
 void sxe_dcb_init(struct rte_eth_dev *dev)
 {
@@ -54,7 +54,6 @@ void sxe_dcb_init(struct rte_eth_dev *dev)
 	cfg->vmdq_active = true;
 	cfg->round_robin_enable = false;
 
-	return;
 }
 
 static u8 sxe_dcb_get_tc_from_up(struct sxe_dcb_config *cfg,
@@ -64,14 +63,12 @@ static u8 sxe_dcb_get_tc_from_up(struct sxe_dcb_config *cfg,
 	u8 prio_mask = BIT(up);
 	u8 tc = cfg->num_tcs.pg_tcs;
 
-	if (!tc) {
+	if (!tc)
 		goto l_ret;
-	}
 
 	for (tc--; tc; tc--) {
-		if (prio_mask & tc_config[tc].channel[direction].up_to_tc_bitmap) {
+		if (prio_mask & tc_config[tc].channel[direction].up_to_tc_bitmap)
 			break;
-		}
 	}
 
 l_ret:
@@ -89,7 +86,6 @@ static void sxe_dcb_up2tc_map_parse(struct sxe_dcb_config *cfg,
 		LOG_DEBUG("up[%u] --- up2tc_map[%u]\n", up, map[up]);
 	}
 
-	return;
 }
 
 s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
@@ -122,10 +118,10 @@ s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 	max_high_water = (rx_buf_size -
 			RTE_ETHER_MAX_LEN) >> SXE_RX_PKT_BUF_SIZE_SHIFT;
 	if ((pfc_conf->fc.high_water > max_high_water) ||
-	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+		(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
 		PMD_LOG_ERR(INIT, "Invalid high/low water setup value in KB, "
-			    "high water=0x%x, low water=0x%x",
-			    pfc_conf->fc.high_water, pfc_conf->fc.low_water);
+				"high water=0x%x, low water=0x%x",
+				pfc_conf->fc.high_water, pfc_conf->fc.low_water);
 		PMD_LOG_ERR(INIT, "High_water must <= 0x%x", max_high_water);
 		ret = -EINVAL;
 		goto l_end;
@@ -162,21 +158,19 @@ s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
 	u8 tcs_num;
 	u8 i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
-	} else {
+	else
 		dcb_info->nb_tcs = 1;
-	}
 
 	tc_queue = &dcb_info->tc_queue;
 	tcs_num = dcb_info->nb_tcs;
 
-	if (dcb_config->vmdq_active) { 
+	if (dcb_config->vmdq_active) {
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-		}
 
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < tcs_num; j++) {
@@ -197,12 +191,11 @@ s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
 				}
 			}
 		}
-	} else { 
+	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		}
 
 		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
@@ -279,7 +272,6 @@ static void sxe_dcb_vmdq_rx_param_get(struct rte_eth_dev *dev,
 						(u8)(1 << i);
 	}
 
-	return;
 }
 
 void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
@@ -315,7 +307,6 @@ void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 					cfg->pool_map[i].pools);
 	}
 
-	return;
 }
 
 static void sxe_dcb_rx_param_get(struct rte_eth_dev *dev,
@@ -341,11 +332,10 @@ static void sxe_dcb_rx_param_get(struct rte_eth_dev *dev,
 						(u8)(1 << i);
 	}
 
-	return;
 }
 
 static void sxe_dcb_rx_hw_configure(struct rte_eth_dev *dev,
-		       struct sxe_dcb_config *dcb_config)
+			   struct sxe_dcb_config *dcb_config)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -354,7 +344,6 @@ static void sxe_dcb_rx_hw_configure(struct rte_eth_dev *dev,
 	sxe_hw_dcb_rx_configure(hw, dcb_config->vmdq_active,
 				RTE_ETH_DEV_SRIOV(dev).active,
 				dcb_config->num_tcs.pg_tcs);
-	return;
 }
 
 static void sxe_dcb_vmdq_tx_param_get(struct rte_eth_dev *dev,
@@ -385,7 +374,6 @@ static void sxe_dcb_vmdq_tx_param_get(struct rte_eth_dev *dev,
 						(u8)(1 << i);
 	}
 
-	return;
 }
 
 static void sxe_dcb_vmdq_tx_hw_configure(struct rte_eth_dev *dev,
@@ -402,7 +390,6 @@ static void sxe_dcb_vmdq_tx_hw_configure(struct rte_eth_dev *dev,
 
 	sxe_hw_dcb_tx_configure(hw, dcb_config->vmdq_active,
 				dcb_config->num_tcs.pg_tcs);
-	return;
 }
 
 static void sxe_dcb_tx_param_get(struct rte_eth_dev *dev,
@@ -428,7 +415,6 @@ static void sxe_dcb_tx_param_get(struct rte_eth_dev *dev,
 						(u8)(1 << i);
 	}
 
-	return;
 }
 
 static u32 sxe_dcb_min_credit_get(u32 max_frame)
@@ -468,9 +454,8 @@ static u32 sxe_dcb_cee_min_link_percent_get(
 		link_percentage = sxe_dcb_cee_tc_link_percent_get(
 					cee_config, direction, tc_index);
 
-		if (link_percentage && link_percentage < min_link_percent) {
+		if (link_percentage && link_percentage < min_link_percent)
 			min_link_percent = link_percentage;
-		}
 	}
 
 	return min_link_percent;
@@ -513,18 +498,16 @@ static s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
 		LOG_DEBUG_BDF("tc[%u] bwg_percent=%u, link_percentage=%u\n",
 			tc_index, tc_info->bwg_percent, link_percentage);
 
-		if (tc_info->bwg_percent > 0 && link_percentage == 0) {
+		if (tc_info->bwg_percent > 0 && link_percentage == 0)
 			link_percentage = 1;
-		}
 
 		tc_info->link_percent = (u8)link_percentage;
 
 		credit_refill = min(link_percentage * total_credit,
-				    (u32)MAX_CREDIT_REFILL);
+					(u32)MAX_CREDIT_REFILL);
 
-		if (credit_refill < min_credit) {
+		if (credit_refill < min_credit)
 			credit_refill = min_credit;
-		}
 
 		tc_info->data_credits_refill = (u16)credit_refill;
 		LOG_DEBUG_BDF("tc[%u] credit_refill=%u\n",
@@ -532,16 +515,14 @@ static s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
 
 		credit_max = (link_percentage * MAX_CREDIT) / 100;
 
-		if (credit_max < min_credit) {
+		if (credit_max < min_credit)
 			credit_max = min_credit;
-		}
 		LOG_DEBUG_BDF("tc[%u] credit_max=%u\n",
 					tc_index, credit_max);
 
-		if (direction == DCB_PATH_TX) {
+		if (direction == DCB_PATH_TX)
 			cee_config->tc_config[tc_index].desc_credits_max =
 				(u16)credit_max;
-		}
 
 		tc_info->data_credits_max = (u16)credit_max;
 	}
@@ -556,12 +537,10 @@ static void sxe_dcb_cee_refill_parse(struct sxe_dcb_config *cfg,
 	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
 
 	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
-		refill[tc] = tc_config[tc]. \
-			channel[direction].data_credits_refill;
+		refill[tc] = tc_config[tc].channel[direction].data_credits_refill;
 		LOG_DEBUG("tc[%u] --- refill[%u]\n", tc, refill[tc]);
 	}
 
-	return;
 }
 
 static void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_config *cfg,
@@ -575,7 +554,6 @@ static void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_config *cfg,
 		LOG_DEBUG("tc[%u] --- max_credits[%u]\n", tc, max_credits[tc]);
 	}
 
-	return;
 }
 
 static void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_config *cfg,
@@ -589,7 +567,6 @@ static void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_config *cfg,
 		LOG_DEBUG("tc[%u] --- bwgid[%u]\n", tc, bwgid[tc]);
 	}
 
-	return;
 }
 
 static void sxe_dcb_cee_prio_parse(struct sxe_dcb_config *cfg,
@@ -603,7 +580,6 @@ static void sxe_dcb_cee_prio_parse(struct sxe_dcb_config *cfg,
 		LOG_DEBUG("tc[%u] --- ptype[%u]\n", tc, ptype[tc]);
 	}
 
-	return;
 }
 
 static void sxe_dcb_cee_pfc_parse(struct sxe_dcb_config *cfg,
@@ -613,13 +589,11 @@ static void sxe_dcb_cee_pfc_parse(struct sxe_dcb_config *cfg,
 	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
 
 	for (*pfc_en = 0, up = 0; up < MAX_TRAFFIC_CLASS; up++) {
-		if (tc_config[map[up]].pfc_type != pfc_disabled) {
+		if (tc_config[map[up]].pfc_type != pfc_disabled)
 			*pfc_en |= BIT(up);
-		}
 	}
 	LOG_DEBUG("cfg[%p] pfc_en[0x%x]\n", cfg, *pfc_en);
 
-	return;
 }
 
 static s32 sxe_dcb_tc_stats_configure(struct sxe_hw *hw,
@@ -673,7 +647,6 @@ static void sxe_dcb_rx_mq_mode_configure(struct rte_eth_dev *dev,
 		break;
 	}
 
-	return;
 }
 
 static void sxe_dcb_tx_mq_mode_configure(struct rte_eth_dev *dev,
@@ -705,7 +678,6 @@ static void sxe_dcb_tx_mq_mode_configure(struct rte_eth_dev *dev,
 		break;
 	}
 
-	return;
 }
 
 static void sxe_dcb_bwg_percentage_alloc(struct rte_eth_dev *dev,
@@ -719,25 +691,22 @@ static void sxe_dcb_bwg_percentage_alloc(struct rte_eth_dev *dev,
 	u8 nb_tcs = dcb_config->num_tcs.pfc_tcs;
 
 	if (nb_tcs == RTE_ETH_4_TCS) {
-
-
-	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		if (map[i] >= nb_tcs) {
-			PMD_LOG_INFO(DRV, "map[up-%u] to tc[%u] not exist, "
-					"change to tc 0", i, map[i]);
-			map[i] = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			if (map[i] >= nb_tcs) {
+				PMD_LOG_INFO(DRV, "map[up-%u] to tc[%u] not exist, "
+								"change to tc 0", i, map[i]);
+				map[i] = 0;
+			}
 		}
-	}
 
 		for (i = 0; i < nb_tcs; i++) {
 			tc = &dcb_config->tc_config[i];
 			if (bw_conf->tc_num != nb_tcs) {
 				tc->channel[DCB_PATH_TX].bwg_percent =
+							(u8)(100 / nb_tcs);
+		}
+		tc->channel[DCB_PATH_RX].bwg_percent =
 					(u8)(100 / nb_tcs);
-			}
-
-			tc->channel[DCB_PATH_RX].bwg_percent =
-						(u8)(100 / nb_tcs);
 		}
 		for (; i < MAX_TRAFFIC_CLASS; i++) {
 			tc = &dcb_config->tc_config[i];
@@ -757,7 +726,6 @@ static void sxe_dcb_bwg_percentage_alloc(struct rte_eth_dev *dev,
 		}
 	}
 
-	return;
 }
 
 static void sxe_dcb_rx_pkt_buf_configure(struct sxe_hw *hw,
@@ -768,15 +736,12 @@ static void sxe_dcb_rx_pkt_buf_configure(struct sxe_hw *hw,
 
 	pbsize = (u16)(rx_buffer_size / tcs_num);
 
-	for (i = 0; i < tcs_num; i++) {
+	for (i = 0; i < tcs_num; i++)
 		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
-	}
 
-	for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+	for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
-	}
 
-	return;
 }
 
 static void sxe_dcb_tx_pkt_buf_configure(struct sxe_hw *hw, u8 tcs_num)
@@ -787,7 +752,6 @@ static void sxe_dcb_tx_pkt_buf_configure(struct sxe_hw *hw, u8 tcs_num)
 	sxe_hw_tx_pkt_buf_thresh_configure(hw, tcs_num, true);
 
 	sxe_hw_tx_pkt_buf_switch(hw, true);
-	return;
 }
 
 static void sxe_dcb_rx_configure(struct rte_eth_dev *dev,
@@ -795,10 +759,10 @@ static void sxe_dcb_rx_configure(struct rte_eth_dev *dev,
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
-	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 tsa[MAX_TRAFFIC_CLASS]	 = {0};
 	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
 	u16 refill[MAX_TRAFFIC_CLASS] = {0};
-	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+	u16 max[MAX_TRAFFIC_CLASS]	= {0};
 
 	sxe_dcb_rx_pkt_buf_configure(hw, SXE_RX_PKT_BUF_SIZE, dcb_config->num_tcs.pg_tcs);
 
@@ -809,7 +773,6 @@ static void sxe_dcb_rx_configure(struct rte_eth_dev *dev,
 
 	sxe_hw_dcb_rx_bw_alloc_configure(hw, refill, max,
 				bwgid, tsa, map, MAX_USER_PRIORITY);
-	return;
 }
 
 static void sxe_dcb_tx_configure(struct rte_eth_dev *dev,
@@ -817,10 +780,10 @@ static void sxe_dcb_tx_configure(struct rte_eth_dev *dev,
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
-	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 tsa[MAX_TRAFFIC_CLASS]	 = {0};
 	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
 	u16 refill[MAX_TRAFFIC_CLASS] = {0};
-	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+	u16 max[MAX_TRAFFIC_CLASS]	= {0};
 
 	sxe_dcb_tx_pkt_buf_configure(hw, dcb_config->num_tcs.pg_tcs);
 
@@ -833,7 +796,6 @@ static void sxe_dcb_tx_configure(struct rte_eth_dev *dev,
 	sxe_hw_dcb_tx_data_bw_alloc_configure(hw, refill, max,
 				bwgid, tsa, map, MAX_USER_PRIORITY);
 
-	return;
 }
 
 static void sxe_dcb_pfc_configure(struct sxe_hw *hw,
@@ -855,13 +817,11 @@ static void sxe_dcb_pfc_configure(struct sxe_hw *hw,
 	}
 
 	sxe_dcb_cee_pfc_parse(dcb_config, map, &pfc_en);
-	if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS) {
+	if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
 		pfc_en &= 0x0F;
-	}
 
 	sxe_hw_dcb_pfc_configure(hw, pfc_en, map, MAX_USER_PRIORITY);
 
-	return;
 }
 
 static void sxe_dcb_hw_configure(struct rte_eth_dev *dev,
@@ -884,21 +844,17 @@ static void sxe_dcb_hw_configure(struct rte_eth_dev *dev,
 	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_TX);
 	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_RX);
 
-	if (rx_configed) {
+	if (rx_configed)
 		sxe_dcb_rx_configure(dev, dcb_config, map);
-	}
 
-	if (tx_configed) {
+	if (tx_configed)
 		sxe_dcb_tx_configure(dev, dcb_config, map);
-	}
 
 	sxe_dcb_tc_stats_configure(hw, dcb_config);
 
-	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT)
 		sxe_dcb_pfc_configure(hw, dcb_config, map);
-	}
 
-	return;
 }
 
 void sxe_dcb_configure(struct rte_eth_dev *dev)
@@ -913,21 +869,19 @@ void sxe_dcb_configure(struct rte_eth_dev *dev)
 	if ((dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB) &&
 		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB) &&
 		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)) {
-		PMD_LOG_INFO(INIT, "dcb config failed, cause mq_mode=0x%x", 
+		PMD_LOG_INFO(INIT, "dcb config failed, cause mq_mode=0x%x",
 				(u8)dev_conf->rxmode.mq_mode);
-		goto l_end;
+		return;
 	}
 
 	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES) {
-		PMD_LOG_INFO(INIT, "dcb config failed, cause nb_rx_queues=%u > %u", 
+		PMD_LOG_INFO(INIT, "dcb config failed, cause nb_rx_queues=%u > %u",
 			dev->data->nb_rx_queues, RTE_ETH_DCB_NUM_QUEUES);
-		goto l_end;
+		return;
 	}
 
 	sxe_dcb_hw_configure(dev, dcb_cfg);
 
-l_end:
-	return;
 }
 
 s32 rte_pmd_sxe_tc_bw_set(u8 port,
@@ -979,20 +933,19 @@ s32 rte_pmd_sxe_tc_bw_set(u8 port,
 
 	if (nb_tcs != tc_num) {
 		PMD_LOG_ERR(DRV,
-			    "Weight should be set for all %d enabled TCs.",
-			    nb_tcs);
+				"Weight should be set for all %d enabled TCs.",
+				nb_tcs);
 		ret = -EINVAL;
 		goto l_end;
 	}
 
 	sum = 0;
-	for (i = 0; i < nb_tcs; i++) {
+	for (i = 0; i < nb_tcs; i++)
 		sum += bw_weight[i];
-	}
 
 	if (sum != 100) {
 		PMD_LOG_ERR(DRV,
-			    "The summary of the TC weight should be 100.");
+				"The summary of the TC weight should be 100.");
 		ret = -EINVAL;
 		goto l_end;
 	}
diff --git a/drivers/net/sxe/pf/sxe_dcb.h b/drivers/net/sxe/pf/sxe_dcb.h
index accfc930af..2330febb2e 100644
--- a/drivers/net/sxe/pf/sxe_dcb.h
+++ b/drivers/net/sxe/pf/sxe_dcb.h
@@ -1,21 +1,21 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C), 2022, Linkdata Technology Co., Ltd.
  */
- 
+
 #ifndef __SXE_DCB_H__
 #define __SXE_DCB_H__
 #include <stdbool.h>
 
-#define PBA_STRATEGY_EQUAL       (0)    
-#define PBA_STRATEGY_WEIGHTED    (1)	
-#define MAX_BW_GROUP             8
-#define MAX_USER_PRIORITY        8
-#define SXE_DCB_MAX_TRAFFIC_CLASS        8
+#define PBA_STRATEGY_EQUAL	   (0)
+#define PBA_STRATEGY_WEIGHTED	(1)
+#define MAX_BW_GROUP			 8
+#define MAX_USER_PRIORITY		8
+#define SXE_DCB_MAX_TRAFFIC_CLASS		8
 
 enum sxe_dcb_strict_prio_type {
-	DCB_PRIO_NONE = 0, 
-	DCB_PRIO_GROUP,    
-	DCB_PRIO_LINK      
+	DCB_PRIO_NONE = 0,
+	DCB_PRIO_GROUP,
+	DCB_PRIO_LINK
 };
 enum {
 	DCB_PATH_TX   =  0,
@@ -35,18 +35,18 @@ enum sxe_dcb_pba_config {
 };
 
 struct sxe_dcb_num_tcs {
-	u8 pg_tcs;	
+	u8 pg_tcs;
 	u8 pfc_tcs;
 };
 
 struct sxe_tc_bw_alloc {
-	u8 bwg_id;		  
-	u8 bwg_percent;		  
-	u8 link_percent;	  
-	u8 up_to_tc_bitmap;	  
-	u16 data_credits_refill;  
-	u16 data_credits_max;	  
-	enum sxe_dcb_strict_prio_type prio_type; 
+	u8 bwg_id;
+	u8 bwg_percent;
+	u8 link_percent;
+	u8 up_to_tc_bitmap;
+	u16 data_credits_refill;
+	u16 data_credits_max;
+	enum sxe_dcb_strict_prio_type prio_type;
 };
 
 enum sxe_dcb_pfc_type {
@@ -57,17 +57,17 @@ enum sxe_dcb_pfc_type {
 };
 
 struct sxe_tc_config {
-	struct sxe_tc_bw_alloc channel[DCB_PATH_NUM]; 
-	enum sxe_dcb_pfc_type  pfc_type; 
+	struct sxe_tc_bw_alloc channel[DCB_PATH_NUM];
+	enum sxe_dcb_pfc_type  pfc_type;
 
-	u16 desc_credits_max; 
-	u8 tc; 
+	u16 desc_credits_max;
+	u8 tc;
 };
 
 struct sxe_dcb_config {
 	struct sxe_tc_config tc_config[SXE_DCB_MAX_TRAFFIC_CLASS];
 	struct sxe_dcb_num_tcs num_tcs;
-	u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP]; 
+	u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP];
 	bool pfc_mode_enable;
 	bool round_robin_enable;
 
@@ -76,7 +76,7 @@ struct sxe_dcb_config {
 };
 
 struct sxe_bw_config {
-	u8 tc_num; 
+	u8 tc_num;
 };
 
 struct sxe_dcb_context {
@@ -86,7 +86,7 @@ struct sxe_dcb_context {
 
 void sxe_dcb_init(struct rte_eth_dev *dev);
 
-s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 					struct rte_eth_pfc_conf *pfc_conf);
 
 s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
diff --git a/drivers/net/sxe/pf/sxe_ethdev.c b/drivers/net/sxe/pf/sxe_ethdev.c
index 00c6674f75..136469cb72 100644
--- a/drivers/net/sxe/pf/sxe_ethdev.c
+++ b/drivers/net/sxe/pf/sxe_ethdev.c
@@ -53,20 +53,20 @@
 #include "sxe_tm.h"
 #endif
 
-#define SXE_DEFAULT_MTU             1500
-#define SXE_ETH_HLEN                14
-#define SXE_ETH_FCS_LEN             4
-#define SXE_ETH_FRAME_LEN           1514
+#define SXE_DEFAULT_MTU			 1500
+#define SXE_ETH_HLEN				14
+#define SXE_ETH_FCS_LEN			 4
+#define SXE_ETH_FRAME_LEN		   1514
 
-#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD) 
+#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD)
 
-STATIC const struct rte_eth_desc_lim sxe_rx_desc_lim = {
+static const struct rte_eth_desc_lim sxe_rx_desc_lim = {
 	.nb_max = SXE_MAX_RING_DESC,
 	.nb_min = SXE_MIN_RING_DESC,
 	.nb_align = SXE_RX_DESC_RING_ALIGN,
 };
 
-STATIC const struct rte_eth_desc_lim sxe_tx_desc_lim = {
+static const struct rte_eth_desc_lim sxe_tx_desc_lim = {
 	.nb_max = SXE_MAX_RING_DESC,
 	.nb_min = SXE_MIN_RING_DESC,
 	.nb_align = SXE_TX_DESC_RING_ALIGN,
@@ -74,9 +74,9 @@ STATIC const struct rte_eth_desc_lim sxe_tx_desc_lim = {
 	.nb_mtu_seg_max = SXE_TX_MAX_SEG,
 };
 
-s32 sxe_dev_reset(struct rte_eth_dev *eth_dev);
+static s32 sxe_dev_reset(struct rte_eth_dev *eth_dev);
 
-STATIC s32 sxe_dev_configure(struct rte_eth_dev *dev)
+static s32 sxe_dev_configure(struct rte_eth_dev *dev)
 {
 	s32 ret;
 	struct sxe_adapter *adapter = dev->data->dev_private;
@@ -94,7 +94,7 @@ STATIC s32 sxe_dev_configure(struct rte_eth_dev *dev)
 	ret  = sxe_mq_mode_check(dev);
 	if (ret != 0) {
 		PMD_LOG_ERR(INIT, "sxe mq mode check fails with %d.",
-			    ret);
+				ret);
 		goto l_end;
 	}
 
@@ -114,13 +114,12 @@ STATIC s32 sxe_dev_configure(struct rte_eth_dev *dev)
 static void sxe_txrx_start(struct rte_eth_dev *dev)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 
 	sxe_hw_rx_cap_switch_on(hw);
 
 	sxe_hw_mac_txrx_enable(hw);
 
-	return;
 }
 
 static s32 sxe_link_configure(struct rte_eth_dev *dev)
@@ -145,11 +144,11 @@ static s32 sxe_link_configure(struct rte_eth_dev *dev)
 		goto l_end;
 	}
 
-	if (adapter->phy_ctxt.sfp_info.multispeed_fiber) {
+	if (adapter->phy_ctxt.sfp_info.multispeed_fiber)
 		ret = sxe_multispeed_sfp_link_configure(dev, conf_speeds, false);
-	} else {
+	else
 		ret = sxe_sfp_link_configure(dev);
-	}
+
 	if (ret) {
 		PMD_LOG_ERR(INIT, "link config failed, speed=%x",
 						conf_speeds);
@@ -184,7 +183,7 @@ static s32 sxe_loopback_pcs_init(struct sxe_adapter *adapter,
 	sxe_fc_mac_addr_set(adapter);
 
 	LOG_INFO_BDF("mode:%u max_frame:0x%x loopback pcs init done.\n",
-		     mode, max_frame);
+			 mode, max_frame);
 l_end:
 	return ret;
 }
@@ -300,12 +299,12 @@ static s32 sxe_dev_start(struct rte_eth_dev *dev)
 
 	if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_DISABLED) {
 		sxe_link_configure(dev);
-	} else if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_ENABLED){
-	       sxe_loopback_configure(adapter);
+	} else if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_ENABLED) {
+		sxe_loopback_configure(adapter);
 	} else {
 		ret = -ENOTSUP;
 		PMD_LOG_ERR(INIT, "unsupport loopback mode:%u.",
-			    dev->data->dev_conf.lpbk_mode);
+				dev->data->dev_conf.lpbk_mode);
 		goto l_end;
 	}
 
@@ -320,9 +319,8 @@ static s32 sxe_dev_start(struct rte_eth_dev *dev)
 	sxe_dcb_configure(dev);
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
-	if (macsec_ctxt->offload_en) {
+	if (macsec_ctxt->offload_en)
 		sxe_macsec_enable(dev, macsec_ctxt);
-	}
 #endif
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
@@ -389,15 +387,14 @@ static s32 sxe_dev_stop(struct rte_eth_dev *dev)
 	adapter->is_stopped = true;
 
 	num = rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, dev);
-	if (num > 0) {
+	if (num > 0)
 		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
-	}
 
 	LOG_DEBUG_BDF("dev stop success.");
 
 l_end:
 #ifdef DPDK_19_11_6
-	return;
+	LOG_DEBUG_BDF("at end of dev stop.");
 #else
 	return ret;
 #endif
@@ -417,7 +414,7 @@ static s32 sxe_dev_close(struct rte_eth_dev *dev)
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		PMD_LOG_INFO(INIT, "not primary, do nothing");
-		 goto l_end;
+		goto l_end;
 	}
 
 	sxe_hw_hdc_drv_status_set(hw, (u32)false);
@@ -436,9 +433,8 @@ static s32 sxe_dev_close(struct rte_eth_dev *dev)
 	sxe_dev_stop(dev);
 #else
 	ret = sxe_dev_stop(dev);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "dev stop fail.(err:%d)", ret);
-	}
 #endif
 
 	sxe_queues_free(dev);
@@ -461,7 +457,7 @@ static s32 sxe_dev_close(struct rte_eth_dev *dev)
 
 l_end:
 #ifdef DPDK_19_11_6
-	return;
+	LOG_DEBUG_BDF("at end of dev close.");
 #else
 	return ret;
 #endif
@@ -476,13 +472,12 @@ static s32 sxe_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = SXE_HW_TXRX_RING_NUM_MAX;
 	dev_info->max_tx_queues = SXE_HW_TXRX_RING_NUM_MAX;
 	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
-		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE)
 			dev_info->max_tx_queues = SXE_HW_TX_NONE_MODE_Q_NUM;
-		}
 	}
 
 	dev_info->min_rx_bufsize = 1024;
-	dev_info->max_rx_pktlen = 15872; 
+	dev_info->max_rx_pktlen = 15872;
 	dev_info->max_mac_addrs = SXE_UC_ENTRY_NUM_MAX;
 	dev_info->max_hash_mac_addrs = SXE_HASH_UC_NUM_MAX;
 	dev_info->max_vfs = pci_dev->max_vfs;
@@ -493,7 +488,7 @@ static s32 sxe_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->rx_queue_offload_capa = sxe_rx_queue_offload_capa_get(dev);
 	dev_info->rx_offload_capa = (sxe_rx_port_offload_capa_get(dev) |
-				     dev_info->rx_queue_offload_capa);
+					 dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = sxe_tx_queue_offload_capa_get(dev);
 	dev_info->tx_offload_capa = sxe_tx_port_offload_capa_get(dev);
 
@@ -548,7 +543,7 @@ static s32 sxe_mtu_set(struct rte_eth_dev *dev, u16 mtu)
 
 	ret = sxe_dev_infos_get(dev, &dev_info);
 	if (ret != 0) {
-		PMD_LOG_ERR(INIT, "get dev info fails with ret=%d",ret);
+		PMD_LOG_ERR(INIT, "get dev info fails with ret=%d", ret);
 		goto l_end;
 	}
 
@@ -586,7 +581,7 @@ static s32 sxe_mtu_set(struct rte_eth_dev *dev, u16 mtu)
 }
 
 static int sxe_get_regs(struct rte_eth_dev *dev,
-	      struct rte_dev_reg_info *regs)
+		  struct rte_dev_reg_info *regs)
 {
 	s32 ret = 0;
 	u32 *data = regs->data;
@@ -625,7 +620,7 @@ static s32 sxe_led_reset(struct rte_eth_dev *dev)
 	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
 	struct sxe_hw *hw = &adapter->hw;
 
-	ctrl.mode = SXE_IDENTIFY_LED_RESET; 
+	ctrl.mode = SXE_IDENTIFY_LED_RESET;
 	ctrl.duration = 0;
 
 	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
@@ -635,7 +630,7 @@ static s32 sxe_led_reset(struct rte_eth_dev *dev)
 		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led reset", ret);
 		ret = -EIO;
 	} else {
-		LOG_DEBUG_BDF("led reset sucess");
+		LOG_DEBUG_BDF("led reset success");
 	}
 
 	return ret;
@@ -648,8 +643,7 @@ static s32 sxe_led_ctrl(struct sxe_adapter *adapter, bool is_on)
 	struct sxe_led_ctrl ctrl;
 	struct sxe_hw *hw = &adapter->hw;
 
-	ctrl.mode = (true == is_on) ? SXE_IDENTIFY_LED_ON : \
-					SXE_IDENTIFY_LED_OFF;
+	ctrl.mode = (true == is_on) ? SXE_IDENTIFY_LED_ON : SXE_IDENTIFY_LED_OFF;
 	ctrl.duration = 0;
 
 	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
@@ -708,11 +702,10 @@ static int sxe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
 		goto l_end;
 	}
 
-	ret += 1; 
+	ret += 1;
 
-	if (fw_size >= (size_t)ret) {
+	if (fw_size >= (size_t)ret)
 		ret = 0;
-	}
 
 l_end:
 	return ret;
@@ -787,9 +780,9 @@ static const struct eth_dev_ops sxe_eth_dev_ops = {
 	.timesync_read_time	= sxe_timesync_read_time,
 	.timesync_write_time	= sxe_timesync_write_time,
 
-	.vlan_filter_set      = sxe_vlan_filter_set,
-	.vlan_tpid_set        = sxe_vlan_tpid_set,
-	.vlan_offload_set     = sxe_vlan_offload_set,
+	.vlan_filter_set	  = sxe_vlan_filter_set,
+	.vlan_tpid_set		= sxe_vlan_tpid_set,
+	.vlan_offload_set	 = sxe_vlan_offload_set,
 	.vlan_strip_queue_set = sxe_vlan_strip_queue_set,
 
 	.get_reg		= sxe_get_regs,
@@ -809,8 +802,8 @@ static const struct eth_dev_ops sxe_eth_dev_ops = {
 
 #ifdef ETH_DEV_MIRROR_RULE
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
-	.mirror_rule_set        = sxe_mirror_rule_set,
-	.mirror_rule_reset      = sxe_mirror_rule_reset,
+	.mirror_rule_set		= sxe_mirror_rule_set,
+	.mirror_rule_reset	  = sxe_mirror_rule_reset,
 #endif
 #endif
 
@@ -885,20 +878,20 @@ static s32 sxe_hw_base_init(struct rte_eth_dev *eth_dev)
 #endif
 
 l_out:
-	if (ret) {
+	if (ret)
 		sxe_hw_hdc_drv_status_set(hw, (u32)false);
-	}
+
 	return ret;
 }
 
-void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
 {
 	__sxe_secondary_proc_init(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
 
-	return;
 }
 
-STATIC void sxe_ethdev_mac_mem_free(struct rte_eth_dev *eth_dev)
+static void sxe_ethdev_mac_mem_free(struct rte_eth_dev *eth_dev)
 {
 	struct sxe_adapter *adapter = eth_dev->data->dev_private;
 
@@ -917,14 +910,13 @@ STATIC void sxe_ethdev_mac_mem_free(struct rte_eth_dev *eth_dev)
 		adapter->mac_filter_ctxt.uc_addr_table = NULL;
 	}
 
-	return;
 }
 
-#ifdef  DPDK_19_11_6
+#ifdef DPDK_19_11_6
 static void sxe_pf_init(struct sxe_adapter *adapter)
 {
 	memset(&adapter->vlan_ctxt, 0, sizeof(adapter->vlan_ctxt));
-	memset(&adapter->mac_filter_ctxt.uta_hash_table, 0, \
+	memset(&adapter->mac_filter_ctxt.uta_hash_table, 0,
 		sizeof(adapter->mac_filter_ctxt.uta_hash_table));
 	memset(&adapter->dcb_ctxt.config, 0, sizeof(adapter->dcb_ctxt.config));
 
@@ -932,7 +924,6 @@ static void sxe_pf_init(struct sxe_adapter *adapter)
 	memset(&adapter->filter_ctxt, 0, sizeof(adapter->filter_ctxt));
 #endif
 
-	return;
 }
 #endif
 
@@ -948,7 +939,7 @@ s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused)
 	eth_dev->dev_ops = &sxe_eth_dev_ops;
 
 #ifndef ETH_DEV_OPS_HAS_DESC_RELATE
-	eth_dev->rx_queue_count       = sxe_rx_queue_count;
+	eth_dev->rx_queue_count	   = sxe_rx_queue_count;
 	eth_dev->rx_descriptor_status = sxe_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = sxe_tx_descriptor_status;
 #ifdef ETH_DEV_RX_DESC_DONE
@@ -962,10 +953,12 @@ s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused)
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
-		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed,
+				&adapter->rx_vec_allowed);
 #else
 		bool rx_vec_allowed = 0;
-		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &rx_vec_allowed);
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed,
+				&rx_vec_allowed);
 #endif
 		goto l_out;
 	}
@@ -973,7 +966,7 @@ s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused)
 	rte_atomic32_clear(&adapter->link_thread_running);
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-#ifdef  DPDK_19_11_6
+#ifdef DPDK_19_11_6
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 	sxe_pf_init(adapter);
 #endif
@@ -1045,13 +1038,13 @@ s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
-s32 sxe_dev_reset(struct rte_eth_dev *eth_dev)
+static s32 sxe_dev_reset(struct rte_eth_dev *eth_dev)
 {
 	s32 ret;
 
 	if (eth_dev->data->sriov.active) {
 		ret = -ENOTSUP;
-		PMD_LOG_ERR(INIT, "sriov actived, not support reset pf port[%u]",
+		PMD_LOG_ERR(INIT, "sriov activated, not support reset pf port[%u]",
 			eth_dev->data->port_id);
 		goto l_end;
 	}
@@ -1092,7 +1085,7 @@ s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on)
 	if (on > 1) {
 		ret = -EINVAL;
 		PMD_LOG_ERR(DRV, "port:%u invalid user configure value:%u.",
-		            port, on);
+					port, on);
 		goto l_out;
 	}
 
diff --git a/drivers/net/sxe/pf/sxe_ethdev.h b/drivers/net/sxe/pf/sxe_ethdev.h
index f1165e0413..66034343ea 100644
--- a/drivers/net/sxe/pf/sxe_ethdev.h
+++ b/drivers/net/sxe/pf/sxe_ethdev.h
@@ -7,11 +7,11 @@
 
 #include "sxe.h"
 
-#define SXE_MMW_SIZE_DEFAULT        0x4
-#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
-#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600 
+#define SXE_MMW_SIZE_DEFAULT		0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME	0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE	0x2600
 
-#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD) 
+#define SXE_ETH_MAX_LEN  (RTE_ETHER_MTU + SXE_ETH_OVERHEAD)
 
 #define SXE_HKEY_MAX_INDEX 10
 #define SXE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
@@ -22,6 +22,7 @@ s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused);
 
 s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev);
 
-void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
 
 #endif
diff --git a/drivers/net/sxe/pf/sxe_filter.c b/drivers/net/sxe/pf/sxe_filter.c
index e323af94f8..1d1d78b516 100644
--- a/drivers/net/sxe/pf/sxe_filter.c
+++ b/drivers/net/sxe/pf/sxe_filter.c
@@ -25,7 +25,7 @@
 #include "sxe_cli.h"
 #include "sxe_compat_version.h"
 
-#define PF_POOL_INDEX(p)        (p)
+#define PF_POOL_INDEX(p)		(p)
 
 #define SXE_STRIP_BITMAP_SET(h, q) \
 	do { \
@@ -76,16 +76,14 @@ static void sxe_default_mac_addr_get(struct sxe_adapter *adapter)
 	ret = sxe_get_mac_addr_from_fw(adapter, mac_addr.addr_bytes);
 	if (ret || !rte_is_valid_assigned_ether_addr(&mac_addr)) {
 		LOG_DEBUG("invalid default mac addr:"MAC_FMT" result:%d\n",
-			      MAC_ADDR(mac_addr.addr_bytes), ret);
-		goto l_out;
+				  MAC_ADDR(mac_addr.addr_bytes), ret);
+		return;
 	}
 
 	LOG_DEBUG("default mac addr = "MAC_FMT"\n", MAC_ADDR(mac_addr.addr_bytes));
 	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.def_mac_addr);
 	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
 
-l_out:
-	return;
 }
 
 static u8 sxe_sw_uc_entry_add(struct sxe_adapter *adapter, u8 index,
@@ -114,9 +112,8 @@ static u8 sxe_sw_uc_entry_del(struct sxe_adapter *adapter, u8 index)
 	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
 
 	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
-		if (!uc_table[i].used || (uc_table[i].type != SXE_PF)) {
+		if (!uc_table[i].used || (uc_table[i].type != SXE_PF))
 			continue;
-		}
 
 		if (uc_table[i].original_index == index) {
 			uc_table[i].used = false;
@@ -154,16 +151,14 @@ s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
 	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
 
 	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
-		if (!uc_table[i].used || (uc_table[i].type == SXE_PF)) {
+		if (!uc_table[i].used || (uc_table[i].type == SXE_PF))
 			continue;
-		}
 
 		if (uc_table[i].vf_idx == vf_idx) {
 			uc_table[i].used = false;
 			sxe_hw_uc_addr_del(&adapter->hw, i);
-			if (!macvlan) {
+			if (!macvlan)
 				break;
-			}
 		}
 	}
 
@@ -244,11 +239,11 @@ s32 sxe_promiscuous_enable(struct rte_eth_dev *dev)
 	u32 flt_ctrl;
 
 	flt_ctrl = sxe_hw_rx_mode_get(hw);
-	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
 
 	flt_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
 
-	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
 	sxe_hw_rx_mode_set(hw, flt_ctrl);
 
 	return 0;
@@ -261,16 +256,15 @@ s32 sxe_promiscuous_disable(struct rte_eth_dev *dev)
 	u32 flt_ctrl;
 
 	flt_ctrl = sxe_hw_rx_mode_get(hw);
-	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
 
 	flt_ctrl &= (~SXE_FCTRL_UPE);
-	if (dev->data->all_multicast == 1) {
+	if (dev->data->all_multicast == 1)
 		flt_ctrl |= SXE_FCTRL_MPE;
-	} else {
+	else
 		flt_ctrl &= (~SXE_FCTRL_MPE);
-	}
 
-	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
 	sxe_hw_rx_mode_set(hw, flt_ctrl);
 
 	return 0;
@@ -283,11 +277,11 @@ s32 sxe_allmulticast_enable(struct rte_eth_dev *dev)
 	u32 flt_ctrl;
 
 	flt_ctrl = sxe_hw_rx_mode_get(hw);
-	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
 
 	flt_ctrl |= SXE_FCTRL_MPE;
 
-	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
 	sxe_hw_rx_mode_set(hw, flt_ctrl);
 
 	return 0;
@@ -300,16 +294,16 @@ s32 sxe_allmulticast_disable(struct rte_eth_dev *dev)
 	u32 flt_ctrl;
 
 	if (dev->data->promiscuous == 1) {
-		PMD_LOG_DEBUG(DRV,"promiscuous is enable, allmulticast must be enabled.\n");
+		PMD_LOG_DEBUG(DRV, "promiscuous is enable, allmulticast must be enabled.\n");
 		goto l_out;
 	}
 
 	flt_ctrl = sxe_hw_rx_mode_get(hw);
-	PMD_LOG_DEBUG(DRV,"read flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
 
 	flt_ctrl &= (~SXE_FCTRL_MPE);
 
-	PMD_LOG_DEBUG(DRV,"write flt_ctrl=0x%x\n", flt_ctrl);
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
 	sxe_hw_rx_mode_set(hw, flt_ctrl);
 
 l_out:
@@ -317,8 +311,8 @@ s32 sxe_allmulticast_disable(struct rte_eth_dev *dev)
 }
 
 s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr,
-			     u32 index, u32 pool)
+				 struct rte_ether_addr *mac_addr,
+				 u32 index, u32 pool)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -359,15 +353,13 @@ void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
 	if (ret) {
 		PMD_LOG_ERR(DRV, "rar_idx:%u remove fail.(err:%d)",
 				rar_idx, ret);
-		goto l_out;
+		return;
 	}
 
 	PMD_LOG_INFO(DRV, "rar_idx:%u mac_addr:"MAC_FMT" remove done",
 			rar_idx,
 			MAC_ADDR(&dev->data->mac_addrs[rar_idx]));
 
-l_out:
-	return;
 }
 
 void sxe_fc_mac_addr_set(struct sxe_adapter *adapter)
@@ -377,11 +369,10 @@ void sxe_fc_mac_addr_set(struct sxe_adapter *adapter)
 	sxe_hw_fc_mac_addr_set(hw,
 			adapter->mac_filter_ctxt.fc_mac_addr.addr_bytes);
 
-	return;
 }
 
 s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr)
+				 struct rte_ether_addr *mac_addr)
 {
 	u8 pool_idx;
 	struct sxe_adapter *adapter = dev->data->dev_private;
@@ -425,7 +416,6 @@ static void sxe_hash_mac_addr_parse(u8 *mac_addr, u16 *reg_idx,
 	PMD_LOG_DEBUG(DRV, "mac_addr:"MAC_FMT" hash reg_idx:%u bit_idx:%u",
 			 MAC_ADDR(mac_addr), *reg_idx, *bit_idx);
 
-	return;
 }
 
 s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
@@ -442,16 +432,15 @@ s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
 	sxe_hash_mac_addr_parse(mac_addr->addr_bytes, &reg_idx, &bit_idx);
 
 	value = (mac_filter->uta_hash_table[reg_idx] >> bit_idx) & 0x1;
-	if (value == on) {
+	if (value == on)
 		goto l_out;
-	}
 
 	value = sxe_hw_uta_hash_table_get(hw, reg_idx);
 	if (on) {
 		mac_filter->uta_used_count++;
 		value |= (0x1 << bit_idx);
 		mac_filter->uta_hash_table[reg_idx] |= (0x1 << bit_idx);
-	} else { 
+	} else {
 		mac_filter->uta_used_count--;
 		value &= ~(0x1 << bit_idx);
 		mac_filter->uta_hash_table[reg_idx] &= ~(0x1 << bit_idx);
@@ -509,13 +498,11 @@ s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
 		mac_filter->mta_hash_table[reg_idx] |= (0x1 << bit_idx);
 	}
 
-	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) {
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++)
 		sxe_hw_mta_hash_table_set(hw, i, mac_filter->mta_hash_table[i]);
-	}
 
-	if (nb_mc_addr) {
+	if (nb_mc_addr)
 		sxe_hw_mc_filter_enable(hw);
-	}
 
 	PMD_LOG_INFO(DRV, "mc addr list cnt:%u set to mta done.", nb_mc_addr);
 
@@ -535,11 +522,10 @@ s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on)
 	bit_idx = (vlan_id & SXE_VLAN_ID_BIT_MASK);
 
 	value = sxe_hw_vlan_filter_array_read(hw, reg_idx);
-	if (on) {
+	if (on)
 		value |= (1 << bit_idx);
-	} else {
+	else
 		value &= ~(1 << bit_idx);
-	}
 
 	sxe_hw_vlan_filter_array_write(hw, reg_idx, value);
 
@@ -563,11 +549,10 @@ static void sxe_vlan_tpid_write(struct sxe_hw *hw, u16 tpid)
 		(tpid << SXE_DMATXCTL_VT_SHIFT);
 	sxe_hw_txctl_vlan_type_set(hw, value);
 
-	return;
 }
 
 s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
-		    enum rte_vlan_type vlan_type, u16 tpid)
+			enum rte_vlan_type vlan_type, u16 tpid)
 {
 	struct sxe_adapter *adapter = eth_dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -585,7 +570,7 @@ s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
 		} else {
 			ret = -ENOTSUP;
 			PMD_LOG_ERR(DRV, "unsupport inner vlan without "
-				     "global double vlan.");
+					 "global double vlan.");
 		}
 		break;
 	case RTE_ETH_VLAN_TYPE_OUTER:
@@ -615,19 +600,18 @@ static void sxe_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bo
 	sxe_rx_queue_s *rxq;
 
 	if ((queue_idx >= SXE_HW_TXRX_RING_NUM_MAX) ||
-	    (queue_idx >= dev->data->nb_rx_queues)) {
+		(queue_idx >= dev->data->nb_rx_queues)) {
 		PMD_LOG_ERR(DRV, "invalid queue idx:%u exceed max"
 			   " queue number:%u or nb_rx_queues:%u.",
 			   queue_idx, SXE_HW_TXRX_RING_NUM_MAX,
 			   dev->data->nb_rx_queues);
-		goto l_out;
+		return;
 	}
 
-	if (on) {
+	if (on)
 		SXE_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
-	} else {
+	else
 		SXE_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
-	}
 
 	rxq = dev->data->rx_queues[queue_idx];
 
@@ -640,10 +624,8 @@ static void sxe_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bo
 	}
 
 	PMD_LOG_INFO(DRV, "queue idx:%u vlan strip on:%d set bitmap and offload done.",
-		     queue_idx, on);
+			 queue_idx, on);
 
-l_out:
-	return;
 }
 
 void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev)
@@ -659,17 +641,15 @@ void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			on = true;
-		} else {
+		else
 			on = false;
-		}
 		sxe_hw_vlan_tag_strip_switch(hw, i, on);
 
 		sxe_vlan_strip_bitmap_set(dev, i, on);
 	}
 
-	return;
 }
 
 static void sxe_vlan_filter_disable(struct rte_eth_dev *dev)
@@ -681,7 +661,6 @@ static void sxe_vlan_filter_disable(struct rte_eth_dev *dev)
 
 	sxe_hw_vlan_filter_switch(hw, 0);
 
-	return;
 }
 
 static void sxe_vlan_filter_enable(struct rte_eth_dev *dev)
@@ -699,11 +678,9 @@ static void sxe_vlan_filter_enable(struct rte_eth_dev *dev)
 	vlan_ctl |= SXE_VLNCTRL_VFE;
 	sxe_hw_vlan_type_set(hw, vlan_ctl);
 
-	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
 		sxe_hw_vlan_filter_array_write(hw, i, vlan_ctxt->vlan_hash_table[i]);
-	}
 
-	return;
 }
 
 static void sxe_vlan_extend_disable(struct rte_eth_dev *dev)
@@ -722,7 +699,6 @@ static void sxe_vlan_extend_disable(struct rte_eth_dev *dev)
 	ctrl &= ~SXE_EXTENDED_VLAN;
 	sxe_hw_ext_vlan_set(hw, ctrl);
 
-	return;
 }
 
 static void sxe_vlan_extend_enable(struct rte_eth_dev *dev)
@@ -741,35 +717,31 @@ static void sxe_vlan_extend_enable(struct rte_eth_dev *dev)
 	ctrl |= SXE_EXTENDED_VLAN;
 	sxe_hw_ext_vlan_set(hw, ctrl);
 
-	return;
 }
 
 static s32 sxe_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
 {
 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
-	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		sxe_vlan_strip_switch_set(dev);
-	}
 
 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
-		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 			sxe_vlan_filter_enable(dev);
-		} else {
+		else
 			sxe_vlan_filter_disable(dev);
-		}
 	}
 
 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
-		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
 			sxe_vlan_extend_enable(dev);
-		} else {
+		else
 			sxe_vlan_extend_disable(dev);
-		}
 	}
 
 	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
-		     " vlan offload set done", mask, rxmode->offloads);
+			 " vlan offload set done", mask, rxmode->offloads);
 
 	return 0;
 }
@@ -780,7 +752,7 @@ s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask)
 	s32 ret = 0;
 
 	if (vlan_mask & RTE_ETH_VLAN_STRIP_MASK) {
-		PMD_LOG_WARN(DRV, "vlan strip has been on, not support to set.");
+		PMD_LOG_WARN(DRV, "please set vlan strip before device start, not at this stage.");
 		ret = -1;
 		goto l_out;
 	}
@@ -788,7 +760,7 @@ s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask)
 
 	sxe_vlan_offload_configure(dev, mask);
 
-	PMD_LOG_INFO(DRV, "vlan offload mask:0x%d set done.", vlan_mask);
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%x set done.", vlan_mask);
 
 l_out:
 	return ret;
@@ -797,10 +769,10 @@ s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask)
 void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
 {
 	UNUSED(dev);
+	UNUSED(queue);
 	UNUSED(on);
-	PMD_LOG_WARN(DRV, "queue:%u vlan strip has been on, not support to set.", queue);
+	PMD_LOG_WARN(DRV, "please set vlan strip before device start, not at this stage.");
 
-	return;
 }
 
 void sxe_vlan_filter_configure(struct rte_eth_dev *dev)
@@ -811,7 +783,7 @@ void sxe_vlan_filter_configure(struct rte_eth_dev *dev)
 	u32 vlan_ctl;
 
 	vlan_mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
-		    RTE_ETH_VLAN_EXTEND_MASK;
+			RTE_ETH_VLAN_EXTEND_MASK;
 	sxe_vlan_offload_configure(dev, vlan_mask);
 
 	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
@@ -821,6 +793,5 @@ void sxe_vlan_filter_configure(struct rte_eth_dev *dev)
 		LOG_DEBUG_BDF("vmdq mode enable vlan filter done.");
 	}
 
-	return;
 }
 
diff --git a/drivers/net/sxe/pf/sxe_filter.h b/drivers/net/sxe/pf/sxe_filter.h
index a541dce586..f7d147e492 100644
--- a/drivers/net/sxe/pf/sxe_filter.h
+++ b/drivers/net/sxe/pf/sxe_filter.h
@@ -19,14 +19,14 @@
 struct sxe_adapter;
 
 #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ADDR(x) ((u8*)(x))[0],((u8*)(x))[1], \
-			   ((u8*)(x))[2],((u8*)(x))[3], \
-			   ((u8*)(x))[4],((u8*)(x))[5]
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+			   ((u8 *)(x))[2], ((u8 *)(x))[3], \
+			   ((u8 *)(x))[4], ((u8 *)(x))[5]
 
 #define BYTE_BIT_NUM   8
 
-#define SXE_VLAN_STRIP_BITMAP_SIZE    \
-        RTE_ALIGN((SXE_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM)), \
+#define SXE_VLAN_STRIP_BITMAP_SIZE	\
+		RTE_ALIGN((SXE_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM)), \
 		sizeof(u32))
 
 struct sxe_vlan_context {
@@ -42,24 +42,24 @@ enum sxe_uc_addr_src_type {
 };
 
 struct sxe_uc_addr_table {
-	u8 rar_idx;         
-	u8 vf_idx;          
-	u8 type;            
-	u8 original_index;  
-	bool used;          
-	u8 addr[SXE_MAC_ADDR_LEN];  
+	u8 rar_idx;
+	u8 vf_idx;
+	u8 type;
+	u8 original_index;
+	bool used;
+	u8 addr[SXE_MAC_ADDR_LEN];
 };
 
 struct sxe_mac_filter_context {
-	struct rte_ether_addr def_mac_addr; 
-	struct rte_ether_addr cur_mac_addr; 
+	struct rte_ether_addr def_mac_addr;
+	struct rte_ether_addr cur_mac_addr;
 
 	struct rte_ether_addr fc_mac_addr;
 
-	u32 uta_used_count;            
-	u32 uta_hash_table[SXE_UTA_ENTRY_NUM_MAX]; 
+	u32 uta_used_count;
+	u32 uta_hash_table[SXE_UTA_ENTRY_NUM_MAX];
 
-	u32 mta_hash_table[SXE_MTA_ENTRY_NUM_MAX]; 
+	u32 mta_hash_table[SXE_MTA_ENTRY_NUM_MAX];
 	struct sxe_uc_addr_table *uc_addr_table;
 };
 
@@ -74,13 +74,13 @@ s32 sxe_allmulticast_enable(struct rte_eth_dev *dev);
 s32 sxe_allmulticast_disable(struct rte_eth_dev *dev);
 
 s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr,
-			     u32 rar_idx, u32 pool);
+				 struct rte_ether_addr *mac_addr,
+				 u32 rar_idx, u32 pool);
 
 void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 rar_idx);
 
 s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr);
+				 struct rte_ether_addr *mac_addr);
 
 s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mac_addr, u8 on);
@@ -94,7 +94,7 @@ s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
 s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on);
 
 s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
-		    enum rte_vlan_type vlan_type, u16 tpid);
+			enum rte_vlan_type vlan_type, u16 tpid);
 
 s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask);
 
diff --git a/drivers/net/sxe/pf/sxe_filter_ctrl.c b/drivers/net/sxe/pf/sxe_filter_ctrl.c
new file mode 100644
index 0000000000..3bf2453f70
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_filter_ctrl.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include <rte_malloc.h>
+#include <rte_flow_driver.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_fnav.h"
+#include "sxe_filter_ctrl.h"
+#include "sxe_offload.h"
+
+#define SXE_MIN_FIVETUPLE_PRIORITY 1
+#define SXE_MAX_FIVETUPLE_PRIORITY 7
+
+/* List node wrapping a user-supplied ntuple filter for the flow rule list. */
+struct sxe_ntuple_filter_ele {
+	TAILQ_ENTRY(sxe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+
+/* List node wrapping an ethertype filter for the flow rule list. */
+struct sxe_ethertype_filter_ele {
+	TAILQ_ENTRY(sxe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+
+/* List node wrapping a TCP SYN filter for the flow rule list. */
+struct sxe_eth_syn_filter_ele {
+	TAILQ_ENTRY(sxe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+
+/* List node wrapping a flow-navigator (fnav) rule for the flow rule list. */
+struct sxe_fnav_rule_ele {
+	TAILQ_ENTRY(sxe_fnav_rule_ele) entries;
+	struct sxe_fnav_rule filter_info;
+};
+
+/* List node wrapping an RSS filter configuration for the flow rule list. */
+struct sxe_rss_filter_ele {
+	TAILQ_ENTRY(sxe_rss_filter_ele) entries;
+	struct sxe_rss_filter filter_info;
+};
+
+/* An installed five-tuple filter: its hardware slot, match info and target queue. */
+struct sxe_fivetuple_filter {
+	TAILQ_ENTRY(sxe_fivetuple_filter) entries;
+	u16 index;	/* hardware FTQF slot index, assigned on add */
+	struct sxe_fivetuple_filter_info filter_info;
+	u16 queue;	/* destination rx queue */
+};
+
+static inline
+bool sxe_is_user_param_null(const struct rte_flow_item *pattern,
+				const struct rte_flow_action *actions,
+				const struct rte_flow_attr *attr,
+				struct rte_flow_error *error)
+{
+	/* Reject NULL pattern/actions/attr up front.
+	 * Returns true (and fills *error) when any of them is missing.
+	 */
+	if (pattern == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				NULL, "NULL pattern.");
+		PMD_LOG_ERR(DRV, "pattern is null, validate failed.");
+		return true;
+	}
+
+	if (actions == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				NULL, "NULL action.");
+		PMD_LOG_ERR(DRV, "action is null, validate failed.");
+		return true;
+	}
+
+	if (attr == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		PMD_LOG_ERR(DRV, "attribute is null, validate failed.");
+		return true;
+	}
+
+	return false;
+}
+
+static inline
+bool sxe_is_attribute_wrong(const struct rte_flow_attr *attr,
+					struct rte_flow_error *error)
+{
+	/* Validate flow attributes: only plain ingress rules are supported.
+	 * Returns true (and fills *error) when egress/transfer/group is set
+	 * or ingress is missing.
+	 */
+	bool ret = true;
+
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				attr, "Only support ingress.");
+		/* fix: log typo "only sopport ingrass" -> "only support ingress" */
+		PMD_LOG_ERR(DRV, "only support ingress, validate failed.");
+		goto l_out;
+	}
+
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				attr, "Not support egress.");
+		PMD_LOG_ERR(DRV, "not support egress, validate failed.");
+		goto l_out;
+	}
+
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "Not support transfer.");
+		PMD_LOG_ERR(DRV, "not support transfer, validate failed.");
+		goto l_out;
+	}
+
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				attr, "Not support group.");
+		PMD_LOG_ERR(DRV, "not support group, validate failed.");
+		goto l_out;
+	}
+
+	ret = false;
+
+l_out:
+	return ret;
+}
+
+static inline
+bool sxe_is_port_mask_wrong(u16 src_port_mask, u16 dst_port_mask,
+					const struct rte_flow_item *item,
+					struct rte_flow_error *error)
+{
+	/* Each L4 port mask must be all-ones (match) or all-zero (ignore);
+	 * partial masks are rejected.
+	 */
+	bool src_bad = (src_port_mask != 0 && src_port_mask != UINT16_MAX);
+	bool dst_bad = (dst_port_mask != 0 && dst_port_mask != UINT16_MAX);
+
+	if (!src_bad && !dst_bad)
+		return false;
+
+	rte_flow_error_set(error, EINVAL,
+		RTE_FLOW_ERROR_TYPE_ITEM,
+		item, "Not supported by ntuple filter");
+	PMD_LOG_WARN(DRV, "mask--src_port[0x%x], dst_port[0x%x], validate failed.",
+			src_port_mask, dst_port_mask);
+	return true;
+}
+
+static inline
+const struct rte_flow_item *sxe_next_no_void_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *current)
+{
+	/* Return the next non-VOID item after current, or the first
+	 * non-VOID item when current is NULL.
+	 */
+	const struct rte_flow_item *it;
+
+	for (it = current ? current + 1 : &pattern[0];
+		 it->type == RTE_FLOW_ITEM_TYPE_VOID; it++)
+		;
+
+	return it;
+}
+
+static inline
+const struct rte_flow_action *sxe_next_no_void_action(
+		const struct rte_flow_action actions[],
+		const struct rte_flow_action *current)
+{
+	/* Return the next non-VOID action after current, or the first
+	 * non-VOID action when current is NULL.
+	 */
+	const struct rte_flow_action *act;
+
+	for (act = current ? current + 1 : &actions[0];
+		 act->type == RTE_FLOW_ACTION_TYPE_VOID; act++)
+		;
+
+	return act;
+}
+
+static inline
+const struct rte_flow_item *sxe_next_no_fuzzy_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *current)
+{
+	/* Like sxe_next_no_void_pattern(), but additionally skips FUZZY items. */
+	const struct rte_flow_item *it =
+		sxe_next_no_void_pattern(pattern, current);
+
+	while (it->type == RTE_FLOW_ITEM_TYPE_FUZZY)
+		it = sxe_next_no_void_pattern(pattern, it);
+
+	return it;
+}
+
+static u8
+sxe_flow_l4type_convert(enum rte_flow_item_type protocol)
+{
+	/* Map an rte_flow L4 item type to its IP protocol number.
+	 * Returns 0 for any item type that is not TCP/UDP/SCTP.
+	 */
+	u8 proto = 0;	/* fix: was "U8", an undefined type (typo for u8) */
+
+	switch (protocol) {
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		proto = IPPROTO_TCP;
+		break;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		proto = IPPROTO_UDP;
+		break;
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+		proto = IPPROTO_SCTP;
+		break;
+	default:
+		PMD_LOG_WARN(DRV, "flow l4type convert failed.");
+	}
+
+	return proto;
+}
+
+/* Parse the action list of a flow rule: exactly one QUEUE action followed
+ * by END is accepted. On success *queue_index holds the target rx queue and
+ * 0 is returned; on failure *error is filled and -rte_errno is returned
+ * (rte_flow_error_set() has already stored the errno).
+ */
+static s32 sxe_filter_action_parse(struct rte_eth_dev *dev,
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error,
+					u16 *queue_index)
+{
+	const struct rte_flow_action *act;
+
+	/* First real action must be QUEUE. */
+	act = sxe_next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	*queue_index =
+		((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* The queue must exist on this port. */
+	if (*queue_index >= dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			act, "queue index much too big");
+		PMD_LOG_ERR(DRV, "queue index check wrong, validate failed.");
+		goto l_out;
+	}
+
+	/* No further action may follow the QUEUE action. */
+	act = sxe_next_no_void_action(actions, act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "filter action parse success.");
+	rte_errno = 0;	/* success: make -rte_errno return 0 */
+
+l_out:
+	return -rte_errno;
+}
+
+/* Parse a five-tuple (ntuple) flow pattern of the shape
+ * [ETH] [VLAN] IPV4 [TCP|UDP|SCTP] END and fill *filter with the
+ * addresses/ports/protocol plus their masks. The optional ETH and VLAN
+ * items must be fully empty (no MAC/VLAN matching is supported).
+ * Returns 0 on success; on failure fills *error and returns -rte_errno.
+ */
+static s32 sxe_fivetuple_filter_pattern_parse(const struct rte_flow_item pattern[],
+						struct rte_eth_ntuple_filter *filter,
+						struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec;
+	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
+
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
+	/* Pattern must open with ETH or IPV4. */
+	item = sxe_next_no_void_pattern(pattern, NULL);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+		item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	/* Optional ETH item: accepted only when spec/mask are absent or all-zero. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = item->spec;
+		eth_mask = item->mask;
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if ((item->spec || item->mask) &&
+			((item->spec && memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth))) ||
+			(item->mask && memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth))))) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "item spec[%p], item mask[%p], validate failed.",
+						item->spec, item->mask);
+			goto l_out;
+		}
+
+		item = sxe_next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* Optional VLAN item: same rule, spec/mask must be absent or all-zero. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = item->spec;
+		vlan_mask = item->mask;
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if ((item->spec || item->mask) &&
+			((item->spec && memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan))) ||
+			(item->mask && memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan))))) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "item spec[%p], item mask[%p], validate failed.",
+						item->spec, item->mask);
+			goto l_out;
+		}
+
+		item = sxe_next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* IPV4 item: only src/dst addr and next_proto_id may be matched,
+	 * each with an all-ones or all-zero mask.
+	 */
+	if (item->mask) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple spec");
+			PMD_LOG_WARN(DRV, "item spec is null, validate failed.");
+			goto l_out;
+		}
+
+		ipv4_mask = item->mask;
+		/* Macro presumably rejects masks on unsupported ipv4 header
+		 * fields (version/ttl/etc.) -- defined in project headers.
+		 */
+		if (SXE_5TUPLE_IPV4_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+		if ((ipv4_mask->hdr.src_addr != 0 &&
+			ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+			(ipv4_mask->hdr.dst_addr != 0 &&
+			ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+			(ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+			ipv4_mask->hdr.next_proto_id != 0)) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "mask--src_addr[0x%x], dst_addr[0x%x], next_proto_id[0x%x], validate failed.",
+				ipv4_mask->hdr.src_addr, ipv4_mask->hdr.dst_addr,
+				ipv4_mask->hdr.next_proto_id);
+			goto l_out;
+		}
+
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+		ipv4_spec = item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
+
+	/* L4 item (or END when only IP matching was requested). */
+	item = sxe_next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+		item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+		item->type != RTE_FLOW_ITEM_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	/* Bare L4 item (no spec/mask): derive the protocol from the item type. */
+	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		if (!filter->proto_mask) {
+			filter->proto_mask = UINT8_MAX;
+			filter->proto = sxe_flow_l4type_convert(item->type);
+		}
+		PMD_LOG_DEBUG(DRV, "TCP/UDP/SCTP item spec and mask is null, to check action.");
+		rte_errno = 0;
+		goto l_out;
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+		(!item->spec || !item->mask)) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+		PMD_LOG_WARN(DRV, "spec or mask is null, validate failed.");
+		goto l_out;
+	}
+
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+		goto l_out;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		/* An explicit IP protocol, if given, must agree with the L4 item. */
+		if (filter->proto != IPPROTO_TCP &&
+			filter->proto_mask != 0) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			/* fix: log typo "protocal" -> "protocol" */
+			PMD_LOG_WARN(DRV, "protocol id is not TCP, please check.");
+			goto l_out;
+		}
+
+		tcp_mask = item->mask;
+		if (SXE_5TUPLE_TCP_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+
+		if (sxe_is_port_mask_wrong(tcp_mask->hdr.src_port,
+				tcp_mask->hdr.dst_port, item, error)) {
+			PMD_LOG_WARN(DRV, "port mask set wrong, validate failed.");
+			goto l_out;
+		}
+
+		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+		filter->src_port_mask  = tcp_mask->hdr.src_port;
+
+		tcp_spec = item->spec;
+		filter->dst_port  = tcp_spec->hdr.dst_port;
+		filter->src_port  = tcp_spec->hdr.src_port;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		if (filter->proto != IPPROTO_UDP &&
+			filter->proto_mask != 0) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			/* fix: log typo "protocal" -> "protocol" */
+			PMD_LOG_WARN(DRV, "protocol id is not UDP, please check.");
+			goto l_out;
+		}
+
+		udp_mask = item->mask;
+		if (SXE_5TUPLE_UDP_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+
+		if (sxe_is_port_mask_wrong(udp_mask->hdr.src_port,
+				udp_mask->hdr.dst_port, item, error)) {
+			PMD_LOG_WARN(DRV, "port mask set wrong, validate failed.");
+			goto l_out;
+		}
+
+		filter->dst_port_mask = udp_mask->hdr.dst_port;
+		filter->src_port_mask = udp_mask->hdr.src_port;
+
+		udp_spec = item->spec;
+		filter->dst_port = udp_spec->hdr.dst_port;
+		filter->src_port = udp_spec->hdr.src_port;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+		if (filter->proto != IPPROTO_SCTP &&
+			filter->proto_mask != 0) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			/* fix: log typo "protocal" -> "protocol" */
+			PMD_LOG_WARN(DRV, "protocol id is not SCTP, please check.");
+			goto l_out;
+		}
+
+		sctp_mask = item->mask;
+		if (SXE_5TUPLE_SCTP_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+
+		if (sxe_is_port_mask_wrong(sctp_mask->hdr.src_port,
+				sctp_mask->hdr.dst_port, item, error)) {
+			PMD_LOG_WARN(DRV, "port mask set wrong, validate failed.");
+			goto l_out;
+		}
+
+		filter->dst_port_mask = sctp_mask->hdr.dst_port;
+		filter->src_port_mask = sctp_mask->hdr.src_port;
+
+		sctp_spec = item->spec;
+		filter->dst_port = sctp_spec->hdr.dst_port;
+		filter->src_port = sctp_spec->hdr.src_port;
+	} else {
+		/* END item: IP-only filter, nothing more to parse. */
+		rte_errno = 0;
+		goto l_out;
+	}
+
+	/* The L4 item must be the last one before END. */
+	item = sxe_next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "fivetuple filter pattern parse success.");
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+/* Validate and translate a complete flow rule (attr + pattern + actions)
+ * into an rte_eth_ntuple_filter. Priorities outside [1, 7] are clamped to 1.
+ * Returns 0 on success; on failure zeroes *filter, fills *error and returns
+ * -rte_errno.
+ */
+static s32 sxe_fivetuple_filter_parse(struct rte_eth_dev *dev,
+				const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_ntuple_filter *filter,
+				struct rte_flow_error *error)
+{
+	s32 ret = 0;
+	u16 queue_index = 0;
+
+	if (sxe_is_user_param_null(pattern, actions, attr, error)) {
+		PMD_LOG_ERR(DRV, "user param is null, validate failed.");
+		goto parse_failed;
+	}
+
+	ret = sxe_fivetuple_filter_pattern_parse(pattern, filter, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "pattern check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	ret = sxe_filter_action_parse(dev, actions, error, &queue_index);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto parse_failed;
+	} else {
+		filter->queue = queue_index;
+	}
+
+	if (sxe_is_attribute_wrong(attr, error)) {
+		PMD_LOG_ERR(DRV, "attribute check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/* Priorities above 16 bits are rejected outright... */
+	if (attr->priority > 0xFFFF) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				attr, "Error priority.");
+		PMD_LOG_ERR(DRV, "priority check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/* ...while out-of-range-but-representable ones are clamped to 1. */
+	filter->priority = (u16)attr->priority;
+	if (attr->priority < SXE_MIN_FIVETUPLE_PRIORITY ||
+		attr->priority > SXE_MAX_FIVETUPLE_PRIORITY) {
+		PMD_LOG_WARN(DRV, "priority[%d] is out of 1~7, set to 1.", attr->priority);
+		filter->priority = SXE_MIN_FIVETUPLE_PRIORITY;
+	}
+
+	PMD_LOG_DEBUG(DRV, "five tuple filter fit, validate success!!");
+	rte_errno = 0;
+	goto l_out;
+
+parse_failed:
+	/* Leave no partially-filled filter behind on any failure path. */
+	memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	PMD_LOG_WARN(DRV, "five tuple filter, validate failed.");
+l_out:
+	return -rte_errno;
+}
+
+static enum sxe_fivetuple_protocol
+sxe_protocol_type_convert(u8 protocol_value)
+{
+	/* Map an IP protocol number onto the driver's fivetuple protocol
+	 * enum; anything other than TCP/UDP/SCTP maps to NONE.
+	 */
+	enum sxe_fivetuple_protocol type = SXE_FILTER_PROTOCOL_NONE;
+
+	if (protocol_value == IPPROTO_TCP)
+		type = SXE_FILTER_PROTOCOL_TCP;
+	else if (protocol_value == IPPROTO_UDP)
+		type = SXE_FILTER_PROTOCOL_UDP;
+	else if (protocol_value == IPPROTO_SCTP)
+		type = SXE_FILTER_PROTOCOL_SCTP;
+
+	return type;
+}
+
+/* Convert a generic ntuple filter into the driver's fivetuple filter info.
+ * For each field, a full (all-ones) user mask copies the value and clears
+ * the info mask bit, while a zero user mask sets the info mask bit to 1 --
+ * mask bit 1 appears to mean "ignore this field" in hardware (matches the
+ * full/zero mapping; confirm against sxe_hw_fivetuple_filter_add).
+ * Any partial mask is rejected with -EINVAL.
+ */
+static s32
+sxe_ntuple_filter_to_fivetuple(struct rte_eth_ntuple_filter *ntuple_filter,
+			struct sxe_fivetuple_filter_info *filter_info)
+{
+	s32 ret = -EINVAL;
+
+	switch (ntuple_filter->dst_ip_mask) {
+	case UINT32_MAX:
+		filter_info->dst_ip_mask = 0;
+		filter_info->dst_ip = ntuple_filter->dst_ip;
+		break;
+	case 0:
+		filter_info->dst_ip_mask = 1;
+		break;
+	default:
+		PMD_LOG_ERR(DRV, "invalid dst_ip mask.");
+		goto l_out;
+	}
+
+	switch (ntuple_filter->src_ip_mask) {
+	case UINT32_MAX:
+		filter_info->src_ip_mask = 0;
+		filter_info->src_ip = ntuple_filter->src_ip;
+		break;
+	case 0:
+		filter_info->src_ip_mask = 1;
+		break;
+	default:
+		PMD_LOG_ERR(DRV, "invalid src_ip mask.");
+		goto l_out;
+	}
+
+	switch (ntuple_filter->dst_port_mask) {
+	case UINT16_MAX:
+		filter_info->dst_port_mask = 0;
+		filter_info->dst_port = ntuple_filter->dst_port;
+		break;
+	case 0:
+		filter_info->dst_port_mask = 1;
+		break;
+	default:
+		PMD_LOG_ERR(DRV, "invalid dst_port mask.");
+		goto l_out;
+	}
+
+	switch (ntuple_filter->src_port_mask) {
+	case UINT16_MAX:
+		filter_info->src_port_mask = 0;
+		filter_info->src_port = ntuple_filter->src_port;
+		break;
+	case 0:
+		filter_info->src_port_mask = 1;
+		break;
+	default:
+		PMD_LOG_ERR(DRV, "invalid src_port mask.");
+		goto l_out;
+	}
+
+	switch (ntuple_filter->proto_mask) {
+	case UINT8_MAX:
+		filter_info->proto_mask = 0;
+		filter_info->protocol =
+			sxe_protocol_type_convert(ntuple_filter->proto);
+		break;
+	case 0:
+		filter_info->proto_mask = 1;
+		break;
+	default:
+		PMD_LOG_ERR(DRV, "invalid protocol mask.");
+		goto l_out;
+	}
+
+	filter_info->priority = (u8)ntuple_filter->priority;
+	ret = 0;
+
+l_out:
+	return ret;
+}
+
+static struct sxe_fivetuple_filter *
+sxe_fivetuple_filter_lookup(struct sxe_fivetuple_filter_list *filter_list,
+			struct sxe_fivetuple_filter_info *filter_info)
+{
+	/* Linear scan for an entry whose filter_info matches byte-for-byte;
+	 * NULL when no match exists.
+	 */
+	struct sxe_fivetuple_filter *entry;
+
+	TAILQ_FOREACH(entry, filter_list, entries) {
+		if (!memcmp(filter_info, &entry->filter_info,
+			sizeof(struct sxe_fivetuple_filter_info)))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/* Claim the first free slot in the fivetuple bitmap, append the filter to
+ * the software list and program the hardware entry.
+ * Returns 0 on success or -ENOSYS when all slots are taken.
+ * NOTE(review): -ENOSYS is an unusual code for "table full"; -ENOSPC would
+ * be more conventional -- confirm whether callers depend on -ENOSYS.
+ */
+static s32
+sxe_fivetuple_filter_add(struct rte_eth_dev *dev,
+			struct sxe_fivetuple_filter *filter)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	struct sxe_fivetuple_node_info filter_node_info;
+	s32 i, index, shift;
+	s32 ret = 0;
+
+	/* Find a clear bit in the per-slot usage bitmap (u32 words). */
+	for (i = 0; i < SXE_MAX_FTQF_FILTERS; i++) {
+		index = i / (sizeof(u32) * BYTE_BIT_NUM);
+		shift = i % (sizeof(u32) * BYTE_BIT_NUM);
+		if (!(filter_ctxt->fivetuple_mask[index] & (1 << shift))) {
+			filter_ctxt->fivetuple_mask[index] |= 1 << shift;
+			filter->index = i;
+			TAILQ_INSERT_TAIL(&filter_ctxt->fivetuple_list, filter, entries);
+			break;
+		}
+	}
+	if (i >= SXE_MAX_FTQF_FILTERS) {
+		PMD_LOG_ERR(DRV, "fivetuple filters are full.");
+		ret = -ENOSYS;
+		goto l_out;
+	}
+
+	/* Mirror the slot, queue and match info into the hw descriptor. */
+	filter_node_info.index = filter->index;
+	filter_node_info.queue = filter->queue;
+	memcpy(&filter_node_info.filter_info, &filter->filter_info,
+			sizeof(struct sxe_fivetuple_filter_info));
+	sxe_hw_fivetuple_filter_add(dev, &filter_node_info);
+
+l_out:
+	return ret;
+}
+
+static void
+sxe_fivetuple_filter_delete(struct rte_eth_dev *dev,
+			struct sxe_fivetuple_filter *filter)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	u16 idx = filter->index;
+
+	/* Release the slot bit, unlink the node and free it. */
+	filter_ctxt->fivetuple_mask[idx / (sizeof(u32) * BYTE_BIT_NUM)] &=
+				~(1 << (idx % (sizeof(u32) * BYTE_BIT_NUM)));
+	TAILQ_REMOVE(&filter_ctxt->fivetuple_list, filter, entries);
+	rte_free(filter);
+
+	/* Drop the corresponding hardware entry last. */
+	sxe_hw_fivetuple_filter_del(hw, idx);
+}
+
+/* Add or remove (per 'add') a fivetuple filter derived from ntuple_filter.
+ * Adding an existing filter returns -EEXIST; removing a missing one
+ * returns -ENOENT. Returns 0 on success, negative errno otherwise.
+ */
+static s32 sxe_fivetuple_filter_configure(struct rte_eth_dev *dev,
+				struct rte_eth_ntuple_filter *ntuple_filter,
+				bool add)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	struct sxe_fivetuple_filter_info filter_fivetuple_info;
+	struct sxe_fivetuple_filter *filter;
+	s32 ret = 0;
+
+	memset(&filter_fivetuple_info, 0, sizeof(struct sxe_fivetuple_filter_info));
+	ret = sxe_ntuple_filter_to_fivetuple(ntuple_filter, &filter_fivetuple_info);
+	if (ret < 0)
+		goto l_out;
+
+	/* Duplicate check against the software list. */
+	filter = sxe_fivetuple_filter_lookup(&filter_ctxt->fivetuple_list,
+					  &filter_fivetuple_info);
+	if (filter != NULL && add) {
+		PMD_LOG_ERR(DRV, "filter exists, not support add.");
+		ret = -EEXIST;
+		goto l_out;
+	}
+	if (filter == NULL && !add) {
+		PMD_LOG_ERR(DRV, "filter doesn't exist, not support delete.");
+		ret = -ENOENT;
+		goto l_out;
+	}
+
+	if (add) {
+		filter = rte_zmalloc("sxe_fivetuple_filter",
+				sizeof(struct sxe_fivetuple_filter), 0);
+		if (filter == NULL) {
+			PMD_LOG_ERR(DRV, "fivetuple filter malloc failed.");
+			ret = -ENOMEM;
+			goto l_out;
+		}
+		rte_memcpy(&filter->filter_info,
+				&filter_fivetuple_info,
+				sizeof(struct sxe_fivetuple_filter_info));
+		filter->queue = ntuple_filter->queue;
+
+		ret = sxe_fivetuple_filter_add(dev, filter);
+		if (ret < 0) {
+			PMD_LOG_ERR(DRV, "fivetuple filter add failed.");
+			/* Node was never linked on failure, so free it here. */
+			rte_free(filter);
+			goto l_out;
+		}
+	} else {
+		/* Delete path: unlinks and frees the node itself. */
+		sxe_fivetuple_filter_delete(dev, filter);
+	}
+
+l_out:
+	return ret;
+}
+
+void sxe_fivetuple_filter_uninit(struct rte_eth_dev *dev)
+{
+	/* Release every five-tuple filter node and reset the slot bitmap. */
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *ctxt = &adapter->filter_ctxt;
+	struct sxe_fivetuple_filter *node;
+
+	for (node = TAILQ_FIRST(&ctxt->fivetuple_list); node != NULL;
+		 node = TAILQ_FIRST(&ctxt->fivetuple_list)) {
+		TAILQ_REMOVE(&ctxt->fivetuple_list, node, entries);
+		rte_free(node);
+	}
+
+	/* All hardware slots are free again. */
+	memset(ctxt->fivetuple_mask, 0,
+		   sizeof(u32) * SXE_5TUPLE_ARRAY_SIZE);
+}
+
+/* Parse the pattern of an ethertype flow rule.
+ *
+ * Accepted pattern: a single ETH item (no range) whose MAC address masks
+ * are all-zero and whose ether-type mask is fully set, followed by END.
+ * IPv4/IPv6 ether types are rejected (handled by other filter types).
+ *
+ * On success filter->ether_type is filled and 0 is returned.  On failure
+ * rte_errno is set through rte_flow_error_set() and -rte_errno is
+ * returned; the error paths rely on that side effect.
+ */
+static s32
+sxe_ethertype_filter_pattern_parse(const struct rte_flow_item pattern[],
+					struct rte_eth_ethertype_filter *filter,
+					struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+
+	/* First non-void item must be ETH. */
+	item = sxe_next_no_void_pattern(pattern, NULL);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ethertype filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	/* Ranges (item->last) are not supported. */
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+		goto l_out;
+	}
+
+	if (!item->spec || !item->mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ethertype filter");
+		PMD_LOG_WARN(DRV, "spec[%p] or mask[%p] is null.", item->spec, item->mask);
+		goto l_out;
+	}
+
+	eth_spec = item->spec;
+	eth_mask = item->mask;
+
+	/* MAC matching is not supported: both address masks must be zero. */
+	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+		!rte_is_zero_ether_addr(&eth_mask->dst)) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ether address mask");
+		PMD_LOG_WARN(DRV, "mac src or dst mask is not zero.");
+		goto l_out;
+	}
+
+	/* Only an exact ether-type match is supported (full 16-bit mask). */
+	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ethertype mask");
+		PMD_LOG_WARN(DRV, "ethertype mask[0x%x] is wrong.", eth_mask->type);
+		goto l_out;
+	}
+	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL, "IPv4/IPv6 not supported by ethertype filter");
+		PMD_LOG_WARN(DRV, "not support IPv4 and IPv6 ethertype, validate failed.");
+		goto l_out;
+	}
+
+	/* The ETH item must be the last non-void item. */
+	item = sxe_next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ethertype filter.");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "ethertype filter pattern parse success.");
+	/* Clear any stale rte_errno so the success path returns 0. */
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+/* Parse and validate an ethertype flow rule.
+ *
+ * Validates user parameters, the pattern, the actions (filling in
+ * filter->queue) and the attributes; a non-zero priority is rejected.
+ *
+ * Return: 0 on success; otherwise rte_errno is set by the failing helper
+ * or by rte_flow_error_set() and -rte_errno is returned, with *filter
+ * zeroed out.
+ */
+static s32
+sxe_ethertype_filter_parse(struct rte_eth_dev *dev,
+				const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_ethertype_filter *filter,
+				struct rte_flow_error *error)
+{
+	s32 ret;
+	u16 queue;
+
+	if (sxe_is_user_param_null(pattern, actions, attr, error)) {
+		PMD_LOG_ERR(DRV, "user param is null, validate failed.\n");
+		goto parse_failed;
+	}
+
+	ret = sxe_ethertype_filter_pattern_parse(pattern, filter, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "pattern check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/* The only supported action is steering to a queue. */
+	ret = sxe_filter_action_parse(dev, actions, error, &queue);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto parse_failed;
+	} else {
+		filter->queue = queue;
+	}
+
+	if (sxe_is_attribute_wrong(attr, error)) {
+		PMD_LOG_ERR(DRV, "user attribute is wrong, validate failed.\n");
+		goto parse_failed;
+	}
+	/* Ethertype filters carry no priority; reject any non-zero value. */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				attr, "Not support priority.");
+		PMD_LOG_ERR(DRV, "not support priority, validate failed.\n");
+		goto parse_failed;
+	}
+
+	PMD_LOG_DEBUG(DRV, "ethertype filter fit, validate success!!");
+	rte_errno = 0;
+	goto l_out;
+
+parse_failed:
+	memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	PMD_LOG_WARN(DRV, "ethertype filter, validate failed.");
+l_out:
+	return -rte_errno;
+
+}
+
+static s32
+sxe_ethertype_filter_lookup(struct sxe_filter_context *filter_ctxt,
+				u16 ethertype)
+{
+	/* Find the occupied slot holding @ethertype.
+	 *
+	 * Returns the slot index, or -1 when no occupied slot matches.
+	 */
+	s32 slot;
+
+	for (slot = 0; slot < SXE_MAX_ETQF_FILTERS; slot++) {
+		if (!(filter_ctxt->ethertype_mask & (1 << slot)))
+			continue;
+
+		if (filter_ctxt->ethertype_filters[slot].ethertype == ethertype)
+			return slot;
+	}
+
+	return -1;
+}
+
+static s32
+sxe_ethertype_filter_insert(struct sxe_filter_context *filter_ctxt,
+				  struct sxe_ethertype_filter *ethertype_filter)
+{
+	/* Store @ethertype_filter in the first free slot.
+	 *
+	 * Returns the chosen slot index, or -1 when all slots are in use.
+	 */
+	s32 slot;
+
+	for (slot = 0; slot < SXE_MAX_ETQF_FILTERS; slot++) {
+		if (filter_ctxt->ethertype_mask & (1 << slot))
+			continue;
+
+		/* Claim the slot, then record the filter settings. */
+		filter_ctxt->ethertype_mask |= 1 << slot;
+		filter_ctxt->ethertype_filters[slot].ethertype =
+			ethertype_filter->ethertype;
+		filter_ctxt->ethertype_filters[slot].queue =
+			ethertype_filter->queue;
+		filter_ctxt->ethertype_filters[slot].conf =
+			ethertype_filter->conf;
+		return slot;
+	}
+
+	return -1;
+}
+
+/* Release the ethertype filter slot @index.
+ *
+ * Return: the freed slot index on success, -1 when @index is out of
+ * range.
+ *
+ * Bug fix: the previous code stored -1 into the u8 parameter and then
+ * returned it, so the "error" return wrapped to 255 — a positive value
+ * that defeated the caller's `ret < 0` check.  Track the result in a
+ * signed local instead.
+ */
+static s32
+sxe_ethertype_filter_remove(struct sxe_filter_context *filter_ctxt,
+						u8 index)
+{
+	s32 ret = index;
+
+	if (index >= SXE_MAX_ETQF_FILTERS) {
+		ret = -1;
+		goto l_out;
+	}
+
+	/* Clear the allocation bit and scrub the slot contents. */
+	filter_ctxt->ethertype_mask &= ~(1 << index);
+	filter_ctxt->ethertype_filters[index].ethertype = 0;
+	filter_ctxt->ethertype_filters[index].queue = 0;
+	filter_ctxt->ethertype_filters[index].conf = false;
+
+l_out:
+	return ret;
+}
+
+/* Add or delete an ethertype filter in software state and hardware.
+ *
+ * @dev:    ethernet device owning the filter context
+ * @filter: ethertype filter description (ether_type, queue)
+ * @add:    true to add, false to delete
+ *
+ * Return: 0 on success; -EEXIST when adding a duplicate ethertype,
+ * -ENOENT when deleting a missing one, -ENOSPC when all slots are used,
+ * -ENOSYS when slot removal fails.
+ */
+static s32
+sxe_ethertype_filter_configure(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter,
+			bool add)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	s32 ret;
+	s32 result = 0;
+	struct sxe_ethertype_filter ethertype_filter;
+
+	/* ret doubles as the slot index when the lookup succeeds. */
+	ret = sxe_ethertype_filter_lookup(filter_ctxt, filter->ether_type);
+	if (ret >= 0 && add) {
+		PMD_LOG_ERR(DRV, "ethertype (0x%04x) filter exists.",
+				filter->ether_type);
+		result = -EEXIST;
+		goto l_out;
+	}
+	if (ret < 0 && !add) {
+		PMD_LOG_ERR(DRV, "ethertype (0x%04x) filter doesn't exist.",
+				filter->ether_type);
+		result = -ENOENT;
+		goto l_out;
+	}
+
+	if (add) {
+		ethertype_filter.ethertype = filter->ether_type;
+		ethertype_filter.queue = filter->queue;
+		ethertype_filter.conf = false;
+
+		ret = sxe_ethertype_filter_insert(filter_ctxt,
+							&ethertype_filter);
+		if (ret < 0) {
+			PMD_LOG_ERR(DRV, "ethertype filters are full.");
+			result = -ENOSPC;
+			goto l_out;
+		}
+
+		/* ret is the slot claimed by the insert above. */
+		sxe_hw_ethertype_filter_add(hw, ret, filter->ether_type, filter->queue);
+	} else {
+		/* NOTE(review): remove only fails for an out-of-range index,
+		 * which cannot happen here since ret came from a successful
+		 * lookup — this branch is defensive.
+		 */
+		ret = sxe_ethertype_filter_remove(filter_ctxt, (u8)ret);
+		if (ret < 0) {
+			PMD_LOG_ERR(DRV, "ethertype filters remove failed.");
+			result = -ENOSYS;
+			goto l_out;
+		}
+
+		sxe_hw_ethertype_filter_del(hw, ret);
+	}
+
+l_out:
+	return result;
+}
+
+/* Parse the pattern of a TCP SYN flow rule.
+ *
+ * Accepted pattern: [ETH] [IPV4|IPV6] TCP END, where the optional ETH and
+ * IP items must carry no spec/mask (the filter matches only the SYN flag)
+ * and the TCP item must match exactly the SYN flag.
+ *
+ * Return: 0 on success; on failure rte_errno is set through
+ * rte_flow_error_set() and -rte_errno is returned.
+ */
+static s32
+sxe_syn_filter_pattern_parse(const struct rte_flow_item pattern[],
+					struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+
+	/* The pattern may start at any of ETH / IPV4 / IPV6 / TCP. */
+	item = sxe_next_no_void_pattern(pattern, NULL);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+		item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+		item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by syn filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+		goto l_out;
+	}
+
+	/* Optional ETH item: must be an empty match, followed by IP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (item->spec || item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid SYN address mask");
+			PMD_LOG_WARN(DRV, "eth spec and mask should be NULL, validate failed.");
+			goto l_out;
+		}
+
+		item = sxe_next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by syn filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* Optional IP item: must be an empty match, followed by TCP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+		item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		if (item->spec || item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid SYN mask");
+			PMD_LOG_WARN(DRV, "ip spec and mask should be null, validate failed.");
+			goto l_out;
+		}
+
+		item = sxe_next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by syn filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* TCP item: spec and mask are mandatory here. */
+	if (!item->spec || !item->mask) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid SYN mask");
+		PMD_LOG_WARN(DRV, "tcp spec or mask is null, validate failed.");
+		goto l_out;
+	}
+
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+		goto l_out;
+	}
+
+	/* Only an exact SYN-flag match is supported; SXE_SYN_TCP_MASK
+	 * rejects any other TCP header field being masked.
+	 */
+	tcp_spec = item->spec;
+	tcp_mask = item->mask;
+	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
+		tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
+		SXE_SYN_TCP_MASK) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by syn filter");
+		PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+		goto l_out;
+	}
+
+	/* TCP must be the last non-void item. */
+	item = sxe_next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by syn filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "syn filter pattern parse success.");
+	/* Clear any stale rte_errno so the success path returns 0. */
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+/* Parse and validate a TCP SYN flow rule.
+ *
+ * Validates user parameters, the pattern, the actions (filling in
+ * filter->queue) and the attributes.  A priority of 0 selects low
+ * priority, UINT32_MAX selects high priority, any other value is
+ * rejected.
+ *
+ * Return: 0 on success; otherwise rte_errno is set by the failing helper
+ * or by rte_flow_error_set() and -rte_errno is returned, with *filter
+ * zeroed out.
+ */
+static s32
+sxe_syn_filter_parse(struct rte_eth_dev *dev,
+				const struct rte_flow_attr *attr,
+				const struct rte_flow_item pattern[],
+				const struct rte_flow_action actions[],
+				struct rte_eth_syn_filter *filter,
+				struct rte_flow_error *error)
+{
+	s32 ret;
+	u16 queue;
+
+	if (sxe_is_user_param_null(pattern, actions, attr, error)) {
+		PMD_LOG_ERR(DRV, "user param is null, validate failed.\n");
+		goto parse_failed;
+	}
+
+	ret = sxe_syn_filter_pattern_parse(pattern, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "pattern check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	ret = sxe_filter_action_parse(dev, actions, error, &queue);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto parse_failed;
+	} else {
+		filter->queue = queue;
+	}
+
+	if (sxe_is_attribute_wrong(attr, error)) {
+		PMD_LOG_ERR(DRV, "user attribute is wrong, validate failed.\n");
+		goto parse_failed;
+	}
+	if (!attr->priority) {
+		filter->hig_pri = 0;
+	} else if (attr->priority == (u32)~0U) {
+		filter->hig_pri = 1;
+	} else {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+			attr, "Not support priority.");
+		PMD_LOG_ERR(DRV, "priority check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	PMD_LOG_DEBUG(DRV, "syn filter fit, validate success!!");
+	rte_errno = 0;
+	goto l_out;
+
+parse_failed:
+	memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+	/* Log message typo fixed: "sys filter" -> "syn filter". */
+	PMD_LOG_WARN(DRV, "syn filter, validate failed.");
+l_out:
+	return -rte_errno;
+}
+
+static s32
+sxe_syn_filter_configure(struct rte_eth_dev *dev,
+			struct rte_eth_syn_filter *filter,
+			bool add)
+{
+	/* Enable or disable the single TCP SYN filter.
+	 *
+	 * Returns 0 on success, -EINVAL when enabling an already active
+	 * filter, -ENOENT when disabling an inactive one.
+	 */
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_filter_context *ctxt = &adapter->filter_ctxt;
+	struct sxe_syn_filter *syn = &ctxt->syn_filter;
+
+	if (add) {
+		if (syn->is_syn_enable) {
+			PMD_LOG_WARN(DRV, "syn filter is on, no need to reopen.");
+			return -EINVAL;
+		}
+
+		/* Program hardware first, then mirror the state. */
+		sxe_hw_syn_filter_add(hw, filter->queue, filter->hig_pri);
+		syn->is_syn_enable = true;
+		syn->queue = filter->queue;
+		syn->priority = filter->hig_pri;
+		return 0;
+	}
+
+	if (!syn->is_syn_enable) {
+		PMD_LOG_WARN(DRV, "syn filter is off, not support to reclose.");
+		return -ENOENT;
+	}
+
+	sxe_hw_syn_filter_del(hw);
+	syn->is_syn_enable = false;
+
+	return 0;
+}
+
+static void
+sxe_syn_filter_delete(struct rte_eth_dev *dev)
+{
+	/* Tear down the SYN filter if it is currently active; otherwise
+	 * do nothing.
+	 */
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *ctxt = &adapter->filter_ctxt;
+	struct sxe_syn_filter *syn = &ctxt->syn_filter;
+
+	if (!syn->is_syn_enable)
+		return;
+
+	sxe_hw_syn_filter_del(&adapter->hw);
+	syn->is_syn_enable = false;
+}
+
+static bool
+sxe_is_fnav_signature_mode(const struct rte_flow_item pattern[])
+{
+	/* Signature mode is requested through a FUZZY item whose masked
+	 * threshold range [spec, last] is non-zero and well ordered.
+	 * Without a FUZZY item the rule is perfect-mode.
+	 */
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_fuzzy *spec, *last, *mask;
+	u32 low, high, thresh_mask;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+			continue;
+
+		spec = item->spec;
+		last = item->last;
+		mask = item->mask;
+
+		/* Both spec and mask are mandatory for the fuzzy item. */
+		if (spec == NULL || mask == NULL)
+			return false;
+
+		thresh_mask = mask->thresh;
+		low = spec->thresh & thresh_mask;
+		/* Without a range end, the range collapses to spec. */
+		high = (last != NULL ? last->thresh : spec->thresh) & thresh_mask;
+
+		/* A zero or inverted range means perfect mode. */
+		return low != 0 && low <= high;
+	}
+
+	return false;
+}
+
+/* Parse the pattern of a flow-director (fnav) rule.
+ *
+ * Supported layout: [ETH] [VLAN] [IPV4|IPV6] [TCP|UDP|SCTP] [RAW] END,
+ * optionally containing a FUZZY item which selects signature mode.
+ * Parsed fields and their masks are written into @rule; rule->b_spec and
+ * rule->b_mask record whether any spec/mask was seen.
+ *
+ * Return: 0 on success; on failure rte_errno is set through
+ * rte_flow_error_set() and -rte_errno is returned.
+ */
+static s32
+sxe_fnav_filter_pattern_parse(struct rte_eth_dev *dev,
+			const struct rte_flow_item pattern[],
+			struct sxe_fnav_rule *rule,
+			struct rte_flow_error *error)
+{
+	enum rte_fdir_mode fnav_mode =
+		SXE_DEV_FNAV_CONF(dev)->mode;
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec;
+	const struct rte_flow_item_ipv6 *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	const struct rte_flow_item_raw *raw_mask;
+	const struct rte_flow_item_raw *raw_spec;
+	u8 j;
+
+	/* Start from "match everything" masks; VLAN TCI and flex bytes
+	 * default to unmatched.
+	 */
+	memset(rule, 0, sizeof(struct sxe_fnav_rule));
+	memset(&rule->mask, 0xFF, sizeof(struct sxe_hw_fnav_mask));
+	rule->mask.vlan_tci_mask = 0;
+	rule->mask.flex_bytes_mask = 0;
+
+	item = sxe_next_no_fuzzy_pattern(pattern, NULL);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+		item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+		item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+		item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+		PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+		goto l_out;
+	}
+
+	/* A FUZZY item anywhere in the pattern selects signature mode. */
+	if (sxe_is_fnav_signature_mode(pattern))
+		rule->mode = RTE_FDIR_MODE_SIGNATURE;
+	else
+		rule->mode = RTE_FDIR_MODE_PERFECT;
+
+	/* On DPDK 22.11.3 the fnav mode may be configured lazily here;
+	 * otherwise the device mode must already match the rule mode.
+	 */
+#ifdef DPDK_22_11_3
+	s32 ret;
+	if (fnav_mode == RTE_FDIR_MODE_NONE) {
+		SXE_DEV_FNAV_CONF(dev)->mode = rule->mode;
+		ret = sxe_fnav_filter_configure(dev);
+		if (ret) {
+			SXE_DEV_FNAV_CONF(dev)->mode = RTE_FDIR_MODE_NONE;
+			PMD_LOG_ERR(DRV, "fnav config fail.");
+			rte_errno = -ret;
+			goto l_out;
+		}
+	} else if (fnav_mode != rule->mode) {
+#else
+	if (fnav_mode == RTE_FDIR_MODE_NONE ||
+		fnav_mode != rule->mode) {
+#endif
+		rte_flow_error_set(error, ENOTSUP,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Fnav mode is not correct");
+		PMD_LOG_WARN(DRV, "fnav mode is wrong, validate failed.");
+		goto l_out;
+	}
+
+	/* Optional ETH item: must be an empty match. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		if (item->spec || item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "mac spec or mask is not NULL, validate failed.");
+			goto l_out;
+		}
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* Optional VLAN item: captures the TCI and must end the pattern. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		if (!(item->spec && item->mask)) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "vlan spec or mask is null, validate failed.");
+			goto l_out;
+		}
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		vlan_spec = item->spec;
+		vlan_mask = item->mask;
+
+		rule->sxe_fnav.ntuple.vlan_id = vlan_spec->tci;
+		rule->mask.vlan_tci_mask = vlan_mask->tci;
+		/* The CFI bit (0x1000, big-endian) is never matched. */
+		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* IPv4 item: src/dst address match; SXE_FNAV_IPV4_MASK rejects
+	 * any other masked header field.
+	 */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+		rule->sxe_fnav.ntuple.flow_type = SXE_SAMPLE_FLOW_TYPE_IPV4;
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if (!item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "ipv4 mask is null, validate failed.");
+			goto l_out;
+		}
+		rule->b_mask = true;
+		ipv4_mask = item->mask;
+		if (SXE_FNAV_IPV4_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+		if (item->spec) {
+			rule->b_spec = true;
+			ipv4_spec = item->spec;
+			rule->sxe_fnav.ntuple.dst_ip[0] =
+				ipv4_spec->hdr.dst_addr;
+			rule->sxe_fnav.ntuple.src_ip[0] =
+				ipv4_spec->hdr.src_addr;
+		}
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+			item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+			item->type != RTE_FLOW_ITEM_TYPE_END &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* IPv6 item: only allowed in signature mode; per-byte address
+	 * masks must be all-zero or all-ones.
+	 */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		rule->sxe_fnav.ntuple.flow_type = SXE_SAMPLE_FLOW_TYPE_IPV6;
+
+		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+			item->last ||
+			!item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "error in ipv6, validate failed.");
+			goto l_out;
+		}
+
+		rule->b_mask = true;
+		ipv6_mask = item->mask;
+		if (SXE_FNAV_IPV6_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+
+		/* Fold the 16-byte address masks into per-byte bitmaps. */
+		for (j = 0; j < 16; j++) {
+			if (ipv6_mask->hdr.src_addr[j] == 0) {
+				rule->mask.src_ipv6_mask &= ~(1 << j);
+			} else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fnav filter");
+				PMD_LOG_WARN(DRV, "src addr mask is wrong, validate failed.");
+				goto l_out;
+			}
+		}
+
+		for (j = 0; j < 16; j++) {
+			if (ipv6_mask->hdr.dst_addr[j] == 0) {
+				rule->mask.dst_ipv6_mask &= ~(1 << j);
+			} else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fnav filter");
+				PMD_LOG_WARN(DRV, "dst addr mask is wrong, validate failed.");
+				goto l_out;
+			}
+		}
+
+		if (item->spec) {
+			rule->b_spec = true;
+			ipv6_spec = item->spec;
+			rte_memcpy(rule->sxe_fnav.ntuple.src_ip,
+				   ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(rule->sxe_fnav.ntuple.dst_ip,
+				   ipv6_spec->hdr.dst_addr, 16);
+		}
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+			item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+			item->type != RTE_FLOW_ITEM_TYPE_END &&
+			item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* TCP item: src/dst port match only (SXE_FNAV_TCP_MASK rejects
+	 * any other masked field).
+	 */
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		rule->sxe_fnav.ntuple.flow_type |= SXE_SAMPLE_L4TYPE_TCP;
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if (!item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "tcp mask is null, validate failed.");
+			goto l_out;
+		}
+		rule->b_mask = true;
+		tcp_mask = item->mask;
+		if (SXE_FNAV_TCP_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+		if (item->spec) {
+			rule->b_spec = true;
+			tcp_spec = item->spec;
+			rule->sxe_fnav.ntuple.src_port =
+				tcp_spec->hdr.src_port;
+			rule->sxe_fnav.ntuple.dst_port =
+				tcp_spec->hdr.dst_port;
+		}
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* UDP item: same handling as TCP. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		rule->sxe_fnav.ntuple.flow_type |= SXE_SAMPLE_L4TYPE_UDP;
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if (!item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "udp mask is null, validate failed.");
+			goto l_out;
+		}
+		rule->b_mask = true;
+		udp_mask = item->mask;
+		if (SXE_FNAV_UDP_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+		rule->mask.src_port_mask = udp_mask->hdr.src_port;
+		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+		if (item->spec) {
+			rule->b_spec = true;
+			udp_spec = item->spec;
+			rule->sxe_fnav.ntuple.src_port =
+				udp_spec->hdr.src_port;
+			rule->sxe_fnav.ntuple.dst_port =
+				udp_spec->hdr.dst_port;
+		}
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* SCTP item: only the L4 type is recorded; port masks are
+	 * rejected via SXE_FNAV_SCTP_MASK.
+	 */
+	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+		rule->sxe_fnav.ntuple.flow_type |= SXE_SAMPLE_L4TYPE_SCTP;
+
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		sctp_mask = item->mask;
+		if (sctp_mask && SXE_FNAV_SCTP_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+			goto l_out;
+		}
+
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	/* Optional RAW item: matches two flex bytes at a fixed offset. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+			goto l_out;
+		}
+
+		if (!item->mask || !item->spec) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "raw mask or spec is null, validate failed.");
+			goto l_out;
+		}
+
+		raw_mask = item->mask;
+		if (SXE_FNAV_RAW_MASK) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "raw mask is wrong, validate failed.");
+			goto l_out;
+		}
+
+		raw_spec = item->spec;
+		if (SXE_FNAV_RAW_SPEC) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "raw spec is wrong, validate failed.");
+			goto l_out;
+		}
+
+		/* Two pattern bytes packed little-endian into flex_bytes. */
+		rule->mask.flex_bytes_mask = 0xffff;
+		rule->sxe_fnav.ntuple.flex_bytes =
+			(((u16)raw_spec->pattern[1]) << 8) | raw_spec->pattern[0];
+		rule->flex_bytes_offset = raw_spec->offset;
+	}
+
+	/* Whatever remains must be the END item. */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		item = sxe_next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fnav filter");
+			PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+			goto l_out;
+		}
+	}
+
+	PMD_LOG_DEBUG(DRV, "fnav filter pattern parse success.");
+	/* Clear any stale rte_errno so the success path returns 0. */
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+/* Parse the actions of a flow-director (fnav) rule.
+ *
+ * Accepted action lists: QUEUE [MARK] END, or DROP [MARK] END.  DROP is
+ * rejected in signature mode and when L4 ports are matched.  The queue
+ * index, drop flag and MARK id are written into @rule.
+ *
+ * Return: 0 on success; on failure rte_errno is set through
+ * rte_flow_error_set() and -rte_errno is returned.
+ */
+static s32
+sxe_fnav_filter_action_parse(struct rte_eth_dev *dev,
+				const struct rte_flow_action actions[],
+				struct sxe_fnav_rule *rule,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_queue;
+	const struct rte_flow_action_mark *mark;
+
+	/* First action must be QUEUE or DROP. */
+	act = sxe_next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+		act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_queue = (const struct rte_flow_action_queue *)act->conf;
+		rule->queue = act_queue->index;
+		if (rule->queue >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			PMD_LOG_ERR(DRV, "queue index check wrong, validate failed.");
+			goto l_out;
+		}
+	} else {
+		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			/* Log typo fixed: "not supprot" -> "does not support". */
+			PMD_LOG_ERR(DRV, "signature does not support drop, validate failed.");
+			goto l_out;
+		}
+		/* DROP cannot be combined with an L4 port match. */
+		if (rule->sxe_fnav.ntuple.src_port != 0 ||
+			rule->sxe_fnav.ntuple.dst_port != 0) {
+			rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			PMD_LOG_ERR(DRV, "not supported action, validate failed.");
+			goto l_out;
+		}
+		rule->fnavflags = SXE_FNAVCMD_DROP;
+	}
+
+	/* Optional MARK action, then END. */
+	act = sxe_next_no_void_action(actions, act);
+	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+		(act->type != RTE_FLOW_ACTION_TYPE_END)) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	rule->soft_id = 0;
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+		mark = (const struct rte_flow_action_mark *)act->conf;
+		rule->soft_id = mark->id;
+		act = sxe_next_no_void_action(actions, act);
+	}
+
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "fnav filter action parse success.");
+	/* Clear any stale rte_errno so the success path returns 0. */
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+/* Parse and validate a complete flow-director (fnav) rule.
+ *
+ * Runs the pattern parser, the action parser and the attribute checks;
+ * a non-zero priority is rejected.
+ *
+ * Return: 0 on success; otherwise rte_errno is set by the failing helper
+ * or by rte_flow_error_set() and -rte_errno is returned, with *rule
+ * zeroed out.
+ */
+static s32
+sxe_fnav_filter_parse(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct sxe_fnav_rule *rule,
+			struct rte_flow_error *error)
+{
+	s32 ret;
+
+	if (sxe_is_user_param_null(pattern, actions, attr, error)) {
+		PMD_LOG_ERR(DRV, "user param is null, validate failed.\n");
+		goto parse_failed;
+	}
+
+	ret = sxe_fnav_filter_pattern_parse(dev, pattern, rule, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "pattern check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/* The action parser needs rule->mode, so it runs after the
+	 * pattern parser.
+	 */
+	ret = sxe_fnav_filter_action_parse(dev, actions, rule, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	if (sxe_is_attribute_wrong(attr, error)) {
+		PMD_LOG_ERR(DRV, "user attribute is wrong, validate failed.\n");
+		goto parse_failed;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+			attr, "Not support priority.");
+		PMD_LOG_ERR(DRV, "priority check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	PMD_LOG_DEBUG(DRV, "fnav filter fit, validate success!!");
+	rte_errno = 0;
+	goto l_out;
+
+parse_failed:
+	memset(rule, 0, sizeof(struct sxe_fnav_rule));
+	PMD_LOG_WARN(DRV, "fnav filter, validate failed.");
+l_out:
+	return -rte_errno;
+}
+
+/*
+ * Program (add) or remove a parsed fnav rule in software state and hardware.
+ *
+ * Add path: compute the rule hash (perfect or signature flavour), reject
+ * duplicates, insert a tracking node into the fnav context, then write the
+ * rule to hardware.  Delete path (@del == true): remove the tracking node
+ * first, then the hardware rule.  Returns 0 on success, negative errno
+ * style code on failure.
+ */
+static s32
+sxe_fnav_filter_program(struct rte_eth_dev *dev,
+				struct sxe_fnav_rule *rule,
+				bool del)
+{
+	u32 fnavcmd_flags;
+	u32 fnavhash;
+	u8 queue;
+	s32 ret;
+	u32 soft_id = 0;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_fnav_context *fnav_ctxt = &adapter->fnav_ctxt;
+	enum rte_fdir_mode fnav_mode = SXE_DEV_FNAV_CONF(dev)->mode;
+	struct sxe_fnav_filter *node;
+	bool add_node = false;
+
+	/* The rule's mode must match the device's configured fdir mode. */
+	if (fnav_mode == RTE_FDIR_MODE_NONE ||
+		fnav_mode != rule->mode) {
+		PMD_LOG_ERR(DRV, "fnav mode is wrong.");
+		ret = -ENOTSUP;
+		goto l_out;
+	}
+
+	/* bkt_hash must be zero while computing the hash over the tuple. */
+	rule->sxe_fnav.ntuple.bkt_hash = 0;
+	if (fnav_mode == RTE_FDIR_MODE_PERFECT) {
+		if (rule->sxe_fnav.ntuple.flow_type & SXE_SAMPLE_L4TYPE_IPV6_MASK) {
+			PMD_LOG_ERR(DRV, "ipv6 is not supported in perfect mode!");
+			ret = -ENOTSUP;
+			goto l_out;
+		}
+		fnavhash = sxe_fnav_perfect_hash_compute(&rule->sxe_fnav,
+						SXE_DEV_FNAV_CONF(dev)->pballoc);
+		soft_id = rule->soft_id;
+	} else {
+		/* NOTE(review): signature mode leaves soft_id at 0, so the
+		 * delete below always passes soft_id 0 — confirm intended.
+		 */
+		fnavhash = sxe_fnav_signature_hash_compute(&rule->sxe_fnav,
+						SXE_DEV_FNAV_CONF(dev)->pballoc);
+	}
+	rule->sxe_fnav.ntuple.bkt_hash = fnavhash;
+
+	if (del) {
+		/* Remove software tracking state first, then hardware rule. */
+		ret = sxe_fnav_filter_remove(fnav_ctxt, &rule->sxe_fnav);
+		if (ret < 0) {
+			PMD_LOG_ERR(DRV, "fnav filter remove failed.");
+			goto l_out;
+		}
+
+		ret = sxe_hw_fnav_specific_rule_del(hw, &rule->sxe_fnav, soft_id);
+		if (ret < 0)
+			PMD_LOG_ERR(DRV, "fail to delete fnav filter!");
+		else
+			PMD_LOG_DEBUG(DRV, "success to delete fnav filter!");
+		goto l_out;
+	}
+
+	/* Resolve the destination queue: drop queue (perfect mode only) or
+	 * the queue requested by the action.
+	 */
+	fnavcmd_flags = 0;
+	if (rule->fnavflags & SXE_FNAVCMD_DROP) {
+		if (fnav_mode == RTE_FDIR_MODE_PERFECT) {
+			queue = SXE_DEV_FNAV_CONF(dev)->drop_queue;
+			fnavcmd_flags |= SXE_FNAVCMD_DROP;
+		} else {
+			PMD_LOG_ERR(DRV, "drop option is not supported in"
+					" signature mode.");
+			ret = -EINVAL;
+			goto l_out;
+		}
+	} else if (rule->queue < SXE_HW_TXRX_RING_NUM_MAX) {
+		queue = rule->queue;
+	} else {
+		PMD_LOG_ERR(DRV, "not support action.");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	/* Duplicate rules are rejected rather than updated. */
+	node = sxe_fnav_filter_lookup(fnav_ctxt, &rule->sxe_fnav);
+	if (node) {
+		PMD_LOG_ERR(DRV, "conflict with existing fnav filter!");
+		ret = -EINVAL;
+		goto l_out;
+	} else {
+		add_node = true;
+		node = rte_zmalloc("sxe_fnav",
+				   sizeof(struct sxe_fnav_filter), 0);
+		if (!node) {
+			PMD_LOG_ERR(DRV, "fnav node malloc failed.");
+			ret = -ENOMEM;
+			goto l_out;
+		}
+		rte_memcpy(&node->sxe_fnav, &rule->sxe_fnav,
+				sizeof(union sxe_fnav_rule_info));
+		node->fnavflags = fnavcmd_flags;
+		node->fnavhash = fnavhash;
+		node->soft_id = soft_id;
+		node->queue = queue;
+
+		ret = sxe_fnav_filter_insert(fnav_ctxt, node);
+		if (ret < 0) {
+			rte_free(node);
+			goto l_out;
+		}
+	}
+
+	if (fnav_mode == RTE_FDIR_MODE_PERFECT) {
+		ret = sxe_hw_fnav_specific_rule_add(hw, &rule->sxe_fnav,
+						soft_id, queue);
+	} else {
+		/* Sample-rule configure returns void; ret here still holds
+		 * the non-negative insert result, so the error check below
+		 * can only trigger for the perfect-mode branch.
+		 */
+		sxe_hw_fnav_sample_rule_configure(hw,
+						rule->sxe_fnav.ntuple.flow_type,
+						fnavhash, queue);
+	}
+	if (ret < 0) {
+		PMD_LOG_ERR(DRV, "fail to add fnav filter!");
+		/* Roll back the software node on hardware failure. */
+		if (add_node)
+			(void)sxe_fnav_filter_remove(fnav_ctxt, &rule->sxe_fnav);
+
+	} else {
+		ret = 0;
+		PMD_LOG_DEBUG(DRV, "success to add fnav filter");
+	}
+
+l_out:
+	return ret;
+}
+
+/*
+ * Initialize the per-device fnav (flow director) context: the rule list,
+ * a CRC-keyed rte_hash keyed by the full rule tuple, and the index->node
+ * lookup array.  Returns 0 on success, -EINVAL/-ENOMEM on failure; the
+ * hash table is freed again if the map allocation fails.
+ */
+s32 sxe_fnav_filter_init(struct rte_eth_dev *dev)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_fnav_context *fnav_info = &adapter->fnav_ctxt;
+	char fnav_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters fnav_hash_params = {
+		.name = fnav_hash_name,
+		.entries = SXE_MAX_FNAV_FILTER_NUM,
+		.key_len = sizeof(union sxe_fnav_rule_info),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	TAILQ_INIT(&fnav_info->fnav_list);
+	/* Per-device unique hash name; rte_hash_create copies it. */
+	snprintf(fnav_hash_name, RTE_HASH_NAMESIZE,
+		 "fnav_%s", dev->device->name);
+
+	fnav_info->hash_handle = rte_hash_create(&fnav_hash_params);
+	if (!fnav_info->hash_handle) {
+		PMD_LOG_ERR(INIT, "failed to create fnav hash table!");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	fnav_info->hash_map = rte_zmalloc("sxe",
+					  sizeof(struct sxe_fnav_filter *) *
+					  SXE_MAX_FNAV_FILTER_NUM,
+					  0);
+	if (!fnav_info->hash_map) {
+		PMD_LOG_ERR(INIT,
+				 "failed to allocate memory for fnav hash map!");
+		rte_hash_free(fnav_info->hash_handle);
+		ret = -ENOMEM;
+		goto l_out;
+	}
+	/* No global fnav mask has been programmed yet. */
+	fnav_info->mask_added = false;
+
+l_out:
+	return ret;
+}
+
+/*
+ * Tear down the fnav context created by sxe_fnav_filter_init(): free the
+ * lookup map, the hash table, and every remaining filter node on the list.
+ */
+void sxe_fnav_filter_uninit(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_fnav_context *fnav_info = &adapter->fnav_ctxt;
+	struct sxe_fnav_filter *fnav_filter;
+
+	if (fnav_info->hash_map) {
+		rte_free(fnav_info->hash_map);
+		fnav_info->hash_map = NULL;
+	}
+	if (fnav_info->hash_handle)
+		rte_hash_free(fnav_info->hash_handle);
+
+	/* Drain and free all tracked filter nodes. */
+	while ((fnav_filter = TAILQ_FIRST(&fnav_info->fnav_list))) {
+		TAILQ_REMOVE(&fnav_info->fnav_list,
+				 fnav_filter,
+				 entries);
+		rte_free(fnav_filter);
+	}
+
+}
+
+/*
+ * Deep-copy an rte_flow RSS action into @out, rehoming the key and queue
+ * pointers onto @out's own storage so the copy outlives the caller's
+ * buffers.  Returns 0 on success, -EINVAL when key or queue array would
+ * overflow the destination.
+ */
+static s32
+sxe_rss_filter_conf_copy(struct sxe_rss_filter *out,
+			const struct rte_flow_action_rss *in)
+{
+	s32 ret = 0;
+
+	if (in->key_len > RTE_DIM(out->key) ||
+		in->queue_num > RTE_DIM(out->queue)) {
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	/* memcpy returns the destination, so .key/.queue end up pointing
+	 * at out's embedded arrays.
+	 * NOTE(review): when key_len == 0 this still calls
+	 * memcpy(out->key, in->key, 0) with a possibly NULL in->key —
+	 * confirm callers guarantee this is benign.
+	 */
+	out->conf = (struct rte_flow_action_rss){
+		.func = in->func,
+		.level = in->level,
+		.types = in->types,
+		.key_len = in->key_len,
+		.queue_num = in->queue_num,
+		.key = memcpy(out->key, in->key, in->key_len),
+		.queue = memcpy(out->queue, in->queue,
+				sizeof(*in->queue) * in->queue_num),
+	};
+
+l_out:
+	return ret;
+}
+
+/*
+ * Validate the action list of an RSS flow rule and copy the RSS config
+ * into @rss_filter.  Exactly one RSS action followed by END is accepted;
+ * queues must be in range, the hash function must be the default, the
+ * encapsulation level zero, and the key (if given) exactly 40 bytes.
+ * Returns -rte_errno (0 on success).
+ */
+static s32 sxe_rss_filter_action_parse(struct rte_eth_dev *dev,
+						const struct rte_flow_action actions[],
+						struct sxe_rss_filter *rss_filter,
+						struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_rss *rss;
+	u16 n;
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				NULL, "NULL action.");
+		PMD_LOG_ERR(DRV, "action is null, validate failed.");
+		goto l_out;
+	}
+
+	act = sxe_next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	rss = (const struct rte_flow_action_rss *)act->conf;
+
+	if (!rss || !rss->queue_num) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "no valid queues");
+		PMD_LOG_WARN(DRV, "rss queue is invalid, validate failed.");
+		goto l_out;
+	}
+
+	if (rss->queue_num > dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"too many queues for RSS context");
+		PMD_LOG_WARN(DRV, "too many queues for rss context, validate failed.");
+		goto l_out;
+	}
+
+	/* Every listed queue must be a configured Rx queue. */
+	for (n = 0; n < rss->queue_num; n++) {
+		if (rss->queue[n] >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "queue id > max number of queues");
+			PMD_LOG_WARN(DRV, "queue id > max number of queues, validate failed.");
+			goto l_out;
+		}
+	}
+
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"non-default RSS hash functions are not supported");
+		PMD_LOG_WARN(DRV, "non-default rss hash functions are not supported.");
+		goto l_out;
+	}
+
+	if (rss->level) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"a nonzero RSS encapsulation level is not supported");
+		PMD_LOG_WARN(DRV, "a nonzero rss encapsulation level is not supported.");
+		goto l_out;
+	}
+
+	/* A key, when supplied, must exactly fill the filter's key array. */
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_filter->key)) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"RSS hash key must be exactly 40 bytes");
+		PMD_LOG_WARN(DRV, "rss hash key must be exactly 40 bytes.");
+		goto l_out;
+	}
+
+	if (sxe_rss_filter_conf_copy(rss_filter, rss)) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, act,
+				"RSS context initialization failure");
+		PMD_LOG_WARN(DRV, "rss context initialization failure, validate failed.");
+		goto l_out;
+	}
+
+	act = sxe_next_no_void_action(actions, act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "rss filter action parse success.");
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+/*
+ * Parse an RSS flow rule: validate the actions, then the attributes
+ * (priority must be zero).  On failure @rss_filter is zeroed.  Returns
+ * -rte_errno (0 on success).
+ */
+static s32
+sxe_rss_filter_parse(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_action actions[],
+			struct sxe_rss_filter *rss_filter,
+			struct rte_flow_error *error)
+{
+	s32 ret = 0;
+
+	ret = sxe_rss_filter_action_parse(dev, actions, rss_filter, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	if (sxe_is_attribute_wrong(attr, error)) {
+		PMD_LOG_ERR(DRV, "user attribute is wrong, validate failed.\n");
+		goto parse_failed;
+	}
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Error priority.");
+		PMD_LOG_ERR(DRV, "priority check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	PMD_LOG_DEBUG(DRV, "rss filter fit, validate success!!");
+	rte_errno = 0;
+	goto l_out;
+
+parse_failed:
+	memset(rss_filter, 0, sizeof(struct sxe_rss_filter));
+	PMD_LOG_WARN(DRV, "rss filter, validate failed.");
+l_out:
+	return -rte_errno;
+}
+
+/*
+ * Compare two RSS configurations field by field, including the key bytes
+ * and the queue array.  Returns true when they describe the same RSS
+ * context.
+ */
+bool sxe_is_rss_filter_same(const struct rte_flow_action_rss *cur_rss,
+			const struct rte_flow_action_rss *user_rss)
+{
+	return (cur_rss->func == user_rss->func &&
+		cur_rss->level == user_rss->level &&
+		cur_rss->types == user_rss->types &&
+		cur_rss->key_len == user_rss->key_len &&
+		cur_rss->queue_num == user_rss->queue_num &&
+		!memcmp(cur_rss->key, user_rss->key, user_rss->key_len) &&
+		!memcmp(cur_rss->queue, user_rss->queue,
+			sizeof(*user_rss->queue) * user_rss->queue_num));
+}
+
+/*
+ * Apply (@add == true) or remove (@add == false) an RSS flow rule.
+ *
+ * Add: program the redirection table from the rule's queue list, enable
+ * hashing for the requested types (falling back to the driver default key
+ * when none is supplied), and cache the config in the filter context.
+ * Only one RSS rule may exist at a time.  Remove: only succeeds when the
+ * given config matches the cached one; RSS is then disabled and the cache
+ * cleared.  Returns 0 on success, -EINVAL on failure.
+ */
+static s32
+sxe_rss_filter_configure(struct rte_eth_dev *dev,
+		struct sxe_rss_filter *rss_filter, bool add)
+{
+	u16 i, j;
+	s32 ret = 0;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8  rss_indir_tbl[SXE_MAX_RETA_ENTRIES];
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = rss_filter->conf.key_len ?
+			(void *)(uintptr_t)rss_filter->conf.key : NULL,
+		.rss_key_len = rss_filter->conf.key_len,
+		.rss_hf = rss_filter->conf.types,
+	};
+
+	if (!add) {
+		if (sxe_is_rss_filter_same(&filter_ctxt->rss_filter.conf,
+					  &rss_filter->conf)) {
+			sxe_rss_disable(dev);
+			memset(&filter_ctxt->rss_filter, 0,
+				sizeof(struct sxe_rss_filter));
+			PMD_LOG_DEBUG(DRV, "rss filter delete success.");
+			goto l_out;
+		}
+		PMD_LOG_ERR(DRV, "rss filter delete failed.");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	/* A non-zero cached queue_num means a rule is already active. */
+	if (filter_ctxt->rss_filter.conf.queue_num) {
+		PMD_LOG_ERR(DRV, "rss filter has been create, not support recreate.");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	/* Fill the whole redirection table, cycling over the rule's queues. */
+	for (i = 0, j = 0; i < SXE_MAX_RETA_ENTRIES; i++, j++) {
+		if (j == rss_filter->conf.queue_num)
+			j = 0;
+
+		rss_indir_tbl[i] = rss_filter->conf.queue[j];
+	}
+	sxe_hw_rss_redir_tbl_set_all(hw, rss_indir_tbl);
+
+	/* No supported hash type requested: leave RSS disabled. */
+	if ((rss_conf.rss_hf & SXE_RSS_OFFLOAD_ALL) == 0) {
+		sxe_rss_disable(dev);
+		PMD_LOG_DEBUG(DRV, "hash function is null, rss filter delete success.");
+		goto l_out;
+	}
+
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = sxe_rss_hash_key_get();
+
+	sxe_rss_hash_set(hw, &rss_conf);
+	if (sxe_rss_filter_conf_copy(&filter_ctxt->rss_filter, &rss_filter->conf)) {
+		PMD_LOG_ERR(DRV, "copy rss filter info to private data failed.");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+/*
+ * Remove the active RSS flow rule, if any; a cached queue_num of zero
+ * means no rule is installed.
+ */
+static void
+sxe_rss_filter_delete(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+
+	if (filter_ctxt->rss_filter.conf.queue_num)
+		sxe_rss_filter_configure(dev, &filter_ctxt->rss_filter, false);
+
+}
+
+/*
+ * Re-program every cached five-tuple filter into hardware (used after a
+ * device restart, when hardware state has been lost).
+ */
+static void
+sxe_fivetuple_filter_restore(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	struct sxe_fivetuple_filter *filter;
+	struct sxe_fivetuple_node_info filter_node_info;
+
+	TAILQ_FOREACH(filter, &filter_ctxt->fivetuple_list, entries) {
+		filter_node_info.index = filter->index;
+		filter_node_info.queue = filter->queue;
+		memcpy(&filter_node_info.filter_info, &filter->filter_info,
+				sizeof(struct sxe_fivetuple_filter_info));
+		sxe_hw_fivetuple_filter_add(dev, &filter_node_info);
+	}
+
+}
+
+/*
+ * Re-program every cached ethertype filter into hardware; each set bit in
+ * ethertype_mask marks an occupied slot.
+ */
+static void
+sxe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	s32 i;
+	u16 ethertype, queue;
+
+	for (i = 0; i < SXE_MAX_ETQF_FILTERS; i++) {
+		if (filter_ctxt->ethertype_mask & (1 << i)) {
+			ethertype = filter_ctxt->ethertype_filters[i].ethertype;
+			queue = filter_ctxt->ethertype_filters[i].queue;
+			sxe_hw_ethertype_filter_add(hw, i, ethertype, queue);
+		}
+	}
+
+}
+
+/*
+ * Re-program the cached TCP SYN filter into hardware, if one was enabled.
+ */
+static void
+sxe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	struct sxe_syn_filter *syn_filter = &filter_ctxt->syn_filter;
+	bool is_syn_enable = syn_filter->is_syn_enable;
+	u16 queue = syn_filter->queue;
+	u8 priority = syn_filter->priority;
+
+	if (is_syn_enable)
+		sxe_hw_syn_filter_add(hw, queue, priority);
+
+}
+
+/*
+ * Re-apply the cached RSS flow rule, if one was installed.
+ */
+static void
+sxe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+
+	if (filter_ctxt->rss_filter.conf.queue_num) {
+		sxe_rss_filter_configure(dev,
+			&filter_ctxt->rss_filter, true);
+	}
+
+}
+
+/*
+ * Restore all cached filter types to hardware after a device restart:
+ * five-tuple, ethertype, SYN, fnav and RSS.
+ */
+void sxe_filter_restore(struct rte_eth_dev *dev)
+{
+	sxe_fivetuple_filter_restore(dev);
+	sxe_ethertype_filter_restore(dev);
+	sxe_syn_filter_restore(dev);
+	sxe_fnav_filter_restore(dev);
+	sxe_rss_filter_restore(dev);
+
+}
+
+/*
+ * rte_flow .validate callback: try each supported filter parser in turn
+ * (five-tuple, ethertype, SYN, fnav, RSS) and report success as soon as
+ * one accepts the rule.  The parsed results are discarded; nothing is
+ * programmed.  Returns 0 when some parser accepts, otherwise the last
+ * parser's error code (error details set via @error).
+ */
+static s32 sxe_flow_validate(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct sxe_fnav_rule fnav_rule;
+	struct sxe_rss_filter rss_filter;
+	s32 ret = 0;
+
+	/* Five tuple filter */
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = sxe_fivetuple_filter_parse(dev, attr, pattern,
+				actions, &ntuple_filter, error);
+	if (!ret)
+		goto l_out;
+
+	/* Ethertype filter */
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = sxe_ethertype_filter_parse(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret)
+		goto l_out;
+
+	/* Syn filter:export */
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = sxe_syn_filter_parse(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret)
+		goto l_out;
+
+	/* Fnav filter:export */
+	memset(&fnav_rule, 0, sizeof(struct sxe_fnav_rule));
+	ret = sxe_fnav_filter_parse(dev, attr, pattern,
+				actions, &fnav_rule, error);
+	if (!ret)
+		goto l_out;
+
+	/* RSS filter:export */
+	memset(&rss_filter, 0, sizeof(struct sxe_rss_filter));
+	ret = sxe_rss_filter_parse(dev, attr,
+				actions, &rss_filter, error);
+
+l_out:
+	return ret;
+}
+
+/*
+ * rte_flow .create callback: parse the rule with each supported filter
+ * parser in turn (same order as validate) and program the first one that
+ * accepts it.  On success a heap-allocated rte_flow is returned whose
+ * ->rule holds a private copy of the parsed filter and ->filter_type
+ * records which kind it is (used by destroy).  On failure NULL is
+ * returned and @error is filled in.
+ */
+static struct rte_flow *
+sxe_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct sxe_fnav_rule fnav_rule;
+	struct sxe_fnav_context *fnav_info = &adapter->fnav_ctxt;
+	struct sxe_rss_filter rss_filter;
+	struct rte_flow *flow = NULL;
+	struct sxe_ntuple_filter_ele *ntuple_filter_ele;
+	struct sxe_ethertype_filter_ele *ethertype_filter_ele;
+	struct sxe_eth_syn_filter_ele *syn_filter_ele;
+	struct sxe_fnav_rule_ele *fnav_rule_ele;
+	struct sxe_rss_filter_ele *rss_filter_ele;
+	/* Tracks whether this rule installed the first global fnav mask,
+	 * so it can be rolled back on a later failure.
+	 */
+	u8 first_mask = false;
+
+	flow = rte_zmalloc("sxe_rte_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		PMD_LOG_ERR(DRV, "failed to allocate memory");
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	/* Five-tuple filter. */
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = sxe_fivetuple_filter_parse(dev, attr, pattern,
+			actions, &ntuple_filter, error);
+	if (!ret) {
+		ret = sxe_fivetuple_filter_configure(dev, &ntuple_filter, 1);
+		if (!ret) {
+			ntuple_filter_ele = rte_zmalloc("sxe_ntuple_filter",
+				sizeof(struct sxe_ntuple_filter_ele), 0);
+			if (!ntuple_filter_ele) {
+				PMD_LOG_ERR(DRV, "failed to allocate memory");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			rte_memcpy(&ntuple_filter_ele->filter_info,
+				&ntuple_filter,
+				sizeof(struct rte_eth_ntuple_filter));
+			flow->rule = ntuple_filter_ele;
+			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+			PMD_LOG_DEBUG(DRV, "create fivetuple_filter success!");
+			goto l_out;
+		}
+		PMD_LOG_ERR(DRV, "create fivetuple_filter failed!");
+		goto fail;
+	}
+
+	/* Ethertype filter. */
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = sxe_ethertype_filter_parse(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret) {
+		ret = sxe_ethertype_filter_configure(dev,
+				&ethertype_filter, true);
+		if (!ret) {
+			ethertype_filter_ele = rte_zmalloc(
+				"sxe_ethertype_filter",
+				sizeof(struct sxe_ethertype_filter_ele), 0);
+			if (!ethertype_filter_ele) {
+				PMD_LOG_ERR(DRV, "failed to allocate memory");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			rte_memcpy(&ethertype_filter_ele->filter_info,
+				&ethertype_filter,
+				sizeof(struct rte_eth_ethertype_filter));
+			flow->rule = ethertype_filter_ele;
+			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+			PMD_LOG_DEBUG(DRV, "create ethertype_filter success!");
+			goto l_out;
+		}
+		PMD_LOG_ERR(DRV, "create ethertype_filter failed!");
+		goto fail;
+	}
+
+	/* TCP SYN filter. */
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = sxe_syn_filter_parse(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret) {
+		ret = sxe_syn_filter_configure(dev, &syn_filter, true);
+		if (!ret) {
+			syn_filter_ele = rte_zmalloc("sxe_syn_filter",
+				sizeof(struct sxe_eth_syn_filter_ele), 0);
+			if (!syn_filter_ele) {
+				PMD_LOG_ERR(DRV, "failed to allocate memory");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			rte_memcpy(&syn_filter_ele->filter_info,
+				&syn_filter,
+				sizeof(struct rte_eth_syn_filter));
+			flow->rule = syn_filter_ele;
+			flow->filter_type = RTE_ETH_FILTER_SYN;
+			PMD_LOG_DEBUG(DRV, "create syn_filter success!");
+			goto l_out;
+		}
+		PMD_LOG_ERR(DRV, "create syn_filter failed!");
+		goto fail;
+	}
+
+	/* Fnav (flow director) filter. */
+	memset(&fnav_rule, 0, sizeof(struct sxe_fnav_rule));
+	ret = sxe_fnav_filter_parse(dev, attr, pattern,
+				actions, &fnav_rule, error);
+	if (!ret) {
+		if (fnav_rule.b_mask) {
+			if (!fnav_info->mask_added) {
+				/* First fnav rule: install the global mask
+				 * and (optionally) the flex byte offset.
+				 */
+				rte_memcpy(&fnav_info->mask, &fnav_rule.mask,
+						sizeof(struct sxe_hw_fnav_mask));
+
+				if (fnav_rule.mask.flex_bytes_mask &&
+					fnav_info->flex_bytes_offset !=
+						fnav_rule.flex_bytes_offset) {
+					ret = sxe_hw_fnav_flex_offset_set(hw,
+						fnav_rule.flex_bytes_offset);
+					if (ret) {
+						PMD_LOG_ERR(DRV, "flex_byte_offset set failed.");
+						goto fail;
+					} else {
+						fnav_info->flex_bytes_offset =
+							fnav_rule.flex_bytes_offset;
+					}
+				}
+
+				ret = sxe_fnav_mask_set(dev);
+				if (ret) {
+					PMD_LOG_ERR(DRV, "fnav filter create---fnav mask set failed.");
+					goto fail;
+				}
+
+				fnav_info->mask_added = true;
+				first_mask = true;
+			} else {
+				/* All fnav filter use one mask */
+				ret = memcmp(&fnav_info->mask,
+					&fnav_rule.mask,
+					sizeof(struct sxe_hw_fnav_mask));
+				if (ret) {
+					ret = -EINVAL;
+					PMD_LOG_ERR(DRV, "don't support to set different mask.");
+					goto fail;
+				}
+
+				if (fnav_rule.mask.flex_bytes_mask &&
+					fnav_info->flex_bytes_offset !=
+					fnav_rule.flex_bytes_offset) {
+					PMD_LOG_ERR(DRV, "don't support to set different flex_byte_offset.");
+					ret = -EINVAL;
+					goto fail;
+				}
+			}
+		}
+
+		if (fnav_rule.b_spec) {
+			ret = sxe_fnav_filter_program(dev, &fnav_rule, false);
+			if (!ret) {
+				fnav_rule_ele = rte_zmalloc("sxe_fnav_filter",
+					sizeof(struct sxe_fnav_rule_ele), 0);
+				if (!fnav_rule_ele) {
+					PMD_LOG_ERR(DRV, "failed to allocate memory");
+					ret = -ENOMEM;
+					goto fail;
+				}
+				rte_memcpy(&fnav_rule_ele->filter_info,
+					&fnav_rule,
+					sizeof(struct sxe_fnav_rule));
+				flow->rule = fnav_rule_ele;
+				flow->filter_type = RTE_ETH_FILTER_FDIR;
+				PMD_LOG_DEBUG(DRV, "create fnav_filter success!");
+				goto l_out;
+			}
+
+			if (ret) {
+				/* Roll back mask_added if this rule set it. */
+				if (first_mask)
+					fnav_info->mask_added = false;
+				PMD_LOG_ERR(DRV, "fnav_rule_spec set failed!");
+				goto fail;
+			}
+		}
+
+		/* Parsed as fnav but had neither spec to program. */
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "create fnav_filter failed!");
+		goto fail;
+	}
+
+	/* RSS filter.
+	 * NOTE(review): unlike the other filters, a configure failure here
+	 * falls through to fail: without a dedicated error log — confirm
+	 * intended.
+	 */
+	memset(&rss_filter, 0, sizeof(struct sxe_rss_filter));
+	ret = sxe_rss_filter_parse(dev, attr,
+					actions, &rss_filter, error);
+	if (!ret) {
+		ret = sxe_rss_filter_configure(dev, &rss_filter, true);
+		if (!ret) {
+			rss_filter_ele = rte_zmalloc("sxe_rss_filter",
+				sizeof(struct sxe_rss_filter_ele), 0);
+			if (!rss_filter_ele) {
+				PMD_LOG_ERR(DRV, "failed to allocate memory");
+				ret = -ENOMEM;
+				goto fail;
+			}
+
+			sxe_rss_filter_conf_copy(&rss_filter_ele->filter_info,
+						 &rss_filter.conf);
+			flow->rule = rss_filter_ele;
+			flow->filter_type = RTE_ETH_FILTER_HASH;
+			PMD_LOG_DEBUG(DRV, "create rss_filter success!");
+			goto l_out;
+		}
+	}
+fail:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	flow = NULL;
+l_out:
+	return flow;
+
+}
+
+/*
+ * rte_flow .destroy callback: dispatch on the flow's recorded filter
+ * type, deconfigure the corresponding filter, and on success free both
+ * the per-filter element and the flow handle.  On failure the element
+ * and handle are kept (the rule is presumably still programmed — TODO
+ * confirm) and @error is filled in.  Returns 0 on success.
+ */
+static s32 sxe_flow_destroy(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	s32 ret;
+	struct rte_flow *pmd_flow = flow;
+	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	struct sxe_fnav_rule fnav_rule;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct sxe_ntuple_filter_ele *ntuple_filter_ele;
+	struct sxe_ethertype_filter_ele *ethertype_filter_ele;
+	struct sxe_eth_syn_filter_ele *syn_filter_ele;
+	struct sxe_fnav_rule_ele *fnav_rule_ele;
+	struct sxe_rss_filter_ele *rss_filter_ele;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_fnav_context *fnav_info = &adapter->fnav_ctxt;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ntuple_filter_ele = (struct sxe_ntuple_filter_ele *)
+					pmd_flow->rule;
+		rte_memcpy(&ntuple_filter,
+			&ntuple_filter_ele->filter_info,
+			sizeof(struct rte_eth_ntuple_filter));
+		ret = sxe_fivetuple_filter_configure(dev, &ntuple_filter, 0);
+		if (!ret) {
+			PMD_LOG_DEBUG(DRV, "destroy fivetuple filter success.");
+			rte_free(ntuple_filter_ele);
+		}
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ethertype_filter_ele = (struct sxe_ethertype_filter_ele *)
+					pmd_flow->rule;
+		rte_memcpy(&ethertype_filter,
+			&ethertype_filter_ele->filter_info,
+			sizeof(struct rte_eth_ethertype_filter));
+		ret = sxe_ethertype_filter_configure(dev,
+				&ethertype_filter, false);
+		if (!ret) {
+			PMD_LOG_DEBUG(DRV, "destroy ethertype filter success.");
+			rte_free(ethertype_filter_ele);
+		}
+		break;
+	case RTE_ETH_FILTER_SYN:
+		syn_filter_ele = (struct sxe_eth_syn_filter_ele *)
+				pmd_flow->rule;
+		rte_memcpy(&syn_filter,
+			&syn_filter_ele->filter_info,
+			sizeof(struct rte_eth_syn_filter));
+		ret = sxe_syn_filter_configure(dev, &syn_filter, false);
+		if (!ret) {
+			PMD_LOG_DEBUG(DRV, "destroy syn filter success.");
+			rte_free(syn_filter_ele);
+		}
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		fnav_rule_ele = (struct sxe_fnav_rule_ele *)pmd_flow->rule;
+		rte_memcpy(&fnav_rule,
+			&fnav_rule_ele->filter_info,
+			sizeof(struct sxe_fnav_rule));
+		ret = sxe_fnav_filter_program(dev, &fnav_rule, true);
+		if (!ret) {
+			PMD_LOG_DEBUG(DRV, "destroy fnav filter success.");
+			rte_free(fnav_rule_ele);
+			/* Last fnav rule gone: allow a new global mask. */
+			if (TAILQ_EMPTY(&fnav_info->fnav_list))
+				fnav_info->mask_added = false;
+		}
+		break;
+	case RTE_ETH_FILTER_HASH:
+		rss_filter_ele = (struct sxe_rss_filter_ele *)
+				pmd_flow->rule;
+		ret = sxe_rss_filter_configure(dev,
+					&rss_filter_ele->filter_info, false);
+		if (!ret) {
+			PMD_LOG_DEBUG(DRV, "destroy rss filter success.");
+			rte_free(rss_filter_ele);
+		}
+		break;
+	default:
+		PMD_LOG_WARN(DRV, "filter type (%d) not supported", filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to destroy flow");
+		goto l_out;
+	}
+
+	rte_free(flow);
+
+l_out:
+	return ret;
+}
+
+/*
+ * rte_flow .flush callback: remove every installed filter of all types —
+ * five-tuple list, ethertype slots (those not marked as driver-internal
+ * via .conf), SYN filter, all fnav rules, and the RSS rule.  Only the
+ * fnav bulk delete can fail; on failure @error is set and the remaining
+ * RSS cleanup is skipped.
+ */
+static s32 sxe_flow_flush(struct rte_eth_dev *dev,
+			struct rte_flow_error *error)
+{
+	s32 ret = 0;
+	u8  i;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_filter_context *filter_ctxt = &adapter->filter_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_fivetuple_filter *filter;
+
+	while ((filter = TAILQ_FIRST(&filter_ctxt->fivetuple_list)))
+		sxe_fivetuple_filter_delete(dev, filter);
+
+	for (i = 0; i < SXE_MAX_ETQF_FILTERS; i++) {
+		if (filter_ctxt->ethertype_mask & (1 << i) &&
+			!filter_ctxt->ethertype_filters[i].conf) {
+			(void)sxe_ethertype_filter_remove(filter_ctxt, (u8)i);
+			sxe_hw_ethertype_filter_del(hw, i);
+		}
+	}
+
+	sxe_syn_filter_delete(dev);
+
+	ret = sxe_fnav_filter_delete_all(dev);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+					NULL, "Failed to flush rule");
+		PMD_LOG_ERR(DRV, "flush fnav filter failed, flush failed.");
+		goto l_out;
+	}
+
+	sxe_rss_filter_delete(dev);
+
+l_out:
+	return ret;
+}
+
+/* rte_flow ops table exposed by the sxe PF driver. */
+const struct rte_flow_ops sxe_flow_ops = {
+	.validate = sxe_flow_validate,
+	.create = sxe_flow_create,
+	.destroy = sxe_flow_destroy,
+	.flush = sxe_flow_flush,
+};
+
+#ifdef ETH_DEV_OPS_FILTER_CTRL
+/*
+ * Legacy filter_ctrl entry point (older DPDK releases): only the GENERIC
+ * filter type with the GET op is supported, returning the rte_flow ops
+ * table through @arg.
+ */
+s32 sxe_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+					enum rte_filter_type filter_type,
+					enum rte_filter_op filter_op,
+					void *arg)
+{
+	s32 ret = 0;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET) {
+			ret = -EINVAL;
+			goto l_out;
+		}
+		*(const void **)arg = &sxe_flow_ops;
+		break;
+	default:
+		PMD_LOG_WARN(DRV, "filter type (%d) not supported", filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+l_out:
+	return ret;
+}
+#else
+/* Modern flow_ops_get entry point: hand back the rte_flow ops table. */
+s32 sxe_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	*ops = &sxe_flow_ops;
+	return 0;
+}
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_filter_ctrl.h b/drivers/net/sxe/pf/sxe_filter_ctrl.h
new file mode 100644
index 0000000000..4487caf3ad
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_filter_ctrl.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_FILTER_CTRL_H__
+#define __SXE_FILTER_CTRL_H__
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include "sxe_filter.h"
+#include "sxe_regs.h"
+
+#define SXE_HKEY_MAX_INDEX 10
+
+#define SXE_5TUPLE_ARRAY_SIZE \
+	(RTE_ALIGN(SXE_MAX_FTQF_FILTERS, (sizeof(u32) * BYTE_BIT_NUM)) / \
+	 (sizeof(u32) * BYTE_BIT_NUM))
+
+#define SXE_5TUPLE_IPV4_MASK (ipv4_mask->hdr.version_ihl || \
+			ipv4_mask->hdr.type_of_service ||   \
+			ipv4_mask->hdr.total_length ||	  \
+			ipv4_mask->hdr.packet_id ||		 \
+			ipv4_mask->hdr.fragment_offset ||   \
+			ipv4_mask->hdr.time_to_live ||	  \
+			ipv4_mask->hdr.hdr_checksum)
+
+#define SXE_5TUPLE_TCP_MASK (tcp_mask->hdr.tcp_flags || \
+			tcp_mask->hdr.sent_seq  ||	\
+			tcp_mask->hdr.recv_ack  ||	\
+			tcp_mask->hdr.data_off  ||	\
+			tcp_mask->hdr.rx_win ||		\
+			tcp_mask->hdr.cksum ||		\
+			tcp_mask->hdr.tcp_urp)
+
+#define SXE_5TUPLE_UDP_MASK (udp_mask->hdr.dgram_len || \
+			udp_mask->hdr.dgram_cksum)
+
+#define SXE_5TUPLE_SCTP_MASK (sctp_mask->hdr.tag || \
+			sctp_mask->hdr.cksum)
+
+#define SXE_SYN_TCP_MASK (tcp_mask->hdr.src_port || \
+			  tcp_mask->hdr.dst_port || \
+			  tcp_mask->hdr.sent_seq || \
+			  tcp_mask->hdr.recv_ack || \
+			  tcp_mask->hdr.data_off || \
+			  tcp_mask->hdr.rx_win ||   \
+			  tcp_mask->hdr.cksum ||	\
+			  tcp_mask->hdr.tcp_urp)
+
+#define SXE_FNAV_IPV4_MASK (ipv4_mask->hdr.version_ihl || \
+			ipv4_mask->hdr.type_of_service || \
+			ipv4_mask->hdr.total_length ||	\
+			ipv4_mask->hdr.packet_id ||	  \
+			ipv4_mask->hdr.fragment_offset || \
+			ipv4_mask->hdr.time_to_live ||	\
+			ipv4_mask->hdr.next_proto_id ||   \
+			ipv4_mask->hdr.hdr_checksum)
+
+#define SXE_FNAV_IPV6_MASK (ipv6_mask->hdr.vtc_flow || \
+			ipv6_mask->hdr.payload_len ||  \
+			ipv6_mask->hdr.proto ||		\
+			ipv6_mask->hdr.hop_limits)
+
+#define SXE_FNAV_TCP_MASK (tcp_mask->hdr.sent_seq || \
+			tcp_mask->hdr.recv_ack ||	\
+			tcp_mask->hdr.data_off ||	\
+			tcp_mask->hdr.tcp_flags ||   \
+			tcp_mask->hdr.rx_win ||	  \
+			tcp_mask->hdr.cksum ||	   \
+			tcp_mask->hdr.tcp_urp)
+
+#define SXE_FNAV_UDP_MASK (udp_mask->hdr.dgram_len || \
+			   udp_mask->hdr.dgram_cksum)
+
+#define SXE_FNAV_SCTP_MASK (sctp_mask->hdr.src_port ||  \
+			sctp_mask->hdr.dst_port ||	  \
+			sctp_mask->hdr.tag ||		\
+			sctp_mask->hdr.cksum)
+
+#define SXE_FNAV_RAW_MASK (raw_mask->relative != 0x1 || \
+			raw_mask->search != 0x1 ||	  \
+			raw_mask->reserved != 0x0 ||	\
+			(u32)raw_mask->offset != 0xffffffff || \
+			raw_mask->limit != 0xffff ||	\
+			raw_mask->length != 0xffff ||   \
+			raw_mask->pattern[0] != 0xff || \
+			raw_mask->pattern[1] != 0xff)
+
+#define SXE_FNAV_RAW_SPEC (raw_spec->relative != 0 || \
+			raw_spec->search != 0 ||	  \
+			raw_spec->reserved != 0 ||	\
+			raw_spec->offset > SXE_MAX_FLX_SOURCE_OFF || \
+			raw_spec->offset % 2 ||	   \
+			raw_spec->limit != 0 ||	   \
+			raw_spec->length != 2 ||	  \
+			(raw_spec->pattern[0] == 0xff && \
+			raw_spec->pattern[1] == 0xff))
+
+/* Driver-private rte_flow handle: records which filter kind was
+ * programmed and points at the per-kind element holding the parsed rule.
+ */
+struct rte_flow {
+	enum rte_filter_type filter_type;
+	void *rule;
+};
+
+TAILQ_HEAD(sxe_fivetuple_filter_list, sxe_fivetuple_filter);
+
+/* One ethertype filter slot; conf marks slots the flush path must skip. */
+struct sxe_ethertype_filter {
+	u16 ethertype;
+	u16 queue;
+	bool conf;
+};
+
+/* Cached TCP SYN filter state, used to restore hardware after restart. */
+struct sxe_syn_filter {
+	bool is_syn_enable;
+	u16  queue;
+	u8   priority;
+};
+
+/* RSS rule cache: conf's key/queue pointers alias the embedded arrays. */
+struct sxe_rss_filter {
+	struct rte_flow_action_rss conf;
+	u8 key[SXE_HKEY_MAX_INDEX * sizeof(u32)];
+	u16 queue[SXE_HW_TXRX_RING_NUM_MAX];
+};
+
+/* Per-device software state for all non-fnav filter types. */
+struct sxe_filter_context {
+	u8 ethertype_mask;
+	struct sxe_ethertype_filter ethertype_filters[SXE_MAX_ETQF_FILTERS];
+	u32 fivetuple_mask[SXE_5TUPLE_ARRAY_SIZE];
+	struct sxe_fivetuple_filter_list fivetuple_list;
+	struct sxe_syn_filter syn_filter;
+	struct sxe_rss_filter rss_filter;
+};
+
+#ifdef ETH_DEV_OPS_FILTER_CTRL
+s32 sxe_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+					enum rte_filter_type filter_type,
+					enum rte_filter_op filter_op,
+					void *arg);
+#else
+s32 sxe_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops);
+#endif
+
+s32 sxe_fnav_filter_init(struct rte_eth_dev *dev);
+
+void sxe_fnav_filter_uninit(struct rte_eth_dev *dev);
+
+void sxe_filter_restore(struct rte_eth_dev *dev);
+
+void sxe_fivetuple_filter_uninit(struct rte_eth_dev *dev);
+
+bool sxe_is_rss_filter_same(const struct rte_flow_action_rss *cur_rss,
+			const struct rte_flow_action_rss *user_rss);
+
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_flow_ctrl.c b/drivers/net/sxe/pf/sxe_flow_ctrl.c
index 33c4ffeb9d..890c5e7df3 100644
--- a/drivers/net/sxe/pf/sxe_flow_ctrl.c
+++ b/drivers/net/sxe/pf/sxe_flow_ctrl.c
@@ -20,7 +20,7 @@ s32 sxe_flow_ctrl_enable(struct rte_eth_dev *dev)
 	return ret;
 }
 
-s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev, 
+s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev,
 					struct rte_eth_fc_conf *fc_conf)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
@@ -38,20 +38,19 @@ s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev,
 
 	sxe_hw_fc_status_get(hw, &rx_pause_on, &tx_pause_on);
 
-	if (rx_pause_on && tx_pause_on) {
+	if (rx_pause_on && tx_pause_on)
 		fc_conf->mode = RTE_ETH_FC_FULL;
-	} else if (rx_pause_on) {
+	else if (rx_pause_on)
 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
-	} else if (tx_pause_on) {
+	else if (tx_pause_on)
 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
-	} else {
+	else
 		fc_conf->mode = RTE_ETH_FC_NONE;
-	}
 
 	return 0;
 }
 
-s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev, 
+s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev,
 					struct rte_eth_fc_conf *fc_conf)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
diff --git a/drivers/net/sxe/pf/sxe_flow_ctrl.h b/drivers/net/sxe/pf/sxe_flow_ctrl.h
index 0be5d1aaaf..fb124b11bd 100644
--- a/drivers/net/sxe/pf/sxe_flow_ctrl.h
+++ b/drivers/net/sxe/pf/sxe_flow_ctrl.h
@@ -1,16 +1,16 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C), 2022, Linkdata Technology Co., Ltd.
  */
- 
+
 #ifndef __SXE_FLOW_CTRL_H__
 #define __SXE_FLOW_CTRL_H__
 
 s32 sxe_flow_ctrl_enable(struct rte_eth_dev *dev);
 
-s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev, 
+s32 sxe_flow_ctrl_get(struct rte_eth_dev *dev,
 					struct rte_eth_fc_conf *fc_conf);
 
-s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev, 
+s32 sxe_flow_ctrl_set(struct rte_eth_dev *dev,
 					struct rte_eth_fc_conf *fc_conf);
 
 #endif
diff --git a/drivers/net/sxe/pf/sxe_fnav.c b/drivers/net/sxe/pf/sxe_fnav.c
new file mode 100644
index 0000000000..c0b54bbefe
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_fnav.c
@@ -0,0 +1,507 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_vxlan.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include <rte_malloc.h>
+#include <rte_hash.h>
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_fnav.h"
+
+/* Low two bits of FNAVCTRL encode the packet-buffer allocation size. */
+#define FNAVCTRL_PBALLOC_MASK  0x03
+
+/* pbsize = 1 << (PBALLOC_SIZE_SHIFT + pballoc encoding), i.e. 64KB base. */
+#define PBALLOC_SIZE_SHIFT	 15
+
+/* Bucket-hash width masks per packet-buffer size, for perfect and
+ * signature filter modes respectively.
+ */
+#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF
+#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF
+#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF
+#define SIG_BUCKET_64KB_HASH_MASK	   0x1FFF
+#define SIG_BUCKET_128KB_HASH_MASK	  0x3FFF
+#define SIG_BUCKET_256KB_HASH_MASK	  0x7FFF
+
+/* Default byte offset into the packet sampled by the flex filter. */
+#define SXE_DEFAULT_FLEXBYTES_OFFSET	12
+
+/* Convert a per-byte ipv6 address mask into a 16-bit per-byte bitmap.
+ * Each byte must be either 0x00 or 0xff; bit i of *ipv6m is set when
+ * byte i of the address mask is 0xff.  Returns -EINVAL otherwise.
+ */
+static s32 sxe_ipv6_to_mask(const u32 *ipaddr, u16 *ipv6m)
+{
+	u8 addr_bytes[16];
+	u8 idx;
+
+	rte_memcpy(addr_bytes, ipaddr, sizeof(addr_bytes));
+
+	*ipv6m = 0;
+	for (idx = 0; idx < sizeof(addr_bytes); idx++) {
+		if (addr_bytes[idx] == UINT8_MAX) {
+			*ipv6m |= 1 << idx;
+		} else if (addr_bytes[idx] != 0) {
+			PMD_LOG_ERR(DRV, " invalid ipv6 address mask.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Translate the generic fdir configuration into an FNAVCTRL register
+ * image.
+ *
+ * @param conf	  fdir configuration (pballoc size, status reporting, mode).
+ * @param fnavctrl  out: register value to program into FNAVCTRL.
+ * @return 0 on success, -EINVAL when pballoc or status is out of range.
+ */
+static s32
+sxe_fnav_ctrl_info_parse(const struct rte_eth_fdir_conf *conf, u32 *fnavctrl)
+{
+	s32 ret = 0;
+
+	*fnavctrl = 0;
+
+	switch (conf->pballoc) {
+	case RTE_ETH_FDIR_PBALLOC_64K:
+		*fnavctrl |= SXE_FNAVCTRL_PBALLOC_64K;
+		break;
+	case RTE_ETH_FDIR_PBALLOC_128K:
+		*fnavctrl |= SXE_FNAVCTRL_PBALLOC_128K;
+		break;
+	case RTE_ETH_FDIR_PBALLOC_256K:
+		*fnavctrl |= SXE_FNAVCTRL_PBALLOC_256K;
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "invalid fnav_conf->pballoc value");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	switch (conf->status) {
+	case RTE_FDIR_NO_REPORT_STATUS:
+		break;
+	case RTE_FDIR_REPORT_STATUS:
+		*fnavctrl |= SXE_FNAVCTRL_REPORT_STATUS;
+		break;
+	case RTE_FDIR_REPORT_STATUS_ALWAYS:
+		*fnavctrl |= SXE_FNAVCTRL_REPORT_STATUS_ALWAYS;
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "invalid fnav_conf->status value");
+		ret = -EINVAL;
+		goto l_out;
+	}
+
+	/* flex match defaults to word 6 (byte offset 12) of the packet. */
+	*fnavctrl |= (SXE_DEFAULT_FLEXBYTES_OFFSET / sizeof(u16)) <<
+			SXE_FNAVCTRL_FLEX_SHIFT;
+
+	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+		*fnavctrl |= SXE_FNAVCTRL_SPECIFIC_MATCH;
+		*fnavctrl |= (conf->drop_queue << SXE_FNAVCTRL_DROP_Q_SHIFT);
+	}
+
+l_out:
+	return ret;
+}
+
+/**
+ * Cache the user-supplied fdir masks into the per-adapter fnav context.
+ *
+ * The ipv6 masks are folded from per-byte form into 16-bit per-byte
+ * bitmaps; invalid (non 0x00/0xff) bytes reject the whole mask set.
+ *
+ * @param dev   ethdev whose adapter context receives the masks.
+ * @param mask  generic fdir masks from the port configuration.
+ * @return 0 on success, -EINVAL on a malformed ipv6 mask.
+ */
+static s32
+sxe_fnav_mask_store(struct rte_eth_dev *dev,
+				const struct rte_eth_fdir_masks *mask)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_fnav_context *fnav_ctxt = &adapter->fnav_ctxt;
+	u16 dst_ipv6_mask = 0;
+	u16 src_ipv6_mask = 0;
+	s32 ret;
+
+	memset(&fnav_ctxt->mask, 0, sizeof(struct sxe_hw_fnav_mask));
+	fnav_ctxt->mask.vlan_tci_mask = mask->vlan_tci_mask;
+	fnav_ctxt->mask.src_port_mask = mask->src_port_mask;
+	fnav_ctxt->mask.dst_port_mask = mask->dst_port_mask;
+	fnav_ctxt->mask.src_ipv4_mask = mask->ipv4_mask.src_ip;
+	fnav_ctxt->mask.dst_ipv4_mask = mask->ipv4_mask.dst_ip;
+
+	ret = sxe_ipv6_to_mask(mask->ipv6_mask.src_ip, &src_ipv6_mask);
+	if (ret == -EINVAL)
+		return ret;
+
+	ret = sxe_ipv6_to_mask(mask->ipv6_mask.dst_ip, &dst_ipv6_mask);
+	if (ret == -EINVAL)
+		return ret;
+
+	fnav_ctxt->mask.src_ipv6_mask = src_ipv6_mask;
+	fnav_ctxt->mask.dst_ipv6_mask = dst_ipv6_mask;
+
+	return 0;
+}
+
+/**
+ * Program the cached fnav masks into hardware.
+ *
+ * Builds a sample-format mask from the values cached by
+ * sxe_fnav_mask_store() and writes it through the hw layer; in
+ * signature mode the folded ipv6 masks are programmed as well.
+ *
+ * @param dev  ethdev whose adapter holds the cached masks.
+ * @return 0 on success, hw-layer error code otherwise.
+ */
+s32 sxe_fnav_mask_set(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_fnav_context *fnav_ctxt = &adapter->fnav_ctxt;
+	union sxe_fnav_rule_info mask;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	memset(&mask, 0, sizeof(union sxe_fnav_rule_info));
+
+	/* any L4 port mask implies the L4 type participates in matching. */
+	if (fnav_ctxt->mask.dst_port_mask != 0 || fnav_ctxt->mask.src_port_mask != 0)
+		mask.ntuple.flow_type |= SXE_SAMPLE_L4TYPE_MASK;
+
+	mask.ntuple.vlan_id = fnav_ctxt->mask.vlan_tci_mask;
+	mask.ntuple.flex_bytes = fnav_ctxt->mask.flex_bytes_mask;
+	mask.ntuple.dst_port = fnav_ctxt->mask.dst_port_mask;
+	mask.ntuple.src_port = fnav_ctxt->mask.src_port_mask;
+	mask.ntuple.src_ip[0] = fnav_ctxt->mask.src_ipv4_mask;
+	mask.ntuple.dst_ip[0] = fnav_ctxt->mask.dst_ipv4_mask;
+
+	ret = sxe_hw_fnav_specific_rule_mask_set(hw, &mask);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "error on setting fnav mask");
+		goto l_out;
+	}
+
+	/* ipv6 word masks only apply to signature (hash) mode. */
+	if (SXE_DEV_FNAV_CONF(dev)->mode == RTE_FDIR_MODE_SIGNATURE) {
+		sxe_hw_fnav_ipv6_mask_set(hw, fnav_ctxt->mask.src_ipv6_mask,
+								  fnav_ctxt->mask.dst_ipv6_mask);
+	}
+
+l_out:
+	return ret;
+}
+
+/* Store the user fdir masks in the adapter context, then push them to
+ * hardware.  Returns the first non-zero error encountered, 0 on success.
+ */
+static s32
+sxe_fnav_mask_configure(struct rte_eth_dev *dev,
+			const struct rte_eth_fdir_masks *mask)
+{
+	s32 ret;
+
+	ret = sxe_fnav_mask_store(dev, mask);
+	if (ret) {
+		PMD_LOG_ERR(INIT, " error on storing fnav mask");
+		return ret;
+	}
+
+	ret = sxe_fnav_mask_set(dev);
+	if (ret)
+		PMD_LOG_ERR(INIT, " error on setting fnav mask");
+
+	return ret;
+}
+
+/**
+ * Validate and apply the flexible-payload filter configuration.
+ *
+ * Each payload entry must describe a RAW-payload, word-aligned two-byte
+ * window within SXE_MAX_FLX_SOURCE_OFF; its offset is folded into
+ * *fnavctrl.  Each flex mask must be global (flow type UNKNOWN) and
+ * either all-ones or all-zeros.  The resulting two-byte mask is written
+ * to hardware and cached, together with the final flex offset, in the
+ * fnav context.
+ *
+ * NOTE(review): only the mask from the last nb_flexmasks iteration is
+ * programmed; with nb_flexmasks == 0 an all-zero mask is written --
+ * confirm this matches the intended global-mask semantics.
+ *
+ * @param dev	   ethdev owning the fnav context.
+ * @param conf	  flex payload/mask configuration; must not be NULL.
+ * @param fnavctrl  in/out: FNAVCTRL image whose flex offset is updated.
+ * @return 0 on success, -EINVAL on any invalid argument.
+ */
+static s32
+sxe_fnav_flex_conf_set(struct rte_eth_dev *dev,
+		const struct rte_eth_fdir_flex_conf *conf, u32 *fnavctrl)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_fnav_context *fnav_ctxt = &adapter->fnav_ctxt;
+	const struct rte_eth_flex_payload_cfg *flex_cfg;
+	const struct rte_eth_fdir_flex_mask *flex_mask;
+	u16 flexbytes = 0;
+	u16 i;
+	s32 ret = -EINVAL;
+
+	if (conf == NULL) {
+		PMD_LOG_ERR(DRV, "null pointer.");
+		goto l_out;
+	}
+
+	for (i = 0; i < conf->nb_payloads; i++) {
+		flex_cfg = &conf->flex_set[i];
+		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+			PMD_LOG_ERR(DRV, "unsupported payload type.");
+			goto l_out;
+		}
+		/* offset must be even, in range, and cover two adjacent bytes. */
+		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+			(flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+			(flex_cfg->src_offset[0] <= SXE_MAX_FLX_SOURCE_OFF)) {
+			*fnavctrl &= ~SXE_FNAVCTRL_FLEX_MASK;
+			*fnavctrl |=
+				(flex_cfg->src_offset[0] / sizeof(u16)) <<
+					SXE_FNAVCTRL_FLEX_SHIFT;
+		} else {
+			PMD_LOG_ERR(DRV, "invalid flexbytes arguments.");
+			goto l_out;
+		}
+	}
+
+	for (i = 0; i < conf->nb_flexmasks; i++) {
+		flex_mask = &conf->flex_mask[i];
+		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+			PMD_LOG_ERR(DRV, "flexmask should be set globally.");
+			goto l_out;
+		}
+		flexbytes = (u16)(((flex_mask->mask[0] << 8) & 0xFF00) |
+					((flex_mask->mask[1]) & 0xFF));
+		/* hardware only supports match-all or match-none flex masks. */
+		if (flexbytes != UINT16_MAX && flexbytes != 0) {
+			PMD_LOG_ERR(DRV, "invalid flexbytes mask arguments.");
+			goto l_out;
+		}
+	}
+
+	sxe_hw_fnav_flex_mask_set(hw, flexbytes);
+
+	fnav_ctxt->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+	fnav_ctxt->flex_bytes_offset = (u8)((*fnavctrl &
+					SXE_FNAVCTRL_FLEX_MASK) >>
+					SXE_FNAVCTRL_FLEX_SHIFT);
+	ret = 0;
+
+l_out:
+	return ret;
+}
+
+/**
+ * Configure the flow-navigator (fdir) engine from the port config.
+ *
+ * Parses the fdir configuration into an FNAVCTRL image, resizes the rx
+ * packet buffer to make room for the filter table, programs masks and
+ * flex-byte settings, then enables the engine.
+ *
+ * @param dev  ethdev to configure.
+ * @return 0 on success, -ENOSYS for unsupported modes, negative errno
+ *	 on any configuration error.
+ */
+s32 sxe_fnav_filter_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u32 fnavctrl, pbsize;
+	enum rte_fdir_mode mode = SXE_DEV_FNAV_CONF(dev)->mode;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* only signature and perfect match modes are implemented. */
+	if (mode != RTE_FDIR_MODE_SIGNATURE &&
+		mode != RTE_FDIR_MODE_PERFECT) {
+		ret = -ENOSYS;
+		goto l_out;
+	}
+
+	ret = sxe_fnav_ctrl_info_parse(SXE_DEV_FNAV_CONF(dev), &fnavctrl);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "fnav flag config fail.");
+		goto l_out;
+	}
+
+	/* pbsize = 32KB << pballoc encoding (see PBALLOC_SIZE_SHIFT). */
+	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fnavctrl & FNAVCTRL_PBALLOC_MASK)));
+	sxe_hw_fnav_rx_pkt_buf_size_reset(hw, pbsize);
+
+	ret = sxe_fnav_mask_configure(dev, &(SXE_DEV_FNAV_CONF(dev)->mask));
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, " error on setting fnav mask");
+		goto l_out;
+	}
+
+	ret = sxe_fnav_flex_conf_set(dev,
+		&(SXE_DEV_FNAV_CONF(dev)->flex_conf), &fnavctrl);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "error on setting fnav flexible arguments.");
+		goto l_out;
+	}
+
+	sxe_hw_fnav_enable(hw, fnavctrl);
+
+l_out:
+	return ret;
+}
+
+/**
+ * Fold a fnav rule into a hash value using the given key.
+ *
+ * Dwords 1..10 of the rule (big-endian, per the rte_be_to_cpu_32
+ * conversions) are xor-folded into one common dword; the first dword
+ * (flow/vm/vlan) is mixed in separately.  For every set bit i of the
+ * low (resp. high) half of the key, the low (resp. high) dword shifted
+ * right by i is xored into the result.
+ *
+ * NOTE(review): bit-exact algorithm that must match the hardware hash;
+ * appears to follow the ixgbe/82599 FDIR scheme -- confirm against the
+ * hardware documentation before touching.
+ *
+ * @param rule_info rule to hash (dword array in fast_access[]).
+ * @param key	   bucket or signature hash key.
+ * @return raw hash value; caller masks it to the bucket width.
+ */
+static u32
+sxe_fnav_hash_compute(union sxe_fnav_rule_info *rule_info, u32 key)
+{
+	__be32 common_dword = 0;
+	u32 high_dword, low_dword, flow_vm_vlan;
+	u32 result = 0;
+	u8 i;
+
+	flow_vm_vlan = rte_be_to_cpu_32(rule_info->fast_access[0]);
+
+	for (i = 1; i <= 10; i++)
+		common_dword ^= rule_info->fast_access[i];
+
+	high_dword = rte_be_to_cpu_32(common_dword);
+	low_dword = (high_dword >> 16) | (high_dword << 16);
+	high_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* bit 0 of each key half is handled before low_dword is remixed. */
+	if (key & 0x0001)
+		result ^= low_dword;
+
+	if (key & 0x00010000)
+		result ^= high_dword;
+
+	low_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	for (i = 15; i; i--) {
+		if (key & (0x0001 << i))
+			result ^= low_dword >> i;
+		if (key & (0x00010000 << i))
+			result ^= high_dword >> i;
+	}
+
+	return result;
+}
+
+/**
+ * Compute the perfect-mode bucket hash for a rule.
+ *
+ * The bucket width depends on the packet-buffer allocation; select the
+ * matching mask once and perform a single hash computation (the original
+ * duplicated the hash call in every branch and ended each branch with a
+ * goto to the immediately following label).
+ *
+ * @param rule_info rule to hash.
+ * @param pballoc   packet-buffer allocation size selecting bucket width.
+ * @return bucket hash masked to the configured width.
+ */
+u32 sxe_fnav_perfect_hash_compute(union sxe_fnav_rule_info *rule_info,
+		enum rte_eth_fdir_pballoc_type pballoc)
+{
+	u32 bucket_mask;
+
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
+		bucket_mask = PERFECT_BUCKET_256KB_HASH_MASK;
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
+		bucket_mask = PERFECT_BUCKET_128KB_HASH_MASK;
+	else
+		bucket_mask = PERFECT_BUCKET_64KB_HASH_MASK;
+
+	return sxe_fnav_hash_compute(rule_info, SXE_FNAV_BUCKET_HASH_KEY) &
+			bucket_mask;
+}
+
+/* Compute the signature-mode hash for a rule: the bucket hash (width
+ * chosen by the packet-buffer allocation) in the low bits, with the
+ * signature hash shifted into the software-index field above it.
+ */
+u32 sxe_fnav_signature_hash_compute(union sxe_fnav_rule_info *rule_info,
+		enum rte_eth_fdir_pballoc_type pballoc)
+{
+	u32 bucket_mask;
+	u32 bucket_hash;
+	u32 sig_hash;
+
+	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
+		bucket_mask = SIG_BUCKET_256KB_HASH_MASK;
+	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
+		bucket_mask = SIG_BUCKET_128KB_HASH_MASK;
+	else
+		bucket_mask = SIG_BUCKET_64KB_HASH_MASK;
+
+	bucket_hash = sxe_fnav_hash_compute(rule_info,
+				SXE_FNAV_BUCKET_HASH_KEY) & bucket_mask;
+	sig_hash = sxe_fnav_hash_compute(rule_info, SXE_FNAV_SAMPLE_HASH_KEY);
+
+	return (sig_hash << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
+}
+
+/* Insert a filter into the lookup hash and the rule list.  Returns the
+ * hash slot index (>= 0) on success or the negative rte_hash error.
+ */
+s32 sxe_fnav_filter_insert(struct sxe_fnav_context *fnav_ctxt,
+			 struct sxe_fnav_filter *fnav_filter)
+{
+	s32 slot;
+
+	slot = rte_hash_add_key(fnav_ctxt->hash_handle, &fnav_filter->sxe_fnav);
+	if (slot < 0) {
+		PMD_LOG_ERR(DRV,
+				"failed to insert fnav filter to hash table %d!",
+				slot);
+		return slot;
+	}
+
+	fnav_ctxt->hash_map[slot] = fnav_filter;
+	TAILQ_INSERT_TAIL(&fnav_ctxt->fnav_list, fnav_filter, entries);
+
+	return slot;
+}
+
+/* Remove the filter matching "key" from the hash and the list, freeing
+ * it.  Returns the freed hash slot index, or the negative rte_hash
+ * error when no such filter exists.
+ */
+s32 sxe_fnav_filter_remove(struct sxe_fnav_context *fnav_ctxt,
+			 union sxe_fnav_rule_info *key)
+{
+	struct sxe_fnav_filter *victim;
+	s32 slot;
+
+	slot = rte_hash_del_key(fnav_ctxt->hash_handle, key);
+	if (slot < 0) {
+		PMD_LOG_ERR(DRV, "no such fnav filter to delete %d!", slot);
+		return slot;
+	}
+
+	victim = fnav_ctxt->hash_map[slot];
+	fnav_ctxt->hash_map[slot] = NULL;
+
+	TAILQ_REMOVE(&fnav_ctxt->fnav_list, victim, entries);
+	rte_free(victim);
+
+	return slot;
+}
+
+/* Look up the filter matching "key"; NULL when no filter is installed. */
+struct sxe_fnav_filter *
+sxe_fnav_filter_lookup(struct sxe_fnav_context *fnav_ctxt,
+			 union sxe_fnav_rule_info *key)
+{
+	s32 slot = rte_hash_lookup(fnav_ctxt->hash_handle, (const void *)key);
+
+	return (slot < 0) ? NULL : fnav_ctxt->hash_map[slot];
+}
+
+/**
+ * Drop every installed fnav filter and, if any existed, reinitialize
+ * the hardware rule table.
+ *
+ * Fix: the original saved TAILQ_FIRST() before the free loop and then
+ * compared that pointer to NULL after its target had been freed; the
+ * value of a freed pointer is indeterminate (C11 6.2.4).  Record the
+ * emptiness of the list as a bool before freeing instead.
+ *
+ * @param dev  ethdev whose filters are removed.
+ * @return 0 on success, negative error from the hw table reinit.
+ */
+s32 sxe_fnav_filter_delete_all(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_fnav_context *fnav_ctxt = &adapter->fnav_ctxt;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_fnav_filter *fnav_filter;
+	bool had_filters;
+	s32 ret = 0;
+
+	rte_hash_reset(fnav_ctxt->hash_handle);
+	memset(fnav_ctxt->hash_map, 0,
+		   sizeof(struct sxe_fnav_filter *) * SXE_MAX_FNAV_FILTER_NUM);
+
+	had_filters = !TAILQ_EMPTY(&fnav_ctxt->fnav_list);
+
+	while ((fnav_filter = TAILQ_FIRST(&fnav_ctxt->fnav_list))) {
+		TAILQ_REMOVE(&fnav_ctxt->fnav_list,
+				fnav_filter, entries);
+		rte_free(fnav_filter);
+	}
+	fnav_ctxt->mask_added = false;
+
+	/* only touch hardware when there was something to flush. */
+	if (had_filters) {
+		ret = sxe_hw_fnav_sample_rules_table_reinit(hw);
+		if (ret < 0)
+			PMD_LOG_ERR(INIT, "failed to re-initialize fd table.");
+	}
+
+	return ret;
+}
+
+/* Re-program every software-cached fnav filter into hardware, using the
+ * perfect-match or signature path according to the configured mode.
+ */
+void sxe_fnav_filter_restore(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_fnav_context *fnav_ctxt = &adapter->fnav_ctxt;
+	struct sxe_fnav_filter *node;
+	bool is_perfect =
+		(SXE_DEV_FNAV_CONF(dev)->mode == RTE_FDIR_MODE_PERFECT);
+
+	TAILQ_FOREACH(node, &fnav_ctxt->fnav_list, entries) {
+		if (is_perfect)
+			(void)sxe_hw_fnav_specific_rule_add(hw,
+							&node->sxe_fnav,
+							node->soft_id,
+							node->queue);
+		else
+			sxe_hw_fnav_sample_rule_configure(hw,
+							node->sxe_fnav.ntuple.flow_type,
+							node->fnavhash,
+							node->queue);
+	}
+}
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_fnav.h b/drivers/net/sxe/pf/sxe_fnav.h
new file mode 100644
index 0000000000..2b4eb81de6
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_fnav.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_FNAV_H__
+#define __SXE_FNAV_H__
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+/* highest packet byte offset the flex filter may sample from. */
+#define SXE_MAX_FLX_SOURCE_OFF  62
+
+/* capacity of the software filter hash table. */
+#define SXE_MAX_FNAV_FILTER_NUM		(1024 * 32)
+
+/* software copy of the global fnav (flow director) field masks.
+ * The ipv6 masks are 16-bit per-byte bitmaps (see sxe_ipv6_to_mask).
+ */
+struct sxe_hw_fnav_mask {
+	u16 vlan_tci_mask;
+	u32 src_ipv4_mask;
+	u32 dst_ipv4_mask;
+	u16 src_ipv6_mask;
+	u16 dst_ipv6_mask;
+	u16 src_port_mask;
+	u16 dst_port_mask;
+	u16 flex_bytes_mask;
+};
+
+/* one installed fnav filter, kept both in fnav_list and hash_map. */
+struct sxe_fnav_filter {
+	TAILQ_ENTRY(sxe_fnav_filter) entries;
+	union sxe_fnav_rule_info sxe_fnav;
+	u32 fnavflags;
+	u32 fnavhash;
+	u32 soft_id;
+	u8 queue;
+};
+
+TAILQ_HEAD(sxe_fnav_filter_list, sxe_fnav_filter);
+
+/* parsed representation of a user-supplied fnav flow rule. */
+struct sxe_fnav_rule {
+	struct sxe_hw_fnav_mask mask;
+	union sxe_fnav_rule_info sxe_fnav;
+	bool b_spec;
+	bool b_mask;
+	enum rte_fdir_mode mode;
+	u32 fnavflags;
+	u32 soft_id;
+	u8 queue;
+	u8 flex_bytes_offset;
+};
+
+/* per-adapter fnav state: global masks, installed-rule list, and the
+ * rte_hash index (hash_map maps hash slots back to filter objects).
+ */
+struct sxe_fnav_context {
+	struct sxe_hw_fnav_mask mask;
+	u8	 flex_bytes_offset;
+	struct sxe_fnav_filter_list fnav_list;
+	struct sxe_fnav_filter **hash_map;
+	struct rte_hash *hash_handle;
+	bool mask_added;
+};
+
+/* Configure and enable the fnav engine from the port fdir config. */
+s32 sxe_fnav_filter_configure(struct rte_eth_dev *dev);
+
+/* Program the cached fnav masks into hardware. */
+s32 sxe_fnav_mask_set(struct rte_eth_dev *dev);
+
+/* Remove and free the filter matching "key"; returns its hash slot. */
+s32 sxe_fnav_filter_remove(struct sxe_fnav_context *fnav_ctxt,
+			 union sxe_fnav_rule_info *key);
+
+/* Look up the filter matching "key"; NULL when none is installed. */
+struct sxe_fnav_filter *
+sxe_fnav_filter_lookup(struct sxe_fnav_context *fnav_ctxt,
+			 union sxe_fnav_rule_info *key);
+
+/* Insert a filter into the hash and list; returns its hash slot. */
+s32 sxe_fnav_filter_insert(struct sxe_fnav_context *fnav_ctxt,
+			 struct sxe_fnav_filter *fnav_filter);
+
+/* Bucket hash for perfect mode, masked to the pballoc bucket width. */
+u32 sxe_fnav_perfect_hash_compute(union sxe_fnav_rule_info *rule_info,
+			enum rte_eth_fdir_pballoc_type pballoc);
+
+/* Combined signature + bucket hash for signature mode. */
+u32 sxe_fnav_signature_hash_compute(union sxe_fnav_rule_info *rule_info,
+			enum rte_eth_fdir_pballoc_type pballoc);
+
+/* Re-program all cached filters into hardware (after reset). */
+void sxe_fnav_filter_restore(struct rte_eth_dev *dev);
+
+/* Drop every installed filter and reinit the hw table if needed. */
+s32 sxe_fnav_filter_delete_all(struct rte_eth_dev *dev);
+
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_irq.c b/drivers/net/sxe/pf/sxe_irq.c
index 90c1e168f8..e7995d85d9 100644
--- a/drivers/net/sxe/pf/sxe_irq.c
+++ b/drivers/net/sxe/pf/sxe_irq.c
@@ -31,17 +31,17 @@
 #include "sxe_compat_version.h"
 #include "sxe_vf.h"
 
-#define SXE_LINK_DOWN_TIMEOUT 4000 
-#define SXE_LINK_UP_TIMEOUT   1000 
+#define SXE_LINK_DOWN_TIMEOUT 4000
+#define SXE_LINK_UP_TIMEOUT   1000
 
-#define SXE_IRQ_MAILBOX          (u32)(1 << 1)
-#define SXE_IRQ_MACSEC           (u32)(1 << 2)
+#define SXE_IRQ_MAILBOX		  (u32)(1 << 1)
+#define SXE_IRQ_MACSEC		   (u32)(1 << 2)
 
-#define SXE_LINK_UP_TIME         90 
+#define SXE_LINK_UP_TIME		 90
 
-#define SXE_MISC_VEC_ID          RTE_INTR_VEC_ZERO_OFFSET
+#define SXE_MISC_VEC_ID		  RTE_INTR_VEC_ZERO_OFFSET
 
-#define SXE_RX_VEC_BASE          RTE_INTR_VEC_RXTX_OFFSET
+#define SXE_RX_VEC_BASE		  RTE_INTR_VEC_RXTX_OFFSET
 
 static void sxe_link_info_output(struct rte_eth_dev *dev)
 {
@@ -63,7 +63,6 @@ static void sxe_link_info_output(struct rte_eth_dev *dev)
 				pci_dev->addr.devid,
 				pci_dev->addr.function);
 
-	return;
 }
 
 void sxe_event_irq_delayed_handler(void *param)
@@ -94,9 +93,8 @@ void sxe_event_irq_delayed_handler(void *param)
 	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
-	if (eicr & SXE_EICR_MAILBOX) {
+	if (eicr & SXE_EICR_MAILBOX)
 		sxe_mbx_irq_handler(eth_dev);
-	}
 #endif
 
 	if (irq->action & SXE_IRQ_LINK_UPDATE) {
@@ -116,7 +114,6 @@ void sxe_event_irq_delayed_handler(void *param)
 
 	rte_intr_ack(intr_handle);
 
-	return;
 }
 
 static void sxe_lsc_irq_handler(struct rte_eth_dev *eth_dev)
@@ -134,12 +131,12 @@ static void sxe_lsc_irq_handler(struct rte_eth_dev *eth_dev)
 
 	if (!link.link_status && !link_up) {
 		PMD_LOG_DEBUG(DRV, "link change irq, down->down, do nothing.");
-		goto l_out;
+		return;
 	}
 
 	if (irq->to_pcs_init) {
 		PMD_LOG_DEBUG(DRV, "to set pcs init, do nothing.");
-		goto l_out;
+		return;
 	}
 
 	PMD_LOG_INFO(DRV, "link change irq handler start");
@@ -150,8 +147,8 @@ static void sxe_lsc_irq_handler(struct rte_eth_dev *eth_dev)
 					SXE_LINK_UP_TIMEOUT;
 
 	if (rte_eal_alarm_set(timeout * 1000,
-			      sxe_event_irq_delayed_handler,
-			      (void *)eth_dev) < 0) {
+				  sxe_event_irq_delayed_handler,
+				  (void *)eth_dev) < 0) {
 		PMD_LOG_ERR(DRV, "submit event irq delay handle fail.");
 	} else {
 		irq->enable_mask &= ~SXE_EIMS_LSC;
@@ -159,8 +156,6 @@ static void sxe_lsc_irq_handler(struct rte_eth_dev *eth_dev)
 
 	PMD_LOG_INFO(DRV, "link change irq handler end");
 
-l_out:
-	return;
 }
 
 static s32 sxe_event_irq_action(struct rte_eth_dev *eth_dev)
@@ -187,7 +182,7 @@ static s32 sxe_event_irq_action(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
-STATIC void sxe_event_irq_handler(void *data)
+static void sxe_event_irq_handler(void *data)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)data;
 	struct sxe_adapter *adapter = eth_dev->data->dev_private;
@@ -208,17 +203,14 @@ STATIC void sxe_event_irq_handler(void *data)
 
 	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
 
-	if (eicr & SXE_EICR_LSC) {
+	if (eicr & SXE_EICR_LSC)
 		irq->action |= SXE_IRQ_LINK_UPDATE;
-	}
 
-	if (eicr & SXE_EICR_MAILBOX) {
+	if (eicr & SXE_EICR_MAILBOX)
 		irq->action |= SXE_IRQ_MAILBOX;
-	}
 
-	if (eicr & SXE_EICR_LINKSEC) {
+	if (eicr & SXE_EICR_LINKSEC)
 		irq->action |= SXE_IRQ_MACSEC;
-	}
 
 	sxe_event_irq_action(eth_dev);
 
@@ -226,7 +218,6 @@ STATIC void sxe_event_irq_handler(void *data)
 	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
 	rte_spinlock_unlock(&adapter->irq_ctxt.event_irq_lock);
 
-	return;
 }
 
 void sxe_irq_init(struct rte_eth_dev *eth_dev)
@@ -247,7 +238,7 @@ void sxe_irq_init(struct rte_eth_dev *eth_dev)
 	u32 gpie = 0;
 
 	if ((irq_handle->type == RTE_INTR_HANDLE_UIO) ||
-	    (irq_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)) {
+		(irq_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)) {
 		gpie = sxe_hw_irq_general_reg_get(hw);
 
 		gpie |= SXE_GPIE_MSIX_MODE | SXE_GPIE_OCD;
@@ -257,7 +248,6 @@ void sxe_irq_init(struct rte_eth_dev *eth_dev)
 
 	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
 #endif
-	return;
 }
 
 static s32 sxe_irq_general_config(struct rte_eth_dev *dev)
@@ -271,7 +261,7 @@ static s32 sxe_irq_general_config(struct rte_eth_dev *dev)
 
 	gpie = sxe_hw_irq_general_reg_get(hw);
 	if (!rte_intr_dp_is_en(handle) &&
-	    !(gpie & (SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT))) {
+		!(gpie & (SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT))) {
 		ret = -SXE_ERR_CONFIG;
 		gpie |= SXE_GPIE_MSIX_MODE;
 		PMD_LOG_INFO(DRV, "rx queue irq num:%d gpie:0x%x.",
@@ -304,14 +294,13 @@ static void sxe_msix_configure(struct rte_eth_dev *dev)
 	ret = sxe_irq_general_config(dev);
 	if (ret) {
 		PMD_LOG_INFO(DRV, "unsupport msi-x, no need config irq");
-		goto l_out;
+		return;
 	}
 
-	if (rte_intr_allow_others(handle)) {
+	if (rte_intr_allow_others(handle))
 		vector = base = SXE_RX_VEC_BASE;
-	}
 
-	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT);
+	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL);
 
 	if (rte_intr_dp_is_en(handle)) {
 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
@@ -331,9 +320,8 @@ static void sxe_msix_configure(struct rte_eth_dev *dev)
 					queue_id,
 					rx_queue->reg_idx,
 					vector);
-			if (vector < base + handle->nb_efd - 1) {
+			if (vector < base + handle->nb_efd - 1)
 				vector++;
-			}
 		}
 		sxe_hw_event_irq_map(hw, 1, SXE_MISC_VEC_ID);
 	}
@@ -346,8 +334,6 @@ static void sxe_msix_configure(struct rte_eth_dev *dev)
 	value &= ~(SXE_EIMS_OTHER | SXE_EIMS_MAILBOX | SXE_EIMS_LSC);
 	sxe_hw_event_irq_auto_clear_set(hw, value);
 
-l_out:
-	return;
 }
 
 s32 sxe_irq_configure(struct rte_eth_dev *eth_dev)
@@ -358,12 +344,12 @@ s32 sxe_irq_configure(struct rte_eth_dev *eth_dev)
 	s32 ret = 0;
 
 	if ((rte_intr_cap_multiple(handle) ||
-	     !RTE_ETH_DEV_SRIOV(eth_dev).active) &&
-	    eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+		 !RTE_ETH_DEV_SRIOV(eth_dev).active) &&
+		eth_dev->data->dev_conf.intr_conf.rxq != 0) {
 		irq_num = eth_dev->data->nb_rx_queues;
 		if (irq_num > SXE_QUEUE_IRQ_NUM_MAX) {
 			PMD_LOG_ERR(DRV, "irq_num:%u exceed limit:%u ",
-				      irq_num, SXE_QUEUE_IRQ_NUM_MAX);
+					  irq_num, SXE_QUEUE_IRQ_NUM_MAX);
 			ret = -ENOTSUP;
 			goto l_out;
 		}
@@ -371,15 +357,15 @@ s32 sxe_irq_configure(struct rte_eth_dev *eth_dev)
 		if (rte_intr_efd_enable(handle, irq_num)) {
 			ret = -SXE_ERR_CONFIG;
 			PMD_LOG_ERR(DRV,
-				      "intr_handle type:%d irq num:%d invalid",
-				      handle->type, irq_num);
+					  "intr_handle type:%d irq num:%d invalid",
+					  handle->type, irq_num);
 			goto l_out;
 		}
 	}
 
 	if (rte_intr_dp_is_en(handle) && !handle->intr_vec) {
 		handle->intr_vec = rte_zmalloc("intr_vec",
-				    eth_dev->data->nb_rx_queues * sizeof(u32), 0);
+					eth_dev->data->nb_rx_queues * sizeof(u32), 0);
 		if (handle->intr_vec == NULL) {
 			PMD_LOG_ERR(DRV, "rx queue irq vector "
 					 "allocate %zuB memory fail.",
@@ -394,14 +380,14 @@ s32 sxe_irq_configure(struct rte_eth_dev *eth_dev)
 	sxe_irq_enable(eth_dev);
 
 	PMD_LOG_INFO(DRV,
-		      "intr_conf rxq:%u intr_handle type:%d rx queue num:%d "
-		      "queue irq num:%u total irq num:%u "
-		      "config done",
-		      eth_dev->data->dev_conf.intr_conf.rxq,
-		      handle->type,
-		      eth_dev->data->nb_rx_queues,
-		      handle->nb_efd,
-		      handle->max_intr);
+			  "intr_conf rxq:%u intr_handle type:%d rx queue num:%d "
+			  "queue irq num:%u total irq num:%u "
+			  "config done",
+			  eth_dev->data->dev_conf.intr_conf.rxq,
+			  handle->type,
+			  eth_dev->data->nb_rx_queues,
+			  handle->nb_efd,
+			  handle->max_intr);
 
 l_out:
 	return ret;
@@ -418,35 +404,32 @@ void sxe_irq_enable(struct rte_eth_dev *eth_dev)
 	if (rte_intr_allow_others(handle)) {
 		sxe_link_info_output(eth_dev);
 
-		if (eth_dev->data->dev_conf.intr_conf.lsc != 0) {
+		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
 			irq->enable_mask |= SXE_EIMS_LSC;
-		} else {
+		else
 			irq->enable_mask &= ~SXE_EIMS_LSC;
-		}
+
 	} else {
 		rte_intr_callback_unregister(handle,
-					     sxe_event_irq_handler, eth_dev);
-		if (eth_dev->data->dev_conf.intr_conf.lsc != 0) {
+						 sxe_event_irq_handler, eth_dev);
+		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
 			PMD_LOG_ERR(DRV, "event irq not support.");
-		}
 	}
 
 	/* check if rxq interrupt is enabled */
 	if (eth_dev->data->dev_conf.intr_conf.rxq != 0 &&
-	    rte_intr_dp_is_en(handle)) {
+		rte_intr_dp_is_en(handle))
 		irq->enable_mask |= SXE_EIMS_RTX_QUEUE;
-	}
 
 	rte_intr_enable(handle);
 
 	sxe_hw_specific_irq_enable(hw, irq->enable_mask);
 
 	PMD_LOG_INFO(DRV,
-		      "intr_handle type:%d enable irq mask:0x%x",
-		      handle->type,
-		      irq->enable_mask);
+			  "intr_handle type:%d enable irq mask:0x%x",
+			  handle->type,
+			  irq->enable_mask);
 
-	return;
 }
 
 void sxe_irq_vec_free(struct rte_intr_handle *handle)
@@ -456,7 +439,6 @@ void sxe_irq_vec_free(struct rte_intr_handle *handle)
 		handle->intr_vec = NULL;
 	}
 
-	return;
 }
 
 void sxe_irq_disable(struct rte_eth_dev *eth_dev)
@@ -473,7 +455,6 @@ void sxe_irq_disable(struct rte_eth_dev *eth_dev)
 	rte_intr_efd_disable(handle);
 	sxe_irq_vec_free(handle);
 
-	return;
 }
 
 void sxe_irq_uninit(struct rte_eth_dev *eth_dev)
@@ -492,14 +473,13 @@ void sxe_irq_uninit(struct rte_eth_dev *eth_dev)
 			break;
 		} else if (ret != -EAGAIN) {
 			PMD_LOG_ERR(DRV,
-				    "irq handler unregister fail, next to retry");
+					"irq handler unregister fail, next to retry");
 		}
 		rte_delay_ms(100);
 	} while (retry++ < (10 + SXE_LINK_UP_TIME));
 
 	rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, eth_dev);
 
-	return;
 }
 
 s32 sxe_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, u16 queue_id)
@@ -528,7 +508,7 @@ s32 sxe_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, u16 queue_id)
 	rte_intr_ack(intr_handle);
 
 	PMD_LOG_INFO(DRV, "queue_id:%u irq enabled enable_mask:0x%x.",
-		    queue_id, irq->enable_mask);
+			queue_id, irq->enable_mask);
 
 	return 0;
 }
@@ -555,7 +535,7 @@ s32 sxe_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, u16 queue_id)
 	}
 
 	PMD_LOG_INFO(DRV, "queue_id:%u irq disabled enable_mask:0x%x.",
-		    queue_id, irq->enable_mask);
+			queue_id, irq->enable_mask);
 
 	return 0;
 }
diff --git a/drivers/net/sxe/pf/sxe_irq.h b/drivers/net/sxe/pf/sxe_irq.h
index 322d7023c9..7b63013545 100644
--- a/drivers/net/sxe/pf/sxe_irq.h
+++ b/drivers/net/sxe/pf/sxe_irq.h
@@ -15,21 +15,22 @@
 #include "sxe_compat_platform.h"
 #include "sxe_compat_version.h"
 
-#define SXE_QUEUE_IRQ_NUM_MAX    15
+#define SXE_QUEUE_IRQ_NUM_MAX	15
 
-#define SXE_QUEUE_ITR_INTERVAL_DEFAULT   500 
+#define SXE_QUEUE_ITR_INTERVAL_DEFAULT   500
+#define SXE_QUEUE_ITR_INTERVAL   3
 
 #define SXE_EITR_INTERVAL_UNIT_NS	2048
-#define SXE_EITR_ITR_INT_SHIFT          3
-#define SXE_IRQ_ITR_MASK                (0x00000FF8)
+#define SXE_EITR_ITR_INT_SHIFT		  3
+#define SXE_IRQ_ITR_MASK				(0x00000FF8)
 #define SXE_EITR_INTERVAL_US(us) \
 	(((us) * 1000 / SXE_EITR_INTERVAL_UNIT_NS << SXE_EITR_ITR_INT_SHIFT) & \
 		SXE_IRQ_ITR_MASK)
 
 struct sxe_irq_context {
-	u32 action;          
-	u32 enable_mask;    
-	u32 enable_mask_original; 
+	u32 action;
+	u32 enable_mask;
+	u32 enable_mask_original;
 	rte_spinlock_t event_irq_lock;
 	bool to_pcs_init;
 };
diff --git a/drivers/net/sxe/pf/sxe_macsec.c b/drivers/net/sxe/pf/sxe_macsec.c
new file mode 100644
index 0000000000..4a49405a95
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_macsec.c
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_logs.h"
+#include "sxe.h"
+#include "sxe_macsec.h"
+#include "rte_pmd_sxe.h"
+
+/*
+ * Enable MACsec on the port according to the user configuration.
+ *
+ * Tx direction is programmed to authenticate-and-encrypt when
+ * macsec_ctxt->encrypt_en is set, otherwise authenticate-only.
+ * Rx direction always uses strict frame validation. The current
+ * link status is read and passed down to the hardware layer.
+ *
+ * NOTE(review): macsec_ctxt->replayprotect_en is saved by the caller
+ * but not consumed here -- presumably applied elsewhere; confirm.
+ */
+void sxe_macsec_enable(struct rte_eth_dev *dev,
+				struct sxe_macsec_context *macsec_ctxt)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct rte_eth_link link;
+
+	/* Tx mode selected by the user's encrypt flag. */
+	u32 tx_mode = macsec_ctxt->encrypt_en ? SXE_LSECTXCTRL_AUTH_ENCRYPT :
+			  SXE_LSECTXCTRL_AUTH;
+	u32 rx_mode = SXE_LSECRXCTRL_STRICT;
+
+	rte_eth_linkstatus_get(dev, &link);
+
+	sxe_hw_macsec_enable(hw, link.link_status, tx_mode,
+			  rx_mode, SXE_LSECTXCTRL_PNTHRSH_MASK);
+
+	PMD_LOG_INFO(INIT, "link status:%u tx mode:%u rx mode:%u "
+				  " pn_thrsh:0x%x macsec enabled",
+				  link.link_status,
+			  tx_mode, rx_mode,
+			  SXE_LSECTXCTRL_PNTHRSH_MASK);
+
+}
+
+/*
+ * Disable MACsec on the port. The current link status is read and
+ * handed to the hardware layer so it can program the disable sequence
+ * accordingly.
+ */
+static void sxe_macsec_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct rte_eth_link link;
+
+	rte_eth_linkstatus_get(dev, &link);
+
+	sxe_hw_macsec_disable(hw, link.link_status);
+
+	PMD_LOG_INFO(INIT, "link status:%u macsec disabled ", link.link_status);
+
+}
+
+/*
+ * Cache the user-supplied MACsec settings in the adapter's private
+ * context so later paths can reapply them.
+ */
+static void sxe_macsec_configure_save(struct rte_eth_dev *dev,
+				struct sxe_macsec_context *user_macsec)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_macsec_context *macsec_ctxt = &adapter->macsec_ctxt;
+
+	macsec_ctxt->offload_en = user_macsec->offload_en;
+	macsec_ctxt->encrypt_en = user_macsec->encrypt_en;
+	macsec_ctxt->replayprotect_en = user_macsec->replayprotect_en;
+
+}
+
+/* Clear the cached MACsec settings (used on MACsec disable). */
+static void sxe_macsec_configure_reset(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_macsec_context *macsec_ctxt = &adapter->macsec_ctxt;
+
+	macsec_ctxt->offload_en = 0;
+	macsec_ctxt->encrypt_en = 0;
+	macsec_ctxt->replayprotect_en = 0;
+
+}
+
+/*
+ * Public PMD API: enable MACsec offload on @port.
+ *
+ * @port:  ethdev port id (validated).
+ * @en:    non-zero to encrypt Tx frames in addition to authenticating.
+ * @rp_en: non-zero to request replay protection (cached in context).
+ *
+ * Return: 0 on success, -ENODEV for an invalid port,
+ *         -ENOTSUP when the device is not an sxe device.
+ */
+s32 rte_pmd_sxe_macsec_enable(u16 port, u8 en, u8 rp_en)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_macsec_context user_macsec;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "port:%u not support macsec.", port);
+		goto l_out;
+	}
+
+	user_macsec.offload_en = true;
+	user_macsec.encrypt_en = en;
+	user_macsec.replayprotect_en = rp_en;
+
+	/* Save first so the adapter context reflects the active config. */
+	sxe_macsec_configure_save(dev, &user_macsec);
+	sxe_macsec_enable(dev, &user_macsec);
+
+l_out:
+	return ret;
+}
+
+/*
+ * Public PMD API: disable MACsec offload on @port and clear the
+ * cached configuration.
+ *
+ * Return: 0 on success, -ENODEV for an invalid port,
+ *         -ENOTSUP when the device is not an sxe device.
+ */
+s32 rte_pmd_sxe_macsec_disable(u16 port)
+{
+	struct rte_eth_dev *dev;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "port:%u not support macsec.", port);
+		goto l_out;
+	}
+
+	sxe_macsec_configure_reset(dev);
+	sxe_macsec_disable(dev);
+
+l_out:
+	return ret;
+}
+
+/*
+ * Public PMD API: program the Tx secure channel (SC) MAC address.
+ *
+ * @port: ethdev port id (validated).
+ * @mac:  6-byte MAC address of the Tx SC.
+ *
+ * Return: 0 on success, -ENODEV for an invalid port,
+ *         -ENOTSUP when the device is not an sxe device.
+ */
+s32 rte_pmd_sxe_macsec_txsc_configure(u16 port, u8 *mac)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+	u8 mac_addr[SXE_MAC_ADDR_LEN + 2];	/* +2 pad keeps the 32-bit read at [4] in bounds */
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "port:%u not support macsec.", port);
+		goto l_out;
+	}
+
+	/* NOTE(review): only SXE_MAC_ADDR_LEN (6) bytes are copied, so the
+	 * two pad bytes read below via (*(u32 *)&mac_addr[4]) are
+	 * uninitialized; presumably the hardware ignores the upper 16 bits
+	 * of that word -- confirm, or zero the pad explicitly.
+	 */
+	rte_memcpy(mac_addr, mac, SXE_MAC_ADDR_LEN);
+	adapter = dev->data->dev_private;
+	sxe_hw_macsec_txsc_set(&adapter->hw, (*(u32 *)mac_addr), (*(u32 *)&mac_addr[4]));
+
+	PMD_LOG_INFO(DRV, "tx sc mac_addr:"MAC_FMT" configure done",
+			 MAC_ADDR(mac_addr));
+l_out:
+	return ret;
+}
+
+/*
+ * Public PMD API: program the Rx secure channel (SC) identity.
+ *
+ * @port: ethdev port id (validated).
+ * @mac:  6-byte MAC address part of the Rx SC identifier.
+ * @pi:   port identifier part of the SCI.
+ *
+ * Return: 0 on success, -ENODEV for an invalid port,
+ *         -ENOTSUP when the device is not an sxe device.
+ */
+s32 rte_pmd_sxe_macsec_rxsc_configure(u16 port, u8 *mac, u16 pi)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+	u8 mac_addr[SXE_MAC_ADDR_LEN + 2];	/* +2 pad keeps the 32-bit read at [4] in bounds */
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "port:%u not support macsec.", port);
+		goto l_out;
+	}
+
+	/* NOTE(review): pad bytes [6..7] are uninitialized when read via
+	 * (*(u32 *)&mac_addr[4]); presumably masked by hardware -- confirm.
+	 */
+	rte_memcpy(mac_addr, mac, SXE_MAC_ADDR_LEN);
+	adapter = dev->data->dev_private;
+	sxe_hw_macsec_rxsc_set(&adapter->hw, (*(u32 *)mac_addr), (*(u32 *)&mac_addr[4]), pi);
+
+	PMD_LOG_INFO(DRV, "rx sc mac_addr:"MAC_FMT" pi:%u configure done",
+			 MAC_ADDR(mac_addr), pi);
+l_out:
+	return ret;
+}
+
+/*
+ * Public PMD API: configure a Tx secure association (SA).
+ *
+ * @port:   ethdev port id (validated).
+ * @sa_idx: SA slot, must be < SXE_LINKSEC_MAX_SA_COUNT.
+ * @an:     association number for this SA.
+ * @pn:     initial packet number (converted to big-endian for hardware).
+ * @keys:   SA key material, read as 32-bit words by the hardware layer.
+ *
+ * Return: 0 on success, -ENODEV for an invalid port, -ENOTSUP when
+ *         the device is not an sxe device, -EINVAL for a bad sa_idx.
+ */
+s32 rte_pmd_sxe_macsec_txsa_configure(u16 port, u8 sa_idx, u8 an,
+				 u32 pn, u8 *keys)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "port:%u not support macsec.(err:%d)", port, ret);
+		goto l_out;
+	}
+
+	if (sa_idx >= SXE_LINKSEC_MAX_SA_COUNT) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "port:%u sa_idx:%u invalid.(err:%d)",
+					port, sa_idx, ret);
+		goto l_out;
+	}
+
+	adapter = dev->data->dev_private;
+	sxe_hw_macsec_tx_sa_configure(&adapter->hw, sa_idx, an, rte_cpu_to_be_32(pn), (u32 *)keys);
+
+	PMD_LOG_INFO(DRV, "port:%u sa_idx:%u an:%u pn:0x%x keys:0x%x "
+			 "tx sa configure done",
+			 port, sa_idx, an, pn, *(u32 *)keys);
+
+l_out:
+	return ret;
+}
+
+/*
+ * Public PMD API: configure an Rx secure association (SA).
+ *
+ * Mirrors rte_pmd_sxe_macsec_txsa_configure() for the receive side:
+ * validates the port, device type and SA index, then programs the SA
+ * (packet number converted to big-endian for hardware).
+ *
+ * Return: 0 on success, -ENODEV for an invalid port, -ENOTSUP when
+ *         the device is not an sxe device, -EINVAL for a bad sa_idx.
+ */
+s32 rte_pmd_sxe_macsec_rxsa_configure(u16 port, u8 sa_idx, u8 an,
+				 u32 pn, u8 *keys)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "port:%u not support macsec.(err:%d)", port, ret);
+		goto l_out;
+	}
+
+	if (sa_idx >= SXE_LINKSEC_MAX_SA_COUNT) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "port:%u sa_idx:%u invalid.(err:%d)",
+				port, sa_idx, ret);
+		goto l_out;
+	}
+
+	adapter = dev->data->dev_private;
+	sxe_hw_macsec_rx_sa_configure(&adapter->hw, sa_idx, an, rte_cpu_to_be_32(pn), (u32 *)keys);
+
+	PMD_LOG_INFO(DRV, "port:%u sa_idx:%u an:%u pn:0x%x keys:0x%x "
+			 "rx sa configure done",
+			 port, sa_idx, an, pn, *(u32 *)keys);
+
+l_out:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_macsec.h b/drivers/net/sxe/pf/sxe_macsec.h
new file mode 100644
index 0000000000..5497ad360d
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_macsec.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DPDK_MACSEC_H__
+#define __SXE_DPDK_MACSEC_H__
+
+#include "sxe_types.h"
+
+/* User-requested MACsec configuration, cached per adapter. */
+struct sxe_macsec_context {
+	u8 offload_en;		/* non-zero when MACsec offload is enabled */
+	u8 encrypt_en;		/* non-zero: encrypt+authenticate Tx; zero: authenticate only */
+	u8 replayprotect_en;	/* non-zero when replay protection was requested */
+};
+
+void sxe_macsec_enable(struct rte_eth_dev *dev,
+				struct sxe_macsec_context *macsec_ctxt);
+
+#endif
+
diff --git a/drivers/net/sxe/pf/sxe_main.c b/drivers/net/sxe/pf/sxe_main.c
index 3f30f26508..d25b1d9250 100644
--- a/drivers/net/sxe/pf/sxe_main.c
+++ b/drivers/net/sxe/pf/sxe_main.c
@@ -45,7 +45,7 @@ static const struct rte_pci_id sxe_pci_tbl[] = {
 
 s8 g_log_filename[LOG_FILE_NAME_LEN] = {0};
 
-bool is_log_created = false;
+bool is_log_created;
 
 #ifdef SXE_DPDK_DEBUG
 void sxe_log_stream_init(void)
@@ -56,14 +56,13 @@ void sxe_log_stream_init(void)
 	u8 len;
 	s8 time[40];
 
-	if (is_log_created) {
-		goto l_out;
-	}
+	if (is_log_created)
+		return;
 
 	memset(g_log_filename, 0, LOG_FILE_NAME_LEN);
 
 	len = snprintf(g_log_filename, LOG_FILE_NAME_LEN, "%s%s.",
-		      LOG_FILE_PATH, LOG_FILE_PREFIX);
+			  LOG_FILE_PATH, LOG_FILE_PREFIX);
 
 	gettimeofday(&tv, NULL);
 	td = localtime(&tv.tv_sec);
@@ -75,8 +74,8 @@ void sxe_log_stream_init(void)
 	fp = fopen(g_log_filename, "w+");
 	if (fp == NULL) {
 		PMD_LOG_ERR(INIT, "open log file:%s fail, errno:%d %s.",
-			    g_log_filename, errno, strerror(errno));
-		goto l_out;
+				g_log_filename, errno, strerror(errno));
+		return;
 	}
 
 	PMD_LOG_NOTICE(INIT, "log stream file:%s.", g_log_filename);
@@ -85,8 +84,6 @@ void sxe_log_stream_init(void)
 
 	is_log_created = true;
 
-l_out:
-	return;
 }
 #endif
 
@@ -148,14 +145,14 @@ static s32 sxe_remove(struct rte_pci_device *pci_dev)
 	return ret;
 }
 
-STATIC struct rte_pci_driver rte_sxe_pmd = {
+static struct rte_pci_driver rte_sxe_pmd = {
 	.id_table  = sxe_pci_tbl,
 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-	.probe     = sxe_probe,
-	.remove    = sxe_remove,
+	.probe	 = sxe_probe,
+	.remove	= sxe_remove,
 };
 
-STATIC s32 sxe_mng_reset(struct sxe_hw *hw, bool enable)
+static s32 sxe_mng_reset(struct sxe_hw *hw, bool enable)
 {
 	s32 ret;
 	sxe_mng_rst_s mng_rst;
@@ -234,7 +231,6 @@ void sxe_hw_start(struct sxe_hw *hw)
 	hw->mac.auto_restart = true;
 	PMD_LOG_INFO(INIT, "auto_restart:%u.\n", hw->mac.auto_restart);
 
-	return;
 }
 
 static bool is_device_supported(struct rte_eth_dev *dev,
@@ -242,9 +238,8 @@ static bool is_device_supported(struct rte_eth_dev *dev,
 {
 	bool ret = true;
 
-	if (strcmp(dev->device->driver->name, drv->driver.name)) {
+	if (strcmp(dev->device->driver->name, drv->driver.name))
 		ret = false;
-	}
 
 	return ret;
 }
diff --git a/drivers/net/sxe/pf/sxe_offload.c b/drivers/net/sxe/pf/sxe_offload.c
index deea11451a..e47cf29330 100644
--- a/drivers/net/sxe/pf/sxe_offload.c
+++ b/drivers/net/sxe/pf/sxe_offload.c
@@ -15,7 +15,7 @@
 #include "sxe_queue_common.h"
 #include "sxe_offload_common.h"
 
-STATIC u8 rss_sxe_key[40] = {
+static u8 rss_sxe_key[40] = {
 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
@@ -29,7 +29,7 @@ STATIC u8 rss_sxe_key[40] = {
 #define SXE_8_BIT_MASK   UINT8_MAX
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
-u8* sxe_rss_hash_key_get(void)
+u8 *sxe_rss_hash_key_get(void)
 {
 	return rss_sxe_key;
 }
@@ -65,7 +65,6 @@ void sxe_rss_disable(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	sxe_hw_rss_cap_switch(hw, false);
-	return;
 }
 
 void sxe_rss_hash_set(struct sxe_hw *hw,
@@ -91,34 +90,28 @@ void sxe_rss_hash_set(struct sxe_hw *hw,
 	}
 
 	rss_hf = rss_conf->rss_hf;
-	if (rss_hf & RTE_ETH_RSS_IPV4) {
+	if (rss_hf & RTE_ETH_RSS_IPV4)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV4;
-	}
 
-	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_TCP;
-	}
 
-	if (rss_hf & RTE_ETH_RSS_IPV6) {
+	if (rss_hf & RTE_ETH_RSS_IPV6)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV6;
-	}
 
-	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_TCP;
-	}
 
-	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
-	}
 
-	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
+	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 		rss_field |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
-	}
+
 	sxe_hw_rss_field_set(hw, rss_field);
 
 	sxe_hw_rss_cap_switch(hw, true);
 
-	return;
 }
 
 void sxe_rss_configure(struct rte_eth_dev *dev)
@@ -128,15 +121,14 @@ void sxe_rss_configure(struct rte_eth_dev *dev)
 	struct sxe_hw *hw = &adapter->hw;
 	u16 i;
 	u16 j;
-	u8  rss_indir_tbl[SXE_MAX_RETA_ENTRIES];    
+	u8  rss_indir_tbl[SXE_MAX_RETA_ENTRIES];
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (adapter->rss_reta_updated == false) {
 		for (i = 0, j = 0; i < SXE_MAX_RETA_ENTRIES; i++, j++) {
-			if (j == dev->data->nb_rx_queues) {
+			if (j == dev->data->nb_rx_queues)
 				j = 0;
-			}
 
 			rss_indir_tbl[i] = j;
 		}
@@ -148,17 +140,14 @@ void sxe_rss_configure(struct rte_eth_dev *dev)
 	if ((rss_conf->rss_hf & SXE_RSS_OFFLOAD_ALL) == 0) {
 		PMD_LOG_INFO(INIT, "user rss config match hw supports is 0");
 		sxe_rss_disable(dev);
-		goto l_end;
+		return;
 	}
 
-	if (rss_conf->rss_key == NULL) {
-		rss_conf->rss_key = rss_sxe_key; 
-	}
+	if (rss_conf->rss_key == NULL)
+		rss_conf->rss_key = rss_sxe_key;
 
 	sxe_rss_hash_set(hw, rss_conf);
 
-l_end:
-	return;
 }
 
 s32 sxe_rss_reta_update(struct rte_eth_dev *dev,
@@ -197,15 +186,13 @@ s32 sxe_rss_reta_update(struct rte_eth_dev *dev,
 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (u8)((reta_conf[idx].mask >> shift) &
 						SXE_4_BIT_MASK);
-		if (!mask) {
+		if (!mask)
 			continue;
-		}
 
-		if (mask == SXE_4_BIT_MASK) {
+		if (mask == SXE_4_BIT_MASK)
 			r = 0;
-		} else {
+		else
 			r = sxe_hw_rss_redir_tbl_get_by_idx(hw, i);
-		}
 
 		for (j = 0, reta = 0; j < SXE_4_BIT_WIDTH; j++) {
 			if (mask & (0x1 << j)) {
@@ -251,9 +238,8 @@ s32 sxe_rss_reta_query(struct rte_eth_dev *dev,
 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (u8)((reta_conf[idx].mask >> shift) &
 						SXE_4_BIT_MASK);
-		if (!mask) {
+		if (!mask)
 			continue;
-		}
 
 		reta = sxe_hw_rss_redir_tbl_get_by_idx(hw, i);
 		for (j = 0; j < SXE_4_BIT_WIDTH; j++) {
@@ -280,7 +266,7 @@ s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
 	rss_hf = (rss_conf->rss_hf & SXE_RSS_OFFLOAD_ALL);
 
 	if (!sxe_hw_is_rss_enabled(hw)) {
-		if (rss_hf != 0){
+		if (rss_hf != 0) {
 			PMD_LOG_ERR(DRV, "rss not init but want set");
 			ret = -EINVAL;
 			goto l_end;
@@ -289,7 +275,7 @@ s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
 		goto l_end;
 	}
 
-	if (rss_hf == 0){
+	if (rss_hf == 0) {
 		PMD_LOG_ERR(DRV, "rss init but want disable it");
 		ret = -EINVAL;
 		goto l_end;
@@ -302,7 +288,7 @@ s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
 }
 
 s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
-			    struct rte_eth_rss_conf *rss_conf)
+				struct rte_eth_rss_conf *rss_conf)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -332,29 +318,23 @@ s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
 
 	rss_hf = 0;
 	rss_field = sxe_hw_rss_field_get(hw);
-	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4) {
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4)
 		rss_hf |= RTE_ETH_RSS_IPV4;
-	}
 
-	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_TCP) {
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_TCP)
 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
-	}
 
-	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_UDP) {
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_UDP)
 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
-	}
 
-	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6) {
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6)
 		rss_hf |= RTE_ETH_RSS_IPV6;
-	}
 
-	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_TCP) {
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_TCP)
 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
-	}
 
-	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_UDP) {
+	if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_UDP)
 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
-	}
 
 	PMD_LOG_DEBUG(DRV, "got rss hash func=0x%"SXE_PRIX64, rss_hf);
 	rss_conf->rss_hf = rss_hf;
diff --git a/drivers/net/sxe/pf/sxe_offload.h b/drivers/net/sxe/pf/sxe_offload.h
index d1f651feb6..458b6464c5 100644
--- a/drivers/net/sxe/pf/sxe_offload.h
+++ b/drivers/net/sxe/pf/sxe_offload.h
@@ -16,7 +16,7 @@
 		RTE_ETH_RSS_NONFRAG_IPV6_UDP)
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
-u8* sxe_rss_hash_key_get(void);
+u8 *sxe_rss_hash_key_get(void);
 #endif
 
 void sxe_rss_hash_set(struct sxe_hw *hw,
@@ -46,6 +46,6 @@ s32 sxe_rss_hash_update(struct rte_eth_dev *dev,
 			struct rte_eth_rss_conf *rss_conf);
 
 s32 sxe_rss_hash_conf_get(struct rte_eth_dev *dev,
-			    struct rte_eth_rss_conf *rss_conf);
+				struct rte_eth_rss_conf *rss_conf);
 
 #endif
diff --git a/drivers/net/sxe/pf/sxe_phy.c b/drivers/net/sxe/pf/sxe_phy.c
index 595bbcbc25..fade4bbf90 100644
--- a/drivers/net/sxe/pf/sxe_phy.c
+++ b/drivers/net/sxe/pf/sxe_phy.c
@@ -30,16 +30,16 @@
 #include "sxe_compat_version.h"
 
 #define SXE_WAIT_LINK_UP_FAILED	1
-#define SXE_WARNING_TIMEOUT	9000 
-#define SXE_CHG_SFP_RATE_MS     40   
-#define SXE_1G_WAIT_PCS_MS      100  
-#define SXE_10G_WAIT_PCS_MS     100  
-#define SXE_HZ_TRANSTO_MS       1000
-#define SXE_AN_COMPLETE_TIME    5    
-#define SXE_10G_WAIT_13_TIME    13   
-#define SXE_10G_WAIT_5_TIME     5    
-
-STATIC void *sxe_setup_link_thread_handler(void *param)
+#define SXE_WARNING_TIMEOUT	9000
+#define SXE_CHG_SFP_RATE_MS	 40
+#define SXE_1G_WAIT_PCS_MS	  100
+#define SXE_10G_WAIT_PCS_MS	 100
+#define SXE_HZ_TRANSTO_MS	   1000
+#define SXE_AN_COMPLETE_TIME	5
+#define SXE_10G_WAIT_13_TIME	13
+#define SXE_10G_WAIT_5_TIME	 5
+
+static void *sxe_setup_link_thread_handler(void *param)
 {
 	s32 ret;
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -59,14 +59,13 @@ STATIC void *sxe_setup_link_thread_handler(void *param)
 	speed = (conf_speeds & allowed_speeds) ? (conf_speeds & allowed_speeds) :
 		allowed_speeds;
 
-	if (adapter->phy_ctxt.sfp_info.multispeed_fiber) {
+	if (adapter->phy_ctxt.sfp_info.multispeed_fiber)
 		ret = sxe_multispeed_sfp_link_configure(dev, speed, true);
-	} else {
+	else
 		ret = sxe_sfp_link_configure(dev);
-	}
-	if (ret) {
+
+	if (ret)
 		PMD_LOG_ERR(INIT, "link setup failed, ret=%d", ret);
-	}
 
 	irq->action &= ~SXE_IRQ_LINK_CONFIG;
 	rte_atomic32_clear(&adapter->link_thread_running);
@@ -84,17 +83,15 @@ void sxe_wait_setup_link_complete(struct rte_eth_dev *dev,
 		timeout--;
 
 		if (timeout_ms) {
-			if (!timeout) {
-				goto l_end;
-			}
+			if (!timeout)
+				return;
+
 		} else if (!timeout) {
 			timeout = SXE_WARNING_TIMEOUT;
 			PMD_LOG_ERR(INIT, "link thread not complete too long time!");
 		}
 	}
 
-l_end:
-	return;
 }
 
 static s32 sxe_an_cap_get(struct sxe_adapter *adapter, sxe_an_cap_s *an_cap)
@@ -105,9 +102,8 @@ static s32 sxe_an_cap_get(struct sxe_adapter *adapter, sxe_an_cap_s *an_cap)
 	ret = sxe_driver_cmd_trans(hw, SXE_CMD_AN_CAP_GET,
 				NULL, 0,
 				(void *)an_cap, sizeof(*an_cap));
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "hdc trans failed ret=%d, cmd:negotiaton cap get", ret);
-	}
 
 	return ret;
 }
@@ -136,9 +132,8 @@ s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		goto l_end;
 	}
 
-	if (dev->data->dev_conf.intr_conf.lsc) {
+	if (dev->data->dev_conf.intr_conf.lsc)
 		wait_to_complete = 0;
-	}
 
 	sxe_link_info_get(adapter, &link_speed, &orig_link_up);
 	sxe_link_info_get(adapter, &link_speed, &link_up);
@@ -151,9 +146,8 @@ s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 
 	if (wait_to_complete) {
 		for (i = 0; i < SXE_LINK_UP_TIME; i++) {
-			if (link_up == true) {
+			if (link_up == true)
 				break;
-			}
 
 			rte_delay_us_sleep(100000);
 
@@ -199,9 +193,8 @@ s32 sxe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		} else {
 			for (i = 0; i < SXE_AN_COMPLETE_TIME; i++) {
 				sxe_an_cap_get(adapter, &an_cap);
-				if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN) {
+				if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN)
 					break;
-				}
 				rte_delay_us_sleep(100000);
 			}
 		}
@@ -252,9 +245,9 @@ s32 sxe_link_status_update(struct rte_eth_dev *dev)
 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
 		for (i = 0; i < SXE_AN_COMPLETE_TIME; i++) {
 			sxe_an_cap_get(adapter, &an_cap);
-			if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN) {
+			if (an_cap.peer.remote_fault != SXE_REMOTE_UNKNOWN)
 				break;
-			}
+
 			rte_delay_us_sleep(100000);
 		}
 		break;
@@ -306,7 +299,7 @@ int sxe_dev_set_link_down(struct rte_eth_dev *dev)
 }
 
 
-STATIC s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset,
+static s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset,
 					u16 len, u8 *data)
 {
 	s32 ret;
@@ -392,21 +385,19 @@ void sxe_sfp_tx_laser_enable(struct sxe_adapter *adapter)
 {
 	sxe_sfp_tx_laser_ctrl(adapter, false);
 
-	return;
 }
 
 void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter)
 {
 	sxe_sfp_tx_laser_ctrl(adapter, true);
 
-	return;
 }
 
 s32 sxe_sfp_reset(struct sxe_adapter *adapter)
 {
 	PMD_LOG_INFO(INIT, "auto_restart:%u.\n", adapter->hw.mac.auto_restart);
 
-	if(adapter->hw.mac.auto_restart) {
+	if (adapter->hw.mac.auto_restart) {
 		sxe_sfp_tx_laser_disable(adapter);
 		sxe_sfp_tx_laser_enable(adapter);
 		adapter->hw.mac.auto_restart = false;
@@ -423,7 +414,7 @@ void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
 	*speed = 0;
 
 	if (sfp->type == SXE_SFP_TYPE_1G_CU ||
-	    sfp->type == SXE_SFP_TYPE_1G_SXLX ) {
+		sfp->type == SXE_SFP_TYPE_1G_SXLX) {
 		*speed = SXE_LINK_SPEED_1GB_FULL;
 		*autoneg = true;
 		goto l_end;
@@ -439,7 +430,6 @@ void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed,
 
 l_end:
 	PMD_LOG_INFO(INIT, "sfp link speed cap=%d", *speed);
-	return;
 }
 
 s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate)
@@ -453,22 +443,29 @@ s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate)
 	ret = sxe_driver_cmd_trans(hw, SXE_CMD_RATE_SELECT,
 				&rate_able, sizeof(rate_able),
 				NULL, 0);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "sfp rate select failed, ret=%d", ret);
-	}
 
 	PMD_LOG_INFO(INIT, "sfp tx rate select end, rate=%d", rate);
 
 	return ret;
 }
 
-s32 sxe_pcs_sds_init(struct sxe_adapter *adapter,
+s32 sxe_pcs_sds_init(struct rte_eth_dev *dev,
 				sxe_pcs_mode_e mode, u32 max_frame)
 {
 	s32 ret;
+	bool keep_crc = false;
 	sxe_pcs_cfg_s pcs_cfg;
+	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
 	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		keep_crc = true;
+	}
+	sxe_hw_crc_strip_config(hw, keep_crc);
 
 	pcs_cfg.mode = mode;
 	pcs_cfg.mtu  = max_frame;
@@ -486,7 +483,7 @@ s32 sxe_pcs_sds_init(struct sxe_adapter *adapter,
 	sxe_fc_mac_addr_set(adapter);
 
 	LOG_INFO_BDF("mode:%u max_frame:0x%x pcs sds init done.\n",
-		     mode, max_frame);
+			 mode, max_frame);
 l_end:
 	return ret;
 }
@@ -510,15 +507,14 @@ s32 sxe_conf_speed_get(struct rte_eth_dev *dev, u32 *conf_speeds)
 
 	*conf_speeds = SXE_LINK_SPEED_UNKNOWN;
 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
-		*conf_speeds = SXE_LINK_SPEED_1GB_FULL | \
+		*conf_speeds = SXE_LINK_SPEED_1GB_FULL |
 				 SXE_LINK_SPEED_10GB_FULL;
 	} else {
-		if (*link_speeds & RTE_ETH_LINK_SPEED_10G) {
+		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
 			*conf_speeds |= SXE_LINK_SPEED_10GB_FULL;
-		}
-		if (*link_speeds & RTE_ETH_LINK_SPEED_1G) {
+
+		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
 			*conf_speeds |= SXE_LINK_SPEED_1GB_FULL;
-		}
 	}
 
 l_end:
@@ -556,11 +552,10 @@ s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool i
 
 		rte_delay_us_sleep((SXE_CHG_SFP_RATE_MS * SXE_HZ_TRANSTO_MS));
 
-		ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO,
+		ret = sxe_pcs_sds_init(dev, SXE_PCS_MODE_10GBASE_KR_WO,
 						frame_size);
-		if (ret) {
+		if (ret)
 			goto l_end;
-		}
 
 
 		for (i = 0; i < wait_time; i++) {
@@ -581,9 +576,8 @@ s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool i
 		irq->to_pcs_init = true;
 
 		speedcnt++;
-		if (highest_link_speed == SXE_LINK_SPEED_UNKNOWN) {
+		if (highest_link_speed == SXE_LINK_SPEED_UNKNOWN)
 			highest_link_speed = SXE_LINK_SPEED_1GB_FULL;
-		}
 
 		ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_1G);
 		if (ret) {
@@ -593,11 +587,10 @@ s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool i
 
 		rte_delay_us_sleep((SXE_CHG_SFP_RATE_MS * SXE_HZ_TRANSTO_MS));
 
-		ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_1000BASE_KX_W,
+		ret = sxe_pcs_sds_init(dev, SXE_PCS_MODE_1000BASE_KX_W,
 						frame_size);
-		if (ret) {
+		if (ret)
 			goto l_end;
-		}
 
 
 		rte_delay_us_sleep(SXE_1G_WAIT_PCS_MS * SXE_HZ_TRANSTO_MS);
@@ -613,20 +606,18 @@ s32 sxe_multispeed_sfp_link_configure(struct rte_eth_dev *dev, u32 speed, bool i
 		PMD_LOG_WARN(INIT, "1G link cfg failed, retry...");
 	}
 
-	if (speedcnt > 1) {
+	if (speedcnt > 1)
 		ret = sxe_multispeed_sfp_link_configure(dev, highest_link_speed, is_in_thread);
-	}
+
 l_out:
 
 	adapter->phy_ctxt.autoneg_advertised = 0;
 
-	if (speed & SXE_LINK_SPEED_10GB_FULL) {
+	if (speed & SXE_LINK_SPEED_10GB_FULL)
 		adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_10GB_FULL;
-	}
 
-	if (speed & SXE_LINK_SPEED_1GB_FULL) {
+	if (speed & SXE_LINK_SPEED_1GB_FULL)
 		adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_1GB_FULL;
-	}
 
 l_end:
 	return ret;
@@ -645,7 +636,6 @@ void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_
 		*link_speed = sxe_hw_link_speed_get(hw);
 	}
 
-	return;
 }
 
 static s32 sxe_sfp_fc_autoneg(struct sxe_adapter *adapter)
@@ -704,18 +694,17 @@ static void sxe_fc_autoneg(struct sxe_adapter *adapter)
 		goto l_end;
 	}
 
-	if(link_speed != SXE_LINK_SPEED_1GB_FULL){
-		PMD_LOG_INFO(INIT, "link speed=%x, (0x80=10G, 0x20=1G), dont fc autoneg", link_speed);
+	if (link_speed != SXE_LINK_SPEED_1GB_FULL) {
+		PMD_LOG_INFO(INIT, "link speed=%x, (0x80=10G, 0x20=1G), "
+				"dont fc autoneg", link_speed);
 		goto l_end;
 	}
 
 	ret = sxe_sfp_fc_autoneg(adapter);
 l_end:
-	if(ret) {
+	if (ret)
 		hw->fc.current_mode = hw->fc.requested_mode;
-	}
 
-	return;
 }
 
 s32 sxe_fc_enable(struct sxe_adapter *adapter)
@@ -732,9 +721,9 @@ s32 sxe_fc_enable(struct sxe_adapter *adapter)
 
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
-		    hw->fc.high_water[i]) {
+			hw->fc.high_water[i]) {
 			if (!hw->fc.low_water[i] ||
-			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				hw->fc.low_water[i] >= hw->fc.high_water[i]) {
 				PMD_LOG_DEBUG(INIT, "invalid water mark configuration, "
 					"tc[%u] low_water=%u, high_water=%u",
 					i, hw->fc.low_water[i],
@@ -751,9 +740,8 @@ s32 sxe_fc_enable(struct sxe_adapter *adapter)
 	sxe_fc_autoneg(adapter);
 
 	ret = sxe_hw_fc_enable(hw);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "link fc enable failed, ret=%d", ret);
-	}
 
 l_end:
 	return ret;
@@ -787,9 +775,8 @@ s32 sxe_pfc_enable(struct sxe_adapter *adapter, u8 tc_idx)
 	sxe_fc_autoneg(adapter);
 
 	ret = sxe_hw_pfc_enable(hw, tc_idx);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "link fc enable failed, ret=%d", ret);
-	}
 
 l_ret:
 	return ret;
@@ -828,15 +815,15 @@ s32 sxe_sfp_identify(struct sxe_adapter *adapter)
 
 	if (sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY] & SXE_SFF_DA_PASSIVE_CABLE) {
 		sfp_type = SXE_SFP_TYPE_DA_CU;
-	}  else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+	}  else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] &
 		(SXE_SFF_10GBASESR_CAPABLE | SXE_SFF_10GBASELR_CAPABLE)) {
 		sfp_type = SXE_SFP_TYPE_SRLR;
-	} else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+	} else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
 		SXE_SFF_1GBASET_CAPABLE) {
 		sfp_type = SXE_SFP_TYPE_1G_CU;
-	} else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
-		SXE_SFF_1GBASESX_CAPABLE) || \
-		(sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+	} else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
+		SXE_SFF_1GBASESX_CAPABLE) ||
+		(sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
 		SXE_SFF_1GBASELX_CAPABLE)) {
 		sfp_type = SXE_SFP_TYPE_1G_SXLX;
 	} else {
@@ -844,13 +831,13 @@ s32 sxe_sfp_identify(struct sxe_adapter *adapter)
 	}
 
 	sfp->multispeed_fiber = false;
-	if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+	if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
 			SXE_SFF_1GBASESX_CAPABLE) &&
-		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] &
 			SXE_SFF_10GBASESR_CAPABLE)) ||
-		((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \
+		((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] &
 			SXE_SFF_1GBASELX_CAPABLE) &&
-		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \
+		(sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] &
 			SXE_SFF_10GBASELR_CAPABLE))) {
 		sfp->multispeed_fiber = true;
 	}
@@ -874,22 +861,20 @@ s32 sxe_sfp_link_configure(struct rte_eth_dev *dev)
 
 	sxe_sfp_link_capabilities_get(adapter, &speed, &an);
 
-	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+	if (speed == SXE_LINK_SPEED_1GB_FULL) {
 		pcs_mode = SXE_PCS_MODE_1000BASE_KX_W;
 		adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_1GB_FULL;
-	} else if (SXE_LINK_SPEED_10GB_FULL == speed) {
+	} else if (speed == SXE_LINK_SPEED_10GB_FULL) {
 		pcs_mode = SXE_PCS_MODE_10GBASE_KR_WO;
 		adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_10GB_FULL;
 	}
 
-	ret = sxe_pcs_sds_init(adapter, pcs_mode, frame_size);
-	if (ret) {
+	ret = sxe_pcs_sds_init(dev, pcs_mode, frame_size);
+	if (ret)
 		PMD_LOG_ERR(INIT, "pcs sds init failed, ret=%d", ret);
-	}
 
-	if (SXE_LINK_SPEED_1GB_FULL == speed) {
+	if (speed == SXE_LINK_SPEED_1GB_FULL)
 		sxe_link_status_update(dev);
-	}
 
 	PMD_LOG_INFO(INIT, "link :cfg speed=%x, pcs_mode=%x, atuoreg=%d",
 					speed, pcs_mode, an);
@@ -926,7 +911,7 @@ int sxe_get_module_info(struct rte_eth_dev *dev,
 		page_swap = true;
 	}
 
-	if ((sff8472_rev == SXE_SFF_8472_UNSUP) || page_swap || \
+	if ((sff8472_rev == SXE_SFF_8472_UNSUP) || page_swap ||
 			!(addr_mode & SXE_SFF_DDM_IMPLEMENTED)) {
 		info->type = RTE_ETH_MODULE_SFF_8079;
 		info->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
@@ -980,11 +965,10 @@ s32 sxe_phy_init(struct sxe_adapter *adapter)
 	s32 ret = 0;
 	enum sxe_media_type media_type = sxe_media_type_get(adapter);
 
-	if (SXE_MEDIA_TYPE_FIBER == media_type) {
+	if (media_type == SXE_MEDIA_TYPE_FIBER) {
 		ret = sxe_sfp_identify(adapter);
-		if (ret) {
+		if (ret)
 			PMD_LOG_ERR(INIT, "phy identify failed, ret=%d", ret);
-		}
 	} else {
 		PMD_LOG_ERR(INIT, "phy init failed, only support SFP.");
 	}
diff --git a/drivers/net/sxe/pf/sxe_phy.h b/drivers/net/sxe/pf/sxe_phy.h
index b0ec2388b9..3d4328be31 100644
--- a/drivers/net/sxe/pf/sxe_phy.h
+++ b/drivers/net/sxe/pf/sxe_phy.h
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C), 2022, Linkdata Technology Co., Ltd.
  */
-#ifndef  __SXE_PHY_H__
-#define  __SXE_PHY_H__
+#ifndef __SXE_PHY_H__
+#define __SXE_PHY_H__
 
 #include <rte_ethdev.h>
 #include "drv_msg.h"
@@ -10,17 +10,17 @@
 #include "sxe_msg.h"
 
 #define SXE_SFF_BASE_ADDR			0x0
-#define SXE_SFF_IDENTIFIER			0x0  
-#define SXE_SFF_10GBE_COMP_CODES		0x3  
-#define SXE_SFF_1GBE_COMP_CODES			0x6  
-#define SXE_SFF_CABLE_TECHNOLOGY		0x8  
-#define SXE_SFF_8472_DIAG_MONITOR_TYPE		0x5C 
-#define SXE_SFF_8472_COMPLIANCE			0x5E 
+#define SXE_SFF_IDENTIFIER			0x0
+#define SXE_SFF_10GBE_COMP_CODES		0x3
+#define SXE_SFF_1GBE_COMP_CODES			0x6
+#define SXE_SFF_CABLE_TECHNOLOGY		0x8
+#define SXE_SFF_8472_DIAG_MONITOR_TYPE		0x5C
+#define SXE_SFF_8472_COMPLIANCE			0x5E
 
 #define SXE_SFF_IDENTIFIER_SFP			0x3
-#define SXE_SFF_ADDRESSING_MODE			0x4  
+#define SXE_SFF_ADDRESSING_MODE			0x4
 #define SXE_SFF_8472_UNSUP			0x0
-#define SXE_SFF_DDM_IMPLEMENTED			0x40 
+#define SXE_SFF_DDM_IMPLEMENTED			0x40
 #define SXE_SFF_DA_PASSIVE_CABLE		0x4
 #define SXE_SFF_DA_ACTIVE_CABLE			0x8
 #define SXE_SFF_DA_SPEC_ACTIVE_LIMITING		0x4
@@ -30,16 +30,16 @@
 #define SXE_SFF_10GBASESR_CAPABLE		0x10
 #define SXE_SFF_10GBASELR_CAPABLE		0x20
 
-#define SXE_SFP_COMP_CODE_SIZE			10  
-#define SXE_SFP_EEPROM_SIZE_MAX			512 
+#define SXE_SFP_COMP_CODE_SIZE			10
+#define SXE_SFP_EEPROM_SIZE_MAX			512
 
-#define SXE_IRQ_LINK_UPDATE      (u32)(1 << 0)
-#define SXE_IRQ_LINK_CONFIG      (u32)(1 << 3)
+#define SXE_IRQ_LINK_UPDATE	  (u32)(1 << 0)
+#define SXE_IRQ_LINK_CONFIG	  (u32)(1 << 3)
 struct sxe_adapter;
 
 enum sxe_media_type {
 	SXE_MEDIA_TYPE_UNKWON = 0,
-	SXE_MEDIA_TYPE_FIBER  = 1, 
+	SXE_MEDIA_TYPE_FIBER  = 1,
 };
 
 enum sxe_phy_idx {
@@ -48,24 +48,24 @@ enum sxe_phy_idx {
 };
 
 enum sxe_sfp_type {
-	SXE_SFP_TYPE_DA_CU       = 0, 
-	SXE_SFP_TYPE_SRLR        = 1, 
-	SXE_SFP_TYPE_1G_CU       = 2, 
-	SXE_SFP_TYPE_1G_SXLX     = 4, 
-	SXE_SFP_TYPE_UNKNOWN     = 0xFFFF ,
+	SXE_SFP_TYPE_DA_CU	= 0,
+	SXE_SFP_TYPE_SRLR	= 1,
+	SXE_SFP_TYPE_1G_CU	= 2,
+	SXE_SFP_TYPE_1G_SXLX	= 4,
+	SXE_SFP_TYPE_UNKNOWN	= 0xFFFF,
 };
 
 struct sxe_sfp_info {
 	enum sxe_sfp_type	type;
-	bool			multispeed_fiber; 
+	bool			multispeed_fiber;
 };
 
 struct sxe_phy_context {
-	bool is_sfp;                  
-	bool sfp_tx_laser_disabled;   
-	u32  speed;                   
-	u32  autoneg_advertised;      
-	struct sxe_sfp_info sfp_info; 
+	bool is_sfp;
+	bool sfp_tx_laser_disabled;
+	u32  speed;
+	u32  autoneg_advertised;
+	struct sxe_sfp_info sfp_info;
 };
 
 s32 sxe_phy_init(struct sxe_adapter *adapter);
@@ -93,7 +93,7 @@ int sxe_get_module_eeprom(struct rte_eth_dev *dev,
 s32 sxe_sfp_identify(struct sxe_adapter *adapter);
 s32 sxe_sfp_reset(struct sxe_adapter *adapter);
 
-s32 sxe_pcs_sds_init(struct sxe_adapter *adapter, 
+s32 sxe_pcs_sds_init(struct rte_eth_dev *dev,
 				sxe_pcs_mode_e mode, u32 max_frame);
 
 s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate);
@@ -115,7 +115,4 @@ s32 sxe_sfp_link_configure(struct rte_eth_dev *dev);
 
 void sxe_mac_configure(struct sxe_adapter *adapter);
 
-s32 sxe_pcs_sds_init(struct sxe_adapter *adapter, sxe_pcs_mode_e mode,
-			     u32 max_frame);
-
 #endif
diff --git a/drivers/net/sxe/pf/sxe_pmd_hdc.c b/drivers/net/sxe/pf/sxe_pmd_hdc.c
index 9137776a01..39fc782b2d 100644
--- a/drivers/net/sxe/pf/sxe_pmd_hdc.c
+++ b/drivers/net/sxe/pf/sxe_pmd_hdc.c
@@ -42,13 +42,11 @@ void sxe_hdc_channel_init(void)
 {
 	s32 ret;
 	ret = sem_init(sxe_hdc_sema_get(), 0, 1);
-	if (ret) {
-		PMD_LOG_ERR(INIT, "hdc sem init failed,ret=%d",ret);
-	}
+	if (ret)
+		PMD_LOG_ERR(INIT, "hdc sem init failed,ret=%d", ret);
 
 	sxe_trace_id_gen();
 
-	return;
 }
 
 void sxe_hdc_channel_uninit(void)
@@ -56,7 +54,6 @@ void sxe_hdc_channel_uninit(void)
 	sem_destroy(sxe_hdc_sema_get());
 	sxe_trace_id_clean();
 
-	return;
 }
 
 static s32 sxe_fw_time_sync_process(struct sxe_hw *hw)
@@ -69,9 +66,8 @@ static s32 sxe_fw_time_sync_process(struct sxe_hw *hw)
 	ret = sxe_driver_cmd_trans(hw, SXE_CMD_TINE_SYNC,
 				(void *)&timestamp, sizeof(timestamp),
 				NULL, 0);
-	if (ret) {
-		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:time sync\n",ret);
-	}
+	if (ret)
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:time sync\n", ret);
 
 	return ret;
 }
@@ -85,14 +81,14 @@ s32 sxe_fw_time_sync(struct sxe_hw *hw)
 
 	status = sxe_hw_hdc_fw_status_get(hw);
 	if (status != SXE_FW_START_STATE_FINISHED) {
-		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good",hw, status);
+		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good", hw, status);
 		ret = -SXE_FW_STATUS_ERR;
 		goto l_ret;
 	}
 
 	ret_v = sxe_fw_time_sync_process(hw);
 	if (ret_v) {
-		LOG_WARN_BDF("fw time sync failed, ret_v=%d\n",ret_v);
+		LOG_WARN_BDF("fw time sync failed, ret_v=%d\n", ret_v);
 		goto l_ret;
 	}
 
@@ -108,7 +104,6 @@ static inline s32 sxe_hdc_lock_get(struct sxe_hw *hw)
 static inline void sxe_hdc_lock_release(struct sxe_hw *hw)
 {
 	sxe_hw_hdc_lock_release(hw, SXE_HDC_RELEASELOCK_MAX);
-	return;
 }
 
 static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout)
@@ -120,11 +115,10 @@ static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout)
 
 	for (i = 0; i < timeout; i++) {
 		fw_ov = sxe_hw_hdc_is_fw_over_set(hw);
-		if (fw_ov) {
+		if (fw_ov)
 			break;
-		}
 
-		msleep(10);
+		mdelay(10);
 	}
 
 	if (i >= timeout) {
@@ -142,13 +136,12 @@ static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout)
 static inline void hdc_channel_clear(struct sxe_hw *hw)
 {
 	sxe_hw_hdc_fw_ov_clear(hw);
-	return;
 }
 
 static s32 hdc_packet_ack_get(struct sxe_hw *hw, u64 trace_id,
 				HdcHeader_u *pkt_header)
 {
-	s32 ret     = 0;
+	s32 ret = 0;
 	u32 timeout = SXE_HDC_WAIT_TIME;
 	struct sxe_adapter *adapter = hw->adapter;
 	UNUSED(trace_id);
@@ -163,7 +156,7 @@ static s32 hdc_packet_ack_get(struct sxe_hw *hw, u64 trace_id,
 		goto l_out;
 	}
 
-	pkt_header->dw0 = sxe_hw_hdc_fw_ack_header_get(hw);;
+	pkt_header->dw0 = sxe_hw_hdc_fw_ack_header_get(hw);
 	if (pkt_header->head.errCode == PKG_ERR_PKG_SKIP) {
 		ret = -SXE_HDC_PKG_SKIP_ERR;
 		goto l_out;
@@ -189,9 +182,8 @@ static void hdc_packet_header_fill(HdcHeader_u *pkt_header,
 
 	pkt_header->head.totalLen = SXE_HDC_LEN_TO_REG(total_len);
 
-	if (pkt_index == 0 && is_read == 0) {
+	if (pkt_index == 0 && is_read == 0)
 		pkt_header->head.startPkg = SXE_HDC_BIT_1;
-	}
 
 	if (pkt_index == (pkt_num - 1)) {
 		pkt_header->head.endPkg = SXE_HDC_BIT_1;
@@ -204,37 +196,33 @@ static void hdc_packet_header_fill(HdcHeader_u *pkt_header,
 	pkt_header->head.isRd = is_read;
 	pkt_header->head.msi = 0;
 
-	return ;
 }
 
 static inline void hdc_packet_send_done(struct sxe_hw *hw)
 {
 	sxe_hw_hdc_packet_send_done(hw);
-	return;
 }
 
 static inline void hdc_packet_header_send(struct sxe_hw *hw,
 							u32 header)
 {
 	sxe_hw_hdc_packet_header_send(hw, header);
-	return;
 }
 
 static inline void hdc_packet_data_dword_send(struct sxe_hw *hw,
 						u16 dword_index, u32 value)
 {
 	sxe_hw_hdc_packet_data_dword_send(hw, dword_index, value);
-	return;
 }
 
 static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id,
 			HdcHeader_u *pkt_header, u8 *data,
 			u16 data_len)
 {
-	u16          dw_idx   = 0;
-	u16          pkt_len       = 0;
-	u16          offset        = 0;
-	u32          pkg_data      = 0;
+	u16 dw_idx = 0;
+	u16 pkt_len = 0;
+	u16 offset = 0;
+	u32 pkg_data = 0;
 	struct sxe_adapter *adapter = hw->adapter;
 	UNUSED(trace_id);
 
@@ -244,9 +232,8 @@ static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id,
 
 	hdc_packet_header_send(hw, pkt_header->dw0);
 
-	if (data == NULL || data_len == 0) {
-	    goto l_send_done;
-	}
+	if (data == NULL || data_len == 0)
+		goto l_send_done;
 
 	pkt_len = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
 	for (dw_idx = 0; dw_idx < pkt_len; dw_idx++) {
@@ -273,7 +260,6 @@ static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id,
 
 	hdc_packet_send_done(hw);
 
-	return;
 }
 
 static inline u32 hdc_packet_data_dword_rcv(struct sxe_hw *hw,
@@ -286,16 +272,16 @@ static void hdc_resp_data_rcv(struct sxe_hw *hw, u64 trace_id,
 				HdcHeader_u *pkt_header, u8 *out_data,
 				u16 out_len)
 {
-	u16          dw_idx      = 0;
-	u16          dw_num      = 0;
-	u16          offset = 0;
-	u32          pkt_data;
+	u16 dw_idx = 0;
+	u16 dw_num = 0;
+	u16 offset = 0;
+	u32 pkt_data;
 	struct sxe_adapter *adapter = hw->adapter;
 	UNUSED(trace_id);
 
 	dw_num = SXE_HDC_LEN_FROM_REG(pkt_header->head.len);
 	for (dw_idx = 0; dw_idx < dw_num; dw_idx++) {
-		pkt_data= hdc_packet_data_dword_rcv(hw, dw_idx);
+		pkt_data = hdc_packet_data_dword_rcv(hw, dw_idx);
 		offset = dw_idx * ONE_DWORD_LEN;
 		LOG_DEBUG_BDF("trace_id=0x%"SXE_PRIX64" get data from reg[%u] dword=0x%x\n",
 				trace_id, dw_idx, pkt_data);
@@ -309,18 +295,17 @@ static void hdc_resp_data_rcv(struct sxe_hw *hw, u64 trace_id,
 		}
 	}
 
-	return;
 }
 
-STATIC s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id,
+static s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id,
 			u8 *in_data, u16 in_len)
 {
-	s32 ret 	= 0;
+	s32 ret = 0;
 	u32 total_len	= 0;
-	u16 pkt_num     = 0;
-	u16 index       = 0;
-	u16 offset      = 0;
-	HdcHeader_u     pkt_header;
+	u16 pkt_num = 0;
+	u16 index = 0;
+	u16 offset = 0;
+	HdcHeader_u pkt_header;
 	bool is_retry   = false;
 	struct sxe_adapter *adapter = hw->adapter;
 
@@ -343,27 +328,26 @@ STATIC s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id,
 		hdc_packet_send(hw, trace_id, &pkt_header,
 				in_data + offset, in_len);
 
-		if (index == pkt_num - 1) {
+		if (index == pkt_num - 1)
 			break;
-		}
 
 		ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
 		if (ret == -EINTR) {
 			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
 			goto l_out;
 		} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
-			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack"
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack "
 					"failed, retry\n", trace_id);
 			if (is_retry) {
 				ret = -SXE_HDC_RETRY_ERR;
 				goto l_out;
 			}
 
-			index --;
+			index--;
 			is_retry = true;
 			continue;
 		} else if (ret != SXE_HDC_SUCCESS) {
-			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack"
+			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req ack "
 					"failed, ret=%d\n", trace_id, ret);
 			ret = -SXE_HDC_RETRY_ERR;
 			goto l_out;
@@ -382,36 +366,36 @@ STATIC s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id,
 static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id,
 			u8 *out_data, u16 out_len)
 {
-	s32          ret;
-	u32          req_dwords;
-	u32          resp_len;
-	u32          resp_dwords;
-	u16          pkt_num;
-	u16          index;
-	u16          offset;
+	s32 ret;
+	u32 req_dwords;
+	u32 resp_len;
+	u32 resp_dwords;
+	u16 pkt_num;
+	u16 index;
+	u16 offset;
 	HdcHeader_u  pkt_header;
-	bool     retry          = false;
+	bool retry = false;
 	struct sxe_adapter *adapter = hw->adapter;
 
-	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get\n",trace_id);
+	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get\n", trace_id);
 	ret = hdc_packet_ack_get(hw, trace_id, &pkt_header);
 	if (ret == -EINTR) {
 		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
 		goto l_out;
-	} else if(ret) {
+	} else if (ret) {
 		LOG_ERROR_BDF("hdc trace_id=0x%"SXE_PRIX64" ack get failed, ret=%d\n",
 				trace_id, ret);
 		ret = -SXE_HDC_RETRY_ERR;
 		goto l_out;
 	}
 
-	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get"
-		"succeed header[0x%x]\n",trace_id, pkt_header.dw0);
+	LOG_DEBUG_BDF("hdc trace_id=0x%"SXE_PRIX64" req's last cmd ack get "
+		"succeed header[0x%x]\n", trace_id, pkt_header.dw0);
 
 	if (!pkt_header.head.startPkg) {
 		ret = -SXE_HDC_RETRY_ERR;
 		LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" ack header has error:"
-				"not set start bit\n",trace_id);
+				" not set start bit\n", trace_id);
 		goto l_out;
 	}
 
@@ -420,16 +404,15 @@ static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id,
 	if (resp_dwords > req_dwords) {
 		ret = -SXE_HDC_RETRY_ERR;
 		LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" rsv len check failed:"
-				"resp_dwords=%u, req_dwords=%u\n",trace_id,
+				" resp_dwords=%u, req_dwords=%u\n", trace_id,
 				resp_dwords, req_dwords);
 		goto l_out;
 	}
 
 	resp_len = resp_dwords << 2;
 	LOG_DEBUG_BDF("outlen = %u bytes, resp_len = %u bytes\n", out_len, resp_len);
-	if (resp_len > out_len) {
+	if (resp_len > out_len)
 		resp_len = out_len;
-	}
 
 	hdc_resp_data_rcv(hw, trace_id, &pkt_header, out_data, resp_len);
 
@@ -449,19 +432,19 @@ static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id,
 			LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" interrupted\n", trace_id);
 			goto l_out;
 		} else if (ret == -SXE_HDC_PKG_SKIP_ERR) {
-			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling"
+			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling "
 					"failed, ret=%d\n", trace_id, ret);
 			if (retry) {
 				ret = -SXE_HDC_RETRY_ERR;
 				goto l_out;
 			}
 
-			index --;
+			index--;
 			retry = true;
 			continue;
 		} else if (ret != SXE_HDC_SUCCESS) {
-			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling"
-					"failed, ret=%d\n",trace_id, ret);
+			LOG_ERROR_BDF("trace_id=0x%"SXE_PRIX64" hdc resp ack polling "
+					"failed, ret=%d\n", trace_id, ret);
 			ret = -SXE_HDC_RETRY_ERR;
 			goto l_out;
 		}
@@ -491,7 +474,7 @@ static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id,
 
 	status = sxe_hw_hdc_fw_status_get(hw);
 	if (status != SXE_FW_START_STATE_FINISHED) {
-		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good\n",hw, status);
+		LOG_ERROR_BDF("fw[%p] status[0x%x] is not good\n", hw, status);
 		ret = -SXE_FW_STATUS_ERR;
 		goto l_ret;
 	}
@@ -514,16 +497,16 @@ static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id,
 	ret = hdc_req_process(hw, trace_id, trans_info->in.data,
 				trans_info->in.len);
 	if (ret) {
-		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req process"
-				"failed, ret=%d\n",trace_id, ret);
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" req process "
+				"failed, ret=%d\n", trace_id, ret);
 		goto l_hdc_lock_release;
 	}
 
 	ret = hdc_resp_process(hw, trace_id, trans_info->out.data,
 				trans_info->out.len);
 	if (ret) {
-		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" resp process"
-				"failed, ret=%d\n",trace_id, ret);
+		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" resp process "
+				"failed, ret=%d\n", trace_id, ret);
 	}
 
 l_hdc_lock_release:
@@ -532,7 +515,7 @@ static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id,
 	return ret;
 }
 
-STATIC s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
+static s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
 				struct sxe_hdc_trans_info *trans_info)
 {
 	s32 ret;
@@ -548,22 +531,22 @@ STATIC s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
 		goto l_ret;
 	}
 
-	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64" \n",hw, trace_id);
-	
+	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64"\n", hw, trace_id);
+
 	ret = sem_wait(sxe_hdc_sema_get());
 	if (ret) {
 		LOG_WARN_BDF("hw[%p] hdc concurrency full\n", hw);
 		goto l_ret;
 	}
 
-	for (retry_idx = 0; retry_idx < 250; retry_idx++ ) {
+	for (retry_idx = 0; retry_idx < 250; retry_idx++) {
 		ret = sxe_hdc_packet_trans(hw, trace_id, trans_info);
 		if (ret == SXE_SUCCESS) {
 			goto l_up;
 		} else if (ret == -SXE_HDC_RETRY_ERR) {
 			rte_delay_ms(10);
 			continue;
-	 	} else {
+		} else {
 			LOG_ERROR_BDF("sxe hdc packet trace_id=0x%"SXE_PRIX64
 					" trans error, ret=%d\n", trace_id, ret);
 			ret = -EFAULT;
@@ -572,16 +555,16 @@ STATIC s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id,
 	}
 
 l_up:
-	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64"\n",hw, trace_id);
+	LOG_DEBUG_BDF("hw[%p] cmd trace=0x%"SXE_PRIX64"\n", hw, trace_id);
 	sem_post(sxe_hdc_sema_get());
 l_ret:
 	ret = pthread_sigmask(SIG_SETMASK, &old_mask, NULL);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("hdc restore old signal mask failed, ret=%d\n", ret);
-	}
-	if (ret == -SXE_HDC_RETRY_ERR) {
+
+	if (ret == -SXE_HDC_RETRY_ERR)
 		ret = -EFAULT;
-	}
+
 	return ret;
 }
 
@@ -590,7 +573,6 @@ static void sxe_cmd_hdr_init(struct sxe_hdc_cmd_hdr *cmd_hdr,
 {
 	cmd_hdr->cmd_type = cmd_type;
 	cmd_hdr->cmd_sub_type = 0;
-	return;
 }
 
 static void sxe_driver_cmd_msg_init(struct sxe_hdc_drv_cmd_msg *msg,
@@ -603,11 +585,9 @@ static void sxe_driver_cmd_msg_init(struct sxe_hdc_drv_cmd_msg *msg,
 	msg->length.req_len = SXE_HDC_MSG_HDR_SIZE + req_len;
 	msg->traceid = trace_id;
 
-	if (req_data && req_len != 0) {
+	if (req_data && req_len != 0)
 		memcpy(msg->body, (u8 *)req_data, req_len);
-	}
 
-	return;
 }
 
 static void sxe_hdc_trans_info_init(
@@ -619,7 +599,6 @@ static void sxe_hdc_trans_info_init(
 	trans_info->in.len   = in_len;
 	trans_info->out.data = out_data_buf;
 	trans_info->out.len  = out_len;
-	return;
 }
 
 s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
@@ -647,16 +626,16 @@ s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
 
 	in_data_buf = rte_zmalloc("pmd hdc in buffer", in_len, RTE_CACHE_LINE_SIZE);
 	if (in_data_buf == NULL) {
-		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc indata"
-				"mem len[%u] failed\n",trace_id, in_len);
+		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc indata "
+				"mem len[%u] failed\n", trace_id, in_len);
 		ret = -ENOMEM;
 		goto l_ret;
 	}
 
 	out_data_buf = rte_zmalloc("pmd hdc out buffer", out_len, RTE_CACHE_LINE_SIZE);
 	if (out_data_buf == NULL) {
-		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc out_data"
-				"mem len[%u] failed\n",trace_id, out_len);
+		LOG_ERROR_BDF("cmd trace_id=0x%"SXE_PRIX64" kzalloc out_data "
+				"mem len[%u] failed\n", trace_id, out_len);
 		ret = -ENOMEM;
 		goto l_in_buf_free;
 	}
@@ -678,7 +657,7 @@ s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
 	ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info);
 	if (ret) {
 		LOG_ERROR_BDF("hdc cmd trace_id=0x%"SXE_PRIX64" hdc cmd process"
-				" failed, ret=%d\n",trace_id, ret);
+				" failed, ret=%d\n", trace_id, ret);
 		goto l_out_buf_free;
 	}
 
@@ -700,9 +679,8 @@ s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
 		goto l_out_buf_free;
 	}
 
-	if (resp_len != 0) {
+	if (resp_len != 0)
 		memcpy(resp_data, ack->body, resp_len);
-	}
 
 	LOG_DEBUG_BDF("driver get hdc ack trace_id=0x%"SXE_PRIX64","
 			" ack_len=%u, ack_data_len=%u\n",
diff --git a/drivers/net/sxe/pf/sxe_pmd_hdc.h b/drivers/net/sxe/pf/sxe_pmd_hdc.h
index 13671f3a83..98e6599b9d 100644
--- a/drivers/net/sxe/pf/sxe_pmd_hdc.h
+++ b/drivers/net/sxe/pf/sxe_pmd_hdc.h
@@ -8,18 +8,18 @@
 #include "sxe_hw.h"
 #include "sxe_errno.h"
 
-#define SXE_HDC_SUCCESS              0
-#define SXE_HDC_FALSE                SXE_ERR_HDC(1)
-#define SXE_HDC_INVAL_PARAM          SXE_ERR_HDC(2)
-#define SXE_HDC_BUSY                 SXE_ERR_HDC(3)
-#define SXE_HDC_FW_OPS_FAILED        SXE_ERR_HDC(4)
-#define SXE_HDC_FW_OV_TIMEOUT        SXE_ERR_HDC(5)
-#define SXE_HDC_REQ_ACK_HEAD_ERR     SXE_ERR_HDC(6)
-#define SXE_HDC_REQ_ACK_TLEN_ERR     SXE_ERR_HDC(7)
-#define SXE_HDC_PKG_SKIP_ERR         SXE_ERR_HDC(8)
-#define SXE_HDC_PKG_OTHER_ERR        SXE_ERR_HDC(9)
-#define SXE_HDC_RETRY_ERR            SXE_ERR_HDC(10)
-#define SXE_FW_STATUS_ERR            SXE_ERR_HDC(11)
+#define SXE_HDC_SUCCESS			0
+#define SXE_HDC_FALSE			SXE_ERR_HDC(1)
+#define SXE_HDC_INVAL_PARAM		SXE_ERR_HDC(2)
+#define SXE_HDC_BUSY			SXE_ERR_HDC(3)
+#define SXE_HDC_FW_OPS_FAILED		SXE_ERR_HDC(4)
+#define SXE_HDC_FW_OV_TIMEOUT		SXE_ERR_HDC(5)
+#define SXE_HDC_REQ_ACK_HEAD_ERR	SXE_ERR_HDC(6)
+#define SXE_HDC_REQ_ACK_TLEN_ERR	SXE_ERR_HDC(7)
+#define SXE_HDC_PKG_SKIP_ERR		SXE_ERR_HDC(8)
+#define SXE_HDC_PKG_OTHER_ERR		SXE_ERR_HDC(9)
+#define SXE_HDC_RETRY_ERR		SXE_ERR_HDC(10)
+#define SXE_FW_STATUS_ERR		SXE_ERR_HDC(11)
 
 struct sxe_hdc_data_info {
 	u8 *data;
@@ -31,8 +31,8 @@ struct sxe_hdc_trans_info {
 	struct sxe_hdc_data_info out;
 };
 
-s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode, 
-					void *req_data, u16 req_len, 
+s32 sxe_driver_cmd_trans(struct sxe_hw *hw, u16 opcode,
+					void *req_data, u16 req_len,
 					void *resp_data, u16 resp_len);
 
 void sxe_hdc_channel_init(void);
diff --git a/drivers/net/sxe/pf/sxe_ptp.c b/drivers/net/sxe/pf/sxe_ptp.c
index 166665ad11..5e7fa05307 100644
--- a/drivers/net/sxe/pf/sxe_ptp.c
+++ b/drivers/net/sxe/pf/sxe_ptp.c
@@ -34,7 +34,6 @@ static void sxe_timecounters_start(struct rte_eth_dev *dev)
 	adapter->ptp_ctxt.tx_hwtstamp_nsec = 0;
 	adapter->ptp_ctxt.tx_hwtstamp_sec = 0;
 
-	return;
 }
 
 s32 sxe_timesync_enable(struct rte_eth_dev *dev)
@@ -45,7 +44,7 @@ s32 sxe_timesync_enable(struct rte_eth_dev *dev)
 
 	sxe_hw_ptp_init(hw);
 
-	 
+
 	sxe_hw_ptp_timestamp_mode_set(hw, true, 0, tses);
 
 	sxe_hw_ptp_timestamp_enable(hw);
@@ -93,7 +92,7 @@ s32 sxe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 
 	rx_tstamp_cycles = sxe_hw_ptp_rx_timestamp_get(hw);
 	ns = rte_timecounter_update(&adapter->ptp_ctxt.rx_tstamp_tc, rx_tstamp_cycles);
-	PMD_LOG_DEBUG(DRV, "got rx_tstamp_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64, 
+	PMD_LOG_DEBUG(DRV, "got rx_tstamp_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64,
 			rx_tstamp_cycles, ns);
 	*timestamp = rte_ns_to_timespec(ns);
 
@@ -126,9 +125,8 @@ s32 sxe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 	sxe_hw_ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
 	if (ts_ns  != adapter->ptp_ctxt.tx_hwtstamp_nsec ||
 		ts_sec != adapter->ptp_ctxt.tx_hwtstamp_sec) {
-		for (i = 0; i < SXE_TXTS_POLL_CHECK; i++) {
+		for (i = 0; i < SXE_TXTS_POLL_CHECK; i++)
 			sxe_hw_ptp_tx_timestamp_get(hw, &last_sec, &last_ns);
-		}
 
 		for (; i < SXE_TXTS_POLL; i++) {
 			sxe_hw_ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns);
@@ -147,7 +145,7 @@ s32 sxe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 	} else {
 		adapter->ptp_ctxt.tx_hwtstamp_nsec = ts_ns;
 		adapter->ptp_ctxt.tx_hwtstamp_sec  = ts_sec;
-		tx_tstamp_cycles = 
+		tx_tstamp_cycles =
 			sxe_timesync_tx_tstamp_cycles_get(adapter);
 		ns = rte_timecounter_update(&adapter->ptp_ctxt.tx_tstamp_tc,
 						tx_tstamp_cycles);
@@ -181,14 +179,14 @@ s32 sxe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 
 	systime_cycles = sxe_hw_ptp_systime_get(hw);
 	ns = rte_timecounter_update(&adapter->ptp_ctxt.systime_tc, systime_cycles);
-	PMD_LOG_DEBUG(DRV, "got systime_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64, 
+	PMD_LOG_DEBUG(DRV, "got systime_cycles = %"SXE_PRIU64"ns=%"SXE_PRIU64,
 			systime_cycles, ns);
 	*ts = rte_ns_to_timespec(ns);
 
 	return 0;
 }
 
-s32 sxe_timesync_write_time(struct rte_eth_dev *dev, 
+s32 sxe_timesync_write_time(struct rte_eth_dev *dev,
 					const struct timespec *ts)
 {
 	u64 ns;
diff --git a/drivers/net/sxe/pf/sxe_ptp.h b/drivers/net/sxe/pf/sxe_ptp.h
index 367c1a34a0..14971b2d50 100644
--- a/drivers/net/sxe/pf/sxe_ptp.h
+++ b/drivers/net/sxe/pf/sxe_ptp.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C), 2022, Linkdata Technology Co., Ltd.
  */
- 
+
 #ifndef __SXE_PTP_H__
 #define __SXE_PTP_H__
 
@@ -20,7 +20,7 @@ s32 sxe_timesync_adjust_time(struct rte_eth_dev *dev, s64 delta);
 
 s32 sxe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);
 
-s32 sxe_timesync_write_time(struct rte_eth_dev *dev, 
+s32 sxe_timesync_write_time(struct rte_eth_dev *dev,
 					const struct timespec *ts);
 
 #endif
diff --git a/drivers/net/sxe/pf/sxe_queue.c b/drivers/net/sxe/pf/sxe_queue.c
index 8a0042022b..98e4a5c1ac 100644
--- a/drivers/net/sxe/pf/sxe_queue.c
+++ b/drivers/net/sxe/pf/sxe_queue.c
@@ -27,7 +27,7 @@
 #endif
 #include "sxe_compat_version.h"
 
-#define SXE_RXQ_SCAN_INTERVAL 				4
+#define SXE_RXQ_SCAN_INTERVAL   4
 
 #ifndef DEFAULT_TX_RS_THRESH
 #define DEFAULT_TX_RS_THRESH   32
@@ -39,9 +39,9 @@
 
 #define RTE_SXE_WAIT_100_US	100
 
-#define SXE_MMW_SIZE_DEFAULT        0x4
-#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
-#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600 
+#define SXE_MMW_SIZE_DEFAULT		0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME	0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE	0x2600
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
 static s32 sxe_vf_rss_rxq_num_validate(struct rte_eth_dev *dev, u16 rxq_num)
@@ -67,7 +67,7 @@ static s32 sxe_vf_rss_rxq_num_validate(struct rte_eth_dev *dev, u16 rxq_num)
 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
 
-	PMD_LOG_INFO(INIT, "enable sriov, vfs num:%u, %u pool mode, %u queue pre pool"
+	PMD_LOG_INFO(INIT, "enable sriov, vfs num:%u, %u pool mode, %u queue pre pool "
 				"vm total queue num are %u",
 				pci_dev->max_vfs,
 				RTE_ETH_DEV_SRIOV(dev).active,
@@ -85,38 +85,38 @@ s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev)
 	u16 tx_q_num = dev->data->nb_tx_queues;
 
 	switch (dev_conf->rxmode.mq_mode) {
-		case RTE_ETH_MQ_RX_VMDQ_DCB:
-			PMD_LOG_INFO(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in sriov");
-			break;
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		PMD_LOG_INFO(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in sriov");
+		break;
+
+	case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
+		PMD_LOG_ERR(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB_RSS mode unsupported in sriov");
+		ret = -EINVAL;
+		goto l_end;
 
-		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
-			PMD_LOG_ERR(INIT, "RTE_ETH_MQ_RX_VMDQ_DCB_RSS mode unsupported in sriov");
+	case RTE_ETH_MQ_RX_RSS:
+	case RTE_ETH_MQ_RX_VMDQ_RSS:
+		dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
+		if ((rx_q_num <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) &&
+				sxe_vf_rss_rxq_num_validate(dev, rx_q_num)) {
+			PMD_LOG_ERR(INIT, "sriov is active, invalid queue number[%d], "
+				"for vmdq rss, allowed value are 1, 2 or 4",
+					rx_q_num);
 			ret = -EINVAL;
 			goto l_end;
+		}
+		break;
 
-		case RTE_ETH_MQ_RX_RSS:
-		case RTE_ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
-			if ((rx_q_num <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) && 
-					sxe_vf_rss_rxq_num_validate(dev, rx_q_num)) {
-				PMD_LOG_ERR(INIT, "sriov is active, invalid queue number[%d], "
-					" for vmdq rss, allowed value are 1, 2 or 4",
-						rx_q_num);
-				ret = -EINVAL;
-				goto l_end;
-			}
-			break;
-
-		case RTE_ETH_MQ_RX_VMDQ_ONLY:
-		case RTE_ETH_MQ_RX_NONE:
-			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
-			break;
+	case RTE_ETH_MQ_RX_VMDQ_ONLY:
+	case RTE_ETH_MQ_RX_NONE:
+		dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
+		break;
 
-		default:
-			PMD_LOG_ERR(INIT, "sriov is active, wrong mq_mode rx %d",
-					dev_conf->rxmode.mq_mode);
-			ret = -EINVAL;
-			goto l_end;
+	default:
+		PMD_LOG_ERR(INIT, "sriov is active, wrong mq_mode rx %d",
+				dev_conf->rxmode.mq_mode);
+		ret = -EINVAL;
+		goto l_end;
 	}
 
 	switch (dev_conf->txmode.mq_mode) {
@@ -129,7 +129,7 @@ s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev)
 		ret = -EINVAL;
 		goto l_end;
 
-	default: 
+	default:
 		dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY;
 		break;
 	}
@@ -174,9 +174,9 @@ static inline s32 sxe_non_sriov_mq_mode_check(struct rte_eth_dev *dev)
 			goto l_end;
 		}
 
-		if (!((dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools == \
-			RTE_ETH_16_POOLS ) || (
-			dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools == \
+		if (!((dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools ==
+			RTE_ETH_16_POOLS) || (
+			dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools ==
 			RTE_ETH_32_POOLS))) {
 			PMD_LOG_ERR(INIT, "VMDQ+DCB selected,"
 					" nb_queue_pools must be %d or %d",
@@ -215,9 +215,9 @@ static inline s32 sxe_non_sriov_mq_mode_check(struct rte_eth_dev *dev)
 			goto l_end;
 		}
 
-		if (!((dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == \
-			RTE_ETH_16_POOLS ) || (
-			dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == \
+		if (!((dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_16_POOLS) || (
+			dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
 			RTE_ETH_32_POOLS))) {
 			PMD_LOG_ERR(INIT, "VMDQ+DCB selected,"
 					" nb_queue_pools must be %d or %d",
@@ -275,7 +275,6 @@ void sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
 {
 	__sxe_tx_queue_info_get(dev, queue_id, q_info);
 
-	return;
 }
 
 s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
@@ -295,16 +294,15 @@ s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
 	*rs_thresh = (DEFAULT_TX_RS_THRESH + *free_thresh > ring_depth) ?
 			ring_depth - *free_thresh : DEFAULT_TX_RS_THRESH;
 
-	if (tx_conf->tx_rs_thresh > 0) {
+	if (tx_conf->tx_rs_thresh > 0)
 		*rs_thresh = tx_conf->tx_rs_thresh;
-	}
 
 	if (*rs_thresh + *free_thresh > ring_depth) {
 		PMD_LOG_ERR(INIT, "tx_rs_thresh + tx_free_thresh must not "
-			     "exceed nb_desc. (tx_rs_thresh=%u "
-			     "tx_free_thresh=%u nb_desc=%u port = %d)",
-			     *rs_thresh, *free_thresh,
-			     ring_depth, dev->data->port_id);
+				 "exceed nb_desc. (tx_rs_thresh=%u "
+				 "tx_free_thresh=%u nb_desc=%u port = %d)",
+				 *rs_thresh, *free_thresh,
+				 ring_depth, dev->data->port_id);
 		goto l_end;
 	}
 
@@ -326,34 +324,34 @@ s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
 
 	if (*free_thresh >= (ring_depth - 3)) {
 		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than the "
-			     "tx_free_thresh must be less than the number of "
-			     "TX descriptors minus 3. (tx_free_thresh=%u "
-			     "port=%d)",
-			     *free_thresh, dev->data->port_id);
+				 "tx_free_thresh must be less than the number of "
+				 "TX descriptors minus 3. (tx_free_thresh=%u "
+				 "port=%d)",
+				 *free_thresh, dev->data->port_id);
 		goto l_end;
 	}
 
 	if (*rs_thresh > *free_thresh) {
 		PMD_LOG_ERR(INIT, "tx_rs_thresh must be less than or equal to "
-			     "tx_free_thresh. (tx_free_thresh=%u "
-			     "tx_rs_thresh=%u port=%d)",
-			     *free_thresh, *rs_thresh, dev->data->port_id);
+				 "tx_free_thresh. (tx_free_thresh=%u "
+				 "tx_rs_thresh=%u port=%d)",
+				 *free_thresh, *rs_thresh, dev->data->port_id);
 		goto l_end;
 	}
 
 	if ((ring_depth % *rs_thresh) != 0) {
 		PMD_LOG_ERR(INIT, "tx_rs_thresh must be a divisor of the "
-			     "number of TX descriptors. (tx_rs_thresh=%u "
-			     "port=%d, ring_depth=%d)",
-			     *rs_thresh, dev->data->port_id, ring_depth);
+				 "number of TX descriptors. (tx_rs_thresh=%u "
+				 "port=%d, ring_depth=%d)",
+				 *rs_thresh, dev->data->port_id, ring_depth);
 		goto l_end;
 	}
 
 	if ((*rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
 		PMD_LOG_ERR(INIT, "TX WTHRESH must be set to 0 if "
-			     "tx_rs_thresh is greater than 1. "
-			     "(tx_rs_thresh=%u port=%d)",
-			     *rs_thresh, dev->data->port_id);
+				 "tx_rs_thresh is greater than 1. "
+				 "(tx_rs_thresh=%u port=%d)",
+				 *rs_thresh, dev->data->port_id);
 		goto l_end;
 	}
 
@@ -365,11 +363,9 @@ s32 __rte_cold sxe_txq_arg_validate(struct rte_eth_dev *dev, u16 ring_depth,
 
 static void __rte_cold sxe_tx_buffer_ring_free(sxe_tx_queue_s *txq)
 {
-	if (txq != NULL && txq->buffer_ring != NULL) {
+	if (txq != NULL && txq->buffer_ring != NULL)
 		rte_free(txq->buffer_ring);
-	}
 
-	return;
 }
 
 static void __rte_cold sxe_tx_queue_mbufs_release(sxe_tx_queue_s *txq)
@@ -385,28 +381,24 @@ static void __rte_cold sxe_tx_queue_mbufs_release(sxe_tx_queue_s *txq)
 		}
 	}
 
-	return;
 }
 
 void __rte_cold sxe_tx_queue_free(sxe_tx_queue_s *txq)
 {
 	__sxe_tx_queue_free(txq);
 
-	return;
 }
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
 void __rte_cold sxe_tx_queue_release(void *txq)
 {
 	sxe_tx_queue_free(txq);
-	return;
 }
 #else
 void __rte_cold sxe_tx_queue_release(struct rte_eth_dev *dev,
 					u16 queue_idx)
 {
 	sxe_tx_queue_free(dev->data->tx_queues[queue_idx]);
-	return;
 }
 #endif
 
@@ -414,34 +406,32 @@ static void __rte_cold sxe_tx_queue_init(sxe_tx_queue_s *txq)
 {
 	u16 prev, i;
 	volatile sxe_tx_data_desc_u *txd;
-	static const sxe_tx_data_desc_u zeroed_desc = {{0}};
+	static const sxe_tx_data_desc_u zeroed_desc = { {0} };
 	struct sxe_tx_buffer *tx_buffer = txq->buffer_ring;
 
-	for (i = 0; i < txq->ring_depth; i++) {
+	for (i = 0; i < txq->ring_depth; i++)
 		txq->desc_ring[i] = zeroed_desc;
-	}
 
 	prev = txq->ring_depth - 1;
 	for (i = 0; i < txq->ring_depth; i++) {
 		txd = &txq->desc_ring[i];
 		txd->wb.status = rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD);
-		tx_buffer[i].mbuf       = NULL;
-		tx_buffer[i].last_id    = i;
+		tx_buffer[i].mbuf = NULL;
+		tx_buffer[i].last_id = i;
 		tx_buffer[prev].next_id = i;
 		prev = i;
 	}
 
-	txq->ctx_curr      = 0;
+	txq->ctx_curr	  = 0;
 	txq->desc_used_num = 0;
 	txq->desc_free_num = txq->ring_depth - 1;
 	txq->next_to_use   = 0;
 	txq->next_to_clean = txq->ring_depth - 1;
-	txq->next_dd       = txq->rs_thresh  - 1;
-	txq->next_rs       = txq->rs_thresh  - 1;
+	txq->next_dd	   = txq->rs_thresh  - 1;
+	txq->next_rs	   = txq->rs_thresh  - 1;
 	memset((void *)&txq->ctx_cache, 0,
 			SXE_CTXT_DESC_NUM * sizeof(struct sxe_ctxt_info));
 
-	return;
 }
 
 sxe_tx_queue_s * __rte_cold sxe_tx_queue_alloc(
@@ -545,7 +535,6 @@ void sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
 {
 	__sxe_rx_queue_info_get(dev, queue_id, qinfo);
 
-	return;
 }
 
 s32 __rte_cold sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
@@ -557,7 +546,7 @@ s32 __rte_cold sxe_rx_queue_start(struct rte_eth_dev *dev,
 						u16 queue_id)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw      *hw = &adapter->hw;
+	struct sxe_hw *hw = &adapter->hw;
 	struct sxe_rx_queue *rxq;
 	u16 reg_idx;
 	s32 ret;
@@ -570,7 +559,7 @@ s32 __rte_cold sxe_rx_queue_start(struct rte_eth_dev *dev,
 	ret = sxe_rx_queue_mbufs_alloc(rxq);
 	if (ret) {
 		PMD_LOG_ERR(INIT, "could not alloc mbuf for queue:%d",
-			     queue_id);
+				 queue_id);
 		goto l_end;
 	}
 
@@ -583,7 +572,7 @@ s32 __rte_cold sxe_rx_queue_start(struct rte_eth_dev *dev,
 	return ret;
 }
 
-STATIC void __rte_cold sxe_rx_queue_sc_mbufs_free(struct rte_mbuf *mbuf)
+static void __rte_cold sxe_rx_queue_sc_mbufs_free(struct rte_mbuf *mbuf)
 {
 	u16 i;
 	u16 num_segs = mbuf->nb_segs;
@@ -595,17 +584,16 @@ STATIC void __rte_cold sxe_rx_queue_sc_mbufs_free(struct rte_mbuf *mbuf)
 		mbuf = next_seg;
 	}
 
-	return;
 }
 
 void __rte_cold sxe_rx_queue_mbufs_free(struct sxe_rx_queue *rxq)
 {
 	u16 i;
-	
+
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 	if (rxq->is_using_sse) {
 		sxe_rx_queue_vec_mbufs_release(rxq);
-		goto l_out;
+		return;
 	}
 #endif
 
@@ -636,32 +624,24 @@ void __rte_cold sxe_rx_queue_mbufs_free(struct sxe_rx_queue *rxq)
 		}
 	}
 
-#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
-l_out:
-#endif
-
-	return;
 }
 
 void __rte_cold sxe_rx_queue_init(bool rx_batch_alloc_allowed,
 						struct sxe_rx_queue *rxq)
 {
-	static const sxe_rx_data_desc_u zeroed_desc = {{0}};
+	static const sxe_rx_data_desc_u zeroed_desc = { {0} };
 	u16 i;
 	u16 len = rxq->ring_depth;
 
-	if (rx_batch_alloc_allowed) {
+	if (rx_batch_alloc_allowed)
 		len += RTE_PMD_SXE_MAX_RX_BURST;
-	}
 
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < len; i++)
 		rxq->desc_ring[i] = zeroed_desc;
-	}
 
 	memset(&rxq->fake_mbuf, 0, sizeof(rxq->fake_mbuf));
-	for (i = rxq->ring_depth; i < len; ++i) {
+	for (i = rxq->ring_depth; i < len; ++i)
 		rxq->buffer_ring[i].mbuf = &rxq->fake_mbuf;
-	}
 
 	rxq->completed_pkts_num = 0;
 	rxq->next_ret_pkg = 0;
@@ -670,9 +650,8 @@ void __rte_cold sxe_rx_queue_init(bool rx_batch_alloc_allowed,
 	rxq->hold_num = 0;
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
-	if (rxq->pkt_first_seg != NULL) {
+	if (rxq->pkt_first_seg != NULL)
 		rte_pktmbuf_free(rxq->pkt_first_seg);
-	}
 
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
@@ -683,34 +662,30 @@ void __rte_cold sxe_rx_queue_init(bool rx_batch_alloc_allowed,
 #endif
 #endif
 
-	return;
 }
 
 void __rte_cold sxe_rx_queue_free(struct sxe_rx_queue *rxq)
 {
 	__sxe_rx_queue_free(rxq);
-	return;
 }
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
 void __rte_cold sxe_rx_queue_release(void *rxq)
 {
 	sxe_rx_queue_free(rxq);
-	return;
 }
 #else
 void __rte_cold sxe_rx_queue_release(struct rte_eth_dev *dev,
 					u16 queue_idx)
 {
 	sxe_rx_queue_free(dev->data->rx_queues[queue_idx]);
-	return;
 }
 #endif
 
 s32 __rte_cold sxe_rx_queue_stop(struct rte_eth_dev *dev, u16 queue_id)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	struct sxe_rx_queue *rxq;
 	u16 reg_idx;
 
@@ -745,7 +720,7 @@ u32 sxe_rx_queue_count(void *rx_queue)
 #else
 	rxq = rx_queue;
 #endif
-	
+
 	desc = &(rxq->desc_ring[rxq->processing_idx]);
 
 	while ((count < rxq->ring_depth) &&
@@ -766,18 +741,16 @@ void __rte_cold sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_all
 {
 	__sxe_txrx_queues_clear(dev, rx_batch_alloc_allowed);
 
-	return;
 }
 
 void sxe_queues_free(struct rte_eth_dev *dev)
 {
 	__sxe_queues_free(dev);
-	return;
 }
 
 const struct sxe_txq_ops def_txq_ops = {
-	.init             = sxe_tx_queue_init,
-	.mbufs_release    = sxe_tx_queue_mbufs_release,
+	.init			 = sxe_tx_queue_init,
+	.mbufs_release	= sxe_tx_queue_mbufs_release,
 	.buffer_ring_free = sxe_tx_buffer_ring_free,
 };
 
@@ -791,19 +764,18 @@ void sxe_multi_queue_tx_configure(struct rte_eth_dev *dev)
 	struct sxe_hw *hw = (&((struct sxe_adapter *)(dev->data->dev_private))->hw);
 	u16 pools_num = RTE_ETH_DEV_SRIOV(dev).active;
 	bool sriov_active = !!pools_num;
-	bool vmdq_active = (dev->data->dev_conf.txmode.mq_mode == 
+	bool vmdq_active = (dev->data->dev_conf.txmode.mq_mode ==
 				RTE_ETH_MQ_TX_VMDQ_ONLY);
 
 	sxe_hw_tx_multi_queue_configure(hw, vmdq_active, sriov_active, pools_num);
 
-	return;
 }
 
 #if defined DPDK_20_11_5 || defined DPDK_21_11_5 || defined DPDK_19_11_6
-s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev, 
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
 					u16 queue_idx, u16 tx_rate)
 #else
-s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev, 
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
 					u16 queue_idx, u32 tx_rate)
 #endif
 {
@@ -838,11 +810,11 @@ s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
 	rxmode = &dev->data->dev_conf.rxmode;
 
 	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
-	    (rxmode->max_rx_pkt_len >= SXE_MAX_JUMBO_FRAME_SIZE)) {
+		(rxmode->max_rx_pkt_len >= SXE_MAX_JUMBO_FRAME_SIZE)) {
 #else
 	if (dev->data->mtu + SXE_ETH_OVERHEAD >= SXE_MAX_JUMBO_FRAME_SIZE) {
 #endif
-		sxe_hw_dcb_max_mem_window_set(hw, 
+		sxe_hw_dcb_max_mem_window_set(hw,
 						SXE_MMW_SIZE_JUMBO_FRAME);
 	} else {
 		sxe_hw_dcb_max_mem_window_set(hw, SXE_MMW_SIZE_DEFAULT);
diff --git a/drivers/net/sxe/pf/sxe_queue.h b/drivers/net/sxe/pf/sxe_queue.h
index ef3036a07d..9f73a2ac3f 100644
--- a/drivers/net/sxe/pf/sxe_queue.h
+++ b/drivers/net/sxe/pf/sxe_queue.h
@@ -7,40 +7,40 @@
 #include "sxe_dpdk_version.h"
 #include "sxe_queue_common.h"
 
-#define SXE_TXRX_RING_NUM_MAX     64  
+#define SXE_TXRX_RING_NUM_MAX	 64
 
-#define SXE_TX_MAX_SEG          40
+#define SXE_TX_MAX_SEG		  40
 
 #define	SXE_MIN_RING_DESC	32
 #define	SXE_MAX_RING_DESC	4096
 
-#define SXE_MMW_SIZE_DEFAULT        0x4
-#define SXE_MMW_SIZE_JUMBO_FRAME    0x14
-#define SXE_MAX_JUMBO_FRAME_SIZE    0x2600 
+#define SXE_MMW_SIZE_DEFAULT		0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME	0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE	0x2600
 
 #define SXE_DEFAULT_RX_FREE_THRESH  32
-#define SXE_DEFAULT_RX_PTHRESH      8
-#define SXE_DEFAULT_RX_HTHRESH      8
-#define SXE_DEFAULT_RX_WTHRESH      0
+#define SXE_DEFAULT_RX_PTHRESH	  8
+#define SXE_DEFAULT_RX_HTHRESH	  8
+#define SXE_DEFAULT_RX_WTHRESH	  0
 
 #define SXE_DEFAULT_TX_FREE_THRESH  32
-#define SXE_DEFAULT_TX_PTHRESH      32
-#define SXE_DEFAULT_TX_HTHRESH      0
-#define SXE_DEFAULT_TX_WTHRESH      0
+#define SXE_DEFAULT_TX_PTHRESH	  32
+#define SXE_DEFAULT_TX_HTHRESH	  0
+#define SXE_DEFAULT_TX_WTHRESH	  0
 #define SXE_DEFAULT_TX_RSBIT_THRESH 32
 
-#define SXE_ALIGN               128
+#define SXE_ALIGN			   128
 #define SXE_RX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxe_rx_data_desc_u))
 #define SXE_TX_DESC_RING_ALIGN	(SXE_ALIGN / sizeof(sxe_tx_data_desc_u))
 
-#define SXE_TX_MAX_SEG          40
+#define SXE_TX_MAX_SEG		  40
 #define RTE_SXE_REGISTER_POLL_WAIT_10_MS  10
 
 typedef union sxe_tx_data_desc sxe_tx_data_desc_u;
 typedef struct sxe_rx_buffer   sxe_rx_buffer_s;
 typedef union sxe_rx_data_desc sxe_rx_data_desc_u;
-typedef struct sxe_tx_queue    sxe_tx_queue_s;
-typedef struct sxe_rx_queue    sxe_rx_queue_s;
+typedef struct sxe_tx_queue	sxe_tx_queue_s;
+typedef struct sxe_rx_queue	sxe_rx_queue_s;
 
 struct sxe_tx_context_desc {
 	__le32 vlan_macip_lens;
@@ -127,7 +127,7 @@ s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
 					u16 queue_idx, u16 tx_rate);
 
 #else
-s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev, 
+s32 sxe_queue_rate_limit_set(struct rte_eth_dev *dev,
 					u16 queue_idx, u32 tx_rate);
 #endif
 
@@ -144,4 +144,4 @@ s32 sxe_sriov_mq_mode_check(struct rte_eth_dev *dev);
 
 void __rte_cold sxe_rx_queue_mbufs_free(sxe_rx_queue_s *rxq);
 
-#endif 
+#endif
diff --git a/drivers/net/sxe/pf/sxe_rx.c b/drivers/net/sxe/pf/sxe_rx.c
index febd9fc634..e0074021b4 100644
--- a/drivers/net/sxe/pf/sxe_rx.c
+++ b/drivers/net/sxe/pf/sxe_rx.c
@@ -68,51 +68,51 @@
 #define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP		0x2F
 #define SXE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP		0x4F
 
-#define SXE_PACKET_TYPE_NVGRE                   0x00
-#define SXE_PACKET_TYPE_NVGRE_IPV4              0x01
-#define SXE_PACKET_TYPE_NVGRE_IPV4_TCP          0x11
-#define SXE_PACKET_TYPE_NVGRE_IPV4_UDP          0x21
-#define SXE_PACKET_TYPE_NVGRE_IPV4_SCTP         0x41
-#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT          0x03
-#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP      0x13
-#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP      0x23
-#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP     0x43
-#define SXE_PACKET_TYPE_NVGRE_IPV6              0x04
-#define SXE_PACKET_TYPE_NVGRE_IPV6_TCP          0x14
-#define SXE_PACKET_TYPE_NVGRE_IPV6_UDP          0x24
-#define SXE_PACKET_TYPE_NVGRE_IPV6_SCTP         0x44
-#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT          0x0C
-#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP      0x1C
-#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP      0x2C
-#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP     0x4C
-#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6         0x05
-#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP     0x15
-#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP     0x25
-#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT     0x0D
+#define SXE_PACKET_TYPE_NVGRE				   0x00
+#define SXE_PACKET_TYPE_NVGRE_IPV4			  0x01
+#define SXE_PACKET_TYPE_NVGRE_IPV4_TCP		  0x11
+#define SXE_PACKET_TYPE_NVGRE_IPV4_UDP		  0x21
+#define SXE_PACKET_TYPE_NVGRE_IPV4_SCTP		 0x41
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT		  0x03
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP	  0x13
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP	  0x23
+#define SXE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP	 0x43
+#define SXE_PACKET_TYPE_NVGRE_IPV6			  0x04
+#define SXE_PACKET_TYPE_NVGRE_IPV6_TCP		  0x14
+#define SXE_PACKET_TYPE_NVGRE_IPV6_UDP		  0x24
+#define SXE_PACKET_TYPE_NVGRE_IPV6_SCTP		 0x44
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT		  0x0C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP	  0x1C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP	  0x2C
+#define SXE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP	 0x4C
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6		 0x05
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP	 0x15
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP	 0x25
+#define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT	 0x0D
 #define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0x1D
 #define SXE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0x2D
 
-#define SXE_PACKET_TYPE_VXLAN                   0x80
-#define SXE_PACKET_TYPE_VXLAN_IPV4              0x81
-#define SXE_PACKET_TYPE_VXLAN_IPV4_TCP          0x91
-#define SXE_PACKET_TYPE_VXLAN_IPV4_UDP          0xA1
-#define SXE_PACKET_TYPE_VXLAN_IPV4_SCTP         0xC1
-#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT          0x83
-#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP      0x93
-#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP      0xA3
-#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP     0xC3
-#define SXE_PACKET_TYPE_VXLAN_IPV6              0x84
-#define SXE_PACKET_TYPE_VXLAN_IPV6_TCP          0x94
-#define SXE_PACKET_TYPE_VXLAN_IPV6_UDP          0xA4
-#define SXE_PACKET_TYPE_VXLAN_IPV6_SCTP         0xC4
-#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT          0x8C
-#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP      0x9C
-#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP      0xAC
-#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP     0xCC
-#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6         0x85
-#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP     0x95
-#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP     0xA5
-#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT     0x8D
+#define SXE_PACKET_TYPE_VXLAN				   0x80
+#define SXE_PACKET_TYPE_VXLAN_IPV4			  0x81
+#define SXE_PACKET_TYPE_VXLAN_IPV4_TCP		  0x91
+#define SXE_PACKET_TYPE_VXLAN_IPV4_UDP		  0xA1
+#define SXE_PACKET_TYPE_VXLAN_IPV4_SCTP		 0xC1
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT		  0x83
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP	  0x93
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP	  0xA3
+#define SXE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP	 0xC3
+#define SXE_PACKET_TYPE_VXLAN_IPV6			  0x84
+#define SXE_PACKET_TYPE_VXLAN_IPV6_TCP		  0x94
+#define SXE_PACKET_TYPE_VXLAN_IPV6_UDP		  0xA4
+#define SXE_PACKET_TYPE_VXLAN_IPV6_SCTP		 0xC4
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT		  0x8C
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP	  0x9C
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP	  0xAC
+#define SXE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP	 0xCC
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6		 0x85
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP	 0x95
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP	 0xA5
+#define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT	 0x8D
 #define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0x9D
 #define SXE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0xAD
 
@@ -426,7 +426,6 @@ void sxe_rx_mbuf_common_header_fill(
 				desc.wb.lower.hi_dword.csum_ip.ip_id);
 	}
 
-	return;
 }
 
 static inline void sxe_rx_resource_prefetch(u16 next_idx,
@@ -441,7 +440,6 @@ static inline void sxe_rx_resource_prefetch(u16 next_idx,
 		rte_sxe_prefetch(&buf_ring[next_idx]);
 	}
 
-	return;
 }
 
 u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -459,9 +457,8 @@ static inline u16 sxe_ret_pkts_to_user(sxe_rx_queue_s *rxq,
 
 	pkts_num = (u16)RTE_MIN(pkts_num, rxq->completed_pkts_num);
 
-	for (i = 0; i < pkts_num; ++i) {
+	for (i = 0; i < pkts_num; ++i)
 		rx_pkts[i] = completed_mbuf[i];
-	}
 
 	/* Update completed packets num and next available position */
 	rxq->completed_pkts_num = (u16)(rxq->completed_pkts_num - pkts_num);
@@ -494,9 +491,8 @@ static inline u16 sxe_rx_hw_ring_scan(sxe_rx_queue_s *rxq)
 
 	status = rx_desc->wb.upper.status_error;
 
-	if (!(status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) {
+	if (!(status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)))
 		goto l_end;
-	}
 
 	for (i = 0; i < RTE_PMD_SXE_MAX_RX_BURST;
 		i += LOOK_AHEAD, rx_desc += LOOK_AHEAD, rx_buf += LOOK_AHEAD) {
@@ -531,24 +527,21 @@ static inline u16 sxe_rx_hw_ring_scan(sxe_rx_queue_s *rxq)
 						pkt_info[j], status_arr[j]);
 		}
 
-		for (j = 0; j < LOOK_AHEAD; ++j) {
+		for (j = 0; j < LOOK_AHEAD; ++j)
 			rxq->completed_ring[i + j] = rx_buf[j].mbuf;
-		}
 
-		if (num_dd_set != LOOK_AHEAD) {
+		if (num_dd_set != LOOK_AHEAD)
 			break;
-		}
 	}
 
-	for (i = 0; i < done_num; ++i) {
+	for (i = 0; i < done_num; ++i)
 		rxq->buffer_ring[rxq->processing_idx + i].mbuf = NULL;
-	}
 
 l_end:
 	return done_num;
 }
 
-STATIC inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
+static inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
 							bool reset_mbuf)
 {
 	volatile union sxe_rx_data_desc *desc_ring;
@@ -568,7 +561,7 @@ STATIC inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
 			rxq->batch_alloc_trigger, rxq->batch_alloc_size);
 
 	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)buf_ring,
-				    rxq->batch_alloc_size);
+					rxq->batch_alloc_size);
 	if (unlikely(diag != 0)) {
 		LOG_DEBUG("port_id=%u, rxq=%u buffer alloc failed\n",
 				rxq->port_id, rxq->queue_id);
@@ -579,9 +572,8 @@ STATIC inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
 	desc_ring = &rxq->desc_ring[alloc_idx];
 	for (i = 0; i < rxq->batch_alloc_size; ++i) {
 		mbuf = buf_ring[i].mbuf;
-		if (reset_mbuf) {
+		if (reset_mbuf)
 			mbuf->port = rxq->port_id;
-		}
 
 		rte_mbuf_refcnt_set(mbuf, 1);
 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
@@ -592,9 +584,8 @@ STATIC inline s32 sxe_rx_bufs_batch_alloc(sxe_rx_queue_s *rxq,
 	}
 
 	rxq->batch_alloc_trigger = rxq->batch_alloc_trigger + rxq->batch_alloc_size;
-	if (rxq->batch_alloc_trigger >= rxq->ring_depth) {
+	if (rxq->batch_alloc_trigger >= rxq->ring_depth)
 		rxq->batch_alloc_trigger = rxq->batch_alloc_size - 1;
-	}
 
 l_end:
 	return ret;
@@ -629,7 +620,7 @@ static inline u16 sxe_burst_pkts_recv(void *rx_queue,
 			u32 i, j;
 
 			LOG_ERROR("rx mbuf alloc failed port_id=%u "
-					"queue_id=%u", (unsigned) rxq->port_id,
+					"queue_id=%u", (unsigned int) rxq->port_id,
 					(u16)rxq->queue_id);
 
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
@@ -637,9 +628,8 @@ static inline u16 sxe_burst_pkts_recv(void *rx_queue,
 
 			rxq->completed_pkts_num = 0;
 			rxq->processing_idx = (u16)(rxq->processing_idx - done_num);
-			for (i = 0, j = rxq->processing_idx; i < done_num; ++i, ++j) {
+			for (i = 0, j = rxq->processing_idx; i < done_num; ++i, ++j)
 				rxq->buffer_ring[j].mbuf = rxq->completed_ring[i];
-			}
 
 			done_num = 0;
 			goto l_end;
@@ -649,9 +639,8 @@ static inline u16 sxe_burst_pkts_recv(void *rx_queue,
 		SXE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, alloced_idx);
 	}
 
-	if (rxq->processing_idx >= rxq->ring_depth) {
+	if (rxq->processing_idx >= rxq->ring_depth)
 		rxq->processing_idx = 0;
-	}
 
 	if (rxq->completed_pkts_num) {
 		done_num = sxe_ret_pkts_to_user(rxq, rx_pkts, pkts_num);
@@ -690,9 +679,8 @@ u16 sxe_batch_alloc_pkts_recv(void *rx_queue,
 		ret = sxe_burst_pkts_recv(rx_queue, &rx_pkts[done_num], n);
 		done_num = (u16)(done_num + ret);
 		pkts_num = (u16)(pkts_num - ret);
-		if (ret < n) {
+		if (ret < n)
 			break;
-		}
 	}
 
 l_end:
@@ -714,8 +702,7 @@ static inline s32 sxe_lro_new_mbufs_alloc(sxe_rx_queue_s *rxq,
 				"port_id=%u queue_id=%u",
 				rxq->port_id, rxq->queue_id);
 
-			rte_eth_devices[rxq->port_id].data->
-						rx_mbuf_alloc_failed++;
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			ret = -ENOMEM;
 			goto l_end;
 		}
@@ -736,8 +723,7 @@ static inline s32 sxe_lro_new_mbufs_alloc(sxe_rx_queue_s *rxq,
 					"port_id=%u queue_id=%u",
 					rxq->port_id, rxq->queue_id);
 
-			rte_eth_devices[rxq->port_id].data->
-						rx_mbuf_alloc_failed++;
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			ret = -ENOMEM;
 			goto l_end;
 		}
@@ -764,7 +750,6 @@ static inline void sxe_rx_resource_update(sxe_rx_buffer_s *rx_buf,
 		rx_buf->mbuf = NULL;
 	}
 
-	return;
 }
 
 static inline u16 sxe_rx_next_idx_get(union sxe_rx_data_desc *desc,
@@ -797,7 +782,6 @@ static inline void sxe_lro_first_seg_update(struct rte_mbuf **first_seg,
 		(*first_seg)->pkt_len += data_len;
 		(*first_seg)->nb_segs++;
 	}
-	return;
 }
 
 static inline void sxe_mbuf_fields_process(struct rte_mbuf *first_seg,
@@ -816,9 +800,8 @@ static inline void sxe_mbuf_fields_process(struct rte_mbuf *first_seg,
 	if (unlikely(cur_mbuf->data_len <= rxq->crc_len)) {
 		struct rte_mbuf *lp;
 
-		for (lp = first_seg; lp->next != cur_mbuf; lp = lp->next) {
+		for (lp = first_seg; lp->next != cur_mbuf; lp = lp->next)
 			;
-		}
 
 		first_seg->nb_segs--;
 		lp->data_len -= rxq->crc_len - cur_mbuf->data_len;
@@ -829,7 +812,6 @@ static inline void sxe_mbuf_fields_process(struct rte_mbuf *first_seg,
 	}
 
 	rte_packet_prefetch((u8 *)first_seg->buf_addr + first_seg->data_off);
-	return;
 }
 
 static inline u16 sxe_lro_pkts_recv(void *rx_queue,
@@ -843,7 +825,7 @@ static inline u16 sxe_lro_pkts_recv(void *rx_queue,
 	u16 cur_idx = rxq->processing_idx;
 	u16 done_num = 0;
 	u16 hold_num = rxq->hold_num;
-	u16 prev_idx = rxq->processing_idx; 
+	u16 prev_idx = rxq->processing_idx;
 	s32 err;
 
 	while (done_num < pkts_num) {
@@ -865,9 +847,8 @@ static inline u16 sxe_lro_pkts_recv(void *rx_queue,
 		cur_desc = &desc_ring[cur_idx];
 		staterr = rte_le_to_cpu_32(cur_desc->wb.upper.status_error);
 
-		if (!(staterr & SXE_RXDADV_STAT_DD)) {
+		if (!(staterr & SXE_RXDADV_STAT_DD))
 			break;
-		}
 
 		__atomic_thread_fence(__ATOMIC_ACQUIRE);
 
@@ -890,9 +871,8 @@ static inline u16 sxe_lro_pkts_recv(void *rx_queue,
 		is_eop = !!(staterr & SXE_RXDADV_STAT_EOP);
 
 		next_idx = cur_idx + 1;
-		if (next_idx == rxq->ring_depth) {
+		if (next_idx == rxq->ring_depth)
 			next_idx = 0;
-		}
 
 		sxe_rx_resource_prefetch(next_idx, buf_ring, desc_ring);
 
@@ -962,10 +942,10 @@ u16 sxe_single_alloc_lro_pkts_recv(void *rx_queue,
 	return sxe_lro_pkts_recv(rx_queue, rx_pkts, pkts_num, false);
 }
 
-void __rte_cold sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+void __rte_cold sxe_rx_function_set(struct rte_eth_dev *dev,
+	bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
 {
 	__sxe_rx_function_set(dev, rx_batch_alloc_allowed, rx_vec_allowed);
-	return;
 }
 
 #ifdef ETH_DEV_RX_DESC_DONE
@@ -985,9 +965,8 @@ s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset)
 	}
 
 	index = rxq->processing_idx + offset;
-	if (index >= rxq->ring_depth) {
+	if (index >= rxq->ring_depth)
 		index -= rxq->ring_depth;
-	}
 
 	desc = &rxq->desc_ring[index];
 	is_done = !!(desc->wb.upper.status_error &
@@ -1028,17 +1007,15 @@ s32 sxe_rx_descriptor_status(void *rx_queue, u16 offset)
 	}
 
 	desc = rxq->processing_idx + offset;
-	if (desc >= rxq->ring_depth) {
+	if (desc >= rxq->ring_depth)
 		desc -= rxq->ring_depth;
-	}
 
 	status = &rxq->desc_ring[desc].wb.upper.status_error;
-	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)) {
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))
 		ret =  RTE_ETH_RX_DESC_DONE;
-	}
 
 l_end:
-	LOG_DEBUG("rx queue[%u] get desc status=%d\n",rxq->queue_id, ret);
+	LOG_DEBUG("rx queue[%u] get desc status=%d\n", rxq->queue_id, ret);
 	return ret;
 }
 
@@ -1049,7 +1026,7 @@ s32 __rte_cold sxe_rx_queue_setup(struct rte_eth_dev *dev,
 			 struct rte_mempool *mp)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	struct rx_setup rx_setup = { 0 };
 	s32 ret;
 
@@ -1065,9 +1042,8 @@ s32 __rte_cold sxe_rx_queue_setup(struct rte_eth_dev *dev,
 	rx_setup.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed;
 
 	ret = __sxe_rx_queue_setup(&rx_setup, false);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("rx queue setup fail.(err:%d)", ret);
-	}
 
 	return ret;
 }
@@ -1083,7 +1059,6 @@ static void sxe_rx_mode_configure(struct sxe_hw *hw)
 	flt_ctrl |= SXE_FCTRL_PMCF;
 	LOG_DEBUG("write flt_ctrl=0x%x", flt_ctrl);
 	sxe_hw_rx_mode_set(hw, flt_ctrl);
-	return;
 }
 
 static inline void
@@ -1096,63 +1071,52 @@ static inline void
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
 			rxq->crc_len = RTE_ETHER_CRC_LEN;
-		} else {
+		else
 			rxq->crc_len = 0;
-		}
 
-		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		}
 	}
 
-	return;
 }
 
 static inline void
 	sxe_rx_offload_configure(struct rte_eth_dev *dev)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
-	bool crc_strp_on;
 	bool ip_csum_offload;
 
-	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
-		crc_strp_on = false;
-	} else {
-		crc_strp_on = true;
-	}
-	sxe_hw_rx_dma_ctrl_init(hw, crc_strp_on);
+	sxe_hw_rx_dma_ctrl_init(hw);
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		adapter->mtu = rx_conf->max_rx_pkt_len - SXE_ETH_OVERHEAD;
-	}
 #else
-	if (dev->data->mtu > RTE_ETHER_MTU) {
+	if (dev->data->mtu > RTE_ETHER_MTU)
 		adapter->mtu = dev->data->mtu;
-	}
 #endif
 
-	rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
-	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
-	}
 
 	sxe_hw_rx_udp_frag_checksum_disable(hw);
 
-	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		ip_csum_offload = true;
-	} else {
+	else
 		ip_csum_offload = false;
-	}
 
 	sxe_hw_rx_ip_checksum_offload_switch(hw, ip_csum_offload);
 
 	sxe_rx_queue_offload_configure(dev);
 
-	return;
 }
 
 static inline void sxe_rx_queue_attr_configure(
@@ -1160,7 +1124,7 @@ static inline void sxe_rx_queue_attr_configure(
 					sxe_rx_queue_s *queue)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	u32 srrctl_size;
 	u64 desc_dma_addr;
 	u32 desc_mem_len;
@@ -1182,9 +1146,8 @@ static inline void sxe_rx_queue_attr_configure(
 	sxe_hw_rx_rcv_ctl_configure(hw, reg_idx,
 			SXE_LRO_HDR_SIZE, buf_size);
 
-	if (queue->drop_en) {
+	if (queue->drop_en)
 		sxe_hw_rx_drop_switch(hw, reg_idx, true);
-	}
 
 	sxe_hw_rx_desc_thresh_set(hw, reg_idx);
 
@@ -1194,12 +1157,10 @@ static inline void sxe_rx_queue_attr_configure(
 	buf_size = (u16) ((srrctl_size & SXE_SRRCTL_BSIZEPKT_MASK) <<
 				SXE_SRRCTL_BSIZEPKT_SHIFT);
 
-	if (frame_size + 2 * SXE_VLAN_TAG_SIZE > buf_size) {
+	if (frame_size + 2 * SXE_VLAN_TAG_SIZE > buf_size)
 		dev->data->scattered_rx = 1;
-	}
 
 	sxe_hw_rx_ring_switch(hw, reg_idx, true);
-	return;
 }
 
 static inline void sxe_rx_queue_configure(struct rte_eth_dev *dev)
@@ -1207,10 +1168,9 @@ static inline void sxe_rx_queue_configure(struct rte_eth_dev *dev)
 	u16 i;
 	sxe_rx_queue_s **queue = (sxe_rx_queue_s **)dev->data->rx_queues;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		sxe_rx_queue_attr_configure(dev, queue[i]);
-	}
-	return;
+
 }
 
 static u32 sxe_lro_max_desc_get(struct rte_mempool *pool)
@@ -1221,15 +1181,14 @@ static u32 sxe_lro_max_desc_get(struct rte_mempool *pool)
 	u16 maxdesc = RTE_IPV4_MAX_PKT_LEN /
 			(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
 
-	if (maxdesc >= 16) {
+	if (maxdesc >= 16)
 		desc_num = SXE_LROCTL_MAXDESC_16;
-	} else if (maxdesc >= 8) {
+	else if (maxdesc >= 8)
 		desc_num = SXE_LROCTL_MAXDESC_8;
-	} else if (maxdesc >= 4) {
+	else if (maxdesc >= 4)
 		desc_num = SXE_LROCTL_MAXDESC_4;
-	} else {
+	else
 		desc_num = SXE_LROCTL_MAXDESC_1;
-	}
 
 	return desc_num;
 }
@@ -1250,9 +1209,8 @@ static s32 sxe_lro_sanity_check(struct rte_eth_dev *dev, bool *lro_capable)
 	}
 
 	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
+	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		*lro_capable = true;
-	}
 
 	if (!(*lro_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
 		PMD_LOG_CRIT(INIT, "lro is requested on HW that doesn't "
@@ -1274,18 +1232,15 @@ static void sxe_lro_hw_configure(struct sxe_hw *hw, bool lro_capable,
 
 	sxe_hw_rx_dma_lro_ctrl_set(hw);
 
-	if ((lro_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
+	if ((lro_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 		is_enable = true;
-	} else {
+	else
 		is_enable = false;
-	}
 
-	if (is_enable) {
+	if (is_enable)
 		sxe_hw_rx_nfs_filter_disable(hw);
-	}
 
 	sxe_hw_rx_lro_enable(hw, is_enable);
-	return;
 }
 
 static void sxe_lro_irq_configure(struct sxe_hw *hw, u16 reg_idx,
@@ -1293,12 +1248,11 @@ static void sxe_lro_irq_configure(struct sxe_hw *hw, u16 reg_idx,
 {
 	u32 irq_interval;
 
-	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL_DEFAULT);
+	irq_interval = SXE_EITR_INTERVAL_US(SXE_QUEUE_ITR_INTERVAL);
 	sxe_hw_ring_irq_interval_set(hw, reg_idx, irq_interval);
 
 	sxe_hw_ring_irq_map(hw, false, reg_idx, irq_idx);
 
-	return;
 }
 
 static void sxe_lro_hw_queue_configure(struct rte_eth_dev *dev,
@@ -1317,13 +1271,12 @@ static void sxe_lro_hw_queue_configure(struct rte_eth_dev *dev,
 		sxe_lro_irq_configure(hw, reg_idx, i);
 	}
 
-	return;
 }
 
 static s32 sxe_lro_configure(struct rte_eth_dev *dev)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
 	bool lro_capable = false;
 
@@ -1365,7 +1318,7 @@ static s32 __rte_cold sxe_rx_start(struct rte_eth_dev *dev)
 		if (!rxq->deferred_start) {
 			ret = sxe_rx_queue_start(dev, i);
 			if (ret < 0) {
-				PMD_LOG_ERR(INIT, "rx queue[%u] start failed",i);
+				PMD_LOG_ERR(INIT, "rx queue[%u] start failed", i);
 				goto l_end;
 			}
 		}
@@ -1378,7 +1331,7 @@ static s32 __rte_cold sxe_rx_start(struct rte_eth_dev *dev)
 s32 __rte_cold sxe_rx_configure(struct rte_eth_dev *dev)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	s32 ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -1402,7 +1355,8 @@ s32 __rte_cold sxe_rx_configure(struct rte_eth_dev *dev)
 	}
 
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
-	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, &adapter->rx_vec_allowed);
+	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed,
+			&adapter->rx_vec_allowed);
 #else
 	sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, NULL);
 #endif
@@ -1419,34 +1373,28 @@ s32 __rte_cold sxe_rx_configure(struct rte_eth_dev *dev)
 
 static void sxe_vmdq_rx_mode_get(u32 rx_mask, u32 *orig_val)
 {
-	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
 		*orig_val |= SXE_VMOLR_AUPE;
-	}
 
-	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
 		*orig_val |= SXE_VMOLR_ROMPE;
-	}
 
-	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
 		*orig_val |= SXE_VMOLR_ROPE;
-	}
 
-	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
 		*orig_val |= SXE_VMOLR_BAM;
-	}
 
-	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) {
+	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
 		*orig_val |= SXE_VMOLR_MPE;
-	}
 
-	return;
 }
 
 static void sxe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_vmdq_rx_conf *cfg;
 	struct sxe_adapter *adapter = dev->data->dev_private;
-	struct sxe_hw     *hw = &adapter->hw;
+	struct sxe_hw	 *hw = &adapter->hw;
 	enum rte_eth_nb_pools pools_num;
 	u32 rx_mode = 0;
 	u16 i;
@@ -1472,11 +1420,9 @@ static void sxe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 						cfg->pool_map[i].pools);
 	}
 
-	if (cfg->enable_loop_back) {
+	if (cfg->enable_loop_back)
 		sxe_hw_vmdq_loopback_configure(hw);
-	}
 
-	return;
 }
 
 s32 sxe_rx_features_configure(struct rte_eth_dev *dev)
@@ -1530,8 +1476,8 @@ s32 sxe_rx_features_configure(struct rte_eth_dev *dev)
 	}
 
 	LOG_INFO("pool num:%u rx mq_mode:0x%x configure result:%d.",
-		     RTE_ETH_DEV_SRIOV(dev).active,
-		     dev->data->dev_conf.rxmode.mq_mode, ret);
+			 RTE_ETH_DEV_SRIOV(dev).active,
+			 dev->data->dev_conf.rxmode.mq_mode, ret);
 
 	return ret;
 }
diff --git a/drivers/net/sxe/pf/sxe_rx.h b/drivers/net/sxe/pf/sxe_rx.h
index 7322a54a2c..19854d4cf4 100644
--- a/drivers/net/sxe/pf/sxe_rx.h
+++ b/drivers/net/sxe/pf/sxe_rx.h
@@ -14,10 +14,10 @@
 #define SXE_RXDADV_ERR_CKSUM_BIT  30
 #define SXE_RXDADV_ERR_CKSUM_MSK  3
 
-#define SXE_PACKET_TYPE_MAX               0X80
-#define SXE_PACKET_TYPE_TN_MAX            0X100
-#define SXE_PACKET_TYPE_MASK              0X7F
-#define SXE_RXD_STAT_TMST                 0x10000   
+#define SXE_PACKET_TYPE_MAX			   0X80
+#define SXE_PACKET_TYPE_TN_MAX			0X100
+#define SXE_PACKET_TYPE_MASK			  0X7F
+#define SXE_RXD_STAT_TMST				 0x10000
 
 #define SXE_DESCS_PER_LOOP 4
 
@@ -40,9 +40,8 @@ static inline u64 sxe_rx_desc_status_to_pkt_flags(u32 rx_status,
 	pkt_flags = (rx_status & SXE_RXD_STAT_VP) ?  vlan_flags : 0;
 
 #ifdef RTE_LIBRTE_IEEE1588
-	if (rx_status & SXE_RXD_STAT_TMST) {
+	if (rx_status & SXE_RXD_STAT_TMST)
 		pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
-	}
 #endif
 	return pkt_flags;
 }
@@ -62,7 +61,7 @@ static inline u64 sxe_rx_desc_error_to_pkt_flags(u32 rx_status)
 		SXE_RXDADV_ERR_CKSUM_BIT) & SXE_RXDADV_ERR_CKSUM_MSK];
 
 	if ((rx_status & SXE_RXD_STAT_OUTERIPCS) &&
-	    (rx_status & SXE_RXDADV_ERR_OUTERIPER)) {
+		(rx_status & SXE_RXDADV_ERR_OUTERIPER)) {
 		pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
 	}
 
@@ -101,9 +100,8 @@ static inline u32 sxe_rxd_pkt_info_to_pkt_type(u32 pkt_info,
 							u16 ptype_mask)
 {
 
-	if (unlikely(pkt_info & SXE_RXDADV_PKTTYPE_ETQF)) {
+	if (unlikely(pkt_info & SXE_RXDADV_PKTTYPE_ETQF))
 		return RTE_PTYPE_UNKNOWN;
-	}
 
 	pkt_info = (pkt_info >> SXE_RXDADV_PKTTYPE_ETQF_SHIFT) & ptype_mask;
 
@@ -126,21 +124,21 @@ static inline bool __rte_cold
 
 	if (!(rxq->batch_alloc_size >= RTE_PMD_SXE_MAX_RX_BURST)) {
 		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
-			     "rxq->batch_alloc_size=%d, "
-			     "RTE_PMD_SXE_MAX_RX_BURST=%d",
-			     rxq->batch_alloc_size, RTE_PMD_SXE_MAX_RX_BURST);
+				 "rxq->batch_alloc_size=%d, "
+				 "RTE_PMD_SXE_MAX_RX_BURST=%d",
+				 rxq->batch_alloc_size, RTE_PMD_SXE_MAX_RX_BURST);
 		support = false;
 	} else if (!(rxq->batch_alloc_size < rxq->ring_depth)) {
 		PMD_LOG_DEBUG(INIT, "rx burst batch alloc check: "
-			     "rxq->batch_alloc_size=%d, "
-			     "rxq->ring_depth=%d",
-			     rxq->batch_alloc_size, rxq->ring_depth);
+				 "rxq->batch_alloc_size=%d, "
+				 "rxq->ring_depth=%d",
+				 rxq->batch_alloc_size, rxq->ring_depth);
 		support = false;
 	} else if (!((rxq->ring_depth % rxq->batch_alloc_size) == 0)) {
 		PMD_LOG_DEBUG(INIT, "rx burst batch alloc preconditions: "
-			     "rxq->nb_rx_desc=%d, "
-			     "rxq->batch_alloc_size=%d",
-			     rxq->ring_depth, rxq->batch_alloc_size);
+				 "rxq->nb_rx_desc=%d, "
+				 "rxq->batch_alloc_size=%d",
+				 rxq->ring_depth, rxq->batch_alloc_size);
 		support = false;
 	}
 
@@ -149,7 +147,8 @@ static inline bool __rte_cold
 
 s32 sxe_rx_configure(struct rte_eth_dev *dev);
 
-void sxe_rx_function_set(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+void sxe_rx_function_set(struct rte_eth_dev *dev,
+		bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
 
 #ifdef ETH_DEV_RX_DESC_DONE
 s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset);
@@ -157,10 +156,10 @@ s32 sxe_rx_descriptor_done(void *rx_queue, u16 offset);
 
 s32 sxe_rx_descriptor_status(void *rx_queue, u16 offset);
 
-u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,u16 num_pkts);
+u16 sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts);
 
 s32 sxe_rx_queue_setup(struct rte_eth_dev *dev,
-			 u16 queue_idx,u16 num_desc,
+			 u16 queue_idx, u16 num_desc,
 			 unsigned int socket_id,
 			 const struct rte_eth_rxconf *rx_conf,
 			 struct rte_mempool *mp);
diff --git a/drivers/net/sxe/pf/sxe_stats.c b/drivers/net/sxe/pf/sxe_stats.c
index 5d9de2991c..9e1943336d 100644
--- a/drivers/net/sxe/pf/sxe_stats.c
+++ b/drivers/net/sxe/pf/sxe_stats.c
@@ -14,8 +14,7 @@
 #define SXE_STAT_MAP_CNT 4
 #define SXE_STAT_MAP_MASK 0x0F
 
-#define SXE_QUEUE_STAT_COUNT   \
-    (sizeof(stats_info->hw_stats.qprc) / sizeof(stats_info->hw_stats.qprc[0]))
+#define SXE_QUEUE_STAT_COUNT ARRAY_SIZE(stats_info->hw_stats.qprc)
 
 static const struct sxe_stats_field sxe_xstats_sw_field[] = {
 	{"rx_l3_l4_xsum_error", offsetof(struct sxe_sw_stats,
@@ -75,10 +74,10 @@ static const struct sxe_stats_field sxe_xstats_fc_field[] = {
 };
 
 #define SXE_XSTAT_SW_CNT  (sizeof(sxe_xstats_sw_field) / \
-		      sizeof(sxe_xstats_sw_field[0]))
+			  sizeof(sxe_xstats_sw_field[0]))
 
 #define SXE_XSTAT_MAC_CNT (sizeof(sxe_xstats_mac_field) / \
-		      sizeof(sxe_xstats_mac_field[0]))
+			  sizeof(sxe_xstats_mac_field[0]))
 
 #define SXE_XSTAT_FC_CNT (sizeof(sxe_xstats_fc_field) / \
 			   sizeof(sxe_xstats_fc_field[0]))
@@ -102,8 +101,8 @@ s32 sxe_eth_stats_get(struct rte_eth_dev *eth_dev,
 	struct sxe_stats_info *stats_info = &adapter->stats_info;
 	struct sxe_hw *hw = &adapter->hw;
 	u32 i;
-	u64 rx_packets = 0; 
-	u64 rx_bytes = 0;   
+	u64 rx_packets = 0;
+	u64 rx_bytes = 0;
 	s32 ret = 0;
 
 	sxe_hw_stats_get(hw, &stats_info->hw_stats);
@@ -117,7 +116,7 @@ s32 sxe_eth_stats_get(struct rte_eth_dev *eth_dev,
 	for (i = 0; i < SXE_QUEUE_STAT_COUNT; i++) {
 		rx_packets += stats_info->hw_stats.qprc[i];
 		rx_bytes += stats_info->hw_stats.qbrc[i];
-	
+
 		stats->q_ipackets[i] = stats_info->hw_stats.qprc[i];
 		stats->q_opackets[i] = stats_info->hw_stats.qptc[i];
 		stats->q_ibytes[i] = stats_info->hw_stats.qbrc[i];
@@ -153,7 +152,7 @@ static s32 sxe_hw_xstat_offset_get(u32 id, u32 *offset)
 	} else {
 		ret = -SXE_ERR_PARAM;
 		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
-			    id, size);
+				id, size);
 	}
 
 	return ret;
@@ -169,7 +168,7 @@ static s32 sxe_sw_xstat_offset_get(u32 id, u32 *offset)
 	} else {
 		ret = -SXE_ERR_PARAM;
 		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
-			    id, size);
+				id, size);
 	}
 
 	return ret;
@@ -185,26 +184,25 @@ static s32 sxe_fc_xstat_field_offset_get(u32 id, u8 priority, u32 *offset)
 	} else {
 		ret = -SXE_ERR_PARAM;
 		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
-			    id, size);
+				id, size);
 	}
 
 	return ret;
 }
 
-static void sxe_sw_stats_get(struct rte_eth_dev *eth_dev, 
+static void sxe_sw_stats_get(struct rte_eth_dev *eth_dev,
 				struct sxe_sw_stats *stats)
 {
 	u32 i;
 	u64 hw_csum_rx_error = 0;
 	sxe_rx_queue_s *rxq;
-	
+
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rxq = eth_dev->data->rx_queues[i];
 		hw_csum_rx_error += rxq->rx_stats.csum_err;
 	}
 	stats->hw_csum_rx_error = hw_csum_rx_error;
 
-	return;
 }
 
 s32 sxe_xstats_get(struct rte_eth_dev *eth_dev,
@@ -222,14 +220,14 @@ s32 sxe_xstats_get(struct rte_eth_dev *eth_dev,
 
 	cnt = SXE_XSTAT_CNT;
 	PMD_LOG_INFO(DRV, "xstat size:%u. hw xstat field cnt:%lu "
-		    "fc xstat field cnt:%lu ", cnt,
-		    SXE_XSTAT_MAC_CNT,
-		    SXE_XSTAT_FC_CNT);
+			"fc xstat field cnt:%lu ", cnt,
+			SXE_XSTAT_MAC_CNT,
+			SXE_XSTAT_FC_CNT);
 
 	if (usr_cnt < cnt) {
 		ret = cnt;
 		PMD_LOG_ERR(DRV, "user usr_cnt:%u less than stats cnt:%u.",
-			    usr_cnt, cnt);
+				usr_cnt, cnt);
 		goto l_out;
 	}
 
@@ -336,7 +334,7 @@ s32 sxe_xstats_names_get(__rte_unused struct rte_eth_dev *dev,
 	if (usr_cnt < SXE_XSTAT_CNT) {
 		ret = -SXE_ERR_PARAM;
 		PMD_LOG_ERR(DRV, "max:%lu usr_cnt:%u invalid.(err:%d)",
-			    SXE_XSTAT_CNT, usr_cnt, ret);
+				SXE_XSTAT_CNT, usr_cnt, ret);
 		goto l_out;
 	}
 
@@ -385,8 +383,8 @@ static s32 sxe_all_xstats_value_get(struct rte_eth_dev *eth_dev,
 
 	if (usr_cnt < size) {
 		PMD_LOG_WARN(DRV, "ids null usr_cnt:%u less than xstats"
-			     " cnt:%u, return xstat cnt.",
-			      usr_cnt, size);
+				 " cnt:%u, return xstat cnt.",
+				  usr_cnt, size);
 		ret = size;
 		goto l_out;
 	}
@@ -396,7 +394,7 @@ static s32 sxe_all_xstats_value_get(struct rte_eth_dev *eth_dev,
 
 	if (values == NULL) {
 		PMD_LOG_WARN(DRV, "ids and values null, "
-			     "read clean stats regs");
+				 "read clean stats regs");
 		ret = 0;
 		goto l_out;
 	}
@@ -523,19 +521,18 @@ s32 sxe_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	if (reg_idx >= SXE_QUEUE_STATS_MAP_REG_NUM) {
 		ret = -EIO;
 		PMD_LOG_ERR(DRV, "invalid queue_id:%u reg_idx exceeded "
-			    "max map cnt:%u.(err:%d)",
-			    queue_id, SXE_QUEUE_STATS_MAP_REG_NUM, ret);
+				"max map cnt:%u.(err:%d)",
+				queue_id, SXE_QUEUE_STATS_MAP_REG_NUM, ret);
 		goto l_out;
 	}
 
 	map_idx = (u8)(queue_id % SXE_STAT_MAP_CNT);
 	map_mask <<= (SXE_STAT_MAP_WIDTH * map_idx);
 
-	if (!is_rx) {
+	if (!is_rx)
 		stats_map->txq_stats_map[reg_idx] &= ~map_mask;
-	} else {
+	else
 		stats_map->rxq_stats_map[reg_idx] &= ~map_mask;
-	}
 
 	qsmr_mask = (stat_reg_idx & SXE_STAT_MAP_MASK) << (SXE_STAT_MAP_WIDTH * map_idx);
 	if (!is_rx) {
@@ -547,12 +544,12 @@ s32 sxe_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	}
 
 	PMD_LOG_INFO(DRV, "port %u %s queue_id %d stat map to stat reg[%u] "
-		     "%s[%u] 0x%08x ",
-		     (u16)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
-		     queue_id, stat_reg_idx,
-		     is_rx ? "RQSMR" : "TQSM", reg_idx,
-		     is_rx ? stats_map->rxq_stats_map[reg_idx] :
-		     stats_map->txq_stats_map[reg_idx]);
+			 "%s[%u] 0x%08x ",
+			 (u16)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+			 queue_id, stat_reg_idx,
+			 is_rx ? "RQSMR" : "TQSM", reg_idx,
+			 is_rx ? stats_map->rxq_stats_map[reg_idx] :
+			 stats_map->txq_stats_map[reg_idx]);
 
 l_out:
 	return ret;
@@ -570,7 +567,6 @@ void sxe_queue_stats_map_restore(struct rte_eth_dev *eth_dev)
 		sxe_hw_rxq_stat_map_set(hw, reg_idx, stats_map->rxq_stats_map[reg_idx]);
 	}
 
-	return;
 }
 
 void sxe_queue_stats_map_reset(struct rte_eth_dev *eth_dev)
@@ -588,6 +584,5 @@ void sxe_queue_stats_map_reset(struct rte_eth_dev *eth_dev)
 		sxe_hw_rxq_stat_map_set(hw, reg_idx, 0);
 	}
 
-	return;
 }
 
diff --git a/drivers/net/sxe/pf/sxe_stats.h b/drivers/net/sxe/pf/sxe_stats.h
index 792a160753..8be0ce9448 100644
--- a/drivers/net/sxe/pf/sxe_stats.h
+++ b/drivers/net/sxe/pf/sxe_stats.h
@@ -14,7 +14,7 @@
 #define SXE_STATS_FIELD_NAME_SIZE  50
 
 struct sxe_sw_stats {
-	u64 hw_csum_rx_error;  
+	u64 hw_csum_rx_error;
 };
 
 struct sxe_stats_map {
@@ -23,9 +23,9 @@ struct sxe_stats_map {
 };
 
 struct sxe_stats_info {
-	struct sxe_sw_stats  sw_stats;  
-	struct sxe_mac_stats hw_stats;  	
-	struct sxe_stats_map stats_map; 
+	struct sxe_sw_stats  sw_stats;
+	struct sxe_mac_stats hw_stats;
+	struct sxe_stats_map stats_map;
 };
 
 struct sxe_stats_field {
diff --git a/drivers/net/sxe/pf/sxe_tm.c b/drivers/net/sxe/pf/sxe_tm.c
new file mode 100644
index 0000000000..4a87f255be
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_tm.c
@@ -0,0 +1,1115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+
+#include <rte_malloc.h>
+#include "rte_ethdev.h"
+#include "rte_tm_driver.h"
+#include "rte_tm.h"
+#include "sxe_dpdk_version.h"
+#if defined(DPDK_20_11_5) || defined(DPDK_21_11_5) || defined(DPDK_19_11_6)
+#include <rte_bus_pci.h>
+#else
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_queue.h"
+
+/* Initialize the per-device traffic-manager (rte_tm) software context:
+ * empty the shaper-profile list and the TC/queue node lists, clear the
+ * node counters, and mark the hierarchy as not yet committed to hardware.
+ */
+void sxe_tm_ctxt_init(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+
+	TAILQ_INIT(&tm_ctxt->shaper_profile_list);
+
+	tm_ctxt->root = NULL;
+	TAILQ_INIT(&tm_ctxt->queue_list);
+	TAILQ_INIT(&tm_ctxt->tc_list);
+	tm_ctxt->tc_node_num = 0;
+	tm_ctxt->queue_node_num = 0;
+	tm_ctxt->committed = false;
+
+}
+
+/* Tear down the traffic-manager context: free every queue node, every TC
+ * node, the root node and every shaper profile, resetting the counters.
+ * Safe on an already-empty context (all loops simply find empty lists).
+ */
+void sxe_tm_ctxt_uninit(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+
+	struct sxe_tm_shaper_profile *shaper_profile;
+	struct sxe_tm_node *tm_node;
+
+	/* Leaf (queue) nodes first, then TC nodes, then the root. */
+	while ((tm_node = TAILQ_FIRST(&tm_ctxt->queue_list))) {
+		TAILQ_REMOVE(&tm_ctxt->queue_list, tm_node, node);
+		rte_free(tm_node);
+	}
+	tm_ctxt->queue_node_num = 0;
+
+	while ((tm_node = TAILQ_FIRST(&tm_ctxt->tc_list))) {
+		TAILQ_REMOVE(&tm_ctxt->tc_list, tm_node, node);
+		rte_free(tm_node);
+	}
+	tm_ctxt->tc_node_num = 0;
+
+	if (tm_ctxt->root) {
+		rte_free(tm_ctxt->root);
+		tm_ctxt->root = NULL;
+	}
+
+	/* Profiles are freed last; node teardown above dropped all users. */
+	while ((shaper_profile = TAILQ_FIRST(&tm_ctxt->shaper_profile_list))) {
+		TAILQ_REMOVE(&tm_ctxt->shaper_profile_list, shaper_profile, node);
+		rte_free(shaper_profile);
+	}
+
+}
+
+/* Derive the number of traffic classes from the configured TX mq_mode:
+ * DCB uses the user-configured nb_tcs, VMDQ+DCB maps 32 pools to 4 TCs
+ * and any other pool count to 8 TCs, and all other modes give 1 TC.
+ */
+static inline u8 sxe_tcs_num_get(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *eth_conf;
+	u8 tcs_num = 0;
+
+	eth_conf = &dev->data->dev_conf;
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
+		tcs_num = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_32_POOLS) {
+			tcs_num = RTE_ETH_4_TCS;
+		} else {
+			tcs_num = RTE_ETH_8_TCS;
+		}
+	} else {
+		tcs_num = 1;
+	}
+
+	return tcs_num;
+}
+
+/* rte_tm capabilities_get op: report the driver-wide TM capabilities.
+ * Three levels (port/TC/queue), private byte-mode shapers only, no shared
+ * shapers, no WFQ, no WRED/head-drop congestion management, no stats.
+ * Returns 0 on success or -EINVAL if cap/error is NULL.
+ */
+static s32 sxe_capabilities_get(struct rte_eth_dev *dev,
+			  struct rte_tm_capabilities *cap,
+			  struct rte_tm_error *error)
+{
+	UNUSED(dev);
+	s32 ret = 0;
+
+	if (!cap || !error) {
+		PMD_LOG_ERR(DRV, "sxe get tm cap failed, cap or error is NULL");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+	memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+	/* one port node + one node per TC + one node per hw ring */
+	cap->n_nodes_max = 1 + MAX_TRAFFIC_CLASS + SXE_HW_TXRX_RING_NUM_MAX;
+
+	cap->n_levels_max = 3;
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+	cap->shaper_n_max = cap->n_nodes_max;
+	cap->shaper_private_n_max = cap->n_nodes_max;
+	cap->shaper_private_dual_rate_n_max = 0;
+	cap->shaper_private_rate_min = 0;
+	/* 1.25e9 bytes/s, i.e. 10 Gb/s line rate expressed in bytes */
+	cap->shaper_private_rate_max = 1250000000ull;
+#ifndef DPDK_19_11_6
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
+#endif
+
+	cap->shaper_shared_n_max = 0;
+	cap->shaper_shared_n_nodes_per_shaper_max = 0;
+	cap->shaper_shared_n_shapers_per_node_max = 0;
+	cap->shaper_shared_dual_rate_n_max = 0;
+	cap->shaper_shared_rate_min = 0;
+	cap->shaper_shared_rate_max = 0;
+#ifndef DPDK_19_11_6
+	cap->shaper_shared_packet_mode_supported = 0;
+	cap->shaper_shared_byte_mode_supported = 0;
+#endif
+	cap->sched_n_children_max = SXE_HW_TXRX_RING_NUM_MAX;
+
+	cap->sched_sp_n_priorities_max = 1;
+	cap->sched_wfq_n_children_per_group_max = 0;
+	cap->sched_wfq_n_groups_max = 0;
+#ifndef DPDK_19_11_6
+	cap->sched_wfq_packet_mode_supported = 0;
+	cap->sched_wfq_byte_mode_supported = 0;
+#endif
+	cap->sched_wfq_weight_max = 1;
+	cap->cman_head_drop_supported = 0;
+	cap->dynamic_update_mask = 0;
+	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+	cap->cman_wred_context_n_max = 0;
+	cap->cman_wred_context_private_n_max = 0;
+	cap->cman_wred_context_shared_n_max = 0;
+	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+	cap->stats_mask = 0;
+
+l_end:
+	return ret;
+}
+
+/* Validate a shaper profile request against driver capabilities.
+ * Only a peak rate is supported: any committed rate/size, peak bucket
+ * size, or packet-length adjustment is rejected with -EINVAL and the
+ * corresponding rte_tm error type/message filled in.
+ */
+static s32 sxe_shaper_profile_param_check(
+					struct rte_tm_shaper_params *profile,
+					struct rte_tm_error *error)
+{
+	s32 ret = -EINVAL;
+
+	if (profile->committed.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+		error->message = "committed rate not supported";
+		goto l_end;
+	}
+
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		goto l_end;
+	}
+
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		goto l_end;
+	}
+
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		goto l_end;
+	}
+
+	ret = 0;
+l_end:
+	return ret;
+}
+
+/* Linear search of the device's shaper-profile list by profile id.
+ * Returns the matching profile or NULL if the id is unknown.
+ * NOTE(review): the loop does not break on the first hit, so the whole
+ * list is always walked (last match would win); harmless as long as
+ * profile ids are unique, which sxe_shaper_profile_add enforces.
+ */
+static inline struct sxe_tm_shaper_profile *sxe_shaper_profile_search(
+						struct rte_eth_dev *dev,
+						u32 id)
+{
+	struct sxe_tm_shaper_profile *profile = NULL;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+	struct sxe_shaper_profile_list *shaper_profile_list =
+		&tm_ctxt->shaper_profile_list;
+	struct sxe_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (id == shaper_profile->id) {
+			profile = shaper_profile;
+			PMD_LOG_DEBUG(DRV, "got shaper_profile in idx[%u]", id);
+		}
+	}
+
+	return profile;
+}
+
+/* rte_tm shaper_profile_add op: validate the parameters, reject a
+ * duplicate id, then allocate a profile (ref_cnt starts at 0 via
+ * rte_zmalloc) and append it to the context's profile list.
+ * Returns 0 on success, -EINVAL on bad parameters, -ENOMEM on alloc
+ * failure.
+ */
+static s32 sxe_shaper_profile_add(struct rte_eth_dev *dev,
+			u32 id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+	struct sxe_tm_shaper_profile *shaper_profile;
+	s32 ret;
+
+	if (!profile || !error) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "shaper profile add failed, profile or error NULL");
+		goto l_end;
+	}
+
+	ret = sxe_shaper_profile_param_check(profile, error);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "sxe_shaper_profile_param_check err=%d", ret);
+		goto l_end;
+	}
+
+	/* ids must be unique within the device */
+	shaper_profile = sxe_shaper_profile_search(dev, id);
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID exist";
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	shaper_profile = rte_zmalloc("sxe_tm_shaper_profile",
+					sizeof(struct sxe_tm_shaper_profile),
+					0);
+	if (!shaper_profile) {
+		ret = -ENOMEM;
+		PMD_LOG_ERR(DRV, "shaper profile id[%u] alloc mem failed",
+				id);
+		goto l_end;
+	}
+
+	shaper_profile->id = id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&tm_ctxt->shaper_profile_list,
+			  shaper_profile, node);
+
+l_end:
+	return ret;
+}
+
+/* rte_tm shaper_profile_delete op: remove and free a profile by id.
+ * Refuses to delete a profile that is still referenced by a node
+ * (ref_cnt != 0). Returns 0 on success, -EINVAL otherwise.
+ */
+static s32 sxe_shaper_profile_del(struct rte_eth_dev *dev,
+			u32 id,
+			struct rte_tm_error *error)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+	struct sxe_tm_shaper_profile *shaper_profile;
+	s32 ret = -EINVAL;
+
+	if (!error) {
+		PMD_LOG_ERR(DRV, "shaper profile del failed, error is NULL");
+		goto l_end;
+	}
+
+	shaper_profile = sxe_shaper_profile_search(dev, id);
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID not exist";
+		goto l_end;
+	}
+
+	if (shaper_profile->ref_cnt) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		goto l_end;
+	}
+
+	TAILQ_REMOVE(&tm_ctxt->shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	ret = 0;
+
+l_end:
+	return ret;
+}
+
+/* Validate the non-leaf specific node parameters: WFQ weight mode is not
+ * supported and exactly one SP priority is required. Returns 0 on success
+ * or -EINVAL with error type/message set.
+ */
+static inline s32 sxe_non_leaf_node_param_check(
+					struct rte_tm_node_params *params,
+					struct rte_tm_error *error)
+{
+	s32 ret = -EINVAL;
+
+
+	if (params->nonleaf.wfq_weight_mode) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+		error->message = "WFQ not supported";
+		goto l_end;
+	}
+
+	if (params->nonleaf.n_sp_priorities != 1) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+		error->message = "SP priority not supported";
+		goto l_end;
+	}
+
+	ret = 0;
+l_end:
+	return ret;
+}
+
+/* Validate the leaf (queue) specific node parameters: congestion
+ * management and every form of WRED configuration are unsupported.
+ * Returns 0 on success or -EINVAL with error type/message set.
+ */
+static inline s32 sxe_leaf_node_param_check(
+					struct rte_tm_node_params *params,
+					struct rte_tm_error *error)
+{
+	s32 ret = -EINVAL;
+
+	if (params->leaf.cman) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+		error->message = "Congestion management not supported";
+		goto l_end;
+	}
+
+	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+		error->message = "WRED not supported";
+		goto l_end;
+	}
+
+	if (params->leaf.wred.shared_wred_context_id) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+		error->message = "WRED not supported";
+		goto l_end;
+	}
+
+	if (params->leaf.wred.n_shared_wred_contexts) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+		error->message = "WRED not supported";
+		goto l_end;
+	}
+
+	ret = 0;
+
+l_end:
+	return ret;
+}
+
+/* Common node-add parameter validation: node id must be set, priority
+ * must be 0, weight must be 1, and shared shapers are unsupported; then
+ * dispatch to the leaf or non-leaf specific checks.
+ * NOTE(review): a node with id < nb_tx_queues is always validated with
+ * the leaf rules regardless of level_id — confirm this is intended for
+ * port/TC nodes that reuse small ids.
+ */
+static s32 sxe_node_param_check(struct rte_eth_dev *dev, u32 node_id,
+					u32 priority, u32 weight, u32 level_id,
+					struct rte_tm_node_params *params,
+					struct rte_tm_error *error)
+{
+	s32 ret = -EINVAL;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		goto l_end;
+	}
+
+	if (priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority should be 0";
+		goto l_end;
+	}
+
+	if (weight != 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight must be 1";
+		goto l_end;
+	}
+
+	if (params->shared_shaper_id) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+		error->message = "shared shaper not supported";
+		goto l_end;
+	}
+
+	if (params->n_shared_shapers) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+		error->message = "shared shaper not supported";
+		goto l_end;
+	}
+
+	if (node_id >= dev->data->nb_tx_queues && level_id != SXE_TM_NODE_TYPE_QUEUE) {
+		ret = sxe_non_leaf_node_param_check(params, error);
+		PMD_LOG_INFO(DRV, "non leaf param check ret=%d", ret);
+		goto l_end;
+	}
+
+	ret = sxe_leaf_node_param_check(params, error);
+	PMD_LOG_INFO(DRV, "leaf param check ret=%d", ret);
+
+l_end:
+	return ret;
+}
+
+/* Find a node by id across the three levels, checking the root first,
+ * then the TC list, then the queue list; on a hit *node_type is set to
+ * the level the node was found at. Returns NULL if the id is unknown.
+ * NOTE(review): unlike the root/TC paths, the queue loop has no early
+ * exit; harmless if node ids are unique, which sxe_node_add enforces.
+ */
+static inline struct sxe_tm_node *sxe_tm_node_search(
+				struct rte_eth_dev *dev, u32 node_id,
+				enum sxe_tm_node_type *node_type)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+	struct sxe_tm_node *target_node = NULL;
+	struct sxe_tm_node *tmp_node = NULL;
+
+	if (tm_ctxt->root && tm_ctxt->root->id == node_id) {
+		*node_type = SXE_TM_NODE_TYPE_PORT;
+		target_node = tm_ctxt->root;
+		goto l_end;
+	}
+
+	TAILQ_FOREACH(tmp_node, &tm_ctxt->tc_list, node) {
+		if (tmp_node->id == node_id) {
+			*node_type = SXE_TM_NODE_TYPE_TC;
+			target_node = tmp_node;
+			goto l_end;
+		}
+	}
+
+	TAILQ_FOREACH(tmp_node, &tm_ctxt->queue_list, node) {
+		if (tmp_node->id == node_id) {
+			*node_type = SXE_TM_NODE_TYPE_QUEUE;
+			target_node = tmp_node;
+		}
+	}
+
+l_end:
+	return target_node;
+}
+
+/* Compute the hardware TX queue range (*base, *num) owned by TC tc_idx.
+ * With SR-IOV active and a single TC, each pool gets 2/4/8 queues
+ * depending on the pool count and the PF's range starts after the last
+ * VF's queues (base = vf_num * queues-per-pool — presumably the PF uses
+ * the pool following the VFs; confirm against the hardware spec).
+ * With SR-IOV and multiple TCs each TC gets exactly one queue. Without
+ * SR-IOV, fixed 8-TC and 4-TC DCB layout tables are used.
+ * An out-of-range tc_idx leaves *base = *num = 0.
+ */
+static void sxe_tc_owned_queues_get(struct rte_eth_dev *dev,
+					u16 tc_idx, u16 *base, u16 *num)
+{
+	u8 tcs_num = sxe_tcs_num_get(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	u16 vf_num = pci_dev->max_vfs;
+
+	*base = 0;
+	*num = 0;
+
+	if (vf_num) {
+		if (tcs_num == 1) {
+			if (vf_num >= RTE_ETH_32_POOLS) {
+				*num = 2;
+				*base = vf_num * 2;
+			} else if (vf_num >= RTE_ETH_16_POOLS) {
+				*num = 4;
+				*base = vf_num * 4;
+			} else {
+				*num = 8;
+				*base = vf_num * 8;
+			}
+		} else {
+			/* one queue per TC within the PF's pool */
+			*num = 1;
+			*base = vf_num * tcs_num + tc_idx;
+		}
+	} else {
+		if (tcs_num == RTE_ETH_8_TCS) {
+			/* 8-TC DCB layout: 32/32/16/16/8/8/8/8 queues */
+			switch (tc_idx) {
+			case 0:
+				*base = 0;
+				*num = 32;
+				break;
+			case 1:
+				*base = 32;
+				*num = 32;
+				break;
+			case 2:
+				*base = 64;
+				*num = 16;
+				break;
+			case 3:
+				*base = 80;
+				*num = 16;
+				break;
+			case 4:
+				*base = 96;
+				*num = 8;
+				break;
+			case 5:
+				*base = 104;
+				*num = 8;
+				break;
+			case 6:
+				*base = 112;
+				*num = 8;
+				break;
+			case 7:
+				*base = 120;
+				*num = 8;
+				break;
+			default:
+				return;
+			}
+		} else {
+			/* 4-TC DCB layout: 64/32/16/16 queues */
+			switch (tc_idx) {
+			case 0:
+				*base = 0;
+				*num = 64;
+				break;
+			case 1:
+				*base = 64;
+				*num = 32;
+				break;
+			case 2:
+				*base = 96;
+				*num = 16;
+				break;
+			case 3:
+				*base = 112;
+				*num = 16;
+				break;
+			default:
+				return;
+			}
+		}
+	}
+
+}
+
+/* Pre-flight checks for node_add: params/error must be non-NULL, the
+ * hierarchy must not be committed yet, then delegate to the common
+ * per-node parameter validation. Returns 0 on success, -EINVAL otherwise.
+ */
+static s32 sxe_node_add_param_check(struct rte_eth_dev *dev, u32 node_id,
+				u32 priority, u32 weight, u32 level_id,
+				struct rte_tm_node_params *params,
+				struct rte_tm_error *error)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+
+	if (!params || !error) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "node add failed because params or error NULL");
+		goto l_end;
+	}
+
+	if (tm_ctxt->committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe_node_param_check(dev, node_id, priority, weight, level_id,
+					 params, error);
+	PMD_LOG_DEBUG(DRV, "sxe_node_param_check ret=%d", ret);
+
+l_end:
+	return ret;
+}
+
+/* rte_tm node_add op: insert a port (root), TC, or queue node into the
+ * software hierarchy.
+ *
+ * A NULL parent id creates the single root node; otherwise the parent
+ * must be the root (child becomes a TC node) or a TC node (child becomes
+ * a queue node). Node counts are bounded by the configured TC number,
+ * nb_tx_queues, and the queue range owned by the parent TC. On success
+ * the parent's and the referenced shaper profile's ref counts are bumped.
+ *
+ * Return: 0 on success, -EINVAL on validation failure, -ENOMEM on
+ * allocation failure.
+ *
+ * Fix vs. original: the two allocation-failure log strings misspelled
+ * "failed" as "faield".
+ */
+static s32 sxe_node_add(struct rte_eth_dev *dev, u32 node_id,
+				u32 parent_node_id, u32 priority,
+				u32 weight, u32 level_id,
+				struct rte_tm_node_params *params,
+				struct rte_tm_error *error)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+
+	enum sxe_tm_node_type node_type = SXE_TM_NODE_TYPE_MAX;
+	enum sxe_tm_node_type parent_node_type = SXE_TM_NODE_TYPE_MAX;
+	struct sxe_tm_shaper_profile *shaper_profile = NULL;
+	struct sxe_tm_node *tm_node;
+	struct sxe_tm_node *parent_node;
+	u8 tcs_num;
+	u16 q_base = 0;
+	u16 q_nb = 0;
+	s32 ret;
+
+	ret = sxe_node_add_param_check(dev, node_id, priority, weight, level_id,
+					params, error);
+	if (ret) {
+		PMD_LOG_ERR(DRV, "sxe_node_add_param_check err = %d", ret);
+		goto l_end;
+	}
+
+	ret = -EINVAL;
+	/* node ids must be unique across all levels */
+	if (sxe_tm_node_search(dev, node_id, &node_type)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used";
+		goto l_end;
+	}
+
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = sxe_shaper_profile_search(
+					dev, params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile not exist";
+			goto l_end;
+		}
+	}
+
+	/* no parent: this is the (single) root/port node */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+			level_id > SXE_TM_NODE_TYPE_PORT) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "Wrong level";
+			goto l_end;
+		}
+
+		if (tm_ctxt->root) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "already have a root";
+			goto l_end;
+		}
+
+		tm_node = rte_zmalloc("sxe_tm_node",
+					  sizeof(struct sxe_tm_node),
+					  0);
+		if (!tm_node) {
+			ret = -ENOMEM;
+			PMD_LOG_ERR(DRV, "tm node mem alloc failed");
+			goto l_end;
+		}
+
+		tm_node->id = node_id;
+		tm_node->priority = priority;
+		tm_node->weight = weight;
+		tm_node->ref_cnt = 0;
+		tm_node->no = 0;
+		tm_node->parent = NULL;
+		tm_node->shaper_profile = shaper_profile;
+		rte_memcpy(&tm_node->params, params,
+				 sizeof(struct rte_tm_node_params));
+		tm_ctxt->root = tm_node;
+
+		if (shaper_profile)
+			shaper_profile->ref_cnt++;
+
+		ret = 0;
+		goto l_end;
+	}
+
+	parent_node = sxe_tm_node_search(dev, parent_node_id,
+					   &parent_node_type);
+	if (!parent_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent not exist";
+		goto l_end;
+	}
+
+	if (parent_node_type != SXE_TM_NODE_TYPE_PORT &&
+		parent_node_type != SXE_TM_NODE_TYPE_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent is not port or TC";
+		goto l_end;
+	}
+
+	/* a child must sit exactly one level below its parent */
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+		level_id != parent_node_type + 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Wrong level";
+		goto l_end;
+	}
+
+	if (parent_node_type == SXE_TM_NODE_TYPE_PORT) {
+		tcs_num = sxe_tcs_num_get(dev);
+		if (tm_ctxt->tc_node_num >= tcs_num) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many TCs";
+			goto l_end;
+		}
+	} else {
+		if (tm_ctxt->queue_node_num >= dev->data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many queues";
+			goto l_end;
+		}
+
+		/* the queue must fit into the parent TC's hw queue range */
+		sxe_tc_owned_queues_get(dev, parent_node->no, &q_base, &q_nb);
+		if (parent_node->ref_cnt >= q_nb) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many queues than TC supported";
+			goto l_end;
+		}
+
+		if (node_id >= dev->data->nb_tx_queues) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too large queue id";
+			goto l_end;
+		}
+	}
+
+	tm_node = rte_zmalloc("sxe_tm_node",
+				  sizeof(struct sxe_tm_node),
+				  0);
+	if (!tm_node) {
+		ret = -ENOMEM;
+		PMD_LOG_ERR(DRV, "tm node mem alloc failed");
+		goto l_end;
+	}
+
+	tm_node->id = node_id;
+	tm_node->priority = priority;
+	tm_node->weight = weight;
+	tm_node->ref_cnt = 0;
+	tm_node->parent = parent_node;
+	tm_node->shaper_profile = shaper_profile;
+	rte_memcpy(&tm_node->params, params,
+			 sizeof(struct rte_tm_node_params));
+	if (parent_node_type == SXE_TM_NODE_TYPE_PORT) {
+		/* 'no' is the TC index: the parent's current child count */
+		tm_node->no = parent_node->ref_cnt;
+		TAILQ_INSERT_TAIL(&tm_ctxt->tc_list,
+				  tm_node, node);
+		tm_ctxt->tc_node_num++;
+	} else {
+		/* 'no' is the absolute hw queue index within the TC range */
+		tm_node->no = q_base + parent_node->ref_cnt;
+		TAILQ_INSERT_TAIL(&tm_ctxt->queue_list,
+				  tm_node, node);
+		tm_ctxt->queue_node_num++;
+	}
+
+	tm_node->parent->ref_cnt++;
+
+	if (shaper_profile)
+		shaper_profile->ref_cnt++;
+
+	ret = 0;
+l_end:
+	return ret;
+}
+
+/* rte_tm node_delete op: remove a node from the software hierarchy.
+ *
+ * The node must exist, have no children (ref_cnt == 0), and the
+ * hierarchy must not be committed yet. The referenced shaper profile's
+ * ref count and the parent's child count are decremented accordingly.
+ *
+ * Return: 0 on success, -EINVAL on any validation failure.
+ *
+ * Fix vs. original: the NULL-error log string misspelled "failed" as
+ * "faield".
+ */
+static s32 sxe_node_delete(struct rte_eth_dev *dev, u32 node_id,
+		struct rte_tm_error *error)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+
+	enum sxe_tm_node_type node_type = SXE_TM_NODE_TYPE_MAX;
+	struct sxe_tm_node *tm_node;
+	s32 ret = -EINVAL;
+
+	if (!error) {
+		PMD_LOG_ERR(DRV, "tm node del failed because error is NULL");
+		goto l_end;
+	}
+
+	if (tm_ctxt->committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		goto l_end;
+	}
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		goto l_end;
+	}
+
+	tm_node = sxe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		goto l_end;
+	}
+
+	if (tm_node->ref_cnt) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message =
+			"cannot delete a node which has children";
+		goto l_end;
+	}
+
+	/* root node: drop the shaper reference and clear the root pointer */
+	if (node_type == SXE_TM_NODE_TYPE_PORT) {
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->ref_cnt--;
+		rte_free(tm_node);
+		tm_ctxt->root = NULL;
+		ret = 0;
+		goto l_end;
+	}
+
+	/* TC or queue node: unlink from parent and its level's list */
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->ref_cnt--;
+
+	tm_node->parent->ref_cnt--;
+	if (node_type == SXE_TM_NODE_TYPE_TC) {
+		TAILQ_REMOVE(&tm_ctxt->tc_list, tm_node, node);
+		tm_ctxt->tc_node_num--;
+	} else {
+		TAILQ_REMOVE(&tm_ctxt->queue_list, tm_node, node);
+		tm_ctxt->queue_node_num--;
+	}
+	rte_free(tm_node);
+
+	ret = 0;
+
+l_end:
+	return ret;
+}
+
+/* rte_tm node_type_get op: report via *is_leaf whether node_id is a leaf
+ * (queue-level) node. Returns 0 on success, -EINVAL if the arguments are
+ * NULL, the id is the NULL sentinel, or the node does not exist.
+ *
+ * Fix vs. original: the NULL-argument log string misspelled "failed" as
+ * "faield".
+ */
+static s32 sxe_node_type_get(struct rte_eth_dev *dev, u32 node_id,
+				s32 *is_leaf, struct rte_tm_error *error)
+{
+	enum sxe_tm_node_type node_type = SXE_TM_NODE_TYPE_MAX;
+	struct sxe_tm_node *tm_node;
+	s32 ret = -EINVAL;
+
+	if (!is_leaf || !error) {
+		PMD_LOG_ERR(DRV, "%s failed because "
+				"error or is_leaf is NULL", __func__);
+		goto l_end;
+	}
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		goto l_end;
+	}
+
+	tm_node = sxe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		goto l_end;
+	}
+
+	/* only queue-level nodes are leaves */
+	if (node_type == SXE_TM_NODE_TYPE_QUEUE)
+		*is_leaf = true;
+	else
+		*is_leaf = false;
+
+	ret = 0;
+
+l_end:
+	return ret;
+}
+
+/* rte_tm level_capabilities_get op: report the capabilities of one
+ * hierarchy level (port, TC, or queue). Node counts per level are 1 /
+ * MAX_TRAFFIC_CLASS / SXE_HW_TXRX_RING_NUM_MAX; all levels expose only
+ * private byte-mode shapers up to 1250000000 bytes/s and no WFQ/WRED.
+ * Returns 0 on success, -EINVAL on NULL arguments or a level id at or
+ * beyond SXE_TM_NODE_TYPE_MAX.
+ *
+ * Fix vs. original: the NULL-argument log string misspelled "failed" as
+ * "faield".
+ */
+static s32 sxe_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+				u32 level_id,
+				struct rte_tm_level_capabilities *cap,
+				struct rte_tm_error *error)
+{
+	s32 ret = -EINVAL;
+
+	if (!cap || !error) {
+		PMD_LOG_ERR(DRV, "get level[%u] capabilities failed because "
+				"cap or error is NULL", level_id);
+		goto l_end;
+	}
+
+	if (level_id >= SXE_TM_NODE_TYPE_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "too deep level";
+		goto l_end;
+	}
+
+	if (level_id == SXE_TM_NODE_TYPE_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+	} else if (level_id == SXE_TM_NODE_TYPE_TC) {
+		cap->n_nodes_max = MAX_TRAFFIC_CLASS;
+		cap->n_nodes_nonleaf_max = MAX_TRAFFIC_CLASS;
+		cap->n_nodes_leaf_max = 0;
+	} else {
+		cap->n_nodes_max = SXE_HW_TXRX_RING_NUM_MAX;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = SXE_HW_TXRX_RING_NUM_MAX;
+	}
+
+	cap->non_leaf_nodes_identical = true;
+	cap->leaf_nodes_identical = true;
+
+	/* port and TC levels: non-leaf capabilities */
+	if (level_id != SXE_TM_NODE_TYPE_QUEUE) {
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported = false;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+#ifndef DPDK_19_11_6
+		cap->nonleaf.shaper_private_packet_mode_supported = 0;
+		cap->nonleaf.shaper_private_byte_mode_supported = 1;
+#endif
+		cap->nonleaf.shaper_shared_n_max = 0;
+#ifndef DPDK_19_11_6
+		cap->nonleaf.shaper_shared_packet_mode_supported = 0;
+		cap->nonleaf.shaper_shared_byte_mode_supported = 0;
+#endif
+		if (level_id == SXE_TM_NODE_TYPE_PORT) {
+			cap->nonleaf.sched_n_children_max =
+				SXE_DCB_MAX_TRAFFIC_CLASS;
+		} else {
+			cap->nonleaf.sched_n_children_max =
+				SXE_HW_TXRX_RING_NUM_MAX;
+		}
+
+		cap->nonleaf.sched_sp_n_priorities_max = 1;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+#ifndef DPDK_19_11_6
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
+#endif
+		cap->nonleaf.stats_mask = 0;
+
+		ret = 0;
+		goto l_end;
+	}
+
+	/* queue level: leaf capabilities */
+	cap->leaf.shaper_private_supported = true;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	cap->leaf.shaper_private_rate_max = 1250000000ull;
+#ifndef DPDK_19_11_6
+	cap->leaf.shaper_private_packet_mode_supported = 0;
+	cap->leaf.shaper_private_byte_mode_supported = 1;
+#endif
+	cap->leaf.shaper_shared_n_max = 0;
+#ifndef DPDK_19_11_6
+	cap->leaf.shaper_shared_packet_mode_supported = 0;
+	cap->leaf.shaper_shared_byte_mode_supported = 0;
+#endif
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = true;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+
+	ret = 0;
+l_end:
+	return ret;
+}
+
+/* Report the rte_tm capabilities of a single hierarchy node.
+ *
+ * Looks the node up by id, then fills @cap with the per-node shaper
+ * limits plus either the leaf (queue) or non-leaf (port/TC) section.
+ *
+ * @dev:	port whose TM context is searched
+ * @node_id:	node to query; must not be RTE_TM_NODE_ID_NULL
+ * @cap:	output capability structure, filled on success
+ * @error:	rte_tm error detail, filled on failure
+ *
+ * Return: 0 on success, -EINVAL on NULL pointer or unknown node.
+ */
+static s32 sxe_node_capabilities_get(struct rte_eth_dev *dev,
+				u32 node_id,
+				struct rte_tm_node_capabilities *cap,
+				struct rte_tm_error *error)
+{
+	enum sxe_tm_node_type node_type = SXE_TM_NODE_TYPE_MAX;
+	struct sxe_tm_node *tm_node;
+	s32 ret = -EINVAL;
+
+	if (!cap || !error) {
+		PMD_LOG_ERR(DRV, "get node[%u] capabilities failed because "
+				"cap or error is NULL", node_id);
+		goto l_end;
+	}
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		goto l_end;
+	}
+
+	/* Search also classifies the node (port/TC/queue). */
+	tm_node = sxe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		goto l_end;
+	}
+
+	/* Private single-rate byte-mode shaper, 1250000000 Bytes/s max. */
+	cap->shaper_private_supported = true;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max = 1250000000ull;
+#ifndef DPDK_19_11_6
+	cap->shaper_private_packet_mode_supported = 0;
+	cap->shaper_private_byte_mode_supported = 1;
+#endif
+	cap->shaper_shared_n_max = 0;
+#ifndef DPDK_19_11_6
+	cap->shaper_shared_packet_mode_supported = 0;
+	cap->shaper_shared_byte_mode_supported = 0;
+#endif
+
+	if (node_type == SXE_TM_NODE_TYPE_QUEUE) {
+		/* Leaf: private WRED context only, no head drop. */
+		cap->leaf.cman_head_drop_supported = false;
+		cap->leaf.cman_wred_context_private_supported = true;
+		cap->leaf.cman_wred_context_shared_n_max = 0;
+	} else {
+		/* Non-leaf: port fans out to TCs, TC fans out to queues. */
+		if (node_type == SXE_TM_NODE_TYPE_PORT) {
+			cap->nonleaf.sched_n_children_max = MAX_TRAFFIC_CLASS;
+		} else {
+			cap->nonleaf.sched_n_children_max =
+				SXE_HW_TXRX_RING_NUM_MAX;
+		}
+
+		cap->nonleaf.sched_sp_n_priorities_max = 1;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+#ifndef DPDK_19_11_6
+		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
+#endif
+	}
+
+	cap->stats_mask = 0;
+
+	ret = 0;
+l_end:
+	return ret;
+}
+
+/* Commit the configured TM hierarchy to hardware.
+ *
+ * Only queue-level rate limiting is actually programmed; a shaper with a
+ * non-zero peak rate on the port node or on any TC node is rejected since
+ * the hardware has no port/TC max-bandwidth support.
+ *
+ * @dev:		port to commit
+ * @clear_on_fail:	when non-zero, reset the TM context on any failure
+ * @error:		rte_tm error detail, filled on failure
+ *
+ * Return: 0 on success, -EINVAL on bad input, or the error from
+ *	   sxe_queue_rate_limit_set().
+ */
+static s32 sxe_hierarchy_commit(struct rte_eth_dev *dev,
+			s32 clear_on_fail,
+			struct rte_tm_error *error)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_tm_context *tm_ctxt = &adapter->tm_ctxt;
+
+	struct sxe_tm_node *tm_node;
+	u64 bw;
+	s32 ret = -EINVAL;
+
+	if (!error) {
+		PMD_LOG_ERR(DRV, "%s failed because "
+				"error is NULL", __func__);
+		goto l_end;
+	}
+
+	/* An empty hierarchy commits trivially. */
+	if (!tm_ctxt->root) {
+		PMD_LOG_INFO(DRV, "tm hierarchy committed");
+		goto done;
+	}
+
+	/* Port-level max bandwidth is not supported by the hardware. */
+	if (tm_ctxt->root->shaper_profile &&
+		tm_ctxt->root->shaper_profile->profile.peak.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "no port max bandwidth";
+		goto fail_clear;
+	}
+
+	/* TC-level max bandwidth is not supported either. */
+	TAILQ_FOREACH(tm_node, &tm_ctxt->tc_list, node) {
+		if (tm_node->shaper_profile &&
+			tm_node->shaper_profile->profile.peak.rate) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "no TC max bandwidth";
+			goto fail_clear;
+		}
+	}
+
+	/* Program each queue's rate limit. */
+	TAILQ_FOREACH(tm_node, &tm_ctxt->queue_list, node) {
+		if (tm_node->shaper_profile)
+			bw = tm_node->shaper_profile->profile.peak.rate;
+		else
+			bw = 0;
+
+		if (bw) {
+			/* rte_tm rates are Bytes/s; hardware wants Mbit/s. */
+			bw = bw * 8 / 1000 / 1000;
+			ret = sxe_queue_rate_limit_set(dev, tm_node->no, bw);
+			if (ret) {
+				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+				error->message =
+					"failed to set queue max bandwidth";
+				goto fail_clear;
+			}
+		}
+	}
+
+done:
+	tm_ctxt->committed = true;
+	ret = 0;
+	goto l_end;
+
+fail_clear:
+	if (clear_on_fail) {
+		sxe_tm_ctxt_uninit(dev);
+		sxe_tm_ctxt_init(dev);
+	}
+l_end:
+	return ret;
+}
+
+/* rte_tm driver callbacks exposed through sxe_tm_ops_get().
+ * Unset callbacks (WRED, shared shapers, stats, node update) are
+ * reported as unsupported to the rte_tm layer.
+ */
+static const struct rte_tm_ops sxe_tm_ops = {
+	.capabilities_get = sxe_capabilities_get,
+	.shaper_profile_add = sxe_shaper_profile_add,
+	.shaper_profile_delete = sxe_shaper_profile_del,
+	.node_add = sxe_node_add,
+	.node_delete = sxe_node_delete,
+	.node_type_get = sxe_node_type_get,
+	.level_capabilities_get = sxe_level_capabilities_get,
+	.node_capabilities_get = sxe_node_capabilities_get,
+	.hierarchy_commit = sxe_hierarchy_commit,
+};
+
+/* eth_dev tm_ops_get callback: hand back the sxe rte_tm ops table.
+ *
+ * @dev:	unused
+ * @arg:	output; receives a pointer to the const ops table
+ *
+ * Return: 0 on success, -EINVAL when @arg is NULL.
+ */
+s32 sxe_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+	s32 ret = 0;
+
+	if (!arg) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "%s failed because "
+				"arg is NULL", __func__);
+		goto l_end;
+	}
+
+	*(const void **)arg = &sxe_tm_ops;
+
+l_end:
+	return ret;
+}
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_tm.h b/drivers/net/sxe/pf/sxe_tm.h
new file mode 100644
index 0000000000..cc736b167f
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_tm.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+/* Traffic-manager (rte_tm) types for the sxe PMD: a three-level
+ * hierarchy of port -> traffic class -> queue.
+ */
+
+#ifndef __SXE_TM_H__
+#define __SXE_TM_H__
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include <rte_tm.h>
+
+#include "sxe_types.h"
+
+/* Node level within the hierarchy; also doubles as the level id. */
+enum sxe_tm_node_type {
+	SXE_TM_NODE_TYPE_PORT,
+	SXE_TM_NODE_TYPE_TC,
+	SXE_TM_NODE_TYPE_QUEUE,
+	SXE_TM_NODE_TYPE_MAX,
+};
+
+/* A shaper profile with its reference count; profiles are kept in
+ * sxe_tm_context.shaper_profile_list and shared between nodes.
+ */
+struct sxe_tm_shaper_profile {
+	TAILQ_ENTRY(sxe_tm_shaper_profile) node;
+	u32 id;
+	u32 ref_cnt;
+	struct rte_tm_shaper_params profile;
+};
+
+/* One hierarchy node.  'no' is the hardware index at this level
+ * (e.g. the queue number for a leaf node); 'ref_cnt' counts children.
+ */
+struct sxe_tm_node {
+	TAILQ_ENTRY(sxe_tm_node) node;
+	u32 id;
+	u32 priority;
+	u32 weight;
+	u32 ref_cnt;
+	u16 no;
+	struct sxe_tm_node *parent;
+	struct sxe_tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(sxe_shaper_profile_list, sxe_tm_shaper_profile);
+TAILQ_HEAD(sxe_tm_node_list, sxe_tm_node);
+
+/* Per-port TM state: the root (port) node, per-level node lists and
+ * counters, and whether the hierarchy has been committed to hardware.
+ */
+struct sxe_tm_context {
+	struct sxe_shaper_profile_list shaper_profile_list;
+	struct sxe_tm_node *root;
+	struct sxe_tm_node_list tc_list;
+	struct sxe_tm_node_list queue_list;
+	u32 tc_node_num;
+	u32 queue_node_num;
+	bool committed;
+};
+
+void sxe_tm_ctxt_init(struct rte_eth_dev *dev);
+
+void sxe_tm_ctxt_uninit(struct rte_eth_dev *dev);
+
+s32 sxe_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
+
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_tx.c b/drivers/net/sxe/pf/sxe_tx.c
index 6b92e6faed..5ccd6f5432 100644
--- a/drivers/net/sxe/pf/sxe_tx.c
+++ b/drivers/net/sxe/pf/sxe_tx.c
@@ -44,31 +44,29 @@
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG |		 \
-		RTE_MBUF_F_TX_MACSEC  |      \
+		RTE_MBUF_F_TX_MACSEC  |	  \
 		RTE_MBUF_F_TX_OUTER_IP_CKSUM |		 \
 		SXE_TX_IEEE1588_TMST)
 
 #define SXE_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ SXE_TX_OFFLOAD_MASK)
 #define RTE_SXE_MAX_TX_FREE_BUF_SZ 64
-#define SXE_TXD_IDX_SHIFT	4 
+#define SXE_TXD_IDX_SHIFT	4
 #define SXE_TX_MIN_PKT_LEN	14
 
-extern const struct sxe_txq_ops def_txq_ops;
-
 void __rte_cold sxe_tx_function_set(struct rte_eth_dev *dev,
 					sxe_tx_queue_s *txq)
 {
 	/* Offload off and signle simple tx code path < 32 use simple tx code path */
 	if ((txq->offloads == 0) &&
-	    (txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST)){
+		(txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST)) {
 		dev->tx_pkt_prepare = NULL;
 #if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
 		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
 #ifndef DPDK_19_11_6
-		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
 #endif
-		    (rte_eal_process_type() != RTE_PROC_PRIMARY ||
-		    sxe_txq_vec_setup(txq) == 0)) {
+			(rte_eal_process_type() != RTE_PROC_PRIMARY ||
+			sxe_txq_vec_setup(txq) == 0)) {
 			dev->tx_pkt_burst   = sxe_pkts_vector_xmit;
 			PMD_LOG_INFO(INIT, "using vector tx code path");
 		} else {
@@ -81,19 +79,18 @@ void __rte_cold sxe_tx_function_set(struct rte_eth_dev *dev,
 #endif
 
 	} else {
-		dev->tx_pkt_burst   = sxe_pkts_xmit_with_offload;;
+		dev->tx_pkt_burst   = sxe_pkts_xmit_with_offload;
 		dev->tx_pkt_prepare = sxe_prep_pkts;
 
 		PMD_LOG_INFO(INIT, "using full-featured tx code path");
 		PMD_LOG_INFO(INIT, " - offloads = 0x%" PRIx64,
-					(long unsigned int)txq->offloads);
+					(unsigned long)txq->offloads);
 		PMD_LOG_INFO(INIT, " - tx_rs_thresh = %d "
 				   "[RTE_PMD_SXE_MAX_TX_BURST=%d]",
 				txq->rs_thresh,
 				RTE_PMD_SXE_MAX_TX_BURST);
 	}
 
-	return;
 }
 
 int __rte_cold sxe_tx_queue_setup(struct rte_eth_dev *dev,
@@ -132,12 +129,10 @@ static void __rte_cold sxe_tx_start(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		sxe_hw_tx_desc_thresh_set(hw, txq->reg_idx,
 				txq->wthresh, txq->hthresh, txq->pthresh);
-		if (!txq->tx_deferred_start) {
+		if (!txq->tx_deferred_start)
 			sxe_tx_queue_start(dev, i);
-		}
 	}
 
-	return;
 }
 
 static void sxe_tx_buf_configure(struct sxe_hw *hw)
@@ -152,7 +147,6 @@ static void sxe_tx_buf_configure(struct sxe_hw *hw)
 
 	sxe_hw_mac_pad_enable(hw);
 
-	return;
 }
 
 void __rte_cold sxe_tx_configure(struct rte_eth_dev *dev)
@@ -180,7 +174,6 @@ void __rte_cold sxe_tx_configure(struct rte_eth_dev *dev)
 
 	sxe_tx_start(dev);
 
-	return;
 }
 
 static inline void sxe_single_desc_fill(volatile sxe_tx_data_desc_u *desc,
@@ -199,7 +192,6 @@ static inline void sxe_single_desc_fill(volatile sxe_tx_data_desc_u *desc,
 			rte_cpu_to_le_32(pkt_len << SXE_TX_DESC_PAYLEN_SHIFT);
 	rte_sxe_prefetch(&(*pkts)->pool);
 
-	return;
 }
 
 #define TX4_PER_LOOP 4
@@ -227,7 +219,6 @@ static inline void sxe_four_desc_fill(volatile sxe_tx_data_desc_u *desc,
 		rte_sxe_prefetch(&(*pkts)->pool);
 	}
 
-	return;
 }
 
 static inline void sxe_tx_ring_fill(sxe_tx_queue_s *txq,
@@ -242,9 +233,8 @@ static inline void sxe_tx_ring_fill(sxe_tx_queue_s *txq,
 	leftover = (pkts_num & ((u32)  TX4_PER_LOOP_MASK));
 
 	for (i = 0; i < mainpart; i += TX4_PER_LOOP) {
-		for (j = 0; j < TX4_PER_LOOP; ++j) {
+		for (j = 0; j < TX4_PER_LOOP; ++j)
 			(buffer + i + j)->mbuf = *(pkts + i + j);
-		}
 		sxe_four_desc_fill(desc + i, pkts + i);
 	}
 
@@ -256,7 +246,6 @@ static inline void sxe_tx_ring_fill(sxe_tx_queue_s *txq,
 		}
 	}
 
-	return;
 }
 
 s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq)
@@ -279,14 +268,13 @@ s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq)
 		mbuf = rte_pktmbuf_prefree_seg(buffer->mbuf);
 		buffer->mbuf = NULL;
 
-		if (unlikely(mbuf == NULL)) {
+		if (unlikely(mbuf == NULL))
 			continue;
-		}
 
 		if (mbuf_free_num >= RTE_SXE_MAX_TX_FREE_BUF_SZ ||
-		    (mbuf_free_num > 0 && mbuf->pool != free_mbuf[0]->pool)) {
+			(mbuf_free_num > 0 && mbuf->pool != free_mbuf[0]->pool)) {
 			rte_mempool_put_bulk(free_mbuf[0]->pool,
-					     (void **)free_mbuf, mbuf_free_num);
+						 (void **)free_mbuf, mbuf_free_num);
 			mbuf_free_num = 0;
 		}
 
@@ -298,11 +286,10 @@ s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq)
 					(void **)free_mbuf, mbuf_free_num);
 	}
 
-	txq->next_dd       += txq->rs_thresh;
+	txq->next_dd	   += txq->rs_thresh;
 	txq->desc_free_num += txq->rs_thresh;
-	if (txq->next_dd >= txq->ring_depth) {
+	if (txq->next_dd >= txq->ring_depth)
 		txq->next_dd = txq->rs_thresh - 1;
-	}
 
 	ret = txq->rs_thresh;
 
@@ -317,9 +304,8 @@ static inline u16 sxe_pkts_xmit(void *tx_queue,
 	sxe_tx_queue_s *txq = (sxe_tx_queue_s *)tx_queue;
 	volatile sxe_tx_data_desc_u *desc_ring = txq->desc_ring;
 
-	if (txq->desc_free_num < txq->free_thresh) {
+	if (txq->desc_free_num < txq->free_thresh)
 		sxe_tx_bufs_free(txq);
-	}
 
 	xmit_pkts_num = (u16)RTE_MIN(txq->desc_free_num, xmit_pkts_num);
 	if (unlikely(xmit_pkts_num == 0)) {
@@ -350,14 +336,12 @@ static inline u16 sxe_pkts_xmit(void *tx_queue,
 		desc_ring[txq->next_rs].read.cmd_type_len |=
 			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
 		txq->next_rs = (u16)(txq->next_rs + txq->rs_thresh);
-		if (txq->next_rs >= txq->ring_depth) {
+		if (txq->next_rs >= txq->ring_depth)
 			txq->next_rs = (u16)(txq->rs_thresh - 1);
-		}
 	}
 
-	if (txq->next_to_use >= txq->ring_depth) {
+	if (txq->next_to_use >= txq->ring_depth)
 		txq->next_to_use = 0;
-	}
 
 	rte_wmb();
 	rte_write32_wc_relaxed((rte_cpu_to_le_32(txq->next_to_use)),
@@ -387,13 +371,12 @@ u16 sxe_pkts_simple_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num
 		ret = sxe_pkts_xmit(tx_queue, &(tx_pkts[xmit_pkts_num]),
 							need_xmit_pkts);
 
-		pkts_num      -= ret;
+		pkts_num	  -= ret;
 		xmit_pkts_num += ret;
 
 		/* Don't have enough desc */
-		if (ret < need_xmit_pkts) {
+		if (ret < need_xmit_pkts)
 			break;
-		}
 	}
 
 	LOG_DEBUG("simple xmit:port_id=%u, queue_id=%u, "
@@ -421,9 +404,8 @@ u16 sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		xmit_pkts_num += ret;
 		pkts_num -= ret;
-		if (ret < need_xmit_pkts) {
+		if (ret < need_xmit_pkts)
 			break;
-		}
 	}
 
 	return xmit_pkts_num;
@@ -484,21 +466,21 @@ static inline bool sxe_cache_ctxt_desc_match(
 {
 	bool ret;
 
-	ol_info->l2_len       = pkt->l2_len;
-	ol_info->l3_len       = pkt->l3_len;
-	ol_info->l4_len       = pkt->l4_len;
-	ol_info->vlan_tci     = pkt->vlan_tci;
-	ol_info->tso_segsz    = pkt->tso_segsz;
+	ol_info->l2_len	   = pkt->l2_len;
+	ol_info->l3_len	   = pkt->l3_len;
+	ol_info->l4_len	   = pkt->l4_len;
+	ol_info->vlan_tci	 = pkt->vlan_tci;
+	ol_info->tso_segsz	= pkt->tso_segsz;
 	ol_info->outer_l2_len = pkt->outer_l2_len;
 	ol_info->outer_l3_len = pkt->outer_l3_len;
 
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
-		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
-		     & ol_info->data[0])) &&
+			(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+			 & ol_info->data[0])) &&
 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
-		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
-		     & ol_info->data[1])))) {
+			(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+			 & ol_info->data[1])))) {
 
 		ret = false;
 		goto l_end;
@@ -508,11 +490,11 @@ static inline bool sxe_cache_ctxt_desc_match(
 
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
-		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
-		     & ol_info->data[0])) &&
+			(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+			 & ol_info->data[0])) &&
 		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
-		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
-		     & ol_info->data[1])))) {
+			(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+			 & ol_info->data[1])))) {
 
 		ret = false;
 		goto l_end;
@@ -545,9 +527,8 @@ static inline void sxe_ctxt_desc_fill(sxe_tx_queue_s *txq,
 
 	mss_l4len_idx |= (ctx_idx << SXE_TXD_IDX_SHIFT);
 
-	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+	if (ol_flags & RTE_MBUF_F_TX_VLAN)
 		tx_offload_mask.vlan_tci |= ~0;
-	}
 
 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
@@ -614,14 +595,14 @@ static inline void sxe_ctxt_desc_fill(sxe_tx_queue_s *txq,
 		tx_offload_mask.outer_l3_len |= ~0;
 		tx_offload_mask.l2_len |= ~0;
 		seqnum_seed |= tx_offload.outer_l3_len
-			       << SXE_TX_CTXTD_OUTER_IPLEN_SHIFT;
+				   << SXE_TX_CTXTD_OUTER_IPLEN_SHIFT;
 		seqnum_seed |= tx_offload.l2_len
-			       << SXE_TX_CTXTD_TUNNEL_LEN_SHIFT;
+				   << SXE_TX_CTXTD_TUNNEL_LEN_SHIFT;
 		vlan_macip_lens |= (tx_offload.outer_l2_len <<
-				    	SXE_TX_CTXTD_MACLEN_SHIFT);
+					SXE_TX_CTXTD_MACLEN_SHIFT);
 	} else {
 		vlan_macip_lens |= (tx_offload.l2_len <<
-				    	SXE_TX_CTXTD_MACLEN_SHIFT);
+						SXE_TX_CTXTD_MACLEN_SHIFT);
 	}
 
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
@@ -629,31 +610,27 @@ static inline void sxe_ctxt_desc_fill(sxe_tx_queue_s *txq,
 		tx_offload_mask.data[0] & tx_offload.data[0];
 	txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
 		tx_offload_mask.data[1] & tx_offload.data[1];
-	txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
+	txq->ctx_cache[ctx_idx].tx_offload_mask	= tx_offload_mask;
 
 	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
 	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
 	ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
-	ctx_txd->seqnum_seed     = seqnum_seed;
+	ctx_txd->seqnum_seed	 = seqnum_seed;
 
-	return;
 }
 
 static inline u32 sxe_tx_desc_csum_info_setup(u64 ol_flags)
 {
 	u32 desc_csum = 0;
 
-	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
+	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM)
 		desc_csum |= SXE_TXD_POPTS_TXSM;
-	}
 
-	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
 		desc_csum |= SXE_TXD_POPTS_IXSM;
-	}
 
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
 		desc_csum |= SXE_TXD_POPTS_TXSM;
-	}
 
 	return desc_csum;
 }
@@ -662,22 +639,18 @@ static inline u32 sxe_tx_desc_cmdtype_setup(u64 ol_flags)
 {
 	u32 cmdtype = 0;
 
-	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+	if (ol_flags & RTE_MBUF_F_TX_VLAN)
 		cmdtype |= SXE_TX_DESC_VLE;
-	}
 
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
 		cmdtype |= SXE_TXD_DCMD_TSE;
-	}
 
-	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
+	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << SXE_TX_OUTERIPCS_SHIFT);
-	}
 
 #ifdef SXE_DPDK_MACSEC
-	if (ol_flags & RTE_MBUF_F_TX_MACSEC) {
+	if (ol_flags & RTE_MBUF_F_TX_MACSEC)
 		cmdtype |= SXE_TXD_MAC_LINKSEC;
-	}
 #endif
 
 	return cmdtype;
@@ -697,9 +670,8 @@ static inline s32 sxe_xmit_cleanup(sxe_tx_queue_s *txq)
 
 	desc_to_clean_to = (u16)(ntc + txq->rs_thresh);
 
-	if (desc_to_clean_to >= ring_depth) {
+	if (desc_to_clean_to >= ring_depth)
 		desc_to_clean_to = (u16)(desc_to_clean_to - ring_depth);
-	}
 
 	desc_to_clean_to = buffer_ring[desc_to_clean_to].last_id;
 
@@ -749,9 +721,8 @@ static inline s32 sxe_tx_pkt_desc_clean(
 			txq->port_id, txq->queue_idx);
 
 	ret = sxe_xmit_cleanup(txq);
-	if (ret) {
+	if (ret)
 		goto l_end;
-	}
 
 	if (unlikely(need_desc_num > txq->rs_thresh)) {
 		LOG_DEBUG(
@@ -769,9 +740,8 @@ static inline s32 sxe_tx_pkt_desc_clean(
 		/* Clean up enought desc */
 		while (need_desc_num > txq->desc_free_num) {
 			ret = sxe_xmit_cleanup(txq);
-			if (ret) {
+			if (ret)
 				goto l_end;
-			}
 		}
 	}
 
@@ -791,20 +761,19 @@ u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16
 	sxe_tx_queue_s  *txq = tx_queue;
 	u32 pkt_len, cmd_type_len, olinfo_status;
 	u16 need_desc_num, last_desc_idx, xmit_num, ntu, seg_len;
-	volatile sxe_tx_data_desc_u *tail_desc = NULL; 
+	volatile sxe_tx_data_desc_u *tail_desc = NULL;
 	volatile sxe_tx_data_desc_u *desc_ring, *desc;
 	struct sxe_tx_buffer *buffer_ring, *buffer, *next_buffer;
 
 	ol_info.data[SXE_CTXT_DESC_0] = 0;
 	ol_info.data[SXE_CTXT_DESC_1] = 0;
-	ntu         = txq->next_to_use;
+	ntu		 = txq->next_to_use;
 	desc_ring   = txq->desc_ring;
 	buffer_ring = txq->buffer_ring;
-	buffer      = &buffer_ring[ntu];
+	buffer	  = &buffer_ring[ntu];
 
-	if (txq->desc_free_num < txq->free_thresh) {
+	if (txq->desc_free_num < txq->free_thresh)
 		sxe_xmit_cleanup(txq);
-	}
 
 	/* Refresh cache, pre fetch data to cache */
 	rte_sxe_prefetch(&buffer->mbuf->pool);
@@ -815,38 +784,35 @@ u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16
 		pkt_len = pkt->pkt_len;
 
 		ol_req = pkt->ol_flags & SXE_TX_OFFLOAD_MASK;
-		if (ol_req) {
+		if (ol_req)
 			new_ctx = sxe_cache_ctxt_desc_match(txq, pkt, ol_req, &ol_info);
-		}
 
 		need_desc_num = (u16)(pkt->nb_segs + new_ctx);
 
 		if (tail_desc != NULL &&
-		    need_desc_num + txq->desc_used_num >= txq->rs_thresh) {
+			need_desc_num + txq->desc_used_num >= txq->rs_thresh) {
 			tail_desc->read.cmd_type_len |=
 				rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
 		}
 
 		last_desc_idx = (u16) (ntu + need_desc_num - 1);
 
-		if (last_desc_idx >= txq->ring_depth) {
+		if (last_desc_idx >= txq->ring_depth)
 			last_desc_idx = (u16) (last_desc_idx - txq->ring_depth);
-		}
 
 		LOG_DEBUG("port_id=%u queue_id=%u pktlen=%u"
 			   " next_to_ues=%u last_desc_idx=%u",
-			   (unsigned) txq->port_id,
-			   (unsigned) txq->queue_idx,
-			   (unsigned) pkt_len,
-			   (unsigned) ntu,
-			   (unsigned) last_desc_idx);
+			   (unsigned int) txq->port_id,
+			   (unsigned int) txq->queue_idx,
+			   (unsigned int) pkt_len,
+			   (unsigned int) ntu,
+			   (unsigned int) last_desc_idx);
 
 		if (need_desc_num > txq->desc_free_num) {
 			ret = sxe_tx_pkt_desc_clean(txq, need_desc_num);
-			if(ret) {
-				if (0 == xmit_num) {
+			if (ret) {
+				if (xmit_num == 0)
 					goto l_end;
-				}
 
 				goto l_end_of_tx;
 			}
@@ -854,9 +820,8 @@ u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16
 
 		cmd_type_len = SXE_TX_DESC_TYPE_DATA | SXE_TX_DESC_IFCS;
 #ifdef RTE_LIBRTE_IEEE1588
-		if (pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+		if (pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
 			cmd_type_len |= SXE_TXD_MAC_1588;
-		}
 #endif
 
 		olinfo_status = 0;
@@ -891,10 +856,10 @@ u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16
 
 			LOG_DEBUG("tx need offload, port_id=%u "
 			"queue_id=%u pktlen=%u, ctxt_id=%u",
-			   (unsigned) txq->port_id,
-			   (unsigned) txq->queue_idx,
-			   (unsigned) pkt_len,
-			   (unsigned) txq->ctx_curr);
+			   (unsigned int) txq->port_id,
+			   (unsigned int) txq->queue_idx,
+			   (unsigned int) pkt_len,
+			   (unsigned int) txq->ctx_curr);
 
 			cmd_type_len  |= sxe_tx_desc_cmdtype_setup(pkt->ol_flags);
 			olinfo_status |= sxe_tx_desc_csum_info_setup(pkt->ol_flags);
@@ -908,11 +873,10 @@ u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16
 			next_buffer = &buffer_ring[buffer->next_id];
 
 			rte_prefetch0(&next_buffer->mbuf->pool);
-			if (buffer->mbuf != NULL) {
+			if (buffer->mbuf != NULL)
 				rte_pktmbuf_free_seg(buffer->mbuf);
-			}
 
-			buffer->mbuf = m_seg;  
+			buffer->mbuf = m_seg;
 
 			seg_len = m_seg->data_len;
 
@@ -956,8 +920,8 @@ u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16
 	rte_wmb();
 
 	LOG_DEBUG("port_id=%u queue_idx=%u next_to_use=%u xmit_num=%u",
-		   (unsigned) txq->port_id, (unsigned) txq->queue_idx,
-		   (unsigned) ntu, (unsigned) xmit_num);
+		   (unsigned int) txq->port_id, (unsigned int) txq->queue_idx,
+		   (unsigned int) ntu, (unsigned int) xmit_num);
 
 	rte_write32_wc_relaxed(ntu, txq->tdt_reg_addr);
 
@@ -980,7 +944,7 @@ u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt)
 	u16 nb_tx_to_clean;
 	struct sxe_tx_buffer *buffer_ring = txq->buffer_ring;
 
-	ntu    = txq->next_to_use;
+	ntu	= txq->next_to_use;
 	tx_id  = buffer_ring[ntu].next_id;
 
 	if (txq->desc_free_num == 0 && sxe_xmit_cleanup(txq)) {
@@ -991,12 +955,11 @@ u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt)
 	nb_tx_to_clean  = txq->desc_free_num;
 	nb_tx_free_last = txq->desc_free_num;
 
-	if (!free_cnt) {
+	if (!free_cnt)
 		free_cnt = txq->ring_depth;
-	}
 
 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
-		for (i = 0; i < (nb_tx_to_clean && pkt_cnt < free_cnt && \
+		for (i = 0; i < (nb_tx_to_clean && pkt_cnt < free_cnt &&
 			tx_id != ntu); i++) {
 			if (buffer_ring[tx_id].mbuf != NULL) {
 				rte_pktmbuf_free_seg(buffer_ring[tx_id].mbuf);
@@ -1008,15 +971,14 @@ u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt)
 			tx_id = buffer_ring[tx_id].next_id;
 		}
 
-		if (txq->rs_thresh > txq->ring_depth - txq->desc_free_num || \
+		if (txq->rs_thresh > txq->ring_depth - txq->desc_free_num ||
 				tx_id == ntu) {
 			break;
 		}
 
 		if (pkt_cnt < free_cnt) {
-			if (sxe_xmit_cleanup(txq)) {
+			if (sxe_xmit_cleanup(txq))
 				break;
-			}
 
 			nb_tx_to_clean = txq->desc_free_num - nb_tx_free_last;
 			nb_tx_free_last = txq->desc_free_num;
@@ -1031,21 +993,18 @@ int sxe_tx_done_cleanup_simple(sxe_tx_queue_s *txq, u32 free_cnt)
 {
 	int i, n, cnt;
 
-	if (free_cnt == 0 || free_cnt > txq->ring_depth) {
+	if (free_cnt == 0 || free_cnt > txq->ring_depth)
 		free_cnt = txq->ring_depth;
-	}
 
 	cnt = free_cnt - free_cnt % txq->rs_thresh;
 
 	for (i = 0; i < cnt; i += n) {
-		if (txq->ring_depth - txq->desc_free_num < txq->rs_thresh) {
+		if (txq->ring_depth - txq->desc_free_num < txq->rs_thresh)
 			break;
-		}
 
 		n = sxe_tx_bufs_free(txq);
-		if (n == 0) {
+		if (n == 0)
 			break;
-		}
 	}
 
 	return i;
@@ -1056,9 +1015,8 @@ int sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
 	s32 ret;
 
 	ret = __sxe_tx_done_cleanup(tx_queue, free_cnt);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "tx cleanup fail.(err:%d)", ret);
-	}
 
 	return ret;
 }
diff --git a/drivers/net/sxe/pf/sxe_tx.h b/drivers/net/sxe/pf/sxe_tx.h
index 78249c3340..ec731d8fcc 100644
--- a/drivers/net/sxe/pf/sxe_tx.h
+++ b/drivers/net/sxe/pf/sxe_tx.h
@@ -28,4 +28,4 @@ u32 sxe_tx_done_cleanup_full(sxe_tx_queue_s *txq, u32 free_cnt);
 
 s32 sxe_tx_bufs_free(sxe_tx_queue_s *txq);
 
-#endif 
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vec_common.h b/drivers/net/sxe/pf/sxe_vec_common.h
new file mode 100644
index 0000000000..3be75ad8e5
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vec_common.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_VEC_COMMON_H__
+#define __SXE_VEC_COMMON_H__
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include <stdint.h>
+#include <rte_mempool.h>
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#else
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#include <rte_malloc.h>
+#endif
+#include "sxe.h"
+#include "sxe_rx.h"
+
+#define RTE_SXE_MAX_TX_FREE_BUF_SZ	64
+#define SXE_TXD_STAT_DD				0x00000001
+
+/* Reclaim one rs_thresh batch of transmitted mbufs (vector TX path).
+ *
+ * Checks the DD bit of the descriptor at next_dd; when set, frees the
+ * rs_thresh mbufs ending there, bulk-returning same-pool mbufs in one
+ * rte_mempool_put_bulk() call for speed.
+ *
+ * Return: rs_thresh when a batch was reclaimed, 0 when hardware has not
+ *	   finished the batch yet.
+ */
+static __rte_always_inline s32
+sxe_tx_bufs_vec_free(struct sxe_tx_queue *txq)
+{
+	struct sxe_tx_buffer_vec *txep;
+	u32 status;
+	u32 n;
+	u32 i;
+	s32 ret;
+	s32 nb_free = 0;
+	struct rte_mbuf *m, *free[RTE_SXE_MAX_TX_FREE_BUF_SZ];
+
+	/* Hardware sets DD when the descriptor batch is done. */
+	status = txq->desc_ring[txq->next_dd].wb.status;
+	if (!(status & SXE_TXD_STAT_DD)) {
+		ret = 0;
+		goto out;
+	}
+
+	n = txq->rs_thresh;
+
+	/* First buffer of the completed batch. */
+	txep = &txq->buffer_ring_vec[txq->next_dd - (n - 1)];
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+
+	if (likely(m != NULL)) {
+		/* Batch same-pool mbufs; flush whenever the pool changes. */
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							(void **)free, nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		/* First mbuf still referenced: return the rest one by one. */
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m != NULL)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+	/* Advance the cleanup index, wrapping to rs_thresh - 1. */
+	txq->desc_free_num = (u16)(txq->desc_free_num + txq->rs_thresh);
+	txq->next_dd = (u16)(txq->next_dd + txq->rs_thresh);
+	if (txq->next_dd >= txq->ring_depth)
+		txq->next_dd = (u16)(txq->rs_thresh - 1);
+
+	ret = txq->rs_thresh;
+out:
+	return ret;
+}
+
+/* Stitch scattered RX buffers back into multi-segment packets.
+ *
+ * The vector RX path delivers raw buffers plus a split_flags byte per
+ * buffer (non-zero = more segments follow).  This walks the buffers,
+ * chains continuation segments onto the packet started in
+ * rxq->pkt_first_seg/pkt_last_seg (carried across calls), strips the CRC
+ * from the tail segment, and compacts completed packets to the front of
+ * rx_bufs.
+ *
+ * Return: number of completed packets now in rx_bufs.
+ */
+static inline u16
+sxe_packets_reassemble(sxe_rx_queue_s *rxq, struct rte_mbuf **rx_bufs,
+			u16 bufs_num, u8 *split_flags)
+{
+	/* VLA scratch array; bufs_num is bounded by the RX burst size. */
+	struct rte_mbuf *pkts[bufs_num];
+	struct rte_mbuf *start = rxq->pkt_first_seg;
+	struct rte_mbuf *end = rxq->pkt_last_seg;
+	u32 pkt_idx, buf_idx;
+
+	for (buf_idx = 0, pkt_idx = 0; buf_idx < bufs_num; buf_idx++) {
+		if (end != NULL) {
+			/* Continuation of an in-progress packet. */
+			end->next = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+			start->nb_segs++;
+			start->pkt_len += rx_bufs[buf_idx]->data_len;
+			end = end->next;
+
+			if (!split_flags[buf_idx]) {
+				/* Last segment: take hash/flags from it and
+				 * strip the CRC off the packet tail.
+				 */
+				start->hash = end->hash;
+				start->ol_flags = end->ol_flags;
+				start->pkt_len -= rxq->crc_len;
+				if (end->data_len > rxq->crc_len) {
+					end->data_len -= rxq->crc_len;
+				} else {
+					/* CRC spans into the final segment:
+					 * drop it and trim the one before.
+					 */
+					struct rte_mbuf *secondlast = start;
+
+					start->nb_segs--;
+					while (secondlast->next != end)
+						secondlast = secondlast->next;
+
+					secondlast->data_len -= (rxq->crc_len -
+							end->data_len);
+					secondlast->next = NULL;
+					rte_pktmbuf_free_seg(end);
+				}
+				pkts[pkt_idx++] = start;
+				start = end = NULL;
+			}
+		} else {
+			/* No packet in progress. */
+			if (!split_flags[buf_idx]) {
+				/* Single-segment packet: pass through. */
+				pkts[pkt_idx++] = rx_bufs[buf_idx];
+				continue;
+			}
+			/* First segment of a new multi-segment packet;
+			 * re-add the CRC the caller already subtracted.
+			 */
+			end = start = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+		}
+	}
+
+	/* Save partial-packet state for the next burst. */
+	rxq->pkt_first_seg = start;
+	rxq->pkt_last_seg = end;
+	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+
+	return pkt_idx;
+}
+
+/* Free every mbuf still held by a vector RX queue and reset the ring.
+ *
+ * No-op when the ring was never populated (buffer_ring NULL) or has
+ * already been fully released (realloc_num >= ring_depth).
+ */
+static inline void
+sxe_rx_vec_mbufs_release(sxe_rx_queue_s *rxq)
+{
+	u16 i;
+
+	if (rxq->buffer_ring == NULL || rxq->realloc_num >= rxq->ring_depth)
+		return;
+
+	if (rxq->realloc_num == 0) {
+		/* Every slot is populated: free the whole ring. */
+		for (i = 0; i < rxq->ring_depth; i++) {
+			if (rxq->buffer_ring[i].mbuf != NULL)
+				rte_pktmbuf_free_seg(rxq->buffer_ring[i].mbuf);
+		}
+	} else {
+		/* Only the slots between processing_idx and realloc_start
+		 * hold live mbufs; walk them with wraparound.
+		 */
+		for (i = rxq->processing_idx;
+			 i != rxq->realloc_start;
+			 i = (i + 1) % rxq->ring_depth) {
+			if (rxq->buffer_ring[i].mbuf != NULL)
+				rte_pktmbuf_free_seg(rxq->buffer_ring[i].mbuf);
+		}
+	}
+
+	/* Mark the ring fully released so a second call is a no-op. */
+	rxq->realloc_num = rxq->ring_depth;
+
+	memset(rxq->buffer_ring, 0, sizeof(rxq->buffer_ring[0]) * rxq->ring_depth);
+
+}
+
+/* Precompute the 64-bit mbuf rearm template for the vector RX path.
+ *
+ * Builds a template mbuf with the fixed per-queue fields (nb_segs,
+ * data_off, port, refcnt) and snapshots its rearm_data word so the
+ * rearm loop can initialize each fresh mbuf with a single 64-bit store.
+ *
+ * Return: always 0.
+ */
+static inline s32
+sxe_default_rxq_vec_setup(sxe_rx_queue_s *rxq)
+{
+	uintptr_t p;
+	struct rte_mbuf mbuf = { .buf_addr = 0 };
+
+	mbuf.nb_segs = 1;
+	mbuf.data_off = RTE_PKTMBUF_HEADROOM;
+	mbuf.port = rxq->port_id;
+	rte_mbuf_refcnt_set(&mbuf, 1);
+
+	/* Barrier keeps the field writes above from being reordered past
+	 * the raw 64-bit read of rearm_data below.
+	 */
+	rte_compiler_barrier();
+	p = (uintptr_t)&mbuf.rearm_data;
+	rxq->mbuf_init_value = *(u64 *)p;
+
+	return 0;
+}
+
+/* Decide whether the vector RX path may be used on this port.
+ *
+ * Vector RX is incompatible with flow director (FNAV) and with
+ * IEEE 1588 timestamping builds.
+ *
+ * Return: 0 when vector RX is usable, -1 otherwise.
+ */
+static inline s32
+sxe_default_rx_vec_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+	struct rte_eth_fdir_conf *fnav_conf = SXE_DEV_FNAV_CONF(dev);
+
+	return (fnav_conf->mode != RTE_FDIR_MODE_NONE) ? -1 : 0;
+#else
+	/* Timestamping build: vector RX is never allowed. */
+	RTE_SET_USED(dev);
+	return -1;
+#endif
+}
+
+/* Record pkts_num mbuf pointers into consecutive TX buffer entries,
+ * starting at buffer_ring[0].
+ */
+static __rte_always_inline void
+sxe_vec_mbuf_fill(struct sxe_tx_buffer_vec *buffer_ring,
+		 struct rte_mbuf **tx_pkts, u16 pkts_num)
+{
+	struct sxe_tx_buffer_vec *entry = buffer_ring;
+	struct sxe_tx_buffer_vec *const end = buffer_ring + pkts_num;
+
+	while (entry < end)
+		(entry++)->mbuf = *tx_pkts++;
+
+}
+
+/* Reset a TX queue to its initial state for the vector path.
+ *
+ * Zeroes all descriptors, then marks each one done (DD set) so the
+ * first cleanup pass succeeds, clears the buffer ring, and rewinds all
+ * ring indices and the context-descriptor cache.
+ */
+static inline void
+sxe_tx_queue_vec_init(sxe_tx_queue_s *txq)
+{
+	u16 i;
+	volatile sxe_tx_data_desc_u *txd;
+	static const sxe_tx_data_desc_u zeroed_desc = { {0} };
+	struct sxe_tx_buffer_vec *tx_buffer = txq->buffer_ring_vec;
+
+	/* Clear the whole descriptor ring first. */
+	for (i = 0; i < txq->ring_depth; i++)
+		txq->desc_ring[i] = zeroed_desc;
+
+	/* Pre-set DD on every descriptor so cleanup treats the ring as
+	 * fully transmitted; clear the shadow buffer ring alongside.
+	 */
+	for (i = 0; i < txq->ring_depth; i++) {
+		txd = &txq->desc_ring[i];
+		txd->wb.status = SXE_TX_DESC_STAT_DD;
+		tx_buffer[i].mbuf = NULL;
+	}
+
+	txq->ctx_curr	  = 0;
+	txq->desc_used_num = 0;
+	txq->desc_free_num = txq->ring_depth - 1;
+	txq->next_to_use   = 0;
+	txq->next_to_clean = txq->ring_depth - 1;
+	txq->next_dd	   = txq->rs_thresh  - 1;
+	txq->next_rs	   = txq->rs_thresh  - 1;
+	memset((void *)&txq->ctx_cache, 0,
+			SXE_CTXT_DESC_NUM * sizeof(struct sxe_ctxt_info));
+
+}
+
+/* Free all mbufs still queued on a vector TX queue.
+ *
+ * No-op when the ring is absent or already empty (all descriptors
+ * free).  Walks from the oldest un-reclaimed slot up to next_to_use
+ * with wraparound, then NULLs every buffer entry.
+ */
+static inline void
+sxe_tx_mbufs_vec_release(sxe_tx_queue_s *txq)
+{
+	u16 i;
+	struct sxe_tx_buffer_vec *tx_buffer;
+	const u16 max_desc = (u16)(txq->ring_depth - 1);
+
+	if (txq->buffer_ring_vec == NULL || txq->desc_free_num == max_desc)
+		return;
+
+	/* Oldest live slot is rs_thresh - 1 entries before next_dd;
+	 * u16 arithmetic wraps as intended for a power-of-two ring.
+	 */
+	for (i = txq->next_dd - (txq->rs_thresh - 1);
+		 i != txq->next_to_use;
+		 i = (i + 1) % txq->ring_depth) {
+		tx_buffer = &txq->buffer_ring_vec[i];
+		rte_pktmbuf_free_seg(tx_buffer->mbuf);
+	}
+	txq->desc_free_num = max_desc;
+
+	/* Clear every entry, including those just freed. */
+	for (i = 0; i < txq->ring_depth; i++) {
+		tx_buffer = &txq->buffer_ring_vec[i];
+		tx_buffer->mbuf = NULL;
+	}
+
+}
+
+/* Release the vector TX buffer ring allocation.
+ *
+ * sxe_default_txq_vec_setup() advances buffer_ring_vec one entry past
+ * the start of the allocation, so free(buffer_ring_vec - 1) returns
+ * the original pointer.
+ */
+static inline void
+sxe_tx_buffer_ring_vec_free(sxe_tx_queue_s *txq)
+{
+	if (txq == NULL)
+		return;
+
+	if (txq->buffer_ring_vec != NULL) {
+		rte_free(txq->buffer_ring_vec - 1);
+		txq->buffer_ring_vec = NULL;
+	}
+
+}
+
+/* Prepare a TX queue for the vector path.
+ *
+ * Skips the first buffer-ring entry (kept as padding; see the matching
+ * `- 1` in sxe_tx_buffer_ring_vec_free()) and installs the vector
+ * queue ops.
+ *
+ * Return: 0 on success, -1 when the buffer ring was not allocated.
+ */
+static inline s32
+sxe_default_txq_vec_setup(sxe_tx_queue_s *txq,
+				const struct sxe_txq_ops *txq_ops)
+{
+	s32 ret = 0;
+
+	if (txq->buffer_ring_vec == NULL) {
+		ret = -1;
+		goto l_out;
+	}
+
+	txq->buffer_ring_vec = txq->buffer_ring_vec + 1;
+	txq->ops = txq_ops;
+
+l_out:
+	return ret;
+}
+
+/* TX done-cleanup is not implemented for the vector path.
+ *
+ * Return: always -ENOTSUP.
+ */
+static inline int
+sxe_tx_done_cleanup_vec(sxe_tx_queue_s *txq, u32 free_cnt)
+{
+	UNUSED(txq);
+	UNUSED(free_cnt);
+
+	return -ENOTSUP;
+}
+
+s32 sxe_txq_vec_setup(sxe_tx_queue_s *txq);
+
+s32 sxe_rx_vec_condition_check(struct rte_eth_dev *dev);
+
+s32 sxe_rxq_vec_setup(sxe_rx_queue_s *rxq);
+
+void sxe_rx_queue_vec_mbufs_release(sxe_rx_queue_s *rxq);
+
+u16 sxe_scattered_pkts_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 pkts_num);
+
+u16 sxe_pkts_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 pkts_num);
+
+u16
+__sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   u16 pkts_num);
+
+#endif
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vec_neon.c b/drivers/net/sxe/pf/sxe_vec_neon.c
new file mode 100644
index 0000000000..6f9fdbd659
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vec_neon.c
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include <stdint.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include <rte_malloc.h>
+
+#include <rte_vect.h>
+#include "sxe_vec_common.h"
+
+#define RTE_SXE_DESCS_PER_LOOP			4
+#define SXE_PACKET_TYPE_MASK_TUNNEL		0xFF
+#define SXE_PACKET_TYPE_SHIFT			0x04
+#define SXE_RXDADV_ERR_TCPE				0x40000000
+#define SXE_VPMD_DESC_EOP_MASK			0x02020202
+#define SXE_UINT8_BIT					(CHAR_BIT * sizeof(u8))
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+sxe_rxq_rearm(struct sxe_rx_queue *rxq) /* NEON: refill RTE_PMD_SXE_MAX_RX_BURST RX descriptors with fresh mbufs */
+{
+	s32 i;
+	u16 rx_id;
+	volatile union sxe_rx_data_desc *rxdp;
+	struct sxe_rx_buffer *rxep = &rxq->buffer_ring[rxq->realloc_start];
+	struct rte_mbuf *mb0, *mb1;
+	uint64x2_t dma_addr0, dma_addr1;
+	uint64x2_t zero = vdupq_n_u64(0);
+	u64 paddr;
+	uint8x8_t p;
+
+	rxdp = rxq->desc_ring + rxq->realloc_start;
+
+	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
+					  (void *)rxep,
+					  RTE_PMD_SXE_MAX_RX_BURST) < 0)) { /* pool exhausted: park fake mbufs when the ring is nearly drained */
+		if (rxq->realloc_num + RTE_PMD_SXE_MAX_RX_BURST >=
+			rxq->ring_depth) {
+			for (i = 0; i < RTE_SXE_DESCS_PER_LOOP; i++) {
+				rxep[i].mbuf = &rxq->fake_mbuf;
+				vst1q_u64((u64 *)&rxdp[i].read,
+					  zero);
+			}
+		}
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+			RTE_PMD_SXE_MAX_RX_BURST;
+		return;
+	}
+
+	p = vld1_u8((u8 *)&rxq->mbuf_init_value); /* pre-computed rearm data (refcnt/nb_segs/port/...) */
+
+	for (i = 0; i < RTE_PMD_SXE_MAX_RX_BURST; i += 2, rxep += 2) { /* two mbufs per iteration */
+		mb0 = rxep[0].mbuf;
+		mb1 = rxep[1].mbuf;
+
+		vst1_u8((u8 *)&mb0->rearm_data, p);
+		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
+		dma_addr0 = vsetq_lane_u64(paddr, zero, 0); /* hdr_addr lane stays 0 */
+
+		vst1q_u64((u64 *)&rxdp++->read, dma_addr0);
+
+		vst1_u8((u8 *)&mb1->rearm_data, p);
+		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
+		dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
+		vst1q_u64((u64 *)&rxdp++->read, dma_addr1);
+	}
+
+	rxq->realloc_start += RTE_PMD_SXE_MAX_RX_BURST;
+	if (rxq->realloc_start >= rxq->ring_depth)
+		rxq->realloc_start = 0; /* wrap the rearm cursor */
+
+	rxq->realloc_num -= RTE_PMD_SXE_MAX_RX_BURST;
+
+	rx_id = (u16)((rxq->realloc_start == 0) ?
+				(rxq->ring_depth - 1) : (rxq->realloc_start - 1));
+
+	sxe_write_addr(rx_id, rxq->rdt_reg_addr); /* publish new tail to hardware */
+
+}
+
+static inline void
+sxe_desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, /* NEON: derive ol_flags (RSS/VLAN/checksum) for 4 packets from descriptor status */
+		  uint8x16_t staterr, u8 vlan_flags, u16 udp_p_flag,
+		  struct rte_mbuf **rx_pkts)
+{
+	u16 udp_p_flag_hi;
+	uint8x16_t ptype, udp_csum_skip;
+	uint32x4_t temp_udp_csum_skip = {0, 0, 0, 0};
+	uint8x16_t vtag_lo, vtag_hi, vtag;
+	uint8x16_t temp_csum;
+	uint32x4_t csum = {0, 0, 0, 0};
+
+	union {
+		u16 e[4];
+		u64 word;
+	} vol; /* scalar view of the packed per-packet flag halves */
+
+	const uint8x16_t rsstype_msk = { /* low nibble of pkt_info = RSS type */
+			0x0F, 0x0F, 0x0F, 0x0F,
+			0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00};
+
+	const uint8x16_t rss_flags = { /* RSS-type -> mbuf flag lookup table */
+			0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
+			0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
+			RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
+			0, 0, 0, RTE_MBUF_F_RX_FDIR};
+
+	const uint8x16_t vlan_csum_msk = { /* VP bit per packet, L4E/IPE error bits in the top lanes */
+			SXE_RXD_STAT_VP, SXE_RXD_STAT_VP,
+			SXE_RXD_STAT_VP, SXE_RXD_STAT_VP,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			(SXE_RXDADV_ERR_TCPE | SXE_RXDADV_ERR_IPE) >> 24,
+			(SXE_RXDADV_ERR_TCPE | SXE_RXDADV_ERR_IPE) >> 24,
+			(SXE_RXDADV_ERR_TCPE | SXE_RXDADV_ERR_IPE) >> 24,
+			(SXE_RXDADV_ERR_TCPE | SXE_RXDADV_ERR_IPE) >> 24};
+
+	const uint8x16_t vlan_csum_map_lo = { /* low byte of flags, indexed by (err bits | VP) */
+			RTE_MBUF_F_RX_IP_CKSUM_GOOD,
+			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+			RTE_MBUF_F_RX_IP_CKSUM_BAD,
+			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+			0, 0, 0, 0,
+			vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
+			vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+			vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+			vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+			0, 0, 0, 0};
+
+	const uint8x16_t vlan_csum_map_hi = { /* high byte of flags; ">> sizeof(u8)" shifts the flag into byte range */
+			RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8), 0,
+			RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8), 0,
+			0, 0, 0, 0,
+			RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8), 0,
+			RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8), 0,
+			0, 0, 0, 0};
+
+	udp_p_flag_hi = udp_p_flag >> 8;
+
+	const uint8x16_t udp_hdr_p_msk = { /* isolate the UDP packet-type bit */
+			0, 0, 0, 0,
+			udp_p_flag_hi, udp_p_flag_hi, udp_p_flag_hi, udp_p_flag_hi,
+			0, 0, 0, 0,
+			0, 0, 0, 0};
+
+	const uint8x16_t udp_csum_bad_shuf = { /* clears L4_CKSUM_BAD for UDP packets (hw quirk workaround — TODO confirm) */
+			0xFF, ~(u8)RTE_MBUF_F_RX_L4_CKSUM_BAD, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0};
+
+	ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
+
+	udp_csum_skip = vandq_u8(ptype, udp_hdr_p_msk);
+
+	temp_udp_csum_skip = vcopyq_laneq_u32(temp_udp_csum_skip, 0,
+				vreinterpretq_u32_u8(udp_csum_skip), 1);
+
+	ptype = vandq_u8(ptype, rsstype_msk);
+	ptype = vqtbl1q_u8(rss_flags, ptype); /* table lookup: RSS type -> flag byte */
+
+	vtag = vandq_u8(staterr, vlan_csum_msk);
+
+	temp_csum = vshrq_n_u8(vtag, 6); /* move error bits down next to the VP bit */
+
+	csum = vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u8(temp_csum), 3), csum, 0);
+	vtag = vorrq_u8(vreinterpretq_u8_u32(csum), vtag);
+
+	vtag_hi = vqtbl1q_u8(vlan_csum_map_hi, vtag);
+	vtag_hi = vshrq_n_u8(vtag_hi, 7);
+
+	vtag_lo = vqtbl1q_u8(vlan_csum_map_lo, vtag);
+	vtag_lo = vorrq_u8(ptype, vtag_lo); /* merge RSS flags with VLAN/checksum flags */
+
+	udp_csum_skip = vshrq_n_u8(vreinterpretq_u8_u32(temp_udp_csum_skip), 1);
+	udp_csum_skip = vqtbl1q_u8(udp_csum_bad_shuf, udp_csum_skip);
+	vtag_lo = vandq_u8(vtag_lo, udp_csum_skip);
+
+	vtag = vzipq_u8(vtag_lo, vtag_hi).val[0];
+	vol.word = vgetq_lane_u64(vreinterpretq_u64_u8(vtag), 0);
+
+	rx_pkts[0]->ol_flags = vol.e[0];
+	rx_pkts[1]->ol_flags = vol.e[1];
+	rx_pkts[2]->ol_flags = vol.e[2];
+	rx_pkts[3]->ol_flags = vol.e[3];
+}
+
+static inline u32
+sxe_get_packet_type(u32 pkt_info, /* map descriptor pkt_info to an RTE_PTYPE_* value */
+		u32 etqf_check,
+		u32 tunnel_check)
+{
+	u32 rte;
+
+	if (etqf_check) { /* ETQF-matched frames carry no usable packet type */
+		rte = RTE_PTYPE_UNKNOWN;
+		goto out;
+	}
+
+	if (tunnel_check) { /* tunnelled packets use the tunnel ptype table */
+		pkt_info &= SXE_PACKET_TYPE_MASK_TUNNEL;
+		rte = sxe_ptype_table_tn[pkt_info];
+		goto out;
+	}
+
+	pkt_info &= SXE_PACKET_TYPE_MASK;
+	rte = sxe_ptype_table[pkt_info];
+
+out:
+	return rte;
+}
+
+static inline void
+sxe_desc_to_ptype_v(uint64x2_t descs[4], u16 pkt_type_mask, /* NEON: fill packet_type for 4 mbufs from 4 descriptors */
+		struct rte_mbuf **rx_pkts)
+{
+	uint32x4_t etqf_check, tunnel_check;
+	uint32x4_t etqf_mask = vdupq_n_u32(0x8000); /* ETQF match bit — TODO confirm against datasheet */
+	uint32x4_t tunnel_mask = vdupq_n_u32(0x10000); /* tunnel indication bit */
+	uint32x4_t ptype_mask = vdupq_n_u32((u32)pkt_type_mask);
+	uint32x4_t ptype0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
+				vreinterpretq_u32_u64(descs[2])).val[0];
+	uint32x4_t ptype1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
+				vreinterpretq_u32_u64(descs[3])).val[0];
+
+	ptype0 = vzipq_u32(ptype0, ptype1).val[0]; /* gather the 4 pkt_info words into one vector */
+
+	etqf_check = vandq_u32(ptype0, etqf_mask);
+	tunnel_check = vandq_u32(ptype0, tunnel_mask);
+
+	ptype0 = vandq_u32(vshrq_n_u32(ptype0, SXE_PACKET_TYPE_SHIFT),
+			ptype_mask);
+
+	rx_pkts[0]->packet_type =
+		sxe_get_packet_type(vgetq_lane_u32(ptype0, 0),
+				vgetq_lane_u32(etqf_check, 0),
+				vgetq_lane_u32(tunnel_check, 0));
+	rx_pkts[1]->packet_type =
+		sxe_get_packet_type(vgetq_lane_u32(ptype0, 1),
+				vgetq_lane_u32(etqf_check, 1),
+				vgetq_lane_u32(tunnel_check, 1));
+	rx_pkts[2]->packet_type =
+		sxe_get_packet_type(vgetq_lane_u32(ptype0, 2),
+				vgetq_lane_u32(etqf_check, 2),
+				vgetq_lane_u32(tunnel_check, 2));
+	rx_pkts[3]->packet_type =
+		sxe_get_packet_type(vgetq_lane_u32(ptype0, 3),
+				vgetq_lane_u32(etqf_check, 3),
+				vgetq_lane_u32(tunnel_check, 3));
+}
+
+static inline u16
+sxe_recv_raw_pkts_vec(struct sxe_rx_queue *rxq, struct rte_mbuf **rx_pkts, /* NEON burst receive; processes up to RTE_PMD_SXE_MAX_RX_BURST descriptors, 4 per loop */
+		   u16 nb_pkts, u8 *split_packet)
+{
+	volatile union sxe_rx_data_desc *rxdp;
+	struct sxe_rx_buffer *sw_ring;
+	u16 nb_pkts_recd;
+	s32 pos;
+	u16 rte;
+	uint8x16_t shuf_msk = { /* descriptor -> rx_descriptor_fields1 byte shuffle */
+		0xFF, 0xFF,
+		0xFF, 0xFF,
+		12, 13,
+		0xFF, 0xFF,
+		12, 13,
+		14, 15,
+		4, 5, 6, 7
+		};
+	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
+				 rxq->crc_len, 0, 0, 0};
+	u8 vlan_flags;
+	u16 udp_p_flag = 0;
+
+	/* Clamp to the rearm quantum (matches the SSE path; split_packet buffers
+	 * are sized RTE_PMD_SXE_MAX_RX_BURST and sxe_rxq_rearm() replenishes at
+	 * most that many descriptors per call), then floor-align to the loop width.
+	 */
+	nb_pkts = RTE_ALIGN_FLOOR(RTE_MIN(nb_pkts, RTE_PMD_SXE_MAX_RX_BURST), RTE_SXE_DESCS_PER_LOOP);
+
+	rxdp = rxq->desc_ring + rxq->processing_idx;
+
+	rte_prefetch_non_temporal(rxdp);
+
+	if (rxq->realloc_num > RTE_PMD_SXE_MAX_RX_BURST)
+		sxe_rxq_rearm(rxq);
+
+	if (!(rxdp->wb.upper.status_error &
+				rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) { /* no descriptor done: nothing to receive */
+		rte = 0;
+		goto out;
+	}
+
+	udp_p_flag = SXE_RXDADV_PKTTYPE_UDP;
+
+	sw_ring = &rxq->buffer_ring[rxq->processing_idx];
+
+	RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
+	vlan_flags = rxq->vlan_flags & UINT8_MAX;
+
+	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+			pos += RTE_SXE_DESCS_PER_LOOP,
+			rxdp += RTE_SXE_DESCS_PER_LOOP) {
+		uint64x2_t descs[RTE_SXE_DESCS_PER_LOOP];
+		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		uint8x16x2_t sterr_tmp1, sterr_tmp2;
+		uint64x2_t mbp1, mbp2;
+		uint8x16_t staterr;
+		uint16x8_t tmp;
+		u32 stat;
+
+		mbp1 = vld1q_u64((u64 *)&sw_ring[pos]); /* copy 2 mbuf pointers at a time into rx_pkts */
+
+		vst1q_u64((u64 *)&rx_pkts[pos], mbp1);
+
+		mbp2 = vld1q_u64((u64 *)&sw_ring[pos + 2]);
+
+		descs[0] =  vld1q_u64((u64 *)(rxdp));
+		descs[1] =  vld1q_u64((u64 *)(rxdp + 1));
+		descs[2] =  vld1q_u64((u64 *)(rxdp + 2));
+		descs[3] =  vld1q_u64((u64 *)(rxdp + 3));
+
+		vst1q_u64((u64 *)&rx_pkts[pos + 2], mbp2);
+
+		if (split_packet) {
+			rte_mbuf_prefetch_part2(rx_pkts[pos]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+		}
+
+		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+		sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
+					  vreinterpretq_u8_u64(descs[3]));
+		sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
+					  vreinterpretq_u8_u64(descs[2]));
+
+		staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0]; /* gather the 4 status/error words */
+
+		sxe_desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, vlan_flags,
+				  udp_p_flag, &rx_pkts[pos]);
+
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); /* strip CRC length from pkt_len/data_len */
+		pkt_mb4 = vreinterpretq_u8_u16(tmp);
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+		pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+			 pkt_mb4);
+		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+			 pkt_mb3);
+
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+		pkt_mb2 = vreinterpretq_u8_u16(tmp);
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+		pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+		if (split_packet) { /* record non-EOP descriptors for the scattered path */
+			stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
+			*(s32 *)split_packet = ~stat & SXE_VPMD_DESC_EOP_MASK;
+
+			split_packet += RTE_SXE_DESCS_PER_LOOP;
+		}
+
+		staterr = vshlq_n_u8(staterr, SXE_UINT8_BIT - 1); /* smear each DD bit across its byte */
+		staterr = vreinterpretq_u8_s8
+				(vshrq_n_s8(vreinterpretq_s8_u8(staterr),
+					SXE_UINT8_BIT - 1));
+		stat = ~vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
+
+		rte_prefetch_non_temporal(rxdp + RTE_SXE_DESCS_PER_LOOP);
+
+		vst1q_u8((u8 *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+			 pkt_mb2);
+		vst1q_u8((u8 *)&rx_pkts[pos]->rx_descriptor_fields1,
+			 pkt_mb1);
+
+		sxe_desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
+
+		if (unlikely(stat == 0)) {
+			nb_pkts_recd += RTE_SXE_DESCS_PER_LOOP;
+		} else { /* first not-done descriptor ends the burst */
+			nb_pkts_recd += __builtin_ctz(stat) / SXE_UINT8_BIT;
+			break;
+		}
+	}
+
+	rxq->processing_idx = (u16)(rxq->processing_idx + nb_pkts_recd);
+	rxq->processing_idx = (u16)(rxq->processing_idx & (rxq->ring_depth - 1)); /* ring_depth is a power of two */
+	rxq->realloc_num = (u16)(rxq->realloc_num + nb_pkts_recd);
+
+	rte = nb_pkts_recd;
+
+out:
+	return rte;
+}
+
+u16
+sxe_pkts_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts) /* non-scattered vector RX entry point */
+{
+	return sxe_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static u16
+sxe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts, /* one burst of scattered RX: raw receive then segment reassembly */
+				   u16 nb_pkts)
+{
+	u32 i = 0;
+	struct sxe_rx_queue *rxq = rx_queue;
+	u8 split_flags[RTE_PMD_SXE_MAX_RX_BURST] = {0}; /* per-descriptor non-EOP markers */
+
+	u16 nb_bufs = sxe_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+			split_flags);
+	if (nb_bufs == 0)
+		goto l_out;
+
+	const u64 *split_fl64 = (u64 *)split_flags; /* scan 8 flags at a time */
+	if (rxq->pkt_first_seg == NULL &&
+			split_fl64[0] == 0 && split_fl64[1] == 0 &&
+			split_fl64[2] == 0 && split_fl64[3] == 0)
+		goto l_out; /* fast path: no split packets and none pending */
+
+	if (rxq->pkt_first_seg == NULL) {
+		while (i < nb_bufs && !split_flags[i]) /* leading whole packets need no reassembly */
+			i++;
+		if (i == nb_bufs)
+			goto l_out;
+		rxq->pkt_first_seg = rx_pkts[i];
+	}
+
+	nb_bufs = i + sxe_packets_reassemble(rxq, &rx_pkts[i], nb_bufs - i,
+		&split_flags[i]);
+
+l_out:
+	return nb_bufs;
+}
+
+u16
+sxe_scattered_pkts_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, /* scattered vector RX entry: loop in MAX_RX_BURST chunks */
+				  u16 nb_pkts)
+{
+	u16 retval = 0;
+
+	while (nb_pkts > RTE_PMD_SXE_MAX_RX_BURST) {
+		u16 burst;
+
+		burst = sxe_recv_scattered_burst_vec(rx_queue,
+							   rx_pkts + retval,
+							   RTE_PMD_SXE_MAX_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_PMD_SXE_MAX_RX_BURST) /* short burst: ring drained, stop early */
+			goto l_out;
+	}
+
+	retval += sxe_recv_scattered_burst_vec(rx_queue,
+						rx_pkts + retval,
+						nb_pkts);
+l_out:
+	return retval;
+}
+
+static inline void
+sxe_single_vec_desc_fill(volatile union sxe_tx_data_desc *txdp, /* NEON: write one TX data descriptor (address + cmd/len) */
+		struct rte_mbuf *pkt, u64 flags)
+{
+	uint64x2_t descriptor = {
+			pkt->buf_iova + pkt->data_off,
+			(u64)pkt->pkt_len << 46 | flags | pkt->data_len}; /* bit 46+: PAYLEN field — TODO confirm against datasheet */
+
+	vst1q_u64((u64 *)&txdp->read, descriptor);
+}
+
+static inline void
+sxe_vec_desc_fill(volatile union sxe_tx_data_desc *txdp, /* fill nb_pkts consecutive TX descriptors with the same flags */
+		struct rte_mbuf **pkt, u16 nb_pkts,  u64 flags)
+{
+	s32 i;
+
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		sxe_single_vec_desc_fill(txdp, *pkt, flags);
+}
+
+u16 __sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, /* NEON vector TX burst; returns number of packets queued */
+			   u16 nb_pkts)
+{
+	struct sxe_tx_queue *txq = (struct sxe_tx_queue *)tx_queue;
+	volatile union sxe_tx_data_desc *txdp;
+	struct sxe_tx_buffer_vec *txep;
+	u16 n, nb_commit, tx_id;
+	u64 flags = SXE_TX_DESC_FLAGS;
+	u64 rs = SXE_TX_DESC_RS_MASK | SXE_TX_DESC_FLAGS; /* variant with report-status bit for the ring-wrap descriptor */
+	s32 i;
+
+	nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh); /* never cross an RS boundary in a single burst */
+
+	if (txq->desc_free_num < txq->free_thresh)
+		sxe_tx_bufs_vec_free(txq); /* reclaim completed descriptors first */
+
+	nb_commit = nb_pkts = (u16)RTE_MIN(txq->desc_free_num, nb_pkts);
+	if (unlikely(nb_pkts == 0))
+		goto l_out;
+
+	tx_id = txq->next_to_use;
+	txdp = &txq->desc_ring[tx_id];
+	txep = &txq->buffer_ring_vec[tx_id];
+
+	txq->desc_free_num = (u16)(txq->desc_free_num - nb_pkts);
+
+	n = (u16)(txq->ring_depth - tx_id); /* descriptors left before the ring wraps */
+	if (nb_commit >= n) { /* burst crosses the ring end: fill up to the wrap, set RS on the last one */
+		sxe_vec_mbuf_fill(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			sxe_single_vec_desc_fill(txdp, *tx_pkts, flags);
+
+		sxe_single_vec_desc_fill(txdp, *tx_pkts++, rs);
+
+		nb_commit = (u16)(nb_commit - n);
+
+		tx_id = 0;
+		txq->next_rs = (u16)(txq->rs_thresh - 1);
+
+		txdp = &txq->desc_ring[tx_id];
+		txep = &txq->buffer_ring_vec[tx_id];
+	}
+
+	sxe_vec_mbuf_fill(txep, tx_pkts, nb_commit);
+	sxe_vec_desc_fill(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (u16)(tx_id + nb_commit);
+	if (tx_id > txq->next_rs) { /* passed an RS threshold: request completion report there */
+		txq->desc_ring[txq->next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		txq->next_rs = (u16)(txq->next_rs +
+			txq->rs_thresh);
+	}
+
+	txq->next_to_use = tx_id;
+
+	sxe_write_addr(txq->next_to_use, txq->tdt_reg_addr); /* bump hardware tail */
+
+l_out:
+	return nb_pkts;
+}
+
+static void __rte_cold
+sxe_tx_queue_release_mbufs_vec(struct sxe_tx_queue *txq) /* ops wrapper: free in-flight TX mbufs */
+{
+	sxe_tx_mbufs_vec_release(txq);
+}
+
+void __rte_cold
+sxe_rx_queue_vec_mbufs_release(struct sxe_rx_queue *rxq) /* free all mbufs held by a vector RX queue */
+{
+	sxe_rx_vec_mbufs_release(rxq);
+}
+
+static void __rte_cold
+sxe_tx_free_swring(struct sxe_tx_queue *txq) /* ops wrapper: release the TX software ring */
+{
+	sxe_tx_buffer_ring_vec_free(txq);
+}
+
+static void __rte_cold
+sxe_reset_tx_queue(struct sxe_tx_queue *txq) /* ops wrapper: re-initialize TX queue state */
+{
+	sxe_tx_queue_vec_init(txq);
+}
+
+static const struct sxe_txq_ops vec_txq_ops = { /* TX queue operations used by the NEON vector path */
+	.init = sxe_reset_tx_queue,
+	.mbufs_release = sxe_tx_queue_release_mbufs_vec,
+	.buffer_ring_free = sxe_tx_free_swring,
+};
+
+s32 __rte_cold
+sxe_rxq_vec_setup(struct sxe_rx_queue *rxq) /* prepare an RX queue for vector receive */
+{
+	return sxe_default_rxq_vec_setup(rxq);
+}
+
+s32 __rte_cold
+sxe_txq_vec_setup(struct sxe_tx_queue *txq) /* prepare a TX queue for vector transmit */
+{
+	return sxe_default_txq_vec_setup(txq, &vec_txq_ops);
+}
+
+s32 __rte_cold
+sxe_rx_vec_condition_check(struct rte_eth_dev *dev) /* check whether device config allows the vector RX path */
+{
+	return sxe_default_rx_vec_condition_check(dev);
+}
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vec_sse.c b/drivers/net/sxe/pf/sxe_vec_sse.c
new file mode 100644
index 0000000000..1c2c319b92
--- /dev/null
+++ b/drivers/net/sxe/pf/sxe_vec_sse.c
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include <stdint.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include <rte_malloc.h>
+#include <tmmintrin.h>
+
+#include "sxe_vec_common.h"
+#include "sxe_compat_version.h"
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+#define SXE_MAX_TX_FREE_BUF_SZ 64
+
+static inline void
+sxe_rxq_realloc(sxe_rx_queue_s *rx_queue) /* SSE: refill RTE_PMD_SXE_MAX_RX_BURST RX descriptors with fresh mbufs */
+{
+	s32 i;
+	u16 rx_index;
+	volatile union sxe_rx_data_desc *desc_ring;
+	sxe_rx_buffer_s *buf_ring =
+			&rx_queue->buffer_ring[rx_queue->realloc_start];
+	struct rte_mbuf *mbuf_0, *mbuf_1;
+	__m128i head_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+			RTE_PKTMBUF_HEADROOM);
+	__m128i dma_addr0, dma_addr1;
+
+	const __m128i addr_mask = _mm_set_epi64x(0, UINT64_MAX); /* keep pkt_addr lane, zero hdr_addr lane */
+
+	desc_ring = rx_queue->desc_ring + rx_queue->realloc_start;
+
+	if (rte_mempool_get_bulk(rx_queue->mb_pool,
+				 (void *)buf_ring,
+				 RTE_PMD_SXE_MAX_RX_BURST) < 0) { /* pool exhausted: park fake mbufs when the ring is nearly drained */
+		if (rx_queue->realloc_num + RTE_PMD_SXE_MAX_RX_BURST >=
+			rx_queue->ring_depth) {
+			dma_addr0 = _mm_setzero_si128();
+			for (i = 0; i < SXE_DESCS_PER_LOOP; i++) {
+				buf_ring[i].mbuf = &rx_queue->fake_mbuf;
+				_mm_store_si128((__m128i *)&desc_ring[i].read,
+						dma_addr0);
+			}
+		}
+		rte_eth_devices[rx_queue->port_id].data->rx_mbuf_alloc_failed +=
+			RTE_PMD_SXE_MAX_RX_BURST;
+		return;
+	}
+
+	for (i = 0; i < RTE_PMD_SXE_MAX_RX_BURST; i += 2, buf_ring += 2) { /* two mbufs per iteration */
+		__m128i vaddr0, vaddr1;
+
+		mbuf_0 = buf_ring[0].mbuf;
+		mbuf_1 = buf_ring[1].mbuf;
+
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+				offsetof(struct rte_mbuf, buf_addr) + 8); /* load below reads buf_addr+buf_iova as one 128-bit word */
+
+		vaddr0 = _mm_loadu_si128((__m128i *)&(mbuf_0->buf_addr));
+		vaddr1 = _mm_loadu_si128((__m128i *)&(mbuf_1->buf_addr));
+
+		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); /* broadcast buf_iova into both lanes */
+		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+		dma_addr0 = _mm_add_epi64(dma_addr0, head_room);
+		dma_addr1 = _mm_add_epi64(dma_addr1, head_room);
+
+		dma_addr0 = _mm_and_si128(dma_addr0, addr_mask);
+		dma_addr1 = _mm_and_si128(dma_addr1, addr_mask);
+
+		_mm_store_si128((__m128i *)&desc_ring++->read, dma_addr0);
+		_mm_store_si128((__m128i *)&desc_ring++->read, dma_addr1);
+	}
+
+	rx_queue->realloc_start += RTE_PMD_SXE_MAX_RX_BURST;
+	if (rx_queue->realloc_start >= rx_queue->ring_depth)
+		rx_queue->realloc_start = 0; /* wrap the rearm cursor */
+
+	rx_queue->realloc_num -= RTE_PMD_SXE_MAX_RX_BURST;
+
+	rx_index = (u16) ((rx_queue->realloc_start == 0) ?
+			(rx_queue->ring_depth - 1) : (rx_queue->realloc_start - 1));
+
+	SXE_PCI_REG_WC_WRITE_RELAXED(rx_queue->rdt_reg_addr, rx_index); /* publish new tail to hardware */
+
+}
+
+static inline void
+sxe_desc_to_olflags(__m128i descs[4], __m128i mbuf_init, u8 vlan_flags, /* SSE: derive ol_flags (RSS/VLAN/checksum) for 4 packets and store rearm_data */
+			u16 udp_p_flag, struct rte_mbuf **rx_pkts)
+{
+	__m128i ptype0, ptype1, vtype0, vtype1, csum, udp_csum_skip;
+	__m128i rearm0, rearm1, rearm2, rearm3;
+
+	const __m128i rsstype_mask = _mm_set_epi16( /* low nibble of pkt_info = RSS type */
+			0x0000, 0x0000, 0x0000, 0x0000,
+			0x000F, 0x000F, 0x000F, 0x000F);
+
+	const __m128i ol_flags_mask = _mm_set_epi16(
+			0x0000, 0x0000, 0x0000, 0x0000,
+			0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
+	const __m128i rss_flags = _mm_set_epi8(RTE_MBUF_F_RX_FDIR, 0, 0, 0, /* RSS-type -> mbuf flag lookup table */
+			0, 0, 0, RTE_MBUF_F_RX_RSS_HASH,
+			RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, 0,
+			RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, 0);
+
+	const __m128i vlan_csum_mask = _mm_set_epi16( /* VP bit per packet, L4E/IPE error bits in the high words */
+		(SXE_RXDADV_ERR_L4E | SXE_RXDADV_ERR_IPE) >> 16,
+		(SXE_RXDADV_ERR_L4E | SXE_RXDADV_ERR_IPE) >> 16,
+		(SXE_RXDADV_ERR_L4E | SXE_RXDADV_ERR_IPE) >> 16,
+		(SXE_RXDADV_ERR_L4E | SXE_RXDADV_ERR_IPE) >> 16,
+		SXE_RXD_STAT_VP, SXE_RXD_STAT_VP,
+		SXE_RXD_STAT_VP, SXE_RXD_STAT_VP);
+
+	const __m128i vlan_csum_map_low = _mm_set_epi8( /* low byte of flags, indexed by (err bits | VP) */
+		0, 0, 0, 0,
+		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
+		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
+		0, 0, 0, 0,
+		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+		RTE_MBUF_F_RX_IP_CKSUM_BAD,
+		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+		RTE_MBUF_F_RX_IP_CKSUM_GOOD);
+
+	const __m128i vlan_csum_map_high = _mm_set_epi8( /* high byte; ">> sizeof(u8)" shifts the flag into byte range */
+		0, 0, 0, 0,
+		0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8), 0,
+		RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8),
+		0, 0, 0, 0,
+		0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8), 0,
+		RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(u8));
+
+	const __m128i udp_hdr_p_msk = _mm_set_epi16 /* isolate the UDP packet-type bit */
+		(0, 0, 0, 0,
+		 udp_p_flag, udp_p_flag, udp_p_flag, udp_p_flag);
+
+	const __m128i udp_csum_bad_shuf = _mm_set_epi8 /* clears L4_CKSUM_BAD for UDP packets (hw quirk workaround — TODO confirm) */
+		(0, 0, 0, 0, 0, 0, 0, 0,
+		 0, 0, 0, 0, 0, 0, ~(u8)RTE_MBUF_F_RX_L4_CKSUM_BAD, 0xFF);
+
+	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+
+	vtype0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+	vtype1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); /* gather the 4 pkt_info words */
+
+	udp_csum_skip = _mm_and_si128(ptype0, udp_hdr_p_msk);
+
+	ptype0 = _mm_and_si128(ptype0, rsstype_mask);
+
+	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); /* table lookup: RSS type -> flag byte */
+
+	vtype1 = _mm_unpacklo_epi32(vtype0, vtype1);
+	vtype1 = _mm_and_si128(vtype1, vlan_csum_mask);
+
+	csum = _mm_srli_epi16(vtype1, 14); /* move error bits down next to the VP bit */
+
+	csum = _mm_srli_si128(csum, 8);
+	vtype1 = _mm_or_si128(csum, vtype1);
+
+	vtype0 = _mm_shuffle_epi8(vlan_csum_map_high, vtype1);
+	vtype0 = _mm_slli_epi16(vtype0, sizeof(u8));
+
+	vtype1 = _mm_shuffle_epi8(vlan_csum_map_low, vtype1);
+	vtype1 = _mm_and_si128(vtype1, ol_flags_mask);
+	vtype1 = _mm_or_si128(vtype0, vtype1);
+
+	vtype1 = _mm_or_si128(ptype0, vtype1); /* merge RSS flags with VLAN/checksum flags */
+
+	udp_csum_skip = _mm_srli_epi16(udp_csum_skip, 9);
+	udp_csum_skip = _mm_shuffle_epi8(udp_csum_bad_shuf, udp_csum_skip);
+	vtype1 = _mm_and_si128(vtype1, udp_csum_skip);
+
+	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtype1, 8), 0x10); /* insert per-packet ol_flags word into rearm template */
+	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtype1, 6), 0x10);
+	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtype1, 4), 0x10);
+	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtype1, 2), 0x10);
+
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+			offsetof(struct rte_mbuf, rearm_data) + 8); /* 128-bit store below covers rearm_data + ol_flags */
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+
+	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+
+}
+
+static inline u32 sxe_packet_type_get(int index, /* map descriptor pkt_info to RTE_PTYPE_*; index selects the ETQF bit for this packet */
+					u32 pkt_info,
+					u32 etqf_check)
+{
+	if (etqf_check & (0x02 << (index * SXE_DESCS_PER_LOOP))) /* ETQF-matched frames carry no usable packet type */
+		return RTE_PTYPE_UNKNOWN;
+
+	pkt_info &= SXE_PACKET_TYPE_MASK;
+	return sxe_ptype_table[pkt_info];
+}
+
+static inline void
+sxe_desc_to_ptype_vec(__m128i descs[4], u16 pkt_type_mask, /* SSE: fill packet_type for 4 mbufs from 4 descriptors */
+		struct rte_mbuf **rx_pkts)
+{
+	__m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL); /* ETQF match bit per pkt_info word */
+	__m128i ptype_mask = _mm_set_epi32(
+		pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
+
+	u32 etqf_check, pkt_info;
+
+	__m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
+	__m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);
+
+	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); /* gather the 4 pkt_info words into one vector */
+
+	etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));
+
+	ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, SXE_RXDADV_PKTTYPE_ETQF_SHIFT),
+				   ptype_mask);
+
+
+	pkt_info = _mm_extract_epi32(ptype0, 0);
+	rx_pkts[0]->packet_type =
+		sxe_packet_type_get(0, pkt_info, etqf_check);
+	pkt_info = _mm_extract_epi32(ptype0, 1);
+	rx_pkts[1]->packet_type =
+		sxe_packet_type_get(1, pkt_info, etqf_check);
+	pkt_info = _mm_extract_epi32(ptype0, 2);
+	rx_pkts[2]->packet_type =
+		sxe_packet_type_get(2, pkt_info, etqf_check);
+	pkt_info = _mm_extract_epi32(ptype0, 3);
+	rx_pkts[3]->packet_type =
+		sxe_packet_type_get(3, pkt_info, etqf_check);
+
+}
+
+static inline u16
+sxe_raw_pkts_vec_recv(sxe_rx_queue_s *rx_queue, struct rte_mbuf **rx_pkts, /* SSE burst receive; up to RTE_PMD_SXE_MAX_RX_BURST descriptors, 4 per loop */
+		u16 pkts_num, u8 *split_packet)
+{
+	volatile union sxe_rx_data_desc *desc_ring;
+	sxe_rx_buffer_s *buffer_ring;
+	u16 pkts_recd_num;
+	s32 pos;
+	u64 var;
+	__m128i shuf_msk;
+	__m128i crc_adjust = _mm_set_epi16( /* subtracts CRC length from pkt_len/data_len lanes */
+				0, 0, 0,
+				-rx_queue->crc_len,
+				0,
+				-rx_queue->crc_len,
+				0, 0
+			);
+
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+	__m128i dd_check, eop_check;
+	__m128i mbuf_init;
+	u8 vlan_flags;
+	u16 udp_p_flag = 0;
+
+	pkts_num = RTE_MIN(pkts_num, RTE_PMD_SXE_MAX_RX_BURST); /* clamp to the rearm quantum */
+
+	pkts_num = RTE_ALIGN_FLOOR(pkts_num, SXE_DESCS_PER_LOOP);
+
+	desc_ring = rx_queue->desc_ring + rx_queue->processing_idx;
+
+	rte_prefetch0(desc_ring);
+
+	if (rx_queue->realloc_num > RTE_PMD_SXE_MAX_RX_BURST)
+		sxe_rxq_realloc(rx_queue);
+
+	if (!(desc_ring->wb.upper.status_error &
+				rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))) { /* no descriptor done: nothing to receive */
+		pkts_recd_num = 0;
+		goto l_out;
+	}
+
+	udp_p_flag = SXE_RXDADV_PKTTYPE_UDP;
+
+	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); /* DD bit in each status word */
+
+	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); /* EOP bit in each status word */
+
+	shuf_msk = _mm_set_epi8( /* descriptor -> rx_descriptor_fields1 byte shuffle */
+		7, 6, 5, 4,
+		15, 14,
+		13, 12,
+		0xFF, 0xFF,
+		13, 12,
+		0xFF, 0xFF,
+		0xFF, 0xFF
+		);
+
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+	mbuf_init = _mm_set_epi64x(0, rx_queue->mbuf_init_value); /* pre-computed rearm template */
+
+	buffer_ring = &rx_queue->buffer_ring[rx_queue->processing_idx];
+
+	RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
+	vlan_flags = rx_queue->vlan_flags & UINT8_MAX;
+
+	for (pos = 0, pkts_recd_num = 0; pos < pkts_num;
+			pos += SXE_DESCS_PER_LOOP,
+			desc_ring += SXE_DESCS_PER_LOOP) {
+		__m128i descs[SXE_DESCS_PER_LOOP];
+		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		__m128i zero, staterr, state_err1, state_err2;
+		__m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+		__m128i mbp2;
+#endif
+
+		mbp1 = _mm_loadu_si128((__m128i *)&buffer_ring[pos]); /* copy 2 mbuf pointers at a time into rx_pkts */
+
+		descs[3] = _mm_loadu_si128((__m128i *)(desc_ring + 3)); /* read descriptors high-to-low; barriers keep load order */
+		rte_compiler_barrier();
+
+		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+		mbp2 = _mm_loadu_si128((__m128i *)&buffer_ring[pos+2]);
+#endif
+
+		descs[2] = _mm_loadu_si128((__m128i *)(desc_ring + 2));
+		rte_compiler_barrier();
+		descs[1] = _mm_loadu_si128((__m128i *)(desc_ring + 1));
+		rte_compiler_barrier();
+		descs[0] = _mm_loadu_si128((__m128i *)(desc_ring));
+
+#if defined(RTE_ARCH_X86_64)
+		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
+
+		if (split_packet) {
+			rte_mbuf_prefetch_part2(rx_pkts[pos]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+		}
+
+		rte_compiler_barrier();
+
+		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+		state_err2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+		state_err1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+		sxe_desc_to_olflags(descs, mbuf_init, vlan_flags, udp_p_flag,
+					&rx_pkts[pos]);
+
+		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); /* strip CRC length from pkt_len/data_len */
+		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+		zero = _mm_xor_si128(dd_check, dd_check);
+
+		staterr = _mm_unpacklo_epi32(state_err1, state_err2); /* gather the 4 status/error words */
+
+		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+				pkt_mb4);
+		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+				pkt_mb3);
+
+		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+		if (split_packet) { /* record non-EOP descriptors for the scattered path */
+			__m128i eop_shuf_mask = _mm_set_epi8(
+				0xFF, 0xFF, 0xFF, 0xFF,
+				0xFF, 0xFF, 0xFF, 0xFF,
+				0xFF, 0xFF, 0xFF, 0xFF,
+				0x04, 0x0C, 0x00, 0x08
+				);
+
+			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+			split_packet += SXE_DESCS_PER_LOOP;
+		}
+
+		staterr = _mm_and_si128(staterr, dd_check);
+
+		staterr = _mm_packs_epi32(staterr, zero);
+
+		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+				pkt_mb2);
+		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+				pkt_mb1);
+
+		sxe_desc_to_ptype_vec(descs, rx_queue->pkt_type_mask, &rx_pkts[pos]);
+
+		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); /* count contiguous done descriptors in this group */
+		pkts_recd_num += var;
+		if (likely(var != SXE_DESCS_PER_LOOP))
+			break;
+	}
+
+	rx_queue->processing_idx = (u16)(rx_queue->processing_idx + pkts_recd_num);
+	rx_queue->processing_idx = (u16)(rx_queue->processing_idx & (rx_queue->ring_depth - 1)); /* ring_depth is a power of two */
+	rx_queue->realloc_num = (u16)(rx_queue->realloc_num + pkts_recd_num);
+
+l_out:
+	return pkts_recd_num;
+}
+
+u16
+sxe_pkts_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 pkts_num) /* non-scattered vector RX entry point */
+{
+	return sxe_raw_pkts_vec_recv(rx_queue, rx_pkts, pkts_num, NULL);
+}
+
+static u16
+sxe_scattered_burst_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, /* one burst of scattered RX: raw receive then segment reassembly */
+				u16 pkts_num)
+{
+	u16 i = 0;
+	u16 bufs_num;
+	sxe_rx_queue_s *rxq = rx_queue;
+	u8 split_flags[RTE_PMD_SXE_MAX_RX_BURST] = {0}; /* per-descriptor non-EOP markers */
+
+	bufs_num = sxe_raw_pkts_vec_recv(rxq, rx_pkts, pkts_num,
+			split_flags);
+	if (bufs_num == 0)
+		goto l_out;
+
+	const u64 *split_flag_64 = (u64 *)split_flags; /* scan 8 flags at a time */
+	if (rxq->pkt_first_seg == NULL &&
+		split_flag_64[0] == 0 && split_flag_64[1] == 0 &&
+		split_flag_64[2] == 0 && split_flag_64[3] == 0)
+		goto l_out; /* fast path: no split packets and none pending */
+
+	if (rxq->pkt_first_seg == NULL) {
+		while (i < bufs_num && !split_flags[i]) /* leading whole packets need no reassembly */
+			i++;
+		if (i == bufs_num)
+			goto l_out;
+		rxq->pkt_first_seg = rx_pkts[i];
+	}
+
+	bufs_num = i + sxe_packets_reassemble(rxq, &rx_pkts[i], bufs_num - i,
+		&split_flags[i]);
+
+l_out:
+	return bufs_num;
+}
+
+u16
+sxe_scattered_pkts_vec_recv(void *rx_queue, struct rte_mbuf **rx_pkts, /* scattered vector RX entry: loop in MAX_RX_BURST chunks */
+				  u16 pkts_num)
+{
+	u16 ret = 0;
+
+	while (pkts_num > RTE_PMD_SXE_MAX_RX_BURST) {
+		u16 burst;
+
+		burst = sxe_scattered_burst_vec_recv(rx_queue,
+						rx_pkts + ret,
+						RTE_PMD_SXE_MAX_RX_BURST);
+		ret += burst;
+		pkts_num -= burst;
+		if (burst < RTE_PMD_SXE_MAX_RX_BURST) /* short burst: ring drained, stop early */
+			goto l_out;
+	}
+
+	ret += sxe_scattered_burst_vec_recv(rx_queue,
+					rx_pkts + ret,
+					pkts_num);
+l_out:
+	return ret;
+}
+
+void __rte_cold
+sxe_rx_queue_vec_mbufs_release(sxe_rx_queue_s *rx_queue) /* free all mbufs held by a vector RX queue */
+{
+	sxe_rx_vec_mbufs_release(rx_queue);
+}
+
+s32 __rte_cold
+sxe_rxq_vec_setup(sxe_rx_queue_s *rx_queue) /* prepare an RX queue for vector receive */
+{
+	return sxe_default_rxq_vec_setup(rx_queue);
+}
+
+s32 __rte_cold
+sxe_rx_vec_condition_check(struct rte_eth_dev *dev) /* check whether device config allows the vector RX path */
+{
+	return sxe_default_rx_vec_condition_check(dev);
+}
+
+static inline void
+sxe_single_vec_desc_fill(volatile sxe_tx_data_desc_u *desc_ring, /* SSE: write one TX data descriptor (address + cmd/len) */
+		struct rte_mbuf *pkts, u64 flags)
+{
+	__m128i descriptor = _mm_set_epi64x((u64)pkts->pkt_len << 46 | /* bit 46+: PAYLEN field — TODO confirm against datasheet */
+			flags | pkts->data_len,
+			pkts->buf_iova + pkts->data_off);
+	_mm_store_si128((__m128i *)&desc_ring->read, descriptor);
+}
+
+static inline void
+sxe_vec_desc_fill(volatile sxe_tx_data_desc_u *desc_ring, /* fill pkts_num consecutive TX descriptors with the same flags */
+		struct rte_mbuf **pkts, u16 pkts_num, u64 flags)
+{
+	s32 i;
+
+	for (i = 0; i < pkts_num; ++i, ++desc_ring, ++pkts)
+		sxe_single_vec_desc_fill(desc_ring, *pkts, flags);
+
+}
+
+u16
+__sxe_pkts_vector_xmit(void *tx_queue, struct rte_mbuf **tx_pkts, /* SSE vector TX burst; returns number of packets queued */
+			   u16 pkts_num)
+{
+	sxe_tx_queue_s *txq = (sxe_tx_queue_s *)tx_queue;
+	volatile sxe_tx_data_desc_u *desc_ring;
+	struct sxe_tx_buffer_vec *buffer_ring;
+	u16 n, commit_num, ntu, xmit_pkts_num;
+	u64 flags = SXE_TX_DESC_FLAGS;
+	u64 rs_flags = SXE_TX_DESC_RS_MASK | SXE_TX_DESC_FLAGS; /* variant with report-status bit for the ring-wrap descriptor */
+	s32 i;
+
+	if (txq->desc_free_num < txq->free_thresh)
+		sxe_tx_bufs_vec_free(txq); /* reclaim completed descriptors first */
+
+	xmit_pkts_num = RTE_MIN(pkts_num, txq->rs_thresh); /* never cross an RS boundary in a single burst */
+	xmit_pkts_num = (u16)RTE_MIN(txq->desc_free_num, xmit_pkts_num);
+
+	commit_num = xmit_pkts_num;
+	if (unlikely(commit_num == 0))
+		goto l_out;
+
+	ntu = txq->next_to_use;
+	desc_ring = &txq->desc_ring[ntu];
+	buffer_ring = &txq->buffer_ring_vec[ntu];
+
+	txq->desc_free_num = (u16)(txq->desc_free_num - xmit_pkts_num);
+
+	n = (u16)(txq->ring_depth - ntu); /* descriptors left before the ring wraps */
+	if (commit_num >= n) { /* burst crosses the ring end: fill up to the wrap, set RS on the last one */
+		sxe_vec_mbuf_fill(buffer_ring, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++desc_ring)
+			sxe_single_vec_desc_fill(desc_ring, *tx_pkts, flags);
+
+		sxe_single_vec_desc_fill(desc_ring, *tx_pkts++, rs_flags);
+
+		commit_num = (u16)(commit_num - n);
+
+		ntu = 0;
+		txq->next_rs = (u16)(txq->rs_thresh - 1);
+
+		desc_ring = &txq->desc_ring[ntu];
+		buffer_ring = &txq->buffer_ring_vec[ntu];
+	}
+
+	sxe_vec_mbuf_fill(buffer_ring, tx_pkts, commit_num);
+
+	sxe_vec_desc_fill(desc_ring, tx_pkts, commit_num, flags);
+
+	ntu = (u16)(ntu + commit_num);
+	if (ntu > txq->next_rs) { /* passed an RS threshold: request completion report there */
+		txq->desc_ring[txq->next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(SXE_TX_DESC_RS_MASK);
+		txq->next_rs = (u16)(txq->next_rs +
+			txq->rs_thresh);
+	}
+
+	txq->next_to_use = ntu;
+	rte_wmb(); /* descriptors must be visible before the tail bump */
+	rte_write32_wc_relaxed((rte_cpu_to_le_32(txq->next_to_use)),
+							txq->tdt_reg_addr);
+
+l_out:
+	return xmit_pkts_num;
+}
+
+static void __rte_cold
+sxe_tx_queue_init(sxe_tx_queue_s *tx_queue) /* ops wrapper: re-initialize TX queue state */
+{
+	sxe_tx_queue_vec_init(tx_queue);
+}
+
+static void __rte_cold
+sxe_tx_queue_mbufs_release(sxe_tx_queue_s *tx_queue) /* ops wrapper: free in-flight TX mbufs */
+{
+	sxe_tx_mbufs_vec_release(tx_queue);
+}
+
+static void __rte_cold
+sxe_tx_buffer_ring_free(sxe_tx_queue_s *tx_queue) /* ops wrapper: release the TX software ring */
+{
+	sxe_tx_buffer_ring_vec_free(tx_queue);
+}
+
+static const struct sxe_txq_ops txq_vec_ops = { /* TX queue operations used by the SSE vector path */
+	.init			 = sxe_tx_queue_init,
+	.mbufs_release	= sxe_tx_queue_mbufs_release,
+	.buffer_ring_free = sxe_tx_buffer_ring_free,
+};
+
+s32 __rte_cold
+sxe_txq_vec_setup(sxe_tx_queue_s *tx_queue) /* prepare a TX queue for vector transmit */
+{
+	return sxe_default_txq_vec_setup(tx_queue, &txq_vec_ops);
+}
+
+#endif
diff --git a/drivers/net/sxe/pf/sxe_vf.c b/drivers/net/sxe/pf/sxe_vf.c
index 74a0bbb370..4b8813e6de 100644
--- a/drivers/net/sxe/pf/sxe_vf.c
+++ b/drivers/net/sxe/pf/sxe_vf.c
@@ -33,7 +33,7 @@
 #define SXE_MR_VLAN_MASK  0xFFFFFFFF
 #define SXE_MR_VLAN_MSB_BIT_OFFSET 32
 
-#define SXE_MR_VIRTUAL_POOL_MASK         0xFFFFFFFF
+#define SXE_MR_VIRTUAL_POOL_MASK		 0xFFFFFFFF
 #define SXE_MR_VIRTUAL_POOL_MSB_BIT_MASK 32
 
 static inline s32 sxe_vf_mac_addr_generate(struct rte_eth_dev *eth_dev, u16 vf_num)
@@ -83,7 +83,6 @@ static void sxe_vt_mode_configure(struct rte_eth_dev *eth_dev)
 	sxe_hw_pcie_vt_mode_set(hw, pcie_ext);
 	sxe_hw_irq_general_reg_set(hw, gpie);
 
-	return;
 }
 
 s32 sxe_vt_init(struct rte_eth_dev *eth_dev)
@@ -126,13 +125,13 @@ s32 sxe_vt_init(struct rte_eth_dev *eth_dev)
 	memset(mirror_info, 0, sizeof(struct sxe_mirror_info));
 #endif
 
-	if (vf_num >= RTE_ETH_32_POOLS) { 
+	if (vf_num >= RTE_ETH_32_POOLS) {
 		nb_queue = 2;
 		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
-	} else if (vf_num >= RTE_ETH_16_POOLS) { 
+	} else if (vf_num >= RTE_ETH_16_POOLS) {
 		nb_queue = 4;
 		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
-	} else { 
+	} else {
 		nb_queue = 8;
 		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
 	}
@@ -150,7 +149,7 @@ s32 sxe_vt_init(struct rte_eth_dev *eth_dev)
 	sxe_vt_mode_configure(eth_dev);
 
 	LOG_INFO_BDF("vf_num:%d domain id:%u init done.",
-		      vf_num, (*vf_info)->domain_id);
+			  vf_num, (*vf_info)->domain_id);
 
 l_out:
 	return ret;
@@ -166,7 +165,7 @@ static void sxe_pf_pool_enable(struct rte_eth_dev *eth_dev, u16 vf_num)
 	struct sxe_adapter *adapter = eth_dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
 	u32 enable_mask = ~0;
-	u8 vf_reg_idx = ((vf_num >> 5) > 0) ? 1: 0;
+	u8 vf_reg_idx = ((vf_num >> 5) > 0) ? 1 : 0;
 	u8 vf_bit_index = vf_num & ((1 << 5) - 1);
 
 	sxe_hw_rx_pool_bitmap_set(hw, vf_reg_idx, enable_mask << vf_bit_index);
@@ -175,7 +174,6 @@ static void sxe_pf_pool_enable(struct rte_eth_dev *eth_dev, u16 vf_num)
 	sxe_hw_tx_pool_bitmap_set(hw, vf_reg_idx, enable_mask << vf_bit_index);
 	sxe_hw_tx_pool_bitmap_set(hw, (vf_reg_idx ^ 1), (vf_reg_idx - 1));
 
-	return;
 }
 
 static void sxe_vf_vlan_filter_enable(struct rte_eth_dev *eth_dev)
@@ -190,11 +188,9 @@ static void sxe_vf_vlan_filter_enable(struct rte_eth_dev *eth_dev)
 	vlan_ctl |= SXE_VLNCTRL_VFE;
 	sxe_hw_vlan_type_set(hw, vlan_ctl);
 
-	for (i = 0; i < SXE_VFT_TBL_SIZE; i++) {
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
 		sxe_hw_vlan_filter_array_write(hw, i, enable_mask);
-	}
 
-	return;
 }
 
 void sxe_vt_configure(struct rte_eth_dev *eth_dev)
@@ -207,7 +203,7 @@ void sxe_vt_configure(struct rte_eth_dev *eth_dev)
 	vf_num = sxe_vf_num_get(eth_dev);
 	if (vf_num == 0) {
 		LOG_WARN_BDF("no vf, no need configure vt");
-		goto l_out;
+		return;
 	}
 
 	sxe_hw_vt_ctrl_cfg(hw, pf_pool_idx);
@@ -229,8 +225,6 @@ void sxe_vt_configure(struct rte_eth_dev *eth_dev)
 
 	sxe_rx_fc_threshold_set(hw);
 
-l_out:
-	return;
 }
 
 void sxe_vt_uninit(struct rte_eth_dev *eth_dev)
@@ -250,20 +244,17 @@ void sxe_vt_uninit(struct rte_eth_dev *eth_dev)
 	vf_num = sxe_vf_num_get(eth_dev);
 	if ((vf_num == 0) || (*vf_info) == NULL) {
 		LOG_INFO_BDF("vf_num:%u vf_info:%p, no need free vf_info.",
-			     vf_num, *vf_info);
-		goto l_out;
+				 vf_num, *vf_info);
+		return;
 	}
 
 	ret = rte_eth_switch_domain_free((*vf_info)->domain_id);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("failed to free switch domain: %d", ret);
-	}
 
 	rte_free(*vf_info);
 	*vf_info = NULL;
 
-l_out:
-	return;
 }
 
 s32 sxe_vf_rss_configure(struct rte_eth_dev *dev)
@@ -284,17 +275,17 @@ s32 sxe_vf_rss_configure(struct rte_eth_dev *dev)
 		is_4q_per_pool = true;
 		break;
 
-	default: 
+	default:
 		ret = -EINVAL;
 		LOG_ERROR_BDF("invalid pool number:%u in iov mode with rss.(err:%d)",
-			      RTE_ETH_DEV_SRIOV(dev).active, ret);
+				  RTE_ETH_DEV_SRIOV(dev).active, ret);
 		goto l_out;
 	}
 
 	sxe_hw_rx_multi_ring_configure(hw, 0, is_4q_per_pool, true);
 
 	LOG_INFO_BDF("pool num:%u is_4q_per_pool:%u configure done.",
-		    RTE_ETH_DEV_SRIOV(dev).active, is_4q_per_pool);
+			RTE_ETH_DEV_SRIOV(dev).active, is_4q_per_pool);
 
 l_out:
 	return ret;
@@ -323,7 +314,7 @@ s32 sxe_vf_default_mode_configure(struct rte_eth_dev *dev)
 	default:
 		ret = -SXE_ERR_CONFIG;
 		LOG_ERROR_BDF("invalid pool number:%u (err:%d)",
-			      RTE_ETH_DEV_SRIOV(dev).active, ret);
+				  RTE_ETH_DEV_SRIOV(dev).active, ret);
 		goto l_out;
 	}
 
@@ -349,7 +340,7 @@ static void sxe_filter_mode_configure(struct rte_eth_dev *dev)
 		filter_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
 		vm_l2_ctrl |= (SXE_VMOLR_ROPE | SXE_VMOLR_MPE);
 	} else {
-		if (dev->data->all_multicast) { 
+		if (dev->data->all_multicast) {
 			filter_ctrl |= SXE_FCTRL_MPE;
 			vm_l2_ctrl |= SXE_VMOLR_MPE;
 		} else {
@@ -386,7 +377,6 @@ static inline void sxe_vf_flr_handle(struct rte_eth_dev *dev, u16 vf)
 
 	sxe_filter_mode_configure(dev);
 
-	return;
 }
 
 static s32 sxe_vf_dev_mac_addr_set_handler(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
@@ -403,15 +393,15 @@ static s32 sxe_vf_dev_mac_addr_set_handler(struct rte_eth_dev *dev, u32 *msgbuf,
 		ret = sxe_hw_uc_addr_add(&adapter->hw, rar_idx, mac_msg.uc_addr, vf);
 		if (ret) {
 			LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" set fail.(err:%d)",
-				      vf, MAC_ADDR(mac_msg.uc_addr), ret);
+					  vf, MAC_ADDR(mac_msg.uc_addr), ret);
 		}
 	}
 
 	return ret;
 }
 
-STATIC s32 sxe_mbx_api_set_handler(struct rte_eth_dev *dev,
-						    u32 *msg, u32 vf_idx)
+static s32 sxe_mbx_api_set_handler(struct rte_eth_dev *dev,
+							u32 *msg, u32 vf_idx)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_mbx_api_msg *api_msg = (struct sxe_mbx_api_msg *)msg;
@@ -433,8 +423,7 @@ STATIC s32 sxe_mbx_api_set_handler(struct rte_eth_dev *dev,
 	}
 
 	LOG_INFO_BDF("mailbox api version:0x%x.(err:%d)",
-	             vf_info->mbx_version,
-	             ret);
+					vf_info->mbx_version, ret);
 
 	return ret;
 }
@@ -462,8 +451,8 @@ static s32 sxe_pf_ring_info_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
 	default:
 		ret = -SXE_ERR_CONFIG;
 		LOG_ERROR_BDF("mailbod version:0x%x not support get ring"
-			      " info.(err:%d)",
-			      vf_info->mbx_version, ret);
+				  " info.(err:%d)",
+				  vf_info->mbx_version, ret);
 		goto l_out;
 	}
 
@@ -493,7 +482,7 @@ static s32 sxe_pf_ring_info_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
 		default:
 			ret = -SXE_ERR_CONFIG;
 			LOG_ERROR_BDF("vf:%u sriov enable, tx queue mode:0x%x "
-				      "invalid pool num:%u.(err:%d)",
+					  "invalid pool num:%u.(err:%d)",
 					vf,
 					dev->data->dev_conf.txmode.mq_mode,
 					vmdq_dcb_tx_conf->nb_queue_pools,
@@ -526,8 +515,8 @@ static s32 sxe_pf_ring_info_get(struct rte_eth_dev *dev, u32 *msgbuf, u32 vf)
 	ring_msg->tc_num = num_tcs;
 
 	LOG_INFO_BDF("max_rx_num:%u max_tx_num:%u default queue:%u tc_num:%u.",
-		    ring_msg->max_rx_num, ring_msg->max_tx_num,
-		    ring_msg->default_tc, ring_msg->tc_num);
+			ring_msg->max_rx_num, ring_msg->max_tx_num,
+			ring_msg->default_tc, ring_msg->tc_num);
 
 l_out:
 	return ret;
@@ -566,11 +555,10 @@ static s32 sxe_vf_vlan_id_set_handler(struct rte_eth_dev *dev,
 
 	ret = sxe_hw_vlan_filter_configure(hw, vlan_id, vf, vlan_msg->add, false);
 	if (ret == 0) {
-		if (vlan_msg->add) {
-		vf_info[vf].vlan_cnt++;
-		} else if (vf_info[vf].vlan_cnt) {
+		if (vlan_msg->add)
+			vf_info[vf].vlan_cnt++;
+		else if (vf_info[vf].vlan_cnt)
 			vf_info[vf].vlan_cnt--;
-		}
 	}
 
 	LOG_INFO_BDF("vf[%u] %s vid[%u] done vlan_cnt:%u ret = %d",
@@ -605,14 +593,14 @@ static s32 sxe_vf_max_frame_set_handler(struct rte_eth_dev *dev,
 		// fall through
 	default:
 		if ((vf_max_frame > SXE_ETH_MAX_LEN) ||
-		    (frame_size > SXE_ETH_MAX_LEN)) {
-		        ret = -SXE_ERR_PARAM;
+			(frame_size > SXE_ETH_MAX_LEN)) {
+			ret = -SXE_ERR_PARAM;
 			LOG_ERROR_BDF("mbx version:0x%x pf max pkt len:0x%x vf:%u"
-				      " max_frames:0x%x max_len:0x%x.(err:%d)",
-				      vf_info->mbx_version,
-				      frame_size,
-				      vf, vf_max_frame,
-				      SXE_ETH_MAX_LEN, ret);
+					  " max_frames:0x%x max_len:0x%x.(err:%d)",
+					  vf_info->mbx_version,
+					  frame_size,
+					  vf, vf_max_frame,
+					  SXE_ETH_MAX_LEN, ret);
 			goto l_out;
 		}
 		break;
@@ -620,12 +608,12 @@ static s32 sxe_vf_max_frame_set_handler(struct rte_eth_dev *dev,
 
 	if ((vf_max_frame < RTE_ETHER_MIN_LEN) ||
 		(vf_max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)) {
-	        ret = -SXE_ERR_PARAM;
+		ret = -SXE_ERR_PARAM;
 		LOG_ERROR_BDF("mbx version:0x%x vf:%u invalid max_frame:%u (err:%d)",
-			      vf_info->mbx_version,
-			      vf,
-			      vf_max_frame,
-			      ret);
+				  vf_info->mbx_version,
+				  vf,
+				  vf_max_frame,
+				  ret);
 		goto l_out;
 	}
 
@@ -633,10 +621,10 @@ static s32 sxe_vf_max_frame_set_handler(struct rte_eth_dev *dev,
 	if (vf_max_frame > cur_max_frs) {
 		ret = -SXE_ERR_PARAM;
 		LOG_ERROR_BDF("mbx version:0x%x vf:%u invalid max_frame:%u >= cur_max_frs:%u",
-			      vf_info->mbx_version,
-			      vf,
-			      vf_max_frame,
-			      cur_max_frs);
+				  vf_info->mbx_version,
+				  vf,
+				  vf_max_frame,
+				  cur_max_frs);
 		goto l_out;
 	}
 
@@ -654,7 +642,6 @@ static void sxe_vf_mc_promisc_disable(struct rte_eth_dev *dev, u32 vf)
 
 	sxe_hw_pool_rx_mode_set(hw, vm_l2_ctrl, vf);
 
-	return;
 }
 
 static s32 sxe_vf_mc_addr_sync(struct rte_eth_dev *dev,
@@ -676,7 +663,7 @@ static s32 sxe_vf_mc_addr_sync(struct rte_eth_dev *dev,
 	for (i = 0; i < mc_cnt; i++) {
 		vf_info->mc_hash[i] = mc_msg->mc_addr_extract[i];
 		LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x\n",
-			     vf, mc_cnt, i, vf_info->mc_hash[i]);
+				 vf, mc_cnt, i, vf_info->mc_hash[i]);
 	}
 
 	if (mc_cnt == 0) {
@@ -690,7 +677,7 @@ static s32 sxe_vf_mc_addr_sync(struct rte_eth_dev *dev,
 			mta_shift = vf_info->mc_hash[i] & SXE_MC_ADDR_BIT_MASK;
 			sxe_hw_mta_hash_table_update(hw, mta_idx, mta_shift);
 
-			LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x"
+			LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x "
 				"reg_idx=%u, bit_idx=%u.\n",
 				vf, mc_cnt, i, vf_info->mc_hash[i],
 				mta_idx, mta_shift);
@@ -720,8 +707,8 @@ static s32 sxe_vf_cast_mode_handler(struct rte_eth_dev *dev,
 		if (cast_msg->cast_mode == SXE_CAST_MODE_PROMISC) {
 			ret = -EOPNOTSUPP;
 			LOG_ERROR_BDF("mbx api:12 vf:%u cast_mode:0x%x "
-				     "unsupport.(err:%d)",
-				      vf, cast_msg->cast_mode, ret);
+					 "unsupported.(err:%d)",
+					  vf, cast_msg->cast_mode, ret);
 			goto l_out;
 		}
 		break;
@@ -730,13 +717,13 @@ static s32 sxe_vf_cast_mode_handler(struct rte_eth_dev *dev,
 	default:
 		ret = -SXE_ERR_PARAM;
 		LOG_ERROR_BDF("vf:%u invalid mbx api version:0x%x.\n",
-			     vf, vf_info->mbx_version);
+				 vf, vf_info->mbx_version);
 		goto l_out;
 	}
 
 	if (vf_info->cast_mode == cast_msg->cast_mode) {
 		LOG_INFO_BDF("vf:%d currut mode equal set mode:0x%x, skip set.",
-			     vf, cast_msg->cast_mode);
+				 vf, cast_msg->cast_mode);
 		goto l_out;
 	}
 
@@ -760,13 +747,13 @@ static s32 sxe_vf_cast_mode_handler(struct rte_eth_dev *dev,
 	case SXE_CAST_MODE_PROMISC:
 		ret = -EOPNOTSUPP;
 		LOG_ERROR_BDF("vf:%d promisc mode not support.(ret:%d)\n",
-		              vf, ret);
+						vf, ret);
 		goto l_out;
 
 	default:
 		ret = -SXE_ERR_PARAM;
 		LOG_ERROR_BDF("vf:%u invalid cast mode:0x%x.\n",
-			     vf, cast_msg->cast_mode);
+				 vf, cast_msg->cast_mode);
 		goto l_out;
 	}
 
@@ -776,7 +763,7 @@ static s32 sxe_vf_cast_mode_handler(struct rte_eth_dev *dev,
 	sxe_hw_pool_rx_mode_set(hw, vm_l2_filter, vf);
 
 	LOG_INFO_BDF("vf:%d filter reg:0x%x mode:%d.\n",
-		     vf, vm_l2_filter, cast_msg->cast_mode);
+			 vf, vm_l2_filter, cast_msg->cast_mode);
 
 	vf_info->cast_mode = cast_msg->cast_mode;
 
@@ -799,13 +786,13 @@ static s32 sxe_vf_uc_addr_sync_handler(struct rte_eth_dev *dev,
 			(struct rte_ether_addr *)uc_msg->addr)) {
 			ret = -SXE_ERR_PARAM;
 			LOG_ERROR_BDF("vf:%u mac addr:"MAC_FMT" invalid.(err:%d).",
-				      vf, MAC_ADDR(uc_msg->addr), ret);
+					  vf, MAC_ADDR(uc_msg->addr), ret);
 			goto l_out;
 		}
 
 		vf_info->uc_mac_cnt++;
 		rar_idx = sxe_sw_uc_entry_vf_add(adapter, vf, (u8 *)uc_msg->addr, true);
-		sxe_hw_uc_addr_add\r(hw, rar_idx, (u8 *)uc_msg->addr, vf);
+		sxe_hw_uc_addr_add(hw, rar_idx, (u8 *)uc_msg->addr, vf);
 	} else {
 		if (vf_info->uc_mac_cnt) {
 			sxe_sw_uc_entry_vf_del(adapter, vf, true);
@@ -817,7 +804,7 @@ static s32 sxe_vf_uc_addr_sync_handler(struct rte_eth_dev *dev,
 	return ret;
 }
 
-STATIC struct sxe_msg_table msg_table[] = {
+static struct sxe_msg_table msg_table[] = {
 	[SXE_VFREQ_MAC_ADDR_SET] = {SXE_VFREQ_MAC_ADDR_SET, sxe_vf_dev_mac_addr_set_handler},
 	[SXE_VFREQ_MC_ADDR_SYNC] = {SXE_VFREQ_MC_ADDR_SYNC, sxe_vf_mc_addr_sync},
 	[SXE_VFREQ_VLAN_SET] = {SXE_VFREQ_VLAN_SET, sxe_vf_vlan_id_set_handler},
@@ -840,20 +827,19 @@ static void sxe_vf_pool_enable(struct rte_eth_dev *dev, u8 vf_idx)
 
 	enable_pool = sxe_hw_tx_pool_bitmap_get(hw, reg_idx);
 	enable_pool |= BIT(bit_idx);
-	sxe_hw_tx_pool_bitmap_set(hw, reg_idx,enable_pool);
+	sxe_hw_tx_pool_bitmap_set(hw, reg_idx, enable_pool);
 
 	sxe_hw_vf_queue_drop_enable(hw, vf_idx,
 				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
 
 	enable_pool = sxe_hw_rx_pool_bitmap_get(hw, reg_idx);
 	enable_pool |= BIT(bit_idx);
-	sxe_hw_rx_pool_bitmap_set(hw, reg_idx,enable_pool);
+	sxe_hw_rx_pool_bitmap_set(hw, reg_idx, enable_pool);
 
 	vf_info->is_ready = true;
 
 	sxe_hw_spoof_count_enable(hw, reg_idx, bit_idx);
 
-	return;
 }
 
 static void sxe_vf_reset_msg_handle(struct rte_eth_dev *dev, u8 vf_idx)
@@ -885,14 +871,13 @@ static void sxe_vf_reset_msg_handle(struct rte_eth_dev *dev, u8 vf_idx)
 	adapter->vt_ctxt.vf_info->is_ready = true;
 
 	LOG_INFO_BDF("vf_idx:%d reset msg:0x%x handle done.send mac addr:"MAC_FMT
-		    " mc type:%d to vf.",
-		    vf_idx, reply.msg_type,
-		    MAC_ADDR(mac_addr), SXE_MC_FILTER_TYPE0);
+			" mc type:%d to vf.",
+			vf_idx, reply.msg_type,
+			MAC_ADDR(mac_addr), SXE_MC_FILTER_TYPE0);
 
-	return;
 }
 
-STATIC s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
+static s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
 					u8 vf_idx)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
@@ -919,12 +904,12 @@ STATIC s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
 		sxe_vf_reset_msg_handle(dev, vf_idx);
 
 		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
-					      &user_param);
+						  &user_param);
 		goto l_out;
 	}
 
 	sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
-					      &user_param);
+						  &user_param);
 
 	LOG_INFO_BDF("vf_idx:%u cmd_id:0x%x user configure:0x%x.",
 			vf_idx, cmd_id, user_param.ret);
@@ -940,8 +925,8 @@ STATIC s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
 
 	if (msg_table[cmd_id].msg_func) {
 		if ((user_param.ret == RTE_PMD_SXE_MB_EVENT_PROCEED) ||
-		    (cmd_id == SXE_VFREQ_API_NEGOTIATE) ||
-		    (cmd_id == SXE_VFREQ_RING_INFO_GET)) {
+			(cmd_id == SXE_VFREQ_API_NEGOTIATE) ||
+			(cmd_id == SXE_VFREQ_RING_INFO_GET)) {
 			ret = msg_table[cmd_id].msg_func(dev, msg, vf_idx);
 		}
 		LOG_INFO_BDF("msg:0x%x cmd_id:0x%x handle done.ret:%d\n",
@@ -955,7 +940,7 @@ STATIC s32 sxe_req_msg_handle(struct rte_eth_dev *dev, u32 *msg,
 	} else {
 		msg[0] |= SXE_MSGTYPE_NACK;
 		LOG_ERROR_BDF("vf_idx:%u msg_type:0x%x cmdId:0x%x invalid.(err:%d)\n",
-			      vf_idx, msg[0], cmd_id, ret);
+				  vf_idx, msg[0], cmd_id, ret);
 	}
 
 	ret = sxe_hw_send_msg_to_vf(hw, msg, SXE_MBX_MSG_NUM, vf_idx);
@@ -1012,7 +997,6 @@ static void sxe_vf_ack_msg_handle(struct rte_eth_dev *eth_dev, u8 vf_idx)
 					SXE_MSG_NUM(sizeof(msg)), vf_idx);
 	}
 
-	return;
 }
 
 void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev)
@@ -1030,16 +1014,14 @@ void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev)
 			sxe_vf_flr_handle(eth_dev, vf_idx);
 		}
 
-		if (sxe_hw_vf_req_check(hw, vf_idx)) {
+		if (sxe_hw_vf_req_check(hw, vf_idx))
 			sxe_vf_req_msg_handle(eth_dev, vf_idx);
-		}
 
-		if (sxe_hw_vf_ack_check(hw, vf_idx)) {
+		if (sxe_hw_vf_ack_check(hw, vf_idx))
 			sxe_vf_ack_msg_handle(eth_dev, vf_idx);
-		}
+
 	}
 
-	return;
 }
 
 #ifdef ETH_DEV_MIRROR_RULE
@@ -1051,21 +1033,21 @@ static s32 sxe_mirror_conf_check(struct sxe_hw *hw, u8 rule_id,
 	if (sxe_hw_vt_status(hw) == 0) {
 		ret = -ENOTSUP;
 		PMD_LOG_ERR(DRV, "virtual disabled, mirror rule not support.(err:%d)",
-		              ret);
+					ret);
 		goto l_out;
 	}
 
 	if (rule_id >= SXE_MIRROR_RULES_MAX) {
 		ret = -EINVAL;
 		PMD_LOG_ERR(DRV, "invalid rule_id:%u rule id max:%u.(err:%d)",
-		              rule_id, SXE_MIRROR_RULES_MAX, ret);
+					rule_id, SXE_MIRROR_RULES_MAX, ret);
 		goto l_out;
 	}
 
 	if (SXE_MIRROR_TYPE_INVALID(rule_type)) {
 		ret = -EINVAL;
 		PMD_LOG_ERR(DRV, "unsupported mirror type 0x%x.(err:%d)",
-			      rule_type, ret);
+					rule_type, ret);
 	}
 
 l_out:
@@ -1073,8 +1055,8 @@ static s32 sxe_mirror_conf_check(struct sxe_hw *hw, u8 rule_id,
 }
 
 static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
-		      struct rte_eth_mirror_conf *mirror_conf,
-		      u8 rule_id, u8 on)
+			  struct rte_eth_mirror_conf *mirror_conf,
+			  u8 rule_id, u8 on)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -1096,29 +1078,29 @@ static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
 			if (ret < 0) {
 				ret = -EINVAL;
 				LOG_ERROR_BDF("vlan_id[%u]:0x%x no matched vlvf."
-					      "(err:%d)",
-					      i,
-					      mirror_conf->vlan.vlan_id[i],
-					      ret);
+						  "(err:%d)",
+						  i,
+						  mirror_conf->vlan.vlan_id[i],
+						  ret);
 				goto l_out;
 			}
 
 			reg_idx = ret;
 			vlvf = sxe_hw_vlan_pool_filter_read(hw, reg_idx);
 			if ((vlvf & SXE_VLVF_VIEN) &&
-			    ((vlvf & SXE_VLVF_VLANID_MASK) ==
-			      mirror_conf->vlan.vlan_id[i])) {
+				((vlvf & SXE_VLVF_VLANID_MASK) ==
+				  mirror_conf->vlan.vlan_id[i])) {
 				vlan_mask |= (1ULL << reg_idx);
 			} else{
 				ret = -EINVAL;
 				LOG_ERROR_BDF("i:%u vlan_id:0x%x "
-					      "vlvf[%u]:0x%x not meet request."
-					      "(err:%d)",
-					      i,
-					      mirror_conf->vlan.vlan_id[i],
-					      reg_idx,
-					      vlvf,
-					      ret);
+						  "vlvf[%u]:0x%x not meet request."
+						  "(err:%d)",
+						  i,
+						  mirror_conf->vlan.vlan_id[i],
+						  reg_idx,
+						  vlvf,
+						  ret);
 				goto l_out;
 			}
 		}
@@ -1136,10 +1118,10 @@ static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
 				mirror_info->mr_conf[rule_id].vlan.vlan_id[i] =
 					mirror_conf->vlan.vlan_id[i];
 				LOG_INFO_BDF("rule_id:%u vlan id:0x%x add mirror"
-					     " to dst_pool:%u",
-					     rule_id,
-					     mirror_conf->vlan.vlan_id[i],
-					     mirror_conf->dst_pool);
+						 " to dst_pool:%u",
+						 rule_id,
+						 mirror_conf->vlan.vlan_id[i],
+						 mirror_conf->dst_pool);
 			}
 		}
 	} else {
@@ -1150,10 +1132,10 @@ static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
 		for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
 			mirror_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
 			LOG_INFO_BDF("rule_id:%u vlan id:0x%x del mirror"
-				     " from dst_pool:%u",
-				     rule_id,
-				     mirror_conf->vlan.vlan_id[i],
-				     mirror_conf->dst_pool);
+					 " from dst_pool:%u",
+					 rule_id,
+					 mirror_conf->vlan.vlan_id[i],
+					 mirror_conf->dst_pool);
 		}
 	}
 
@@ -1164,8 +1146,8 @@ static s32 sxe_vlan_mirror_configure(struct rte_eth_dev *dev,
 }
 
 static void sxe_virtual_pool_mirror_configure(struct rte_eth_dev *dev,
-		      struct rte_eth_mirror_conf *mirror_conf,
-		      u8 rule_id, u8 on)
+			  struct rte_eth_mirror_conf *mirror_conf,
+			  u8 rule_id, u8 on)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -1185,12 +1167,11 @@ static void sxe_virtual_pool_mirror_configure(struct rte_eth_dev *dev,
 
 	sxe_hw_mirror_virtual_pool_set(hw, rule_id, lsb, msb);
 
-	return;
 }
 
 s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
-		      struct rte_eth_mirror_conf *mirror_conf,
-		      u8 rule_id, u8 on)
+			  struct rte_eth_mirror_conf *mirror_conf,
+			  u8 rule_id, u8 on)
 {
 	struct sxe_adapter *adapter = dev->data->dev_private;
 	struct sxe_hw *hw = &adapter->hw;
@@ -1201,7 +1182,7 @@ s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
 	ret = sxe_mirror_conf_check(hw, rule_id, mirror_conf->rule_type);
 	if (ret) {
 		LOG_ERROR_BDF("rule_id:%u mirror config param invalid.(err:%d)",
-			      rule_id, ret);
+				  rule_id, ret);
 		goto l_out;
 	}
 
@@ -1219,13 +1200,11 @@ s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
 		sxe_virtual_pool_mirror_configure(dev, mirror_conf, rule_id, on);
 	}
 
-	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) {
+	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
 		mirror_type |= SXE_MRCTL_UPME;
-	}
 
-	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) {
+	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
 		mirror_type |= SXE_MRCTL_DPME;
-	}
 
 	sxe_hw_mirror_ctl_set(hw, rule_id, mirror_type, mirror_conf->dst_pool, on);
 
@@ -1233,14 +1212,14 @@ s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
 	mirror_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
 
 	LOG_INFO_BDF("rule_id:%u mirrror type:0x%x %s success. "
-		     "vlan id mask:0x%"SXE_PRIX64" virtaul pool mask:0x%"SXE_PRIX64
-		     " dst_pool:%u.",
-		     rule_id,
-		     mirror_conf->rule_type,
-		     on ? "add" : "delete",
-		     mirror_conf->vlan.vlan_mask,
-		     mirror_conf->pool_mask,
-		     mirror_conf->dst_pool);
+			 "vlan id mask:0x%"SXE_PRIX64" virtual pool mask:0x%"SXE_PRIX64
+			 " dst_pool:%u.",
+			 rule_id,
+			 mirror_conf->rule_type,
+			 on ? "add" : "delete",
+			 mirror_conf->vlan.vlan_mask,
+			 mirror_conf->pool_mask,
+			 mirror_conf->dst_pool);
 
 l_out:
 	return ret;
@@ -1256,12 +1235,12 @@ s32 sxe_mirror_rule_reset(struct rte_eth_dev *dev, u8 rule_id)
 	ret = sxe_mirror_conf_check(hw, rule_id, SXE_ETH_MIRROR_TYPE_MASK);
 	if (ret) {
 		LOG_ERROR_BDF("rule_id:%u mirror config param invalid.(err:%d)",
-			      rule_id, ret);
+				  rule_id, ret);
 		goto l_out;
 	}
 
 	memset(&mirror_info->mr_conf[rule_id], 0,
-	       sizeof(struct rte_eth_mirror_conf));
+		   sizeof(struct rte_eth_mirror_conf));
 
 	sxe_hw_mirror_rule_clear(hw, rule_id);
 
diff --git a/drivers/net/sxe/pf/sxe_vf.h b/drivers/net/sxe/pf/sxe_vf.h
index 8690b9e7fd..727b26dab9 100644
--- a/drivers/net/sxe/pf/sxe_vf.h
+++ b/drivers/net/sxe/pf/sxe_vf.h
@@ -17,31 +17,31 @@
 
 #define SXE_MIRROR_RULES_MAX   4
 
-#define SXE_MSG_NUM(size)         DIV_ROUND_UP(size, 4)
+#define SXE_MSG_NUM(size)		 DIV_ROUND_UP(size, 4)
 
-#define SXE_MSGTYPE_ACK    0x80000000
+#define SXE_MSGTYPE_ACK	0x80000000
 #define SXE_MSGTYPE_NACK   0x40000000
 
-#define SXE_VFREQ_RESET               0x01 
-#define SXE_VFREQ_MAC_ADDR_SET        0x02 
-#define SXE_VFREQ_MC_ADDR_SYNC        0x03 
-#define SXE_VFREQ_VLAN_SET            0x04 
-#define SXE_VFREQ_LPE_SET             0x05  
+#define SXE_VFREQ_RESET			   0x01
+#define SXE_VFREQ_MAC_ADDR_SET		0x02
+#define SXE_VFREQ_MC_ADDR_SYNC		0x03
+#define SXE_VFREQ_VLAN_SET			0x04
+#define SXE_VFREQ_LPE_SET			 0x05
 
-#define SXE_VFREQ_UC_ADDR_SYNC        0x06  
+#define SXE_VFREQ_UC_ADDR_SYNC		0x06
 
-#define SXE_VFREQ_API_NEGOTIATE       0x08  
+#define SXE_VFREQ_API_NEGOTIATE	   0x08
 
-#define SXE_VFREQ_RING_INFO_GET       0x09  
-#define SXE_VFREQ_REDIR_TBL_GET       0x0a
-#define SXE_VFREQ_RSS_KEY_GET         0x0b
-#define SXE_VFREQ_CAST_MODE_SET       0x0c  
-#define SXE_VFREQ_LINK_ENABLE_GET     0X0d  
-#define SXE_VFREQ_IPSEC_ADD           0x0e
-#define SXE_VFREQ_IPSEC_DEL           0x0f
-#define SXE_VFREQ_RSS_CONF_GET        0x10
+#define SXE_VFREQ_RING_INFO_GET	   0x09
+#define SXE_VFREQ_REDIR_TBL_GET	   0x0a
+#define SXE_VFREQ_RSS_KEY_GET		 0x0b
+#define SXE_VFREQ_CAST_MODE_SET	   0x0c
+#define SXE_VFREQ_LINK_ENABLE_GET	 0x0d
+#define SXE_VFREQ_IPSEC_ADD		   0x0e
+#define SXE_VFREQ_IPSEC_DEL		   0x0f
+#define SXE_VFREQ_RSS_CONF_GET		0x10
 
-#define SXE_VFREQ_MASK                0xFF
+#define SXE_VFREQ_MASK				0xFF
 
 #define SXE_MIRROR_TYPE_INVALID(mirror_type) \
 	((mirror_type) & ~(u8)(ETH_MIRROR_VIRTUAL_POOL_UP | \
@@ -62,35 +62,35 @@ enum sxe_mbx_api_version {
 	SXE_MBX_API_10 = 0,
 	SXE_MBX_API_11,
 	SXE_MBX_API_12,
-	SXE_MBX_API_13, 
-	SXE_MBX_API_14, 
+	SXE_MBX_API_13,
+	SXE_MBX_API_14,
 
-	SXE_MBX_API_NR, 
+	SXE_MBX_API_NR,
 };
 
 enum sxe_cast_mode {
-	SXE_CAST_MODE_NONE = 0, 
-	SXE_CAST_MODE_MULTI,    
-	SXE_CAST_MODE_ALLMULTI, 
-	SXE_CAST_MODE_PROMISC,  
+	SXE_CAST_MODE_NONE = 0,
+	SXE_CAST_MODE_MULTI,
+	SXE_CAST_MODE_ALLMULTI,
+	SXE_CAST_MODE_PROMISC,
 };
 
 struct sxe_vf_info {
-	u8 mac_addr[RTE_ETHER_ADDR_LEN]; 
-	u16 mc_hash[SXE_VF_MC_ENTRY_NUM_MAX]; 
-	u8  mc_hash_used; 
-	u8 cast_mode; 
-	u8  trusted :1;  
-	u8  is_ready :1; 
-	u8  spoof_chk_enabled :1; 
-	u8  rss_query_enabled :1; 
-	u8  mac_from_pf :1; 
-	u8  reserved :3;  
+	u8 mac_addr[RTE_ETHER_ADDR_LEN];
+	u16 mc_hash[SXE_VF_MC_ENTRY_NUM_MAX];
+	u8  mc_hash_used;
+	u8 cast_mode;
+	u8  trusted :1;
+	u8  is_ready :1;
+	u8  spoof_chk_enabled :1;
+	u8  rss_query_enabled :1;
+	u8  mac_from_pf :1;
+	u8  reserved :3;
 	u16 domain_id;
-	u16 tx_rate;    
-	u32 mbx_version; 
-	u32 vlan_cnt;     
-	u32 uc_mac_cnt;  
+	u16 tx_rate;
+	u32 mbx_version;
+	u32 vlan_cnt;
+	u32 uc_mac_cnt;
 };
 
 #ifdef ETH_DEV_MIRROR_RULE
@@ -102,10 +102,10 @@ struct sxe_mirror_info {
 
 struct sxe_virtual_context {
 	u8   pflink_fullchk;
-	u32 mbx_version; 
-	struct sxe_vf_info *vf_info;    
+	u32 mbx_version;
+	struct sxe_vf_info *vf_info;
 #ifdef ETH_DEV_MIRROR_RULE
-	struct sxe_mirror_info mr_info; 
+	struct sxe_mirror_info mr_info;
 #endif
 };
 
@@ -115,17 +115,17 @@ struct sxe_msg_table {
 };
 
 enum RTE_PMD_SXE_MB_event_rsp {
-	RTE_PMD_SXE_MB_EVENT_NOOP_ACK,  
-	RTE_PMD_SXE_MB_EVENT_NOOP_NACK, 
-	RTE_PMD_SXE_MB_EVENT_PROCEED,   
-	RTE_PMD_SXE_MB_EVENT_MAX        
+	RTE_PMD_SXE_MB_EVENT_NOOP_ACK,
+	RTE_PMD_SXE_MB_EVENT_NOOP_NACK,
+	RTE_PMD_SXE_MB_EVENT_PROCEED,
+	RTE_PMD_SXE_MB_EVENT_MAX
 };
 
 struct rte_pmd_sxe_mb_event_param {
-	u16 vf_idx;     
-	u16 msg_type;   
-	u16 ret;        
-	void *msg;      
+	u16 vf_idx;
+	u16 msg_type;
+	u16 ret;
+	void *msg;
 };
 
 struct sxe_mbx_api_msg {
@@ -158,10 +158,10 @@ struct sxe_rst_msg {
 
 struct sxe_ring_info_msg {
 	u32 msg_type;
-	u8  max_rx_num; 
-	u8  max_tx_num; 
-	u8  tc_num;     
-	u8  default_tc; 
+	u8  max_rx_num;
+	u8  max_tx_num;
+	u8  tc_num;
+	u8  default_tc;
 };
 
 struct sxe_rss_hash_msg {
@@ -178,7 +178,7 @@ struct sxe_vlan_msg {
 
 struct sxe_mc_sync_msg {
 	u16 msg_type;
-	u16 mc_cnt;  
+	u16 mc_cnt;
 	u16 mc_addr_extract[SXE_VF_MC_ENTRY_NUM_MAX];
 };
 
@@ -212,8 +212,8 @@ void sxe_mbx_irq_handler(struct rte_eth_dev *eth_dev);
 
 #ifdef ETH_DEV_MIRROR_RULE
 s32 sxe_mirror_rule_set(struct rte_eth_dev *dev,
-		      struct rte_eth_mirror_conf *mirror_conf,
-		      u8 rule_id, u8 on);
+			  struct rte_eth_mirror_conf *mirror_conf,
+			  u8 rule_id, u8 on);
 
 s32 sxe_mirror_rule_reset(struct rte_eth_dev *dev, u8 rule_id);
 
diff --git a/drivers/net/sxe/rte_pmd_sxe_version.map b/drivers/net/sxe/rte_pmd_sxe_version.map
index e85eb752b4..1ee53b5969 100644
--- a/drivers/net/sxe/rte_pmd_sxe_version.map
+++ b/drivers/net/sxe/rte_pmd_sxe_version.map
@@ -1,5 +1,5 @@
 DPDK_20.0 {
-	global: 
+	global:
 	rte_pmd_sxe_tx_loopback_set;
 	rte_pmd_sxe_tc_bw_set;
 	local: *;
diff --git a/drivers/net/sxe/version.map b/drivers/net/sxe/version.map
index 2064d17939..fe54b7732a 100644
--- a/drivers/net/sxe/version.map
+++ b/drivers/net/sxe/version.map
@@ -1,19 +1,19 @@
 DPDK_21 {
-	global: 
+	global:
 	rte_pmd_sxe_tx_loopback_set;
 	rte_pmd_sxe_tc_bw_set;
 	local: *;
 };
 
 DPDK_22 {
-	global: 
+	global:
 	rte_pmd_sxe_tx_loopback_set;
 	rte_pmd_sxe_tc_bw_set;
 	local: *;
 };
 
 DPDK_23 {
-	global: 
+	global:
 	rte_pmd_sxe_tx_loopback_set;
 	rte_pmd_sxe_tc_bw_set;
 	local: *;
diff --git a/drivers/net/sxe/vf/sxevf.h b/drivers/net/sxe/vf/sxevf.h
index 52d294d869..0db3d73d2c 100644
--- a/drivers/net/sxe/vf/sxevf.h
+++ b/drivers/net/sxe/vf/sxevf.h
@@ -11,12 +11,12 @@
 #include "sxevf_filter.h"
 #include "sxevf_stats.h"
 
-#define SXEVF_DEVARG_LINK_CHECK           "link_check"
+#define SXEVF_DEVARG_LINK_CHECK		   "link_check"
 
 struct sxevf_adapter {
-	s8 name[PCI_PRI_STR_SIZE+1]; 
-	u8 max_rx_queue; 
-	u8 max_tx_queue; 
+	s8 name[PCI_PRI_STR_SIZE + 1];
+	u8 max_rx_queue;
+	u8 max_tx_queue;
 
 	struct sxevf_hw hw;
 	struct sxevf_irq_context irq_ctxt;
diff --git a/drivers/net/sxe/vf/sxevf_ethdev.c b/drivers/net/sxe/vf/sxevf_ethdev.c
index d656dc83fc..dd39798520 100644
--- a/drivers/net/sxe/vf/sxevf_ethdev.c
+++ b/drivers/net/sxe/vf/sxevf_ethdev.c
@@ -44,7 +44,7 @@
 #include "sxevf_offload.h"
 #include "sxe_compat_version.h"
 
-#define SXEVF_ETH_OVERHEAD     (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)  
+#define SXEVF_ETH_OVERHEAD	 (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 #define SXEVF_HKEY_MAX_INDEX   (10)
 #define SXEVF_RSS_OFFLOAD_ALL ( \
 	RTE_ETH_RSS_IPV4 | \
@@ -58,27 +58,27 @@
 	RTE_ETH_RSS_IPV6_UDP_EX)
 
 #define SXEVF_DEFAULT_RX_FREE_THRESH  32
-#define SXEVF_DEFAULT_RX_PTHRESH      8
-#define SXEVF_DEFAULT_RX_HTHRESH      8
-#define SXEVF_DEFAULT_RX_WTHRESH      0
+#define SXEVF_DEFAULT_RX_PTHRESH	  8
+#define SXEVF_DEFAULT_RX_HTHRESH	  8
+#define SXEVF_DEFAULT_RX_WTHRESH	  0
 
 #define SXEVF_DEFAULT_TX_FREE_THRESH  32
-#define SXEVF_DEFAULT_TX_PTHRESH      32
-#define SXEVF_DEFAULT_TX_HTHRESH      0
-#define SXEVF_DEFAULT_TX_WTHRESH      0
+#define SXEVF_DEFAULT_TX_PTHRESH	  32
+#define SXEVF_DEFAULT_TX_HTHRESH	  0
+#define SXEVF_DEFAULT_TX_WTHRESH	  0
 #define SXEVF_DEFAULT_TX_RSBIT_THRESH 32
 
-#define	SXEVF_MIN_RING_DESC     32
-#define	SXEVF_MAX_RING_DESC     4096
+#define	SXEVF_MIN_RING_DESC	 32
+#define	SXEVF_MAX_RING_DESC	 4096
 
-#define	SXEVF_ALIGN             128
-#define SXEVF_RXD_ALIGN        (SXEVF_ALIGN / sizeof(sxevf_rx_data_desc_u))
-#define SXEVF_TXD_ALIGN        (SXEVF_ALIGN / sizeof(sxevf_tx_data_desc_u))
+#define	SXEVF_ALIGN			 128
+#define SXEVF_RXD_ALIGN		(SXEVF_ALIGN / sizeof(sxevf_rx_data_desc_u))
+#define SXEVF_TXD_ALIGN		(SXEVF_ALIGN / sizeof(sxevf_tx_data_desc_u))
 
-#define SXEVF_TX_MAX_SEG            40
+#define SXEVF_TX_MAX_SEG			40
 #define SXEVF_DEFAULT_TX_QUEUE_NUM  1
 #define SXEVF_DEFAULT_RX_QUEUE_NUM  1
-#define SXEVF_RX_BUF_MIN      1024
+#define SXEVF_RX_BUF_MIN	  1024
 #define SXEVF_RX_BUF_LEN_MAX  9728
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -100,7 +100,7 @@ static const char * const sxevf_valid_arguments[] = {
 	NULL
 };
 
-STATIC s32 sxevf_devargs_handle(__rte_unused const char *key, const char *value,
+static s32 sxevf_devargs_handle(__rte_unused const char *key, const char *value,
 		  void *extra_args)
 {
 	u16 *n = extra_args;
@@ -125,15 +125,15 @@ STATIC s32 sxevf_devargs_handle(__rte_unused const char *key, const char *value,
 	return ret;
 }
 
-STATIC void sxevf_devargs_parse(struct sxevf_adapter *adapter,
-		      struct rte_devargs *devargs)
+static void sxevf_devargs_parse(struct sxevf_adapter *adapter,
+			  struct rte_devargs *devargs)
 {
 	struct rte_kvargs *kvlist;
 	u16 check;
 
 	if (devargs == NULL) {
 		LOG_INFO_BDF("no dev args.");
-		goto l_out;
+		return;
 	}
 
 	kvlist = rte_kvargs_parse(devargs->args, sxevf_valid_arguments);
@@ -141,17 +141,16 @@ STATIC void sxevf_devargs_parse(struct sxevf_adapter *adapter,
 		return;
 
 	if (rte_kvargs_count(kvlist, SXEVF_DEVARG_LINK_CHECK) == 1 &&
-	    rte_kvargs_process(kvlist, SXEVF_DEVARG_LINK_CHECK,
-			       sxevf_devargs_handle, &check) == 0 &&
-	    check == 1) {
+		rte_kvargs_process(kvlist, SXEVF_DEVARG_LINK_CHECK,
+				   sxevf_devargs_handle, &check) == 0 &&
+		check == 1) {
 		adapter->link_check = 1;
 	}
 
 	LOG_INFO_BDF("dev args link_check:%u", adapter->link_check);
 
 	rte_kvargs_free(kvlist);
-l_out:
-	return;
+
 }
 
 static s32 sxevf_hw_dev_reset(struct sxevf_hw *hw)
@@ -179,7 +178,7 @@ static s32 sxevf_hw_dev_reset(struct sxevf_hw *hw)
 	if (!retry) {
 		ret = -SXEVF_ERR_RESET_FAILED;
 		LOG_ERROR_BDF("retry:%u use up, pf has not reset done.(err:%d)\n",
-		               SXEVF_RST_CHECK_NUM, ret);
+						SXEVF_RST_CHECK_NUM, ret);
 		goto l_out;
 	}
 
@@ -192,7 +191,7 @@ static s32 sxevf_hw_dev_reset(struct sxevf_hw *hw)
 	/* Send reset message to pf */
 	msg.msg_type = SXEVF_RESET;
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
-				    SXEVF_MSG_NUM(sizeof(msg)));
+					SXEVF_MSG_NUM(sizeof(msg)));
 	if (ret) {
 		LOG_ERROR_BDF("vf reset msg:%d len:%zu mailbox fail.(err:%d)\n",
 			  msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)), ret);
@@ -272,7 +271,6 @@ static void sxevf_txrx_start(struct rte_eth_dev *eth_dev)
 		sxevf_rx_desc_tail_set(hw, rxq->reg_idx, rxq->ring_depth - 1);
 	}
 
-	return;
 }
 
 static s32 sxevf_dev_start(struct rte_eth_dev *dev)
@@ -330,7 +328,7 @@ static s32 sxevf_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (adapter->stop) {
-		LOG_INFO_BDF("eth dev has been stoped.");
+		LOG_INFO_BDF("eth dev has been stopped.");
 		goto l_out;
 	}
 
@@ -348,7 +346,7 @@ static s32 sxevf_dev_stop(struct rte_eth_dev *dev)
 
 l_out:
 #ifdef DPDK_19_11_6
-	return;
+	LOG_DEBUG_BDF("at end of vf dev stop.");
 #else
 	return 0;
 #endif
@@ -372,9 +370,8 @@ static s32 sxevf_dev_close(struct rte_eth_dev *dev)
 	}
 
 	ret = sxevf_hw_dev_reset(hw);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("dev reset fail.");
-	}
 
 	sxevf_dev_stop(dev);
 
@@ -386,13 +383,13 @@ static s32 sxevf_dev_close(struct rte_eth_dev *dev)
 
 l_out:
 #ifdef DPDK_19_11_6
-	return;
+	LOG_DEBUG_BDF("at end of vf dev close.");
 #else
 	return ret;
 #endif
 }
 
-STATIC s32 sxevf_dev_reset(struct rte_eth_dev *dev)
+static s32 sxevf_dev_reset(struct rte_eth_dev *dev)
 {
 	s32 ret;
 
@@ -403,24 +400,23 @@ STATIC s32 sxevf_dev_reset(struct rte_eth_dev *dev)
 	}
 
 	ret = sxevf_ethdev_init(dev);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(INIT, "dev init fail.");
-	}
 
 l_out:
 	return ret;
 }
 
 static s32 sxevf_dev_info_get(struct rte_eth_dev *dev,
-		     struct rte_eth_dev_info *dev_info)
+			 struct rte_eth_dev_info *dev_info)
 {
 	struct sxevf_adapter *adapter = dev->data->dev_private;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
 	dev_info->max_rx_queues = adapter->max_rx_queue;
 	dev_info->max_tx_queues = adapter->max_tx_queue;
-	dev_info->min_rx_bufsize = SXEVF_RX_BUF_MIN; 
-	dev_info->max_rx_pktlen = SXEVF_RX_BUF_LEN_MAX; 
+	dev_info->min_rx_bufsize = SXEVF_RX_BUF_MIN;
+	dev_info->max_rx_pktlen = SXEVF_RX_BUF_LEN_MAX;
 	dev_info->max_mtu = dev_info->max_rx_pktlen - SXEVF_ETH_OVERHEAD;
 	dev_info->max_mac_addrs = adapter->mac_filter_ctxt.uc_table_size;
 	dev_info->max_hash_mac_addrs = SXEVF_UTA_HASH_BIT_MAX;
@@ -429,7 +425,7 @@ static s32 sxevf_dev_info_get(struct rte_eth_dev *dev,
 
 	dev_info->rx_queue_offload_capa = sxevf_rx_queue_offloads_get(dev);
 	dev_info->rx_offload_capa = (sxevf_rx_port_offloads_get(dev) |
-				     dev_info->rx_queue_offload_capa);
+					 dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = sxevf_tx_queue_offloads_get(dev);
 	dev_info->tx_offload_capa = sxevf_tx_port_offloads_get(dev);
 
@@ -484,11 +480,11 @@ static s32 sxevf_mtu_set(struct rte_eth_dev *dev, u16 mtu)
 	}
 
 	if (dev->data->dev_started && !dev->data->scattered_rx &&
-	    ((max_frame + 2 * SXEVF_VLAN_TAG_SIZE) >
-	     (dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))) {
+		((max_frame + 2 * SXEVF_VLAN_TAG_SIZE) >
+		 (dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))) {
 		ret = -EINVAL;
 		LOG_ERROR_BDF("max_frame:%u stop port first.(err:%d)",
-			      max_frame, ret);
+				  max_frame, ret);
 		goto l_out;
 	}
 
@@ -515,11 +511,10 @@ static s32 sxevf_dev_configure(struct rte_eth_dev *dev)
 	struct sxevf_adapter *adapter = dev->data->dev_private;
 
 	LOG_INFO_BDF("Configured Virtual Function port id: %d",
-		     dev->data->port_id);
+			 dev->data->port_id);
 
-	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
-	}
 
 #ifndef RTE_LIBRTE_SXEVF_PF_DISABLE_STRIP_CRC
 	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
@@ -604,9 +599,8 @@ static u32 sxevf_regs_group_count(const struct sxevf_reg_info *regs)
 	int i = 0;
 	int count = 0;
 
-	while (regs[i].count) {
+	while (regs[i].count)
 		count += regs[i++].count;
-	}
 
 	return count;
 };
@@ -618,9 +612,8 @@ u32 sxevf_regs_group_num_get(void)
 	const struct sxevf_reg_info *reg_group;
 	const struct sxevf_reg_info **reg_set = sxevf_regs_group;
 
-	while ((reg_group = reg_set[i++])) {
+	while ((reg_group = reg_set[i++]))
 		count += sxevf_regs_group_count(reg_group);
-	}
 
 	PMD_LOG_INFO(INIT, "read regs cnt=%u\n", count);
 
@@ -633,18 +626,16 @@ void sxevf_regs_group_read(struct sxevf_hw *hw, u32 *data)
 	const struct sxevf_reg_info *reg_group;
 	const struct sxevf_reg_info **reg_set = sxevf_regs_group;
 
-	while ((reg_group = reg_set[i++])) {
+	while ((reg_group = reg_set[i++]))
 		cnt += sxevf_hw_regs_group_read(hw, reg_group, &data[cnt]);
-	}
 
 	PMD_LOG_INFO(INIT, "read regs cnt=%u, regs num=%u\n",
-	             cnt, sxevf_regs_group_num_get());
+				 cnt, sxevf_regs_group_num_get());
 
-	return;
 }
 
 static int sxevf_get_regs(struct rte_eth_dev *dev,
-	      struct rte_dev_reg_info *regs)
+		  struct rte_dev_reg_info *regs)
 {
 	s32 ret = 0;
 	u32 *data = regs->data;
@@ -676,47 +667,47 @@ static int sxevf_get_regs(struct rte_eth_dev *dev,
 }
 
 static const struct eth_dev_ops sxevf_eth_dev_ops = {
-	.dev_configure        = sxevf_dev_configure,
-	.dev_start            = sxevf_dev_start,
-	.dev_stop             = sxevf_dev_stop,
-	.link_update          = sxevf_link_update,
-	.stats_get            = sxevf_eth_stats_get,
-	.xstats_get           = sxevf_xstats_get,
-	.stats_reset          = sxevf_dev_stats_reset,
-	.xstats_reset         = sxevf_dev_stats_reset,
-	.xstats_get_names     = sxevf_xstats_names_get,
-	.dev_close            = sxevf_dev_close,
-	.dev_reset	      = sxevf_dev_reset,
+	.dev_configure        = sxevf_dev_configure,
+	.dev_start            = sxevf_dev_start,
+	.dev_stop             = sxevf_dev_stop,
+	.link_update          = sxevf_link_update,
+	.stats_get            = sxevf_eth_stats_get,
+	.xstats_get           = sxevf_xstats_get,
+	.stats_reset          = sxevf_dev_stats_reset,
+	.xstats_reset         = sxevf_dev_stats_reset,
+	.xstats_get_names     = sxevf_xstats_names_get,
+	.dev_close            = sxevf_dev_close,
+	.dev_reset            = sxevf_dev_reset,
 	.promiscuous_enable   = sxevf_promiscuous_enable,
 	.promiscuous_disable  = sxevf_promiscuous_disable,
 	.allmulticast_enable  = sxevf_allmulticast_enable,
 	.allmulticast_disable = sxevf_allmulticast_disable,
-	.dev_infos_get        = sxevf_dev_info_get,
+	.dev_infos_get        = sxevf_dev_info_get,
 	.dev_supported_ptypes_get = sxevf_dev_supported_ptypes_get,
-	.mtu_set              = sxevf_mtu_set,
-	.vlan_filter_set      = sxevf_vlan_filter_set,
+	.mtu_set              = sxevf_mtu_set,
+	.vlan_filter_set      = sxevf_vlan_filter_set,
 	.vlan_strip_queue_set = sxevf_vlan_strip_queue_set,
-	.vlan_offload_set     = sxevf_vlan_offload_set,
-	.rx_queue_setup       = sxevf_rx_queue_setup,
-	.rx_queue_release     = sxevf_rx_queue_release,
-	.tx_queue_setup       = sxevf_tx_queue_setup,
-	.tx_queue_release     = sxevf_tx_queue_release,
+	.vlan_offload_set     = sxevf_vlan_offload_set,
+	.rx_queue_setup       = sxevf_rx_queue_setup,
+	.rx_queue_release     = sxevf_rx_queue_release,
+	.tx_queue_setup       = sxevf_tx_queue_setup,
+	.tx_queue_release     = sxevf_tx_queue_release,
 	.rx_queue_intr_enable = sxevf_rx_queue_intr_enable,
 	.rx_queue_intr_disable = sxevf_rx_queue_intr_disable,
-	.mac_addr_add         = sxevf_mac_addr_add,
-	.mac_addr_remove      = sxevf_mac_addr_remove,
-	.set_mc_addr_list     = sxevf_set_mc_addr_list,
-	.rxq_info_get         = sxevf_rx_queue_info_get,
-	.txq_info_get         = sxevf_tx_queue_info_get,
-	.mac_addr_set         = sxevf_default_mac_addr_set,
-	.get_reg              = sxevf_get_regs,
-	.reta_update          = sxevf_rss_reta_update,
-	.reta_query           = sxevf_rss_reta_query,
-	.rss_hash_update      = sxevf_rss_hash_update,
-	.rss_hash_conf_get    = sxevf_rss_hash_conf_get,
-	.tx_done_cleanup      = sxevf_tx_done_cleanup,
+	.mac_addr_add         = sxevf_mac_addr_add,
+	.mac_addr_remove      = sxevf_mac_addr_remove,
+	.set_mc_addr_list     = sxevf_set_mc_addr_list,
+	.rxq_info_get         = sxevf_rx_queue_info_get,
+	.txq_info_get         = sxevf_tx_queue_info_get,
+	.mac_addr_set         = sxevf_default_mac_addr_set,
+	.get_reg              = sxevf_get_regs,
+	.reta_update          = sxevf_rss_reta_update,
+	.reta_query           = sxevf_rss_reta_query,
+	.rss_hash_update      = sxevf_rss_hash_update,
+	.rss_hash_conf_get    = sxevf_rss_hash_conf_get,
+	.tx_done_cleanup      = sxevf_tx_done_cleanup,
 #ifdef ETH_DEV_OPS_MONITOR
-	.get_monitor_addr     = sxe_monitor_addr_get,
+	.get_monitor_addr     = sxe_monitor_addr_get,
 #endif
 #ifdef ETH_DEV_OPS_HAS_DESC_RELATE
 	.rx_descriptor_status = sxevf_rx_descriptor_status,
@@ -749,7 +740,7 @@ s32 sxevf_ethdev_init(struct rte_eth_dev *eth_dev)
 #endif
 #endif
 
-	eth_dev->rx_pkt_burst         = &sxevf_pkts_recv;
+	eth_dev->rx_pkt_burst = &sxevf_pkts_recv;
 	eth_dev->tx_pkt_burst = &sxevf_pkts_xmit_with_offload;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -758,12 +749,12 @@ s32 sxevf_ethdev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	sxevf_devargs_parse(eth_dev->data->dev_private,
-			      pci_dev->device.devargs);
+				  pci_dev->device.devargs);
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-#ifdef  DPDK_19_11_6
-    eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+#ifdef DPDK_19_11_6
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 #else
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 #endif
diff --git a/drivers/net/sxe/vf/sxevf_filter.c b/drivers/net/sxe/vf/sxevf_filter.c
index 4f788ee4a1..18257ba43e 100644
--- a/drivers/net/sxe/vf/sxevf_filter.c
+++ b/drivers/net/sxe/vf/sxevf_filter.c
@@ -18,10 +18,10 @@
 #include "sxevf_queue.h"
 #include "sxe_compat_version.h"
 
-#define  SXEVF_MAC_ADDR_EXTRACT_MASK  (0xFFF) 
-#define  SXEVF_MAC_ADDR_SHIFT         (5)     
-#define  SXEVF_MAC_ADDR_REG_MASK      (0x7F)  
-#define  SXEVF_MAC_ADDR_BIT_MASK      (0x1F)  
+#define  SXEVF_MAC_ADDR_EXTRACT_MASK  (0xFFF)
+#define  SXEVF_MAC_ADDR_SHIFT		(5)
+#define  SXEVF_MAC_ADDR_REG_MASK	(0x7F)
+#define  SXEVF_MAC_ADDR_BIT_MASK	(0x1F)
 
 #define SXEVF_STRIP_BITMAP_SET(h, q) \
 	do { \
@@ -57,7 +57,6 @@ static void sxevf_random_mac_addr_generate(struct rte_ether_addr *mac_addr)
 	random = rte_rand();
 	memcpy(&mac_addr->addr_bytes[3], &random, 3);
 
-	return;
 }
 
 s32 sxevf_mac_addr_init(struct rte_eth_dev *eth_dev)
@@ -71,7 +70,7 @@ s32 sxevf_mac_addr_init(struct rte_eth_dev *eth_dev)
 				RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		LOG_ERROR_BDF("mac addr allocate %u B fail.",
-			     RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX);
+				 RTE_ETHER_ADDR_LEN * SXEVF_HW_UC_ENTRY_NUM_MAX);
 		ret = -ENOMEM;
 		goto l_out;
 	}
@@ -117,15 +116,13 @@ void sxevf_vfta_sync(struct rte_eth_dev *eth_dev, bool on)
 			mask = 1;
 			for (bit_idx = 0; bit_idx < 32; bit_idx++) {
 				vlan_id = (reg_idx << 5) + bit_idx;
-				if (vfta & mask) {
+				if (vfta & mask)
 					sxevf_vlan_id_set(hw, vlan_id, on);
-				}
 				mask <<= 1;
 			}
 		}
 	}
 
-	return;
 }
 
 static void sxevf_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bool on)
@@ -138,20 +135,19 @@ static void sxevf_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx,
 		LOG_ERROR_BDF("invalid queue idx:%u exceed max"
 			   " queue  number:%u.",
 			   queue_idx, adapter->max_rx_queue);
-		goto l_out;
+		return;
 	}
 
-	if (on) {
+	if (on)
 		SXEVF_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
-	} else {
+	else
 		SXEVF_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
-	}
 
 	if (queue_idx >= dev->data->nb_rx_queues) {
 		LOG_ERROR_BDF("invalid queue_idx id:%u exceed rx "
 			   " queue number:%u.",
 			   queue_idx, dev->data->nb_rx_queues);
-		goto l_out;
+		return;
 	}
 
 	rxq = dev->data->rx_queues[queue_idx];
@@ -165,10 +161,8 @@ static void sxevf_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx,
 	}
 
 	LOG_INFO_BDF("queue idx:%u vlan strip on:%d set bitmap and offload done.",
-		     queue_idx, on);
+			 queue_idx, on);
 
-l_out:
-	return;
 }
 
 static void sxevf_vlan_strip_switch_set(struct rte_eth_dev *dev)
@@ -184,26 +178,23 @@ static void sxevf_vlan_strip_switch_set(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 			on = true;
-		} else {
+		else
 			on = false;
-		}
+
 		sxevf_hw_vlan_tag_strip_switch(hw, i, on);
 
 		sxevf_vlan_strip_bitmap_set(dev, i, on);
 	}
 
-	return;
 }
 
 static void sxevf_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
 {
-	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
 		sxevf_vlan_strip_switch_set(dev);
-	}
 
-	return;
 }
 
 void sxevf_vlan_filter_configure(struct rte_eth_dev *eth_dev)
@@ -213,11 +204,10 @@ void sxevf_vlan_filter_configure(struct rte_eth_dev *eth_dev)
 	sxevf_vfta_sync(eth_dev, true);
 
 	vlan_mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
-		    RTE_ETH_VLAN_EXTEND_MASK;
+			RTE_ETH_VLAN_EXTEND_MASK;
 
 	sxevf_vlan_offload_configure(eth_dev, vlan_mask);
 
-	return;
 }
 
 s32 sxevf_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -229,7 +219,7 @@ s32 sxevf_promiscuous_enable(struct rte_eth_dev *eth_dev)
 	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_PROMISC);
 	if (ret) {
 		LOG_ERROR_BDF("cast mode:0x%x set fail.(err:%d)",
-			      SXEVF_CAST_MODE_PROMISC, ret);
+				  SXEVF_CAST_MODE_PROMISC, ret);
 	}
 
 	return ret;
@@ -242,13 +232,11 @@ s32 sxevf_promiscuous_disable(struct rte_eth_dev *eth_dev)
 	s32 mode = SXEVF_CAST_MODE_NONE;
 	s32 ret;
 
-	if (eth_dev->data->all_multicast) {
+	if (eth_dev->data->all_multicast)
 		mode = SXEVF_CAST_MODE_ALLMULTI;
-	}
 	ret = sxevf_cast_mode_set(hw, mode);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("disable mc promiscuous fail.(err:%d)", ret);
-	}
 
 	return ret;
 }
@@ -259,15 +247,13 @@ s32 sxevf_allmulticast_enable(struct rte_eth_dev *eth_dev)
 	struct sxevf_hw *hw = &adapter->hw;
 	s32 ret = 0;
 
-	if (eth_dev->data->promiscuous) {
+	if (eth_dev->data->promiscuous)
 		goto l_out;
-	}
-	
+
 	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_ALLMULTI);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("cast mode:0x%x set fail.(err:%d)",
-			      SXEVF_CAST_MODE_ALLMULTI, ret);
-	}
+				  SXEVF_CAST_MODE_ALLMULTI, ret);
 
 l_out:
 	return ret;
@@ -279,14 +265,12 @@ s32 sxevf_allmulticast_disable(struct rte_eth_dev *eth_dev)
 	struct sxevf_hw *hw = &adapter->hw;
 	s32 ret = 0;
 
-	if (eth_dev->data->promiscuous) {
+	if (eth_dev->data->promiscuous)
 		goto l_out;
-	}
 
 	ret = sxevf_cast_mode_set(hw, SXEVF_CAST_MODE_MULTI);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("disable mc promiscuous fail.(err:%d)", ret);
-	}
 
 l_out:
 	return ret;
@@ -304,18 +288,17 @@ s32 sxevf_vlan_filter_set(struct rte_eth_dev *eth_dev,  u16 vlan_id, s32 on)
 	ret = sxevf_vlan_id_set(hw, vlan_id, on);
 	if (ret) {
 		LOG_ERROR_BDF("vlan_id:0x%x status:%u set fail.(err:%d)",
-			      vlan_id, on, ret);
+				  vlan_id, on, ret);
 		goto l_out;
 	}
 
 	reg_idx = (vlan_id >> SXEVF_VLAN_ID_SHIFT) & SXEVF_VLAN_ID_REG_MASK;
 	bit_idx = (vlan_id & SXEVF_VLAN_ID_BIT_MASK);
 
-	if (on) {
+	if (on)
 		vlan_ctxt->vlan_table[reg_idx] |= (1 << bit_idx);
-	} else {
+	else
 		vlan_ctxt->vlan_table[reg_idx] &= ~(1 << bit_idx);
-	}
 
 	LOG_INFO_BDF("vlan_id:0x%x status:%u set success.", vlan_id, on);
 
@@ -330,8 +313,8 @@ void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
 
 	if (queue > adapter->max_rx_queue) {
 		LOG_ERROR_BDF("queue id:%u invalid exceed max rx queue num:%u",
-			      queue, adapter->max_rx_queue);
-		goto l_out;
+				  queue, adapter->max_rx_queue);
+		return;
 	}
 
 	sxevf_hw_vlan_tag_strip_switch(hw, queue, on);
@@ -340,8 +323,6 @@ void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
 
 	LOG_INFO_BDF("queue:%u vlan tag strip on:%u done", queue, on);
 
-l_out:
-	return;
 }
 
 static void sxevf_vlan_strip_offload_configure(struct rte_eth_dev *dev, s32 mask)
@@ -365,10 +346,9 @@ static void sxevf_vlan_strip_offload_configure(struct rte_eth_dev *dev, s32 mask
 	}
 
 	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
-		     " all queue vlan strip offload flag configure done",
-		     mask, rxmode->offloads);
+			 " all queue vlan strip offload flag configure done",
+			 mask, rxmode->offloads);
 
-	return;
 }
 
 s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask)
@@ -377,13 +357,13 @@ s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask)
 
 	sxevf_vlan_offload_configure(dev, mask);
 
-	PMD_LOG_INFO(DRV, "vlan offload mask:0x%d set done.", mask);
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%x set done.", mask);
 
 	return 0;
 }
 
 s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr)
+				 struct rte_ether_addr *mac_addr)
 {
 	s32 ret;
 	struct sxevf_adapter *adapter = dev->data->dev_private;
@@ -392,18 +372,18 @@ s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
 	ret = sxevf_mac_addr_set(hw, mac_addr->addr_bytes);
 	if (ret) {
 		LOG_ERROR_BDF("modify default mac addr to "MAC_FMT" fail.(err:%d)",
-			      MAC_ADDR(mac_addr->addr_bytes), ret);
+				  MAC_ADDR(mac_addr->addr_bytes), ret);
 	}
 
 	LOG_INFO_BDF("modify default mac addr to "MAC_FMT" success.",
-		      MAC_ADDR(mac_addr->addr_bytes));
+			  MAC_ADDR(mac_addr->addr_bytes));
 
 	return ret;
 }
 
 s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr,
-			     __rte_unused u32 rar_idx ,__rte_unused u32 pool)
+				 struct rte_ether_addr *mac_addr,
+				 __rte_unused u32 rar_idx, __rte_unused u32 pool)
 {
 	s32 ret;
 	struct sxevf_adapter *adapter = dev->data->dev_private;
@@ -411,23 +391,23 @@ s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
 	struct sxevf_mac_filter_context *mac_ctxt = &adapter->mac_filter_ctxt;
 
 	if (memcmp(mac_ctxt->def_mac_addr.addr_bytes, mac_addr->addr_bytes,
-		    sizeof(*mac_addr)) == 0) {
+			sizeof(*mac_addr)) == 0) {
 		ret = -EINVAL;
 		LOG_ERROR_BDF("mac_addr:"MAC_FMT" eaqual to defalut mac addr"
-			     " skip mac addr add.(err:%d)",
-			     MAC_ADDR(mac_addr->addr_bytes), ret);
+				 " skip mac addr add.(err:%d)",
+				 MAC_ADDR(mac_addr->addr_bytes), ret);
 		goto l_out;
 	}
 
 	ret = sxevf_uc_addr_add(hw, 2, mac_addr->addr_bytes);
 	if (ret) {
 		LOG_ERROR_BDF("mac_addr:"MAC_FMT" add fail.(err:%d)",
-			      MAC_ADDR(mac_addr->addr_bytes), ret);
+				  MAC_ADDR(mac_addr->addr_bytes), ret);
 		goto l_out;
 	}
 
 	LOG_INFO_BDF("mac_addr:"MAC_FMT" add success.",
-	     	      MAC_ADDR(mac_addr->addr_bytes));
+				MAC_ADDR(mac_addr->addr_bytes));
 
 l_out:
 	return ret;
@@ -438,24 +418,23 @@ void sxevf_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
 	struct sxevf_adapter *adapter = dev->data->dev_private;
 	struct sxevf_hw *hw = &adapter->hw;
 	struct sxevf_mac_filter_context *mac_ctxt = &adapter->mac_filter_ctxt;
-	struct rte_ether_addr *mac_addr; 
+	struct rte_ether_addr *mac_addr;
 	u8 i;
 
 	sxevf_uc_addr_add(hw, 0, NULL);
 
 	for (i = 0, mac_addr = dev->data->mac_addrs; i < mac_ctxt->uc_table_size;
-	     i++, mac_addr++) {
+		 i++, mac_addr++) {
 		if ((i == index) || rte_is_zero_ether_addr(mac_addr) ||
 		(memcmp(mac_ctxt->def_mac_addr.addr_bytes, mac_addr->addr_bytes,
-		        sizeof(*mac_addr)) == 0)) {
+				sizeof(*mac_addr)) == 0)) {
 			continue;
 		}
 		sxevf_uc_addr_add(hw, 2, mac_addr->addr_bytes);
 	}
 
 	LOG_INFO_BDF("index:%u mac addr"MAC_FMT" remove success.",
-		      index, MAC_ADDR(dev->data->mac_addrs[index].addr_bytes));
-	return;
+			  index, MAC_ADDR(dev->data->mac_addrs[index].addr_bytes));
 }
 
 static u16 sxevf_hash_mac_addr_parse(u8 *mac_addr)
@@ -483,7 +462,7 @@ s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
 	u32 i;
 
 	msg.msg_type = SXEVF_MC_ADDR_SYNC;
-	msg.mc_cnt = min(nb_mc_addr, (u32)SXEVF_MC_ENTRY_NUM_MAX);
+	msg.mc_cnt = RTE_MIN(nb_mc_addr, (u32)SXEVF_MC_ENTRY_NUM_MAX);
 
 	for (i = 0; i < msg.mc_cnt; i++) {
 		msg.mc_addr_extract[i] = sxevf_hash_mac_addr_parse(mc_addr_list->addr_bytes);
@@ -494,7 +473,7 @@ s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
 	result = (msg.mc_cnt << 16) | msg.msg_type;
 
 	if (ret || ((result & SXEVF_MC_ADDR_SYNC) &&
-		    (result & SXEVF_MSGTYPE_NACK))) {
+			(result & SXEVF_MSGTYPE_NACK))) {
 		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
 		goto l_out;
 	}
diff --git a/drivers/net/sxe/vf/sxevf_filter.h b/drivers/net/sxe/vf/sxevf_filter.h
index 9e74718b95..e94be98ec5 100644
--- a/drivers/net/sxe/vf/sxevf_filter.h
+++ b/drivers/net/sxe/vf/sxevf_filter.h
@@ -13,32 +13,32 @@
 #include <ethdev_driver.h>
 #endif
 
-#define SXEVF_MTA_ENTRY_NUM_MAX        128
-#define SXEVF_UTA_HASH_BIT_MAX         4096 
-#define VLAN_N_VID     4096
+#define SXEVF_MTA_ENTRY_NUM_MAX		128
+#define SXEVF_UTA_HASH_BIT_MAX		4096
+#define VLAN_N_VID	4096
 #define BYTE_BIT_NUM   8
 
-#define  SXEVF_VLAN_ID_SHIFT         (5)     
-#define  SXEVF_VLAN_ID_REG_MASK      (0x7F)  
-#define  SXEVF_VLAN_ID_BIT_MASK      (0x1F)  
+#define  SXEVF_VLAN_ID_SHIFT		(5)
+#define  SXEVF_VLAN_ID_REG_MASK	(0x7F)
+#define  SXEVF_VLAN_ID_BIT_MASK	(0x1F)
 
 #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ADDR(x) ((u8*)(x))[0],((u8*)(x))[1], \
-			   ((u8*)(x))[2],((u8*)(x))[3], \
-			   ((u8*)(x))[4],((u8*)(x))[5]
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+					   ((u8 *)(x))[2], ((u8 *)(x))[3], \
+					   ((u8 *)(x))[4], ((u8 *)(x))[5]
 
-#define SXEVF_VLAN_STRIP_BITMAP_SIZE    \
-        (SXEVF_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM))
+#define SXEVF_VLAN_STRIP_BITMAP_SIZE \
+		(SXEVF_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM))
 
 struct sxevf_vlan_context {
-	u32 vlan_table[SXEVF_VFT_TBL_SIZE];  
+	u32 vlan_table[SXEVF_VFT_TBL_SIZE];
 	u32 strip_bitmap[SXEVF_VLAN_STRIP_BITMAP_SIZE];
 	u32 vlan_table_size;
 };
 
 struct sxevf_mac_filter_context {
-	struct rte_ether_addr def_mac_addr; 
-	u8  mc_filter_type;        
+	struct rte_ether_addr def_mac_addr;
+	u8  mc_filter_type;
 	u32 uc_table_size;
 };
 
@@ -65,15 +65,16 @@ void sxevf_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on);
 s32 sxevf_vlan_offload_set(struct rte_eth_dev *dev, s32 mask);
 
 s32 sxevf_default_mac_addr_set(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr);
+				 struct rte_ether_addr *mac_addr);
 
 void sxevf_mac_addr_remove(struct rte_eth_dev *dev, u32 index);
 
 s32 sxevf_mac_addr_add(struct rte_eth_dev *dev,
-			     struct rte_ether_addr *mac_addr,
-			     __rte_unused u32 rar_idx ,__rte_unused u32 pool);
+				 struct rte_ether_addr *mac_addr,
+				 __rte_unused u32 rar_idx, __rte_unused u32 pool);
 
 s32 sxevf_set_mc_addr_list(struct rte_eth_dev *dev,
 			  struct rte_ether_addr *mc_addr_list,
 			  u32 nb_mc_addr);
+
 #endif
diff --git a/drivers/net/sxe/vf/sxevf_irq.c b/drivers/net/sxe/vf/sxevf_irq.c
index 646a10d6dc..eb374a920e 100644
--- a/drivers/net/sxe/vf/sxevf_irq.c
+++ b/drivers/net/sxe/vf/sxevf_irq.c
@@ -30,22 +30,23 @@
 #include "sxevf_queue.h"
 #include "sxe_compat_version.h"
 
-#define SXEVF_IRQ_LINK_CONFIG      (u32)(1 << 3)
+#define SXEVF_IRQ_LINK_CONFIG	(u32)(1 << 3)
 
-#define SXEVF_RX_OTHER_IRQ_MASK     (3)
+#define SXEVF_RX_OTHER_IRQ_MASK	(3)
 
-#define SXEVF_MISC_VEC_ID        RTE_INTR_VEC_ZERO_OFFSET
+#define SXEVF_MISC_VEC_ID	RTE_INTR_VEC_ZERO_OFFSET
 
-#define SXEVF_RX_VEC_BASE          RTE_INTR_VEC_RXTX_OFFSET
+#define SXEVF_RX_VEC_BASE	RTE_INTR_VEC_RXTX_OFFSET
 
 #define SXEVF_EITR_INTERVAL_UNIT_NS	2048
-#define SXEVF_EITR_ITR_INT_SHIFT        3
-#define SXEVF_IRQ_ITR_MASK              (0x00000FF8)
+#define SXEVF_EITR_ITR_INT_SHIFT	3
+#define SXEVF_IRQ_ITR_MASK		(0x00000FF8)
 #define SXEVF_EITR_INTERVAL_US(us) \
 	(((us) * 1000 / SXEVF_EITR_INTERVAL_UNIT_NS << SXEVF_EITR_ITR_INT_SHIFT) & \
 		SXEVF_IRQ_ITR_MASK)
 
-#define SXEVF_QUEUE_ITR_INTERVAL_DEFAULT   500 
+#define SXEVF_QUEUE_ITR_INTERVAL_DEFAULT   500
+#define SXEVF_QUEUE_ITR_INTERVAL   3
 
 void sxevf_intr_disable(struct rte_eth_dev *eth_dev)
 {
@@ -59,7 +60,6 @@ void sxevf_intr_disable(struct rte_eth_dev *eth_dev)
 
 	irq_ctxt->enable_mask = 0;
 
-	return;
 }
 
 void sxevf_intr_enable(struct rte_eth_dev *eth_dev)
@@ -74,7 +74,6 @@ void sxevf_intr_enable(struct rte_eth_dev *eth_dev)
 
 	irq_ctxt->enable_mask = SXEVF_RX_OTHER_IRQ_MASK;
 
-	return;
 }
 
 static s32 sxevf_ctrl_msg_check(struct rte_eth_dev *eth_dev)
@@ -93,7 +92,7 @@ static s32 sxevf_ctrl_msg_check(struct rte_eth_dev *eth_dev)
 
 	if (ctrl_msg & SXEVF_PF_CTRL_MSG_REINIT) {
 		sxe_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_RESET,
-					     NULL);
+						 NULL);
 		PMD_LOG_INFO(DRV, "rcv reinit msg.\n");
 	}
 
@@ -101,7 +100,7 @@ static s32 sxevf_ctrl_msg_check(struct rte_eth_dev *eth_dev)
 	return ret;
 }
 
-STATIC s32 sxevf_link_msg_check(struct rte_eth_dev *eth_dev, bool *link_up)
+static s32 sxevf_link_msg_check(struct rte_eth_dev *eth_dev, bool *link_up)
 {
 	struct sxevf_adapter *adapter = eth_dev->data->dev_private;
 	struct sxevf_hw *hw = &adapter->hw;
@@ -116,18 +115,18 @@ STATIC s32 sxevf_link_msg_check(struct rte_eth_dev *eth_dev, bool *link_up)
 	}
 
 	if (ctrl_msg & SXEVF_PF_CTRL_MSG_NETDEV_DOWN) {
-			*link_up = false;
-			PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x need link down.\n", ctrl_msg);
-		} else if (ctrl_msg & SXEVF_PF_CTRL_MSG_LINK_UPDATE) {
-			*link_up = true;
-			PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x physical link up.\n", ctrl_msg);
-		}
+		*link_up = false;
+		PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x need link down.\n", ctrl_msg);
+	} else if (ctrl_msg & SXEVF_PF_CTRL_MSG_LINK_UPDATE) {
+		*link_up = true;
+		PMD_LOG_INFO(DRV, "rcv ctrl msg:0x%x physical link up.\n", ctrl_msg);
+	}
 
 l_end:
 	return ret;
 }
 
-STATIC void sxevf_mbx_irq_handler(void *data)
+static void sxevf_mbx_irq_handler(void *data)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)data;
 
@@ -137,7 +136,6 @@ STATIC void sxevf_mbx_irq_handler(void *data)
 
 	sxevf_intr_enable(eth_dev);
 
-	return;
 }
 
 void sxevf_irq_init(struct rte_eth_dev *eth_dev)
@@ -153,7 +151,6 @@ void sxevf_irq_init(struct rte_eth_dev *eth_dev)
 	rte_intr_enable(irq_handle);
 	sxevf_intr_enable(eth_dev);
 
-	return;
 }
 
 static s32 sxevf_msix_configure(struct rte_eth_dev *dev)
@@ -178,9 +175,8 @@ static s32 sxevf_msix_configure(struct rte_eth_dev *dev)
 		goto l_out;
 	}
 
-	if (rte_intr_allow_others(handle)) {
+	if (rte_intr_allow_others(handle))
 		vector = base = SXEVF_RX_VEC_BASE;
-	}
 
 	for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
 		queue_id++) {
@@ -194,12 +190,11 @@ static s32 sxevf_msix_configure(struct rte_eth_dev *dev)
 				queue_id,
 				rx_queue->reg_idx,
 				vector);
-		if (vector < base + handle->nb_efd - 1) {
+		if (vector < base + handle->nb_efd - 1)
 			vector++;
-		}
 	}
 
-	irq_interval = SXEVF_EITR_INTERVAL_US(SXEVF_QUEUE_ITR_INTERVAL_DEFAULT);
+	irq_interval = SXEVF_EITR_INTERVAL_US(SXEVF_QUEUE_ITR_INTERVAL);
 	sxevf_ring_irq_interval_set(hw, 0, irq_interval);
 
 l_out:
@@ -214,20 +209,20 @@ s32 sxevf_irq_configure(struct rte_eth_dev *eth_dev)
 	s32 ret = 0;
 
 	if (rte_intr_cap_multiple(handle) &&
-	     eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+		 eth_dev->data->dev_conf.intr_conf.rxq != 0) {
 		irq_num = 1;
 		if (rte_intr_efd_enable(handle, irq_num)) {
 			ret = -SXE_ERR_CONFIG;
 			PMD_LOG_ERR(DRV,
-				      "intr_handle type:%d irq num:%d invalid",
-				      handle->type, irq_num);
+					  "intr_handle type:%d irq num:%d invalid",
+					  handle->type, irq_num);
 			goto l_out;
 		}
 	}
 
 	if (rte_intr_dp_is_en(handle) && !handle->intr_vec) {
 		handle->intr_vec = rte_zmalloc("intr_vec",
-				    eth_dev->data->nb_rx_queues * sizeof(u32), 0);
+					eth_dev->data->nb_rx_queues * sizeof(u32), 0);
 		if (handle->intr_vec == NULL) {
 			PMD_LOG_ERR(DRV, "rx queue irq vector "
 					 "allocate %zuB memory fail.",
@@ -251,13 +246,13 @@ s32 sxevf_irq_configure(struct rte_eth_dev *eth_dev)
 	sxevf_intr_enable(eth_dev);
 
 	PMD_LOG_INFO(DRV,
-		      "intr_handle type:%d rx queue num:%d "
-		      "queue irq num:%u total irq num:%u "
-		      "config done",
-		      handle->type,
-		      eth_dev->data->nb_rx_queues,
-		      handle->nb_efd,
-		      handle->max_intr);
+			  "intr_handle type:%d rx queue num:%d "
+			  "queue irq num:%u total irq num:%u "
+			  "config done",
+			  handle->type,
+			  eth_dev->data->nb_rx_queues,
+			  handle->nb_efd,
+			  handle->max_intr);
 
 l_out:
 	return ret;
@@ -275,7 +270,6 @@ void sxevf_irq_free(struct rte_eth_dev *eth_dev)
 		handle->intr_vec = NULL;
 	}
 
-	return;
 }
 
 void sxevf_irq_unregister(struct rte_eth_dev *eth_dev)
@@ -285,7 +279,6 @@ void sxevf_irq_unregister(struct rte_eth_dev *eth_dev)
 
 	rte_intr_callback_unregister(handle, sxevf_mbx_irq_handler, eth_dev);
 
-	return;
 }
 
 s32 sxevf_rx_queue_intr_enable(struct rte_eth_dev *dev, u16 queue_id)
@@ -299,9 +292,8 @@ s32 sxevf_rx_queue_intr_enable(struct rte_eth_dev *dev, u16 queue_id)
 
 	RTE_SET_USED(queue_id);
 
-	if (rte_intr_allow_others(intr_handle)) {
+	if (rte_intr_allow_others(intr_handle))
 		vector = SXEVF_RX_VEC_BASE;
-	}
 
 	irq_ctxt->enable_mask |= (1 << vector);
 
@@ -323,9 +315,8 @@ s32 sxevf_rx_queue_intr_disable(struct rte_eth_dev *dev, u16 queue_id)
 
 	RTE_SET_USED(queue_id);
 
-	if (rte_intr_allow_others(intr_handle)) {
+	if (rte_intr_allow_others(intr_handle))
 		vector = SXEVF_RX_VEC_BASE;
-	}
 
 	irq_ctxt->enable_mask &= ~(1 << vector);
 
@@ -373,7 +364,6 @@ static void sxevf_physical_link_check(struct rte_eth_dev *dev,  u32 *link_speed,
 
 l_end:
 	PMD_LOG_INFO(DRV, "link up status:%d.\n", *link_up);
-	return;
 }
 
 static void sxevf_link_info_get(struct rte_eth_dev *dev, int wait_to_complete,
@@ -385,24 +375,21 @@ static void sxevf_link_info_get(struct rte_eth_dev *dev, int wait_to_complete,
 	sxevf_physical_link_check(dev, link_speed, link_up);
 
 	if ((wait_to_complete == 0) && (adapter->link_check == 0)) {
-		if (*link_speed == SXEVF_LINK_SPEED_UNKNOWN) {
+		if (*link_speed == SXEVF_LINK_SPEED_UNKNOWN)
 			*link_up = false;
-		} else {
+		else
 			*link_up = true;
-		}
-		goto l_end;
+		return;
 	}
 
 	if (*link_up) {
 		ret = sxevf_link_msg_check(dev, link_up);
 		if (ret) {
 			PMD_LOG_ERR(DRV, "ctrl msg rcv fail, try to next workqueue.\n");
-			goto l_end;
+			return;
 		}
 	}
 
-l_end:
-	return;
 }
 
 s32 sxevf_link_update(struct rte_eth_dev *dev, int wait_to_complete)
@@ -420,9 +407,8 @@ s32 sxevf_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
 						RTE_ETH_LINK_SPEED_FIXED);
 
-	if ((wait_to_complete == 0) || dev->data->dev_conf.intr_conf.lsc) {
+	if ((wait_to_complete == 0) || dev->data->dev_conf.intr_conf.lsc)
 		wait_to_complete = 0;
-	}
 
 	sxevf_link_info_get(dev, wait_to_complete, &link_speed, &link_up);
 
diff --git a/drivers/net/sxe/vf/sxevf_irq.h b/drivers/net/sxe/vf/sxevf_irq.h
index 169eb1f0fd..8ebc319e83 100644
--- a/drivers/net/sxe/vf/sxevf_irq.h
+++ b/drivers/net/sxe/vf/sxevf_irq.h
@@ -14,8 +14,8 @@
 #include "sxe_compat_platform.h"
 
 struct sxevf_irq_context {
-	u32 enable_mask;    
-	u32 enable_mask_original; 
+	u32 enable_mask;
+	u32 enable_mask_original;
 };
 
 void sxevf_intr_disable(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/sxe/vf/sxevf_main.c b/drivers/net/sxe/vf/sxevf_main.c
index 72d600c0b1..1eb4c3b002 100644
--- a/drivers/net/sxe/vf/sxevf_main.c
+++ b/drivers/net/sxe/vf/sxevf_main.c
@@ -32,15 +32,15 @@
 #include "sxevf_ethdev.h"
 #include "sxe_queue_common.h"
 
-#define PCI_VENDOR_ID_STARS      0x1FF2
-#define SXEVF_DEV_ID_ASIC        0x10A2
+#define PCI_VENDOR_ID_STARS	  0x1FF2
+#define SXEVF_DEV_ID_ASIC		0x10A2
 
 static s32 sxevf_probe(struct rte_pci_driver *pci_drv __rte_unused,
 					struct rte_pci_device *pci_dev)
 {
 	s32 ret;
 
-	printf("sxe_version[%s], sxe_commit_id[%s], sxe_branch[%s], sxe_build_time[%s]\n", 
+	printf("sxe_version[%s], sxe_commit_id[%s], sxe_branch[%s], sxe_build_time[%s]\n",
 		SXE_VERSION, SXE_COMMIT_ID, SXE_BRANCH, SXE_BUILD_TIME);
 
 #ifdef SXE_DPDK_DEBUG
@@ -78,17 +78,17 @@ static const struct rte_pci_id sxevf_pci_tbl[] = {
 	{.vendor_id = 0,}
 };
 
-STATIC struct rte_pci_driver rte_sxevf_pmd = {
+static struct rte_pci_driver rte_sxevf_pmd = {
 	.id_table  = sxevf_pci_tbl,
 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-	.probe     = sxevf_probe,
-	.remove    = sxevf_remove,
+	.probe	 = sxevf_probe,
+	.remove	= sxevf_remove,
 };
 
 RTE_PMD_REGISTER_PCI(net_sxevf, rte_sxevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_sxevf, sxevf_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_sxevf, "* igb_uio | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_sxevf,
-			      SXEVF_DEVARG_LINK_CHECK "=<0|1>");
+				  SXEVF_DEVARG_LINK_CHECK "=<0|1>");
 
 #endif
diff --git a/drivers/net/sxe/vf/sxevf_msg.c b/drivers/net/sxe/vf/sxevf_msg.c
index 6cd64fc1b3..aa832b5aeb 100644
--- a/drivers/net/sxe/vf/sxevf_msg.c
+++ b/drivers/net/sxe/vf/sxevf_msg.c
@@ -10,8 +10,8 @@
 #include "sxe_errno.h"
 #include "sxe_logs.h"
 
-#define SXEVF_PFMSG_MASK    0xFF00
-#define SXEVF_DEFAULT_TC_NUM        1
+#define SXEVF_PFMSG_MASK	0xFF00
+#define SXEVF_DEFAULT_TC_NUM		1
 
 void sxevf_mbx_init(struct sxevf_hw *hw)
 {
@@ -28,7 +28,6 @@ void sxevf_mbx_init(struct sxevf_hw *hw)
 
 	hw->mbx.api_version = SXEVF_MBX_API_10;
 
-	return;
 }
 
 static u32 sxevf_mbx_reg_read(struct sxevf_hw *hw)
@@ -47,16 +46,15 @@ static bool sxevf_mbx_bit_check(struct sxevf_hw *hw, u32 mask)
 	bool ret = false;
 	u32 value = sxevf_mbx_reg_read(hw);
 
-	if (value & mask) {
+	if (value & mask)
 		ret = true;
-	}
 
 	hw->mbx.reg_value &= ~mask;
 
 	return ret;
 }
 
-STATIC bool sxevf_pf_msg_check(struct sxevf_hw *hw)
+static bool sxevf_pf_msg_check(struct sxevf_hw *hw)
 {
 	bool ret = false;
 
@@ -68,7 +66,7 @@ STATIC bool sxevf_pf_msg_check(struct sxevf_hw *hw)
 	return ret;
 }
 
-STATIC bool sxevf_pf_ack_check(struct sxevf_hw *hw)
+static bool sxevf_pf_ack_check(struct sxevf_hw *hw)
 {
 	bool ret = false;
 
@@ -85,7 +83,7 @@ bool sxevf_pf_rst_check(struct sxevf_hw *hw)
 	bool ret = false;
 
 	if (!sxevf_mbx_bit_check(hw, (SXE_VFMAILBOX_RSTI |
-				      SXE_VFMAILBOX_RSTD))) {
+					  SXE_VFMAILBOX_RSTD))) {
 		hw->mbx.stats.rsts++;
 		ret = true;
 	}
@@ -93,7 +91,7 @@ bool sxevf_pf_rst_check(struct sxevf_hw *hw)
 	return ret;
 }
 
-STATIC s32 sxevf_mailbox_lock(struct sxevf_hw *hw)
+static s32 sxevf_mailbox_lock(struct sxevf_hw *hw)
 {
 	u32 mailbox;
 	u32 retry = SXEVF_MBX_RETRY_COUNT;
@@ -123,10 +121,9 @@ static void sxevf_mailbox_unlock(struct sxevf_hw *hw)
 	mailbox &= ~SXE_VFMAILBOX_VFU;
 	sxevf_mailbox_write(hw, mailbox);
 
-	return;
 }
 
-STATIC bool sxevf_msg_poll(struct sxevf_hw *hw)
+static bool sxevf_msg_poll(struct sxevf_hw *hw)
 {
 	struct sxevf_mbx_info *mbx = &hw->mbx;
 	u32 retry = mbx->retry;
@@ -148,7 +145,7 @@ STATIC bool sxevf_msg_poll(struct sxevf_hw *hw)
 	return ret;
 }
 
-STATIC bool sxevf_ack_poll(struct sxevf_hw *hw)
+static bool sxevf_ack_poll(struct sxevf_hw *hw)
 {
 	struct sxevf_mbx_info *mbx = &hw->mbx;
 	u32 retry = mbx->retry;
@@ -171,7 +168,7 @@ STATIC bool sxevf_ack_poll(struct sxevf_hw *hw)
 	return ret;
 }
 
-STATIC void sxevf_pf_msg_and_ack_clear(struct sxevf_hw *hw)
+static void sxevf_pf_msg_and_ack_clear(struct sxevf_hw *hw)
 {
 	struct sxevf_adapter *adapter = hw->adapter;
 
@@ -180,7 +177,6 @@ STATIC void sxevf_pf_msg_and_ack_clear(struct sxevf_hw *hw)
 	sxevf_pf_msg_check(hw);
 	sxevf_pf_ack_check(hw);
 
-	return;
 }
 
 static s32 sxevf_send_msg_to_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
@@ -218,9 +214,8 @@ static s32 sxevf_send_msg_to_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
 	old = sxevf_msg_read(hw, 0);
 	msg[0] |= (old & SXEVF_PFMSG_MASK);
 
-	for (i = 0; i < msg_len; i++) {
+	for (i = 0; i < msg_len; i++)
 		sxevf_msg_write(hw, i, msg[i]);
-	}
 
 	sxevf_pf_req_irq_trigger(hw);
 
@@ -259,9 +254,8 @@ s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
 		goto l_end;
 	}
 
-	for (i = 0; i < msg_entry; i++) {
+	for (i = 0; i < msg_entry; i++)
 		msg[i] = sxevf_msg_read(hw, i);
-	}
 
 	msg[0] &= ~SXEVF_PFMSG_MASK;
 
@@ -290,9 +284,8 @@ s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
 		goto l_end;
 	}
 
-	for (i = 0; i < msg_entry; i++) {
+	for (i = 0; i < msg_entry; i++)
 		msg[i] = sxevf_msg_read(hw, i);
-	}
 
 	sxevf_mailbox_unlock(hw);
 
@@ -321,9 +314,8 @@ s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len)
 		goto l_end;
 	}
 
-	for (i = 0; i < msg_entry; i++) {
+	for (i = 0; i < msg_entry; i++)
 		msg[i] = sxevf_msg_read(hw, i);
-	}
 
 	clear = msg[0] & (~SXEVF_PFMSG_MASK);
 	sxevf_msg_write(hw, 0, clear);
@@ -375,9 +367,8 @@ s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len)
 		goto l_out;
 	}
 
-	if (msg_type == SXEVF_RESET) {
+	if (msg_type == SXEVF_RESET)
 		mdelay(10);
-	}
 
 	ret = sxevf_rcv_msg_from_pf(hw, msg, msg_len);
 	if (ret) {
@@ -414,14 +405,12 @@ void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter)
 		if (!ret && (msg.msg_type == (SXEVF_API_NEGOTIATE | SXEVF_MSGTYPE_ACK))) {
 			hw->mbx.api_version = api[idx];
 			break;
-		} else {
-			idx++;
 		}
+		idx++;
 	}
 
 	LOG_INFO_BDF("mailbox api version:%u", hw->mbx.api_version);
 
-	return;
 }
 
 s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
@@ -433,7 +422,7 @@ s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
 
 	req.msg_type = SXEVF_RING_INFO_GET;
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&req,
-				     SXEVF_MSG_NUM(sizeof(req)));
+					 SXEVF_MSG_NUM(sizeof(req)));
 	if (ret) {
 		LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n",
 			   req.msg_type, ret);
@@ -451,23 +440,22 @@ s32 sxevf_ring_info_get(struct sxevf_adapter *adapter,
 		 req.max_tx_num, req.max_rx_num, req.tc_num, req.default_tc);
 
 	if ((req.max_tx_num == 0) ||
-	    (req.max_tx_num > SXEVF_TXRX_RING_NUM_MAX)) {
+		(req.max_tx_num > SXEVF_TXRX_RING_NUM_MAX)) {
 		req.max_tx_num = SXEVF_TXRX_RING_NUM_MAX;
 	}
 
 	if ((req.max_rx_num == 0) ||
-	    (req.max_rx_num > SXEVF_TXRX_RING_NUM_MAX)) {
+		(req.max_rx_num > SXEVF_TXRX_RING_NUM_MAX)) {
 		req.max_rx_num = SXEVF_TXRX_RING_NUM_MAX;
 	}
 
-	if (req.tc_num > req.max_rx_num) {
+	if (req.tc_num > req.max_rx_num)
 		req.tc_num = SXEVF_DEFAULT_TC_NUM;
-	}
+
 	*tc_num = req.tc_num;
 
-	if (req.default_tc > req.max_tx_num) {
+	if (req.default_tc > req.max_tx_num)
 		req.default_tc = 0;
-	}
 
 	*default_tc = req.default_tc;
 
@@ -491,7 +479,7 @@ s32 sxevf_rss_hash_config_get(struct sxevf_adapter *adapter,
 
 	msg.msg_type = SXEVF_RSS_CONF_GET;
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
-				     SXEVF_MSG_NUM(sizeof(msg)));
+					 SXEVF_MSG_NUM(sizeof(msg)));
 	if (ret) {
 		LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n",
 			   msg.msg_type, ret);
@@ -555,9 +543,9 @@ s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 mtu)
 	msg.max_frame = mtu;
 
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
-					     SXEVF_MSG_NUM(sizeof(msg)));
+						 SXEVF_MSG_NUM(sizeof(msg)));
 	if (ret || ((msg.msg_type & SXEVF_LPE_SET) &&
-		    (msg.msg_type & SXEVF_MSGTYPE_NACK))) {
+			(msg.msg_type & SXEVF_MSGTYPE_NACK))) {
 		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
 	}
 
@@ -580,14 +568,13 @@ s32 sxevf_vlan_id_set(struct sxevf_hw *hw, u32 vlan_id,
 
 	LOG_INFO_BDF("update vlan[%u], vlan on = %s\n", vlan_id, vlan_on ? "yes" : "no");
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg,
-					     SXEVF_MSG_NUM(sizeof(msg)));
-	LOG_INFO_BDF("update vlan[%u] ret = %d\n",vlan_id, ret);
+						 SXEVF_MSG_NUM(sizeof(msg)));
+	LOG_INFO_BDF("update vlan[%u] ret = %d\n", vlan_id, ret);
 
 	msg.msg_type &= ~(0xFF << SXEVF_MSGINFO_SHIFT);
 
-	if (ret || (msg.msg_type != (SXEVF_VLAN_SET | SXEVF_MSGTYPE_ACK))) {
+	if (ret || (msg.msg_type != (SXEVF_VLAN_SET | SXEVF_MSGTYPE_ACK)))
 		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
-	}
 
 	return ret;
 }
@@ -602,9 +589,8 @@ s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode)
 	msg.cast_mode = mode;
 
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
-	if (ret || (msg.msg_type != (SXEVF_CAST_MODE_SET | SXEVF_MSGTYPE_ACK))) {
+	if (ret || (msg.msg_type != (SXEVF_CAST_MODE_SET | SXEVF_MSGTYPE_ACK)))
 		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
-	}
 
 	LOG_INFO_BDF("msg_type:0x%x mode:0x%x msg result:0x%x.(ret:%d)\n",
 		   msg.msg_type, mode, msg.msg_type, ret);
@@ -624,16 +610,14 @@ s32 sxevf_uc_addr_add(struct sxevf_hw *hw, u32 index, u8 *mac_addr)
 	msg.index = index;
 	check = *(u32 *)&msg;
 
-	if (mac_addr) {
+	if (mac_addr)
 		memcpy((u8 *)&msg.addr, mac_addr, SXEVF_MAC_ADDR_LEN);
-	}
 
 	ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg)));
 	result = *(u32 *)&msg;
 
-	if (ret || (result != (check | SXEVF_MSGTYPE_ACK))) {
+	if (ret || (result != (check | SXEVF_MSGTYPE_ACK)))
 		ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR;
-	}
 
 	LOG_INFO_BDF("msg_type:0x%x index:%d addr:%pM sync done "
 		 " result:0x%x msg.(ret:%d)\n",
diff --git a/drivers/net/sxe/vf/sxevf_msg.h b/drivers/net/sxe/vf/sxevf_msg.h
index c3e22d7785..aeca7b4bef 100644
--- a/drivers/net/sxe/vf/sxevf_msg.h
+++ b/drivers/net/sxe/vf/sxevf_msg.h
@@ -11,53 +11,53 @@ struct sxevf_adapter;
 #define SXEVF_UC_ENTRY_NUM_MAX 10
 #define SXEVF_MC_ENTRY_NUM_MAX 30
 
-#define SXEVF_MBX_MSG_NUM        16
+#define SXEVF_MBX_MSG_NUM		16
 #define SXEVF_MBX_RETRY_INTERVAL 500
-#define SXEVF_MBX_RETRY_COUNT    2000
+#define SXEVF_MBX_RETRY_COUNT	2000
 
-#define SXEVF_RST_CHECK_NUM          200
+#define SXEVF_RST_CHECK_NUM		  200
 
-#define SXEVF_DEFAULT_ADDR_LEN       4
-#define SXEVF_MC_FILTER_TYPE_WORD    3
+#define SXEVF_DEFAULT_ADDR_LEN	   4
+#define SXEVF_MC_FILTER_TYPE_WORD	3
 
-#define SXEVF_RESET               0x01 
-#define SXEVF_DEV_MAC_ADDR_SET    0x02 
-#define SXEVF_MC_ADDR_SYNC        0x03 
-#define SXEVF_VLAN_SET            0x04 
-#define SXEVF_LPE_SET             0x05  
+#define SXEVF_RESET			   0x01
+#define SXEVF_DEV_MAC_ADDR_SET	0x02
+#define SXEVF_MC_ADDR_SYNC		0x03
+#define SXEVF_VLAN_SET			0x04
+#define SXEVF_LPE_SET			 0x05
 
-#define SXEVF_UC_ADDR_SYNC        0x06  
+#define SXEVF_UC_ADDR_SYNC		0x06
 
-#define SXEVF_API_NEGOTIATE       0x08  
+#define SXEVF_API_NEGOTIATE	   0x08
 
-#define SXEVF_RING_INFO_GET       0x09  
+#define SXEVF_RING_INFO_GET	   0x09
 
-#define SXEVF_REDIR_TBL_GET       0x0a 
-#define SXEVF_RSS_KEY_GET         0x0b 
-#define SXEVF_CAST_MODE_SET       0x0c 
-#define SXEVF_LINK_ENABLE_GET 	  0X0d  
-#define SXEVF_IPSEC_ADD           0x0e 
-#define SXEVF_IPSEC_DEL           0x0f 
-#define SXEVF_RSS_CONF_GET    	  0x10 
+#define SXEVF_REDIR_TBL_GET		 0x0a
+#define SXEVF_RSS_KEY_GET		   0x0b
+#define SXEVF_CAST_MODE_SET		 0x0c
+#define SXEVF_LINK_ENABLE_GET	   0X0d
+#define SXEVF_IPSEC_ADD			 0x0e
+#define SXEVF_IPSEC_DEL			 0x0f
+#define SXEVF_RSS_CONF_GET		  0x10
 
 #define SXEVF_PF_CTRL_MSG_LINK_UPDATE  0x100
 #define SXEVF_PF_CTRL_MSG_NETDEV_DOWN   0x200
 
-#define SXEVF_PF_CTRL_MSG_REINIT        0x400
+#define SXEVF_PF_CTRL_MSG_REINIT		0x400
 
-#define SXEVF_PF_CTRL_MSG_MASK          0x700
-#define SXEVF_PFREQ_MASK                0xFF00 
+#define SXEVF_PF_CTRL_MSG_MASK		  0x700
+#define SXEVF_PFREQ_MASK				0xFF00
 
-#define SXEVF_RSS_HASH_KEY_SIZE   (40)  
-#define SXEVF_MAX_RETA_ENTRIES    (128) 
+#define SXEVF_RSS_HASH_KEY_SIZE   (40)
+#define SXEVF_MAX_RETA_ENTRIES	(128)
 #define SXEVF_RETA_ENTRIES_DWORDS (SXEVF_MAX_RETA_ENTRIES / 16)
 
-#define SXEVF_TX_QUEUES      1 
-#define SXEVF_RX_QUEUES      2 
-#define SXEVF_TRANS_VLAN     3 
-#define SXEVF_DEF_QUEUE      4 
+#define SXEVF_TX_QUEUES	  1
+#define SXEVF_RX_QUEUES	  2
+#define SXEVF_TRANS_VLAN	 3
+#define SXEVF_DEF_QUEUE	  4
 
-#define SXEVF_MSGTYPE_ACK    0x80000000
+#define SXEVF_MSGTYPE_ACK	0x80000000
 #define SXEVF_MSGTYPE_NACK   0x40000000
 
 #define SXEVF_MSGINFO_SHIFT  16
@@ -69,17 +69,17 @@ enum sxevf_mbx_api_version {
 	SXEVF_MBX_API_10 = 0,
 	SXEVF_MBX_API_11,
 	SXEVF_MBX_API_12,
-	SXEVF_MBX_API_13, 
-	SXEVF_MBX_API_14, 
+	SXEVF_MBX_API_13,
+	SXEVF_MBX_API_14,
 
-	SXEVF_MBX_API_NR, 
+	SXEVF_MBX_API_NR,
 };
 
 enum sxevf_cast_mode {
-	SXEVF_CAST_MODE_NONE = 0, 
-	SXEVF_CAST_MODE_MULTI,    
-	SXEVF_CAST_MODE_ALLMULTI, 
-	SXEVF_CAST_MODE_PROMISC,  
+	SXEVF_CAST_MODE_NONE = 0,
+	SXEVF_CAST_MODE_MULTI,
+	SXEVF_CAST_MODE_ALLMULTI,
+	SXEVF_CAST_MODE_PROMISC,
 };
 
 struct sxevf_rst_msg {
@@ -198,4 +198,4 @@ s32 sxevf_uc_addr_add(struct sxevf_hw *hw, u32 index, u8 *mac_addr);
 
 s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len);
 
-#endif 
+#endif
diff --git a/drivers/net/sxe/vf/sxevf_queue.c b/drivers/net/sxe/vf/sxevf_queue.c
index 5e7d9ec17d..15a2461e5f 100644
--- a/drivers/net/sxe/vf/sxevf_queue.c
+++ b/drivers/net/sxe/vf/sxevf_queue.c
@@ -36,7 +36,7 @@ s32 __rte_cold sxevf_rx_queue_setup(struct rte_eth_dev *dev,
 			 struct rte_mempool *mp)
 {
 	struct sxevf_adapter *adapter = dev->data->dev_private;
-	struct sxevf_hw     *hw = &adapter->hw;
+	struct sxevf_hw	 *hw = &adapter->hw;
 	struct rx_setup rx_setup = {};
 	s32 ret;
 
@@ -52,9 +52,8 @@ s32 __rte_cold sxevf_rx_queue_setup(struct rte_eth_dev *dev,
 	rx_setup.rx_batch_alloc_allowed = &adapter->rx_batch_alloc_allowed;
 
 	ret = __sxe_rx_queue_setup(&rx_setup, true);
-	if (ret) {
+	if (ret)
 		LOG_ERROR_BDF("rx queue setup fail.(err:%d)", ret);
-	}
 
 	return ret;
 }
@@ -77,9 +76,8 @@ s32 __rte_cold sxevf_tx_queue_setup(struct rte_eth_dev *dev,
 	tx_setup.tx_conf = tx_conf;
 
 	ret = __sxe_tx_queue_setup(&tx_setup, true);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(DRV, "rx queue setup fail.(err:%d)", ret);
-	}
 
 	return ret;
 }
@@ -93,7 +91,6 @@ void __rte_cold sxevf_rx_queue_release(void *rxq)
 void __rte_cold sxevf_tx_queue_release(void *txq)
 {
 	__sxe_tx_queue_free(txq);
-	return;
 }
 
 #else
@@ -107,7 +104,6 @@ void __rte_cold
 sxevf_tx_queue_release(struct rte_eth_dev *dev, u16 queue_id)
 {
 	__sxe_tx_queue_free(dev->data->tx_queues[queue_id]);
-	return;
 }
 #endif
 
@@ -116,7 +112,6 @@ void sxevf_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
 {
 	__sxe_rx_queue_info_get(dev, queue_id, qinfo);
 
-	return;
 }
 
 void sxevf_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
@@ -124,7 +119,6 @@ void sxevf_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
 {
 	__sxe_tx_queue_info_get(dev, queue_id, q_info);
 
-	return;
 }
 
 s32 sxevf_tx_done_cleanup(void *tx_queue, u32 free_cnt)
@@ -133,9 +127,8 @@ s32 sxevf_tx_done_cleanup(void *tx_queue, u32 free_cnt)
 
 	/* Tx queue cleanup */
 	ret = __sxe_tx_done_cleanup(tx_queue, free_cnt);
-	if (ret) {
+	if (ret)
 		PMD_LOG_ERR(DRV, "tx cleanup fail.(err:%d)", ret);
-	}
 
 	return ret;
 }
@@ -183,7 +176,7 @@ s32 sxevf_rss_reta_query(struct rte_eth_dev *dev,
 }
 
 s32 sxevf_rss_hash_conf_get(struct rte_eth_dev *dev,
-			    struct rte_eth_rss_conf *rss_conf)
+				struct rte_eth_rss_conf *rss_conf)
 {
 	s32 ret = 0;
 	struct sxevf_adapter *adapter = dev->data->dev_private;
@@ -217,20 +210,17 @@ void sxevf_secondary_proc_init(struct rte_eth_dev *eth_dev)
 	bool rx_vec_allowed = 0;
 
 	__sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed, &rx_vec_allowed);
-	return;
 }
 
 void __rte_cold sxevf_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
 {
 	__sxe_txrx_queues_clear(dev, rx_batch_alloc_allowed);
-	return;
 }
 
 void sxevf_queues_free(struct rte_eth_dev *dev)
 {
 	__sxe_queues_free(dev);
 
-	return;
 }
 
 #endif
diff --git a/drivers/net/sxe/vf/sxevf_queue.h b/drivers/net/sxe/vf/sxevf_queue.h
index 1a061231a5..f22bdb6768 100644
--- a/drivers/net/sxe/vf/sxevf_queue.h
+++ b/drivers/net/sxe/vf/sxevf_queue.h
@@ -12,8 +12,8 @@
 typedef union sxe_tx_data_desc sxevf_tx_data_desc_u;
 typedef struct sxe_rx_buffer   sxevf_rx_buffer_s;
 typedef union sxe_rx_data_desc sxevf_rx_data_desc_u;
-typedef struct sxe_tx_queue    sxevf_tx_queue_s;
-typedef struct sxe_rx_queue    sxevf_rx_queue_s;
+typedef struct sxe_tx_queue	sxevf_tx_queue_s;
+typedef struct sxe_rx_queue	sxevf_rx_queue_s;
 
 s32 __rte_cold sxevf_rx_queue_mbufs_alloc(sxevf_rx_queue_s *rxq);
 
@@ -56,7 +56,7 @@ s32 sxevf_rss_reta_query(struct rte_eth_dev *dev,
 			 u16 reta_size);
 
 s32 sxevf_rss_hash_conf_get(struct rte_eth_dev *dev,
-			    struct rte_eth_rss_conf *rss_conf);
+				struct rte_eth_rss_conf *rss_conf);
 
 s32 sxevf_rss_hash_update(struct rte_eth_dev *dev,
 			struct rte_eth_rss_conf *rss_conf);
@@ -69,7 +69,7 @@ s32 sxevf_rx_descriptor_done(void *rx_queue, u16 offset);
 
 s32 sxevf_rx_descriptor_status(void *rx_queue, u16 offset);
 
-u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,u16 num_pkts);
+u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts);
 
 u16 sxevf_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
 s32 sxevf_tx_descriptor_status(void *tx_queue, u16 offset);
diff --git a/drivers/net/sxe/vf/sxevf_rx.c b/drivers/net/sxe/vf/sxevf_rx.c
index 53b9168345..85ed9bffcb 100644
--- a/drivers/net/sxe/vf/sxevf_rx.c
+++ b/drivers/net/sxe/vf/sxevf_rx.c
@@ -32,7 +32,6 @@ static void sxevf_rss_bit_num_configure(struct sxevf_hw *hw, u16 rx_queues_num)
 
 	sxevf_rss_bit_num_set(hw, psrtype);
 
-	return;
 }
 
 static void sxevf_rxmode_offload_configure(struct rte_eth_dev *eth_dev,
@@ -42,22 +41,20 @@ static void sxevf_rxmode_offload_configure(struct rte_eth_dev *eth_dev,
 	u32 frame_size = SXE_GET_FRAME_SIZE(eth_dev);
 
 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
-	    ((frame_size + 2 * SXEVF_VLAN_TAG_SIZE) > buf_size)) {
+		((frame_size + 2 * SXEVF_VLAN_TAG_SIZE) > buf_size)) {
 		if (!eth_dev->data->scattered_rx) {
 			PMD_LOG_WARN(DRV, "rxmode offload:0x%"SXE_PRIX64" max_rx_pkt_len:%u "
-				    "buf_size:%u enable rx scatter",
-				    rxmode->offloads,
-				    frame_size,
-				    buf_size);
+					"buf_size:%u enable rx scatter",
+					rxmode->offloads,
+					frame_size,
+					buf_size);
 		}
 		eth_dev->data->scattered_rx = 1;
 	}
 
-	if (queue_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+	if (queue_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
-	}
 
-	return;
 }
 
 static s32 sxevf_rx_queue_configure(struct rte_eth_dev *eth_dev)
@@ -79,8 +76,8 @@ static s32 sxevf_rx_queue_configure(struct rte_eth_dev *eth_dev)
 		ret = sxevf_rx_queue_mbufs_alloc(rxq);
 		if (ret) {
 			LOG_ERROR_BDF("rx queue num:%u queue id:%u alloc "
-				      "rx buffer fail.(err:%d)",
-				      eth_dev->data->nb_rx_queues, i, ret);
+					  "rx buffer fail.(err:%d)",
+					  eth_dev->data->nb_rx_queues, i, ret);
 			goto l_out;
 		}
 
@@ -130,14 +127,14 @@ s32 sxevf_rx_configure(struct rte_eth_dev *eth_dev)
 	ret = sxevf_rx_max_frame_set(hw, mtu);
 	if (ret) {
 		LOG_ERROR_BDF("max frame size:%u set fail.(err:%d)",
-			      frame_size, ret);
+				  frame_size, ret);
 		goto l_out;
 	}
 
 	ret = sxevf_rx_queue_configure(eth_dev);
 	if (ret) {
 		LOG_ERROR_BDF("rx queue num:%u configure fail.(err:%u)",
-			      eth_dev->data->nb_rx_queues, ret);
+				  eth_dev->data->nb_rx_queues, ret);
 	}
 
 l_out:
@@ -154,13 +151,12 @@ void __rte_cold sxevf_rx_function_set(struct rte_eth_dev *dev)
 	__sxe_rx_function_set(dev, adapter->rx_batch_alloc_allowed, NULL);
 #endif
 
-	return; 
 }
 
 #if defined DPDK_20_11_5 || defined DPDK_19_11_6
 s32 sxevf_rx_descriptor_done(void *rx_queue, u16 offset)
 {
-	return __sxe_rx_descriptor_done(rx_queue,offset);
+	return __sxe_rx_descriptor_done(rx_queue, offset);
 }
 #endif
 
@@ -169,7 +165,7 @@ s32 sxevf_rx_descriptor_status(void *rx_queue, u16 offset)
 	return __sxe_rx_descriptor_status(rx_queue, offset);
 }
 
-u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,u16 num_pkts)
+u16 sxevf_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts, u16 num_pkts)
 {
 	return __sxe_pkts_recv(rx_queue, rx_pkts, num_pkts);
 }
diff --git a/drivers/net/sxe/vf/sxevf_stats.c b/drivers/net/sxe/vf/sxevf_stats.c
index f82ccf1fd7..007bd02887 100644
--- a/drivers/net/sxe/vf/sxevf_stats.c
+++ b/drivers/net/sxe/vf/sxevf_stats.c
@@ -16,14 +16,14 @@
 #endif
 
 #define SXE_HW_XSTATS_CNT (sizeof(sxevf_xstats_field) / \
-		      sizeof(sxevf_xstats_field[0]))
+			  sizeof(sxevf_xstats_field[0]))
 
 static const struct sxevf_stats_field sxevf_xstats_field[] = {
 	{"rx_multicast_packets", offsetof(struct sxevf_hw_stats, vfmprc)},
 };
 
 #ifdef SXE_TEST
-STATIC u32 sxevf_xstats_cnt_get(void)
+static u32 sxevf_xstats_cnt_get(void)
 {
 	return SXE_HW_XSTATS_CNT;
 }
@@ -80,7 +80,7 @@ static s32 sxevf_hw_xstat_offset_get(u32 id, u32 *offset)
 	} else {
 		ret = -SXE_ERR_PARAM;
 		PMD_LOG_ERR(DRV, "invalid id:%u exceed stats size cnt:%u.",
-			    id, size);
+				id, size);
 	}
 
 	return ret;
@@ -100,13 +100,13 @@ s32 sxevf_xstats_get(struct rte_eth_dev *eth_dev,
 
 	cnt = SXE_HW_XSTATS_CNT;
 	PMD_LOG_INFO(DRV, "xstat size:%u. hw xstat field cnt:%lu ",
-		    cnt,
-		    SXE_HW_XSTATS_CNT);
+			cnt,
+			SXE_HW_XSTATS_CNT);
 
 	if (usr_cnt < cnt) {
 		ret = cnt;
 		PMD_LOG_ERR(DRV, "user usr_cnt:%u less than stats cnt:%u.",
-			    usr_cnt, cnt);
+				usr_cnt, cnt);
 		goto l_out;
 	}
 
@@ -115,7 +115,7 @@ s32 sxevf_xstats_get(struct rte_eth_dev *eth_dev,
 	if (xstats == NULL) {
 		ret = 0;
 		PMD_LOG_ERR(DRV, "usr_cnt:%u, input param xstats is null.",
-		            usr_cnt);
+					usr_cnt);
 		goto l_out;
 	}
 
diff --git a/drivers/net/sxe/vf/sxevf_tx.c b/drivers/net/sxe/vf/sxevf_tx.c
index 667a165c64..3d80deee78 100644
--- a/drivers/net/sxe/vf/sxevf_tx.c
+++ b/drivers/net/sxe/vf/sxevf_tx.c
@@ -32,7 +32,6 @@ void sxevf_tx_configure(struct rte_eth_dev *eth_dev)
 	LOG_DEBUG_BDF("tx queue num:%u tx configure done.",
 			eth_dev->data->nb_tx_queues);
 
-	return;
 }
 
 s32 sxevf_tx_descriptor_status(void *tx_queue, u16 offset)
-- 
2.45.2.windows.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] net/sxe: add net driver sxe
  2024-09-06 23:39 ` [PATCH v2] " Jie Liu
@ 2024-09-09 11:35   ` Thomas Monjalon
  0 siblings, 0 replies; 3+ messages in thread
From: Thomas Monjalon @ 2024-09-09 11:35 UTC (permalink / raw)
  To: Jie Liu; +Cc: anatoly.burakov, dev

07/09/2024 01:39, Jie Liu:
>  MAINTAINERS                                |    2 +-
>  doc/guides/nics/features/sxe.ini           |    1 +
>  drivers/net/sxe-dpdk-0.0.0.0-src.tar.gz    |  Bin 0 -> 196888 bytes
>  drivers/net/sxe.zip                        |  Bin 0 -> 193908 bytes

Please do not submit archive binaries.

>  drivers/net/sxe/Makefile                   |   14 +

We do not use Makefile anymore.
Please check Meson.



^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2024-09-09 11:35 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-08-26  8:10 [PATCH] net/sxe: add net driver sxe Jie Liu
2024-09-06 23:39 ` [PATCH v2] " Jie Liu
2024-09-09 11:35   ` Thomas Monjalon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).