DPDK patches and discussions
* [dpdk-dev] [RFC] remove unused functions
@ 2020-11-19  3:52 Ferruh Yigit
  2020-11-19  7:22 ` Xu, Rosen
                   ` (4 more replies)
  0 siblings, 5 replies; 14+ messages in thread
From: Ferruh Yigit @ 2020-11-19  3:52 UTC (permalink / raw)
  To: Jerin Jacob, Cristian Dumitrescu, Hemant Agrawal, Sachin Saxena,
	Ray Kinsella, Neil Horman, Rosen Xu, Jingjing Wu, Beilei Xing,
	Nithin Dabilpuram, Ajit Khaparde, Raveendra Padasalagi,
	Vikas Gupta, Gagandeep Singh, Somalapuram Amaranath, Akhil Goyal,
	Jay Zhou, Timothy McDaniel, Liang Ma, Peter Mccarthy,
	Shepard Siegel, Ed Czeck, John Miller, Igor Russkikh,
	Pavel Belous, Rasesh Mody, Shahed Shaikh, Somnath Kotur,
	Chas Williams, Min Hu (Connor),
	Rahul Lakkireddy, Jeff Guo, Haiyue Wang, Marcin Wojtas,
	Michal Krawczyk, Guy Tzalik, Evgeny Schemeilin, Igor Chauskin,
	Qi Zhang, Xiao Wang, Qiming Yang, Alfredo Cardigliano,
	Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko, Zyta Szpak,
	Liron Himi, Stephen Hemminger, K. Y. Srinivasan, Haiyang Zhang,
	Long Li, Heinrich Kuhn, Harman Kalra, Kiran Kumar K,
	Andrew Rybchenko, Jasvinder Singh, Jiawen Wu, Jian Wang,
	Tianfei zhang, Ori Kam, Guy Kaneti, Anatoly Burakov,
	Maxime Coquelin, Chenbo Xia
  Cc: Ferruh Yigit, dev

Remove the unused functions reported by cppcheck.

This is an easy way to remove clutter; since the code remains in the git
history, any of these functions can be restored later when needed.
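
For reference, this kind of report can be produced with cppcheck's
unused-function check. A minimal sketch only -- the exact command used
for this patch is not recorded here:

    # run from the DPDK source tree root; flags unexported functions
    # that are never called anywhere in the analyzed sources
    cppcheck --enable=unusedFunction --quiet app/ drivers/ examples/ lib/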

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test-eventdev/parser.c                    |   88 -
 app/test-eventdev/parser.h                    |    6 -
 app/test/test_table_pipeline.c                |   36 -
 drivers/bus/dpaa/base/fman/fman_hw.c          |  182 -
 drivers/bus/dpaa/base/fman/netcfg_layer.c     |   11 -
 drivers/bus/dpaa/base/qbman/bman.c            |   34 -
 drivers/bus/dpaa/base/qbman/bman_driver.c     |   16 -
 drivers/bus/dpaa/base/qbman/process.c         |   94 -
 drivers/bus/dpaa/base/qbman/qman.c            |  778 ----
 drivers/bus/dpaa/base/qbman/qman_priv.h       |    9 -
 drivers/bus/dpaa/dpaa_bus.c                   |   20 -
 drivers/bus/dpaa/include/fsl_bman.h           |   15 -
 drivers/bus/dpaa/include/fsl_fman.h           |   28 -
 drivers/bus/dpaa/include/fsl_qman.h           |  307 --
 drivers/bus/dpaa/include/fsl_usd.h            |   11 -
 drivers/bus/dpaa/include/netcfg.h             |    6 -
 drivers/bus/dpaa/rte_dpaa_bus.h               |   13 -
 drivers/bus/dpaa/version.map                  |   10 -
 drivers/bus/fslmc/fslmc_bus.c                 |   19 -
 drivers/bus/fslmc/mc/dpbp.c                   |  141 -
 drivers/bus/fslmc/mc/dpci.c                   |  320 --
 drivers/bus/fslmc/mc/dpcon.c                  |  241 --
 drivers/bus/fslmc/mc/dpdmai.c                 |  144 -
 drivers/bus/fslmc/mc/dpio.c                   |  191 -
 drivers/bus/fslmc/mc/fsl_dpbp.h               |   20 -
 drivers/bus/fslmc/mc/fsl_dpci.h               |   49 -
 drivers/bus/fslmc/mc/fsl_dpcon.h              |   37 -
 drivers/bus/fslmc/mc/fsl_dpdmai.h             |   20 -
 drivers/bus/fslmc/mc/fsl_dpio.h               |   26 -
 drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c      |    7 -
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |    3 -
 .../bus/fslmc/qbman/include/fsl_qbman_debug.h |    2 -
 .../fslmc/qbman/include/fsl_qbman_portal.h    |  463 ---
 drivers/bus/fslmc/qbman/qbman_debug.c         |    5 -
 drivers/bus/fslmc/qbman/qbman_portal.c        |  437 ---
 drivers/bus/fslmc/rte_fslmc.h                 |   10 -
 drivers/bus/fslmc/version.map                 |    6 -
 drivers/bus/ifpga/ifpga_common.c              |   23 -
 drivers/bus/ifpga/ifpga_common.h              |    3 -
 drivers/common/dpaax/dpaa_of.c                |   27 -
 drivers/common/dpaax/dpaa_of.h                |    5 -
 drivers/common/dpaax/dpaax_iova_table.c       |   39 -
 drivers/common/dpaax/dpaax_iova_table.h       |    2 -
 drivers/common/dpaax/version.map              |    1 -
 drivers/common/iavf/iavf_common.c             |  425 ---
 drivers/common/iavf/iavf_prototype.h          |   17 -
 drivers/common/octeontx2/otx2_mbox.c          |   13 -
 drivers/common/octeontx2/otx2_mbox.h          |    1 -
 drivers/crypto/bcmfs/bcmfs_sym_pmd.c          |   19 -
 drivers/crypto/bcmfs/bcmfs_sym_pmd.h          |    3 -
 drivers/crypto/bcmfs/bcmfs_vfio.c             |   24 -
 drivers/crypto/bcmfs/bcmfs_vfio.h             |    4 -
 drivers/crypto/caam_jr/caam_jr_pvt.h          |    1 -
 drivers/crypto/caam_jr/caam_jr_uio.c          |   28 -
 drivers/crypto/ccp/ccp_dev.c                  |   65 -
 drivers/crypto/ccp/ccp_dev.h                  |    8 -
 drivers/crypto/dpaa2_sec/mc/dpseci.c          |  401 --
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h      |   52 -
 drivers/crypto/virtio/virtio_pci.c            |   13 -
 drivers/crypto/virtio/virtio_pci.h            |    5 -
 drivers/event/dlb/dlb_priv.h                  |    2 -
 drivers/event/dlb/dlb_xstats.c                |    7 -
 drivers/event/dlb2/dlb2_priv.h                |    2 -
 drivers/event/dlb2/dlb2_xstats.c              |    7 -
 drivers/event/opdl/opdl_ring.c                |  210 --
 drivers/event/opdl/opdl_ring.h                |  236 --
 drivers/net/ark/ark_ddm.c                     |   13 -
 drivers/net/ark/ark_ddm.h                     |    1 -
 drivers/net/ark/ark_pktchkr.c                 |   52 -
 drivers/net/ark/ark_pktchkr.h                 |    3 -
 drivers/net/ark/ark_pktdir.c                  |   22 -
 drivers/net/ark/ark_pktdir.h                  |    3 -
 drivers/net/ark/ark_pktgen.c                  |   27 -
 drivers/net/ark/ark_pktgen.h                  |    2 -
 drivers/net/ark/ark_udm.c                     |   15 -
 drivers/net/ark/ark_udm.h                     |    2 -
 drivers/net/atlantic/hw_atl/hw_atl_b0.c       |   14 -
 drivers/net/atlantic/hw_atl/hw_atl_b0.h       |    2 -
 drivers/net/atlantic/hw_atl/hw_atl_llh.c      |  318 --
 drivers/net/atlantic/hw_atl/hw_atl_llh.h      |  153 -
 drivers/net/atlantic/hw_atl/hw_atl_utils.c    |   36 -
 drivers/net/atlantic/hw_atl/hw_atl_utils.h    |    4 -
 drivers/net/bnx2x/ecore_sp.c                  |   17 -
 drivers/net/bnx2x/ecore_sp.h                  |    2 -
 drivers/net/bnx2x/elink.c                     | 1367 -------
 drivers/net/bnx2x/elink.h                     |   57 -
 drivers/net/bnxt/tf_core/bitalloc.c           |  156 -
 drivers/net/bnxt/tf_core/bitalloc.h           |   26 -
 drivers/net/bnxt/tf_core/stack.c              |   25 -
 drivers/net/bnxt/tf_core/stack.h              |   12 -
 drivers/net/bnxt/tf_core/tf_core.c            |  241 --
 drivers/net/bnxt/tf_core/tf_core.h            |   81 -
 drivers/net/bnxt/tf_core/tf_msg.c             |   40 -
 drivers/net/bnxt/tf_core/tf_msg.h             |   31 -
 drivers/net/bnxt/tf_core/tf_session.c         |   33 -
 drivers/net/bnxt/tf_core/tf_session.h         |   16 -
 drivers/net/bnxt/tf_core/tf_shadow_tbl.c      |   53 -
 drivers/net/bnxt/tf_core/tf_shadow_tbl.h      |   14 -
 drivers/net/bnxt/tf_core/tf_tcam.c            |    7 -
 drivers/net/bnxt/tf_core/tf_tcam.h            |   17 -
 drivers/net/bnxt/tf_core/tfp.c                |   27 -
 drivers/net/bnxt/tf_core/tfp.h                |    4 -
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c          |   78 -
 drivers/net/bnxt/tf_ulp/ulp_port_db.c         |   31 -
 drivers/net/bnxt/tf_ulp/ulp_port_db.h         |   14 -
 drivers/net/bnxt/tf_ulp/ulp_utils.c           |   11 -
 drivers/net/bnxt/tf_ulp/ulp_utils.h           |    3 -
 drivers/net/bonding/eth_bond_private.h        |    4 -
 drivers/net/bonding/rte_eth_bond.h            |   38 -
 drivers/net/bonding/rte_eth_bond_api.c        |   39 -
 drivers/net/bonding/rte_eth_bond_pmd.c        |   22 -
 drivers/net/cxgbe/base/common.h               |    5 -
 drivers/net/cxgbe/base/t4_hw.c                |   41 -
 drivers/net/dpaa/fmlib/fm_vsp.c               |   19 -
 drivers/net/dpaa/fmlib/fm_vsp_ext.h           |    3 -
 drivers/net/dpaa2/mc/dpdmux.c                 |  725 ----
 drivers/net/dpaa2/mc/dpni.c                   |  818 +----
 drivers/net/dpaa2/mc/dprtc.c                  |  365 --
 drivers/net/dpaa2/mc/fsl_dpdmux.h             |  108 -
 drivers/net/dpaa2/mc/fsl_dpni.h               |  134 -
 drivers/net/dpaa2/mc/fsl_dprtc.h              |   57 -
 drivers/net/e1000/base/e1000_82542.c          |   97 -
 drivers/net/e1000/base/e1000_82543.c          |   78 -
 drivers/net/e1000/base/e1000_82543.h          |    4 -
 drivers/net/e1000/base/e1000_82571.c          |   35 -
 drivers/net/e1000/base/e1000_82571.h          |    1 -
 drivers/net/e1000/base/e1000_82575.c          |  298 --
 drivers/net/e1000/base/e1000_82575.h          |    8 -
 drivers/net/e1000/base/e1000_api.c            |  530 ---
 drivers/net/e1000/base/e1000_api.h            |   40 -
 drivers/net/e1000/base/e1000_base.c           |   78 -
 drivers/net/e1000/base/e1000_base.h           |    1 -
 drivers/net/e1000/base/e1000_ich8lan.c        |  266 --
 drivers/net/e1000/base/e1000_ich8lan.h        |    3 -
 drivers/net/e1000/base/e1000_mac.c            |   14 -
 drivers/net/e1000/base/e1000_mac.h            |    1 -
 drivers/net/e1000/base/e1000_manage.c         |  192 -
 drivers/net/e1000/base/e1000_manage.h         |    2 -
 drivers/net/e1000/base/e1000_nvm.c            |  129 -
 drivers/net/e1000/base/e1000_nvm.h            |    5 -
 drivers/net/e1000/base/e1000_phy.c            |  201 -
 drivers/net/e1000/base/e1000_phy.h            |    4 -
 drivers/net/e1000/base/e1000_vf.c             |   19 -
 drivers/net/e1000/base/e1000_vf.h             |    1 -
 drivers/net/ena/base/ena_com.c                |  222 --
 drivers/net/ena/base/ena_com.h                |  144 -
 drivers/net/ena/base/ena_eth_com.c            |   11 -
 drivers/net/ena/base/ena_eth_com.h            |    2 -
 drivers/net/fm10k/base/fm10k_api.c            |  104 -
 drivers/net/fm10k/base/fm10k_api.h            |   11 -
 drivers/net/fm10k/base/fm10k_tlv.c            |  183 -
 drivers/net/fm10k/base/fm10k_tlv.h            |    1 -
 drivers/net/i40e/base/i40e_common.c           | 2989 ++-------------
 drivers/net/i40e/base/i40e_dcb.c              |   43 -
 drivers/net/i40e/base/i40e_dcb.h              |    3 -
 drivers/net/i40e/base/i40e_diag.c             |  146 -
 drivers/net/i40e/base/i40e_diag.h             |   30 -
 drivers/net/i40e/base/i40e_lan_hmc.c          |  264 --
 drivers/net/i40e/base/i40e_lan_hmc.h          |    6 -
 drivers/net/i40e/base/i40e_nvm.c              |  988 -----
 drivers/net/i40e/base/i40e_prototype.h        |  202 -
 drivers/net/i40e/base/meson.build             |    1 -
 drivers/net/iavf/iavf.h                       |    2 -
 drivers/net/iavf/iavf_vchnl.c                 |   72 -
 drivers/net/ice/base/ice_acl.c                |  108 -
 drivers/net/ice/base/ice_acl.h                |   13 -
 drivers/net/ice/base/ice_common.c             | 2084 ++---------
 drivers/net/ice/base/ice_common.h             |   70 -
 drivers/net/ice/base/ice_dcb.c                |  161 -
 drivers/net/ice/base/ice_dcb.h                |   11 -
 drivers/net/ice/base/ice_fdir.c               |  262 --
 drivers/net/ice/base/ice_fdir.h               |   16 -
 drivers/net/ice/base/ice_flex_pipe.c          |  103 -
 drivers/net/ice/base/ice_flex_pipe.h          |    4 -
 drivers/net/ice/base/ice_flow.c               |  207 --
 drivers/net/ice/base/ice_flow.h               |   15 -
 drivers/net/ice/base/ice_nvm.c                |  200 -
 drivers/net/ice/base/ice_nvm.h                |    8 -
 drivers/net/ice/base/ice_sched.c              | 1440 +-------
 drivers/net/ice/base/ice_sched.h              |   78 -
 drivers/net/ice/base/ice_switch.c             | 1646 +--------
 drivers/net/ice/base/ice_switch.h             |   62 -
 drivers/net/igc/base/igc_api.c                |  598 ---
 drivers/net/igc/base/igc_api.h                |   41 -
 drivers/net/igc/base/igc_base.c               |   78 -
 drivers/net/igc/base/igc_base.h               |    1 -
 drivers/net/igc/base/igc_hw.h                 |    3 -
 drivers/net/igc/base/igc_i225.c               |  159 -
 drivers/net/igc/base/igc_i225.h               |    4 -
 drivers/net/igc/base/igc_mac.c                |  853 -----
 drivers/net/igc/base/igc_mac.h                |   22 -
 drivers/net/igc/base/igc_manage.c             |  262 --
 drivers/net/igc/base/igc_manage.h             |    4 -
 drivers/net/igc/base/igc_nvm.c                |  679 ----
 drivers/net/igc/base/igc_nvm.h                |   16 -
 drivers/net/igc/base/igc_osdep.c              |   25 -
 drivers/net/igc/base/igc_phy.c                | 3256 +----------------
 drivers/net/igc/base/igc_phy.h                |   49 -
 drivers/net/ionic/ionic.h                     |    2 -
 drivers/net/ionic/ionic_dev.c                 |   39 -
 drivers/net/ionic/ionic_dev.h                 |    4 -
 drivers/net/ionic/ionic_lif.c                 |   11 -
 drivers/net/ionic/ionic_lif.h                 |    1 -
 drivers/net/ionic/ionic_main.c                |   33 -
 drivers/net/ionic/ionic_rx_filter.c           |   14 -
 drivers/net/ionic/ionic_rx_filter.h           |    1 -
 drivers/net/mlx5/mlx5.h                       |    1 -
 drivers/net/mlx5/mlx5_utils.c                 |   21 -
 drivers/net/mlx5/mlx5_utils.h                 |   25 -
 drivers/net/mvneta/mvneta_ethdev.c            |   18 -
 drivers/net/netvsc/hn_rndis.c                 |   31 -
 drivers/net/netvsc/hn_rndis.h                 |    1 -
 drivers/net/netvsc/hn_var.h                   |    3 -
 drivers/net/netvsc/hn_vf.c                    |   25 -
 drivers/net/nfp/nfpcore/nfp_cpp.h             |  213 --
 drivers/net/nfp/nfpcore/nfp_cppcore.c         |  218 --
 drivers/net/nfp/nfpcore/nfp_mip.c             |    6 -
 drivers/net/nfp/nfpcore/nfp_mip.h             |    1 -
 drivers/net/nfp/nfpcore/nfp_mutex.c           |   93 -
 drivers/net/nfp/nfpcore/nfp_nsp.c             |   41 -
 drivers/net/nfp/nfpcore/nfp_nsp.h             |   16 -
 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c        |   79 -
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c         |  206 --
 drivers/net/nfp/nfpcore/nfp_resource.c        |   12 -
 drivers/net/nfp/nfpcore/nfp_resource.h        |    7 -
 drivers/net/nfp/nfpcore/nfp_rtsym.c           |   34 -
 drivers/net/nfp/nfpcore/nfp_rtsym.h           |    4 -
 drivers/net/octeontx/base/octeontx_bgx.c      |   54 -
 drivers/net/octeontx/base/octeontx_bgx.h      |    2 -
 drivers/net/octeontx/base/octeontx_pkivf.c    |   22 -
 drivers/net/octeontx/base/octeontx_pkivf.h    |    1 -
 drivers/net/octeontx2/otx2_ethdev.c           |   26 -
 drivers/net/octeontx2/otx2_ethdev.h           |    3 -
 drivers/net/octeontx2/otx2_ethdev_debug.c     |   55 -
 drivers/net/octeontx2/otx2_flow.h             |    2 -
 drivers/net/octeontx2/otx2_flow_utils.c       |   18 -
 drivers/net/pfe/base/pfe.h                    |   12 -
 drivers/net/pfe/pfe_hal.c                     |  144 -
 drivers/net/pfe/pfe_hif_lib.c                 |   20 -
 drivers/net/pfe/pfe_hif_lib.h                 |    1 -
 drivers/net/qede/base/ecore.h                 |    3 -
 drivers/net/qede/base/ecore_cxt.c             |  229 --
 drivers/net/qede/base/ecore_cxt.h             |   27 -
 drivers/net/qede/base/ecore_dcbx.c            |  266 --
 drivers/net/qede/base/ecore_dcbx_api.h        |   27 -
 drivers/net/qede/base/ecore_dev.c             |  306 --
 drivers/net/qede/base/ecore_dev_api.h         |  127 -
 drivers/net/qede/base/ecore_hw.c              |   16 -
 drivers/net/qede/base/ecore_hw.h              |   10 -
 drivers/net/qede/base/ecore_init_fw_funcs.c   |  616 ----
 drivers/net/qede/base/ecore_init_fw_funcs.h   |  227 --
 drivers/net/qede/base/ecore_int.c             |  193 -
 drivers/net/qede/base/ecore_int.h             |   13 -
 drivers/net/qede/base/ecore_int_api.h         |   60 -
 drivers/net/qede/base/ecore_iov_api.h         |  469 ---
 drivers/net/qede/base/ecore_l2.c              |  103 -
 drivers/net/qede/base/ecore_l2_api.h          |   24 -
 drivers/net/qede/base/ecore_mcp.c             | 1121 +-----
 drivers/net/qede/base/ecore_mcp.h             |   37 -
 drivers/net/qede/base/ecore_mcp_api.h         |  449 ---
 drivers/net/qede/base/ecore_sp_commands.c     |   89 -
 drivers/net/qede/base/ecore_sp_commands.h     |   21 -
 drivers/net/qede/base/ecore_sriov.c           |  767 ----
 drivers/net/qede/base/ecore_vf.c              |   48 -
 drivers/net/qede/base/ecore_vf_api.h          |   40 -
 drivers/net/qede/qede_debug.c                 |  532 ---
 drivers/net/qede/qede_debug.h                 |   97 -
 drivers/net/sfc/sfc_kvargs.c                  |   37 -
 drivers/net/sfc/sfc_kvargs.h                  |    2 -
 drivers/net/softnic/parser.c                  |  218 --
 drivers/net/softnic/parser.h                  |   10 -
 .../net/softnic/rte_eth_softnic_cryptodev.c   |   15 -
 .../net/softnic/rte_eth_softnic_internals.h   |   28 -
 drivers/net/softnic/rte_eth_softnic_thread.c  |  183 -
 drivers/net/txgbe/base/txgbe_eeprom.c         |   72 -
 drivers/net/txgbe/base/txgbe_eeprom.h         |    2 -
 drivers/raw/ifpga/base/opae_eth_group.c       |   25 -
 drivers/raw/ifpga/base/opae_eth_group.h       |    1 -
 drivers/raw/ifpga/base/opae_hw_api.c          |  212 --
 drivers/raw/ifpga/base/opae_hw_api.h          |   36 -
 drivers/raw/ifpga/base/opae_i2c.c             |   12 -
 drivers/raw/ifpga/base/opae_i2c.h             |    4 -
 drivers/raw/ifpga/base/opae_ifpga_hw_api.c    |   99 -
 drivers/raw/ifpga/base/opae_ifpga_hw_api.h    |   15 -
 drivers/regex/mlx5/mlx5_regex.h               |    2 -
 drivers/regex/mlx5/mlx5_regex_fastpath.c      |   25 -
 drivers/regex/mlx5/mlx5_rxp.c                 |   45 -
 .../regex/octeontx2/otx2_regexdev_hw_access.c |   58 -
 .../regex/octeontx2/otx2_regexdev_hw_access.h |    2 -
 drivers/regex/octeontx2/otx2_regexdev_mbox.c  |   28 -
 drivers/regex/octeontx2/otx2_regexdev_mbox.h  |    3 -
 examples/ip_pipeline/cryptodev.c              |    8 -
 examples/ip_pipeline/cryptodev.h              |    3 -
 examples/ip_pipeline/link.c                   |   21 -
 examples/ip_pipeline/link.h                   |    3 -
 examples/ip_pipeline/parser.c                 |  202 -
 examples/ip_pipeline/parser.h                 |    7 -
 examples/pipeline/obj.c                       |   21 -
 examples/pipeline/obj.h                       |    3 -
 lib/librte_eal/linux/eal_memory.c             |    8 -
 lib/librte_vhost/fd_man.c                     |   15 -
 lib/librte_vhost/fd_man.h                     |    2 -
 302 files changed, 833 insertions(+), 38856 deletions(-)
 delete mode 100644 drivers/net/i40e/base/i40e_diag.c
 delete mode 100644 drivers/net/i40e/base/i40e_diag.h

diff --git a/app/test-eventdev/parser.c b/app/test-eventdev/parser.c
index 24f1855e9a..131f7383d9 100644
--- a/app/test-eventdev/parser.c
+++ b/app/test-eventdev/parser.c
@@ -37,44 +37,6 @@ get_hex_val(char c)
 	}
 }
 
-int
-parser_read_arg_bool(const char *p)
-{
-	p = skip_white_spaces(p);
-	int result = -EINVAL;
-
-	if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
-		((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
-		p += 3;
-		result = 1;
-	}
-
-	if (((p[0] == 'o') && (p[1] == 'n')) ||
-		((p[0] == 'O') && (p[1] == 'N'))) {
-		p += 2;
-		result = 1;
-	}
-
-	if (((p[0] == 'n') && (p[1] == 'o')) ||
-		((p[0] == 'N') && (p[1] == 'O'))) {
-		p += 2;
-		result = 0;
-	}
-
-	if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
-		((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
-		p += 3;
-		result = 0;
-	}
-
-	p = skip_white_spaces(p);
-
-	if (p[0] != '\0')
-		return -EINVAL;
-
-	return result;
-}
-
 int
 parser_read_uint64(uint64_t *value, const char *p)
 {
@@ -115,24 +77,6 @@ parser_read_uint64(uint64_t *value, const char *p)
 	return 0;
 }
 
-int
-parser_read_int32(int32_t *value, const char *p)
-{
-	char *next;
-	int32_t val;
-
-	p = skip_white_spaces(p);
-	if (!isdigit(*p))
-		return -EINVAL;
-
-	val = strtol(p, &next, 10);
-	if (p == next)
-		return -EINVAL;
-
-	*value = val;
-	return 0;
-}
-
 int
 parser_read_uint64_hex(uint64_t *value, const char *p)
 {
@@ -169,22 +113,6 @@ parser_read_uint32(uint32_t *value, const char *p)
 	return 0;
 }
 
-int
-parser_read_uint32_hex(uint32_t *value, const char *p)
-{
-	uint64_t val = 0;
-	int ret = parser_read_uint64_hex(&val, p);
-
-	if (ret < 0)
-		return ret;
-
-	if (val > UINT32_MAX)
-		return -ERANGE;
-
-	*value = val;
-	return 0;
-}
-
 int
 parser_read_uint16(uint16_t *value, const char *p)
 {
@@ -201,22 +129,6 @@ parser_read_uint16(uint16_t *value, const char *p)
 	return 0;
 }
 
-int
-parser_read_uint16_hex(uint16_t *value, const char *p)
-{
-	uint64_t val = 0;
-	int ret = parser_read_uint64_hex(&val, p);
-
-	if (ret < 0)
-		return ret;
-
-	if (val > UINT16_MAX)
-		return -ERANGE;
-
-	*value = val;
-	return 0;
-}
-
 int
 parser_read_uint8(uint8_t *value, const char *p)
 {
diff --git a/app/test-eventdev/parser.h b/app/test-eventdev/parser.h
index 673ff22d78..94856e66e3 100644
--- a/app/test-eventdev/parser.h
+++ b/app/test-eventdev/parser.h
@@ -28,20 +28,14 @@ skip_digits(const char *src)
 	return i;
 }
 
-int parser_read_arg_bool(const char *p);
-
 int parser_read_uint64(uint64_t *value, const char *p);
 int parser_read_uint32(uint32_t *value, const char *p);
 int parser_read_uint16(uint16_t *value, const char *p);
 int parser_read_uint8(uint8_t *value, const char *p);
 
 int parser_read_uint64_hex(uint64_t *value, const char *p);
-int parser_read_uint32_hex(uint32_t *value, const char *p);
-int parser_read_uint16_hex(uint16_t *value, const char *p);
 int parser_read_uint8_hex(uint8_t *value, const char *p);
 
-int parser_read_int32(int32_t *value, const char *p);
-
 int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
 
 int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
diff --git a/app/test/test_table_pipeline.c b/app/test/test_table_pipeline.c
index aabf4375db..4e5926a7c0 100644
--- a/app/test/test_table_pipeline.c
+++ b/app/test/test_table_pipeline.c
@@ -61,46 +61,10 @@ rte_pipeline_port_out_action_handler port_action_stub(struct rte_mbuf **pkts,
 
 #endif
 
-rte_pipeline_table_action_handler_hit
-table_action_0x00(struct rte_pipeline *p, struct rte_mbuf **pkts,
-	uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
-
-rte_pipeline_table_action_handler_hit
-table_action_stub_hit(struct rte_pipeline *p, struct rte_mbuf **pkts,
-	uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
-
 static int
 table_action_stub_miss(struct rte_pipeline *p, struct rte_mbuf **pkts,
 	uint64_t pkts_mask, struct rte_pipeline_table_entry *entry, void *arg);
 
-rte_pipeline_table_action_handler_hit
-table_action_0x00(__rte_unused struct rte_pipeline *p,
-	__rte_unused struct rte_mbuf **pkts,
-	uint64_t pkts_mask,
-	__rte_unused struct rte_pipeline_table_entry **entry,
-	__rte_unused void *arg)
-{
-	printf("Table Action, setting pkts_mask to 0x00\n");
-	pkts_mask = ~0x00;
-	rte_pipeline_ah_packet_drop(p, pkts_mask);
-	return 0;
-}
-
-rte_pipeline_table_action_handler_hit
-table_action_stub_hit(__rte_unused struct rte_pipeline *p,
-	__rte_unused struct rte_mbuf **pkts,
-	uint64_t pkts_mask,
-	__rte_unused struct rte_pipeline_table_entry **entry,
-	__rte_unused void *arg)
-{
-	printf("STUB Table Action Hit - doing nothing\n");
-	printf("STUB Table Action Hit - setting mask to 0x%"PRIx64"\n",
-		override_hit_mask);
-	pkts_mask = (~override_hit_mask) & 0x3;
-	rte_pipeline_ah_packet_drop(p, pkts_mask);
-	return 0;
-}
-
 static int
 table_action_stub_miss(struct rte_pipeline *p,
 	__rte_unused struct rte_mbuf **pkts,
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 4ab49f7853..b69b133a90 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -56,74 +56,6 @@ fman_if_reset_mcast_filter_table(struct fman_if *p)
 		out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
 }
 
-static
-uint32_t get_mac_hash_code(uint64_t eth_addr)
-{
-	uint64_t	mask1, mask2;
-	uint32_t	xorVal = 0;
-	uint8_t		i, j;
-
-	for (i = 0; i < 6; i++) {
-		mask1 = eth_addr & (uint64_t)0x01;
-		eth_addr >>= 1;
-
-		for (j = 0; j < 7; j++) {
-			mask2 = eth_addr & (uint64_t)0x01;
-			mask1 ^= mask2;
-			eth_addr >>= 1;
-		}
-
-		xorVal |= (mask1 << (5 - i));
-	}
-
-	return xorVal;
-}
-
-int
-fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
-{
-	uint64_t eth_addr;
-	void *hashtable_ctrl;
-	uint32_t hash;
-
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-
-	eth_addr = ETH_ADDR_TO_UINT64(eth);
-
-	if (!(eth_addr & GROUP_ADDRESS))
-		return -1;
-
-	hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
-	hash = hash | HASH_CTRL_MCAST_EN;
-
-	hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
-	out_be32(hashtable_ctrl, hash);
-
-	return 0;
-}
-
-int
-fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
-{
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-	void *mac_reg =
-		&((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
-	u32 val = in_be32(mac_reg);
-
-	eth[0] = (val & 0x000000ff) >> 0;
-	eth[1] = (val & 0x0000ff00) >> 8;
-	eth[2] = (val & 0x00ff0000) >> 16;
-	eth[3] = (val & 0xff000000) >> 24;
-
-	mac_reg =  &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
-	val = in_be32(mac_reg);
-
-	eth[4] = (val & 0x000000ff) >> 0;
-	eth[5] = (val & 0x0000ff00) >> 8;
-
-	return 0;
-}
-
 void
 fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
 {
@@ -180,38 +112,6 @@ fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
 	return 0;
 }
 
-void
-fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
-{
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-	u32 value = 0;
-	void *cmdcfg;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	/* Set Rx Ignore Pause Frames */
-	cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
-	if (enable)
-		value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
-	else
-		value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
-
-	out_be32(cmdcfg, value);
-}
-
-void
-fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
-{
-	struct __fman_if *__if = container_of(p, struct __fman_if, __if);
-	unsigned int *maxfrm;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	/* Set Max frame length */
-	maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
-	out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
-}
-
 void
 fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
 {
@@ -422,23 +322,6 @@ fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
 	return 0;
 }
 
-int
-fman_if_get_fdoff(struct fman_if *fm_if)
-{
-	u32 fmbm_rebm;
-	int fdoff;
-
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-
-	assert(fman_ccsr_map_fd != -1);
-
-	fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
-
-	fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
-
-	return fdoff;
-}
-
 void
 fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
 {
@@ -451,28 +334,6 @@ fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
 	out_be32(fmbm_refqid, err_fqid);
 }
 
-int
-fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	int val = 0;
-	int iceof_mask = 0x001f0000;
-	int icsz_mask = 0x0000001f;
-	int iciof_mask = 0x00000f00;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	unsigned int *fmbm_ricp =
-		&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
-	val = in_be32(fmbm_ricp);
-
-	icp->iceof = (val & iceof_mask) >> 12;
-	icp->iciof = (val & iciof_mask) >> 4;
-	icp->icsz = (val & icsz_mask) << 4;
-
-	return 0;
-}
-
 int
 fman_if_set_ic_params(struct fman_if *fm_if,
 			  const struct fman_if_ic_params *icp)
@@ -526,19 +387,6 @@ fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
 	out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
 }
 
-uint16_t
-fman_if_get_maxfrm(struct fman_if *fm_if)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	unsigned int *reg_maxfrm;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
-
-	return (in_be32(reg_maxfrm) | 0x0000FFFF);
-}
-
 /* MSB in fmbm_rebm register
  * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
  *     of smaller size and store the frame in scatter gather (S/G) buffers
@@ -580,36 +428,6 @@ fman_if_set_sg(struct fman_if *fm_if, int enable)
 	out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
 }
 
-void
-fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	unsigned int *fmqm_pndn;
-
-	assert(fman_ccsr_map_fd != -1);
-
-	fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
-
-	out_be32(fmqm_pndn, nia);
-}
-
-void
-fman_if_discard_rx_errors(struct fman_if *fm_if)
-{
-	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
-	unsigned int *fmbm_rfsdm, *fmbm_rfsem;
-
-	fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
-	out_be32(fmbm_rfsem, 0);
-
-	/* Configure the discard mask to discard the error packets which have
-	 * DMA errors, Frame size error, Header error etc. The mask 0x010EE3F0
-	 * is to configured discard all the errors which come in the FD[STATUS]
-	 */
-	fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
-	out_be32(fmbm_rfsdm, 0x010EE3F0);
-}
-
 void
 fman_if_receive_rx_errors(struct fman_if *fm_if,
 	unsigned int err_eq)
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index b7009f2299..1d6460f1d1 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -148,14 +148,3 @@ netcfg_acquire(void)
 
 	return NULL;
 }
-
-void
-netcfg_release(struct netcfg_info *cfg_ptr)
-{
-	rte_free(cfg_ptr);
-	/* Close socket for shared interfaces */
-	if (skfd >= 0) {
-		close(skfd);
-		skfd = -1;
-	}
-}
diff --git a/drivers/bus/dpaa/base/qbman/bman.c b/drivers/bus/dpaa/base/qbman/bman.c
index 8a6290734f..95215bb24e 100644
--- a/drivers/bus/dpaa/base/qbman/bman.c
+++ b/drivers/bus/dpaa/base/qbman/bman.c
@@ -321,41 +321,7 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
 	return ret;
 }
 
-int bman_query_pools(struct bm_pool_state *state)
-{
-	struct bman_portal *p = get_affine_portal();
-	struct bm_mc_result *mcr;
-
-	bm_mc_start(&p->p);
-	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
-	while (!(mcr = bm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
-		    BM_MCR_VERB_CMD_QUERY);
-	*state = mcr->query;
-	state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
-	state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
-	state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
-	state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
-	return 0;
-}
-
 u32 bman_query_free_buffers(struct bman_pool *pool)
 {
 	return bm_pool_free_buffers(pool->params.bpid);
 }
-
-int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
-{
-	u32 bpid;
-
-	bpid = bman_get_params(pool)->bpid;
-
-	return bm_pool_set(bpid, thresholds);
-}
-
-int bman_shutdown_pool(u32 bpid)
-{
-	struct bman_portal *p = get_affine_portal();
-	return bm_shutdown_pool(&p->p, bpid);
-}
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index 750b756b93..8763ac6215 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -109,11 +109,6 @@ static int fsl_bman_portal_finish(void)
 	return ret;
 }
 
-int bman_thread_fd(void)
-{
-	return bmfd;
-}
-
 int bman_thread_init(void)
 {
 	/* Convert from contiguous/virtual cpu numbering to real cpu when
@@ -127,17 +122,6 @@ int bman_thread_finish(void)
 	return fsl_bman_portal_finish();
 }
 
-void bman_thread_irq(void)
-{
-	qbman_invoke_irq(pcfg.irq);
-	/* Now we need to uninhibit interrupts. This is the only code outside
-	 * the regular portal driver that manipulates any portal register, so
-	 * rather than breaking that encapsulation I am simply hard-coding the
-	 * offset to the inhibit register here.
-	 */
-	out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
-}
-
 int bman_init_ccsr(const struct device_node *node)
 {
 	static int ccsr_map_fd;
diff --git a/drivers/bus/dpaa/base/qbman/process.c b/drivers/bus/dpaa/base/qbman/process.c
index 9bc92681cd..9ce8ac8b12 100644
--- a/drivers/bus/dpaa/base/qbman/process.c
+++ b/drivers/bus/dpaa/base/qbman/process.c
@@ -204,100 +204,6 @@ struct dpaa_ioctl_raw_portal {
 #define DPAA_IOCTL_FREE_RAW_PORTAL \
 	_IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)
 
-static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
-{
-	int ret = check_fd();
-
-	if (ret)
-		return ret;
-
-	ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
-	if (ret) {
-		perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
-		return ret;
-	}
-	return 0;
-}
-
-static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
-{
-	int ret = check_fd();
-
-	if (ret)
-		return ret;
-
-	ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
-	if (ret) {
-		perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
-		return ret;
-	}
-	return 0;
-}
-
-int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-	int ret;
-
-	input.type = dpaa_portal_qman;
-	input.index = portal->index;
-	input.enable_stash = portal->enable_stash;
-	input.cpu = portal->cpu;
-	input.cache = portal->cache;
-	input.window = portal->window;
-	input.sdest = portal->sdest;
-
-	ret =  process_portal_allocate(&input);
-	if (ret)
-		return ret;
-	portal->index = input.index;
-	portal->cinh = input.cinh;
-	portal->cena  = input.cena;
-	return 0;
-}
-
-int qman_free_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-
-	input.type = dpaa_portal_qman;
-	input.index = portal->index;
-	input.cinh = portal->cinh;
-	input.cena = portal->cena;
-
-	return process_portal_free(&input);
-}
-
-int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-	int ret;
-
-	input.type = dpaa_portal_bman;
-	input.index = portal->index;
-	input.enable_stash = 0;
-
-	ret =  process_portal_allocate(&input);
-	if (ret)
-		return ret;
-	portal->index = input.index;
-	portal->cinh = input.cinh;
-	portal->cena  = input.cena;
-	return 0;
-}
-
-int bman_free_raw_portal(struct dpaa_raw_portal *portal)
-{
-	struct dpaa_ioctl_raw_portal input;
-
-	input.type = dpaa_portal_bman;
-	input.index = portal->index;
-	input.cinh = portal->cinh;
-	input.cena = portal->cena;
-
-	return process_portal_free(&input);
-}
-
 #define DPAA_IOCTL_ENABLE_LINK_STATUS_INTERRUPT \
 	_IOW(DPAA_IOCTL_MAGIC, 0x0E, struct usdpaa_ioctl_link_status)
 
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 447c091770..a8deecf689 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -199,14 +199,6 @@ static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
 	return -ENOMEM;
 }
 
-static void clear_fq_table_entry(u32 entry)
-{
-	spin_lock(&fq_hash_table_lock);
-	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
-	qman_fq_lookup_table[entry] = NULL;
-	spin_unlock(&fq_hash_table_lock);
-}
-
 static inline struct qman_fq *get_fq_table_entry(u32 entry)
 {
 	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
@@ -235,13 +227,6 @@ static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
 	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
 }
 
-static inline void cpu_to_hw_fd(struct qm_fd *fd)
-{
-	fd->addr = cpu_to_be40(fd->addr);
-	fd->status = cpu_to_be32(fd->status);
-	fd->opaque = cpu_to_be32(fd->opaque);
-}
-
 static inline void hw_fd_to_cpu(struct qm_fd *fd)
 {
 	fd->addr = be40_to_cpu(fd->addr);
@@ -285,15 +270,6 @@ static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
 	return IRQ_HANDLED;
 }
 
-/* This inner version is used privately by qman_create_affine_portal(), as well
- * as by the exported qman_stop_dequeues().
- */
-static inline void qman_stop_dequeues_ex(struct qman_portal *p)
-{
-	if (!(p->dqrr_disable_ref++))
-		qm_dqrr_set_maxfill(&p->p, 0);
-}
-
 static int drain_mr_fqrni(struct qm_portal *p)
 {
 	const struct qm_mr_entry *msg;
@@ -1173,17 +1149,6 @@ int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
 	return 0;
 }
 
-u16 qman_affine_channel(int cpu)
-{
-	if (cpu < 0) {
-		struct qman_portal *portal = get_affine_portal();
-
-		cpu = portal->config->cpu;
-	}
-	DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
-	return affine_channels[cpu];
-}
-
 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 				 void **bufs,
 				 struct qman_portal *p)
@@ -1247,14 +1212,6 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 	return rx_number;
 }
 
-void qman_clear_irq(void)
-{
-	struct qman_portal *p = get_affine_portal();
-	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
-		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
-	qm_isr_status_clear(&p->p, clear);
-}
-
 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
 			void **bufs)
 {
@@ -1370,51 +1327,6 @@ void qman_dqrr_consume(struct qman_fq *fq,
 	qm_dqrr_next(&p->p);
 }
 
-int qman_poll_dqrr(unsigned int limit)
-{
-	struct qman_portal *p = get_affine_portal();
-	int ret;
-
-	ret = __poll_portal_fast(p, limit);
-	return ret;
-}
-
-void qman_poll(void)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
-		if (!(p->slowpoll--)) {
-			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
-			u32 active = __poll_portal_slow(p, is);
-
-			if (active) {
-				qm_isr_status_clear(&p->p, active);
-				p->slowpoll = SLOW_POLL_BUSY;
-			} else
-				p->slowpoll = SLOW_POLL_IDLE;
-		}
-	}
-	if ((~p->irq_sources) & QM_PIRQ_DQRI)
-		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
-}
-
-void qman_stop_dequeues(void)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	qman_stop_dequeues_ex(p);
-}
-
-void qman_start_dequeues(void)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	DPAA_ASSERT(p->dqrr_disable_ref > 0);
-	if (!(--p->dqrr_disable_ref))
-		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
-}
-
 void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
 {
 	struct qman_portal *p = qp ? qp : get_affine_portal();
@@ -1424,28 +1336,6 @@ void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
 	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
 }
 
-void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
-{
-	struct qman_portal *p = qp ? qp : get_affine_portal();
-
-	pools &= p->config->pools;
-	p->sdqcr &= ~pools;
-	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
-}
-
-u32 qman_static_dequeue_get(struct qman_portal *qp)
-{
-	struct qman_portal *p = qp ? qp : get_affine_portal();
-	return p->sdqcr;
-}
-
-void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
-{
-	struct qman_portal *p = get_affine_portal();
-
-	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
-}
-
 void qman_dca_index(u8 index, int park_request)
 {
 	struct qman_portal *p = get_affine_portal();
@@ -1563,42 +1453,11 @@ int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
 	return -EIO;
 }
 
-void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
-{
-	/*
-	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
-	 * quiesced. Instead, run some checks.
-	 */
-	switch (fq->state) {
-	case qman_fq_state_parked:
-		DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
-		/* Fallthrough */
-	case qman_fq_state_oos:
-		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
-			qman_release_fqid(fq->fqid);
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-		clear_fq_table_entry(fq->key);
-#endif
-		return;
-	default:
-		break;
-	}
-	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
-}
-
 u32 qman_fq_fqid(struct qman_fq *fq)
 {
 	return fq->fqid;
 }
 
-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
-{
-	if (state)
-		*state = fq->state;
-	if (flags)
-		*flags = fq->flags;
-}
-
 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 {
 	struct qm_mc_command *mcc;
@@ -1695,48 +1554,6 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 	return 0;
 }
 
-int qman_schedule_fq(struct qman_fq *fq)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p;
-
-	int ret = 0;
-	u8 res;
-
-	if (fq->state != qman_fq_state_parked)
-		return -EINVAL;
-#ifdef RTE_LIBRTE_DPAA_HWDEBUG
-	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-		return -EINVAL;
-#endif
-	/* Issue a ALTERFQ_SCHED management command */
-	p = get_affine_portal();
-
-	FQLOCK(fq);
-	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-		     (fq->state != qman_fq_state_parked))) {
-		ret = -EBUSY;
-		goto out;
-	}
-	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
-	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
-	res = mcr->result;
-	if (res != QM_MCR_RESULT_OK) {
-		ret = -EIO;
-		goto out;
-	}
-	fq->state = qman_fq_state_sched;
-out:
-	FQUNLOCK(fq);
-
-	return ret;
-}
-
 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
 {
 	struct qm_mc_command *mcc;
@@ -1866,98 +1683,6 @@ int qman_oos_fq(struct qman_fq *fq)
 	return ret;
 }
 
-int qman_fq_flow_control(struct qman_fq *fq, int xon)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p;
-
-	int ret = 0;
-	u8 res;
-	u8 myverb;
-
-	if ((fq->state == qman_fq_state_oos) ||
-	    (fq->state == qman_fq_state_retired) ||
-		(fq->state == qman_fq_state_parked))
-		return -EINVAL;
-
-#ifdef RTE_LIBRTE_DPAA_HWDEBUG
-	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
-		return -EINVAL;
-#endif
-	/* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
-	p = get_affine_portal();
-	FQLOCK(fq);
-	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
-		     (fq->state == qman_fq_state_parked) ||
-			(fq->state == qman_fq_state_oos) ||
-			(fq->state == qman_fq_state_retired))) {
-		ret = -EBUSY;
-		goto out;
-	}
-	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
-	mcc->alterfq.count = 0;
-	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
-
-	qm_mc_commit(&p->p, myverb);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-
-	res = mcr->result;
-	if (res != QM_MCR_RESULT_OK) {
-		ret = -EIO;
-		goto out;
-	}
-out:
-	FQUNLOCK(fq);
-	return ret;
-}
-
-int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	u8 res;
-
-	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
-	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		*fqd = mcr->queryfq.fqd;
-	hw_fqd_to_cpu(fqd);
-	if (res != QM_MCR_RESULT_OK)
-		return -EIO;
-	return 0;
-}
-
-int qman_query_fq_has_pkts(struct qman_fq *fq)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	int ret = 0;
-	u8 res;
-
-	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
-	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		ret = !!mcr->queryfq_np.frm_cnt;
-	return ret;
-}
-
 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
 {
 	struct qm_mc_command *mcc;
@@ -2022,65 +1747,6 @@ int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
 	return 0;
 }
 
-int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	u8 res, myverb;
-
-	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
-				 QM_MCR_VERB_QUERYWQ;
-	mcc = qm_mc_start(&p->p);
-	mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
-	qm_mc_commit(&p->p, myverb);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK) {
-		int i, array_len;
-
-		wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
-		array_len = ARRAY_SIZE(mcr->querywq.wq_len);
-		for (i = 0; i < array_len; i++)
-			wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
-	}
-	if (res != QM_MCR_RESULT_OK) {
-		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
-		return -EIO;
-	}
-	return 0;
-}
-
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
-		       struct qm_mcr_cgrtestwrite *result)
-{
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-
-	u8 res;
-
-	mcc = qm_mc_start(&p->p);
-	mcc->cgrtestwrite.cgid = cgr->cgrid;
-	mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
-	mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
-	qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		*result = mcr->cgrtestwrite;
-	if (res != QM_MCR_RESULT_OK) {
-		pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
-		return -EIO;
-	}
-	return 0;
-}
-
 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
 {
 	struct qm_mc_command *mcc;
@@ -2116,32 +1782,6 @@ int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
 	return 0;
 }
 
-int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
-{
-	struct qm_mc_result *mcr;
-	struct qman_portal *p = get_affine_portal();
-	u8 res;
-	unsigned int i;
-
-	qm_mc_start(&p->p);
-	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
-	while (!(mcr = qm_mc_result(&p->p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			QM_MCC_VERB_QUERYCONGESTION);
-	res = mcr->result;
-	if (res == QM_MCR_RESULT_OK)
-		*congestion = mcr->querycongestion;
-	if (res != QM_MCR_RESULT_OK) {
-		pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
-		return -EIO;
-	}
-	for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
-		congestion->state.state[i] =
-			be32_to_cpu(congestion->state.state[i]);
-	return 0;
-}
-
 int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
 {
 	struct qman_portal *p = get_affine_portal();
@@ -2179,128 +1819,6 @@ int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
 	return ret;
 }
 
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
-			  u32 vdqcr)
-{
-	struct qman_portal *p;
-	int ret = -EBUSY;
-
-	if ((fq->state != qman_fq_state_parked) &&
-	    (fq->state != qman_fq_state_retired))
-		return -EINVAL;
-	if (vdqcr & QM_VDQCR_FQID_MASK)
-		return -EINVAL;
-	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-		return -EBUSY;
-	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
-
-	p = get_affine_portal();
-
-	if (!p->vdqcr_owned) {
-		FQLOCK(fq);
-		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
-			goto escape;
-		fq_set(fq, QMAN_FQ_STATE_VDQCR);
-		FQUNLOCK(fq);
-		p->vdqcr_owned = fq;
-		ret = 0;
-	}
-escape:
-	if (ret)
-		return ret;
-
-	/* VDQCR is set */
-	qm_dqrr_vdqcr_set(&p->p, vdqcr);
-	return 0;
-}
-
-static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
-{
-	if (avail)
-		qm_eqcr_cce_prefetch(&p->p);
-	else
-		qm_eqcr_cce_update(&p->p);
-}
-
-int qman_eqcr_is_empty(void)
-{
-	struct qman_portal *p = get_affine_portal();
-	u8 avail;
-
-	update_eqcr_ci(p, 0);
-	avail = qm_eqcr_get_fill(&p->p);
-	return (avail == 0);
-}
-
-void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
-{
-	if (affine) {
-		struct qman_portal *p = get_affine_portal();
-
-		p->cb_dc_ern = handler;
-	} else
-		cb_dc_ern = handler;
-}
-
-static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
-					struct qman_fq *fq,
-					const struct qm_fd *fd,
-					u32 flags)
-{
-	struct qm_eqcr_entry *eq;
-	u8 avail;
-
-	if (p->use_eqcr_ci_stashing) {
-		/*
-		 * The stashing case is easy, only update if we need to in
-		 * order to try and liberate ring entries.
-		 */
-		eq = qm_eqcr_start_stash(&p->p);
-	} else {
-		/*
-		 * The non-stashing case is harder, need to prefetch ahead of
-		 * time.
-		 */
-		avail = qm_eqcr_get_avail(&p->p);
-		if (avail < 2)
-			update_eqcr_ci(p, avail);
-		eq = qm_eqcr_start_no_stash(&p->p);
-	}
-
-	if (unlikely(!eq))
-		return NULL;
-
-	if (flags & QMAN_ENQUEUE_FLAG_DCA)
-		eq->dca = QM_EQCR_DCA_ENABLE |
-			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
-					QM_EQCR_DCA_PARK : 0) |
-			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
-	eq->fqid = cpu_to_be32(fq->fqid);
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
-	eq->tag = cpu_to_be32(fq->key);
-#else
-	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
-#endif
-	eq->fd = *fd;
-	cpu_to_hw_fd(&eq->fd);
-	return eq;
-}
-
-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
-{
-	struct qman_portal *p = get_affine_portal();
-	struct qm_eqcr_entry *eq;
-
-	eq = try_p_eq_start(p, fq, fd, flags);
-	if (!eq)
-		return -EBUSY;
-	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
-		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-	/* Factor the below out, it's used from qman_enqueue_orp() too */
-	return 0;
-}
-
 int qman_enqueue_multi(struct qman_fq *fq,
 		       const struct qm_fd *fd, u32 *flags,
 		int frames_to_send)
@@ -2442,37 +1960,6 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
 	return sent;
 }
 
-int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-		     struct qman_fq *orp, u16 orp_seqnum)
-{
-	struct qman_portal *p  = get_affine_portal();
-	struct qm_eqcr_entry *eq;
-
-	eq = try_p_eq_start(p, fq, fd, flags);
-	if (!eq)
-		return -EBUSY;
-	/* Process ORP-specifics here */
-	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
-		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
-	else {
-		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
-		if (flags & QMAN_ENQUEUE_FLAG_NESN)
-			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
-		else
-			/* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
-			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
-	}
-	eq->seqnum = cpu_to_be16(orp_seqnum);
-	eq->orp = cpu_to_be32(orp->fqid);
-	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
-	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
-		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
-				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
-		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
-
-	return 0;
-}
-
 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
 		    struct qm_mcc_initcgr *opts)
 {
@@ -2581,52 +2068,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	return ret;
 }
 
-int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-			   struct qm_mcc_initcgr *opts)
-{
-	struct qm_mcc_initcgr local_opts;
-	struct qm_mcr_querycgr cgr_state;
-	int ret;
-
-	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
-		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
-		return -EINVAL;
-	}
-	/* We have to check that the provided CGRID is within the limits of the
-	 * data-structures, for obvious reasons. However we'll let h/w take
-	 * care of determining whether it's within the limits of what exists on
-	 * the SoC.
-	 */
-	if (cgr->cgrid >= __CGR_NUM)
-		return -EINVAL;
-
-	ret = qman_query_cgr(cgr, &cgr_state);
-	if (ret)
-		return ret;
-
-	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-	if (opts)
-		local_opts = *opts;
-
-	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-		local_opts.cgr.cscn_targ_upd_ctrl =
-				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
-				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
-	else
-		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-					TARG_DCP_MASK(dcp_portal);
-	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
-
-	/* send init if flags indicate so */
-	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
-		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
-				      &local_opts);
-	else
-		ret = qman_modify_cgr(cgr, 0, &local_opts);
-
-	return ret;
-}
-
 int qman_delete_cgr(struct qman_cgr *cgr)
 {
 	struct qm_mcr_querycgr cgr_state;
@@ -2674,222 +2115,3 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 put_portal:
 	return ret;
 }
-
-int qman_shutdown_fq(u32 fqid)
-{
-	struct qman_portal *p;
-	struct qm_portal *low_p;
-	struct qm_mc_command *mcc;
-	struct qm_mc_result *mcr;
-	u8 state;
-	int orl_empty, fq_empty, drain = 0;
-	u32 result;
-	u32 channel, wq;
-	u16 dest_wq;
-
-	p = get_affine_portal();
-	low_p = &p->p;
-
-	/* Determine the state of the FQID */
-	mcc = qm_mc_start(low_p);
-	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
-	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
-	while (!(mcr = qm_mc_result(low_p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
-	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
-	if (state == QM_MCR_NP_STATE_OOS)
-		return 0; /* Already OOS, no need to do anymore checks */
-
-	/* Query which channel the FQ is using */
-	mcc = qm_mc_start(low_p);
-	mcc->queryfq.fqid = cpu_to_be32(fqid);
-	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
-	while (!(mcr = qm_mc_result(low_p)))
-		cpu_relax();
-	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
-
-	/* Need to store these since the MCR gets reused */
-	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
-	channel = dest_wq & 0x7;
-	wq = dest_wq >> 3;
-
-	switch (state) {
-	case QM_MCR_NP_STATE_TEN_SCHED:
-	case QM_MCR_NP_STATE_TRU_SCHED:
-	case QM_MCR_NP_STATE_ACTIVE:
-	case QM_MCR_NP_STATE_PARKED:
-		orl_empty = 0;
-		mcc = qm_mc_start(low_p);
-		mcc->alterfq.fqid = cpu_to_be32(fqid);
-		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
-		while (!(mcr = qm_mc_result(low_p)))
-			cpu_relax();
-		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			   QM_MCR_VERB_ALTER_RETIRE);
-		result = mcr->result; /* Make a copy as we reuse MCR below */
-
-		if (result == QM_MCR_RESULT_PENDING) {
-			/* Need to wait for the FQRN in the message ring, which
-			 * will only occur once the FQ has been drained.  In
-			 * order for the FQ to drain the portal needs to be set
-			 * to dequeue from the channel the FQ is scheduled on
-			 */
-			const struct qm_mr_entry *msg;
-			const struct qm_dqrr_entry *dqrr = NULL;
-			int found_fqrn = 0;
-			__maybe_unused u16 dequeue_wq = 0;
-
-			/* Flag that we need to drain FQ */
-			drain = 1;
-
-			if (channel >= qm_channel_pool1 &&
-			    channel < (u16)(qm_channel_pool1 + 15)) {
-				/* Pool channel, enable the bit in the portal */
-				dequeue_wq = (channel -
-					      qm_channel_pool1 + 1) << 4 | wq;
-			} else if (channel < qm_channel_pool1) {
-				/* Dedicated channel */
-				dequeue_wq = wq;
-			} else {
-				pr_info("Cannot recover FQ 0x%x,"
-					" it is scheduled on channel 0x%x",
-					fqid, channel);
-				return -EBUSY;
-			}
-			/* Set the sdqcr to drain this channel */
-			if (channel < qm_channel_pool1)
-				qm_dqrr_sdqcr_set(low_p,
-						  QM_SDQCR_TYPE_ACTIVE |
-					  QM_SDQCR_CHANNELS_DEDICATED);
-			else
-				qm_dqrr_sdqcr_set(low_p,
-						  QM_SDQCR_TYPE_ACTIVE |
-						  QM_SDQCR_CHANNELS_POOL_CONV
-						  (channel));
-			while (!found_fqrn) {
-				/* Keep draining DQRR while checking the MR*/
-				qm_dqrr_pvb_update(low_p);
-				dqrr = qm_dqrr_current(low_p);
-				while (dqrr) {
-					qm_dqrr_cdc_consume_1ptr(
-						low_p, dqrr, 0);
-					qm_dqrr_pvb_update(low_p);
-					qm_dqrr_next(low_p);
-					dqrr = qm_dqrr_current(low_p);
-				}
-				/* Process message ring too */
-				qm_mr_pvb_update(low_p);
-				msg = qm_mr_current(low_p);
-				while (msg) {
-					if ((msg->ern.verb &
-					     QM_MR_VERB_TYPE_MASK)
-					    == QM_MR_VERB_FQRN)
-						found_fqrn = 1;
-					qm_mr_next(low_p);
-					qm_mr_cci_consume_to_current(low_p);
-					qm_mr_pvb_update(low_p);
-					msg = qm_mr_current(low_p);
-				}
-				cpu_relax();
-			}
-		}
-		if (result != QM_MCR_RESULT_OK &&
-		    result !=  QM_MCR_RESULT_PENDING) {
-			/* error */
-			pr_err("qman_retire_fq failed on FQ 0x%x,"
-			       " result=0x%x\n", fqid, result);
-			return -1;
-		}
-		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
-			/* ORL had no entries, no need to wait until the
-			 * ERNs come in.
-			 */
-			orl_empty = 1;
-		}
-		/* Retirement succeeded, check to see if FQ needs
-		 * to be drained.
-		 */
-		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
-			/* FQ is Not Empty, drain using volatile DQ commands */
-			fq_empty = 0;
-			do {
-				const struct qm_dqrr_entry *dqrr = NULL;
-				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
-
-				qm_dqrr_vdqcr_set(low_p, vdqcr);
-
-				/* Wait for a dequeue to occur */
-				while (dqrr == NULL) {
-					qm_dqrr_pvb_update(low_p);
-					dqrr = qm_dqrr_current(low_p);
-					if (!dqrr)
-						cpu_relax();
-				}
-				/* Process the dequeues, making sure to
-				 * empty the ring completely.
-				 */
-				while (dqrr) {
-					if (dqrr->fqid == fqid &&
-					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
-						fq_empty = 1;
-					qm_dqrr_cdc_consume_1ptr(low_p,
-								 dqrr, 0);
-					qm_dqrr_pvb_update(low_p);
-					qm_dqrr_next(low_p);
-					dqrr = qm_dqrr_current(low_p);
-				}
-			} while (fq_empty == 0);
-		}
-		qm_dqrr_sdqcr_set(low_p, 0);
-
-		/* Wait for the ORL to have been completely drained */
-		while (orl_empty == 0) {
-			const struct qm_mr_entry *msg;
-
-			qm_mr_pvb_update(low_p);
-			msg = qm_mr_current(low_p);
-			while (msg) {
-				if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
-				    QM_MR_VERB_FQRL)
-					orl_empty = 1;
-				qm_mr_next(low_p);
-				qm_mr_cci_consume_to_current(low_p);
-				qm_mr_pvb_update(low_p);
-				msg = qm_mr_current(low_p);
-			}
-			cpu_relax();
-		}
-		mcc = qm_mc_start(low_p);
-		mcc->alterfq.fqid = cpu_to_be32(fqid);
-		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
-		while (!(mcr = qm_mc_result(low_p)))
-			cpu_relax();
-		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			   QM_MCR_VERB_ALTER_OOS);
-		if (mcr->result != QM_MCR_RESULT_OK) {
-			pr_err(
-			"OOS after drain Failed on FQID 0x%x, result 0x%x\n",
-			       fqid, mcr->result);
-			return -1;
-		}
-		return 0;
-
-	case QM_MCR_NP_STATE_RETIRED:
-		/* Send OOS Command */
-		mcc = qm_mc_start(low_p);
-		mcc->alterfq.fqid = cpu_to_be32(fqid);
-		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
-		while (!(mcr = qm_mc_result(low_p)))
-			cpu_relax();
-		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
-			   QM_MCR_VERB_ALTER_OOS);
-		if (mcr->result) {
-			pr_err("OOS Failed on FQID 0x%x\n", fqid);
-			return -1;
-		}
-		return 0;
-
-	}
-	return -1;
-}
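
The block removed above is the low-level FQ shutdown helper in qman.c
(qman_shutdown_fq(), whose prototype is also dropped from fsl_qman.h
further down): it queried the FQ state, retired the queue (draining DQRR
and the message ring, issuing volatile dequeues while retirement was
pending), waited for the ORL to empty and finally took the FQ out of
service. An orderly teardown is still possible through the exported API
this patch keeps, qman_retire_fq() and qman_oos_fq(). A minimal sketch,
assuming the queue is already drained and the portal is affine to the
calling thread (error paths trimmed, helper name is illustrative only):

	static int teardown_idle_fq(struct qman_fq *fq)
	{
		u32 flags = 0;
		int ret;

		ret = qman_retire_fq(fq, &flags);	/* scheduled/parked -> retired */
		if (ret)
			return ret;
		return qman_oos_fq(fq);			/* retired -> out-of-service */
	}
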
diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h
index 8254729e66..25306804a5 100644
--- a/drivers/bus/dpaa/base/qbman/qman_priv.h
+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -165,15 +165,6 @@ struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
 void qm_put_unused_portal(struct qm_portal_config *pcfg);
 void qm_set_liodns(struct qm_portal_config *pcfg);
 
-/* This CGR feature is supported by h/w and required by unit-tests and the
- * debugfs hooks, so is implemented in the driver. However it allows an explicit
- * corruption of h/w fields by s/w that are usually incorruptible (because the
- * counters are usually maintained entirely within h/w). As such, we declare
- * this API internally.
- */
-int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
-		       struct qm_mcr_cgrtestwrite *result);
-
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 /* If the fq object pointer is greater than the size of context_b field,
  * than a lookup table is required.
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 3098e23093..ca1e27aeaf 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -359,11 +359,6 @@ rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
 	return 0;
 }
 
-int rte_dpaa_portal_fq_close(struct qman_fq *fq)
-{
-	return fsl_qman_fq_portal_destroy(fq->qp);
-}
-
 void
 dpaa_portal_finish(void *arg)
 {
@@ -488,21 +483,6 @@ rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
 	driver->dpaa_bus = &rte_dpaa_bus;
 }
 
-/* un-register a dpaa bus based dpaa driver */
-void
-rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
-{
-	struct rte_dpaa_bus *dpaa_bus;
-
-	BUS_INIT_FUNC_TRACE();
-
-	dpaa_bus = driver->dpaa_bus;
-
-	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
-	/* Update Bus references */
-	driver->dpaa_bus = NULL;
-}
-
 static int
 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
 		      struct rte_dpaa_device *dev)
diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h
index 82da2fcfe0..a06d29eb2d 100644
--- a/drivers/bus/dpaa/include/fsl_bman.h
+++ b/drivers/bus/dpaa/include/fsl_bman.h
@@ -252,8 +252,6 @@ static inline int bman_reserve_bpid(u32 bpid)
 
 void bman_seed_bpid_range(u32 bpid, unsigned int count);
 
-int bman_shutdown_pool(u32 bpid);
-
 /**
  * bman_new_pool - Allocates a Buffer Pool object
  * @params: parameters specifying the buffer pool ID and behaviour
@@ -310,12 +308,6 @@ __rte_internal
 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
 		 u32 flags);
 
-/**
- * bman_query_pools - Query all buffer pool states
- * @state: storage for the queried availability and depletion states
- */
-int bman_query_pools(struct bm_pool_state *state);
-
 /**
  * bman_query_free_buffers - Query how many free buffers are in buffer pool
  * @pool: the buffer pool object to query
@@ -325,13 +317,6 @@ int bman_query_pools(struct bm_pool_state *state);
 __rte_internal
 u32 bman_query_free_buffers(struct bman_pool *pool);
 
-/**
- * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
- * @pool: the buffer pool object to which the thresholds will be set
- * @thresholds: the new thresholds
- */
-int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
-
 /**
  * bm_pool_set_hw_threshold - Change the buffer pool's thresholds
  * @pool: Pool id
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index a3cf77f0e3..71f5a2f8cf 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -64,12 +64,6 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
-/* Set ignore pause option for a specific interface */
-void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
-
-/* Set max frame length */
-void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
-
 /* Enable/disable Rx promiscuous mode on specified interface */
 __rte_internal
 void fman_if_promiscuous_enable(struct fman_if *p);
@@ -114,18 +108,11 @@ int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
 __rte_internal
 void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
 
-/* Get IC transfer params */
-int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
-
 /* Set IC transfer params */
 __rte_internal
 int fman_if_set_ic_params(struct fman_if *fm_if,
 			  const struct fman_if_ic_params *icp);
 
-/* Get interface fd->offset value */
-__rte_internal
-int fman_if_get_fdoff(struct fman_if *fm_if);
-
 /* Set interface fd->offset value */
 __rte_internal
 void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
@@ -138,20 +125,10 @@ int fman_if_get_sg_enable(struct fman_if *fm_if);
 __rte_internal
 void fman_if_set_sg(struct fman_if *fm_if, int enable);
 
-/* Get interface Max Frame length (MTU) */
-uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
-
 /* Set interface  Max Frame length (MTU) */
 __rte_internal
 void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
 
-/* Set interface next invoked action for dequeue operation */
-void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
-
-/* discard error packets on rx */
-__rte_internal
-void fman_if_discard_rx_errors(struct fman_if *fm_if);
-
 __rte_internal
 void fman_if_receive_rx_errors(struct fman_if *fm_if,
 	unsigned int err_eq);
@@ -162,11 +139,6 @@ void fman_if_set_mcast_filter_table(struct fman_if *p);
 __rte_internal
 void fman_if_reset_mcast_filter_table(struct fman_if *p);
 
-int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
-
-int fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth);
-
-
 /* Enable/disable Rx on all interfaces */
 static inline void fman_if_enable_all_rx(void)
 {
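
The fsl_fman.h hunks below and above drop the read-side accessors
(fman_if_get_ic_params(), fman_if_get_fdoff(), fman_if_get_maxfrm())
along with a few setters that have no in-tree callers, while the
write-side calls the dpaa PMD actually uses stay exported. A sketch of
Rx setup with the retained setters; the values are placeholders rather
than recommended defaults, and fm_if is assumed to be a valid interface
handle:

	struct fman_if_ic_params icp = { 0 };

	fman_if_set_fdoff(fm_if, 128);		/* frame descriptor offset */
	fman_if_set_maxfrm(fm_if, 1518);	/* max frame length (MTU) */
	fman_if_set_ic_params(fm_if, &icp);	/* internal-context copy params */
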
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index 10212f0fd5..b24aa76409 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1379,16 +1379,6 @@ int qman_irqsource_remove(u32 bits);
 __rte_internal
 int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
 
-/**
- * qman_affine_channel - return the channel ID of an portal
- * @cpu: the cpu whose affine portal is the subject of the query
- *
- * If @cpu is -1, the affine portal for the current CPU will be used. It is a
- * bug to call this function for any value of @cpu (other than -1) that is not a
- * member of the cpu mask.
- */
-u16 qman_affine_channel(int cpu);
-
 __rte_internal
 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 				 void **bufs, struct qman_portal *q);
@@ -1428,55 +1418,6 @@ __rte_internal
 void qman_dqrr_consume(struct qman_fq *fq,
 		       struct qm_dqrr_entry *dq);
 
-/**
- * qman_poll_dqrr - process DQRR (fast-path) entries
- * @limit: the maximum number of DQRR entries to process
- *
- * Use of this function requires that DQRR processing not be interrupt-driven.
- * Ie. the value returned by qman_irqsource_get() should not include
- * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
- * this function will return -EINVAL, otherwise the return value is >=0 and
- * represents the number of DQRR entries processed.
- */
-__rte_internal
-int qman_poll_dqrr(unsigned int limit);
-
-/**
- * qman_poll
- *
- * Dispatcher logic on a cpu can use this to trigger any maintenance of the
- * affine portal. There are two classes of portal processing in question;
- * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
- * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
- * thresholds, congestion state changes, etc). This function does whatever
- * processing is not triggered by interrupts.
- *
- * Note, if DQRR and some slow-path processing are poll-driven (rather than
- * interrupt-driven) then this function uses a heuristic to determine how often
- * to run slow-path processing - as slow-path processing introduces at least a
- * minimum latency each time it is run, whereas fast-path (DQRR) processing is
- * close to zero-cost if there is no work to be done.
- */
-void qman_poll(void);
-
-/**
- * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
- *
- * Disables DQRR processing of the portal. This is reference-counted, so
- * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
- * truly re-enable dequeuing.
- */
-void qman_stop_dequeues(void);
-
-/**
- * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
- *
- * Enables DQRR processing of the portal. This is reference-counted, so
- * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
- * truly re-enable dequeuing.
- */
-void qman_start_dequeues(void);
-
 /**
  * qman_static_dequeue_add - Add pool channels to the portal SDQCR
  * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
@@ -1488,39 +1429,6 @@ void qman_start_dequeues(void);
 __rte_internal
 void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
 
-/**
- * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
- * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
- *
- * Removes a set of pool channels from the portal's static dequeue command
- * register (SDQCR). The requested pools are limited to those the portal has
- * dequeue access to.
- */
-void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
-
-/**
- * qman_static_dequeue_get - return the portal's current SDQCR
- *
- * Returns the portal's current static dequeue command register (SDQCR). The
- * entire register is returned, so if only the currently-enabled pool channels
- * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
- */
-u32 qman_static_dequeue_get(struct qman_portal *qp);
-
-/**
- * qman_dca - Perform a Discrete Consumption Acknowledgment
- * @dq: the DQRR entry to be consumed
- * @park_request: indicates whether the held-active @fq should be parked
- *
- * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
- * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
- * does not take a 'portal' argument but implies the core affine portal from the
- * cpu that is currently executing the function. For reasons of locking, this
- * function must be called from the same CPU as that which processed the DQRR
- * entry in the first place.
- */
-void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
-
 /**
  * qman_dca_index - Perform a Discrete Consumption Acknowledgment
  * @index: the DQRR index to be consumed
@@ -1536,36 +1444,6 @@ void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
 __rte_internal
 void qman_dca_index(u8 index, int park_request);
 
-/**
- * qman_eqcr_is_empty - Determine if portal's EQCR is empty
- *
- * For use in situations where a cpu-affine caller needs to determine when all
- * enqueues for the local portal have been processed by Qman but can't use the
- * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
- * The function forces tracking of EQCR consumption (which normally doesn't
- * happen until enqueue processing needs to find space to put new enqueue
- * commands), and returns zero if the ring still has unprocessed entries,
- * non-zero if it is empty.
- */
-int qman_eqcr_is_empty(void);
-
-/**
- * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
- * @handler: callback for processing DCP ERNs
- * @affine: whether this handler is specific to the locally affine portal
- *
- * If a hardware block's interface to Qman (ie. its direct-connect portal, or
- * DCP) is configured not to receive enqueue rejections, then any enqueues
- * through that DCP that are rejected will be sent to a given software portal.
- * If @affine is non-zero, then this handler will only be used for DCP ERNs
- * received on the portal affine to the current CPU. If multiple CPUs share a
- * portal and they all call this function, they will be setting the handler for
- * the same portal! If @affine is zero, then this handler will be global to all
- * portals handled by this instance of the driver. Only those portals that do
- * not have their own affine handler will use the global handler.
- */
-void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
-
 	/* FQ management */
 	/* ------------- */
 /**
@@ -1594,18 +1472,6 @@ void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
 __rte_internal
 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
 
-/**
- * qman_destroy_fq - Deallocates a FQ
- * @fq: the frame queue object to release
- * @flags: bit-mask of QMAN_FQ_FREE_*** options
- *
- * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
- * not deallocated but the caller regains ownership, to do with as desired. The
- * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
- * is specified, in which case it may also be in the 'parked' state.
- */
-void qman_destroy_fq(struct qman_fq *fq, u32 flags);
-
 /**
  * qman_fq_fqid - Queries the frame queue ID of a FQ object
  * @fq: the frame queue object to query
@@ -1613,19 +1479,6 @@ void qman_destroy_fq(struct qman_fq *fq, u32 flags);
 __rte_internal
 u32 qman_fq_fqid(struct qman_fq *fq);
 
-/**
- * qman_fq_state - Queries the state of a FQ object
- * @fq: the frame queue object to query
- * @state: pointer to state enum to return the FQ scheduling state
- * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
- *
- * Queries the state of the FQ object, without performing any h/w commands.
- * This captures the state, as seen by the driver, at the time the function
- * executes.
- */
-__rte_internal
-void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
-
 /**
  * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
  * @fq: the frame queue object to modify, must be 'parked' or new.
@@ -1663,15 +1516,6 @@ void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
 __rte_internal
 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
 
-/**
- * qman_schedule_fq - Schedules a FQ
- * @fq: the frame queue object to schedule, must be 'parked'
- *
- * Schedules the frame queue, which must be Parked, which takes it to
- * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
- */
-int qman_schedule_fq(struct qman_fq *fq);
-
 /**
  * qman_retire_fq - Retires a FQ
  * @fq: the frame queue object to retire
@@ -1703,32 +1547,6 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
 __rte_internal
 int qman_oos_fq(struct qman_fq *fq);
 
-/**
- * qman_fq_flow_control - Set the XON/XOFF state of a FQ
- * @fq: the frame queue object to be set to XON/XOFF state, must not be 'oos',
- * or 'retired' or 'parked' state
- * @xon: boolean to set fq in XON or XOFF state
- *
- * The frame should be in Tentatively Scheduled state or Truly Schedule sate,
- * otherwise the IFSI interrupt will be asserted.
- */
-int qman_fq_flow_control(struct qman_fq *fq, int xon);
-
-/**
- * qman_query_fq - Queries FQD fields (via h/w query command)
- * @fq: the frame queue object to be queried
- * @fqd: storage for the queried FQD fields
- */
-int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
-
-/**
- * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
- * if packets are in the frame queue. If there are no packets on frame
- * queue '0' is returned.
- * @fq: the frame queue object to be queried
- */
-int qman_query_fq_has_pkts(struct qman_fq *fq);
-
 /**
  * qman_query_fq_np - Queries non-programmable FQD fields
  * @fq: the frame queue object to be queried
@@ -1745,73 +1563,6 @@ int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
 __rte_internal
 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
 
-/**
- * qman_query_wq - Queries work queue lengths
- * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
- *		to this software portal. Otherwise, query length of WQs in a
- *		channel  specified in wq.
- * @wq: storage for the queried WQs lengths. Also specified the channel to
- *	to query if query_dedicated is zero.
- */
-int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
-
-/**
- * qman_volatile_dequeue - Issue a volatile dequeue command
- * @fq: the frame queue object to dequeue from
- * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
- * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
- *
- * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
- * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
- * the VDQCR is already in use, otherwise returns non-zero for failure. If
- * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
- * the VDQCR command has finished executing (ie. once the callback for the last
- * DQRR entry resulting from the VDQCR command has been called). If not using
- * the FINISH flag, completion can be determined either by detecting the
- * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
- * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
- * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
- * "flags" retrieved from qman_fq_state().
- */
-__rte_internal
-int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
-
-/**
- * qman_enqueue - Enqueue a frame to a frame queue
- * @fq: the frame queue object to enqueue to
- * @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
- *
- * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
- * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
- * field is ignored. The return value is non-zero on error, such as ring full
- * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
- * specified), etc. If the ring is full and FLAG_WAIT is specified, this
- * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
- * interrupt will assert when Qman consumes the EQCR entry (subject to "status
- * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
- * perform an implied "discrete consumption acknowledgment" on the dequeue
- * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
- * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
- * this implicit DCA can delay the release of a "held active" frame queue
- * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
- * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
- * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
- * acknowledgment should "park request" the "held active" frame queue. Ie.
- * when the portal eventually releases that frame queue, it will be left in the
- * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
- * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
- * is requested, and the FQ is a member of a congestion group, then this
- * function returns -EAGAIN if the congestion group is currently congested.
- * Note, this does not eliminate ERNs, as the async interface means we can be
- * sending enqueue commands to an un-congested FQ that becomes congested before
- * the enqueue commands are processed, but it does minimise needless thrashing
- * of an already busy hardware resource by throttling many of the to-be-dropped
- * enqueues "at the source".
- */
-__rte_internal
-int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
-
 __rte_internal
 int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
 		       int frames_to_send);
@@ -1846,45 +1597,6 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
 
 typedef int (*qman_cb_precommit) (void *arg);
 
-/**
- * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
- * @fq: the frame queue object to enqueue to
- * @fd: a descriptor of the frame to be enqueued
- * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
- * @orp: the frame queue object used as an order restoration point.
- * @orp_seqnum: the sequence number of this frame in the order restoration path
- *
- * Similar to qman_enqueue(), but with the addition of an Order Restoration
- * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
- * enqueue operation to employ order restoration. Each frame queue object acts
- * as an Order Definition Point (ODP) by providing each frame dequeued from it
- * with an incrementing sequence number, this value is generally ignored unless
- * that sequence of dequeued frames will need order restoration later. Each
- * frame queue object also encapsulates an Order Restoration Point (ORP), which
- * is a re-assembly context for re-ordering frames relative to their sequence
- * numbers as they are enqueued. The ORP does not have to be within the frame
- * queue that receives the enqueued frame, in fact it is usually the frame
- * queue from which the frames were originally dequeued. For the purposes of
- * order restoration, multiple frames (or "fragments") can be enqueued for a
- * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
- * enqueues except the final fragment of a given sequence number. Ordering
- * between sequence numbers is guaranteed, even if fragments of different
- * sequence numbers are interlaced with one another. Fragments of the same
- * sequence number will retain the order in which they are enqueued. If no
- * enqueue is to performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
- * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
- * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
- * sequence number should become the ORP's "Next Expected Sequence Number".
- *
- * Side note: a frame queue object can be used purely as an ORP, without
- * carrying any frames at all. Care should be taken not to deallocate a frame
- * queue object that is being actively used as an ORP, as a future allocation
- * of the frame queue object may start using the internal ORP before the
- * previous use has finished.
- */
-int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
-		     struct qman_fq *orp, u16 orp_seqnum);
-
 /**
  * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
  * @result: is set by the API to the base FQID of the allocated range
@@ -1922,8 +1634,6 @@ static inline void qman_release_fqid(u32 fqid)
 
 void qman_seed_fqid_range(u32 fqid, unsigned int count);
 
-int qman_shutdown_fq(u32 fqid);
-
 /**
  * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
  * @fqid: the base FQID of the range to deallocate
@@ -2001,17 +1711,6 @@ __rte_internal
 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 		    struct qm_mcc_initcgr *opts);
 
-/**
- * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
- * @cgr: the 'cgr' object, with fields filled in
- * @flags: QMAN_CGR_FLAG_* values
- * @dcp_portal: the DCP portal to which the cgr object is registered.
- * @opts: optional state of CGR settings
- *
- */
-int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
-			   struct qm_mcc_initcgr *opts);
-
 /**
  * qman_delete_cgr - Deregisters a congestion group object
  * @cgr: the 'cgr' object to deregister
@@ -2048,12 +1747,6 @@ int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
  */
 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
 
-/**
- * qman_query_congestion - Queries the state of all congestion groups
- * @congestion: storage for the queried state of all congestion groups
- */
-int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
-
 /**
  * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
  * @result: is set by the API to the base CGR ID of the allocated range
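
Among the fsl_qman.h removals, the single-frame qman_enqueue() and
qman_volatile_dequeue() disappear while the burst-oriented
qman_enqueue_multi() remains exported (see the version.map hunk below).
A single frame can still be sent through the retained burst call; a
sketch, assuming fd is a fully initialised frame descriptor and no
per-frame DCA flags are required:

	u32 flags = 0;
	int sent;

	sent = qman_enqueue_multi(fq, &fd, &flags, 1);
	if (sent != 1) {
		/* EQCR full or FQ congested: retry or drop the frame */
	}
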
diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
index dcf35e4adb..3a5df9bf7e 100644
--- a/drivers/bus/dpaa/include/fsl_usd.h
+++ b/drivers/bus/dpaa/include/fsl_usd.h
@@ -51,16 +51,9 @@ struct dpaa_raw_portal {
 	uint64_t cena;
 };
 
-int qman_allocate_raw_portal(struct dpaa_raw_portal *portal);
-int qman_free_raw_portal(struct dpaa_raw_portal *portal);
-
-int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
-int bman_free_raw_portal(struct dpaa_raw_portal *portal);
-
 /* Obtain thread-local UIO file-descriptors */
 __rte_internal
 int qman_thread_fd(void);
-int bman_thread_fd(void);
 
 /* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
  * line before notifying us, and this post-processing re-enables it once
@@ -70,12 +63,8 @@ int bman_thread_fd(void);
 __rte_internal
 void qman_thread_irq(void);
 
-__rte_internal
-void bman_thread_irq(void);
 __rte_internal
 void qman_fq_portal_thread_irq(struct qman_portal *qp);
-__rte_internal
-void qman_clear_irq(void);
 
 /* Global setup */
 int qman_global_init(void);
diff --git a/drivers/bus/dpaa/include/netcfg.h b/drivers/bus/dpaa/include/netcfg.h
index d7d1befd24..815b3ba087 100644
--- a/drivers/bus/dpaa/include/netcfg.h
+++ b/drivers/bus/dpaa/include/netcfg.h
@@ -49,12 +49,6 @@ struct netcfg_interface {
 __rte_internal
 struct netcfg_info *netcfg_acquire(void);
 
-/* cfg_ptr: configuration information pointer.
- * Frees the resources allocated by the configuration layer.
- */
-__rte_internal
-void netcfg_release(struct netcfg_info *cfg_ptr);
-
 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
 /* cfg_ptr: configuration information pointer.
  * This function dumps configuration data to stdout.
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 48d5cf4625..40d82412df 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -214,16 +214,6 @@ rte_dpaa_mem_vtop(void *vaddr)
 __rte_internal
 void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
 
-/**
- * Unregister a DPAA driver.
- *
- * @param driver
- *	A pointer to a rte_dpaa_driver structure describing the driver
- *	to be unregistered.
- */
-__rte_internal
-void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
-
 /**
  * Initialize a DPAA portal
  *
@@ -239,9 +229,6 @@ int rte_dpaa_portal_init(void *arg);
 __rte_internal
 int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
 
-__rte_internal
-int rte_dpaa_portal_fq_close(struct qman_fq *fq);
-
 /**
  * Cleanup a DPAA Portal
  */
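
rte_dpaa_driver_unregister() and rte_dpaa_portal_fq_close() are gone
from the public bus header; driver hook-up keeps working through
rte_dpaa_driver_register(), normally reached from the bus registration
macro in a PMD constructor. An illustrative, hypothetical registration,
assuming the usual probe/remove callbacks:

	static struct rte_dpaa_driver example_pmd = {
		.drv_type = FSL_DPAA_ETH,
		.probe = example_probe,
		.remove = example_remove,
	};

	RTE_PMD_REGISTER_DPAA(net_example, example_pmd);
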
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index fe4f9ac5aa..98f1e00582 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -7,7 +7,6 @@ INTERNAL {
 	bman_new_pool;
 	bman_query_free_buffers;
 	bman_release;
-	bman_thread_irq;
 	dpaa_get_ioctl_version_number;
 	dpaa_get_eth_port_cfg;
 	dpaa_get_qm_channel_caam;
@@ -25,11 +24,9 @@ INTERNAL {
 	fman_if_add_mac_addr;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
-	fman_if_discard_rx_errors;
 	fman_if_enable_rx;
 	fman_if_get_fc_quanta;
 	fman_if_get_fc_threshold;
-	fman_if_get_fdoff;
 	fman_if_get_sg_enable;
 	fman_if_loopback_disable;
 	fman_if_loopback_enable;
@@ -52,19 +49,16 @@ INTERNAL {
 	fman_if_receive_rx_errors;
 	fsl_qman_fq_portal_create;
 	netcfg_acquire;
-	netcfg_release;
 	per_lcore_dpaa_io;
 	qman_alloc_cgrid_range;
 	qman_alloc_fqid_range;
 	qman_alloc_pool_range;
-	qman_clear_irq;
 	qman_create_cgr;
 	qman_create_fq;
 	qman_dca_index;
 	qman_delete_cgr;
 	qman_dequeue;
 	qman_dqrr_consume;
-	qman_enqueue;
 	qman_enqueue_multi;
 	qman_enqueue_multi_fq;
 	qman_ern_poll_free;
@@ -79,7 +73,6 @@ INTERNAL {
 	qman_irqsource_remove;
 	qman_modify_cgr;
 	qman_oos_fq;
-	qman_poll_dqrr;
 	qman_portal_dequeue;
 	qman_portal_poll_rx;
 	qman_query_fq_frm_cnt;
@@ -92,10 +85,7 @@ INTERNAL {
 	qman_static_dequeue_add;
 	qman_thread_fd;
 	qman_thread_irq;
-	qman_volatile_dequeue;
 	rte_dpaa_driver_register;
-	rte_dpaa_driver_unregister;
-	rte_dpaa_portal_fq_close;
 	rte_dpaa_portal_fq_init;
 	rte_dpaa_portal_init;
 
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 58435589b2..51749764e7 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -521,25 +521,6 @@ rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
 	driver->fslmc_bus = &rte_fslmc_bus;
 }
 
-/*un-register a fslmc bus based dpaa2 driver */
-void
-rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
-{
-	struct rte_fslmc_bus *fslmc_bus;
-
-	fslmc_bus = driver->fslmc_bus;
-
-	/* Cleanup the PA->VA Translation table; From whereever this function
-	 * is called from.
-	 */
-	if (rte_eal_iova_mode() == RTE_IOVA_PA)
-		dpaax_iova_table_depopulate();
-
-	TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
-	/* Update Bus references */
-	driver->fslmc_bus = NULL;
-}
-
 /*
  * All device has iova as va
  */
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index d9103409cf..f3af33b658 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -77,78 +77,6 @@ int dpbp_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpbp_create() - Create the DPBP object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id; use in subsequent API calls
- *
- * Create the DPBP object, allocate required resources and
- * perform required initialization.
- *
- * This function accepts an authentication token of a parent
- * container that this object should be assigned to and returns
- * an object id. This object_id will be used in all subsequent calls to
- * this specific object.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpbp_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct mc_command cmd = { 0 };
-	int err;
-
-	(void)(cfg); /* unused */
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
-					  cmd_flags, dprc_token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @obj_id:	ID of DPBP object
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpbp_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t obj_id)
-{
-	struct dpbp_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
-					  cmd_flags, dprc_token);
-
-	cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
-	cmd_params->object_id = cpu_to_le32(obj_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpbp_enable() - Enable the DPBP.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -193,40 +121,6 @@ int dpbp_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpbp_is_enabled() - Check if the DPBP is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPBP object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en)
-{
-	struct dpbp_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
-	*en = rsp_params->enabled & DPBP_ENABLE;
-
-	return 0;
-}
-
 /**
  * dpbp_reset() - Reset the DPBP, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -284,41 +178,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpbp_get_api_version - Get Data Path Buffer Pool API version
- * @mc_io:	Pointer to Mc portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of Buffer Pool API
- * @minor_ver:	Minor version of Buffer Pool API
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpbp_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
-					  cmd_flags, 0);
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpbp_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
 /**
  * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
  * @mc_io:  Pointer to MC portal's I/O object
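
dpbp.c loses the object-lifecycle commands (create, destroy, is_enabled,
get_api_version); the runtime path the dpaa2 mempool code relies on
(open, enable, reset, get_attributes, get_num_free_bufs) is untouched.
A sketch of that retained bring-up sequence, where mc_io and dpbp_id are
assumed to come from the fslmc container scan and error handling is
trimmed:

	uint16_t token;
	struct dpbp_attr attr;

	if (dpbp_open(mc_io, CMD_PRI_LOW, dpbp_id, &token))
		return -1;
	if (dpbp_enable(mc_io, CMD_PRI_LOW, token))
		return -1;
	dpbp_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
	/* attr.bpid is the pool id used for qbman buffer release/acquire */
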
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
index 7e31327afa..cd558d507c 100644
--- a/drivers/bus/fslmc/mc/dpci.c
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -53,116 +53,6 @@ int dpci_open(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpci_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_close(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpci_create() - Create the DPCI object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPCI object, allocate required resources and perform required
- * initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpci_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct dpci_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpci_cmd_create *)cmd.params;
-	cmd_params->num_of_priorities = cfg->num_of_priorities;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpci_destroy() - Destroy the DPCI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpci_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id)
-{
-	struct dpci_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpci_cmd_destroy *)cmd.params;
-	cmd_params->dpci_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -186,86 +76,6 @@ int dpci_enable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_disable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpci_is_enabled() - Check if the DPCI is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en)
-{
-	struct dpci_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpci_rsp_is_enabled *)cmd.params;
-	*en = dpci_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dpci_reset() - Reset the DPCI, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_reset(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpci_get_attributes() - Retrieve DPCI attributes.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -431,133 +241,3 @@ int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
-
-/**
- * dpci_get_api_version() - Get communication interface API version
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path communication interface API
- * @minor_ver:	Minor version of data path communication interface API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpci_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpci_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpci_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
-/**
- * dpci_set_opr() - Set Order Restoration configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- * @index:	The queue index
- * @options:	Configuration mode options
- *		can be OPR_OPT_CREATE or OPR_OPT_RETIRE
- * @cfg:	Configuration options for the OPR
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_set_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 uint8_t options,
-		 struct opr_cfg *cfg)
-{
-	struct dpci_cmd_set_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpci_cmd_set_opr *)cmd.params;
-	cmd_params->index = index;
-	cmd_params->options = options;
-	cmd_params->oloe = cfg->oloe;
-	cmd_params->oeane = cfg->oeane;
-	cmd_params->olws = cfg->olws;
-	cmd_params->oa = cfg->oa;
-	cmd_params->oprrws = cfg->oprrws;
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpci_get_opr() - Retrieve Order Restoration config and query.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCI object
- * @index:	The queue index
- * @cfg:	Returned OPR configuration
- * @qry:	Returned OPR query
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpci_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry)
-{
-	struct dpci_rsp_get_opr *rsp_params;
-	struct dpci_cmd_get_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpci_cmd_get_opr *)cmd.params;
-	cmd_params->index = index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpci_rsp_get_opr *)cmd.params;
-	cfg->oloe = rsp_params->oloe;
-	cfg->oeane = rsp_params->oeane;
-	cfg->olws = rsp_params->olws;
-	cfg->oa = rsp_params->oa;
-	cfg->oprrws = rsp_params->oprrws;
-	qry->rip = dpci_get_field(rsp_params->flags, RIP);
-	qry->enable = dpci_get_field(rsp_params->flags, OPR_ENABLE);
-	qry->nesn = le16_to_cpu(rsp_params->nesn);
-	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-	qry->tseq_nlis = dpci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
-	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-	qry->hseq_nlis = dpci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
-	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-
-	return 0;
-}
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
index 2c46638dcb..e9bf364507 100644
--- a/drivers/bus/fslmc/mc/dpcon.c
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -53,212 +53,6 @@ int dpcon_open(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpcon_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_create() - Create the DPCON object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id; use in subsequent API calls
- *
- * Create the DPCON object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * This function accepts an authentication token of a parent
- * container that this object should be assigned to and returns
- * an object id. This object_id will be used in all subsequent calls to
- * this specific object.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dpcon_cfg *cfg,
-		 uint32_t *obj_id)
-{
-	struct dpcon_cmd_create *dpcon_cmd;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	dpcon_cmd = (struct dpcon_cmd_create *)cmd.params;
-	dpcon_cmd->num_priorities = cfg->num_priorities;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpcon_destroy() - Destroy the DPCON object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @obj_id:	ID of DPCON object
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpcon_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t obj_id)
-{
-	struct dpcon_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpcon_cmd_destroy *)cmd.params;
-	cmd_params->object_id = cpu_to_le32(obj_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_enable() - Enable the DPCON
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpcon_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_disable() - Disable the DPCON
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpcon_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpcon_is_enabled() -	Check if the DPCON is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en)
-{
-	struct dpcon_rsp_is_enabled *dpcon_rsp;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
-	*en = dpcon_rsp->enabled & DPCON_ENABLE;
-
-	return 0;
-}
-
-/**
- * dpcon_reset() - Reset the DPCON, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPCON object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpcon_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
-					  cmd_flags, token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpcon_get_attributes() - Retrieve DPCON attributes.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -295,38 +89,3 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
-
-/**
- * dpcon_get_api_version - Get Data Path Concentrator API version
- * @mc_io:	Pointer to MC portal's DPCON object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of DPCON API
- * @minor_ver:	Minor version of DPCON API
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver)
-{
-	struct dpcon_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
-					  cmd_flags, 0);
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpcon_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c
index dcb9d516a1..30640fd353 100644
--- a/drivers/bus/fslmc/mc/dpdmai.c
+++ b/drivers/bus/fslmc/mc/dpdmai.c
@@ -76,92 +76,6 @@ int dpdmai_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpdmai_create() - Create the DPDMAI object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPDMAI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmai_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmai_cfg *cfg,
-		  uint32_t *obj_id)
-{
-	struct dpdmai_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmai_cmd_create *)cmd.params;
-	cmd_params->num_queues = cfg->num_queues;
-	cmd_params->priorities[0] = cfg->priorities[0];
-	cmd_params->priorities[1] = cfg->priorities[1];
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- *		created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpdmai_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id)
-{
-	struct dpdmai_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmai_cmd_destroy *)cmd.params;
-	cmd_params->dpdmai_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -208,64 +122,6 @@ int dpdmai_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMAI object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en)
-{
-	struct dpdmai_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params;
-	*en = dpdmai_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMAI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmai_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
  * @mc_io:	Pointer to MC portal's I/O object
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index a3382ed142..317924c856 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -76,95 +76,6 @@ int dpio_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpio_create() - Create the DPIO object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPIO object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpio_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpio_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct dpio_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpio_cmd_create *)cmd.params;
-	cmd_params->num_priorities = cfg->num_priorities;
-	dpio_set_field(cmd_params->channel_mode,
-		       CHANNEL_MODE,
-		       cfg->channel_mode);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpio_destroy() - Destroy the DPIO object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- *		created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; Error code otherwise
- */
-int dpio_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id)
-{
-	struct dpio_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
-			cmd_flags,
-			dprc_token);
-
-	/* set object id to destroy */
-	cmd_params = (struct dpio_cmd_destroy *)cmd.params;
-	cmd_params->dpio_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpio_enable() - Enable the DPIO, allow I/O portal operations.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -211,40 +122,6 @@ int dpio_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpio_is_enabled() - Check if the DPIO is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPIO object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpio_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en)
-{
-	struct dpio_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpio_rsp_is_enabled *)cmd.params;
-	*en = dpio_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpio_reset() - Reset the DPIO, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -341,41 +218,6 @@ int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpio_get_stashing_destination() - Get the stashing destination..
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPIO object
- * @sdest:	Returns the stashing destination value
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint8_t *sdest)
-{
-	struct dpio_stashing_dest *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpio_stashing_dest *)cmd.params;
-	*sdest = rsp_params->sdest;
-
-	return 0;
-}
-
 /**
  * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
  * @mc_io:		Pointer to MC portal's I/O object
@@ -444,36 +286,3 @@ int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpio_get_api_version() - Get Data Path I/O API version
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path i/o API
- * @minor_ver:	Minor version of data path i/o API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpio_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpio_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpio_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index 8a021f55f1..f50131ba45 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -34,17 +34,6 @@ struct dpbp_cfg {
 	uint32_t options;
 };
 
-int dpbp_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpbp_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpbp_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t obj_id);
-
 __rte_internal
 int dpbp_enable(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
@@ -55,11 +44,6 @@ int dpbp_disable(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token);
 
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en);
-
 __rte_internal
 int dpbp_reset(struct fsl_mc_io *mc_io,
 	       uint32_t cmd_flags,
@@ -90,10 +74,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
  * BPSCN write will attempt to allocate into a cache (coherent write)
  */
 #define DPBP_NOTIF_OPT_COHERENT_WRITE	0x00000001
-int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
 
 __rte_internal
 int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
index 81fd3438aa..9fdc3a8ea5 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -37,10 +37,6 @@ int dpci_open(struct fsl_mc_io *mc_io,
 	      int dpci_id,
 	      uint16_t *token);
 
-int dpci_close(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token);
-
 /**
  * Enable the Order Restoration support
  */
@@ -66,34 +62,10 @@ struct dpci_cfg {
 	uint8_t num_of_priorities;
 };
 
-int dpci_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpci_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpci_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id);
-
 int dpci_enable(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
 		uint16_t token);
 
-int dpci_disable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
-int dpci_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en);
-
-int dpci_reset(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
-	       uint16_t token);
-
 /**
  * struct dpci_attr - Structure representing DPCI attributes
  * @id:			DPCI object ID
@@ -224,25 +196,4 @@ int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
 		      uint8_t priority,
 		      struct dpci_tx_queue_attr *attr);
 
-int dpci_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
-
-__rte_internal
-int dpci_set_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 uint8_t options,
-		 struct opr_cfg *cfg);
-
-__rte_internal
-int dpci_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry);
-
 #endif /* __FSL_DPCI_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
index 7caa6c68a1..0b3add5d52 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -26,10 +26,6 @@ int dpcon_open(struct fsl_mc_io *mc_io,
 	       int dpcon_id,
 	       uint16_t *token);
 
-int dpcon_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
 /**
  * struct dpcon_cfg - Structure representing DPCON configuration
  * @num_priorities: Number of priorities for the DPCON channel (1-8)
@@ -38,34 +34,6 @@ struct dpcon_cfg {
 	uint8_t num_priorities;
 };
 
-int dpcon_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dpcon_cfg *cfg,
-		 uint32_t *obj_id);
-
-int dpcon_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t obj_id);
-
-int dpcon_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
-int dpcon_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
-
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en);
-
-int dpcon_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
 /**
  * struct dpcon_attr - Structure representing DPCON attributes
  * @id:			DPCON object ID
@@ -84,9 +52,4 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
 			 uint16_t token,
 			 struct dpcon_attr *attr);
 
-int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver);
-
 #endif /* __FSL_DPCON_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h
index 19328c00a0..eb1d3c1658 100644
--- a/drivers/bus/fslmc/mc/fsl_dpdmai.h
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -47,17 +47,6 @@ struct dpdmai_cfg {
 	uint8_t priorities[DPDMAI_PRIO_NUM];
 };
 
-int dpdmai_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmai_cfg *cfg,
-		  uint32_t *obj_id);
-
-int dpdmai_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id);
-
 __rte_internal
 int dpdmai_enable(struct fsl_mc_io *mc_io,
 		  uint32_t cmd_flags,
@@ -68,15 +57,6 @@ int dpdmai_disable(struct fsl_mc_io *mc_io,
 		   uint32_t cmd_flags,
 		   uint16_t token);
 
-int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en);
-
-int dpdmai_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
 /**
  * struct dpdmai_attr - Structure representing DPDMAI attributes
  * @id: DPDMAI object ID
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
index c2db76bdf8..0ddcdb41ec 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -50,17 +50,6 @@ struct dpio_cfg {
 };
 
 
-int dpio_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpio_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpio_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id);
-
 __rte_internal
 int dpio_enable(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
@@ -71,11 +60,6 @@ int dpio_disable(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token);
 
-int dpio_is_enabled(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    int *en);
-
 __rte_internal
 int dpio_reset(struct fsl_mc_io *mc_io,
 	       uint32_t cmd_flags,
@@ -87,11 +71,6 @@ int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
 				  uint16_t token,
 				  uint8_t sdest);
 
-int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint8_t *sdest);
-
 __rte_internal
 int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
 				    uint32_t cmd_flags,
@@ -135,9 +114,4 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
 			uint16_t token,
 			struct dpio_attr *attr);
 
-int dpio_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
-
 #endif /* __FSL_DPIO_H */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index d9619848d8..06b3e81f26 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -109,13 +109,6 @@ void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
 	}
 }
 
-int dpaa2_dpbp_supported(void)
-{
-	if (TAILQ_EMPTY(&dpbp_dev_list))
-		return -1;
-	return 0;
-}
-
 static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
 	.dev_type = DPAA2_BPOOL,
 	.create = dpaa2_create_dpbp_device,
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index ac24f01451..b72017bd32 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -454,9 +454,6 @@ struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
 __rte_internal
 void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
 
-__rte_internal
-int dpaa2_dpbp_supported(void);
-
 __rte_internal
 struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
 
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
index 54096e8774..12beb148fb 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
@@ -36,6 +36,4 @@ int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
 __rte_internal
 uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
 
-uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
-
 #endif /* !_FSL_QBMAN_DEBUG_H */
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index eb68c9cab5..b24c809fa1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -50,14 +50,6 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
  */
 int qbman_swp_update(struct qbman_swp *p, int stash_off);
 
-/**
- * qbman_swp_finish() - Create and destroy a functional object representing
- * the given QBMan portal descriptor.
- * @p: the qbman_swp object to be destroyed.
- *
- */
-void qbman_swp_finish(struct qbman_swp *p);
-
 /**
  * qbman_swp_invalidate() - Invalidate the cache enabled area of the QBMan
  * portal. This is required to be called if a portal moved to another core
@@ -67,14 +59,6 @@ void qbman_swp_finish(struct qbman_swp *p);
  */
 void qbman_swp_invalidate(struct qbman_swp *p);
 
-/**
- * qbman_swp_get_desc() - Get the descriptor of the given portal object.
- * @p: the given portal object.
- *
- * Return the descriptor for this portal.
- */
-const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
-
 	/**************/
 	/* Interrupts */
 	/**************/
@@ -92,32 +76,6 @@ const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
 /* Volatile dequeue command interrupt */
 #define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
 
-/**
- * qbman_swp_interrupt_get_vanish() - Get the data in software portal
- * interrupt status disable register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_ISDR register.
- */
-uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
-
-/**
- * qbman_swp_interrupt_set_vanish() - Set the data in software portal
- * interrupt status disable register.
- * @p: the given software portal object.
- * @mask: The value to set in SWP_IDSR register.
- */
-void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
-
-/**
- * qbman_swp_interrupt_read_status() - Get the data in software portal
- * interrupt status register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_ISR register.
- */
-uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
-
 /**
  * qbman_swp_interrupt_clear_status() - Set the data in software portal
  * interrupt status register.
@@ -127,13 +85,6 @@ uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
 __rte_internal
 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
- * DQRR interrupt threshold register.
- * @p: the given software portal object.
- */
-uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
-
 /**
  * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
  * DQRR interrupt threshold register.
@@ -142,13 +93,6 @@ uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
  */
 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_intr_timeout_read_status() - Get the data in software portal
- * Interrupt Time-Out period register.
- * @p: the given software portal object.
- */
-uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
-
 /**
  * qbman_swp_intr_timeout_write() - Set the data in software portal
  * Interrupt Time-Out period register.
@@ -157,15 +101,6 @@ uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
  */
 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_interrupt_get_trigger() - Get the data in software portal
- * interrupt enable register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_IER register.
- */
-uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
-
 /**
  * qbman_swp_interrupt_set_trigger() - Set the data in software portal
  * interrupt enable register.
@@ -174,15 +109,6 @@ uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
  */
 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
 
-/**
- * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
- * interrupt inhibit register.
- * @p: the given software portal object.
- *
- * Return the settings in SWP_IIR register.
- */
-int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
-
 /**
  * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
  * interrupt inhibit register.
@@ -268,21 +194,6 @@ int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
 /* Push-mode dequeuing */
 /* ------------------- */
 
-/* The user of a portal can enable and disable push-mode dequeuing of up to 16
- * channels independently. It does not specify this toggling by channel IDs, but
- * rather by specifying the index (from 0 to 15) that has been mapped to the
- * desired channel.
- */
-
-/**
- * qbman_swp_push_get() - Get the push dequeue setup.
- * @s: the software portal object.
- * @channel_idx: the channel index to query.
- * @enabled: returned boolean to show whether the push dequeue is enabled for
- * the given channel.
- */
-void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
-
 /**
  * qbman_swp_push_set() - Enable or disable push dequeue.
  * @s: the software portal object.
@@ -363,17 +274,6 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
 __rte_internal
 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
 				   uint8_t numframes);
-/**
- * qbman_pull_desc_set_token() - Set dequeue token for pull command
- * @d: the dequeue descriptor
- * @token: the token to be set
- *
- * token is the value that shows up in the dequeue response that can be used to
- * detect when the results have been published. The easiest technique is to zero
- * result "storage" before issuing a dequeue, and use any non-zero 'token' value
- */
-void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
-
 /* Exactly one of the following descriptor "actions" should be set. (Calling any
  * one of these will replace the effect of any prior call to one of these.)
  * - pull dequeue from the given frame queue (FQ)
@@ -387,30 +287,6 @@ void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
 __rte_internal
 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
 
-/**
- * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
- * @wqid: composed of channel id and wqid within the channel.
- * @dct: the dequeue command type.
- */
-void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
-			    enum qbman_pull_type_e dct);
-
-/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
- * dequeues.
- * @chid: the channel id to be dequeued.
- * @dct: the dequeue command type.
- */
-void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
-				 enum qbman_pull_type_e dct);
-
-/**
- * qbman_pull_desc_set_rad() - Decide whether reschedule the fq after dequeue
- *
- * @rad: 1 = Reschedule the FQ after dequeue.
- *	 0 = Allow the FQ to remain active after dequeue.
- */
-void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
-
 /**
  * qbman_swp_pull() - Issue the pull dequeue command
  * @s: the software portal object.
@@ -471,17 +347,6 @@ void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
 __rte_internal
 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
 
-/**
- * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
- * given portal
- * @s: the given portal.
- * @idx: the dqrr index.
- *
- * Return dqrr entry object.
- */
-__rte_internal
-struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
-
 /* ------------------------------------------------- */
 /* Polling user-provided storage for dequeue results */
 /* ------------------------------------------------- */
@@ -549,78 +414,6 @@ static inline int qbman_result_is_SCN(const struct qbman_result *dq)
 	return !qbman_result_is_DQ(dq);
 }
 
-/* Recognise different notification types, only required if the user allows for
- * these to occur, and cares about them when they do.
- */
-
-/**
- * qbman_result_is_FQDAN() - Check for FQ Data Availability
- * @dq: the qbman_result object.
- *
- * Return 1 if this is FQDAN.
- */
-int qbman_result_is_FQDAN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_CDAN() - Check for Channel Data Availability
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is CDAN.
- */
-int qbman_result_is_CDAN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_CSCN() - Check for Congestion State Change
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is CSCN.
- */
-int qbman_result_is_CSCN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is BPSCN.
- */
-int qbman_result_is_BPSCN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is CGCU.
- */
-int qbman_result_is_CGCU(const struct qbman_result *dq);
-
-/* Frame queue state change notifications; (FQDAN in theory counts too as it
- * leaves a FQ parked, but it is primarily a data availability notification)
- */
-
-/**
- * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is FQRN.
- */
-int qbman_result_is_FQRN(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is FQRNI.
- */
-int qbman_result_is_FQRNI(const struct qbman_result *dq);
-
-/**
- * qbman_result_is_FQPN() - Check for FQ Park Notification
- * @dq: the qbman_result object to check.
- *
- * Return 1 if this is FQPN.
- */
-int qbman_result_is_FQPN(const struct qbman_result *dq);
-
 /* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
  */
 /* FQ empty */
@@ -695,30 +488,6 @@ uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
 __rte_internal
 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
 
-/**
- * qbman_result_DQ_fqid() - Get the fqid in dequeue response
- * @dq: the dequeue result.
- *
- * Return fqid.
- */
-uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
-
-/**
- * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
- * @dq: the dequeue result.
- *
- * Return the byte count remaining in the FQ.
- */
-uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
-
-/**
- * qbman_result_DQ_frame_count - Get the frame count in dequeue response
- * @dq: the dequeue result.
- *
- * Return the frame count remaining in the FQ.
- */
-uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
-
 /**
  * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
  * @dq: the dequeue result.
@@ -780,66 +549,6 @@ uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
 /* Get the CGID from the CSCN */
 #define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
 
-/**
- * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
- * @scn: the state change notification.
- *
- * Return the buffer pool id.
- */
-uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_has_free_bufs() - Check whether there are free
- * buffers in the pool from BPSCN.
- * @scn: the state change notification.
- *
- * Return the number of free buffers.
- */
-int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
- * buffer pool is depleted.
- * @scn: the state change notification.
- *
- * Return the status of buffer pool depletion.
- */
-int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
- * pool is surplus or not.
- * @scn: the state change notification.
- *
- * Return the status of buffer pool surplus.
- */
-int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
-
-/**
- * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
- * @scn: the state change notification.
- *
- * Return the BPSCN context.
- */
-uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
-
-/* Parsing CGCU */
-/**
- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
- * @scn: the state change notification.
- *
- * Return the CGCU resource id.
- */
-uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
-
-/**
- * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
- * @scn: the state change notification.
- *
- * Return instantaneous count in the CGCU notification.
- */
-uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
-
 	/************/
 	/* Enqueues */
 	/************/
@@ -916,25 +625,6 @@ __rte_internal
 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
 			   uint16_t opr_id, uint16_t seqnum, int incomplete);
 
-/**
- * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
- * without any enqueue
- * @d: the enqueue descriptor.
- * @opr_id: the order point record id.
- * @seqnum: the order restoration sequence number.
- */
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum);
-
-/**
- * qbman_eq_desc_set_orp_nesn() -  advance NESN (Next Expected Sequence Number)
- * without any enqueue
- * @d: the enqueue descriptor.
- * @opr_id: the order point record id.
- * @seqnum: the order restoration sequence number.
- */
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum);
 /**
  * qbman_eq_desc_set_response() - Set the enqueue response info.
  * @d: the enqueue descriptor
@@ -981,27 +671,6 @@ void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
 __rte_internal
 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
 
-/**
- * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
- * @d: the enqueue descriptor
- * @qdid: the id of the queuing destination to be enqueued.
- * @qd_bin: the queuing destination bin
- * @qd_prio: the queuing destination priority.
- */
-__rte_internal
-void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-			  uint16_t qd_bin, uint8_t qd_prio);
-
-/**
- * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
- * @d: the enqueue descriptor
- * @enable: boolean to enable/disable EQDI
- *
- * Determines whether or not the portal's EQDI interrupt source should be
- * asserted after the enqueue command is completed.
- */
-void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
-
 /**
  * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
  * @d: the enqueue descriptor.
@@ -1060,19 +729,6 @@ uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
 __rte_internal
 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
 
-/**
- * qbman_swp_enqueue() - Issue an enqueue command.
- * @s: the software portal used for enqueue.
- * @d: the enqueue descriptor.
- * @fd: the frame descriptor to be enqueued.
- *
- * Please note that 'fd' should only be NULL if the "action" of the
- * descriptor is "orp_hole" or "orp_nesn".
- *
- * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
- */
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-		      const struct qbman_fd *fd);
 /**
  * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
 				  eq descriptor
@@ -1171,13 +827,6 @@ void qbman_release_desc_clear(struct qbman_release_desc *d);
 __rte_internal
 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
 
-/**
- * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
- * interrupt source should be asserted after the release command is completed.
- * @d: the qbman release descriptor.
- */
-void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-
 /**
  * qbman_swp_release() - Issue a buffer release command.
  * @s: the software portal object.
@@ -1217,116 +866,4 @@ __rte_internal
 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
 		      unsigned int num_buffers);
 
-	/*****************/
-	/* FQ management */
-	/*****************/
-/**
- * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
- * @s: the software portal object.
- * @fqid: the index of frame queue to be scheduled.
- *
- * There are a couple of different ways that a FQ can end up parked state,
- * This schedules it.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
-
-/**
- * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
- * @s: the software portal object.
- * @fqid: the index of frame queue to be forced.
- *
- * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
- * and thus be available for selection by any channel-dequeuing behaviour (push
- * or pull). If the FQ is subsequently "dequeued" from the channel and is still
- * empty at the time this happens, the resulting dq_entry will have no FD.
- * (qbman_result_DQ_fd() will return NULL.)
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
-
-/**
- * These functions change the FQ flow-control stuff between XON/XOFF. (The
- * default is XON.) This setting doesn't affect enqueues to the FQ, just
- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
- * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
- * changed to XOFF after it had already become truly-scheduled to a channel, and
- * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
- * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
- * return NULL.)
- */
-/**
- * qbman_swp_fq_xon() - XON the frame queue.
- * @s: the software portal object.
- * @fqid: the index of frame queue.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
-/**
- * qbman_swp_fq_xoff() - XOFF the frame queue.
- * @s: the software portal object.
- * @fqid: the index of frame queue.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
-
-	/**********************/
-	/* Channel management */
-	/**********************/
-
-/**
- * If the user has been allocated a channel object that is going to generate
- * CDANs to another channel, then these functions will be necessary.
- * CDAN-enabled channels only generate a single CDAN notification, after which
- * it they need to be reenabled before they'll generate another. (The idea is
- * that pull dequeuing will occur in reaction to the CDAN, followed by a
- * reenable step.) Each function generates a distinct command to hardware, so a
- * combination function is provided if the user wishes to modify the "context"
- * (which shows up in each CDAN message) each time they reenable, as a single
- * command to hardware.
- */
-
-/**
- * qbman_swp_CDAN_set_context() - Set CDAN context
- * @s: the software portal object.
- * @channelid: the channel index.
- * @ctx: the context to be set in CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-			       uint64_t ctx);
-
-/**
- * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
- * @s: the software portal object.
- * @channelid: the index of the channel to generate CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
-
-/**
- * qbman_swp_CDAN_disable() - disable CDAN for the channel.
- * @s: the software portal object.
- * @channelid: the index of the channel to generate CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
-
-/**
- * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
- * @s: the software portal object.
- * @channelid: the index of the channel to generate CDAN.
- * @ctx: the context set in CDAN.
- *
- * Return 0 for success, or negative error code for failure.
- */
-int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
-				      uint64_t ctx);
 #endif /* !_FSL_QBMAN_PORTAL_H */
diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c
index 34374ae4b6..2c6a7dcd16 100644
--- a/drivers/bus/fslmc/qbman/qbman_debug.c
+++ b/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -59,8 +59,3 @@ uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
 {
 	return (r->frm_cnt & 0x00FFFFFF);
 }
-
-uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
-{
-	return r->byte_cnt;
-}
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 77c9d508c4..b8bcfb7189 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -82,10 +82,6 @@ qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
 		const struct qbman_eq_desc *d,
 		const struct qbman_fd *fd);
 static int
-qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
-		const struct qbman_eq_desc *d,
-		const struct qbman_fd *fd);
-static int
 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
 		const struct qbman_eq_desc *d,
 		const struct qbman_fd *fd);
@@ -377,80 +373,30 @@ int qbman_swp_update(struct qbman_swp *p, int stash_off)
 	return 0;
 }
 
-void qbman_swp_finish(struct qbman_swp *p)
-{
-#ifdef QBMAN_CHECKING
-	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
-#endif
-	qbman_swp_sys_finish(&p->sys);
-	portal_idx_map[p->desc.idx] = NULL;
-	free(p);
-}
-
-const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
-{
-	return &p->desc;
-}
-
 /**************/
 /* Interrupts */
 /**************/
 
-uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
-}
-
-void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
-{
-	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
-}
-
-uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
-}
-
 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
 }
 
-uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
-}
-
 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
 }
 
-uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
-}
-
 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
 }
 
-uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
-}
-
 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
 }
 
-int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
-{
-	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
-}
-
 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
 {
 	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
@@ -643,28 +589,6 @@ void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
 		d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
 }
 
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum)
-{
-	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
-	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
-	d->eq.orpid = opr_id;
-	d->eq.seqnum = seqnum;
-	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
-	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
-}
-
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
-				uint16_t seqnum)
-{
-	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
-	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
-	d->eq.orpid = opr_id;
-	d->eq.seqnum = seqnum;
-	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
-	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
-}
-
 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
 				dma_addr_t storage_phys,
 				int stash)
@@ -684,23 +608,6 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
 	d->eq.tgtid = fqid;
 }
 
-void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-			  uint16_t qd_bin, uint8_t qd_prio)
-{
-	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
-	d->eq.tgtid = qdid;
-	d->eq.qdbin = qd_bin;
-	d->eq.qpri = qd_prio;
-}
-
-void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
-{
-	if (enable)
-		d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
-	else
-		d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
-}
-
 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
 			   uint8_t dqrr_idx, int park)
 {
@@ -789,13 +696,6 @@ static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
 	return 0;
 }
 
-static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
-					       const struct qbman_eq_desc *d,
-					       const struct qbman_fd *fd)
-{
-	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
-}
-
 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
 					      const struct qbman_eq_desc *d,
 					      const struct qbman_fd *fd)
@@ -873,44 +773,6 @@ static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
 	return 0;
 }
 
-static int qbman_swp_enqueue_ring_mode_cinh_direct(
-		struct qbman_swp *s,
-		const struct qbman_eq_desc *d,
-		const struct qbman_fd *fd)
-{
-	uint32_t *p;
-	const uint32_t *cl = qb_cl(d);
-	uint32_t eqcr_ci, full_mask, half_mask;
-
-	half_mask = (s->eqcr.pi_ci_mask>>1);
-	full_mask = s->eqcr.pi_ci_mask;
-	if (!s->eqcr.available) {
-		eqcr_ci = s->eqcr.ci;
-		s->eqcr.ci = qbman_cinh_read(&s->sys,
-				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
-		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
-				eqcr_ci, s->eqcr.ci);
-		if (!s->eqcr.available)
-			return -EBUSY;
-	}
-
-	p = qbman_cinh_write_start_wo_shadow(&s->sys,
-			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
-	memcpy_byte_by_byte(&p[1], &cl[1], 28);
-	memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
-	lwsync();
-
-	/* Set the verb byte, have to substitute in the valid-bit */
-	p[0] = cl[0] | s->eqcr.pi_vb;
-	s->eqcr.pi++;
-	s->eqcr.pi &= full_mask;
-	s->eqcr.available--;
-	if (!(s->eqcr.pi & half_mask))
-		s->eqcr.pi_vb ^= QB_VALID_BIT;
-
-	return 0;
-}
-
 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
 						const struct qbman_eq_desc *d,
 						const struct qbman_fd *fd)
@@ -949,25 +811,6 @@ static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
 	return 0;
 }
 
-static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
-				       const struct qbman_eq_desc *d,
-				       const struct qbman_fd *fd)
-{
-	if (!s->stash_off)
-		return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
-	else
-		return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
-}
-
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-		      const struct qbman_fd *fd)
-{
-	if (s->sys.eqcr_mode == qman_eqcr_vb_array)
-		return qbman_swp_enqueue_array_mode(s, d, fd);
-	else    /* Use ring mode by default */
-		return qbman_swp_enqueue_ring_mode(s, d, fd);
-}
-
 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
 					     const struct qbman_eq_desc *d,
 					     const struct qbman_fd *fd,
@@ -1769,14 +1612,6 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
 /* Static (push) dequeue */
 /*************************/
 
-void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
-{
-	uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
-
-	QBMAN_BUG_ON(channel_idx > 15);
-	*enabled = src | (1 << channel_idx);
-}
-
 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
 {
 	uint16_t dqsrc;
@@ -1845,11 +1680,6 @@ void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
 	d->pull.numf = numframes - 1;
 }
 
-void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
-{
-	d->pull.tok = token;
-}
-
 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
 {
 	d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
@@ -1857,34 +1687,6 @@ void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
 	d->pull.dq_src = fqid;
 }
 
-void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
-			    enum qbman_pull_type_e dct)
-{
-	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
-	d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
-	d->pull.dq_src = wqid;
-}
-
-void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
-				 enum qbman_pull_type_e dct)
-{
-	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
-	d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
-	d->pull.dq_src = chid;
-}
-
-void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
-{
-	if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
-		if (rad)
-			d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
-		else
-			d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
-	} else {
-		printf("The RAD feature is not valid when RLS = 0\n");
-	}
-}
-
 static int qbman_swp_pull_direct(struct qbman_swp *s,
 				 struct qbman_pull_desc *d)
 {
@@ -2303,47 +2105,6 @@ int qbman_result_is_DQ(const struct qbman_result *dq)
 	return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
 }
 
-int qbman_result_is_FQDAN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
-}
-
-int qbman_result_is_CDAN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
-}
-
-int qbman_result_is_CSCN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
-		__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
-}
-
-int qbman_result_is_BPSCN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
-}
-
-int qbman_result_is_CGCU(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
-}
-
-int qbman_result_is_FQRN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
-}
-
-int qbman_result_is_FQRNI(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
-}
-
-int qbman_result_is_FQPN(const struct qbman_result *dq)
-{
-	return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
-}
-
 /*********************************/
 /* Parsing frame dequeue results */
 /*********************************/
@@ -2365,21 +2126,6 @@ uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
 	return dq->dq.oprid;
 }
 
-uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
-{
-	return dq->dq.fqid;
-}
-
-uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
-{
-	return dq->dq.fq_byte_cnt;
-}
-
-uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
-{
-	return dq->dq.fq_frm_cnt;
-}
-
 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
 {
 	return dq->dq.fqd_ctx;
@@ -2408,47 +2154,6 @@ uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
 	return scn->scn.ctx;
 }
 
-/*****************/
-/* Parsing BPSCN */
-/*****************/
-uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
-{
-	return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
-}
-
-int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
-{
-	return !(int)(qbman_result_SCN_state(scn) & 0x1);
-}
-
-int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
-{
-	return (int)(qbman_result_SCN_state(scn) & 0x2);
-}
-
-int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
-{
-	return (int)(qbman_result_SCN_state(scn) & 0x4);
-}
-
-uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
-{
-	return qbman_result_SCN_ctx(scn);
-}
-
-/*****************/
-/* Parsing CGCU  */
-/*****************/
-uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
-{
-	return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
-}
-
-uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
-{
-	return qbman_result_SCN_ctx(scn);
-}
-
 /********************/
 /* Parsing EQ RESP  */
 /********************/
@@ -2492,14 +2197,6 @@ void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
 	d->br.bpid = bpid;
 }
 
-void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
-{
-	if (enable)
-		d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
-	else
-		d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
-}
-
 #define RAR_IDX(rar)     ((rar) & 0x7)
 #define RAR_VB(rar)      ((rar) & 0x80)
 #define RAR_SUCCESS(rar) ((rar) & 0x100)
@@ -2751,60 +2448,6 @@ struct qbman_alt_fq_state_rslt {
 
 #define ALT_FQ_FQID_MASK 0x00FFFFFF
 
-static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
-				  uint8_t alt_fq_verb)
-{
-	struct qbman_alt_fq_state_desc *p;
-	struct qbman_alt_fq_state_rslt *r;
-
-	/* Start the management command */
-	p = qbman_swp_mc_start(s);
-	if (!p)
-		return -EBUSY;
-
-	p->fqid = fqid & ALT_FQ_FQID_MASK;
-
-	/* Complete the management command */
-	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
-	if (!r) {
-		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
-		       alt_fq_verb);
-		return -EIO;
-	}
-
-	/* Decode the outcome */
-	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
-
-	/* Determine success or failure */
-	if (r->rslt != QBMAN_MC_RSLT_OK) {
-		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
-		       fqid, alt_fq_verb, r->rslt);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
-}
-
-int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
-}
-
-int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
-}
-
-int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
-{
-	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
-}
-
 /**********************/
 /* Channel management */
 /**********************/
@@ -2834,87 +2477,7 @@ struct qbman_cdan_ctrl_rslt {
 #define CODE_CDAN_WE_EN    0x1
 #define CODE_CDAN_WE_CTX   0x4
 
-static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
-			      uint8_t we_mask, uint8_t cdan_en,
-			      uint64_t ctx)
-{
-	struct qbman_cdan_ctrl_desc *p;
-	struct qbman_cdan_ctrl_rslt *r;
-
-	/* Start the management command */
-	p = qbman_swp_mc_start(s);
-	if (!p)
-		return -EBUSY;
-
-	/* Encode the caller-provided attributes */
-	p->ch = channelid;
-	p->we = we_mask;
-	if (cdan_en)
-		p->ctrl = 1;
-	else
-		p->ctrl = 0;
-	p->cdan_ctx = ctx;
-
-	/* Complete the management command */
-	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
-	if (!r) {
-		pr_err("qbman: wqchan config failed, no response\n");
-		return -EIO;
-	}
-
-	/* Decode the outcome */
-	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
-		     != QBMAN_WQCHAN_CONFIGURE);
-
-	/* Determine success or failure */
-	if (r->rslt != QBMAN_MC_RSLT_OK) {
-		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
-		       channelid, r->rslt);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-			       uint64_t ctx)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_CTX,
-				  0, ctx);
-}
-
-int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_EN,
-				  1, 0);
-}
-
-int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_EN,
-				  0, 0);
-}
-
-int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
-				      uint64_t ctx)
-{
-	return qbman_swp_CDAN_set(s, channelid,
-				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
-				  1, ctx);
-}
-
 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
 {
 	return QBMAN_IDX_FROM_DQRR(dqrr);
 }
-
-struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
-{
-	struct qbman_result *dq;
-
-	dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
-	return dq;
-}
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 37d45dffe5..f6ded1717e 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -170,16 +170,6 @@ struct rte_fslmc_bus {
 __rte_internal
 void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
 
-/**
- * Unregister a DPAA2 driver.
- *
- * @param driver
- *   A pointer to a rte_dpaa2_driver structure describing the driver
- *   to be unregistered.
- */
-__rte_internal
-void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
-
 /** Helper for DPAA2 device registration from driver (eth, crypto) instance */
 #define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
 RTE_INIT(dpaa2initfn_ ##nm) \
diff --git a/drivers/bus/fslmc/version.map b/drivers/bus/fslmc/version.map
index f44c1a7988..a95c0faa00 100644
--- a/drivers/bus/fslmc/version.map
+++ b/drivers/bus/fslmc/version.map
@@ -11,7 +11,6 @@ INTERNAL {
 	dpaa2_affine_qbman_swp;
 	dpaa2_alloc_dpbp_dev;
 	dpaa2_alloc_dq_storage;
-	dpaa2_dpbp_supported;
 	dpaa2_dqrr_size;
 	dpaa2_eqcr_size;
 	dpaa2_free_dpbp_dev;
@@ -28,8 +27,6 @@ INTERNAL {
 	dpbp_get_num_free_bufs;
 	dpbp_open;
 	dpbp_reset;
-	dpci_get_opr;
-	dpci_set_opr;
 	dpci_set_rx_queue;
 	dpcon_get_attributes;
 	dpcon_open;
@@ -61,12 +58,10 @@ INTERNAL {
 	qbman_eq_desc_set_fq;
 	qbman_eq_desc_set_no_orp;
 	qbman_eq_desc_set_orp;
-	qbman_eq_desc_set_qd;
 	qbman_eq_desc_set_response;
 	qbman_eq_desc_set_token;
 	qbman_fq_query_state;
 	qbman_fq_state_frame_count;
-	qbman_get_dqrr_from_idx;
 	qbman_get_dqrr_idx;
 	qbman_pull_desc_clear;
 	qbman_pull_desc_set_fq;
@@ -103,7 +98,6 @@ INTERNAL {
 	rte_dpaa2_intr_disable;
 	rte_dpaa2_intr_enable;
 	rte_fslmc_driver_register;
-	rte_fslmc_driver_unregister;
 	rte_fslmc_get_device_count;
 	rte_fslmc_object_register;
 	rte_global_active_dqs_list;
diff --git a/drivers/bus/ifpga/ifpga_common.c b/drivers/bus/ifpga/ifpga_common.c
index 78e2eaee4e..7281b169d0 100644
--- a/drivers/bus/ifpga/ifpga_common.c
+++ b/drivers/bus/ifpga/ifpga_common.c
@@ -52,29 +52,6 @@ int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
 
 	return 0;
 }
-int ifpga_get_integer64_arg(const char *key __rte_unused,
-	const char *value, void *extra_args)
-{
-	if (!value || !extra_args)
-		return -EINVAL;
-
-	*(uint64_t *)extra_args = strtoull(value, NULL, 0);
-
-	return 0;
-}
-int ifpga_get_unsigned_long(const char *str, int base)
-{
-	unsigned long num;
-	char *end = NULL;
-
-	errno = 0;
-
-	num = strtoul(str, &end, base);
-	if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
-		return -1;
-
-	return num;
-}
 
 int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
 	const struct rte_afu_id *afu_id1)
diff --git a/drivers/bus/ifpga/ifpga_common.h b/drivers/bus/ifpga/ifpga_common.h
index f9254b9d5d..44381eb78d 100644
--- a/drivers/bus/ifpga/ifpga_common.h
+++ b/drivers/bus/ifpga/ifpga_common.h
@@ -9,9 +9,6 @@ int rte_ifpga_get_string_arg(const char *key __rte_unused,
 	const char *value, void *extra_args);
 int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
 	const char *value, void *extra_args);
-int ifpga_get_integer64_arg(const char *key __rte_unused,
-	const char *value, void *extra_args);
-int ifpga_get_unsigned_long(const char *str, int base);
 int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
 	const struct rte_afu_id *afu_id1);
 
diff --git a/drivers/common/dpaax/dpaa_of.c b/drivers/common/dpaax/dpaa_of.c
index bb2c8fc66b..ad96eb0b3d 100644
--- a/drivers/common/dpaax/dpaa_of.c
+++ b/drivers/common/dpaax/dpaa_of.c
@@ -242,33 +242,6 @@ of_init_path(const char *dt_path)
 	return 0;
 }
 
-static void
-destroy_dir(struct dt_dir *d)
-{
-	struct dt_file *f, *tmpf;
-	struct dt_dir *dd, *tmpd;
-
-	list_for_each_entry_safe(f, tmpf, &d->files, node.list) {
-		list_del(&f->node.list);
-		free(f);
-	}
-	list_for_each_entry_safe(dd, tmpd, &d->subdirs, node.list) {
-		destroy_dir(dd);
-		list_del(&dd->node.list);
-		free(dd);
-	}
-}
-
-void
-of_finish(void)
-{
-	DPAAX_HWWARN(!alive, "Double-finish of device-tree driver!");
-
-	destroy_dir(&root_dir);
-	INIT_LIST_HEAD(&linear);
-	alive = 0;
-}
-
 static const struct dt_dir *
 next_linear(const struct dt_dir *f)
 {
diff --git a/drivers/common/dpaax/dpaa_of.h b/drivers/common/dpaax/dpaa_of.h
index aed6bf98b0..0ba3794e9b 100644
--- a/drivers/common/dpaax/dpaa_of.h
+++ b/drivers/common/dpaax/dpaa_of.h
@@ -161,11 +161,6 @@ bool of_device_is_compatible(const struct device_node *dev_node,
 __rte_internal
 int of_init_path(const char *dt_path);
 
-/* of_finish() allows a controlled tear-down of the device-tree layer, eg. if a
- * full reload is desired without a process exit.
- */
-void of_finish(void);
-
 /* Use of this wrapper is recommended. */
 static inline int of_init(void)
 {
diff --git a/drivers/common/dpaax/dpaax_iova_table.c b/drivers/common/dpaax/dpaax_iova_table.c
index 91bee65e7b..357e62c164 100644
--- a/drivers/common/dpaax/dpaax_iova_table.c
+++ b/drivers/common/dpaax/dpaax_iova_table.c
@@ -346,45 +346,6 @@ dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length)
 	return 0;
 }
 
-/* dpaax_iova_table_dump
- * Dump the table, with its entries, on screen. Only works in Debug Mode
- * Not for weak hearted - the tables can get quite large
- */
-void
-dpaax_iova_table_dump(void)
-{
-	unsigned int i, j;
-	struct dpaax_iovat_element *entry;
-
-	/* In case DEBUG is not enabled, some 'if' conditions might misbehave
-	 * as they have nothing else in them  except a DPAAX_DEBUG() which if
-	 * tuned out would leave 'if' naked.
-	 */
-	if (rte_log_get_global_level() < RTE_LOG_DEBUG) {
-		DPAAX_ERR("Set log level to Debug for PA->Table dump!");
-		return;
-	}
-
-	DPAAX_DEBUG(" === Start of PA->VA Translation Table ===");
-	if (dpaax_iova_table_p == NULL)
-		DPAAX_DEBUG("\tNULL");
-
-	entry = dpaax_iova_table_p->entries;
-	for (i = 0; i < dpaax_iova_table_p->count; i++) {
-		DPAAX_DEBUG("\t(%16i),(%16"PRIu64"),(%16zu),(%16p)",
-			    i, entry[i].start, entry[i].len, entry[i].pages);
-		DPAAX_DEBUG("\t\t          (PA),          (VA)");
-		for (j = 0; j < (entry->len/DPAAX_MEM_SPLIT); j++) {
-			if (entry[i].pages[j] == 0)
-				continue;
-			DPAAX_DEBUG("\t\t(%16"PRIx64"),(%16"PRIx64")",
-				    (entry[i].start + (j * sizeof(uint64_t))),
-				    entry[i].pages[j]);
-		}
-	}
-	DPAAX_DEBUG(" === End of PA->VA Translation Table ===");
-}
-
 static void
 dpaax_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
 		  void *arg __rte_unused)
diff --git a/drivers/common/dpaax/dpaax_iova_table.h b/drivers/common/dpaax/dpaax_iova_table.h
index 230fba8ba0..8c3ce45f6a 100644
--- a/drivers/common/dpaax/dpaax_iova_table.h
+++ b/drivers/common/dpaax/dpaax_iova_table.h
@@ -67,8 +67,6 @@ __rte_internal
 void dpaax_iova_table_depopulate(void);
 __rte_internal
 int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length);
-__rte_internal
-void dpaax_iova_table_dump(void);
 
 static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __rte_hot;
 
diff --git a/drivers/common/dpaax/version.map b/drivers/common/dpaax/version.map
index ee1ca6801c..7390954793 100644
--- a/drivers/common/dpaax/version.map
+++ b/drivers/common/dpaax/version.map
@@ -2,7 +2,6 @@ INTERNAL {
 	global:
 
 	dpaax_iova_table_depopulate;
-	dpaax_iova_table_dump;
 	dpaax_iova_table_p;
 	dpaax_iova_table_populate;
 	dpaax_iova_table_update;
diff --git a/drivers/common/iavf/iavf_common.c b/drivers/common/iavf/iavf_common.c
index c951b7d787..025c9e9ece 100644
--- a/drivers/common/iavf/iavf_common.c
+++ b/drivers/common/iavf/iavf_common.c
@@ -43,214 +43,6 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
 	return status;
 }
 
-/**
- * iavf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
-{
-	switch (aq_err) {
-	case IAVF_AQ_RC_OK:
-		return "OK";
-	case IAVF_AQ_RC_EPERM:
-		return "IAVF_AQ_RC_EPERM";
-	case IAVF_AQ_RC_ENOENT:
-		return "IAVF_AQ_RC_ENOENT";
-	case IAVF_AQ_RC_ESRCH:
-		return "IAVF_AQ_RC_ESRCH";
-	case IAVF_AQ_RC_EINTR:
-		return "IAVF_AQ_RC_EINTR";
-	case IAVF_AQ_RC_EIO:
-		return "IAVF_AQ_RC_EIO";
-	case IAVF_AQ_RC_ENXIO:
-		return "IAVF_AQ_RC_ENXIO";
-	case IAVF_AQ_RC_E2BIG:
-		return "IAVF_AQ_RC_E2BIG";
-	case IAVF_AQ_RC_EAGAIN:
-		return "IAVF_AQ_RC_EAGAIN";
-	case IAVF_AQ_RC_ENOMEM:
-		return "IAVF_AQ_RC_ENOMEM";
-	case IAVF_AQ_RC_EACCES:
-		return "IAVF_AQ_RC_EACCES";
-	case IAVF_AQ_RC_EFAULT:
-		return "IAVF_AQ_RC_EFAULT";
-	case IAVF_AQ_RC_EBUSY:
-		return "IAVF_AQ_RC_EBUSY";
-	case IAVF_AQ_RC_EEXIST:
-		return "IAVF_AQ_RC_EEXIST";
-	case IAVF_AQ_RC_EINVAL:
-		return "IAVF_AQ_RC_EINVAL";
-	case IAVF_AQ_RC_ENOTTY:
-		return "IAVF_AQ_RC_ENOTTY";
-	case IAVF_AQ_RC_ENOSPC:
-		return "IAVF_AQ_RC_ENOSPC";
-	case IAVF_AQ_RC_ENOSYS:
-		return "IAVF_AQ_RC_ENOSYS";
-	case IAVF_AQ_RC_ERANGE:
-		return "IAVF_AQ_RC_ERANGE";
-	case IAVF_AQ_RC_EFLUSHED:
-		return "IAVF_AQ_RC_EFLUSHED";
-	case IAVF_AQ_RC_BAD_ADDR:
-		return "IAVF_AQ_RC_BAD_ADDR";
-	case IAVF_AQ_RC_EMODE:
-		return "IAVF_AQ_RC_EMODE";
-	case IAVF_AQ_RC_EFBIG:
-		return "IAVF_AQ_RC_EFBIG";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
-	return hw->err_str;
-}
-
-/**
- * iavf_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
-{
-	switch (stat_err) {
-	case IAVF_SUCCESS:
-		return "OK";
-	case IAVF_ERR_NVM:
-		return "IAVF_ERR_NVM";
-	case IAVF_ERR_NVM_CHECKSUM:
-		return "IAVF_ERR_NVM_CHECKSUM";
-	case IAVF_ERR_PHY:
-		return "IAVF_ERR_PHY";
-	case IAVF_ERR_CONFIG:
-		return "IAVF_ERR_CONFIG";
-	case IAVF_ERR_PARAM:
-		return "IAVF_ERR_PARAM";
-	case IAVF_ERR_MAC_TYPE:
-		return "IAVF_ERR_MAC_TYPE";
-	case IAVF_ERR_UNKNOWN_PHY:
-		return "IAVF_ERR_UNKNOWN_PHY";
-	case IAVF_ERR_LINK_SETUP:
-		return "IAVF_ERR_LINK_SETUP";
-	case IAVF_ERR_ADAPTER_STOPPED:
-		return "IAVF_ERR_ADAPTER_STOPPED";
-	case IAVF_ERR_INVALID_MAC_ADDR:
-		return "IAVF_ERR_INVALID_MAC_ADDR";
-	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
-		return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
-	case IAVF_ERR_MASTER_REQUESTS_PENDING:
-		return "IAVF_ERR_MASTER_REQUESTS_PENDING";
-	case IAVF_ERR_INVALID_LINK_SETTINGS:
-		return "IAVF_ERR_INVALID_LINK_SETTINGS";
-	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
-		return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
-	case IAVF_ERR_RESET_FAILED:
-		return "IAVF_ERR_RESET_FAILED";
-	case IAVF_ERR_SWFW_SYNC:
-		return "IAVF_ERR_SWFW_SYNC";
-	case IAVF_ERR_NO_AVAILABLE_VSI:
-		return "IAVF_ERR_NO_AVAILABLE_VSI";
-	case IAVF_ERR_NO_MEMORY:
-		return "IAVF_ERR_NO_MEMORY";
-	case IAVF_ERR_BAD_PTR:
-		return "IAVF_ERR_BAD_PTR";
-	case IAVF_ERR_RING_FULL:
-		return "IAVF_ERR_RING_FULL";
-	case IAVF_ERR_INVALID_PD_ID:
-		return "IAVF_ERR_INVALID_PD_ID";
-	case IAVF_ERR_INVALID_QP_ID:
-		return "IAVF_ERR_INVALID_QP_ID";
-	case IAVF_ERR_INVALID_CQ_ID:
-		return "IAVF_ERR_INVALID_CQ_ID";
-	case IAVF_ERR_INVALID_CEQ_ID:
-		return "IAVF_ERR_INVALID_CEQ_ID";
-	case IAVF_ERR_INVALID_AEQ_ID:
-		return "IAVF_ERR_INVALID_AEQ_ID";
-	case IAVF_ERR_INVALID_SIZE:
-		return "IAVF_ERR_INVALID_SIZE";
-	case IAVF_ERR_INVALID_ARP_INDEX:
-		return "IAVF_ERR_INVALID_ARP_INDEX";
-	case IAVF_ERR_INVALID_FPM_FUNC_ID:
-		return "IAVF_ERR_INVALID_FPM_FUNC_ID";
-	case IAVF_ERR_QP_INVALID_MSG_SIZE:
-		return "IAVF_ERR_QP_INVALID_MSG_SIZE";
-	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
-		return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
-	case IAVF_ERR_INVALID_FRAG_COUNT:
-		return "IAVF_ERR_INVALID_FRAG_COUNT";
-	case IAVF_ERR_QUEUE_EMPTY:
-		return "IAVF_ERR_QUEUE_EMPTY";
-	case IAVF_ERR_INVALID_ALIGNMENT:
-		return "IAVF_ERR_INVALID_ALIGNMENT";
-	case IAVF_ERR_FLUSHED_QUEUE:
-		return "IAVF_ERR_FLUSHED_QUEUE";
-	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
-		return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
-	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
-		return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
-	case IAVF_ERR_TIMEOUT:
-		return "IAVF_ERR_TIMEOUT";
-	case IAVF_ERR_OPCODE_MISMATCH:
-		return "IAVF_ERR_OPCODE_MISMATCH";
-	case IAVF_ERR_CQP_COMPL_ERROR:
-		return "IAVF_ERR_CQP_COMPL_ERROR";
-	case IAVF_ERR_INVALID_VF_ID:
-		return "IAVF_ERR_INVALID_VF_ID";
-	case IAVF_ERR_INVALID_HMCFN_ID:
-		return "IAVF_ERR_INVALID_HMCFN_ID";
-	case IAVF_ERR_BACKING_PAGE_ERROR:
-		return "IAVF_ERR_BACKING_PAGE_ERROR";
-	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
-		return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
-	case IAVF_ERR_INVALID_PBLE_INDEX:
-		return "IAVF_ERR_INVALID_PBLE_INDEX";
-	case IAVF_ERR_INVALID_SD_INDEX:
-		return "IAVF_ERR_INVALID_SD_INDEX";
-	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
-		return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
-	case IAVF_ERR_INVALID_SD_TYPE:
-		return "IAVF_ERR_INVALID_SD_TYPE";
-	case IAVF_ERR_MEMCPY_FAILED:
-		return "IAVF_ERR_MEMCPY_FAILED";
-	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
-		return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
-	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
-		return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
-	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
-		return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
-	case IAVF_ERR_SRQ_ENABLED:
-		return "IAVF_ERR_SRQ_ENABLED";
-	case IAVF_ERR_ADMIN_QUEUE_ERROR:
-		return "IAVF_ERR_ADMIN_QUEUE_ERROR";
-	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
-		return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
-	case IAVF_ERR_BUF_TOO_SHORT:
-		return "IAVF_ERR_BUF_TOO_SHORT";
-	case IAVF_ERR_ADMIN_QUEUE_FULL:
-		return "IAVF_ERR_ADMIN_QUEUE_FULL";
-	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
-		return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
-	case IAVF_ERR_BAD_IWARP_CQE:
-		return "IAVF_ERR_BAD_IWARP_CQE";
-	case IAVF_ERR_NVM_BLANK_MODE:
-		return "IAVF_ERR_NVM_BLANK_MODE";
-	case IAVF_ERR_NOT_IMPLEMENTED:
-		return "IAVF_ERR_NOT_IMPLEMENTED";
-	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
-		return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
-	case IAVF_ERR_DIAG_TEST_FAILED:
-		return "IAVF_ERR_DIAG_TEST_FAILED";
-	case IAVF_ERR_NOT_READY:
-		return "IAVF_ERR_NOT_READY";
-	case IAVF_NOT_SUPPORTED:
-		return "IAVF_NOT_SUPPORTED";
-	case IAVF_ERR_FIRMWARE_API_VERSION:
-		return "IAVF_ERR_FIRMWARE_API_VERSION";
-	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
-		return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
-	return hw->err_str;
-}
-
 /**
  * iavf_debug_aq
  * @hw: debug mask related to admin queue
@@ -362,164 +154,6 @@ enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw,
 	return status;
 }
 
-/**
- * iavf_aq_get_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- * @set: set true to set the table, false to get the table
- *
- * Internal function to get or set RSS look up table
- **/
-STATIC enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
-						u16 vsi_id, bool pf_lut,
-						u8 *lut, u16 lut_size,
-						bool set)
-{
-	enum iavf_status status;
-	struct iavf_aq_desc desc;
-	struct iavf_aqc_get_set_rss_lut *cmd_resp =
-		   (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
-
-	if (set)
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_set_rss_lut);
-	else
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_get_rss_lut);
-
-	/* Indirect command */
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
-
-	cmd_resp->vsi_id =
-			CPU_TO_LE16((u16)((vsi_id <<
-					  IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
-					  IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
-	cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
-
-	if (pf_lut)
-		cmd_resp->flags |= CPU_TO_LE16((u16)
-					((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-	else
-		cmd_resp->flags |= CPU_TO_LE16((u16)
-					((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
-					IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-
-	status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
-
-	return status;
-}
-
-/**
- * iavf_aq_get_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * get the RSS lookup table, PF or VSI type
- **/
-enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
-				     bool pf_lut, u8 *lut, u16 lut_size)
-{
-	return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
-				       false);
-}
-
-/**
- * iavf_aq_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * set the RSS lookup table, PF or VSI type
- **/
-enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
-				     bool pf_lut, u8 *lut, u16 lut_size)
-{
-	return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
-}
-
-/**
- * iavf_aq_get_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- * @set: set true to set the key, false to get the key
- *
- * get the RSS key per VSI
- **/
-STATIC enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw,
-				      u16 vsi_id,
-				      struct iavf_aqc_get_set_rss_key_data *key,
-				      bool set)
-{
-	enum iavf_status status;
-	struct iavf_aq_desc desc;
-	struct iavf_aqc_get_set_rss_key *cmd_resp =
-			(struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
-	u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
-
-	if (set)
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_set_rss_key);
-	else
-		iavf_fill_default_direct_cmd_desc(&desc,
-						  iavf_aqc_opc_get_rss_key);
-
-	/* Indirect command */
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)IAVF_AQ_FLAG_RD);
-
-	cmd_resp->vsi_id =
-			CPU_TO_LE16((u16)((vsi_id <<
-					  IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
-					  IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
-	cmd_resp->vsi_id |= CPU_TO_LE16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
-
-	status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
-
-	return status;
-}
-
-/**
- * iavf_aq_get_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- **/
-enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
-				     u16 vsi_id,
-				     struct iavf_aqc_get_set_rss_key_data *key)
-{
-	return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
-}
-
-/**
- * iavf_aq_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- * set the RSS key per VSI
- **/
-enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
-				     u16 vsi_id,
-				     struct iavf_aqc_get_set_rss_key_data *key)
-{
-	return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
-}
-
 /* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
@@ -885,30 +519,6 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
 	IAVF_PTT_UNUSED_ENTRY(255)
 };
 
-/**
- * iavf_validate_mac_addr - Validate unicast MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-enum iavf_status iavf_validate_mac_addr(u8 *mac_addr)
-{
-	enum iavf_status status = IAVF_SUCCESS;
-
-	DEBUGFUNC("iavf_validate_mac_addr");
-
-	/* Broadcast addresses ARE multicast addresses
-	 * Make sure it is not a multicast address
-	 * Reject the zero address
-	 */
-	if (IAVF_IS_MULTICAST(mac_addr) ||
-	    (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-	      mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
-		status = IAVF_ERR_INVALID_MAC_ADDR;
-
-	return status;
-}
-
 /**
  * iavf_aq_send_msg_to_pf
  * @hw: pointer to the hardware structure
@@ -989,38 +599,3 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw,
 		vsi_res++;
 	}
 }
-
-/**
- * iavf_vf_reset
- * @hw: pointer to the hardware structure
- *
- * Send a VF_RESET message to the PF. Does not wait for response from PF
- * as none will be forthcoming. Immediately after calling this function,
- * the admin queue should be shut down and (optionally) reinitialized.
- **/
-enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
-{
-	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
-				      IAVF_SUCCESS, NULL, 0, NULL);
-}
-
-/**
-* iavf_aq_clear_all_wol_filters
-* @hw: pointer to the hw struct
-* @cmd_details: pointer to command details structure or NULL
-*
-* Get information for the reason of a Wake Up event
-**/
-enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
-			struct iavf_asq_cmd_details *cmd_details)
-{
-	struct iavf_aq_desc desc;
-	enum iavf_status status;
-
-	iavf_fill_default_direct_cmd_desc(&desc,
-					  iavf_aqc_opc_clear_all_wol_filters);
-
-	status = iavf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
diff --git a/drivers/common/iavf/iavf_prototype.h b/drivers/common/iavf/iavf_prototype.h
index f34e77db0f..5d5deacfe2 100644
--- a/drivers/common/iavf/iavf_prototype.h
+++ b/drivers/common/iavf/iavf_prototype.h
@@ -30,7 +30,6 @@ enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw);
 u16 iavf_clean_asq(struct iavf_hw *hw);
 void iavf_free_adminq_asq(struct iavf_hw *hw);
 void iavf_free_adminq_arq(struct iavf_hw *hw);
-enum iavf_status iavf_validate_mac_addr(u8 *mac_addr);
 void iavf_adminq_init_ring_data(struct iavf_hw *hw);
 __rte_internal
 enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
@@ -51,19 +50,6 @@ void iavf_idle_aq(struct iavf_hw *hw);
 bool iavf_check_asq_alive(struct iavf_hw *hw);
 enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
 
-enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
-				     bool pf_lut, u8 *lut, u16 lut_size);
-enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
-				     bool pf_lut, u8 *lut, u16 lut_size);
-enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw,
-				     u16 seid,
-				     struct iavf_aqc_get_set_rss_key_data *key);
-enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw,
-				     u16 seid,
-				     struct iavf_aqc_get_set_rss_key_data *key);
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
-const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
-
 __rte_internal
 enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
 
@@ -83,7 +69,6 @@ void iavf_destroy_spinlock(struct iavf_spinlock *sp);
 __rte_internal
 void iavf_vf_parse_hw_config(struct iavf_hw *hw,
 			     struct virtchnl_vf_resource *msg);
-enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
 __rte_internal
 enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
 				enum virtchnl_ops v_opcode,
@@ -95,6 +80,4 @@ enum iavf_status iavf_aq_debug_dump(struct iavf_hw *hw, u8 cluster_id,
 				    void *buff, u16 *ret_buff_size,
 				    u8 *ret_next_table, u32 *ret_next_index,
 				    struct iavf_asq_cmd_details *cmd_details);
-enum iavf_status iavf_aq_clear_all_wol_filters(struct iavf_hw *hw,
-			struct iavf_asq_cmd_details *cmd_details);
 #endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/drivers/common/octeontx2/otx2_mbox.c b/drivers/common/octeontx2/otx2_mbox.c
index 6df1e8ea63..e65fe602f7 100644
--- a/drivers/common/octeontx2/otx2_mbox.c
+++ b/drivers/common/octeontx2/otx2_mbox.c
@@ -381,19 +381,6 @@ otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
 	return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
 }
 
-int
-otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
-{
-	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
-	int avail;
-
-	rte_spinlock_lock(&mdev->mbox_lock);
-	avail = mbox->tx_size - mdev->msg_size - msgs_offset();
-	rte_spinlock_unlock(&mdev->mbox_lock);
-
-	return avail;
-}
-
 int
 otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
 {
diff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h
index f6d884c198..7d9c018597 100644
--- a/drivers/common/octeontx2/otx2_mbox.h
+++ b/drivers/common/octeontx2/otx2_mbox.h
@@ -1785,7 +1785,6 @@ int otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg);
 __rte_internal
 int otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
 			  uint32_t tmo);
-int otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid);
 __rte_internal
 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
 					    int size, int size_rsp);
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_pmd.c b/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
index aa7fad6d70..d23e58ff6d 100644
--- a/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
+++ b/drivers/crypto/bcmfs/bcmfs_sym_pmd.c
@@ -399,25 +399,6 @@ bcmfs_sym_dev_create(struct bcmfs_device *fsdev)
 	return 0;
 }
 
-int
-bcmfs_sym_dev_destroy(struct bcmfs_device *fsdev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (fsdev == NULL)
-		return -ENODEV;
-	if (fsdev->sym_dev == NULL)
-		return 0;
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(fsdev->sym_dev->sym_dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	fsdev->sym_rte_dev.name = NULL;
-	fsdev->sym_dev = NULL;
-
-	return 0;
-}
-
 static struct cryptodev_driver bcmfs_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(bcmfs_crypto_drv,
 			       cryptodev_bcmfs_sym_driver,
diff --git a/drivers/crypto/bcmfs/bcmfs_sym_pmd.h b/drivers/crypto/bcmfs/bcmfs_sym_pmd.h
index 65d7046090..d9ddd024ff 100644
--- a/drivers/crypto/bcmfs/bcmfs_sym_pmd.h
+++ b/drivers/crypto/bcmfs/bcmfs_sym_pmd.h
@@ -32,7 +32,4 @@ struct bcmfs_sym_dev_private {
 int
 bcmfs_sym_dev_create(struct bcmfs_device *fdev);
 
-int
-bcmfs_sym_dev_destroy(struct bcmfs_device *fdev);
-
 #endif /* _BCMFS_SYM_PMD_H_ */
diff --git a/drivers/crypto/bcmfs/bcmfs_vfio.c b/drivers/crypto/bcmfs/bcmfs_vfio.c
index dc2def580f..81994d9d56 100644
--- a/drivers/crypto/bcmfs/bcmfs_vfio.c
+++ b/drivers/crypto/bcmfs/bcmfs_vfio.c
@@ -74,34 +74,10 @@ bcmfs_attach_vfio(struct bcmfs_device *dev)
 
 	return 0;
 }
-
-void
-bcmfs_release_vfio(struct bcmfs_device *dev)
-{
-	int ret;
-
-	if (dev == NULL)
-		return;
-
-	/* unmap the addr */
-	munmap(dev->mmap_addr, dev->mmap_size);
-	/* release the device */
-	ret = rte_vfio_release_device(dev->dirname, dev->name,
-				      dev->vfio_dev_fd);
-	if (ret < 0) {
-		BCMFS_LOG(ERR, "cannot release device");
-		return;
-	}
-}
 #else
 int
 bcmfs_attach_vfio(struct bcmfs_device *dev __rte_unused)
 {
 	return -1;
 }
-
-void
-bcmfs_release_vfio(struct bcmfs_device *dev __rte_unused)
-{
-}
 #endif
diff --git a/drivers/crypto/bcmfs/bcmfs_vfio.h b/drivers/crypto/bcmfs/bcmfs_vfio.h
index d0fdf6483f..4177bc1fee 100644
--- a/drivers/crypto/bcmfs/bcmfs_vfio.h
+++ b/drivers/crypto/bcmfs/bcmfs_vfio.h
@@ -10,8 +10,4 @@
 int
 bcmfs_attach_vfio(struct bcmfs_device *dev);
 
-/* Release the bcmfs device from vfio */
-void
-bcmfs_release_vfio(struct bcmfs_device *dev);
-
 #endif /* _BCMFS_VFIO_H_ */
diff --git a/drivers/crypto/caam_jr/caam_jr_pvt.h b/drivers/crypto/caam_jr/caam_jr_pvt.h
index 552d6b9b1b..60cf1fa45b 100644
--- a/drivers/crypto/caam_jr/caam_jr_pvt.h
+++ b/drivers/crypto/caam_jr/caam_jr_pvt.h
@@ -222,7 +222,6 @@ struct uio_job_ring {
 	int uio_minor_number;
 };
 
-int sec_cleanup(void);
 int sec_configure(void);
 void sec_uio_job_rings_init(void);
 struct uio_job_ring *config_job_ring(void);
diff --git a/drivers/crypto/caam_jr/caam_jr_uio.c b/drivers/crypto/caam_jr/caam_jr_uio.c
index e4ee102344..60c551e4f2 100644
--- a/drivers/crypto/caam_jr/caam_jr_uio.c
+++ b/drivers/crypto/caam_jr/caam_jr_uio.c
@@ -471,34 +471,6 @@ sec_configure(void)
 	return config_jr_no;
 }
 
-int
-sec_cleanup(void)
-{
-	int i;
-	struct uio_job_ring *job_ring;
-
-	for (i = 0; i < g_uio_jr_num; i++) {
-		job_ring = &g_uio_job_ring[i];
-		/* munmap SEC's register memory */
-		if (job_ring->register_base_addr) {
-			munmap(job_ring->register_base_addr,
-				job_ring->map_size);
-			job_ring->register_base_addr = NULL;
-		}
-		/* I need to close the fd after shutdown UIO commands need to be
-		 * sent using the fd
-		 */
-		if (job_ring->uio_fd != -1) {
-			CAAM_JR_INFO(
-			"Closed device file for job ring %d , fd = %d",
-			job_ring->jr_id, job_ring->uio_fd);
-			close(job_ring->uio_fd);
-			job_ring->uio_fd = -1;
-		}
-	}
-	return 0;
-}
-
 void
 sec_uio_job_rings_init(void)
 {
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 664ddc1747..fc34b6a639 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -62,26 +62,6 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
 	return NULL;
 }
 
-int
-ccp_read_hwrng(uint32_t *value)
-{
-	struct ccp_device *dev;
-
-	TAILQ_FOREACH(dev, &ccp_list, next) {
-		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
-
-		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
-			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
-			if (*value) {
-				dev->hwrng_retries = 0;
-				return 0;
-			}
-		}
-		dev->hwrng_retries = 0;
-	}
-	return -1;
-}
-
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
@@ -180,28 +160,6 @@ ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
 	}
 }
 
-static void
-ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
-{
-	unsigned long *p = map + WORD_OFFSET(start);
-	const unsigned int size = start + len;
-	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
-	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
-
-	while (len - bits_to_clear >= 0) {
-		*p &= ~mask_to_clear;
-		len -= bits_to_clear;
-		bits_to_clear = BITS_PER_WORD;
-		mask_to_clear = ~0UL;
-		p++;
-	}
-	if (len) {
-		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
-		*p &= ~mask_to_clear;
-	}
-}
-
-
 static unsigned long
 _ccp_find_next_bit(const unsigned long *addr,
 		   unsigned long nbits,
@@ -312,29 +270,6 @@ ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
 	return 0;
 }
 
-static void __rte_unused
-ccp_lsb_free(struct ccp_queue *cmd_q,
-	     unsigned int start,
-	     unsigned int count)
-{
-	int lsbno = start / LSB_SIZE;
-
-	if (!start)
-		return;
-
-	if (cmd_q->lsb == lsbno) {
-		/* An entry from the private LSB */
-		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
-	} else {
-		/* From the shared LSBs */
-		struct ccp_device *ccp = cmd_q->dev;
-
-		rte_spinlock_lock(&ccp->lsb_lock);
-		ccp_bitmap_clear(ccp->lsbmap, start, count);
-		rte_spinlock_unlock(&ccp->lsb_lock);
-	}
-}
-
 static int
 ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
 {
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index 37e04218ce..8bfce5d9fb 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -484,12 +484,4 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
-/**
- * read hwrng value
- *
- * @param trng_value data pointer to write RNG value
- * @return 0 on success otherwise -1
- */
-int ccp_read_hwrng(uint32_t *trng_value);
-
 #endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 87e0defdc6..52bfd72f50 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -80,96 +80,6 @@ int dpseci_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpseci_create() - Create the DPSECI object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPSECI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpseci_cfg *cfg,
-		  uint32_t *obj_id)
-{
-	struct dpseci_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err, i;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpseci_cmd_create *)cmd.params;
-	for (i = 0; i < 8; i++)
-		cmd_params->priorities[i] = cfg->priorities[i];
-	for (i = 0; i < 8; i++)
-		cmd_params->priorities2[i] = cfg->priorities[8 + i];
-	cmd_params->num_tx_queues = cfg->num_tx_queues;
-	cmd_params->num_rx_queues = cfg->num_rx_queues;
-	cmd_params->options = cpu_to_le32(cfg->options);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpseci_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id)
-{
-	struct dpseci_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
-	cmd_params->dpseci_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -216,41 +126,6 @@ int dpseci_disable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpseci_is_enabled() - Check if the DPSECI is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en)
-{
-	struct dpseci_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
-	*en = dpseci_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -446,59 +321,6 @@ int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @attr:	Returned SEC attributes
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			struct dpseci_sec_attr *attr)
-{
-	struct dpseci_rsp_get_sec_attr *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
-	attr->ip_id = le16_to_cpu(rsp_params->ip_id);
-	attr->major_rev = rsp_params->major_rev;
-	attr->minor_rev = rsp_params->minor_rev;
-	attr->era = rsp_params->era;
-	attr->deco_num = rsp_params->deco_num;
-	attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
-	attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
-	attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
-	attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
-	attr->crc_acc_num = rsp_params->crc_acc_num;
-	attr->pk_acc_num = rsp_params->pk_acc_num;
-	attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
-	attr->rng_acc_num = rsp_params->rng_acc_num;
-	attr->md_acc_num = rsp_params->md_acc_num;
-	attr->arc4_acc_num = rsp_params->arc4_acc_num;
-	attr->des_acc_num = rsp_params->des_acc_num;
-	attr->aes_acc_num = rsp_params->aes_acc_num;
-	attr->ccha_acc_num = rsp_params->ccha_acc_num;
-	attr->ptha_acc_num = rsp_params->ptha_acc_num;
-
-	return 0;
-}
-
 /**
  * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -540,226 +362,3 @@ int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
-
-/**
- * dpseci_get_api_version() - Get Data Path SEC Interface API version
- * @mc_io:  Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path sec API
- * @minor_ver:	Minor version of data path sec API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpseci_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver)
-{
-	struct dpseci_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
-/**
- * dpseci_set_opr() - Set Order Restoration configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @index:	The queue index
- * @options:	Configuration mode options
- *			can be OPR_OPT_CREATE or OPR_OPT_RETIRE
- * @cfg:	Configuration options for the OPR
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpseci_set_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   uint8_t options,
-		   struct opr_cfg *cfg)
-{
-	struct dpseci_cmd_set_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpseci_cmd_set_opr *)cmd.params;
-	cmd_params->index = index;
-	cmd_params->options = options;
-	cmd_params->oloe = cfg->oloe;
-	cmd_params->oeane = cfg->oeane;
-	cmd_params->olws = cfg->olws;
-	cmd_params->oa = cfg->oa;
-	cmd_params->oprrws = cfg->oprrws;
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpseci_get_opr() - Retrieve Order Restoration config and query.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @index:	The queue index
- * @cfg:	Returned OPR configuration
- * @qry:	Returned OPR query
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dpseci_get_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   struct opr_cfg *cfg,
-		   struct opr_qry *qry)
-{
-	struct dpseci_rsp_get_opr *rsp_params;
-	struct dpseci_cmd_get_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpseci_cmd_get_opr *)cmd.params;
-	cmd_params->index = index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
-	cfg->oloe = rsp_params->oloe;
-	cfg->oeane = rsp_params->oeane;
-	cfg->olws = rsp_params->olws;
-	cfg->oa = rsp_params->oa;
-	cfg->oprrws = rsp_params->oprrws;
-	qry->rip = dpseci_get_field(rsp_params->flags, RIP);
-	qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
-	qry->nesn = le16_to_cpu(rsp_params->nesn);
-	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-	qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
-	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-	qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
-	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-
-	return 0;
-}
-
-/**
- * dpseci_set_congestion_notification() - Set congestion group
- *	notification configuration
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @cfg:	congestion notification configuration
- *
- * Return:	'0' on success, error code otherwise
- */
-int dpseci_set_congestion_notification(
-			struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			const struct dpseci_congestion_notification_cfg *cfg)
-{
-	struct dpseci_cmd_set_congestion_notification *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(
-			DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
-			cmd_flags,
-			token);
-
-	cmd_params =
-		(struct dpseci_cmd_set_congestion_notification *)cmd.params;
-	cmd_params->dest_id = cfg->dest_cfg.dest_id;
-	cmd_params->dest_priority = cfg->dest_cfg.priority;
-	cmd_params->message_ctx = cfg->message_ctx;
-	cmd_params->message_iova = cfg->message_iova;
-	cmd_params->notification_mode = cfg->notification_mode;
-	cmd_params->threshold_entry = cfg->threshold_entry;
-	cmd_params->threshold_exit = cfg->threshold_exit;
-	dpseci_set_field(cmd_params->type_units,
-			 DEST_TYPE,
-			 cfg->dest_cfg.dest_type);
-	dpseci_set_field(cmd_params->type_units,
-			 CG_UNITS,
-			 cfg->units);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpseci_get_congestion_notification() - Get congestion group
- *	notification configuration
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPSECI object
- * @cfg:	congestion notification configuration
- *
- * Return:	'0' on success, error code otherwise
- */
-int dpseci_get_congestion_notification(
-				struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				struct dpseci_congestion_notification_cfg *cfg)
-{
-	struct dpseci_cmd_set_congestion_notification *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(
-			DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
-			cmd_flags,
-			token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params =
-		(struct dpseci_cmd_set_congestion_notification *)cmd.params;
-
-	cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-	cfg->dest_cfg.priority = rsp_params->dest_priority;
-	cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-	cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-	cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-	cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-	cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-	cfg->units = dpseci_get_field(rsp_params->type_units, CG_UNITS);
-	cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->type_units,
-						DEST_TYPE);
-
-	return 0;
-}
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 279e8f4d4a..fbbfd40815 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -61,17 +61,6 @@ struct dpseci_cfg {
 	uint8_t priorities[DPSECI_MAX_QUEUE_NUM];
 };
 
-int dpseci_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpseci_cfg *cfg,
-		  uint32_t *obj_id);
-
-int dpseci_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id);
-
 int dpseci_enable(struct fsl_mc_io *mc_io,
 		  uint32_t cmd_flags,
 		  uint16_t token);
@@ -80,11 +69,6 @@ int dpseci_disable(struct fsl_mc_io *mc_io,
 		   uint32_t cmd_flags,
 		   uint16_t token);
 
-int dpseci_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en);
-
 int dpseci_reset(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token);
@@ -287,11 +271,6 @@ struct dpseci_sec_attr {
 	uint8_t ptha_acc_num;
 };
 
-int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			struct dpseci_sec_attr *attr);
-
 /**
  * struct dpseci_sec_counters - Structure representing global SEC counters and
  *				not per dpseci counters
@@ -318,25 +297,6 @@ int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
 			    uint16_t token,
 			    struct dpseci_sec_counters *counters);
 
-int dpseci_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver);
-
-int dpseci_set_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   uint8_t options,
-		   struct opr_cfg *cfg);
-
-int dpseci_get_opr(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token,
-		   uint8_t index,
-		   struct opr_cfg *cfg,
-		   struct opr_qry *qry);
-
 /**
  * enum dpseci_congestion_unit - DPSECI congestion units
  * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
@@ -405,16 +365,4 @@ struct dpseci_congestion_notification_cfg {
 	uint16_t notification_mode;
 };
 
-int dpseci_set_congestion_notification(
-			struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			const struct dpseci_congestion_notification_cfg *cfg);
-
-int dpseci_get_congestion_notification(
-			struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			struct dpseci_congestion_notification_cfg *cfg);
-
 #endif /* __FSL_DPSECI_H */
diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c
index ae069794a6..40bd748094 100644
--- a/drivers/crypto/virtio/virtio_pci.c
+++ b/drivers/crypto/virtio/virtio_pci.c
@@ -246,13 +246,6 @@ vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
 	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
 }
 
-void
-vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
-		const void *src, int length)
-{
-	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
-}
-
 uint64_t
 vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
 		uint64_t host_features)
@@ -298,12 +291,6 @@ vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
 	return VTPCI_OPS(hw)->get_status(hw);
 }
 
-uint8_t
-vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
-{
-	return VTPCI_OPS(hw)->get_isr(hw);
-}
-
 static void *
 get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
 {
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
index d9a214dfd0..3092b56952 100644
--- a/drivers/crypto/virtio/virtio_pci.h
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -242,12 +242,7 @@ void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
 uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
 	uint64_t host_features);
 
-void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
-	const void *src, int length);
-
 void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
 	void *dst, int length);
 
-uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
-
 #endif /* _VIRTIO_PCI_H_ */
diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
index 58ff4287df..deaf467090 100644
--- a/drivers/event/dlb/dlb_priv.h
+++ b/drivers/event/dlb/dlb_priv.h
@@ -470,8 +470,6 @@ void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);
 
 int dlb_xstats_init(struct dlb_eventdev *dlb);
 
-void dlb_xstats_uninit(struct dlb_eventdev *dlb);
-
 int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
 			    enum rte_event_dev_xstats_mode mode,
 			    uint8_t queue_port_id, const unsigned int ids[],
diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
index 5f4c590307..6678a8b322 100644
--- a/drivers/event/dlb/dlb_xstats.c
+++ b/drivers/event/dlb/dlb_xstats.c
@@ -578,13 +578,6 @@ dlb_xstats_init(struct dlb_eventdev *dlb)
 	return 0;
 }
 
-void
-dlb_xstats_uninit(struct dlb_eventdev *dlb)
-{
-	rte_free(dlb->xstats);
-	dlb->xstats_count = 0;
-}
-
 int
 dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index b73cf3ff14..56bd4ebe1b 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -536,8 +536,6 @@ void dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f);
 
 int dlb2_xstats_init(struct dlb2_eventdev *dlb2);
 
-void dlb2_xstats_uninit(struct dlb2_eventdev *dlb2);
-
 int dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
 		const unsigned int ids[], uint64_t values[], unsigned int n);
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index 8c3c3cda94..574fca89e8 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -634,13 +634,6 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2)
 	return 0;
 }
 
-void
-dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
-{
-	rte_free(dlb2->xstats);
-	dlb2->xstats_count = 0;
-}
-
 int
 dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
 		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index 69392b56bb..3ddfcaf67c 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -586,52 +586,6 @@ opdl_stage_claim_multithread(struct opdl_stage *s, void *entries,
 	return i;
 }
 
-/* Claim and copy slot pointers, optimised for single-thread operation */
-static __rte_always_inline uint32_t
-opdl_stage_claim_copy_singlethread(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block)
-{
-	num_entries = num_to_process(s, num_entries, block);
-	if (num_entries == 0)
-		return 0;
-	copy_entries_out(s->t, s->head, entries, num_entries);
-	if (seq != NULL)
-		*seq = s->head;
-	s->head += num_entries;
-	return num_entries;
-}
-
-/* Thread-safe version of function to claim and copy pointers to slots */
-static __rte_always_inline uint32_t
-opdl_stage_claim_copy_multithread(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block)
-{
-	uint32_t old_head;
-
-	move_head_atomically(s, &num_entries, &old_head, block, true);
-	if (num_entries == 0)
-		return 0;
-	copy_entries_out(s->t, old_head, entries, num_entries);
-	if (seq != NULL)
-		*seq = old_head;
-	return num_entries;
-}
-
-static __rte_always_inline void
-opdl_stage_disclaim_singlethread_n(struct opdl_stage *s,
-		uint32_t num_entries)
-{
-	uint32_t old_tail = s->shared.tail;
-
-	if (unlikely(num_entries > (s->head - old_tail))) {
-		PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
-				num_entries, s->head - old_tail);
-		num_entries = s->head - old_tail;
-	}
-	__atomic_store_n(&s->shared.tail, num_entries + old_tail,
-			__ATOMIC_RELEASE);
-}
-
 uint32_t
 opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
 		bool block)
@@ -644,26 +598,6 @@ opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
 				block);
 }
 
-uint32_t
-opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
-		const void *entries, uint32_t num_entries, bool block)
-{
-	uint32_t head = s->head;
-
-	num_entries = num_to_process(s, num_entries, block);
-
-	if (num_entries == 0)
-		return 0;
-
-	copy_entries_in(t, head, entries, num_entries);
-
-	s->head += num_entries;
-	__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
-
-	return num_entries;
-
-}
-
 uint32_t
 opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
 		void *entries, uint32_t num_entries, bool block)
@@ -682,25 +616,6 @@ opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
 	return num_entries;
 }
 
-uint32_t
-opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries)
-{
-	/* return (num_to_process(s, num_entries, false)); */
-
-	if (available(s) >= num_entries)
-		return num_entries;
-
-	update_available_seq(s);
-
-	uint32_t avail = available(s);
-
-	if (avail == 0) {
-		rte_pause();
-		return 0;
-	}
-	return (avail <= num_entries) ? avail : num_entries;
-}
-
 uint32_t
 opdl_stage_claim(struct opdl_stage *s, void *entries,
 		uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
@@ -713,41 +628,6 @@ opdl_stage_claim(struct opdl_stage *s, void *entries,
 				seq, block);
 }
 
-uint32_t
-opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block)
-{
-	if (s->threadsafe == false)
-		return opdl_stage_claim_copy_singlethread(s, entries,
-				num_entries, seq, block);
-	else
-		return opdl_stage_claim_copy_multithread(s, entries,
-				num_entries, seq, block);
-}
-
-void
-opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
-		bool block)
-{
-
-	if (s->threadsafe == false) {
-		opdl_stage_disclaim_singlethread_n(s, s->num_claimed);
-	} else {
-		struct claim_manager *disclaims =
-			&s->pending_disclaims[rte_lcore_id()];
-
-		if (unlikely(num_entries > s->num_slots)) {
-			PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
-					num_entries, disclaims->num_claimed);
-			num_entries = disclaims->num_claimed;
-		}
-
-		num_entries = RTE_MIN(num_entries + disclaims->num_to_disclaim,
-				disclaims->num_claimed);
-		opdl_stage_disclaim_multithread_n(s, num_entries, block);
-	}
-}
-
 int
 opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
 {
@@ -769,12 +649,6 @@ opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
 	return num_entries;
 }
 
-uint32_t
-opdl_ring_available(struct opdl_ring *t)
-{
-	return opdl_stage_available(&t->stages[0]);
-}
-
 uint32_t
 opdl_stage_available(struct opdl_stage *s)
 {
@@ -782,14 +656,6 @@ opdl_stage_available(struct opdl_stage *s)
 	return available(s);
 }
 
-void
-opdl_ring_flush(struct opdl_ring *t)
-{
-	struct opdl_stage *s = input_stage(t);
-
-	wait_for_available(s, s->num_slots);
-}
-
 /******************** Non performance sensitive functions ********************/
 
 /* Initial setup of a new stage's context */
@@ -962,12 +828,6 @@ opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
 	return NULL;
 }
 
-void *
-opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
-{
-	return get_slot(t, index);
-}
-
 bool
 opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
 		uint32_t index, bool atomic)
@@ -1046,24 +906,6 @@ opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
 	return ev_updated;
 }
 
-int
-opdl_ring_get_socket(const struct opdl_ring *t)
-{
-	return t->socket;
-}
-
-uint32_t
-opdl_ring_get_num_slots(const struct opdl_ring *t)
-{
-	return t->num_slots;
-}
-
-const char *
-opdl_ring_get_name(const struct opdl_ring *t)
-{
-	return t->name;
-}
-
 /* Check dependency list is valid for a given opdl_ring */
 static int
 check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
@@ -1146,36 +988,6 @@ opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
 	return ret;
 }
 
-struct opdl_stage *
-opdl_ring_get_input_stage(const struct opdl_ring *t)
-{
-	return input_stage(t);
-}
-
-int
-opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
-		uint32_t num_deps)
-{
-	unsigned int i;
-	int ret;
-
-	if ((num_deps == 0) || (!deps)) {
-		PMD_DRV_LOG(ERR, "cannot set NULL dependencies");
-		return -EINVAL;
-	}
-
-	ret = check_deps(s->t, deps, num_deps);
-	if (ret < 0)
-		return ret;
-
-	/* Update deps */
-	for (i = 0; i < num_deps; i++)
-		s->deps[i] = &deps[i]->shared;
-	s->num_deps = num_deps;
-
-	return 0;
-}
-
 struct opdl_ring *
 opdl_stage_get_opdl_ring(const struct opdl_stage *s)
 {
@@ -1245,25 +1057,3 @@ opdl_ring_free(struct opdl_ring *t)
 	if (rte_memzone_free(mz) != 0)
 		PMD_DRV_LOG(ERR, "Cannot free memzone for %s", t->name);
 }
-
-/* search a opdl_ring from its name */
-struct opdl_ring *
-opdl_ring_lookup(const char *name)
-{
-	const struct rte_memzone *mz;
-	char mz_name[RTE_MEMZONE_NAMESIZE];
-
-	snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
-
-	mz = rte_memzone_lookup(mz_name);
-	if (mz == NULL)
-		return NULL;
-
-	return mz->addr;
-}
-
-void
-opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe)
-{
-	s->threadsafe = threadsafe;
-}
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
index 14ababe0bb..c9e2ab6b1b 100644
--- a/drivers/event/opdl/opdl_ring.h
+++ b/drivers/event/opdl/opdl_ring.h
@@ -83,57 +83,6 @@ struct opdl_ring *
 opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
 		uint32_t max_num_stages, int socket);
 
-/**
- * Get pointer to individual slot in a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- * @param index
- *   Index of slot. If greater than the number of slots it will be masked to be
- *   within correct range.
- *
- * @return
- *   A pointer to that slot.
- */
-void *
-opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index);
-
-/**
- * Get NUMA socket used by a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   NUMA socket.
- */
-int
-opdl_ring_get_socket(const struct opdl_ring *t);
-
-/**
- * Get number of slots in a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   Number of slots.
- */
-uint32_t
-opdl_ring_get_num_slots(const struct opdl_ring *t);
-
-/**
- * Get name of a opdl_ring.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   Name string.
- */
-const char *
-opdl_ring_get_name(const struct opdl_ring *t);
-
 /**
  * Adds a new processing stage to a specified opdl_ring instance. Adding a stage
  * while there are entries in the opdl_ring being processed will cause undefined
@@ -160,38 +109,6 @@ opdl_ring_get_name(const struct opdl_ring *t);
 struct opdl_stage *
 opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input);
 
-/**
- * Returns the input stage of a opdl_ring to be used by other API functions.
- *
- * @param t
- *   The opdl_ring.
- *
- * @return
- *   A pointer to the input stage.
- */
-struct opdl_stage *
-opdl_ring_get_input_stage(const struct opdl_ring *t);
-
-/**
- * Sets the dependencies for a stage (clears all the previous deps!). Changing
- * dependencies while there are entries in the opdl_ring being processed will
- * cause undefined behaviour.
- *
- * @param s
- *   The stage to set the dependencies for.
- * @param deps
- *   An array of pointers to other stages that this stage will depends on. The
- *   other stages must be part of the same opdl_ring!
- * @param num_deps
- *   The size of the deps array. This must be > 0.
- *
- * @return
- *   0 on success, a negative value on error.
- */
-int
-opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
-		uint32_t num_deps);
-
 /**
  * Returns the opdl_ring that a stage belongs to.
  *
@@ -228,32 +145,6 @@ uint32_t
 opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
 		bool block);
 
-/**
- * Inputs a new batch of entries into a opdl stage. This function is only
- * threadsafe (with the same opdl parameter) if the threadsafe parameter of
- * opdl_create() was true. For performance reasons, this function does not
- * check input parameters.
- *
- * @param t
- *   The opdl ring to input entries in to.
- * @param s
- *   The stage to copy entries to.
- * @param entries
- *   An array of entries that will be copied in to the opdl ring.
- * @param num_entries
- *   The size of the entries array.
- * @param block
- *   If this is true, the function blocks until enough slots are available to
- *   input all the requested entries. If false, then the function inputs as
- *   many entries as currently possible.
- *
- * @return
- *   The number of entries successfully input.
- */
-uint32_t
-opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
-			const void *entries, uint32_t num_entries, bool block);
-
 /**
  * Copy a batch of entries from the opdl ring. This function is only
  * threadsafe (with the same opdl parameter) if the threadsafe parameter of
@@ -368,41 +259,6 @@ opdl_stage_claim_check(struct opdl_stage *s, void **entries,
 		uint32_t num_entries, uint32_t *seq, bool block,
 		opdl_ring_check_entries_t *check, void *arg);
 
-/**
- * Before processing a batch of entries, a stage must first claim them to get
- * access. This function is threadsafe using same opdl_stage parameter if
- * the stage was created with threadsafe set to true, otherwise it is only
- * threadsafe with a different opdl_stage per thread.
- *
- * The difference between this function and opdl_stage_claim() is that this
- * function copies the entries from the opdl_ring. Note that any changes made to
- * the copied entries will not be reflected back in to the entries in the
- * opdl_ring, so this function probably only makes sense if the entries are
- * pointers to other data. For performance reasons, this function does not check
- * input parameters.
- *
- * @param s
- *   The opdl_ring stage to read entries in.
- * @param entries
- *   An array of entries that will be filled in by this function.
- * @param num_entries
- *   The number of entries to attempt to claim for processing (and the size of
- *   the entries array).
- * @param seq
- *   If not NULL, this is set to the value of the internal stage sequence number
- *   associated with the first entry returned.
- * @param block
- *   If this is true, the function blocks until num_entries slots are available
- *   to process. If false, then the function claims as many entries as
- *   currently possible.
- *
- * @return
- *   The number of entries copied in to the entries array.
- */
-uint32_t
-opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
-		uint32_t num_entries, uint32_t *seq, bool block);
-
 /**
  * This function must be called when a stage has finished its processing of
  * entries, to make them available to any dependent stages. All entries that are
@@ -433,48 +289,6 @@ int
 opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries,
 		bool block);
 
-/**
- * This function can be called when a stage has finished its processing of
- * entries, to make them available to any dependent stages. The difference
- * between this function and opdl_stage_disclaim() is that here only a
- * portion of entries are disclaimed, not all of them. For performance reasons,
- * this function does not check input parameters.
- *
- * @param s
- *   The opdl_ring stage in which to disclaim entries.
- *
- * @param num_entries
- *   The number of entries to disclaim.
- *
- * @param block
- *   Entries are always made available to a stage in the same order that they
- *   were input in the stage. If a stage is multithread safe, this may mean that
- *   full disclaiming of a batch of entries can not be considered complete until
- *   all earlier threads in the stage have disclaimed. If this parameter is true
- *   then the function blocks until the specified number of entries has been
- *   disclaimed (or there are no more entries to disclaim). Otherwise it
- *   disclaims as many claims as currently possible and an attempt to disclaim
- *   them is made the next time a claim or disclaim function for this stage on
- *   this thread is called.
- *
- *   In a single threaded stage, this parameter has no effect.
- */
-void
-opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
-		bool block);
-
-/**
- * Check how many entries can be input.
- *
- * @param t
- *   The opdl_ring instance to check.
- *
- * @return
- *   The number of new entries currently allowed to be input.
- */
-uint32_t
-opdl_ring_available(struct opdl_ring *t);
-
 /**
  * Check how many entries can be processed in a stage.
  *
@@ -487,23 +301,6 @@ opdl_ring_available(struct opdl_ring *t);
 uint32_t
 opdl_stage_available(struct opdl_stage *s);
 
-/**
- * Check how many entries are available to be processed.
- *
- * NOTE : DOES NOT CHANGE ANY STATE WITHIN THE STAGE
- *
- * @param s
- *   The stage to check.
- *
- * @param num_entries
- *   The number of entries to check for availability.
- *
- * @return
- *   The number of entries currently available to be processed in this stage.
- */
-uint32_t
-opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
-
 /**
  * Create empty stage instance and return the pointer.
  *
@@ -543,15 +340,6 @@ opdl_stage_set_queue_id(struct opdl_stage *s,
 void
 opdl_ring_dump(const struct opdl_ring *t, FILE *f);
 
-/**
- * Blocks until all entries in a opdl_ring have been processed by all stages.
- *
- * @param t
- *   The opdl_ring instance to flush.
- */
-void
-opdl_ring_flush(struct opdl_ring *t);
-
 /**
  * Deallocates all resources used by a opdl_ring instance
  *
@@ -561,30 +349,6 @@ opdl_ring_flush(struct opdl_ring *t);
 void
 opdl_ring_free(struct opdl_ring *t);
 
-/**
- * Search for a opdl_ring by its name
- *
- * @param name
- *   The name of the opdl_ring.
- * @return
- *   The pointer to the opdl_ring matching the name, or NULL if not found.
- *
- */
-struct opdl_ring *
-opdl_ring_lookup(const char *name);
-
-/**
- * Set a opdl_stage to threadsafe variable.
- *
- * @param s
- *   The opdl_stage.
- * @param threadsafe
- *   Threadsafe value.
- */
-void
-opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
-
-
 /**
  * Compare the event descriptor with original version in the ring.
  * if key field event descriptor is changed by application, then
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
index 91d1179d88..2a6aa93ffe 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/net/ark/ark_ddm.c
@@ -92,19 +92,6 @@ ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
 		     );
 }
 
-void
-ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
-{
-	struct ark_ddm_stats_t *stats = &ddm->stats;
-
-	ARK_PMD_LOG(INFO, "DDM Stats: %s"
-		      ARK_SU64 ARK_SU64 ARK_SU64
-		      "\n", msg,
-		      "Bytes:", stats->tx_byte_count,
-		      "Packets:", stats->tx_pkt_count,
-		      "MBufs", stats->tx_mbuf_count);
-}
-
 int
 ark_ddm_is_stopped(struct ark_ddm_t *ddm)
 {
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
index 5456b4b5cc..5b722b6ede 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/net/ark/ark_ddm.h
@@ -141,7 +141,6 @@ void ark_ddm_reset(struct ark_ddm_t *ddm);
 void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
 void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
 		   uint32_t interval);
-void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
 void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
 int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
 uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index b8fb69497d..5a7e686f0e 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -15,7 +15,6 @@
 #include "ark_logs.h"
 
 static int set_arg(char *arg, char *val);
-static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
 
 #define ARK_MAX_STR_LEN 64
 union OPTV {
@@ -136,15 +135,6 @@ ark_pktchkr_stop(ark_pkt_chkr_t handle)
 	ARK_PMD_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal);
 }
 
-int
-ark_pktchkr_is_running(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-	uint32_t r = inst->sregs->pkt_start_stop;
-
-	return ((r & 1) == 1);
-}
-
 static void
 ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
 			 uint32_t gen_forever,
@@ -173,48 +163,6 @@ ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
 	inst->cregs->pkt_ctrl = r;
 }
 
-static
-int
-ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-	uint32_t r = inst->cregs->pkt_ctrl;
-
-	return (((r >> 24) & 1) == 1);
-}
-
-int
-ark_pktchkr_wait_done(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-
-	if (ark_pktchkr_is_gen_forever(handle)) {
-		ARK_PMD_LOG(NOTICE, "Pktchk wait_done will not terminate"
-			      " because gen_forever=1\n");
-		return -1;
-	}
-	int wait_cycle = 10;
-
-	while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
-		usleep(1000);
-		wait_cycle--;
-		ARK_PMD_LOG(DEBUG, "Waiting for packet checker %d's"
-			      " internal pktgen to finish sending...\n",
-			      inst->ordinal);
-		ARK_PMD_LOG(DEBUG, "Pktchk %d's pktgen done.\n",
-			      inst->ordinal);
-	}
-	return 0;
-}
-
-int
-ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle)
-{
-	struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
-
-	return inst->cregs->pkts_sent;
-}
-
 void
 ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b)
 {
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h
index b362281776..2b0ba17d90 100644
--- a/drivers/net/ark/ark_pktchkr.h
+++ b/drivers/net/ark/ark_pktchkr.h
@@ -69,8 +69,6 @@ void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
 void ark_pktchkr_run(ark_pkt_chkr_t handle);
 int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
 void ark_pktchkr_stop(ark_pkt_chkr_t handle);
-int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
-int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
 void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
 void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
 void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
@@ -83,6 +81,5 @@ void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
 void ark_pktchkr_parse(char *args);
 void ark_pktchkr_setup(ark_pkt_chkr_t handle);
 void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
-int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
 
 #endif
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
index 25e1218310..00bf165bff 100644
--- a/drivers/net/ark/ark_pktdir.c
+++ b/drivers/net/ark/ark_pktdir.c
@@ -26,31 +26,9 @@ ark_pktdir_init(void *base)
 	return inst;
 }
 
-void
-ark_pktdir_uninit(ark_pkt_dir_t handle)
-{
-	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
-
-	rte_free(inst);
-}
-
 void
 ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v)
 {
 	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
 	inst->regs->ctrl = v;
 }
-
-uint32_t
-ark_pktdir_status(ark_pkt_dir_t handle)
-{
-	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
-	return inst->regs->ctrl;
-}
-
-uint32_t
-ark_pktdir_stall_cnt(ark_pkt_dir_t handle)
-{
-	struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
-	return inst->regs->stall_cnt;
-}
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h
index 4afd128f95..e7f2026a00 100644
--- a/drivers/net/ark/ark_pktdir.h
+++ b/drivers/net/ark/ark_pktdir.h
@@ -33,9 +33,6 @@ struct ark_pkt_dir_inst {
 };
 
 ark_pkt_dir_t ark_pktdir_init(void *base);
-void ark_pktdir_uninit(ark_pkt_dir_t handle);
 void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
-uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
-uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
 
 #endif
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index 4a02662a46..9769c46b47 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -186,33 +186,6 @@ ark_pktgen_is_gen_forever(ark_pkt_gen_t handle)
 	return (((r >> 24) & 1) == 1);
 }
 
-void
-ark_pktgen_wait_done(ark_pkt_gen_t handle)
-{
-	struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
-	int wait_cycle = 10;
-
-	if (ark_pktgen_is_gen_forever(handle))
-		ARK_PMD_LOG(NOTICE, "Pktgen wait_done will not terminate"
-			    " because gen_forever=1\n");
-
-	while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) {
-		usleep(1000);
-		wait_cycle--;
-		ARK_PMD_LOG(DEBUG,
-			      "Waiting for pktgen %d to finish sending...\n",
-			      inst->ordinal);
-	}
-	ARK_PMD_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal);
-}
-
-uint32_t
-ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle)
-{
-	struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
-	return inst->regs->pkts_sent;
-}
-
 void
 ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b)
 {
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h
index c61dfee6db..cc78577d3d 100644
--- a/drivers/net/ark/ark_pktgen.h
+++ b/drivers/net/ark/ark_pktgen.h
@@ -60,8 +60,6 @@ uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
 uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
 uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
 void ark_pktgen_reset(ark_pkt_gen_t handle);
-void ark_pktgen_wait_done(ark_pkt_gen_t handle);
-uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
 void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
 void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
 void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
index a740d36d43..2132f4e972 100644
--- a/drivers/net/ark/ark_udm.c
+++ b/drivers/net/ark/ark_udm.c
@@ -135,21 +135,6 @@ ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
 		      "MBuf Count", udm->stats.rx_mbuf_count);
 }
 
-void
-ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
-{
-	ARK_PMD_LOG(INFO, "UDM Queue %3u Stats: %s"
-		      ARK_SU64 ARK_SU64
-		      ARK_SU64 ARK_SU64
-		      ARK_SU64 "\n",
-		      qid, msg,
-		      "Pkts Received", udm->qstats.q_packet_count,
-		      "Pkts Finalized", udm->qstats.q_ff_packet_count,
-		      "Pkts Dropped", udm->qstats.q_pkt_drop,
-		      "Bytes Count", udm->qstats.q_byte_count,
-		      "MBuf Count", udm->qstats.q_mbuf_count);
-}
-
 void
 ark_udm_dump(struct ark_udm_t *udm, const char *msg)
 {
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
index 5846c825b8..7f0d3c2a5e 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/net/ark/ark_udm.h
@@ -145,8 +145,6 @@ void ark_udm_configure(struct ark_udm_t *udm,
 void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
 void ark_udm_stats_reset(struct ark_udm_t *udm);
 void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
-void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
-			      uint16_t qid);
 void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
 void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
 void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
index 7d0e724019..415099e04a 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
@@ -480,20 +480,6 @@ int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
 	return aq_hw_err_from_flags(self);
 }
 
-int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
-{
-	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
-	return aq_hw_err_from_flags(self);
-}
-
-int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
-{
-	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
-	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
-
-	return aq_hw_err_from_flags(self);
-}
-
 int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 {
 	*mask = hw_atl_itr_irq_statuslsw_get(self);
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
index d1ba2aceb3..4a155d2bc7 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
@@ -35,8 +35,6 @@ int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
 int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
 				struct aq_rss_parameters *rss_params);
 
-int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
-int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
 int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
 
 #endif /* HW_ATL_B0_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/atlantic/hw_atl/hw_atl_llh.c
index 2dc5be2ff1..b29419bce3 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh.c
@@ -22,28 +22,6 @@ u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
 	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
 }
 
-void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
-			    HW_ATL_GLB_REG_RES_DIS_MSK,
-			    HW_ATL_GLB_REG_RES_DIS_SHIFT,
-			    glb_reg_res_dis);
-}
-
-void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
-			    HW_ATL_GLB_SOFT_RES_MSK,
-			    HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
-}
-
-u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
-				  HW_ATL_GLB_SOFT_RES_MSK,
-				  HW_ATL_GLB_SOFT_RES_SHIFT);
-}
-
 u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
 {
 	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
@@ -275,13 +253,6 @@ void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
 	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
 }
 
-void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
-			    HW_ATL_ITR_REG_RES_DSBL_MSK,
-			    HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
-}
-
 void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
 					u32 irq_status_clearlsw)
 {
@@ -293,18 +264,6 @@ u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
 	return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
 }
 
-u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
-				  HW_ATL_ITR_RES_SHIFT);
-}
-
-void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
-			    HW_ATL_ITR_RES_SHIFT, res_irq);
-}
-
 /* rdm */
 void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
 {
@@ -374,13 +333,6 @@ void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
 			    rx_desc_head_splitting);
 }
 
-u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
-				  HW_ATL_RDM_DESCDHD_MSK,
-				  HW_ATL_RDM_DESCDHD_SHIFT);
-}
-
 void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
 				u32 descriptor)
 {
@@ -389,15 +341,6 @@ void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
 			    rx_desc_len);
 }
 
-void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
-				u32 descriptor)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
-			    HW_ATL_RDM_DESCDRESET_MSK,
-			    HW_ATL_RDM_DESCDRESET_SHIFT,
-			    rx_desc_res);
-}
-
 void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
 					 u32 rx_desc_wr_wb_irq_en)
 {
@@ -425,15 +368,6 @@ void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
 			    rx_pld_dca_en);
 }
 
-void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 rdm_intr_moder_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
-			    HW_ATL_RDM_INT_RIM_EN_MSK,
-			    HW_ATL_RDM_INT_RIM_EN_SHIFT,
-			    rdm_intr_moder_en);
-}
-
 /* reg */
 void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
 				u32 regidx)
@@ -441,21 +375,11 @@ void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
 	aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
 }
 
-u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
-}
-
 void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
 {
 	aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
 }
 
-void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
-}
-
 void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 rx_dma_desc_base_addrlsw,
 					       u32 descriptor)
@@ -472,11 +396,6 @@ void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
 			rx_dma_desc_base_addrmsw);
 }
 
-u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
-{
-	return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
-}
-
 void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 					 u32 rx_dma_desc_tail_ptr,
 					 u32 descriptor)
@@ -506,26 +425,6 @@ void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
 			rx_flr_rss_control1);
 }
 
-void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
-				    u32 rx_filter_control2)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
-}
-
-void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 rx_intr_moderation_ctl,
-				       u32 queue)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
-			rx_intr_moderation_ctl);
-}
-
-void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
-				     u32 tx_dma_debug_ctl)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
-}
-
 void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 tx_dma_desc_base_addrlsw,
 					       u32 descriptor)
@@ -552,22 +451,7 @@ void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 			tx_dma_desc_tail_ptr);
 }
 
-void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 tx_intr_moderation_ctl,
-				       u32 queue)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
-			tx_intr_moderation_ctl);
-}
-
 /* RPB: rx packet buffer */
-void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
-			    HW_ATL_RPB_DMA_SYS_LBK_MSK,
-			    HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
-}
-
 void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
 					   u32 rx_traf_class_mode)
 {
@@ -577,13 +461,6 @@ void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
 			    rx_traf_class_mode);
 }
 
-u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
-			HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
-			HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
-}
-
 void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
 {
 	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
@@ -664,15 +541,6 @@ void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
 			    HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
 }
 
-void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
-				      u32 l2multicast_flr_en,
-				      u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
-			    HW_ATL_RPFL2MC_ENF_MSK,
-			    HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
-}
-
 void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
 					 u32 l2promiscuous_mode_en)
 {
@@ -813,15 +681,6 @@ void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
 			    HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
 }
 
-void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
-				       u32 tpo_to_rpf_sys_lbk)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
-			    HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
-			    HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
-			    tpo_to_rpf_sys_lbk);
-}
-
 void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
 {
 	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
@@ -847,24 +706,6 @@ void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
 			    vlan_prom_mode_en);
 }
 
-void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-						 u32 vlan_acc_untagged_packets)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
-			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
-			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
-			    vlan_acc_untagged_packets);
-}
-
-void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
-				      u32 vlan_untagged_act)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
-			    HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
-			    HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
-			    vlan_untagged_act);
-}
-
 void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
 				u32 filter)
 {
@@ -892,73 +733,6 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
 			    vlan_id_flr);
 }
 
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
-				u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
-			    HW_ATL_RPF_ET_ENF_MSK,
-			    HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
-}
-
-void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-					  u32 etht_user_priority_en, u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
-			    HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
-			    etht_user_priority_en);
-}
-
-void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
-				     u32 etht_rx_queue_en,
-				     u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
-			    HW_ATL_RPF_ET_RXQFEN_MSK,
-			    HW_ATL_RPF_ET_RXQFEN_SHIFT,
-			    etht_rx_queue_en);
-}
-
-void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
-				       u32 etht_user_priority,
-				       u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
-			    HW_ATL_RPF_ET_UPF_MSK,
-			    HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
-}
-
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-				  u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
-			    HW_ATL_RPF_ET_RXQF_MSK,
-			    HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
-}
-
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-				   u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
-			    HW_ATL_RPF_ET_MNG_RXQF_MSK,
-			    HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
-			    etht_mgt_queue);
-}
-
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
-				 u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
-			    HW_ATL_RPF_ET_ACTF_MSK,
-			    HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
-}
-
-void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
-			    HW_ATL_RPF_ET_VALF_MSK,
-			    HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
-}
-
 /* RPO: rx packet offload */
 void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
 					      u32 ipv4header_crc_offload_en)
@@ -1156,13 +930,6 @@ void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
 			    tx_desc_en);
 }
 
-u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
-				  HW_ATL_TDM_DESCDHD_MSK,
-				  HW_ATL_TDM_DESCDHD_SHIFT);
-}
-
 void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
 				u32 descriptor)
 {
@@ -1191,15 +958,6 @@ void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
 			    tx_desc_wr_wb_threshold);
 }
 
-void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 tdm_irq_moderation_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
-			    HW_ATL_TDM_INT_MOD_EN_MSK,
-			    HW_ATL_TDM_INT_MOD_EN_SHIFT,
-			    tdm_irq_moderation_en);
-}
-
 /* thm */
 void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
 					      u32 lso_tcp_flag_of_first_pkt)
@@ -1236,13 +994,6 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
 			    HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
 }
 
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
-			HW_ATL_TPB_TX_TC_MODE_MSK,
-			HW_ATL_TPB_TX_TC_MODE_SHIFT);
-}
-
 void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
 				   u32 tx_traf_class_mode)
 {
@@ -1272,15 +1023,6 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
 			    tx_buff_lo_threshold_per_tc);
 }
 
-void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_dma_sys_lbk_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
-			    HW_ATL_TPB_DMA_SYS_LBK_MSK,
-			    HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
-			    tx_dma_sys_lbk_en);
-}
-
 void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 tx_pkt_buff_size_per_tc,
 					    u32 buffer)
@@ -1319,15 +1061,6 @@ void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
 			    tcp_udp_crc_offload_en);
 }
 
-void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_pkt_sys_lbk_en)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
-			    HW_ATL_TPO_PKT_SYS_LBK_MSK,
-			    HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
-			    tx_pkt_sys_lbk_en);
-}
-
 /* TPS: tx packet scheduler */
 void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
 					      u32 tx_pkt_shed_data_arb_mode)
@@ -1422,58 +1155,7 @@ void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
 			    HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
 }
 
-/* msm */
-u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
-				  HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
-				  HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
-}
-
-void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					       u32 reg_addr_for_indirect_addr)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
-			    HW_ATL_MSM_REG_ADDR_MSK,
-			    HW_ATL_MSM_REG_ADDR_SHIFT,
-			    reg_addr_for_indirect_addr);
-}
-
-void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
-			    HW_ATL_MSM_REG_RD_STROBE_MSK,
-			    HW_ATL_MSM_REG_RD_STROBE_SHIFT,
-			    reg_rd_strobe);
-}
-
-u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
-{
-	return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
-}
-
-void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
-{
-	aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
-}
-
-void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
-			    HW_ATL_MSM_REG_WR_STROBE_MSK,
-			    HW_ATL_MSM_REG_WR_STROBE_SHIFT,
-			    reg_wr_strobe);
-}
-
 /* pci */
-void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
-{
-	aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
-			    HW_ATL_PCI_REG_RES_DSBL_MSK,
-			    HW_ATL_PCI_REG_RES_DSBL_SHIFT,
-			    pci_reg_res_dis);
-}
-
 void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
 					u32 glb_cpu_scratch_scp,
 					u32 scratch_scp)
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/atlantic/hw_atl/hw_atl_llh.h
index e30083cea5..493fd88934 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh.h
@@ -21,15 +21,6 @@ void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw,	u32 glb_cpu_sem,
 /* get global microprocessor semaphore */
 u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
 
-/* set global register reset disable */
-void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
-
-/* set soft reset */
-void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
-
-/* get soft reset */
-u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
-
 /* stats */
 
 u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
@@ -130,9 +121,6 @@ void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
 /* set interrupt mask set lsw */
 void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
 
-/* set interrupt register reset disable */
-void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
-
 /* set interrupt status clear lsw */
 void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
 					u32 irq_status_clearlsw);
@@ -140,12 +128,6 @@ void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
 /* get interrupt status lsw */
 u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
 
-/* get reset interrupt */
-u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
-
-/* set reset interrupt */
-void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
-
 /* rdm */
 
 /* set cpu id */
@@ -175,9 +157,6 @@ void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
 					   u32 rx_desc_head_splitting,
 				    u32 descriptor);
 
-/* get rx descriptor head pointer */
-u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
-
 /* set rx descriptor length */
 void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
 				u32 descriptor);
@@ -199,29 +178,15 @@ void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
 					   u32 rx_desc_head_buff_size,
 					   u32 descriptor);
 
-/* set rx descriptor reset */
-void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
-				u32 descriptor);
-
-/* Set RDM Interrupt Moderation Enable */
-void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 rdm_intr_moder_en);
-
 /* reg */
 
 /* set general interrupt mapping register */
 void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
 				u32 regidx);
 
-/* get general interrupt status register */
-u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
-
 /* set interrupt global control register */
 void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
 
-/* set interrupt throttle register */
-void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
-
 /* set rx dma descriptor base address lsw */
 void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 rx_dma_desc_base_addrlsw,
@@ -232,9 +197,6 @@ void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
 					       u32 rx_dma_desc_base_addrmsw,
 					u32 descriptor);
 
-/* get rx dma descriptor status register */
-u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
-
 /* set rx dma descriptor tail pointer register */
 void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 					 u32 rx_dma_desc_tail_ptr,
@@ -252,18 +214,6 @@ void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
 void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
 				       u32 rx_flr_rss_control1);
 
-/* Set RX Filter Control Register 2 */
-void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
-
-/* Set RX Interrupt Moderation Control Register */
-void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 rx_intr_moderation_ctl,
-				u32 queue);
-
-/* set tx dma debug control */
-void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
-				     u32 tx_dma_debug_ctl);
-
 /* set tx dma descriptor base address lsw */
 void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
 					       u32 tx_dma_desc_base_addrlsw,
@@ -279,11 +229,6 @@ void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
 					 u32 tx_dma_desc_tail_ptr,
 					 u32 descriptor);
 
-/* Set TX Interrupt Moderation Control Register */
-void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				       u32 tx_intr_moderation_ctl,
-				       u32 queue);
-
 /* set global microprocessor scratch pad */
 void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
 					u32 glb_cpu_scratch_scp,
@@ -291,16 +236,10 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
 
 /* rpb */
 
-/* set dma system loopback */
-void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
-
 /* set rx traffic class mode */
 void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
 					   u32 rx_traf_class_mode);
 
-/* get rx traffic class mode */
-u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
-
 /* set rx buffer enable */
 void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
 
@@ -341,11 +280,6 @@ void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
 void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
 				       u32 l2broadcast_flr_act);
 
-/* set l2 multicast filter enable */
-void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
-				      u32 l2multicast_flr_en,
-				      u32 filter);
-
 /* set l2 promiscuous mode enable */
 void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
 					 u32 l2promiscuous_mode_en);
@@ -403,10 +337,6 @@ u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
 /* set rss redirection write enable */
 void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
 
-/* set tpo to rpf system loopback */
-void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
-				       u32 tpo_to_rpf_sys_lbk);
-
 /* set vlan inner ethertype */
 void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
 
@@ -417,14 +347,6 @@ void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
 void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
 				      u32 vlan_prom_mode_en);
 
-/* Set VLAN untagged action */
-void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
-				      u32 vlan_untagged_act);
-
-/* Set VLAN accept untagged packets */
-void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-						 u32 vlan_acc_untagged_packets);
-
 /* Set VLAN filter enable */
 void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
 				u32 filter);
@@ -437,40 +359,6 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
 void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
 				u32 filter);
 
-/* set ethertype filter enable */
-void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
-				u32 filter);
-
-/* set  ethertype user-priority enable */
-void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-					  u32 etht_user_priority_en,
-					  u32 filter);
-
-/* set  ethertype rx queue enable */
-void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
-				     u32 etht_rx_queue_en,
-				     u32 filter);
-
-/* set ethertype rx queue */
-void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-				  u32 filter);
-
-/* set ethertype user-priority */
-void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
-				       u32 etht_user_priority,
-				       u32 filter);
-
-/* set ethertype management queue */
-void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-				   u32 filter);
-
-/* set ethertype filter action */
-void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
-				 u32 filter);
-
-/* set ethertype filter */
-void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
-
 /* rpo */
 
 /* set ipv4 header checksum offload enable */
@@ -552,9 +440,6 @@ void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
 void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
 				   u32 dca);
 
-/* get tx descriptor head pointer */
-u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
-
 /* set tx descriptor length */
 void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
 				u32 descriptor);
@@ -568,9 +453,6 @@ void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
 					    u32 tx_desc_wr_wb_threshold,
 				     u32 descriptor);
 
-/* Set TDM Interrupt Moderation Enable */
-void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-				      u32 tdm_irq_moderation_en);
 /* thm */
 
 /* set lso tcp flag of first packet */
@@ -591,9 +473,6 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
 void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
 				   u32 tx_traf_class_mode);
 
-/* get TX Traffic Class Mode */
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
-
 /* set tx buffer enable */
 void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
 
@@ -607,10 +486,6 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
 						u32 tx_buff_lo_threshold_per_tc,
 					 u32 buffer);
 
-/* set tx dma system loopback enable */
-void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_dma_sys_lbk_en);
-
 /* set tx packet buffer size (per tc) */
 void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 tx_pkt_buff_size_per_tc,
@@ -630,10 +505,6 @@ void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
 void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
 					   u32 tcp_udp_crc_offload_en);
 
-/* set tx pkt system loopback enable */
-void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
-				      u32 tx_pkt_sys_lbk_en);
-
 /* tps */
 
 /* set tx packet scheduler data arbitration mode */
@@ -681,32 +552,8 @@ void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
 /* set tx register reset disable */
 void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
 
-/* msm */
-
-/* get register access status */
-u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
-
-/* set  register address for indirect address */
-void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					       u32 reg_addr_for_indirect_addr);
-
-/* set register read strobe */
-void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
-
-/* get  register read data */
-u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
-
-/* set  register write data */
-void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
-
-/* set register write strobe */
-void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
-
 /* pci */
 
-/* set pci register reset disable */
-void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
-
 /* set uP Force Interrupt */
 void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
 
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
index 84d11ab3a5..c94f5112f1 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
@@ -682,37 +682,6 @@ static int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 	return err;
 }
 
-unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
-{
-	unsigned int ret = 0U;
-
-	switch (mbps) {
-	case 100U:
-		ret = 5U;
-		break;
-
-	case 1000U:
-		ret = 4U;
-		break;
-
-	case 2500U:
-		ret = 3U;
-		break;
-
-	case 5000U:
-		ret = 1U;
-		break;
-
-	case 10000U:
-		ret = 0U;
-		break;
-
-	default:
-		break;
-	}
-	return ret;
-}
-
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
 {
 	u32 chip_features = 0U;
@@ -795,11 +764,6 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
 	return 0;
 }
 
-struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
-{
-	return &self->curr_stats;
-}
-
 static const u32 hw_atl_utils_hw_mac_regs[] = {
 	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
 	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
index d8fab010cf..f5e2b472a9 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
@@ -617,8 +617,6 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 
 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
-unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
-
 unsigned int hw_atl_utils_hw_get_reg_length(void);
 
 int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
@@ -633,8 +631,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
 int hw_atl_utils_update_stats(struct aq_hw_s *self);
 
-struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
-
 int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
 				  u32 *p, u32 cnt);
 
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 61f99c6408..7ade8f42d3 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -456,23 +456,6 @@ static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
 	}
 }
 
-/**
- * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
- *
- * @sc:			device handle
- * @o:			vlan_mac object
- *
- * @details Notice if a pending execution exists, it would perform it -
- *          possibly releasing and reclaiming the execution queue lock.
- */
-void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
-				   struct ecore_vlan_mac_obj *o)
-{
-	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
-	__ecore_vlan_mac_h_write_unlock(sc, o);
-	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
-}
-
 /**
  * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
  *
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index d58072dac0..bfb55e8d01 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1871,8 +1871,6 @@ void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
 				  struct ecore_vlan_mac_obj *o);
 int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc,
 				struct ecore_vlan_mac_obj *o);
-void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
-					  struct ecore_vlan_mac_obj *o);
 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
 			   struct ecore_vlan_mac_ramrod_params *p);
 
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index b65126d718..67ebdaaa44 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -1154,931 +1154,6 @@ static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
 	return ELINK_STATUS_OK;
 }
 
-/******************************************************************/
-/*				ETS section			  */
-/******************************************************************/
-static void elink_ets_e2e3a0_disabled(struct elink_params *params)
-{
-	/* ETS disabled configuration*/
-	struct bnx2x_softc *sc = params->sc;
-
-	ELINK_DEBUG_P0(sc, "ETS E2E3 disabled configuration");
-
-	/* mapping between entry  priority to client number (0,1,2 -debug and
-	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
-	 * 3bits client num.
-	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	 * cos1-100     cos0-011     dbg1-010     dbg0-001     MCP-000
-	 */
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
-	 * COS0 entry, 4 - COS1 entry.
-	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-	 * bit4   bit3	  bit2   bit1	  bit0
-	 * MCP and debug are strict
-	 */
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
-	/* defines which entries (clients) are subjected to WFQ arbitration */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-	/* For strict priority entries defines the number of consecutive
-	 * slots for the highest priority.
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/* mapping between the CREDIT_WEIGHT registers and actual client
-	 * numbers
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
-	REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
-	/* ETS mode disable */
-	REG_WR(sc, PBF_REG_ETS_ENABLED, 0);
-	/* If ETS mode is enabled (there is no strict priority) defines a WFQ
-	 * weight for COS0/COS1.
-	 */
-	REG_WR(sc, PBF_REG_COS0_WEIGHT, 0x2710);
-	REG_WR(sc, PBF_REG_COS1_WEIGHT, 0x2710);
-	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
-	REG_WR(sc, PBF_REG_COS0_UPPER_BOUND, 0x989680);
-	REG_WR(sc, PBF_REG_COS1_UPPER_BOUND, 0x989680);
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-}
-/******************************************************************************
- * Description:
- *	Getting min_w_val will be set according to line speed .
- *.
- ******************************************************************************/
-static uint32_t elink_ets_get_min_w_val_nig(const struct elink_vars *vars)
-{
-	uint32_t min_w_val = 0;
-	/* Calculate min_w_val.*/
-	if (vars->link_up) {
-		if (vars->line_speed == ELINK_SPEED_20000)
-			min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-		else
-			min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
-	} else {
-		min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-	}
-	/* If the link isn't up (static configuration for example ) The
-	 * link will be according to 20GBPS.
-	 */
-	return min_w_val;
-}
-/******************************************************************************
- * Description:
- *	Getting credit upper bound form min_w_val.
- *.
- ******************************************************************************/
-static uint32_t elink_ets_get_credit_upper_bound(const uint32_t min_w_val)
-{
-	const uint32_t credit_upper_bound = (uint32_t)
-						ELINK_MAXVAL((150 * min_w_val),
-							ELINK_MAX_PACKET_SIZE);
-	return credit_upper_bound;
-}
-/******************************************************************************
- * Description:
- *	Set credit upper bound for NIG.
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_set_credit_upper_bound_nig(
-	const struct elink_params *params,
-	const uint32_t min_w_val)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint32_t credit_upper_bound =
-	    elink_ets_get_credit_upper_bound(min_w_val);
-
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
-		NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
-		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
-
-	if (!port) {
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
-			credit_upper_bound);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
-			credit_upper_bound);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
-			credit_upper_bound);
-	}
-}
-/******************************************************************************
- * Description:
- *	Will return the NIG ETS registers to init values.Except
- *	credit_upper_bound.
- *	That isn't used in this configuration (No WFQ is enabled) and will be
- *	configured according to spec
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_nig_disabled(const struct elink_params *params,
-					const struct elink_vars *vars)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint32_t min_w_val = elink_ets_get_min_w_val_nig(vars);
-	/* Mapping between entry  priority to client number (0,1,2 -debug and
-	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
-	 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
-	 * reset value or init tool
-	 */
-	if (port) {
-		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
-		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
-	} else {
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
-	}
-	/* For strict priority entries defines the number of consecutive
-	 * slots for the highest priority.
-	 */
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
-		   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/* Mapping between the CREDIT_WEIGHT registers and actual client
-	 * numbers
-	 */
-	if (port) {
-		/*Port 1 has 6 COS*/
-		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
-		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
-	} else {
-		/*Port 0 has 9 COS*/
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
-		       0x43210876);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
-	}
-
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
-	 * COS0 entry, 4 - COS1 entry.
-	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-	 * bit4   bit3	  bit2   bit1	  bit0
-	 * MCP and debug are strict
-	 */
-	if (port)
-		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
-	else
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
-	/* defines which entries (clients) are subjected to WFQ arbitration */
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
-		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-
-	/* Please notice the register address are note continuous and a
-	 * for here is note appropriate.In 2 port mode port0 only COS0-5
-	 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
-	 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
-	 * are never used for WFQ
-	 */
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
-		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
-	if (!port) {
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
-	}
-
-	elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
-}
-/******************************************************************************
- * Description:
- *	Set credit upper bound for PBF.
- *.
- ******************************************************************************/
-static void elink_ets_e3b0_set_credit_upper_bound_pbf(
-	const struct elink_params *params,
-	const uint32_t min_w_val)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint32_t credit_upper_bound =
-	    elink_ets_get_credit_upper_bound(min_w_val);
-	const uint8_t port = params->port;
-	uint32_t base_upper_bound = 0;
-	uint8_t max_cos = 0;
-	uint8_t i = 0;
-	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
-	 * port mode port1 has COS0-2 that can be used for WFQ.
-	 */
-	if (!port) {
-		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-	} else {
-		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
-	}
-
-	for (i = 0; i < max_cos; i++)
-		REG_WR(sc, base_upper_bound + (i << 2), credit_upper_bound);
-}
-
-/******************************************************************************
- * Description:
- *	Will return the PBF ETS registers to init values, except
- *	credit_upper_bound, which isn't used in this configuration
- *	(no WFQ is enabled) and will be configured according to spec.
- ******************************************************************************/
-static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL;
-	uint8_t i = 0;
-	uint32_t base_weight = 0;
-	uint8_t max_cos = 0;
-
-	/* Mapping between entry  priority to client number 0 - COS0
-	 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
-	 * TODO_ETS - Should be done by reset value or init tool
-	 */
-	if (port)
-		/*  0x688 (|011|0 10|00 1|000) */
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
-	else
-		/*  (10 1|100 |011|0 10|00 1|000) */
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
-
-	/* TODO_ETS - Should be done by reset value or init tool */
-	if (port)
-		/* 0x688 (|011|0 10|00 1|000)*/
-		REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
-	else
-	/* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
-	REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
-		   PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
-
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
-		   PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
-		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
-	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.
-	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
-	 */
-	if (!port) {
-		base_weight = PBF_REG_COS0_WEIGHT_P0;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-	} else {
-		base_weight = PBF_REG_COS0_WEIGHT_P1;
-		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
-	}
-
-	for (i = 0; i < max_cos; i++)
-		REG_WR(sc, base_weight + (0x4 * i), 0);
-
-	elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
-}
-/******************************************************************************
- * Description:
- *	E3B0 disable will basically return the registers to their init values.
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
-				   const struct elink_vars *vars)
-{
-	struct bnx2x_softc *sc = params->sc;
-
-	if (!CHIP_IS_E3B0(sc)) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_e3b0_disabled the chip isn't E3B0");
-		return ELINK_STATUS_ERROR;
-	}
-
-	elink_ets_e3b0_nig_disabled(params, vars);
-
-	elink_ets_e3b0_pbf_disabled(params);
-
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Disable will basically return the registers to their init values.
- *
- ******************************************************************************/
-elink_status_t elink_ets_disabled(struct elink_params *params,
-		      struct elink_vars *vars)
-{
-	struct bnx2x_softc *sc = params->sc;
-	elink_status_t elink_status = ELINK_STATUS_OK;
-
-	if ((CHIP_IS_E2(sc)) || (CHIP_IS_E3A0(sc))) {
-		elink_ets_e2e3a0_disabled(params);
-	} else if (CHIP_IS_E3B0(sc)) {
-		elink_status = elink_ets_e3b0_disabled(params, vars);
-	} else {
-		ELINK_DEBUG_P0(sc, "elink_ets_disabled - chip not supported");
-		return ELINK_STATUS_ERROR;
-	}
-
-	return elink_status;
-}
-
-/******************************************************************************
- * Description
- *	Set the COS mapping to SP and BW; until this point none of the COS
- *	has been set as SP or BW.
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
-		  __rte_unused const struct elink_ets_params *ets_params,
-		  const uint8_t cos_sp_bitmap,
-		  const uint8_t cos_bw_bitmap)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint8_t nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
-	const uint8_t pbf_cli_sp_bitmap = cos_sp_bitmap;
-	const uint8_t nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
-	const uint8_t pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
-
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
-	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
-	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
-
-	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
-	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
-	       nig_cli_subject2wfq_bitmap);
-
-	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
-	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
-	       pbf_cli_subject2wfq_bitmap);
-
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
- *	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_set_cos_bw(struct bnx2x_softc *sc,
-				     const uint8_t cos_entry,
-				     const uint32_t min_w_val_nig,
-				     const uint32_t min_w_val_pbf,
-				     const uint16_t total_bw,
-				     const uint8_t bw,
-				     const uint8_t port)
-{
-	uint32_t nig_reg_address_crd_weight = 0;
-	uint32_t pbf_reg_address_crd_weight = 0;
-	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */
-	const uint32_t cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
-	const uint32_t cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
-
-	switch (cos_entry) {
-	case 0:
-	    nig_reg_address_crd_weight =
-		 (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
-		     NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
-	     pbf_reg_address_crd_weight = (port) ?
-		 PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
-		break;
-	case 1:
-	     nig_reg_address_crd_weight = (port) ?
-		 NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
-	     pbf_reg_address_crd_weight = (port) ?
-		 PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
-		break;
-	case 2:
-	     nig_reg_address_crd_weight = (port) ?
-		 NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
-
-		 pbf_reg_address_crd_weight = (port) ?
-		     PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
-		break;
-	case 3:
-		if (port)
-			return ELINK_STATUS_ERROR;
-		nig_reg_address_crd_weight =
-			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
-		pbf_reg_address_crd_weight =
-			PBF_REG_COS3_WEIGHT_P0;
-		break;
-	case 4:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	     nig_reg_address_crd_weight =
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
-	     pbf_reg_address_crd_weight = PBF_REG_COS4_WEIGHT_P0;
-		break;
-	case 5:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	     nig_reg_address_crd_weight =
-		 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
-	     pbf_reg_address_crd_weight = PBF_REG_COS5_WEIGHT_P0;
-		break;
-	}
-
-	REG_WR(sc, nig_reg_address_crd_weight, cos_bw_nig);
-
-	REG_WR(sc, pbf_reg_address_crd_weight, cos_bw_pbf);
-
-	return ELINK_STATUS_OK;
-}
-/******************************************************************************
- * Description:
- *	Calculate the total BW. A value of 0 isn't legal.
- *
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_get_total_bw(
-	const struct elink_params *params,
-	struct elink_ets_params *ets_params,
-	uint16_t *total_bw)
-{
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t cos_idx = 0;
-	uint8_t is_bw_cos_exist = 0;
-
-	*total_bw = 0;
-	/* Calculate total BW requested */
-	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
-		if (ets_params->cos[cos_idx].state == elink_cos_state_bw) {
-			is_bw_cos_exist = 1;
-			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
-				ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config BW"
-						   " was set to 0");
-				/* This is to prevent a state when ramrods
-				 * can't be sent
-				 */
-				ets_params->cos[cos_idx].params.bw_params.bw
-					 = 1;
-			}
-			*total_bw +=
-				ets_params->cos[cos_idx].params.bw_params.bw;
-		}
-	}
-
-	/* Check total BW is valid */
-	if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
-		if (*total_bw == 0) {
-			ELINK_DEBUG_P0(sc,
-			   "elink_ets_E3B0_config total BW shouldn't be 0");
-			return ELINK_STATUS_ERROR;
-		}
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_E3B0_config total BW should be 100");
-		/* We can handle a case where the BW isn't 100; this can happen
-		 * if the TCs are joined.
-		 */
-	}
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Invalidate all the sp_pri_to_cos.
- *
- ******************************************************************************/
-static void elink_ets_e3b0_sp_pri_to_cos_init(uint8_t *sp_pri_to_cos)
-{
-	uint8_t pri = 0;
-	for (pri = 0; pri < ELINK_DCBX_MAX_NUM_COS; pri++)
-		sp_pri_to_cos[pri] = DCBX_INVALID_COS;
-}
-/******************************************************************************
- * Description:
- *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
- *	according to sp_pri_to_cos.
- *
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_sp_pri_to_cos_set(
-					    const struct elink_params *params,
-					    uint8_t *sp_pri_to_cos,
-					    const uint8_t pri,
-					    const uint8_t cos_entry)
-{
-	struct bnx2x_softc *sc = params->sc;
-	const uint8_t port = params->port;
-	const uint8_t max_num_of_cos = (port) ?
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-
-	if (pri >= max_num_of_cos) {
-		ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid "
-		   "parameter Illegal strict priority");
-		return ELINK_STATUS_ERROR;
-	}
-
-	if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
-		ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid "
-				   "parameter There can't be two COS's with "
-				   "the same strict pri");
-		return ELINK_STATUS_ERROR;
-	}
-
-	sp_pri_to_cos[pri] = cos_entry;
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Returns the correct value according to COS and priority in
- *	the sp_pri_cli register.
- *
- ******************************************************************************/
-static uint64_t elink_e3b0_sp_get_pri_cli_reg(const uint8_t cos,
-					 const uint8_t cos_offset,
-					 const uint8_t pri_set,
-					 const uint8_t pri_offset,
-					 const uint8_t entry_size)
-{
-	uint64_t pri_cli_nig = 0;
-	pri_cli_nig = ((uint64_t)(cos + cos_offset)) << (entry_size *
-						    (pri_set + pri_offset));
-
-	return pri_cli_nig;
-}
-/******************************************************************************
- * Description:
- *	Returns the correct value according to COS and priority in the
- *	sp_pri_cli register for NIG.
- *
- ******************************************************************************/
-static uint64_t elink_e3b0_sp_get_pri_cli_reg_nig(const uint8_t cos,
-						  const uint8_t pri_set)
-{
-	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
-	const uint8_t nig_cos_offset = 3;
-	const uint8_t nig_pri_offset = 3;
-
-	return elink_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
-		nig_pri_offset, 4);
-}
-
-/******************************************************************************
- * Description:
- *	Returns the correct value according to COS and priority in the
- *	sp_pri_cli register for PBF.
- *
- ******************************************************************************/
-static uint64_t elink_e3b0_sp_get_pri_cli_reg_pbf(const uint8_t cos,
-						  const uint8_t pri_set)
-{
-	const uint8_t pbf_cos_offset = 0;
-	const uint8_t pbf_pri_offset = 0;
-
-	return elink_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
-		pbf_pri_offset, 3);
-}
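
For anyone sanity-checking the packing arithmetic that goes away here, a small
standalone sketch of what elink_e3b0_sp_get_pri_cli_reg() computed (nothing
beyond the offsets and entry sizes visible above is assumed):

#include <assert.h>
#include <stdint.h>

/* Pack a client number into its per-priority slot, as the removed helper did. */
static uint64_t sp_pack(uint8_t cos, uint8_t cos_off, uint8_t pri,
			uint8_t pri_off, uint8_t entry_size)
{
	return ((uint64_t)(cos + cos_off)) << (entry_size * (pri + pri_off));
}

int main(void)
{
	/* NIG: 4-bit entries, client and priority shifted by 3 because MCP,
	 * dbg0 and dbg1 occupy the first three slots (initial value 0x210);
	 * COS0 at strict priority 0 therefore lands as client 3 in nibble 3.
	 */
	assert(sp_pack(0, 3, 0, 3, 4) == 0x3000);
	/* PBF: 3-bit entries, no offsets; COS1 at priority 0 is just 0x1. */
	assert(sp_pack(1, 0, 0, 0, 3) == 0x1);
	return 0;
}
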
-
-/******************************************************************************
- * Description:
- *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
- *	according to sp_pri_to_cos.(which COS has higher priority)
- *
- ******************************************************************************/
-static elink_status_t elink_ets_e3b0_sp_set_pri_cli_reg(
-					     const struct elink_params *params,
-					     uint8_t *sp_pri_to_cos)
-{
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t i = 0;
-	const uint8_t port = params->port;
-	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
-	uint64_t pri_cli_nig = 0x210;
-	uint32_t pri_cli_pbf = 0x0;
-	uint8_t pri_set = 0;
-	uint8_t pri_bitmask = 0;
-	const uint8_t max_num_of_cos = (port) ?
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-
-	uint8_t cos_bit_to_set = (1 << max_num_of_cos) - 1;
-
-	/* Set all the strict priority first */
-	for (i = 0; i < max_num_of_cos; i++) {
-		if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
-			if (sp_pri_to_cos[i] >= ELINK_DCBX_MAX_NUM_COS) {
-				ELINK_DEBUG_P0(sc,
-					   "elink_ets_e3b0_sp_set_pri_cli_reg "
-					   "invalid cos entry");
-				return ELINK_STATUS_ERROR;
-			}
-
-			pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig(
-			    sp_pri_to_cos[i], pri_set);
-
-			pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf(
-			    sp_pri_to_cos[i], pri_set);
-			pri_bitmask = 1 << sp_pri_to_cos[i];
-			/* COS is used remove it from bitmap.*/
-			if (!(pri_bitmask & cos_bit_to_set)) {
-				ELINK_DEBUG_P0(sc,
-					"elink_ets_e3b0_sp_set_pri_cli_reg "
-					"invalid There can't be two COS's with"
-					" the same strict pri");
-				return ELINK_STATUS_ERROR;
-			}
-			cos_bit_to_set &= ~pri_bitmask;
-			pri_set++;
-		}
-	}
-
-	/* Set all the non-strict priority entries, i = COS */
-	for (i = 0; i < max_num_of_cos; i++) {
-		pri_bitmask = 1 << i;
-		/* Check if COS was already used for SP */
-		if (pri_bitmask & cos_bit_to_set) {
-			/* COS wasn't used for SP */
-			pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig(
-			    i, pri_set);
-
-			pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf(
-			    i, pri_set);
-			/* COS is used remove it from bitmap.*/
-			cos_bit_to_set &= ~pri_bitmask;
-			pri_set++;
-		}
-	}
-
-	if (pri_set != max_num_of_cos) {
-		ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_set_pri_cli_reg not all "
-				   "entries were set");
-		return ELINK_STATUS_ERROR;
-	}
-
-	if (port) {
-		/* Only 6 usable clients*/
-		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
-		       (uint32_t)pri_cli_nig);
-
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
-	} else {
-		/* Only 9 usable clients*/
-		const uint32_t pri_cli_nig_lsb = (uint32_t)(pri_cli_nig);
-		const uint32_t pri_cli_nig_msb = (uint32_t)
-						((pri_cli_nig >> 32) & 0xF);
-
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
-		       pri_cli_nig_lsb);
-		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
-		       pri_cli_nig_msb);
-
-		REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
-	}
-	return ELINK_STATUS_OK;
-}
-
-/******************************************************************************
- * Description:
- *	Configure the COS to ETS according to BW and SP settings.
- ******************************************************************************/
-elink_status_t elink_ets_e3b0_config(const struct elink_params *params,
-			 const struct elink_vars *vars,
-			 struct elink_ets_params *ets_params)
-{
-	struct bnx2x_softc *sc = params->sc;
-	elink_status_t elink_status = ELINK_STATUS_OK;
-	const uint8_t port = params->port;
-	uint16_t total_bw = 0;
-	const uint32_t min_w_val_nig = elink_ets_get_min_w_val_nig(vars);
-	const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL;
-	uint8_t cos_bw_bitmap = 0;
-	uint8_t cos_sp_bitmap = 0;
-	uint8_t sp_pri_to_cos[ELINK_DCBX_MAX_NUM_COS] = {0};
-	const uint8_t max_num_of_cos = (port) ?
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
-		ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
-	uint8_t cos_entry = 0;
-
-	if (!CHIP_IS_E3B0(sc)) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_e3b0_disabled the chip isn't E3B0");
-		return ELINK_STATUS_ERROR;
-	}
-
-	if (ets_params->num_of_cos > max_num_of_cos) {
-		ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config the number of COS "
-				   "isn't supported");
-		return ELINK_STATUS_ERROR;
-	}
-
-	/* Prepare sp strict priority parameters*/
-	elink_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
-
-	/* Prepare BW parameters*/
-	elink_status = elink_ets_e3b0_get_total_bw(params, ets_params,
-						   &total_bw);
-	if (elink_status != ELINK_STATUS_OK) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_E3B0_config get_total_bw failed");
-		return ELINK_STATUS_ERROR;
-	}
-
-	/* Upper bound is set according to current link speed (min_w_val
-	 * should be the same for upper bound and COS credit val).
-	 */
-	elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
-	elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
-
-
-	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
-		if (elink_cos_state_bw == ets_params->cos[cos_entry].state) {
-			cos_bw_bitmap |= (1 << cos_entry);
-			/* The function also sets the BW in HW (not the mapping
-			 * yet)
-			 */
-			elink_status = elink_ets_e3b0_set_cos_bw(
-				sc, cos_entry, min_w_val_nig, min_w_val_pbf,
-				total_bw,
-				ets_params->cos[cos_entry].params.bw_params.bw,
-				 port);
-		} else if (elink_cos_state_strict ==
-			ets_params->cos[cos_entry].state){
-			cos_sp_bitmap |= (1 << cos_entry);
-
-			elink_status = elink_ets_e3b0_sp_pri_to_cos_set(
-				params,
-				sp_pri_to_cos,
-				ets_params->cos[cos_entry].params.sp_params.pri,
-				cos_entry);
-
-		} else {
-			ELINK_DEBUG_P0(sc,
-			   "elink_ets_e3b0_config cos state not valid");
-			return ELINK_STATUS_ERROR;
-		}
-		if (elink_status != ELINK_STATUS_OK) {
-			ELINK_DEBUG_P0(sc,
-			   "elink_ets_e3b0_config set cos bw failed");
-			return elink_status;
-		}
-	}
-
-	/* Set SP register (which COS has higher priority) */
-	elink_status = elink_ets_e3b0_sp_set_pri_cli_reg(params,
-							 sp_pri_to_cos);
-
-	if (elink_status != ELINK_STATUS_OK) {
-		ELINK_DEBUG_P0(sc,
-		   "elink_ets_E3B0_config set_pri_cli_reg failed");
-		return elink_status;
-	}
-
-	/* Set client mapping of BW and strict */
-	elink_status = elink_ets_e3b0_cli_map(params, ets_params,
-					      cos_sp_bitmap,
-					      cos_bw_bitmap);
-
-	if (elink_status != ELINK_STATUS_OK) {
-		ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config SP failed");
-		return elink_status;
-	}
-	return ELINK_STATUS_OK;
-}
-static void elink_ets_bw_limit_common(const struct elink_params *params)
-{
-	/* ETS disabled configuration */
-	struct bnx2x_softc *sc = params->sc;
-	ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration");
-	/* Defines which entries (clients) are subjected to WFQ arbitration
-	 * COS0 0x8
-	 * COS1 0x10
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-	/* Mapping between the ARB_CREDIT_WEIGHT registers and actual
-	 * client numbers (WEIGHT_0 does not actually have to represent
-	 * client 0)
-	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-
-	/* ETS mode enabled*/
-	REG_WR(sc, PBF_REG_ETS_ENABLED, 1);
-
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
-	 * entry, 4 - COS1 entry.
-	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-	 * bit4   bit3	  bit2     bit1	   bit0
-	 * MCP and debug are strict
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
-
-	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
-	REG_WR(sc, PBF_REG_COS0_UPPER_BOUND,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-	REG_WR(sc, PBF_REG_COS1_UPPER_BOUND,
-	       ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-}
-
-void elink_ets_bw_limit(const struct elink_params *params,
-			const uint32_t cos0_bw,
-			const uint32_t cos1_bw)
-{
-	/* ETS disabled configuration*/
-	struct bnx2x_softc *sc = params->sc;
-	const uint32_t total_bw = cos0_bw + cos1_bw;
-	uint32_t cos0_credit_weight = 0;
-	uint32_t cos1_credit_weight = 0;
-
-	ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration");
-
-	if ((!total_bw) ||
-	    (!cos0_bw) ||
-	    (!cos1_bw)) {
-		ELINK_DEBUG_P0(sc, "Total BW can't be zero");
-		return;
-	}
-
-	cos0_credit_weight = (cos0_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) /
-		total_bw;
-	cos1_credit_weight = (cos1_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) /
-		total_bw;
-
-	elink_ets_bw_limit_common(params);
-
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
-
-	REG_WR(sc, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
-	REG_WR(sc, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
-}
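
The BW-to-credit-weight conversion in the removed elink_ets_bw_limit() is a
plain proportional split; a minimal sketch of the same calculation, with the
driver macro ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT replaced by a symbolic stand-in
since its value is defined elsewhere in elink.h:

#include <stdint.h>
#include <stdio.h>

#define CREDIT_WEIGHT 100U	/* stand-in for the elink.h macro */

int main(void)
{
	uint32_t cos0_bw = 30, cos1_bw = 70;
	uint32_t total = cos0_bw + cos1_bw;

	/* Same guard as the removed function: all three must be non-zero. */
	if (!total || !cos0_bw || !cos1_bw)
		return 1;

	printf("cos0 weight %u, cos1 weight %u\n",
	       cos0_bw * CREDIT_WEIGHT / total,		/* 30 */
	       cos1_bw * CREDIT_WEIGHT / total);	/* 70 */
	return 0;
}
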
-
-elink_status_t elink_ets_strict(const struct elink_params *params,
-				const uint8_t strict_cos)
-{
-	/* ETS disabled configuration*/
-	struct bnx2x_softc *sc = params->sc;
-	uint32_t val	= 0;
-
-	ELINK_DEBUG_P0(sc, "ETS enabled strict configuration");
-	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	 * as strict.  Bits 0,1,2 - debug and management entries,
-	 * 3 - COS0 entry, 4 - COS1 entry.
-	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-	 *  bit4   bit3	  bit2      bit1     bit0
-	 * MCP and debug are strict
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-	/* For strict priority entries defines the number of consecutive slots
-	 * for the highest priority.
-	 */
-	REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/* ETS mode disable */
-	REG_WR(sc, PBF_REG_ETS_ENABLED, 0);
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
-
-	/* Defines the number of consecutive slots for the strict priority */
-	REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
-
-	/* Mapping between entry priority to client number (0,1,2 - debug and
-	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
-	 * 3bits client num.
-	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
-	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
-	 */
-	val = (!strict_cos) ? 0x2318 : 0x22E0;
-	REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
-
-	return ELINK_STATUS_OK;
-}
-
 /******************************************************************/
 /*			PFC section				  */
 /******************************************************************/
@@ -2143,56 +1218,6 @@ static void elink_update_pfc_xmac(struct elink_params *params,
 	DELAY(30);
 }
 
-static void elink_emac_get_pfc_stat(struct elink_params *params,
-				    uint32_t pfc_frames_sent[2],
-				    uint32_t pfc_frames_received[2])
-{
-	/* Read pfc statistic */
-	struct bnx2x_softc *sc = params->sc;
-	uint32_t emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-	uint32_t val_xon = 0;
-	uint32_t val_xoff = 0;
-
-	ELINK_DEBUG_P0(sc, "pfc statistic read from EMAC");
-
-	/* PFC received frames */
-	val_xoff = REG_RD(sc, emac_base +
-				EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
-	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
-	val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
-	val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
-	pfc_frames_received[0] = val_xon + val_xoff;
-
-	/* PFC sent frames */
-	val_xoff = REG_RD(sc, emac_base +
-				EMAC_REG_RX_PFC_STATS_XOFF_SENT);
-	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
-	val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
-	val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
-	pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
-			 uint32_t pfc_frames_sent[2],
-			 uint32_t pfc_frames_received[2])
-{
-	/* Read pfc statistic */
-	struct bnx2x_softc *sc = params->sc;
-
-	ELINK_DEBUG_P0(sc, "pfc statistic");
-
-	if (!vars->link_up)
-		return;
-
-	if (vars->mac_type == ELINK_MAC_TYPE_EMAC) {
-		ELINK_DEBUG_P0(sc, "About to read PFC stats from EMAC");
-		elink_emac_get_pfc_stat(params, pfc_frames_sent,
-					pfc_frames_received);
-	}
-}
 /******************************************************************/
 /*			MAC/PBF section				  */
 /******************************************************************/
@@ -2877,54 +1902,6 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
 	REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-/******************************************************************************
- * Description:
- *  This function is needed because NIG ARB_CREDIT_WEIGHT_X are
- *  not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
- ******************************************************************************/
-static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc,
-					   uint8_t cos_entry,
-					   uint32_t priority_mask, uint8_t port)
-{
-	uint32_t nig_reg_rx_priority_mask_add = 0;
-
-	switch (cos_entry) {
-	case 0:
-	     nig_reg_rx_priority_mask_add = (port) ?
-		 NIG_REG_P1_RX_COS0_PRIORITY_MASK :
-		 NIG_REG_P0_RX_COS0_PRIORITY_MASK;
-		break;
-	case 1:
-	    nig_reg_rx_priority_mask_add = (port) ?
-		NIG_REG_P1_RX_COS1_PRIORITY_MASK :
-		NIG_REG_P0_RX_COS1_PRIORITY_MASK;
-		break;
-	case 2:
-	    nig_reg_rx_priority_mask_add = (port) ?
-		NIG_REG_P1_RX_COS2_PRIORITY_MASK :
-		NIG_REG_P0_RX_COS2_PRIORITY_MASK;
-		break;
-	case 3:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	    nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
-		break;
-	case 4:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	    nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
-		break;
-	case 5:
-		if (port)
-		return ELINK_STATUS_ERROR;
-	    nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
-		break;
-	}
-
-	REG_WR(sc, nig_reg_rx_priority_mask_add, priority_mask);
-
-	return ELINK_STATUS_OK;
-}
 static void elink_update_mng(struct elink_params *params, uint32_t link_status)
 {
 	struct bnx2x_softc *sc = params->sc;
@@ -2934,157 +1911,6 @@ static void elink_update_mng(struct elink_params *params, uint32_t link_status)
 			port_mb[params->port].link_status), link_status);
 }
 
-static void elink_update_pfc_nig(struct elink_params *params,
-		__rte_unused struct elink_vars *vars,
-		struct elink_nig_brb_pfc_port_params *nig_params)
-{
-	uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0;
-	uint32_t llfc_out_en = 0;
-	uint32_t llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
-	uint32_t pkt_priority_to_cos = 0;
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t port = params->port;
-
-	int set_pfc = params->feature_config_flags &
-		ELINK_FEATURE_CONFIG_PFC_ENABLED;
-	ELINK_DEBUG_P0(sc, "updating pfc nig parameters");
-
-	/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
-	 * MAC control frames (that are not pause packets)
-	 * will be forwarded to the XCM.
-	 */
-	xcm_mask = REG_RD(sc, port ? NIG_REG_LLH1_XCM_MASK :
-			  NIG_REG_LLH0_XCM_MASK);
-	/* NIG params will override non-PFC params, since it's possible to
-	 * transition from PFC to SAFC
-	 */
-	if (set_pfc) {
-		pause_enable = 0;
-		llfc_out_en = 0;
-		llfc_enable = 0;
-		if (CHIP_IS_E3(sc))
-			ppp_enable = 0;
-		else
-			ppp_enable = 1;
-		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
-				     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-		xcm_out_en = 0;
-		hwpfc_enable = 1;
-	} else  {
-		if (nig_params) {
-			llfc_out_en = nig_params->llfc_out_en;
-			llfc_enable = nig_params->llfc_enable;
-			pause_enable = nig_params->pause_enable;
-		} else  /* Default non PFC mode - PAUSE */
-			pause_enable = 1;
-
-		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
-			NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-		xcm_out_en = 1;
-	}
-
-	if (CHIP_IS_E3(sc))
-		REG_WR(sc, port ? NIG_REG_BRB1_PAUSE_IN_EN :
-		       NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
-	REG_WR(sc, port ? NIG_REG_LLFC_OUT_EN_1 :
-	       NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
-	REG_WR(sc, port ? NIG_REG_LLFC_ENABLE_1 :
-	       NIG_REG_LLFC_ENABLE_0, llfc_enable);
-	REG_WR(sc, port ? NIG_REG_PAUSE_ENABLE_1 :
-	       NIG_REG_PAUSE_ENABLE_0, pause_enable);
-
-	REG_WR(sc, port ? NIG_REG_PPP_ENABLE_1 :
-	       NIG_REG_PPP_ENABLE_0, ppp_enable);
-
-	REG_WR(sc, port ? NIG_REG_LLH1_XCM_MASK :
-	       NIG_REG_LLH0_XCM_MASK, xcm_mask);
-
-	REG_WR(sc, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
-	       NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
-
-	/* Output enable for RX_XCM # IF */
-	REG_WR(sc, port ? NIG_REG_XCM1_OUT_EN :
-	       NIG_REG_XCM0_OUT_EN, xcm_out_en);
-
-	/* HW PFC TX enable */
-	REG_WR(sc, port ? NIG_REG_P1_HWPFC_ENABLE :
-	       NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
-
-	if (nig_params) {
-		uint8_t i = 0;
-		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
-
-		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
-			elink_pfc_nig_rx_priority_mask(sc, i,
-		nig_params->rx_cos_priority_mask[i], port);
-
-		REG_WR(sc, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
-		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
-		       nig_params->llfc_high_priority_classes);
-
-		REG_WR(sc, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
-		       NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
-		       nig_params->llfc_low_priority_classes);
-	}
-	REG_WR(sc, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
-	       NIG_REG_P0_PKT_PRIORITY_TO_COS,
-	       pkt_priority_to_cos);
-}
-
-elink_status_t elink_update_pfc(struct elink_params *params,
-		      struct elink_vars *vars,
-		      struct elink_nig_brb_pfc_port_params *pfc_params)
-{
-	/* PFC and pause are orthogonal to one another, meaning when
-	 * PFC is enabled, pause is disabled, and when PFC is
-	 * disabled, pause is set according to the pause result.
-	 */
-	uint32_t val;
-	struct bnx2x_softc *sc = params->sc;
-	uint8_t bmac_loopback = (params->loopback_mode == ELINK_LOOPBACK_BMAC);
-
-	if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
-		vars->link_status |= LINK_STATUS_PFC_ENABLED;
-	else
-		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
-
-	elink_update_mng(params, vars->link_status);
-
-	/* Update NIG params */
-	elink_update_pfc_nig(params, vars, pfc_params);
-
-	if (!vars->link_up)
-		return ELINK_STATUS_OK;
-
-	ELINK_DEBUG_P0(sc, "About to update PFC in BMAC");
-
-	if (CHIP_IS_E3(sc)) {
-		if (vars->mac_type == ELINK_MAC_TYPE_XMAC)
-			elink_update_pfc_xmac(params, vars, 0);
-	} else {
-		val = REG_RD(sc, MISC_REG_RESET_REG_2);
-		if ((val &
-		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
-		    == 0) {
-			ELINK_DEBUG_P0(sc, "About to update PFC in EMAC");
-			elink_emac_enable(params, vars, 0);
-			return ELINK_STATUS_OK;
-		}
-		if (CHIP_IS_E2(sc))
-			elink_update_pfc_bmac2(params, vars, bmac_loopback);
-		else
-			elink_update_pfc_bmac1(params, vars);
-
-		val = 0;
-		if ((params->feature_config_flags &
-		     ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
-		    (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
-			val = 1;
-		REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port * 4, val);
-	}
-	return ELINK_STATUS_OK;
-}
-
 static elink_status_t elink_bmac1_enable(struct elink_params *params,
 			      struct elink_vars *vars,
 			      uint8_t is_lb)
@@ -4030,40 +2856,6 @@ static void elink_cl45_read_and_write(struct bnx2x_softc *sc,
 	elink_cl45_write(sc, phy, devad, reg, val & and_val);
 }
 
-elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr,
-		   uint8_t devad, uint16_t reg, uint16_t *ret_val)
-{
-	uint8_t phy_index;
-	/* Probe for the phy according to the given phy_addr, and execute
-	 * the read request on it
-	 */
-	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
-		if (params->phy[phy_index].addr == phy_addr) {
-			return elink_cl45_read(params->sc,
-					       &params->phy[phy_index], devad,
-					       reg, ret_val);
-		}
-	}
-	return ELINK_STATUS_ERROR;
-}
-
-elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr,
-		    uint8_t devad, uint16_t reg, uint16_t val)
-{
-	uint8_t phy_index;
-	/* Probe for the phy according to the given phy_addr, and execute
-	 * the write request on it
-	 */
-	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
-		if (params->phy[phy_index].addr == phy_addr) {
-			return elink_cl45_write(params->sc,
-						&params->phy[phy_index], devad,
-						reg, val);
-		}
-	}
-	return ELINK_STATUS_ERROR;
-}
-
 static uint8_t elink_get_warpcore_lane(__rte_unused struct elink_phy *phy,
 				  struct elink_params *params)
 {
@@ -7108,47 +5900,6 @@ static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
 	return ELINK_STATUS_OK;
 }
 
-elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params,
-				 uint8_t *version,
-				 uint16_t len)
-{
-	struct bnx2x_softc *sc;
-	uint32_t spirom_ver = 0;
-	elink_status_t status = ELINK_STATUS_OK;
-	uint8_t *ver_p = version;
-	uint16_t remain_len = len;
-	if (version == NULL || params == NULL)
-		return ELINK_STATUS_ERROR;
-	sc = params->sc;
-
-	/* Extract first external phy*/
-	version[0] = '\0';
-	spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY1].ver_addr);
-
-	if (params->phy[ELINK_EXT_PHY1].format_fw_ver) {
-		status |= params->phy[ELINK_EXT_PHY1].format_fw_ver(spirom_ver,
-							      ver_p,
-							      &remain_len);
-		ver_p += (len - remain_len);
-	}
-	if ((params->num_phys == ELINK_MAX_PHYS) &&
-	    (params->phy[ELINK_EXT_PHY2].ver_addr != 0)) {
-		spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY2].ver_addr);
-		if (params->phy[ELINK_EXT_PHY2].format_fw_ver) {
-			*ver_p = '/';
-			ver_p++;
-			remain_len--;
-			status |= params->phy[ELINK_EXT_PHY2].format_fw_ver(
-				spirom_ver,
-				ver_p,
-				&remain_len);
-			ver_p = version + (len - remain_len);
-		}
-	}
-	*ver_p = '\0';
-	return status;
-}
-
 static void elink_set_xgxs_loopback(struct elink_phy *phy,
 				    struct elink_params *params)
 {
@@ -7360,99 +6111,6 @@ elink_status_t elink_set_led(struct elink_params *params,
 
 }
 
-/* This function reflects the actual link state, read DIRECTLY from the
- * HW
- */
-elink_status_t elink_test_link(struct elink_params *params,
-			       __rte_unused struct elink_vars *vars,
-		    uint8_t is_serdes)
-{
-	struct bnx2x_softc *sc = params->sc;
-	uint16_t gp_status = 0, phy_index = 0;
-	uint8_t ext_phy_link_up = 0, serdes_phy_type;
-	struct elink_vars temp_vars;
-	struct elink_phy *int_phy = &params->phy[ELINK_INT_PHY];
-#ifdef ELINK_INCLUDE_FPGA
-	if (CHIP_REV_IS_FPGA(sc))
-		return ELINK_STATUS_OK;
-#endif
-#ifdef ELINK_INCLUDE_EMUL
-	if (CHIP_REV_IS_EMUL(sc))
-		return ELINK_STATUS_OK;
-#endif
-
-	if (CHIP_IS_E3(sc)) {
-		uint16_t link_up;
-		if (params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)]
-		    > ELINK_SPEED_10000) {
-			/* Check 20G link */
-			elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
-					1, &link_up);
-			elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
-					1, &link_up);
-			link_up &= (1 << 2);
-		} else {
-			/* Check 10G link and below*/
-			uint8_t lane = elink_get_warpcore_lane(int_phy, params);
-			elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
-					MDIO_WC_REG_GP2_STATUS_GP_2_1,
-					&gp_status);
-			gp_status = ((gp_status >> 8) & 0xf) |
-				((gp_status >> 12) & 0xf);
-			link_up = gp_status & (1 << lane);
-		}
-		if (!link_up)
-			return ELINK_STATUS_NO_LINK;
-	} else {
-		CL22_RD_OVER_CL45(sc, int_phy,
-			  MDIO_REG_BANK_GP_STATUS,
-			  MDIO_GP_STATUS_TOP_AN_STATUS1,
-			  &gp_status);
-	/* Link is up only if both local phy and external phy are up */
-	if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
-		return ELINK_STATUS_NO_LINK;
-	}
-	/* In XGXS loopback mode, do not check external PHY */
-	if (params->loopback_mode == ELINK_LOOPBACK_XGXS)
-		return ELINK_STATUS_OK;
-
-	switch (params->num_phys) {
-	case 1:
-		/* No external PHY */
-		return ELINK_STATUS_OK;
-	case 2:
-		ext_phy_link_up = params->phy[ELINK_EXT_PHY1].read_status(
-			&params->phy[ELINK_EXT_PHY1],
-			params, &temp_vars);
-		break;
-	case 3: /* Dual Media */
-		for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
-		      phy_index++) {
-			serdes_phy_type = ((params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_SFPP_10G_FIBER) ||
-					   (params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_SFP_1G_FIBER) ||
-					   (params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_XFP_FIBER) ||
-					   (params->phy[phy_index].media_type ==
-					    ELINK_ETH_PHY_DA_TWINAX));
-
-			if (is_serdes != serdes_phy_type)
-				continue;
-			if (params->phy[phy_index].read_status) {
-				ext_phy_link_up |=
-					params->phy[phy_index].read_status(
-						&params->phy[phy_index],
-						params, &temp_vars);
-			}
-		}
-		break;
-	}
-	if (ext_phy_link_up)
-		return ELINK_STATUS_OK;
-	return ELINK_STATUS_NO_LINK;
-}
-
 static elink_status_t elink_link_initialize(struct elink_params *params,
 				 struct elink_vars *vars)
 {
@@ -12443,31 +11101,6 @@ static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t *str,
 	return ELINK_STATUS_OK;
 }
 
-void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy)
-{
-	uint16_t val, cnt;
-
-	elink_cl45_read(sc, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_7101_RESET, &val);
-
-	for (cnt = 0; cnt < 10; cnt++) {
-		DELAY(1000 * 50);
-		/* Writes a self-clearing reset */
-		elink_cl45_write(sc, phy,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_7101_RESET,
-				 (val | (1 << 15)));
-		/* Wait for clear */
-		elink_cl45_read(sc, phy,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_7101_RESET, &val);
-
-		if ((val & (1 << 15)) == 0)
-			break;
-	}
-}
-
 static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy,
 				struct elink_params *params) {
 	/* Low power mode is controlled by GPIO 2 */
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index dd70ac6c66..f5cdf7440b 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -515,26 +515,10 @@ elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *v
 /* elink_link_update should be called upon link interrupt */
 elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars);
 
-/* use the following phy functions to read/write from external_phy
- * In order to use it to read/write internal phy registers, use
- * ELINK_DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
- * the register
- */
-elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr,
-		   uint8_t devad, uint16_t reg, uint16_t *ret_val);
-
-elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr,
-		    uint8_t devad, uint16_t reg, uint16_t val);
-
 /* Reads the link_status from the shmem,
    and update the link vars accordingly */
 void elink_link_status_update(struct elink_params *input,
 			    struct elink_vars *output);
-/* returns string representing the fw_version of the external phy */
-elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params,
-				 uint8_t *version,
-				 uint16_t len);
-
 /* Set/Unset the led
    Basically, the CLC takes care of the led for the link, but in case one needs
    to set/unset the led unnaturally, set the "mode" to ELINK_LED_MODE_OPER to
@@ -551,14 +535,6 @@ elink_status_t elink_set_led(struct elink_params *params,
  */
 void elink_handle_module_detect_int(struct elink_params *params);
 
-/* Get the actual link status. In case it returns ELINK_STATUS_OK, link is up,
- * otherwise link is down
- */
-elink_status_t elink_test_link(struct elink_params *params,
-		    struct elink_vars *vars,
-		    uint8_t is_serdes);
-
-
 /* One-time initialization for external phy after power up */
 elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[],
 			  uint32_t shmem2_base_path[], uint32_t chip_id,
@@ -567,9 +543,6 @@ elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base
 /* Reset the external PHY using GPIO */
 void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port);
 
-/* Reset the external of SFX7101 */
-void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy);
-
 /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
 elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
 				 struct elink_params *params, uint8_t dev_addr,
@@ -650,36 +623,6 @@ struct elink_ets_params {
 	struct elink_ets_cos_params cos[ELINK_DCBX_MAX_NUM_COS];
 };
 
-/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
- * when link is already up
- */
-elink_status_t elink_update_pfc(struct elink_params *params,
-		      struct elink_vars *vars,
-		      struct elink_nig_brb_pfc_port_params *pfc_params);
-
-
-/* Used to configure the ETS to disable */
-elink_status_t elink_ets_disabled(struct elink_params *params,
-		       struct elink_vars *vars);
-
-/* Used to configure the ETS to BW limited */
-void elink_ets_bw_limit(const struct elink_params *params,
-			const uint32_t cos0_bw,
-			const uint32_t cos1_bw);
-
-/* Used to configure the ETS to strict */
-elink_status_t elink_ets_strict(const struct elink_params *params,
-				const uint8_t strict_cos);
-
-
-/*  Configure the COS to ETS according to BW and SP settings.*/
-elink_status_t elink_ets_e3b0_config(const struct elink_params *params,
-			 const struct elink_vars *vars,
-			 struct elink_ets_params *ets_params);
-/* Read pfc statistic*/
-void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
-			 uint32_t pfc_frames_sent[2],
-			 uint32_t pfc_frames_received[2]);
 void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
 			    uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base,
 			    uint8_t port);
diff --git a/drivers/net/bnxt/tf_core/bitalloc.c b/drivers/net/bnxt/tf_core/bitalloc.c
index 918cabf19c..cdb13607d5 100644
--- a/drivers/net/bnxt/tf_core/bitalloc.c
+++ b/drivers/net/bnxt/tf_core/bitalloc.c
@@ -227,62 +227,6 @@ ba_alloc_reverse(struct bitalloc *pool)
 	return ba_alloc_reverse_helper(pool, 0, 1, 32, 0, &clear);
 }
 
-static int
-ba_alloc_index_helper(struct bitalloc *pool,
-		      int              offset,
-		      int              words,
-		      unsigned int     size,
-		      int             *index,
-		      int             *clear)
-{
-	bitalloc_word_t *storage = &pool->storage[offset];
-	int       loc;
-	int       r;
-
-	if (pool->size > size)
-		r = ba_alloc_index_helper(pool,
-					  offset + words + 1,
-					  storage[words],
-					  size * 32,
-					  index,
-					  clear);
-	else
-		r = 1; /* Check if already allocated */
-
-	loc = (*index % 32);
-	*index = *index / 32;
-
-	if (r == 1) {
-		r = (storage[*index] & (1 << loc)) ? 0 : -1;
-		if (r == 0) {
-			*clear = 1;
-			pool->free_count--;
-		}
-	}
-
-	if (*clear) {
-		storage[*index] &= ~(1 << loc);
-		*clear = (storage[*index] == 0);
-	}
-
-	return r;
-}
-
-int
-ba_alloc_index(struct bitalloc *pool, int index)
-{
-	int clear = 0;
-	int index_copy = index;
-
-	if (index < 0 || index >= (int)pool->size)
-		return -1;
-
-	if (ba_alloc_index_helper(pool, 0, 1, 32, &index_copy, &clear) >= 0)
-		return index;
-	else
-		return -1;
-}
-
 static int
 ba_inuse_helper(struct bitalloc *pool,
 		int              offset,
@@ -365,107 +309,7 @@ ba_free(struct bitalloc *pool, int index)
 	return ba_free_helper(pool, 0, 1, 32, &index);
 }
 
-int
-ba_inuse_free(struct bitalloc *pool, int index)
-{
-	if (index < 0 || index >= (int)pool->size)
-		return -1;
-
-	return ba_free_helper(pool, 0, 1, 32, &index) + 1;
-}
-
-int
-ba_free_count(struct bitalloc *pool)
-{
-	return (int)pool->free_count;
-}
-
 int ba_inuse_count(struct bitalloc *pool)
 {
 	return (int)(pool->size) - (int)(pool->free_count);
 }
-
-static int
-ba_find_next_helper(struct bitalloc *pool,
-		    int              offset,
-		    int              words,
-		    unsigned int     size,
-		    int             *index,
-		    int              free)
-{
-	bitalloc_word_t *storage = &pool->storage[offset];
-	int       loc, r, bottom = 0;
-
-	if (pool->size > size)
-		r = ba_find_next_helper(pool,
-					offset + words + 1,
-					storage[words],
-					size * 32,
-					index,
-					free);
-	else
-		bottom = 1; /* Bottom of tree */
-
-	loc = (*index % 32);
-	*index = *index / 32;
-
-	if (bottom) {
-		int bit_index = *index * 32;
-
-		loc = ba_ffs(~storage[*index] & ((bitalloc_word_t)-1 << loc));
-		if (loc > 0) {
-			loc--;
-			r = (bit_index + loc);
-			if (r >= (int)pool->size)
-				r = -1;
-		} else {
-			/* Loop over array at bottom of tree */
-			r = -1;
-			bit_index += 32;
-			*index = *index + 1;
-			while ((int)pool->size > bit_index) {
-				loc = ba_ffs(~storage[*index]);
-
-				if (loc > 0) {
-					loc--;
-					r = (bit_index + loc);
-					if (r >= (int)pool->size)
-						r = -1;
-					break;
-				}
-				bit_index += 32;
-				*index = *index + 1;
-			}
-		}
-	}
-
-	if (r >= 0 && (free)) {
-		if (bottom)
-			pool->free_count++;
-		storage[*index] |= (1 << loc);
-	}
-
-	return r;
-}
-
-int
-ba_find_next_inuse(struct bitalloc *pool, int index)
-{
-	if (index < 0 ||
-	    index >= (int)pool->size ||
-	    pool->free_count == pool->size)
-		return -1;
-
-	return ba_find_next_helper(pool, 0, 1, 32, &index, 0);
-}
-
-int
-ba_find_next_inuse_free(struct bitalloc *pool, int index)
-{
-	if (index < 0 ||
-	    index >= (int)pool->size ||
-	    pool->free_count == pool->size)
-		return -1;
-
-	return ba_find_next_helper(pool, 0, 1, 32, &index, 1);
-}
diff --git a/drivers/net/bnxt/tf_core/bitalloc.h b/drivers/net/bnxt/tf_core/bitalloc.h
index 2825bb37e5..9ac6eadd81 100644
--- a/drivers/net/bnxt/tf_core/bitalloc.h
+++ b/drivers/net/bnxt/tf_core/bitalloc.h
@@ -70,7 +70,6 @@ int ba_init(struct bitalloc *pool, int size);
  * Returns -1 on failure, or index of allocated entry
  */
 int ba_alloc(struct bitalloc *pool);
-int ba_alloc_index(struct bitalloc *pool, int index);
 
 /**
  * Returns -1 on failure, or index of allocated entry
@@ -85,37 +84,12 @@ int ba_alloc_reverse(struct bitalloc *pool);
  */
 int ba_inuse(struct bitalloc *pool, int index);
 
-/**
- * Variant of ba_inuse that frees the index if it is allocated, same
- * return codes as ba_inuse
- */
-int ba_inuse_free(struct bitalloc *pool, int index);
-
-/**
- * Find next index that is in use, start checking at index 'idx'
- *
- * Returns next index that is in use on success, or
- * -1 if no in use index is found
- */
-int ba_find_next_inuse(struct bitalloc *pool, int idx);
-
-/**
- * Variant of ba_find_next_inuse that also frees the next in use index,
- * same return codes as ba_find_next_inuse
- */
-int ba_find_next_inuse_free(struct bitalloc *pool, int idx);
-
 /**
  * Multiple freeing of the same index has no negative side effects,
  * but will return -1.  returns -1 on failure, 0 on success.
  */
 int ba_free(struct bitalloc *pool, int index);
 
-/**
- * Returns the pool's free count
- */
-int ba_free_count(struct bitalloc *pool);
-
 /**
  * Returns the pool's in use count
  */
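
With ba_find_next_inuse() and ba_find_next_inuse_free() gone, any future caller
that needs to walk allocated indices can still do so with the retained API; a
rough sketch, assuming ba_inuse() returns 1 for an in-use index (as implied by
the removed ba_inuse_free() doc above):

#include "bitalloc.h"

/* Visit every allocated index using only the surviving bitalloc API. */
static void visit_inuse(struct bitalloc *pool, int size, void (*cb)(int index))
{
	int i;

	for (i = 0; i < size; i++)
		if (ba_inuse(pool, i) == 1)
			cb(i);
}
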
diff --git a/drivers/net/bnxt/tf_core/stack.c b/drivers/net/bnxt/tf_core/stack.c
index 954806377e..bda415e82e 100644
--- a/drivers/net/bnxt/tf_core/stack.c
+++ b/drivers/net/bnxt/tf_core/stack.c
@@ -88,28 +88,3 @@ stack_pop(struct stack *st, uint32_t *x)
 
 	return 0;
 }
-
-/* Dump the stack
- */
-void stack_dump(struct stack *st)
-{
-	int i, j;
-
-	printf("top=%d\n", st->top);
-	printf("max=%d\n", st->max);
-
-	if (st->top == -1) {
-		printf("stack is empty\n");
-		return;
-	}
-
-	for (i = 0; i < st->max + 7 / 8; i++) {
-		printf("item[%d] 0x%08x", i, st->items[i]);
-
-		for (j = 0; j < 7; j++) {
-			if (i++ < st->max - 1)
-				printf(" 0x%08x", st->items[i]);
-		}
-		printf("\n");
-	}
-}
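
Worth noting as this goes: the removed loop bound "st->max + 7 / 8" evaluates
to plain st->max (7 / 8 is 0), so it only produced rows of eight because the
inner loop also advanced i. Should the dump ever be wanted again, a more
direct sketch:

#include <stdio.h>
#include "stack.h"

/* Dump the stack storage, eight items per line. */
void stack_dump(struct stack *st)
{
	int i;

	printf("top=%d\nmax=%d\n", st->top, st->max);
	if (st->top == -1) {
		printf("stack is empty\n");
		return;
	}
	for (i = 0; i < st->max; i++) {
		if (i % 8 == 0)
			printf("item[%d]", i);
		printf(" 0x%08x", st->items[i]);
		if (i % 8 == 7 || i == st->max - 1)
			printf("\n");
	}
}
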
diff --git a/drivers/net/bnxt/tf_core/stack.h b/drivers/net/bnxt/tf_core/stack.h
index 6732e03132..7e2f5dfec6 100644
--- a/drivers/net/bnxt/tf_core/stack.h
+++ b/drivers/net/bnxt/tf_core/stack.h
@@ -102,16 +102,4 @@ int stack_push(struct stack *st, uint32_t x);
  */
 int stack_pop(struct stack *st, uint32_t *x);
 
-/** Dump stack information
- *
- * Warning: Don't use for large stacks due to prints
- *
- * [in] st
- *   pointer to the stack
- *
- * return
- *    none
- */
-void stack_dump(struct stack *st);
-
 #endif /* _STACK_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 0f49a00256..a4276d1bcc 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -90,69 +90,6 @@ tf_open_session(struct tf *tfp,
 	return 0;
 }
 
-int
-tf_attach_session(struct tf *tfp,
-		  struct tf_attach_session_parms *parms)
-{
-	int rc;
-	unsigned int domain, bus, slot, device;
-	struct tf_session_attach_session_parms aparms;
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Verify control channel */
-	rc = sscanf(parms->ctrl_chan_name,
-		    "%x:%x:%x.%d",
-		    &domain,
-		    &bus,
-		    &slot,
-		    &device);
-	if (rc != 4) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to scan device ctrl_chan_name\n");
-		return -EINVAL;
-	}
-
-	/* Verify 'attach' channel */
-	rc = sscanf(parms->attach_chan_name,
-		    "%x:%x:%x.%d",
-		    &domain,
-		    &bus,
-		    &slot,
-		    &device);
-	if (rc != 4) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to scan device attach_chan_name\n");
-		return -EINVAL;
-	}
-
-	/* Prepare return value of session_id, using ctrl_chan_name
-	 * device values as it becomes the session id.
-	 */
-	parms->session_id.internal.domain = domain;
-	parms->session_id.internal.bus = bus;
-	parms->session_id.internal.device = device;
-	aparms.attach_cfg = parms;
-	rc = tf_session_attach_session(tfp,
-				       &aparms);
-	/* Logging handled by dev_bind */
-	if (rc)
-		return rc;
-
-	TFP_DRV_LOG(INFO,
-		    "Attached to session, session_id:%d\n",
-		    parms->session_id.id);
-
-	TFP_DRV_LOG(INFO,
-		    "domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
-		    parms->session_id.internal.domain,
-		    parms->session_id.internal.bus,
-		    parms->session_id.internal.device,
-		    parms->session_id.internal.fw_session_id);
-
-	return rc;
-}
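
The only logic of note dropped with tf_attach_session() is the PCI-style parse
of the two channel names; for reference, the equivalent check in isolation
(the helper name is illustrative only, not an existing symbol):

#include <errno.h>
#include <stdio.h>

/* Returns 0 when 'name' looks like "domain:bus:slot.device", mirroring the
 * sscanf() validation the removed function applied to both ctrl_chan_name
 * and attach_chan_name.
 */
static int chan_name_ok(const char *name)
{
	unsigned int domain, bus, slot;
	int device;

	if (sscanf(name, "%x:%x:%x.%d", &domain, &bus, &slot, &device) != 4)
		return -EINVAL;
	return 0;
}
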
-
 int
 tf_close_session(struct tf *tfp)
 {
@@ -792,14 +729,6 @@ tf_set_tcam_entry(struct tf *tfp,
 	return 0;
 }
 
-int
-tf_get_tcam_entry(struct tf *tfp __rte_unused,
-		  struct tf_get_tcam_entry_parms *parms __rte_unused)
-{
-	TF_CHECK_PARMS2(tfp, parms);
-	return -EOPNOTSUPP;
-}
-
 int
 tf_free_tcam_entry(struct tf *tfp,
 		   struct tf_free_tcam_entry_parms *parms)
@@ -1228,80 +1157,6 @@ tf_get_tbl_entry(struct tf *tfp,
 	return rc;
 }
 
-int
-tf_bulk_get_tbl_entry(struct tf *tfp,
-		 struct tf_bulk_get_tbl_entry_parms *parms)
-{
-	int rc = 0;
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	struct tf_tbl_get_bulk_parms bparms;
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Can't do static initialization due to UT enum check */
-	memset(&bparms, 0, sizeof(struct tf_tbl_get_bulk_parms));
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup session, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup device, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (parms->type == TF_TBL_TYPE_EXT) {
-		/* Not supported, yet */
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s, External table type not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-
-		return rc;
-	}
-
-	/* Internal table type processing */
-
-	if (dev->ops->tf_dev_get_bulk_tbl == NULL) {
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Operation not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return -EOPNOTSUPP;
-	}
-
-	bparms.dir = parms->dir;
-	bparms.type = parms->type;
-	bparms.starting_idx = parms->starting_idx;
-	bparms.num_entries = parms->num_entries;
-	bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;
-	bparms.physical_mem_addr = parms->physical_mem_addr;
-	rc = dev->ops->tf_dev_get_bulk_tbl(tfp, &bparms);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Table get bulk failed, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	return rc;
-}
-
 int
 tf_alloc_tbl_scope(struct tf *tfp,
 		   struct tf_alloc_tbl_scope_parms *parms)
@@ -1340,44 +1195,6 @@ tf_alloc_tbl_scope(struct tf *tfp,
 
 	return rc;
 }
-int
-tf_map_tbl_scope(struct tf *tfp,
-		   struct tf_map_tbl_scope_parms *parms)
-{
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	int rc;
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to lookup session, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "Failed to lookup device, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (dev->ops->tf_dev_map_tbl_scope != NULL) {
-		rc = dev->ops->tf_dev_map_tbl_scope(tfp, parms);
-	} else {
-		TFP_DRV_LOG(ERR,
-			    "Map table scope not supported by device\n");
-		return -EINVAL;
-	}
-
-	return rc;
-}
 
 int
 tf_free_tbl_scope(struct tf *tfp,
@@ -1475,61 +1292,3 @@ tf_set_if_tbl_entry(struct tf *tfp,
 
 	return 0;
 }
-
-int
-tf_get_if_tbl_entry(struct tf *tfp,
-		    struct tf_get_if_tbl_entry_parms *parms)
-{
-	int rc;
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	struct tf_if_tbl_get_parms gparms = { 0 };
-
-	TF_CHECK_PARMS2(tfp, parms);
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup session, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup device, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (dev->ops->tf_dev_get_if_tbl == NULL) {
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Operation not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	gparms.dir = parms->dir;
-	gparms.type = parms->type;
-	gparms.idx = parms->idx;
-	gparms.data_sz_in_bytes = parms->data_sz_in_bytes;
-	gparms.data = parms->data;
-
-	rc = dev->ops->tf_dev_get_if_tbl(tfp, &gparms);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: If_tbl get failed, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	return 0;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index fa8ab52af1..2d556be752 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -657,27 +657,6 @@ struct tf_attach_session_parms {
 	union tf_session_id session_id;
 };
 
-/**
- * Experimental
- *
- * Allows a 2nd application instance to attach to an existing
- * session. Used when a session is to be shared between two processes.
- *
- * Attach will increment a ref count as to manage the shared session data.
- *
- * [in] tfp
- *   Pointer to TF handle
- *
- * [in] parms
- *   Pointer to attach parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_attach_session(struct tf *tfp,
-		      struct tf_attach_session_parms *parms);
-
 /**
  * Closes an existing session client or the session it self. The
  * session client is default closed and if the session reference count
@@ -961,25 +940,6 @@ struct tf_map_tbl_scope_parms {
 int tf_alloc_tbl_scope(struct tf *tfp,
 		       struct tf_alloc_tbl_scope_parms *parms);
 
-/**
- * map a table scope (legacy device only Wh+/SR)
- *
- * Map a table scope to one or more partition interfaces (parifs).
- * The parif can be remapped in the L2 context lookup for legacy devices.  This
- * API allows a number of parifs to be mapped to the same table scope.  On
- * legacy devices a table scope identifies one of 16 sets of EEM table base
- * addresses and is associated with a PF communication channel.  The associated
- * PF must be configured for the table scope to operate.
- *
- * An L2 context TCAM lookup returns a remapped parif value used to
- * index into the set of 16 parif_to_pf registers which are used to map to one
- * of the 16 table scopes.  This API allows the user to map the parifs in the
- * mask to the previously allocated table scope (EEM table).
-
- * Returns success or failure code.
- */
-int tf_map_tbl_scope(struct tf *tfp,
-		      struct tf_map_tbl_scope_parms *parms);
 /**
  * free a table scope
  *
@@ -1256,18 +1216,6 @@ struct tf_get_tcam_entry_parms {
 	uint16_t result_sz_in_bits;
 };
 
-/**
- * get TCAM entry
- *
- * Program a TCAM table entry for a TruFlow session.
- *
- * If the entry has not been allocated, an error will be returned.
- *
- * Returns success or failure code.
- */
-int tf_get_tcam_entry(struct tf *tfp,
-		      struct tf_get_tcam_entry_parms *parms);
-
 /**
  * tf_free_tcam_entry parameter definition
  */
@@ -1638,22 +1586,6 @@ struct tf_bulk_get_tbl_entry_parms {
 	uint64_t physical_mem_addr;
 };
 
-/**
- * Bulk get index table entry
- *
- * Used to retrieve a set of index table entries.
- *
- * Entries within the range may not have been allocated using
- * tf_alloc_tbl_entry() at the time of access. But the range must
- * be within the bounds determined from tf_open_session() for the
- * given table type.  Currently, this is only used for collecting statistics.
- *
- * Returns success or failure code. Failure will be returned if the
- * provided data buffer is too small for the data type requested.
- */
-int tf_bulk_get_tbl_entry(struct tf *tfp,
-			  struct tf_bulk_get_tbl_entry_parms *parms);
-
 /**
  * @page exact_match Exact Match Table
  *
@@ -2066,17 +1998,4 @@ struct tf_get_if_tbl_entry_parms {
 	uint32_t idx;
 };
 
-/**
- * get interface table entry
- *
- * Used to retrieve an interface table entry.
- *
- * Reads the interface table entry value
- *
- * Returns success or failure code. Failure will be returned if the
- * provided data buffer is too small for the data type requested.
- */
-int tf_get_if_tbl_entry(struct tf *tfp,
-			struct tf_get_if_tbl_entry_parms *parms);
-
 #endif /* _TF_CORE_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c
index 5615eedbbe..e4fe5fe055 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.c
+++ b/drivers/net/bnxt/tf_core/tf_msg.c
@@ -148,14 +148,6 @@ tf_msg_session_open(struct tf *tfp,
 	return rc;
 }
 
-int
-tf_msg_session_attach(struct tf *tfp __rte_unused,
-		      char *ctrl_chan_name __rte_unused,
-		      uint8_t tf_fw_session_id __rte_unused)
-{
-	return -1;
-}
-
 int
 tf_msg_session_client_register(struct tf *tfp,
 			       char *ctrl_channel_name,
@@ -266,38 +258,6 @@ tf_msg_session_close(struct tf *tfp)
 	return rc;
 }
 
-int
-tf_msg_session_qcfg(struct tf *tfp)
-{
-	int rc;
-	struct hwrm_tf_session_qcfg_input req = { 0 };
-	struct hwrm_tf_session_qcfg_output resp = { 0 };
-	struct tfp_send_msg_parms parms = { 0 };
-	uint8_t fw_session_id;
-
-	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "Unable to lookup FW id, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Populate the request */
-	req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
-
-	parms.tf_type = HWRM_TF_SESSION_QCFG,
-	parms.req_data = (uint32_t *)&req;
-	parms.req_size = sizeof(req);
-	parms.resp_data = (uint32_t *)&resp;
-	parms.resp_size = sizeof(resp);
-	parms.mailbox = TF_KONG_MB;
-
-	rc = tfp_send_msg_direct(tfp,
-				 &parms);
-	return rc;
-}
-
 int
 tf_msg_session_resc_qcaps(struct tf *tfp,
 			  enum tf_dir dir,
diff --git a/drivers/net/bnxt/tf_core/tf_msg.h b/drivers/net/bnxt/tf_core/tf_msg.h
index 72bf850487..4483017ada 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.h
+++ b/drivers/net/bnxt/tf_core/tf_msg.h
@@ -38,26 +38,6 @@ int tf_msg_session_open(struct tf *tfp,
 			uint8_t *fw_session_id,
 			uint8_t *fw_session_client_id);
 
-/**
- * Sends session close request to Firmware
- *
- * [in] session
- *   Pointer to session handle
- *
- * [in] ctrl_chan_name
- *   PCI name of the control channel
- *
- * [in] fw_session_id
- *   Pointer to the fw_session_id that is assigned to the session at
- *   time of session open
- *
- * Returns:
- *   0 on Success else internal Truflow error
- */
-int tf_msg_session_attach(struct tf *tfp,
-			  char *ctrl_channel_name,
-			  uint8_t tf_fw_session_id);
-
 /**
  * Sends session client register request to Firmware
  *
@@ -105,17 +85,6 @@ int tf_msg_session_client_unregister(struct tf *tfp,
  */
 int tf_msg_session_close(struct tf *tfp);
 
-/**
- * Sends session query config request to TF Firmware
- *
- * [in] session
- *   Pointer to session handle
- *
- * Returns:
- *   0 on Success else internal Truflow error
- */
-int tf_msg_session_qcfg(struct tf *tfp);
-
 /**
  * Sends session HW resource query capability request to TF Firmware
  *
diff --git a/drivers/net/bnxt/tf_core/tf_session.c b/drivers/net/bnxt/tf_core/tf_session.c
index c95c4bdbd3..912b2837f9 100644
--- a/drivers/net/bnxt/tf_core/tf_session.c
+++ b/drivers/net/bnxt/tf_core/tf_session.c
@@ -749,36 +749,3 @@ tf_session_get_fw_session_id(struct tf *tfp,
 
 	return 0;
 }
-
-int
-tf_session_get_session_id(struct tf *tfp,
-			  union tf_session_id *session_id)
-{
-	int rc;
-	struct tf_session *tfs = NULL;
-
-	if (tfp->session == NULL) {
-		rc = -EINVAL;
-		TFP_DRV_LOG(ERR,
-			    "Session not created, rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	if (session_id == NULL) {
-		rc = -EINVAL;
-		TFP_DRV_LOG(ERR,
-			    "Invalid Argument(s), rc:%s\n",
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Using internal version as session client may not exist yet */
-	rc = tf_session_get_session_internal(tfp, &tfs);
-	if (rc)
-		return rc;
-
-	*session_id = tfs->session_id;
-
-	return 0;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h
index 6a5c894033..37d4703cc1 100644
--- a/drivers/net/bnxt/tf_core/tf_session.h
+++ b/drivers/net/bnxt/tf_core/tf_session.h
@@ -394,20 +394,4 @@ int tf_session_get_device(struct tf_session *tfs,
 int tf_session_get_fw_session_id(struct tf *tfp,
 				 uint8_t *fw_session_id);
 
-/**
- * Looks up the Session id the requested TF handle.
- *
- * [in] tfp
- *   Pointer to TF handle
- *
- * [out] session_id
- *   Pointer to the session_id
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_session_get_session_id(struct tf *tfp,
-			      union tf_session_id *session_id);
-
 #endif /* _TF_SESSION_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index a4207eb3ab..2caf4f8747 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -637,59 +637,6 @@ tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
 	return 0;
 }
 
-int
-tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
-{
-	uint16_t idx;
-	struct tf_shadow_tbl_ctxt *ctxt;
-	struct tf_tbl_set_parms *sparms;
-	struct tf_shadow_tbl_db *shadow_db;
-	struct tf_shadow_tbl_shadow_result_entry *sr_entry;
-
-	if (!parms || !parms->sparms) {
-		TFP_DRV_LOG(ERR, "Null parms\n");
-		return -EINVAL;
-	}
-
-	sparms = parms->sparms;
-	if (!sparms->data || !sparms->data_sz_in_bytes) {
-		TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type));
-		return -EINVAL;
-	}
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
-	if (!ctxt) {
-		/* We aren't tracking this table, so return success */
-		TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
-			    tf_tbl_type_2_str(sparms->type));
-		return 0;
-	}
-
-	idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
-	if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
-		TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type),
-			    sparms->idx);
-		return -EINVAL;
-	}
-
-	/* Write the result table, the key/hash has been written already */
-	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
-
-	/*
-	 * If the handle is not valid, the bind was never called.  We aren't
-	 * tracking this entry.
-	 */
-	if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
-		return 0;
-
-	return 0;
-}
-
 int
 tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
 {
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
index 96a34309b2..bbd8cfd3a9 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
@@ -225,20 +225,6 @@ int tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms);
  */
 int tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms);
 
-/**
- * Inserts an element into the Shadow table DB. Will fail if the
- * elements ref_count is different from 0. Ref_count after insert will
- * be incremented.
- *
- * [in] parms
- *   Pointer to insert parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms);
-
 /**
  * Removes an element from the Shadow table DB. Will fail if the
  * elements ref_count is 0. Ref_count after removal will be
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index 7679d09eea..e3fec46926 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -683,10 +683,3 @@ tf_tcam_set(struct tf *tfp __rte_unused,
 
 	return 0;
 }
-
-int
-tf_tcam_get(struct tf *tfp __rte_unused,
-	    struct tf_tcam_get_parms *parms __rte_unused)
-{
-	return 0;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index 280f138dd3..9614cf52c7 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -355,21 +355,4 @@ int tf_tcam_alloc_search(struct tf *tfp,
 int tf_tcam_set(struct tf *tfp,
 		struct tf_tcam_set_parms *parms);
 
-/**
- * Retrieves the requested element by sending a firmware request to get
- * the element.
- *
- * [in] tfp
- *   Pointer to TF handle, used for HCAPI communication
- *
- * [in] parms
- *   Pointer to parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_tcam_get(struct tf *tfp,
-		struct tf_tcam_get_parms *parms);
-
 #endif /* _TF_TCAM_H */
diff --git a/drivers/net/bnxt/tf_core/tfp.c b/drivers/net/bnxt/tf_core/tfp.c
index 0f6d63cc00..49ca034241 100644
--- a/drivers/net/bnxt/tf_core/tfp.c
+++ b/drivers/net/bnxt/tf_core/tfp.c
@@ -135,33 +135,6 @@ tfp_memcpy(void *dest, void *src, size_t n)
 	rte_memcpy(dest, src, n);
 }
 
-/**
- * Used to initialize portable spin lock
- */
-void
-tfp_spinlock_init(struct tfp_spinlock_parms *parms)
-{
-	rte_spinlock_init(&parms->slock);
-}
-
-/**
- * Used to lock portable spin lock
- */
-void
-tfp_spinlock_lock(struct tfp_spinlock_parms *parms)
-{
-	rte_spinlock_lock(&parms->slock);
-}
-
-/**
- * Used to unlock portable spin lock
- */
-void
-tfp_spinlock_unlock(struct tfp_spinlock_parms *parms)
-{
-	rte_spinlock_unlock(&parms->slock);
-}
-
 int
 tfp_get_fid(struct tf *tfp, uint16_t *fw_fid)
 {
diff --git a/drivers/net/bnxt/tf_core/tfp.h b/drivers/net/bnxt/tf_core/tfp.h
index 551b9c569f..fc2409371a 100644
--- a/drivers/net/bnxt/tf_core/tfp.h
+++ b/drivers/net/bnxt/tf_core/tfp.h
@@ -202,10 +202,6 @@ int tfp_calloc(struct tfp_calloc_parms *parms);
 void tfp_memcpy(void *dest, void *src, size_t n);
 void tfp_free(void *addr);
 
-void tfp_spinlock_init(struct tfp_spinlock_parms *slock);
-void tfp_spinlock_lock(struct tfp_spinlock_parms *slock);
-void tfp_spinlock_unlock(struct tfp_spinlock_parms *slock);
-
 /**
  * Lookup of the FID in the platform specific structure.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index 45025516f4..4a6105a05e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -214,74 +214,6 @@ void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
 	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
 }
 
-/*
- * DMA-in the raw counter data from the HW and accumulate in the
- * local accumulator table using the TF-Core API
- *
- * tfp [in] The TF-Core context
- *
- * fc_info [in] The ULP Flow counter info ptr
- *
- * dir [in] The direction of the flow
- *
- * num_counters [in] The number of counters
- *
- */
-__rte_unused static int32_t
-ulp_bulk_get_flow_stats(struct tf *tfp,
-			struct bnxt_ulp_fc_info *fc_info,
-			enum tf_dir dir,
-			struct bnxt_ulp_device_params *dparms)
-/* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
-{
-	int rc = 0;
-	struct tf_tbl_get_bulk_parms parms = { 0 };
-	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
-	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
-	uint64_t *stats = NULL;
-	uint16_t i = 0;
-
-	parms.dir = dir;
-	parms.type = stype;
-	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
-	parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
-	/*
-	 * TODO:
-	 * Size of an entry needs to obtained from template
-	 */
-	parms.entry_sz_in_bytes = sizeof(uint64_t);
-	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
-	parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;
-
-	if (!stats) {
-		PMD_DRV_LOG(ERR,
-			    "BULK: Memory not initialized id:0x%x dir:%d\n",
-			    parms.starting_idx, dir);
-		return -EINVAL;
-	}
-
-	rc = tf_tbl_bulk_get(tfp, &parms);
-	if (rc) {
-		PMD_DRV_LOG(ERR,
-			    "BULK: Get failed for id:0x%x rc:%d\n",
-			    parms.starting_idx, rc);
-		return rc;
-	}
-
-	for (i = 0; i < parms.num_entries; i++) {
-		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
-		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
-		if (!sw_acc_tbl_entry->valid)
-			continue;
-		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
-							      dparms);
-		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
-								dparms);
-	}
-
-	return rc;
-}
-
 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
 				    struct tf *tfp,
 				    struct bnxt_ulp_fc_info *fc_info,
@@ -387,16 +319,6 @@ ulp_fc_mgr_alarm_cb(void *arg)
 		ulp_fc_mgr_thread_cancel(ctxt);
 		return;
 	}
-	/*
-	 * Commented for now till GET_BULK is resolved, just get the first flow
-	 * stat for now
-	 for (i = 0; i < TF_DIR_MAX; i++) {
-		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
-					     dparms->flow_count_db_entries);
-		if (rc)
-			break;
-	}
-	*/
 
 	/* reset the parent accumulation counters before accumulation if any */
 	ulp_flow_db_parent_flow_count_reset(ctxt);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 4b4eaeb126..2d1dbb7e6e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -226,37 +226,6 @@ ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
 	return 0;
 }
 
-/*
- * Api to get the function id for a given ulp ifindex.
- *
- * ulp_ctxt [in] Ptr to ulp context
- * ifindex [in] ulp ifindex
- * func_id [out] the function id of the given ifindex.
- *
- * Returns 0 on success or negative number on failure.
- */
-int32_t
-ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt,
-			    uint32_t ifindex,
-			    uint32_t fid_type,
-			    uint16_t *func_id)
-{
-	struct bnxt_ulp_port_db *port_db;
-
-	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
-	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
-		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
-		return -EINVAL;
-	}
-
-	if (fid_type == BNXT_ULP_DRV_FUNC_FID)
-		*func_id =  port_db->ulp_intf_list[ifindex].drv_func_id;
-	else
-		*func_id =  port_db->ulp_intf_list[ifindex].vf_func_id;
-
-	return 0;
-}
-
 /*
  * Api to get the svif for a given ulp ifindex.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 7b85987a0c..bd7032004f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -122,20 +122,6 @@ int32_t
 ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
 				  uint32_t port_id, uint32_t *ifindex);
 
-/*
- * Api to get the function id for a given ulp ifindex.
- *
- * ulp_ctxt [in] Ptr to ulp context
- * ifindex [in] ulp ifindex
- * func_id [out] the function id of the given ifindex.
- *
- * Returns 0 on success or negative number on failure.
- */
-int32_t
-ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt,
-			    uint32_t ifindex, uint32_t fid_type,
-			    uint16_t *func_id);
-
 /*
  * Api to get the svif for a given ulp ifindex.
  *
diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.c b/drivers/net/bnxt/tf_ulp/ulp_utils.c
index a13a3bbf65..b5a4f85fcf 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_utils.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_utils.c
@@ -803,17 +803,6 @@ int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size)
 	return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1);
 }
 
-/* Function to check if bitmap is zero.Return 1 on success */
-uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size)
-{
-	while (size-- > 0) {
-		if (*bitmap != 0)
-			return 0;
-		bitmap++;
-	}
-	return 1;
-}
-
 /* Function to check if bitmap is ones. Return 1 on success */
 uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size)
 {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.h b/drivers/net/bnxt/tf_ulp/ulp_utils.h
index 749ac06d87..a45a2705da 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_utils.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_utils.h
@@ -384,9 +384,6 @@ ulp_encap_buffer_copy(uint8_t *dst,
  */
 int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size);
 
-/* Function to check if bitmap is zero.Return 1 on success */
-uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size);
-
 /* Function to check if bitmap is ones. Return 1 on success */
 uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size);
 
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index 8f198bd50e..e5645a10ab 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -224,10 +224,6 @@ int
 mac_address_set(struct rte_eth_dev *eth_dev,
 		struct rte_ether_addr *new_mac_addr);
 
-int
-mac_address_get(struct rte_eth_dev *eth_dev,
-		struct rte_ether_addr *dst_mac_addr);
-
 int
 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
 
diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h
index 874aa91a5f..23a4393f23 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -278,19 +278,6 @@ rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id);
 int
 rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms);
 
-/**
- * Get the current link monitoring frequency (in ms) for monitoring of the link
- * status of slave devices
- *
- * @param bonded_port_id	Port ID of bonded device.
- *
- * @return
- *	Monitoring interval on success, negative value otherwise.
- */
-int
-rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id);
-
-
 /**
  * Set the period in milliseconds for delaying the disabling of a bonded link
  * when the link down status has been detected
@@ -305,18 +292,6 @@ int
 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
 				       uint32_t delay_ms);
 
-/**
- * Get the period in milliseconds set for delaying the disabling of a bonded
- * link when the link down status has been detected
- *
- * @param bonded_port_id	Port ID of bonded device.
- *
- * @return
- *  Delay period on success, negative value otherwise.
- */
-int
-rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id);
-
 /**
  * Set the period in milliseconds for delaying the enabling of a bonded link
  * when the link up status has been detected
@@ -331,19 +306,6 @@ int
 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id,
 				    uint32_t delay_ms);
 
-/**
- * Get the period in milliseconds set for delaying the enabling of a bonded
- * link when the link up status has been detected
- *
- * @param bonded_port_id	Port ID of bonded device.
- *
- * @return
- *  Delay period on success, negative value otherwise.
- */
-int
-rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id);
-
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 55c8e3167c..1c09d2e4ba 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -981,19 +981,6 @@ rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
 	return 0;
 }
 
-int
-rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
-{
-	struct bond_dev_private *internals;
-
-	if (valid_bonded_port_id(bonded_port_id) != 0)
-		return -1;
-
-	internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
-	return internals->link_status_polling_interval_ms;
-}
-
 int
 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
 				       uint32_t delay_ms)
@@ -1010,19 +997,6 @@ rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
 	return 0;
 }
 
-int
-rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
-{
-	struct bond_dev_private *internals;
-
-	if (valid_bonded_port_id(bonded_port_id) != 0)
-		return -1;
-
-	internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
-	return internals->link_down_delay_ms;
-}
-
 int
 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
 
@@ -1037,16 +1011,3 @@ rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
 
 	return 0;
 }
-
-int
-rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
-{
-	struct bond_dev_private *internals;
-
-	if (valid_bonded_port_id(bonded_port_id) != 0)
-		return -1;
-
-	internals = rte_eth_devices[bonded_port_id].data->dev_private;
-
-	return internals->link_up_delay_ms;
-}
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 057b1ada54..d9a0154de1 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1396,28 +1396,6 @@ link_properties_valid(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
-int
-mac_address_get(struct rte_eth_dev *eth_dev,
-		struct rte_ether_addr *dst_mac_addr)
-{
-	struct rte_ether_addr *mac_addr;
-
-	if (eth_dev == NULL) {
-		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
-		return -1;
-	}
-
-	if (dst_mac_addr == NULL) {
-		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
-		return -1;
-	}
-
-	mac_addr = eth_dev->data->mac_addrs;
-
-	rte_ether_addr_copy(mac_addr, dst_mac_addr);
-	return 0;
-}
-
 int
 mac_address_set(struct rte_eth_dev *eth_dev,
 		struct rte_ether_addr *new_mac_addr)
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 8fe8e2a36b..6e360bc42d 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -363,8 +363,6 @@ int t4vf_get_vfres(struct adapter *adap);
 int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
 				unsigned int cache_line_size,
 				enum chip_type chip_compat);
-int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
-			 unsigned int cache_line_size);
 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int nparams, const u32 *params,
@@ -485,9 +483,6 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
 		      unsigned int start_idx);
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
-		       unsigned int data_reg, const u32 *vals,
-		       unsigned int nregs, unsigned int start_idx);
 
 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_get_pfres(struct adapter *adapter);
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 9217956b42..d5b916ccf5 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -189,28 +189,6 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 	}
 }
 
-/**
- * t4_write_indirect - write indirectly addressed registers
- * @adap: the adapter
- * @addr_reg: register holding the indirect addresses
- * @data_reg: register holding the value for the indirect registers
- * @vals: values to write
- * @nregs: how many indirect registers to write
- * @start_idx: address of first indirect register to write
- *
- * Writes a sequential block of registers that are accessed indirectly
- * through an address/data register pair.
- */
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
-		       unsigned int data_reg, const u32 *vals,
-		       unsigned int nregs, unsigned int start_idx)
-{
-	while (nregs--) {
-		t4_write_reg(adap, addr_reg, start_idx++);
-		t4_write_reg(adap, data_reg, *vals++);
-	}
-}
-
 /**
  * t4_report_fw_error - report firmware error
  * @adap: the adapter
@@ -3860,25 +3838,6 @@ int t4_fixup_host_params_compat(struct adapter *adap,
 	return 0;
 }
 
-/**
- * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
- * @adap: the adapter
- * @page_size: the host's Base Page Size
- * @cache_line_size: the host's Cache Line Size
- *
- * Various registers in T4 contain values which are dependent on the
- * host's Base Page and Cache Line Sizes.  This function will fix all of
- * those registers with the appropriate values as passed in ...
- *
- * This routine makes changes which are compatible with T4 chips.
- */
-int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
-			 unsigned int cache_line_size)
-{
-	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
-					   T4_LAST_REV);
-}
-
 /**
  * t4_fw_initialize - ask FW to initialize the device
  * @adap: the adapter
diff --git a/drivers/net/dpaa/fmlib/fm_vsp.c b/drivers/net/dpaa/fmlib/fm_vsp.c
index 78efd93f22..0e261e3d1a 100644
--- a/drivers/net/dpaa/fmlib/fm_vsp.c
+++ b/drivers/net/dpaa/fmlib/fm_vsp.c
@@ -19,25 +19,6 @@
 #include "fm_vsp_ext.h"
 #include <dpaa_ethdev.h>
 
-uint32_t
-fm_port_vsp_alloc(t_handle h_fm_port,
-		  t_fm_port_vspalloc_params *p_params)
-{
-	t_device *p_dev = (t_device *)h_fm_port;
-	ioc_fm_port_vsp_alloc_params_t params;
-
-	_fml_dbg("Calling...\n");
-	memset(&params, 0, sizeof(ioc_fm_port_vsp_alloc_params_t));
-	memcpy(&params.params, p_params, sizeof(t_fm_port_vspalloc_params));
-
-	if (ioctl(p_dev->fd, FM_PORT_IOC_VSP_ALLOC, &params))
-		RETURN_ERROR(MINOR, E_INVALID_OPERATION, NO_MSG);
-
-	_fml_dbg("Called.\n");
-
-	return E_OK;
-}
-
 t_handle
 fm_vsp_config(t_fm_vsp_params *p_fm_vsp_params)
 {
diff --git a/drivers/net/dpaa/fmlib/fm_vsp_ext.h b/drivers/net/dpaa/fmlib/fm_vsp_ext.h
index b51c46162d..97590ea4c0 100644
--- a/drivers/net/dpaa/fmlib/fm_vsp_ext.h
+++ b/drivers/net/dpaa/fmlib/fm_vsp_ext.h
@@ -99,9 +99,6 @@ typedef struct ioc_fm_buffer_prefix_content_params_t {
 	ioc_fm_buffer_prefix_content_t fm_buffer_prefix_content;
 } ioc_fm_buffer_prefix_content_params_t;
 
-uint32_t fm_port_vsp_alloc(t_handle h_fm_port,
-			  t_fm_port_vspalloc_params *p_params);
-
 t_handle fm_vsp_config(t_fm_vsp_params *p_fm_vsp_params);
 
 uint32_t fm_vsp_init(t_handle h_fm_vsp);
diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
index 63f1ec7d30..dce9c55a9a 100644
--- a/drivers/net/dpaa2/mc/dpdmux.c
+++ b/drivers/net/dpaa2/mc/dpdmux.c
@@ -57,227 +57,6 @@ int dpdmux_open(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpdmux_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPDMUX object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_close(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_create() - Create the DPDMUX object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPDMUX object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmux_cfg	*cfg,
-		  uint32_t *obj_id)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_create *cmd_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmux_cmd_create *)cmd.params;
-	cmd_params->method = cfg->method;
-	cmd_params->manip = cfg->manip;
-	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-	cmd_params->adv_max_dmat_entries =
-			cpu_to_le16(cfg->adv.max_dmat_entries);
-	cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
-	cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
-	cmd_params->options = cpu_to_le64(cfg->adv.options);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpdmux_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_destroy *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
-	cmd_params->dpdmux_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_enable() - Enable DPDMUX functionality
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_enable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_disable() - Disable DPDMUX functionality
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_disable(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_rsp_is_enabled *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
-	*en = dpdmux_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpdmux_get_attributes() - Retrieve DPDMUX attributes
  * @mc_io:	Pointer to MC portal's I/O object
@@ -318,407 +97,6 @@ int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpdmux_if_enable() - Enable Interface
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface Identifier
- *
- * Return:	Completion status. '0' on Success; Error code otherwise.
- */
-int dpdmux_if_enable(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     uint16_t if_id)
-{
-	struct dpdmux_cmd_if *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_disable() - Disable Interface
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface Identifier
- *
- * Return:	Completion status. '0' on Success; Error code otherwise.
- */
-int dpdmux_if_disable(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint16_t if_id)
-{
-	struct dpdmux_cmd_if *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPDMUX object
- * @max_frame_length:	The required maximum frame length
- *
- * Update the maximum frame length on all DMUX interfaces.
- * In case of VEPA, the maximum frame length on all dmux interfaces
- * will be updated with the minimum value of the mfls of the connected
- * dpnis and the actual value of dmux mfl.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint16_t max_frame_length)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_set_max_frame_length *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
-	cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_ul_reset_counters() - Function resets the uplink counter
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_set_accepted_frames() - Set the accepted frame types
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface ID (0 for uplink, or 1-num_ifs);
- * @cfg:	Frame types configuration
- *
- * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
- * priority-tagged frames are discarded.
- * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
- * priority-tagged frames are accepted.
- * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged,
- * untagged and priority-tagged frame are accepted;
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint16_t if_id,
-				  const struct dpdmux_accepted_frames *cfg)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	dpdmux_set_field(cmd_params->frames_options,
-			 ACCEPTED_FRAMES_TYPE,
-			 cfg->type);
-	dpdmux_set_field(cmd_params->frames_options,
-			 UNACCEPTED_FRAMES_ACTION,
-			 cfg->unaccept_act);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Interface ID (0 for uplink, or 1-num_ifs);
- * @attr:	Interface attributes
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_if_attr *attr)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if *cmd_params;
-	struct dpdmux_rsp_if_get_attr *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
-	attr->rate = le32_to_cpu(rsp_params->rate);
-	attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
-	attr->is_default = dpdmux_get_field(rsp_params->enabled, IS_DEFAULT);
-	attr->accept_frame_type = dpdmux_get_field(
-				  rsp_params->accepted_frames_type,
-				  ACCEPTED_FRAMES_TYPE);
-
-	return 0;
-}
-
-/**
- * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Destination interface ID
- * @rule:	L2 rule
- *
- * Function removes a L2 rule from DPDMUX table
- * or adds an interface to an existing multicast address
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     const struct dpdmux_l2_rule *rule)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_l2_rule *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
-	cmd_params->mac_addr5 = rule->mac_addr[5];
-	cmd_params->mac_addr4 = rule->mac_addr[4];
-	cmd_params->mac_addr3 = rule->mac_addr[3];
-	cmd_params->mac_addr2 = rule->mac_addr[2];
-	cmd_params->mac_addr1 = rule->mac_addr[1];
-	cmd_params->mac_addr0 = rule->mac_addr[0];
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPDMUX object
- * @if_id:	Destination interface ID
- * @rule:	L2 rule
- *
- * Function adds a L2 rule into DPDMUX table
- * or adds an interface to an existing multicast address
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  const struct dpdmux_l2_rule *rule)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_l2_rule *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
-	cmd_params->mac_addr5 = rule->mac_addr[5];
-	cmd_params->mac_addr4 = rule->mac_addr[4];
-	cmd_params->mac_addr3 = rule->mac_addr[3];
-	cmd_params->mac_addr2 = rule->mac_addr[2];
-	cmd_params->mac_addr1 = rule->mac_addr[1];
-	cmd_params->mac_addr0 = rule->mac_addr[0];
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_get_counter() - Functions obtains specific counter of an interface
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPDMUX object
- * @if_id:  Interface Id
- * @counter_type: counter type
- * @counter: Returned specific counter information
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  enum dpdmux_counter_type counter_type,
-			  uint64_t *counter)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_get_counter *cmd_params;
-	struct dpdmux_rsp_if_get_counter *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->counter_type = counter_type;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
-	*counter = le64_to_cpu(rsp_params->counter);
-
-	return 0;
-}
-
-/**
- * dpdmux_if_set_link_cfg() - set the link configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: interface id
- * @cfg: Link configuration
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   uint16_t if_id,
-			   struct dpdmux_link_cfg *cfg)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_set_link_cfg *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-	cmd_params->rate = cpu_to_le32(cfg->rate);
-	cmd_params->options = cpu_to_le64(cfg->options);
-	cmd_params->advertising = cpu_to_le64(cfg->advertising);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_if_get_link_state - Return the link state
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: interface id
- * @state: link state
- *
- * @returns	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_link_state *state)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_cmd_if_get_link_state *cmd_params;
-	struct dpdmux_rsp_if_get_link_state *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
-	cmd_params->if_id = cpu_to_le16(if_id);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
-	state->rate = le32_to_cpu(rsp_params->rate);
-	state->options = le64_to_cpu(rsp_params->options);
-	state->up = dpdmux_get_field(rsp_params->up, UP);
-	state->state_valid = dpdmux_get_field(rsp_params->up, STATE_VALID);
-	state->supported = le64_to_cpu(rsp_params->supported);
-	state->advertising = le64_to_cpu(rsp_params->advertising);
-
-	return 0;
-}
-
 /**
  * dpdmux_if_set_default - Set default interface
  * @mc_io:	Pointer to MC portal's I/O object
@@ -747,41 +125,6 @@ int dpdmux_if_set_default(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpdmux_if_get_default - Get default interface
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @if_id: interface id
- *
- * @returns	'0' on Success; Error code otherwise.
- */
-int dpdmux_if_get_default(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		uint16_t *if_id)
-{
-	struct dpdmux_cmd_if *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_DEFAULT,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpdmux_cmd_if *)cmd.params;
-	*if_id = le16_to_cpu(rsp_params->if_id);
-
-	return 0;
-}
-
 /**
  * dpdmux_set_custom_key - Set a custom classification key.
  *
@@ -859,71 +202,3 @@ int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpdmux_remove_custom_cls_entry - Removes a custom classification entry.
- *
- * This API is only available for DPDMUX instances created with
- * DPDMUX_METHOD_CUSTOM.  The API can be used to remove classification
- * entries previously inserted using dpdmux_add_custom_cls_entry.
- *
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPSW object
- * @rule: Classification rule to remove
- *
- * @returns	'0' on Success; Error code otherwise.
- */
-int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		struct dpdmux_rule_cfg *rule)
-{
-	struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
-	cmd_params->key_size = rule->key_size;
-	cmd_params->key_iova = cpu_to_le64(rule->key_iova);
-	cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpdmux_get_api_version() - Get Data Path Demux API version
- * @mc_io:  Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path demux API
- * @minor_ver:	Minor version of data path demux API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver)
-{
-	struct mc_command cmd = { 0 };
-	struct dpdmux_rsp_get_api_version *rsp_params;
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 683d7bcc17..ad4df05dfc 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -80,99 +80,6 @@ int dpni_close(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_create() - Create the DPNI object
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPNI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpni_cfg *cfg,
-		uint32_t *obj_id)
-{
-	struct dpni_cmd_create *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dpni_cmd_create *)cmd.params;
-	cmd_params->options = cpu_to_le32(cfg->options);
-	cmd_params->num_queues = cfg->num_queues;
-	cmd_params->num_tcs = cfg->num_tcs;
-	cmd_params->mac_filter_entries = cfg->mac_filter_entries;
-	cmd_params->num_rx_tcs = cfg->num_rx_tcs;
-	cmd_params->vlan_filter_entries =  cfg->vlan_filter_entries;
-	cmd_params->qos_entries = cfg->qos_entries;
-	cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries);
-	cmd_params->num_cgs = cfg->num_cgs;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dpni_destroy() - Destroy the DPNI object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id)
-{
-	struct dpni_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	/* set object id to destroy */
-	cmd_params = (struct dpni_cmd_destroy *)cmd.params;
-	cmd_params->dpsw_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_set_pools() - Set buffer pools configuration
  * @mc_io:	Pointer to MC portal's I/O object
@@ -356,47 +263,6 @@ int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_irq_enable() - Get overall interrupt state
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @irq_index:	The interrupt index to configure
- * @en:		Returned interrupt state - enable = 1, disable = 0
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			uint8_t irq_index,
-			uint8_t *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_irq_enable *cmd_params;
-	struct dpni_rsp_get_irq_enable *rsp_params;
-
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
-	cmd_params->irq_index = irq_index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
-	*en = dpni_get_field(rsp_params->enabled, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpni_set_irq_mask() - Set interrupt mask.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -434,49 +300,6 @@ int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_irq_mask() - Get interrupt mask.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @irq_index:	The interrupt index to configure
- * @mask:	Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint8_t irq_index,
-		      uint32_t *mask)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_irq_mask *cmd_params;
-	struct dpni_rsp_get_irq_mask *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
-	cmd_params->irq_index = irq_index;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
-	*mask = le32_to_cpu(rsp_params->mask);
-
-	return 0;
-}
-
 /**
  * dpni_get_irq_status() - Get the current status of any pending interrupts.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -633,57 +456,6 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @qtype:	Type of queue to retrieve configuration for
- * @layout:	Returns buffer layout attributes
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   enum dpni_queue_type qtype,
-			   struct dpni_buffer_layout *layout)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_buffer_layout *cmd_params;
-	struct dpni_rsp_get_buffer_layout *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
-	cmd_params->qtype = qtype;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
-	layout->pass_timestamp =
-				(int)dpni_get_field(rsp_params->flags, PASS_TS);
-	layout->pass_parser_result =
-				(int)dpni_get_field(rsp_params->flags, PASS_PR);
-	layout->pass_frame_status =
-				(int)dpni_get_field(rsp_params->flags, PASS_FS);
-	layout->pass_sw_opaque =
-			(int)dpni_get_field(rsp_params->flags, PASS_SWO);
-	layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
-	layout->data_align = le16_to_cpu(rsp_params->data_align);
-	layout->data_head_room = le16_to_cpu(rsp_params->head_room);
-	layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
-
-	return 0;
-}
-
 /**
  * dpni_set_buffer_layout() - Set buffer layout configuration.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -758,50 +530,6 @@ int dpni_set_offload(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_offload() - Get DPNI offload configuration.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @type:	Type of DPNI offload
- * @config:	Offload configuration.
- *			For checksum offloads, a value of 1 indicates that the
- *			offload is enabled.
- *
- * Return:	'0' on Success; Error code otherwise.
- *
- * @warning	Allowed only when DPNI is disabled
- */
-int dpni_get_offload(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     enum dpni_offload type,
-		     uint32_t *config)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_offload *cmd_params;
-	struct dpni_rsp_get_offload *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
-	cmd_params->dpni_offload = type;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
-	*config = le32_to_cpu(rsp_params->config);
-
-	return 0;
-}
-
 /**
  * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
  *			for enqueue operations
@@ -844,41 +572,6 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @data_offset: Tx data offset (from start of buffer)
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token,
-			    uint16_t *data_offset)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_tx_data_offset *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
-	*data_offset = le16_to_cpu(rsp_params->data_offset);
-
-	return 0;
-}
-
 /**
  * dpni_set_link_cfg() - set the link configuration.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -978,42 +671,6 @@ int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_max_frame_length() - Get the maximum received frame length.
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPNI object
- * @max_frame_length:	Maximum received frame length (in bytes);
- *			frame is discarded if its length exceeds this value
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-			      uint32_t cmd_flags,
-			      uint16_t token,
-			      uint16_t *max_frame_length)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_max_frame_length *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
-	*max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
-
-	return 0;
-}
-
 /**
  * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1042,41 +699,6 @@ int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_multicast_promisc() - Get multicast promiscuous mode
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @en:		Returns '1' if enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-			       uint32_t cmd_flags,
-			       uint16_t token,
-			       int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_multicast_promisc *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
-	*en = dpni_get_field(rsp_params->enabled, ENABLE);
-
-	return 0;
-}
-
 /**
  * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1096,48 +718,13 @@ int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
-	dpni_set_field(cmd_params->enable, ENABLE, en);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_get_unicast_promisc() - Get unicast promiscuous mode
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @en:		Returns '1' if enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_rsp_get_unicast_promisc *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
-	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+	dpni_set_field(cmd_params->enable, ENABLE, en);
 
-	return 0;
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
 }
 
 /**
@@ -1281,39 +868,6 @@ int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @unicast:	Set to '1' to clear unicast addresses
- * @multicast:	Set to '1' to clear multicast addresses
- *
- * The primary MAC address is not cleared by this operation.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int unicast,
-			   int multicast)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_clear_mac_filters *cmd_params;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
-	dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
-	dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
  *			port the DPNI is attached to
@@ -1453,29 +1007,6 @@ int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_vlan_filters() - Clear all VLAN filters
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
  * @mc_io:	Pointer to MC portal's I/O object
@@ -1675,32 +1206,6 @@ int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_qos_table() - Clear all QoS mapping entries
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- *
- * Following this function call, all frames are directed to
- * the default traffic class (0)
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
  *			(to select a flow ID)
@@ -1779,35 +1284,6 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
- *			traffic class
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @tc_id:	Traffic class selection (0-7)
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint8_t tc_id)
-{
-	struct dpni_cmd_clear_fs_entries *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_clear_fs_entries *)cmd.params;
-	cmd_params->tc_id = tc_id;
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
 /**
  * dpni_set_congestion_notification() - Set traffic class congestion
  *	notification configuration
@@ -1858,94 +1334,6 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_congestion_notification() - Get traffic class congestion
- *	notification configuration
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @qtype:	Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id:	Traffic class selection (0-7)
- * @cfg:	congestion notification configuration
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
-				     uint32_t cmd_flags,
-				     uint16_t token,
-				     enum dpni_queue_type qtype,
-				     uint8_t tc_id,
-				struct dpni_congestion_notification_cfg *cfg)
-{
-	struct dpni_rsp_get_congestion_notification *rsp_params;
-	struct dpni_cmd_get_congestion_notification *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(
-					DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
-					cmd_flags,
-					token);
-	cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
-	cmd_params->qtype = qtype;
-	cmd_params->tc = tc_id;
-	cmd_params->congestion_point = cfg->cg_point;
-	cmd_params->cgid = cfg->cgid;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
-	cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
-	cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-	cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-	cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-	cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-	cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-	cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-	cfg->dest_cfg.priority = rsp_params->dest_priority;
-	cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
-						 DEST_TYPE);
-
-	return 0;
-}
-
-/**
- * dpni_get_api_version() - Get Data Path Network Interface API version
- * @mc_io:  Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path network interface API
- * @minor_ver:	Minor version of data path network interface API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dpni_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver)
-{
-	struct dpni_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
-
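A plausible use of dpni_get_api_version(), were it restored, is gating optional features on the DPNI API level reported by the MC firmware; a minimal sketch (version thresholds are hypothetical):

	static int dpni_api_at_least(struct fsl_mc_io *mc_io,
				     uint16_t want_major, uint16_t want_minor)
	{
		uint16_t major, minor;
		int err;

		err = dpni_get_api_version(mc_io, 0 /* cmd_flags */, &major, &minor);
		if (err)
			return err;

		/* non-zero when the reported DPNI API is >= the requested version */
		return (major > want_major) ||
		       (major == want_major && minor >= want_minor);
	}
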
 /**
  * dpni_set_queue() - Set queue parameters
  * @mc_io:	Pointer to MC portal's I/O object
@@ -2184,67 +1572,6 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_taildrop() - Get taildrop information
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @cg_point:	Congestion point
- * @q_type:	Queue type on which the taildrop is configured.
- *		Only Rx queues are supported for now
- * @tc:		Traffic class to apply this taildrop to
- * @q_index:	Index of the queue if the DPNI supports multiple queues for
- *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
- * @taildrop:	Taildrop structure
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      enum dpni_congestion_point cg_point,
-		      enum dpni_queue_type qtype,
-		      uint8_t tc,
-		      uint8_t index,
-		      struct dpni_taildrop *taildrop)
-{
-	struct mc_command cmd = { 0 };
-	struct dpni_cmd_get_taildrop *cmd_params;
-	struct dpni_rsp_get_taildrop *rsp_params;
-	uint8_t oal_lo, oal_hi;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
-	cmd_params->congestion_point = cg_point;
-	cmd_params->qtype = qtype;
-	cmd_params->tc = tc;
-	cmd_params->index = index;
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
-	taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE);
-	taildrop->units = rsp_params->units;
-	taildrop->threshold = le32_to_cpu(rsp_params->threshold);
-	oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO);
-	oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI);
-	taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo;
-
-	/* Fill the first 4 bits, 'oal' is a 2's complement value of 12 bits */
-	if (taildrop->oal >= 0x0800)
-		taildrop->oal |= 0xF000;
-
-	return 0;
-}
-
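The overhead accounting length (oal) decoded above is a 12-bit two's-complement quantity, so any reassembled value at or above 0x800 is negative; a standalone sketch of the sign extension (illustration only, not driver code):

	#include <stdint.h>

	/* Sign-extend a 12-bit two's-complement value (such as the reassembled
	 * oal above) into a native signed integer. */
	static int16_t sign_extend_12bit(uint16_t v12)
	{
		if (v12 & 0x0800)	/* bit 11 set -> negative */
			v12 |= 0xF000;	/* propagate the sign into bits 15..12 */
		return (int16_t)v12;
	}

	/* Example: 0xFFE encodes -2 bytes of overhead; sign_extend_12bit(0xFFE) == -2 */
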
 /**
  * dpni_set_opr() - Set Order Restoration configuration.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -2290,69 +1617,6 @@ int dpni_set_opr(struct fsl_mc_io *mc_io,
 	return mc_send_command(mc_io, &cmd);
 }
 
-/**
- * dpni_get_opr() - Retrieve Order Restoration config and query.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @tc:		Traffic class, in range 0 to NUM_TCS - 1
- * @index:	Selects the specific queue out of the set allocated
- *			for the same TC. Value must be in range 0 to
- *			NUM_QUEUES - 1
- * @cfg:	Returned OPR configuration
- * @qry:	Returned OPR query
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpni_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t tc,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry)
-{
-	struct dpni_rsp_get_opr *rsp_params;
-	struct dpni_cmd_get_opr *cmd_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OPR,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dpni_cmd_get_opr *)cmd.params;
-	cmd_params->index = index;
-	cmd_params->tc_id = tc;
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpni_rsp_get_opr *)cmd.params;
-	cfg->oloe = rsp_params->oloe;
-	cfg->oeane = rsp_params->oeane;
-	cfg->olws = rsp_params->olws;
-	cfg->oa = rsp_params->oa;
-	cfg->oprrws = rsp_params->oprrws;
-	qry->rip = dpni_get_field(rsp_params->flags, RIP);
-	qry->enable = dpni_get_field(rsp_params->flags, OPR_ENABLE);
-	qry->nesn = le16_to_cpu(rsp_params->nesn);
-	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
-	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
-	qry->tseq_nlis = dpni_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
-	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
-	qry->hseq_nlis = dpni_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
-	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
-	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
-	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
-	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
-
-	return 0;
-}
-
 /**
  * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
  * @mc_io:	Pointer to MC portal's I/O object
@@ -2567,73 +1831,3 @@ int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @src:	Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova:  I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- *		to get the layout.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
-	      uint32_t cmd_flags,
-	      uint16_t token,
-		  enum dpni_soft_sequence_dest src,
-		  uint64_t ss_layout_iova)
-{
-	struct dpni_get_sw_sequence_layout *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
-					  cmd_flags,
-					  token);
-
-	cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
-	cmd_params->src = src;
-	cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout:		software sequence layout
- * @sw_sequence_layout_buf:	Zeroed 264 bytes of memory before mapping it
- *				to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
-			     const uint8_t *sw_sequence_layout_buf)
-{
-	const struct dpni_sw_sequence_layout_entry *ext_params;
-	int i;
-	uint16_t ss_size, ss_offset;
-
-	ext_params = (const struct dpni_sw_sequence_layout_entry *)
-						sw_sequence_layout_buf;
-
-	for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
-		ss_offset = le16_to_cpu(ext_params[i].ss_offset);
-		ss_size = le16_to_cpu(ext_params[i].ss_size);
-
-		if (ss_offset == 0 && ss_size == 0) {
-			layout->num_ss = i;
-			return;
-		}
-
-		layout->ss[i].ss_offset = ss_offset;
-		layout->ss[i].ss_size = ss_size;
-		layout->ss[i].param_offset = ext_params[i].param_offset;
-		layout->ss[i].param_size = ext_params[i].param_size;
-	}
-}
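Were the two helpers above restored, the expected calling sequence is: hand the MC a 264-byte DMA-able buffer through dpni_get_sw_sequence_layout(), then parse it with dpni_extract_sw_sequence_layout(). A hedged sketch, assuming rte_zmalloc()/rte_malloc_virt2iova() are acceptable in this driver context and the object is already opened:

	#include <errno.h>
	#include <rte_malloc.h>
	#include <fsl_dpni.h>

	static int dump_sw_sequence_layout(struct fsl_mc_io *mc_io, uint16_t token,
					   enum dpni_soft_sequence_dest src,
					   struct dpni_sw_sequence_layout *layout)
	{
		uint8_t *buf;
		int err;

		/* MC writes 264 bytes of layout entries into this buffer */
		buf = rte_zmalloc(NULL, 264, RTE_CACHE_LINE_SIZE);
		if (buf == NULL)
			return -ENOMEM;

		err = dpni_get_sw_sequence_layout(mc_io, 0, token, src,
						  rte_malloc_virt2iova(buf));
		if (!err)
			dpni_extract_sw_sequence_layout(layout, buf);

		rte_free(buf);
		return err;
	}
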
diff --git a/drivers/net/dpaa2/mc/dprtc.c b/drivers/net/dpaa2/mc/dprtc.c
index 42ac89150e..96e20bce81 100644
--- a/drivers/net/dpaa2/mc/dprtc.c
+++ b/drivers/net/dpaa2/mc/dprtc.c
@@ -54,213 +54,6 @@ int dprtc_open(struct fsl_mc_io *mc_io,
 	return err;
 }
 
-/**
- * dprtc_close() - Close the control session of the object
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_create() - Create the DPRTC object.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token:	Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:	Configuration structure
- * @obj_id:	Returned object id
- *
- * Create the DPRTC object, allocate required resources and
- * perform required initialization.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dprtc_cfg *cfg,
-		 uint32_t *obj_id)
-{
-	struct mc_command cmd = { 0 };
-	int err;
-
-	(void)(cfg); /* unused */
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
-					  cmd_flags,
-					  dprc_token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	*obj_id = mc_cmd_read_object_id(&cmd);
-
-	return 0;
-}
-
-/**
- * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
- * @mc_io:	Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id:	The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dprtc_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t object_id)
-{
-	struct dprtc_cmd_destroy *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
-					  cmd_flags,
-					  dprc_token);
-	cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
-	cmd_params->object_id = cpu_to_le32(object_id);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_enable() - Enable the DPRTC.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_disable() - Disable the DPRTC.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_is_enabled() - Check if the DPRTC is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en)
-{
-	struct dprtc_rsp_is_enabled *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
-	*en = dprtc_get_field(rsp_params->en, ENABLE);
-
-	return 0;
-}
-
-/**
- * dprtc_reset() - Reset the DPRTC, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token)
-{
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
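For reference, the removed DPRTC control calls compose in the usual enable/check/reset pattern; a minimal sketch (hypothetical, assuming the object is already opened and 'token' is valid):

	static int dprtc_bring_up_sketch(struct fsl_mc_io *mc_io, uint16_t token)
	{
		int en = 0;
		int err;

		err = dprtc_enable(mc_io, 0 /* cmd_flags */, token);
		if (err)
			return err;

		err = dprtc_is_enabled(mc_io, 0, token, &en);
		if (err)
			return err;

		/* fall back to a reset if the MC did not report the object enabled */
		return en ? 0 : dprtc_reset(mc_io, 0, token);
	}
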
 /**
  * dprtc_get_attributes - Retrieve DPRTC attributes.
  *
@@ -299,101 +92,6 @@ int dprtc_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 
-/**
- * dprtc_set_clock_offset() - Sets the clock's offset
- * (usually relative to another clock).
- *
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- * @offset:	New clock offset (in nanoseconds).
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int64_t offset)
-{
-	struct dprtc_cmd_set_clock_offset *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
-	cmd_params->offset = cpu_to_le64(offset);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
- *
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPRTC object
- * @freq_compensation:	The new frequency compensation value to set.
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint32_t freq_compensation)
-{
-	struct dprtc_get_freq_compensation *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
-	cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
- *
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:		Token of DPRTC object
- * @freq_compensation:	Frequency compensation value
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint32_t *freq_compensation)
-{
-	struct dprtc_get_freq_compensation *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
-					  cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
-	*freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
-
-	return 0;
-}
-
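A typical consumer of the two frequency-compensation calls above is a PTP-style clock servo that nudges the current value by a small delta; a hedged sketch (the meaning and scaling of 'delta' are illustrative, not the real TMR register semantics):

	static int dprtc_nudge_freq(struct fsl_mc_io *mc_io, uint16_t token,
				    int32_t delta)
	{
		uint32_t comp;
		int err;

		err = dprtc_get_freq_compensation(mc_io, 0 /* cmd_flags */,
						  token, &comp);
		if (err)
			return err;

		/* apply the servo correction on top of the current compensation */
		comp += (uint32_t)delta;

		return dprtc_set_freq_compensation(mc_io, 0, token, comp);
	}
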
 /**
  * dprtc_get_time() - Returns the current RTC time.
  *
@@ -458,66 +156,3 @@ int dprtc_set_time(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dprtc_set_alarm() - Defines and sets alarm.
- *
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPRTC object
- * @time:	In nanoseconds, the time when the alarm
- *			should go off - must be a multiple of
- *			1 microsecond
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token, uint64_t time)
-{
-	struct dprtc_time *cmd_params;
-	struct mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
-					  cmd_flags,
-					  token);
-	cmd_params = (struct dprtc_time *)cmd.params;
-	cmd_params->time = cpu_to_le64(time);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_api_version() - Get Data Path Real Time Counter API version
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of data path real time counter API
- * @minor_ver:	Minor version of data path real time counter API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver)
-{
-	struct dprtc_rsp_get_api_version *rsp_params;
-	struct mc_command cmd = { 0 };
-	int err;
-
-	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
-					cmd_flags,
-					0);
-
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
-	*major_ver = le16_to_cpu(rsp_params->major);
-	*minor_ver = le16_to_cpu(rsp_params->minor);
-
-	return 0;
-}
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
index accd1ef5c1..eb768fafbb 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
@@ -21,10 +21,6 @@ int dpdmux_open(struct fsl_mc_io *mc_io,
 		int  dpdmux_id,
 		uint16_t  *token);
 
-int dpdmux_close(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
 /**
  * DPDMUX general options
  */
@@ -102,34 +98,6 @@ struct dpdmux_cfg {
 	} adv;
 };
 
-int dpdmux_create(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  const struct dpdmux_cfg *cfg,
-		  uint32_t *obj_id);
-
-int dpdmux_destroy(struct fsl_mc_io *mc_io,
-		   uint16_t dprc_token,
-		   uint32_t cmd_flags,
-		   uint32_t object_id);
-
-int dpdmux_enable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
-
-int dpdmux_disable(struct fsl_mc_io *mc_io,
-		   uint32_t cmd_flags,
-		   uint16_t token);
-
-int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      int *en);
-
-int dpdmux_reset(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
 /**
  * struct dpdmux_attr - Structure representing DPDMUX attributes
  * @id: DPDMUX object ID
@@ -153,11 +121,6 @@ int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
 			  uint16_t token,
 			  struct dpdmux_attr *attr);
 
-int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				uint16_t max_frame_length);
-
 /**
  * enum dpdmux_counter_type - Counter types
  * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
@@ -223,12 +186,6 @@ struct dpdmux_accepted_frames {
 	enum dpdmux_action unaccept_act;
 };
 
-int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-				  uint32_t cmd_flags,
-				  uint16_t token,
-				  uint16_t if_id,
-				  const struct dpdmux_accepted_frames *cfg);
-
 /**
  * struct dpdmux_if_attr - Structure representing frame types configuration
  * @rate: Configured interface rate (in bits per second)
@@ -242,22 +199,6 @@ struct dpdmux_if_attr {
 	enum dpdmux_accepted_frames_type accept_frame_type;
 };
 
-int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_if_attr *attr);
-
-int dpdmux_if_enable(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     uint16_t if_id);
-
-int dpdmux_if_disable(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint16_t if_id);
-
 /**
  * struct dpdmux_l2_rule - Structure representing L2 rule
  * @mac_addr: MAC address
@@ -268,29 +209,6 @@ struct dpdmux_l2_rule {
 	uint16_t vlan_id;
 };
 
-int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     const struct dpdmux_l2_rule *rule);
-
-int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  const struct dpdmux_l2_rule *rule);
-
-int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint16_t if_id,
-			  enum dpdmux_counter_type counter_type,
-			  uint64_t *counter);
-
-int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token);
-
 /**
  * Enable auto-negotiation
  */
@@ -319,11 +237,6 @@ struct dpdmux_link_cfg {
 	uint64_t advertising;
 };
 
-int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   uint16_t if_id,
-			   struct dpdmux_link_cfg *cfg);
 /**
  * struct dpdmux_link_state - Structure representing DPDMUX link state
  * @rate: Rate
@@ -342,22 +255,11 @@ struct dpdmux_link_state {
 	uint64_t advertising;
 };
 
-int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     uint16_t if_id,
-			     struct dpdmux_link_state *state);
-
 int dpdmux_if_set_default(struct fsl_mc_io *mc_io,
 		uint32_t cmd_flags,
 		uint16_t token,
 		uint16_t if_id);
 
-int dpdmux_if_get_default(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		uint16_t *if_id);
-
 int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
 			uint32_t cmd_flags,
 			uint16_t token,
@@ -397,14 +299,4 @@ int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
 		struct dpdmux_rule_cfg *rule,
 		struct dpdmux_cls_action *action);
 
-int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token,
-		struct dpdmux_rule_cfg *rule);
-
-int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t *major_ver,
-			   uint16_t *minor_ver);
-
 #endif /* __FSL_DPDMUX_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 598911ddd1..2e2012d0bf 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -185,17 +185,6 @@ struct dpni_cfg {
 	uint8_t  num_cgs;
 };
 
-int dpni_create(struct fsl_mc_io *mc_io,
-		uint16_t dprc_token,
-		uint32_t cmd_flags,
-		const struct dpni_cfg *cfg,
-		uint32_t *obj_id);
-
-int dpni_destroy(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 uint32_t object_id);
-
 /**
  * struct dpni_pools_cfg - Structure representing buffer pools configuration
  * @num_dpbp:	Number of DPBPs
@@ -265,24 +254,12 @@ int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
 			uint8_t irq_index,
 			uint8_t en);
 
-int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-			uint32_t cmd_flags,
-			uint16_t token,
-			uint8_t irq_index,
-			uint8_t *en);
-
 int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
 		      uint32_t cmd_flags,
 		      uint16_t token,
 		      uint8_t irq_index,
 		      uint32_t mask);
 
-int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      uint8_t irq_index,
-		      uint32_t *mask);
-
 int dpni_get_irq_status(struct fsl_mc_io *mc_io,
 			uint32_t cmd_flags,
 			uint16_t token,
@@ -495,12 +472,6 @@ enum dpni_queue_type {
 	DPNI_QUEUE_RX_ERR,
 };
 
-int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   enum dpni_queue_type qtype,
-			   struct dpni_buffer_layout *layout);
-
 int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
 			   uint32_t cmd_flags,
 			   uint16_t token,
@@ -530,23 +501,12 @@ int dpni_set_offload(struct fsl_mc_io *mc_io,
 		     enum dpni_offload type,
 		     uint32_t config);
 
-int dpni_get_offload(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     enum dpni_offload type,
-		     uint32_t *config);
-
 int dpni_get_qdid(struct fsl_mc_io *mc_io,
 		  uint32_t cmd_flags,
 		  uint16_t token,
 		  enum dpni_queue_type qtype,
 		  uint16_t *qdid);
 
-int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token,
-			    uint16_t *data_offset);
-
 #define DPNI_STATISTICS_CNT		7
 
 /**
@@ -736,11 +696,6 @@ int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
 			      uint16_t token,
 			      uint16_t max_frame_length);
 
-int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-			      uint32_t cmd_flags,
-			      uint16_t token,
-			      uint16_t *max_frame_length);
-
 int dpni_set_mtu(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token,
@@ -756,21 +711,11 @@ int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
 			       uint16_t token,
 			       int en);
 
-int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-			       uint32_t cmd_flags,
-			       uint16_t token,
-			       int *en);
-
 int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
 			     uint32_t cmd_flags,
 			     uint16_t token,
 			     int en);
 
-int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-			     uint32_t cmd_flags,
-			     uint16_t token,
-			     int *en);
-
 int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
 			      uint32_t cmd_flags,
 			      uint16_t token,
@@ -794,12 +739,6 @@ int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
 			 uint16_t token,
 			 const uint8_t mac_addr[6]);
 
-int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int unicast,
-			   int multicast);
-
 int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
 			   uint32_t cmd_flags,
 			   uint16_t token,
@@ -828,10 +767,6 @@ int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
 			uint16_t token,
 			uint16_t vlan_id);
 
-int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
-			    uint32_t cmd_flags,
-			    uint16_t token);
-
 /**
  * enum dpni_dist_mode - DPNI distribution mode
  * @DPNI_DIST_MODE_NONE: No distribution
@@ -1042,13 +977,6 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
 				     uint8_t tc_id,
 			const struct dpni_congestion_notification_cfg *cfg);
 
-int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
-				     uint32_t cmd_flags,
-				     uint16_t token,
-				     enum dpni_queue_type qtype,
-				     uint8_t tc_id,
-				struct dpni_congestion_notification_cfg *cfg);
-
 /* DPNI FLC stash options */
 
 /**
@@ -1212,10 +1140,6 @@ int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
 			  uint16_t token,
 			  const struct dpni_rule_cfg *cfg);
 
-int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t token);
-
 /**
  * Discard matching traffic.  If set, this takes precedence over any other
  * configuration and matching traffic is always discarded.
@@ -1273,16 +1197,6 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
 			 uint8_t tc_id,
 			 const struct dpni_rule_cfg *cfg);
 
-int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t token,
-			  uint8_t tc_id);
-
-int dpni_get_api_version(struct fsl_mc_io *mc_io,
-			 uint32_t cmd_flags,
-			 uint16_t *major_ver,
-			 uint16_t *minor_ver);
-
 /**
  * Set User Context
  */
@@ -1372,15 +1286,6 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
 		      uint8_t q_index,
 		      struct dpni_taildrop *taildrop);
 
-int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-		      uint32_t cmd_flags,
-		      uint16_t token,
-		      enum dpni_congestion_point cg_point,
-		      enum dpni_queue_type q_type,
-		      uint8_t tc,
-		      uint8_t q_index,
-		      struct dpni_taildrop *taildrop);
-
 int dpni_set_opr(struct fsl_mc_io *mc_io,
 		 uint32_t cmd_flags,
 		 uint16_t token,
@@ -1389,14 +1294,6 @@ int dpni_set_opr(struct fsl_mc_io *mc_io,
 		 uint8_t options,
 		 struct opr_cfg *cfg);
 
-int dpni_get_opr(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token,
-		 uint8_t tc,
-		 uint8_t index,
-		 struct opr_cfg *cfg,
-		 struct opr_qry *qry);
-
 /**
  * When used for queue_idx in function dpni_set_rx_dist_default_queue will
  * signal to dpni to drop all unclassified frames
@@ -1550,35 +1447,4 @@ struct dpni_sw_sequence_layout {
 	} ss[DPNI_SW_SEQUENCE_LAYOUT_SIZE];
 };
 
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPNI object
- * @src:	Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova:  I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- *		to get the layout
- *
- * Return:	'0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
-				uint32_t cmd_flags,
-				uint16_t token,
-				enum dpni_soft_sequence_dest src,
-				uint64_t ss_layout_iova);
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout:		software sequence layout
- * @sw_sequence_layout_buf:	Zeroed 264 bytes of memory before mapping it
- *				to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
-				     const uint8_t *sw_sequence_layout_buf);
-
 #endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dprtc.h b/drivers/net/dpaa2/mc/fsl_dprtc.h
index 49edb5a050..d8be107ef1 100644
--- a/drivers/net/dpaa2/mc/fsl_dprtc.h
+++ b/drivers/net/dpaa2/mc/fsl_dprtc.h
@@ -16,10 +16,6 @@ int dprtc_open(struct fsl_mc_io *mc_io,
 	       int dprtc_id,
 	       uint16_t *token);
 
-int dprtc_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
 /**
  * struct dprtc_cfg - Structure representing DPRTC configuration
  * @options:	place holder
@@ -28,49 +24,6 @@ struct dprtc_cfg {
 	uint32_t options;
 };
 
-int dprtc_create(struct fsl_mc_io *mc_io,
-		 uint16_t dprc_token,
-		 uint32_t cmd_flags,
-		 const struct dprtc_cfg *cfg,
-		 uint32_t *obj_id);
-
-int dprtc_destroy(struct fsl_mc_io *mc_io,
-		  uint16_t dprc_token,
-		  uint32_t cmd_flags,
-		  uint32_t object_id);
-
-int dprtc_enable(struct fsl_mc_io *mc_io,
-		 uint32_t cmd_flags,
-		 uint16_t token);
-
-int dprtc_disable(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
-
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-		     uint32_t cmd_flags,
-		     uint16_t token,
-		     int *en);
-
-int dprtc_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
-
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-			   uint32_t cmd_flags,
-			   uint16_t token,
-			   int64_t offset);
-
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token,
-		  uint32_t freq_compensation);
-
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token,
-		  uint32_t *freq_compensation);
-
 int dprtc_get_time(struct fsl_mc_io *mc_io,
 		   uint32_t cmd_flags,
 		   uint16_t token,
@@ -81,11 +34,6 @@ int dprtc_set_time(struct fsl_mc_io *mc_io,
 		   uint16_t token,
 		   uint64_t time);
 
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-		    uint32_t cmd_flags,
-		    uint16_t token,
-		    uint64_t time);
-
 /**
  * struct dprtc_attr - Structure representing DPRTC attributes
  * @id:		DPRTC object ID
@@ -101,9 +49,4 @@ int dprtc_get_attributes(struct fsl_mc_io *mc_io,
 			 uint16_t token,
 			 struct dprtc_attr *attr);
 
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-			  uint32_t cmd_flags,
-			  uint16_t *major_ver,
-			  uint16_t *minor_ver);
-
 #endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/e1000/base/e1000_82542.c b/drivers/net/e1000/base/e1000_82542.c
index fd473c1c6f..e14e9e9e58 100644
--- a/drivers/net/e1000/base/e1000_82542.c
+++ b/drivers/net/e1000/base/e1000_82542.c
@@ -406,103 +406,6 @@ STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_translate_register_82542 - Translate the proper register offset
- *  @reg: e1000 register to be read
- *
- *  Registers in 82542 are located in different offsets than other adapters
- *  even though they function in the same manner.  This function takes in
- *  the name of the register to read and returns the correct offset for
- *  82542 silicon.
- **/
-u32 e1000_translate_register_82542(u32 reg)
-{
-	/*
-	 * Some of the 82542 registers are located at different
-	 * offsets than they are in newer adapters.
-	 * Despite the difference in location, the registers
-	 * function in the same manner.
-	 */
-	switch (reg) {
-	case E1000_RA:
-		reg = 0x00040;
-		break;
-	case E1000_RDTR:
-		reg = 0x00108;
-		break;
-	case E1000_RDBAL(0):
-		reg = 0x00110;
-		break;
-	case E1000_RDBAH(0):
-		reg = 0x00114;
-		break;
-	case E1000_RDLEN(0):
-		reg = 0x00118;
-		break;
-	case E1000_RDH(0):
-		reg = 0x00120;
-		break;
-	case E1000_RDT(0):
-		reg = 0x00128;
-		break;
-	case E1000_RDBAL(1):
-		reg = 0x00138;
-		break;
-	case E1000_RDBAH(1):
-		reg = 0x0013C;
-		break;
-	case E1000_RDLEN(1):
-		reg = 0x00140;
-		break;
-	case E1000_RDH(1):
-		reg = 0x00148;
-		break;
-	case E1000_RDT(1):
-		reg = 0x00150;
-		break;
-	case E1000_FCRTH:
-		reg = 0x00160;
-		break;
-	case E1000_FCRTL:
-		reg = 0x00168;
-		break;
-	case E1000_MTA:
-		reg = 0x00200;
-		break;
-	case E1000_TDBAL(0):
-		reg = 0x00420;
-		break;
-	case E1000_TDBAH(0):
-		reg = 0x00424;
-		break;
-	case E1000_TDLEN(0):
-		reg = 0x00428;
-		break;
-	case E1000_TDH(0):
-		reg = 0x00430;
-		break;
-	case E1000_TDT(0):
-		reg = 0x00438;
-		break;
-	case E1000_TIDV:
-		reg = 0x00440;
-		break;
-	case E1000_VFTA:
-		reg = 0x00600;
-		break;
-	case E1000_TDFH:
-		reg = 0x08010;
-		break;
-	case E1000_TDFT:
-		reg = 0x08018;
-		break;
-	default:
-		break;
-	}
-
-	return reg;
-}
-
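The translation helper removed above sat between generic register names and the 82542's legacy offsets; a hedged usage sketch (E1000_READ_REG is the driver's existing MMIO accessor, call placement is illustrative):

	/* Read the RX descriptor tail using the generic name; on 82542 the
	 * offset is remapped, on other MACs it would be used unchanged. */
	static u32 rd_rdt_82542_sketch(struct e1000_hw *hw)
	{
		u32 reg = E1000_RDT(0);

		if (hw->mac.type == e1000_82542)
			reg = e1000_translate_register_82542(reg);

		return E1000_READ_REG(hw, reg);
	}
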
 /**
  *  e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_82543.c b/drivers/net/e1000/base/e1000_82543.c
index ca273b4368..992dffe1ff 100644
--- a/drivers/net/e1000/base/e1000_82543.c
+++ b/drivers/net/e1000/base/e1000_82543.c
@@ -364,84 +364,6 @@ STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
 	return ret_val;
 }
 
-/**
- *  e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
- *  @hw: pointer to the HW structure
- *  @stats: Struct containing statistic register values
- *  @frame_len: The length of the frame in question
- *  @mac_addr: The Ethernet destination address of the frame in question
- *  @max_frame_size: The maximum frame size
- *
- *  Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
- **/
-void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
-				  struct e1000_hw_stats *stats, u32 frame_len,
-				  u8 *mac_addr, u32 max_frame_size)
-{
-	if (!(e1000_tbi_sbp_enabled_82543(hw)))
-		goto out;
-
-	/* First adjust the frame length. */
-	frame_len--;
-	/*
-	 * We need to adjust the statistics counters, since the hardware
-	 * counters overcount this packet as a CRC error and undercount
-	 * the packet as a good packet
-	 */
-	/* This packet should not be counted as a CRC error. */
-	stats->crcerrs--;
-	/* This packet does count as a Good Packet Received. */
-	stats->gprc++;
-
-	/* Adjust the Good Octets received counters */
-	stats->gorc += frame_len;
-
-	/*
-	 * Is this a broadcast or multicast?  Check broadcast first,
-	 * since the test for a multicast frame will test positive on
-	 * a broadcast frame.
-	 */
-	if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
-		/* Broadcast packet */
-		stats->bprc++;
-	else if (*mac_addr & 0x01)
-		/* Multicast packet */
-		stats->mprc++;
-
-	/*
-	 * In this case, the hardware has over counted the number of
-	 * oversize frames.
-	 */
-	if ((frame_len == max_frame_size) && (stats->roc > 0))
-		stats->roc--;
-
-	/*
-	 * Adjust the bin counters when the extra byte put the frame in the
-	 * wrong bin. Remember that the frame_len was adjusted above.
-	 */
-	if (frame_len == 64) {
-		stats->prc64++;
-		stats->prc127--;
-	} else if (frame_len == 127) {
-		stats->prc127++;
-		stats->prc255--;
-	} else if (frame_len == 255) {
-		stats->prc255++;
-		stats->prc511--;
-	} else if (frame_len == 511) {
-		stats->prc511++;
-		stats->prc1023--;
-	} else if (frame_len == 1023) {
-		stats->prc1023++;
-		stats->prc1522--;
-	} else if (frame_len == 1522) {
-		stats->prc1522++;
-	}
-
-out:
-	return;
-}
-
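The bin shuffling at the end of the removed function only fires when the corrected length lands exactly on a packet-size bin boundary (the hardware had binned the one-byte-longer frame one slot too high); a standalone sketch of those boundaries (illustration only, not driver code):

	#include <stdint.h>

	/* Return the index of the size bin (prc64..prc1522) a corrected frame
	 * length falls into; lengths on a bin's upper bound belong to that bin. */
	static unsigned int e1000_prc_bin(uint32_t frame_len)
	{
		static const uint32_t bounds[] = { 64, 127, 255, 511, 1023, 1522 };
		unsigned int i;

		for (i = 0; i < sizeof(bounds) / sizeof(bounds[0]); i++)
			if (frame_len <= bounds[i])
				return i;

		return 5;	/* clamp oversize lengths to the last bin */
	}
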
 /**
  *  e1000_read_phy_reg_82543 - Read PHY register
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_82543.h b/drivers/net/e1000/base/e1000_82543.h
index cf81e4e848..8af412bc77 100644
--- a/drivers/net/e1000/base/e1000_82543.h
+++ b/drivers/net/e1000/base/e1000_82543.h
@@ -16,10 +16,6 @@
 /* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
 #define TBI_SBP_ENABLED		0x2
 
-void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
-				  struct e1000_hw_stats *stats,
-				  u32 frame_len, u8 *mac_addr,
-				  u32 max_frame_size);
 void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
 				       bool state);
 bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_82571.c b/drivers/net/e1000/base/e1000_82571.c
index 9dc7f6025c..9da1fbf856 100644
--- a/drivers/net/e1000/base/e1000_82571.c
+++ b/drivers/net/e1000/base/e1000_82571.c
@@ -1467,41 +1467,6 @@ STATIC s32 e1000_led_on_82574(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_check_phy_82574 - check 82574 phy hung state
- *  @hw: pointer to the HW structure
- *
- *  Returns whether phy is hung or not
- **/
-bool e1000_check_phy_82574(struct e1000_hw *hw)
-{
-	u16 status_1kbt = 0;
-	u16 receive_errors = 0;
-	s32 ret_val;
-
-	DEBUGFUNC("e1000_check_phy_82574");
-
-	/* Read PHY Receive Error counter first, if its is max - all F's then
-	 * read the Base1000T status register If both are max then PHY is hung.
-	 */
-	ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER,
-				       &receive_errors);
-	if (ret_val)
-		return false;
-	if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
-		ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS,
-					       &status_1kbt);
-		if (ret_val)
-			return false;
-		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
-		    E1000_IDLE_ERROR_COUNT_MASK)
-			return true;
-	}
-
-	return false;
-}
-
-
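The hung-PHY probe above was meant for a periodic watchdog: if both the receive-error counter and the idle-error field are pegged at their maxima, the PHY is stuck and needs a reset. A hedged caller sketch (the reset hook shown is the generic phy.ops callback; the invocation context is illustrative):

	/* Hypothetical watchdog tick for an 82574 port. */
	static void e1000_82574_watchdog_sketch(struct e1000_hw *hw)
	{
		if (hw->mac.type != e1000_82574)
			return;

		if (e1000_check_phy_82574(hw) && hw->phy.ops.reset)
			hw->phy.ops.reset(hw);	/* recover the stuck PHY */
	}
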
 /**
  *  e1000_setup_link_82571 - Setup flow control and link settings
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_82571.h b/drivers/net/e1000/base/e1000_82571.h
index 0d8412678d..3c1840d0e8 100644
--- a/drivers/net/e1000/base/e1000_82571.h
+++ b/drivers/net/e1000/base/e1000_82571.h
@@ -29,7 +29,6 @@
 #define E1000_IDLE_ERROR_COUNT_MASK	0xFF
 #define E1000_RECEIVE_ERROR_COUNTER	21
 #define E1000_RECEIVE_ERROR_MAX		0xFFFF
-bool e1000_check_phy_82574(struct e1000_hw *hw);
 bool e1000_get_laa_state_82571(struct e1000_hw *hw);
 void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
 
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 7c78649393..074bd34f11 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -2119,62 +2119,6 @@ void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
 	E1000_WRITE_REG(hw, reg_offset, reg_val);
 }
 
-/**
- *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *
- *  enables/disables L2 switch loopback functionality.
- **/
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
-{
-	u32 dtxswc;
-
-	switch (hw->mac.type) {
-	case e1000_82576:
-		dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
-		if (enable)
-			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		else
-			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
-		break;
-	case e1000_i350:
-	case e1000_i354:
-		dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
-		if (enable)
-			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		else
-			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-		E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
-		break;
-	default:
-		/* Currently no other hardware supports loopback */
-		break;
-	}
-
-
-}
-
-/**
- *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
- *  @hw: pointer to the hardware struct
- *  @enable: state to enter, either enabled or disabled
- *
- *  enables/disables replication of packets across multiple pools.
- **/
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
-{
-	u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
-
-	if (enable)
-		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
-	else
-		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
-
-	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
-}
-
 /**
  *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
  *  @hw: pointer to the HW structure
@@ -2596,45 +2540,6 @@ STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
 	return ret_val;
 }
 
-/**
- *  __e1000_access_emi_reg - Read/write EMI register
- *  @hw: pointer to the HW structure
- *  @address: EMI address to program
- *  @data: pointer to value to read/write from/to the EMI address
- *  @read: boolean flag to indicate read or write
- **/
-STATIC s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
-				  u16 *data, bool read)
-{
-	s32 ret_val;
-
-	DEBUGFUNC("__e1000_access_emi_reg");
-
-	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
-	if (ret_val)
-		return ret_val;
-
-	if (read)
-		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
-	else
-		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
-
-	return ret_val;
-}
-
-/**
- *  e1000_read_emi_reg - Read Extended Management Interface register
- *  @hw: pointer to the HW structure
- *  @addr: EMI address to program
- *  @data: value to be read from the EMI address
- **/
-s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
-{
-	DEBUGFUNC("e1000_read_emi_reg");
-
-	return __e1000_access_emi_reg(hw, addr, data, true);
-}
-
 /**
  *  e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
  *  @hw: pointer to the HW structure
@@ -2823,179 +2728,6 @@ s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
 	return ret_val;
 }
 
-/**
- *  e1000_set_eee_i350 - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *  @adv1G: boolean flag enabling 1G EEE advertisement
- *  @adv100M: boolean flag enabling 100M EEE advertisement
- *
- *  Enable/disable EEE based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
-{
-	u32 ipcnfg, eeer;
-
-	DEBUGFUNC("e1000_set_eee_i350");
-
-	if ((hw->mac.type < e1000_i350) ||
-	    (hw->phy.media_type != e1000_media_type_copper))
-		goto out;
-	ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
-	eeer = E1000_READ_REG(hw, E1000_EEER);
-
-	/* enable or disable per user setting */
-	if (!(hw->dev_spec._82575.eee_disable)) {
-		u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
-
-		if (adv100M)
-			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
-		else
-			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
-
-		if (adv1G)
-			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
-		else
-			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
-
-		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
-			 E1000_EEER_LPI_FC);
-
-		/* This bit should not be set in normal operation. */
-		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
-			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
-	} else {
-		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
-		eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
-			  E1000_EEER_LPI_FC);
-	}
-	E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
-	E1000_WRITE_REG(hw, E1000_EEER, eeer);
-	E1000_READ_REG(hw, E1000_IPCNFG);
-	E1000_READ_REG(hw, E1000_EEER);
-out:
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_eee_i354 - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *  @adv1G: boolean flag enabling 1G EEE advertisement
- *  @adv100M: boolean flag enabling 100M EEE advertisement
- *
- *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
- *
- **/
-s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val = E1000_SUCCESS;
-	u16 phy_data;
-
-	DEBUGFUNC("e1000_set_eee_i354");
-
-	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    ((phy->id != M88E1543_E_PHY_ID) &&
-	    (phy->id != M88E1512_E_PHY_ID)))
-		goto out;
-
-	if (!hw->dev_spec._82575.eee_disable) {
-		/* Switch to PHY page 18. */
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
-		if (ret_val)
-			goto out;
-
-		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
-					    &phy_data);
-		if (ret_val)
-			goto out;
-
-		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
-					     phy_data);
-		if (ret_val)
-			goto out;
-
-		/* Return the PHY to page 0. */
-		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
-		if (ret_val)
-			goto out;
-
-		/* Turn on EEE advertisement. */
-		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-					       E1000_EEE_ADV_DEV_I354,
-					       &phy_data);
-		if (ret_val)
-			goto out;
-
-		if (adv100M)
-			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
-		else
-			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
-
-		if (adv1G)
-			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
-		else
-			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
-
-		ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-						E1000_EEE_ADV_DEV_I354,
-						phy_data);
-	} else {
-		/* Turn off EEE advertisement. */
-		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-					       E1000_EEE_ADV_DEV_I354,
-					       &phy_data);
-		if (ret_val)
-			goto out;
-
-		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
-			      E1000_EEE_ADV_1000_SUPPORTED);
-		ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
-						E1000_EEE_ADV_DEV_I354,
-						phy_data);
-	}
-
-out:
-	return ret_val;
-}
-
-/**
- *  e1000_get_eee_status_i354 - Get EEE status
- *  @hw: pointer to the HW structure
- *  @status: EEE status
- *
- *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
- *  been received.
- **/
-s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val = E1000_SUCCESS;
-	u16 phy_data;
-
-	DEBUGFUNC("e1000_get_eee_status_i354");
-
-	/* Check if EEE is supported on this device. */
-	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    ((phy->id != M88E1543_E_PHY_ID) &&
-	    (phy->id != M88E1512_E_PHY_ID)))
-		goto out;
-
-	ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
-				       E1000_PCS_STATUS_DEV_I354,
-				       &phy_data);
-	if (ret_val)
-		goto out;
-
-	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
-			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
-
-out:
-	return ret_val;
-}
-
 /* Due to a hw errata, if the host tries to  configure the VFTA register
  * while performing queries from the BMC or DMA, then the VFTA in some
  * cases won't be written.
@@ -3044,36 +2776,6 @@ void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
 	E1000_WRITE_FLUSH(hw);
 }
 
-
-/**
- *  e1000_set_i2c_bb - Enable I2C bit-bang
- *  @hw: pointer to the HW structure
- *
- *  Enable I2C bit-bang interface
- *
- **/
-s32 e1000_set_i2c_bb(struct e1000_hw *hw)
-{
-	s32 ret_val = E1000_SUCCESS;
-	u32 ctrl_ext, i2cparams;
-
-	DEBUGFUNC("e1000_set_i2c_bb");
-
-	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
-	ctrl_ext |= E1000_CTRL_I2C_ENA;
-	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-	E1000_WRITE_FLUSH(hw);
-
-	i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
-	i2cparams |= E1000_I2CBB_EN;
-	i2cparams |= E1000_I2C_DATA_OE_N;
-	i2cparams |= E1000_I2C_CLK_OE_N;
-	E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
-	E1000_WRITE_FLUSH(hw);
-
-	return ret_val;
-}
-
 /**
  *  e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h
index 006b37ae98..03284ca946 100644
--- a/drivers/net/e1000/base/e1000_82575.h
+++ b/drivers/net/e1000/base/e1000_82575.h
@@ -361,9 +361,7 @@ s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
 
 /* Rx packet buffer size defines */
 #define E1000_RXPBS_SIZE_MASK_82576	0x0000007F
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
 void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
 
 enum e1000_promisc_type {
 	e1000_promisc_disabled = 0,   /* all promisc modes disabled */
@@ -373,15 +371,10 @@ enum e1000_promisc_type {
 	e1000_num_promisc_types
 };
 
-void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
 void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
 u16 e1000_rxpbs_adjust_82580(u32 data);
-s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
-s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
-s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
-s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
 s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
 s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
 
@@ -397,7 +390,6 @@ s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
 #define E1000_I2C_T_SU_STO	4
 #define E1000_I2C_T_BUF		5
 
-s32 e1000_set_i2c_bb(struct e1000_hw *hw);
 s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
 				u8 dev_addr, u8 *data);
 s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
diff --git a/drivers/net/e1000/base/e1000_api.c b/drivers/net/e1000/base/e1000_api.c
index 6a2376f40f..c3a8892c47 100644
--- a/drivers/net/e1000/base/e1000_api.c
+++ b/drivers/net/e1000/base/e1000_api.c
@@ -530,21 +530,6 @@ void e1000_clear_vfta(struct e1000_hw *hw)
 		hw->mac.ops.clear_vfta(hw);
 }
 
-/**
- *  e1000_write_vfta - Write value to VLAN filter table
- *  @hw: pointer to the HW structure
- *  @offset: the 32-bit offset in which to write the value to.
- *  @value: the 32-bit value to write at location offset.
- *
- *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
- *  table. This is a function pointer entry point called by drivers.
- **/
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
-{
-	if (hw->mac.ops.write_vfta)
-		hw->mac.ops.write_vfta(hw, offset, value);
-}
-
 /**
  *  e1000_update_mc_addr_list - Update Multicast addresses
  *  @hw: pointer to the HW structure
@@ -562,19 +547,6 @@ void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
 						mc_addr_count);
 }
 
-/**
- *  e1000_force_mac_fc - Force MAC flow control
- *  @hw: pointer to the HW structure
- *
- *  Force the MAC's flow control settings. Currently no func pointer exists
- *  and all implementations are handled in the generic version of this
- *  function.
- **/
-s32 e1000_force_mac_fc(struct e1000_hw *hw)
-{
-	return e1000_force_mac_fc_generic(hw);
-}
-
 /**
  *  e1000_check_for_link - Check/Store link connection
  *  @hw: pointer to the HW structure
@@ -591,34 +563,6 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
 	return -E1000_ERR_CONFIG;
 }
 
-/**
- *  e1000_check_mng_mode - Check management mode
- *  @hw: pointer to the HW structure
- *
- *  This checks if the adapter has manageability enabled.
- *  This is a function pointer entry point called by drivers.
- **/
-bool e1000_check_mng_mode(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.check_mng_mode)
-		return hw->mac.ops.check_mng_mode(hw);
-
-	return false;
-}
-
-/**
- *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface
- *  @length: size of the buffer
- *
- *  Writes the DHCP information to the host interface.
- **/
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
-{
-	return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
-}
-
 /**
  *  e1000_reset_hw - Reset hardware
  *  @hw: pointer to the HW structure
@@ -665,86 +609,6 @@ s32 e1000_setup_link(struct e1000_hw *hw)
 	return -E1000_ERR_CONFIG;
 }
 
-/**
- *  e1000_get_speed_and_duplex - Returns current speed and duplex
- *  @hw: pointer to the HW structure
- *  @speed: pointer to a 16-bit value to store the speed
- *  @duplex: pointer to a 16-bit value to store the duplex.
- *
- *  This returns the speed and duplex of the adapter in the two 'out'
- *  variables passed in. This is a function pointer entry point called
- *  by drivers.
- **/
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
-{
-	if (hw->mac.ops.get_link_up_info)
-		return hw->mac.ops.get_link_up_info(hw, speed, duplex);
-
-	return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_setup_led - Configures SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This prepares the SW controllable LED for use and saves the current state
- *  of the LED so it can be later restored. This is a function pointer entry
- *  point called by drivers.
- **/
-s32 e1000_setup_led(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.setup_led)
-		return hw->mac.ops.setup_led(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_cleanup_led - Restores SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This restores the SW controllable LED to the value saved off by
- *  e1000_setup_led. This is a function pointer entry point called by drivers.
- **/
-s32 e1000_cleanup_led(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.cleanup_led)
-		return hw->mac.ops.cleanup_led(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_blink_led - Blink SW controllable LED
- *  @hw: pointer to the HW structure
- *
- *  This starts the adapter LED blinking. Request the LED to be setup first
- *  and cleaned up after. This is a function pointer entry point called by
- *  drivers.
- **/
-s32 e1000_blink_led(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.blink_led)
-		return hw->mac.ops.blink_led(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_id_led_init - store LED configurations in SW
- *  @hw: pointer to the HW structure
- *
- *  Initializes the LED config in SW. This is a function pointer entry point
- *  called by drivers.
- **/
-s32 e1000_id_led_init(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.id_led_init)
-		return hw->mac.ops.id_led_init(hw);
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_led_on - Turn on SW controllable LED
  *  @hw: pointer to the HW structure
@@ -775,43 +639,6 @@ s32 e1000_led_off(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_reset_adaptive - Reset adaptive IFS
- *  @hw: pointer to the HW structure
- *
- *  Resets the adaptive IFS. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-void e1000_reset_adaptive(struct e1000_hw *hw)
-{
-	e1000_reset_adaptive_generic(hw);
-}
-
-/**
- *  e1000_update_adaptive - Update adaptive IFS
- *  @hw: pointer to the HW structure
- *
- *  Updates adapter IFS. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-void e1000_update_adaptive(struct e1000_hw *hw)
-{
-	e1000_update_adaptive_generic(hw);
-}
-
-/**
- *  e1000_disable_pcie_master - Disable PCI-Express master access
- *  @hw: pointer to the HW structure
- *
- *  Disables PCI-Express master access and verifies there are no pending
- *  requests. Currently no func pointer exists and all implementations are
- *  handled in the generic version of this function.
- **/
-s32 e1000_disable_pcie_master(struct e1000_hw *hw)
-{
-	return e1000_disable_pcie_master_generic(hw);
-}
-
 /**
  *  e1000_config_collision_dist - Configure collision distance
  *  @hw: pointer to the HW structure
@@ -841,94 +668,6 @@ int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
- *  @hw: pointer to the HW structure
- *
- *  Ensures that the MDI/MDIX SW state is valid.
- **/
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
-{
-	if (hw->mac.ops.validate_mdi_setting)
-		return hw->mac.ops.validate_mdi_setting(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_hash_mc_addr - Determines address location in multicast table
- *  @hw: pointer to the HW structure
- *  @mc_addr: Multicast address to hash.
- *
- *  This hashes an address to determine its location in the multicast
- *  table. Currently no func pointer exists and all implementations
- *  are handled in the generic version of this function.
- **/
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
-{
-	return e1000_hash_mc_addr_generic(hw, mc_addr);
-}
-
-/**
- *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
- *  @hw: pointer to the HW structure
- *
- *  Enables packet filtering on transmit packets if manageability is enabled
- *  and host interface is enabled.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
-{
-	return e1000_enable_tx_pkt_filtering_generic(hw);
-}
-
-/**
- *  e1000_mng_host_if_write - Writes to the manageability host interface
- *  @hw: pointer to the HW structure
- *  @buffer: pointer to the host interface buffer
- *  @length: size of the buffer
- *  @offset: location in the buffer to write to
- *  @sum: sum of the data (not checksum)
- *
- *  This function writes the buffer content at the offset given on the host if.
- *  It also does alignment considerations to do the writes in most efficient
- *  way.  Also fills up the sum of the buffer in *buffer parameter.
- **/
-s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
-			    u16 offset, u8 *sum)
-{
-	return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
-}
-
-/**
- *  e1000_mng_write_cmd_header - Writes manageability command header
- *  @hw: pointer to the HW structure
- *  @hdr: pointer to the host interface command header
- *
- *  Writes the command header after does the checksum calculation.
- **/
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-			       struct e1000_host_mng_command_header *hdr)
-{
-	return e1000_mng_write_cmd_header_generic(hw, hdr);
-}
-
-/**
- *  e1000_mng_enable_host_if - Checks host interface is enabled
- *  @hw: pointer to the HW structure
- *
- *  Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
- *
- *  This function checks whether the HOST IF is enabled for command operation
- *  and also checks whether the previous command is completed.  It busy waits
- *  in case of previous command is not completed.
- **/
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
-{
-	return e1000_mng_enable_host_if_generic(hw);
-}
-
 /**
  *  e1000_check_reset_block - Verifies PHY can be reset
  *  @hw: pointer to the HW structure
@@ -944,126 +683,6 @@ s32 e1000_check_reset_block(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_read_phy_reg - Reads PHY register
- *  @hw: pointer to the HW structure
- *  @offset: the register to read
- *  @data: the buffer to store the 16-bit read.
- *
- *  Reads the PHY register and returns the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-	if (hw->phy.ops.read_reg)
-		return hw->phy.ops.read_reg(hw, offset, data);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_phy_reg - Writes PHY register
- *  @hw: pointer to the HW structure
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes the PHY register at offset with the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-	if (hw->phy.ops.write_reg)
-		return hw->phy.ops.write_reg(hw, offset, data);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_release_phy - Generic release PHY
- *  @hw: pointer to the HW structure
- *
- *  Return if silicon family does not require a semaphore when accessing the
- *  PHY.
- **/
-void e1000_release_phy(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.release)
-		hw->phy.ops.release(hw);
-}
-
-/**
- *  e1000_acquire_phy - Generic acquire PHY
- *  @hw: pointer to the HW structure
- *
- *  Return success if silicon family does not require a semaphore when
- *  accessing the PHY.
- **/
-s32 e1000_acquire_phy(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.acquire)
-		return hw->phy.ops.acquire(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_cfg_on_link_up - Configure PHY upon link up
- *  @hw: pointer to the HW structure
- **/
-s32 e1000_cfg_on_link_up(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.cfg_on_link_up)
-		return hw->phy.ops.cfg_on_link_up(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_read_kmrn_reg - Reads register using Kumeran interface
- *  @hw: pointer to the HW structure
- *  @offset: the register to read
- *  @data: the location to store the 16-bit value read.
- *
- *  Reads a register out of the Kumeran interface. Currently no func pointer
- *  exists and all implementations are handled in the generic version of
- *  this function.
- **/
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
-	return e1000_read_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- *  e1000_write_kmrn_reg - Writes register using Kumeran interface
- *  @hw: pointer to the HW structure
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes a register to the Kumeran interface. Currently no func pointer
- *  exists and all implementations are handled in the generic version of
- *  this function.
- **/
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
-	return e1000_write_kmrn_reg_generic(hw, offset, data);
-}
-
-/**
- *  e1000_get_cable_length - Retrieves cable length estimation
- *  @hw: pointer to the HW structure
- *
- *  This function estimates the cable length and stores them in
- *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.get_cable_length)
-		return hw->phy.ops.get_cable_length(hw);
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_get_phy_info - Retrieves PHY information from registers
  *  @hw: pointer to the HW structure
@@ -1095,65 +714,6 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_phy_commit - Soft PHY reset
- *  @hw: pointer to the HW structure
- *
- *  Performs a soft PHY reset on those that apply. This is a function pointer
- *  entry point called by drivers.
- **/
-s32 e1000_phy_commit(struct e1000_hw *hw)
-{
-	if (hw->phy.ops.commit)
-		return hw->phy.ops.commit(hw);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_d0_lplu_state - Sets low power link up state for D0
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D0
- *  and SmartSpeed is disabled when active is true, else clear lplu for D0
- *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
-	if (hw->phy.ops.set_d0_lplu_state)
-		return hw->phy.ops.set_d0_lplu_state(hw, active);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_set_d3_lplu_state - Sets low power link up state for D3
- *  @hw: pointer to the HW structure
- *  @active: boolean used to enable/disable lplu
- *
- *  Success returns 0, Failure returns 1
- *
- *  The low power link up (lplu) state is set to the power management level D3
- *  and SmartSpeed is disabled when active is true, else clear lplu for D3
- *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
- *  is used during Dx states where the power conservation is most important.
- *  During driver activity, SmartSpeed should be enabled so performance is
- *  maintained.  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
-{
-	if (hw->phy.ops.set_d3_lplu_state)
-		return hw->phy.ops.set_d3_lplu_state(hw, active);
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_read_mac_addr - Reads MAC address
  *  @hw: pointer to the HW structure
@@ -1170,52 +730,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
 	return e1000_read_mac_addr_generic(hw);
 }
 
-/**
- *  e1000_read_pba_string - Read device part number string
- *  @hw: pointer to the HW structure
- *  @pba_num: pointer to device part number
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number from the EEPROM and stores
- *  the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
-	return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
-}
-
-/**
- *  e1000_read_pba_length - Read device part number string length
- *  @hw: pointer to the HW structure
- *  @pba_num_size: size of part number buffer
- *
- *  Reads the product board assembly (PBA) number length from the EEPROM and
- *  stores the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
-{
-	return e1000_read_pba_length_generic(hw, pba_num_size);
-}
-
-/**
- *  e1000_read_pba_num - Read device part number
- *  @hw: pointer to the HW structure
- *  @pba_num: pointer to device part number
- *
- *  Reads the product board assembly (PBA) number from the EEPROM and stores
- *  the value in pba_num.
- *  Currently no func pointer exists and all implementations are handled in the
- *  generic version of this function.
- **/
-s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
-{
-	return e1000_read_pba_num_generic(hw, pba_num);
-}
-
 /**
  *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
  *  @hw: pointer to the HW structure
@@ -1231,34 +745,6 @@ s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
 	return -E1000_ERR_CONFIG;
 }
 
-/**
- *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
- *  @hw: pointer to the HW structure
- *
- *  Updates the NVM checksum. Currently no func pointer exists and all
- *  implementations are handled in the generic version of this function.
- **/
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
-{
-	if (hw->nvm.ops.update)
-		return hw->nvm.ops.update(hw);
-
-	return -E1000_ERR_CONFIG;
-}
-
-/**
- *  e1000_reload_nvm - Reloads EEPROM
- *  @hw: pointer to the HW structure
- *
- *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
- *  extended control register.
- **/
-void e1000_reload_nvm(struct e1000_hw *hw)
-{
-	if (hw->nvm.ops.reload)
-		hw->nvm.ops.reload(hw);
-}
-
 /**
  *  e1000_read_nvm - Reads NVM (EEPROM)
  *  @hw: pointer to the HW structure
@@ -1295,22 +781,6 @@ s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
- *  @hw: pointer to the HW structure
- *  @reg: 32bit register offset
- *  @offset: the register to write
- *  @data: the value to write.
- *
- *  Writes the PHY register at offset with the value in data.
- *  This is a function pointer entry point called by drivers.
- **/
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
-			      u8 data)
-{
-	return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
-}
-
 /**
  * e1000_power_up_phy - Restores link in case of PHY power down
  * @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_api.h b/drivers/net/e1000/base/e1000_api.h
index 6b38e2b7bb..1c240dfcdf 100644
--- a/drivers/net/e1000/base/e1000_api.h
+++ b/drivers/net/e1000/base/e1000_api.h
@@ -29,65 +29,25 @@ s32 e1000_init_phy_params(struct e1000_hw *hw);
 s32 e1000_init_mbx_params(struct e1000_hw *hw);
 s32 e1000_get_bus_info(struct e1000_hw *hw);
 void e1000_clear_vfta(struct e1000_hw *hw);
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-s32 e1000_force_mac_fc(struct e1000_hw *hw);
 s32 e1000_check_for_link(struct e1000_hw *hw);
 s32 e1000_reset_hw(struct e1000_hw *hw);
 s32 e1000_init_hw(struct e1000_hw *hw);
 s32 e1000_setup_link(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-s32 e1000_disable_pcie_master(struct e1000_hw *hw);
 void e1000_config_collision_dist(struct e1000_hw *hw);
 int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
 void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
 			       u32 mc_addr_count);
-s32 e1000_setup_led(struct e1000_hw *hw);
-s32 e1000_cleanup_led(struct e1000_hw *hw);
 s32 e1000_check_reset_block(struct e1000_hw *hw);
-s32 e1000_blink_led(struct e1000_hw *hw);
 s32 e1000_led_on(struct e1000_hw *hw);
 s32 e1000_led_off(struct e1000_hw *hw);
-s32 e1000_id_led_init(struct e1000_hw *hw);
-void e1000_reset_adaptive(struct e1000_hw *hw);
-void e1000_update_adaptive(struct e1000_hw *hw);
-s32 e1000_get_cable_length(struct e1000_hw *hw);
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
-			      u8 data);
 s32 e1000_get_phy_info(struct e1000_hw *hw);
-void e1000_release_phy(struct e1000_hw *hw);
-s32 e1000_acquire_phy(struct e1000_hw *hw);
-s32 e1000_cfg_on_link_up(struct e1000_hw *hw);
 s32 e1000_phy_hw_reset(struct e1000_hw *hw);
-s32 e1000_phy_commit(struct e1000_hw *hw);
 void e1000_power_up_phy(struct e1000_hw *hw);
 void e1000_power_down_phy(struct e1000_hw *hw);
 s32 e1000_read_mac_addr(struct e1000_hw *hw);
-s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
-s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
-s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
-void e1000_reload_nvm(struct e1000_hw *hw);
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
 s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
 s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-bool e1000_check_mng_mode(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
-			    u16 offset, u8 *sum);
-s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-			       struct e1000_host_mng_command_header *hdr);
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-u32  e1000_translate_register_82542(u32 reg);
-
 
 
 /*
diff --git a/drivers/net/e1000/base/e1000_base.c b/drivers/net/e1000/base/e1000_base.c
index ab73e1e59e..958aca14b2 100644
--- a/drivers/net/e1000/base/e1000_base.c
+++ b/drivers/net/e1000/base/e1000_base.c
@@ -110,81 +110,3 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
 	if (phy->ops.check_reset_block(hw))
 		e1000_power_down_phy_copper(hw);
 }
-
-/**
- *  e1000_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
- *  @hw: pointer to the HW structure
- *
- *  After Rx enable, if manageability is enabled then there is likely some
- *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
- *  function clears the FIFOs and flushes any packets that came in as Rx was
- *  being enabled.
- **/
-void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
-{
-	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
-	int i, ms_wait;
-
-	DEBUGFUNC("e1000_rx_fifo_flush_base");
-
-	/* disable IPv6 options as per hardware errata */
-	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
-	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
-	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
-
-	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
-		return;
-
-	/* Disable all Rx queues */
-	for (i = 0; i < 4; i++) {
-		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
-		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
-				rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
-	}
-	/* Poll all queues to verify they have shut down */
-	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
-		msec_delay(1);
-		rx_enabled = 0;
-		for (i = 0; i < 4; i++)
-			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
-		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
-			break;
-	}
-
-	if (ms_wait == 10)
-		DEBUGOUT("Queue disable timed out after 10ms\n");
-
-	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
-	 * incoming packets are rejected.  Set enable and wait 2ms so that
-	 * any packet that was coming in as RCTL.EN was set is flushed
-	 */
-	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
-
-	rlpml = E1000_READ_REG(hw, E1000_RLPML);
-	E1000_WRITE_REG(hw, E1000_RLPML, 0);
-
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
-	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
-	temp_rctl |= E1000_RCTL_LPE;
-
-	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
-	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
-	E1000_WRITE_FLUSH(hw);
-	msec_delay(2);
-
-	/* Enable Rx queues that were previously enabled and restore our
-	 * previous state
-	 */
-	for (i = 0; i < 4; i++)
-		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-	E1000_WRITE_FLUSH(hw);
-
-	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
-	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
-
-	/* Flush receive errors generated by workaround */
-	E1000_READ_REG(hw, E1000_ROC);
-	E1000_READ_REG(hw, E1000_RNBC);
-	E1000_READ_REG(hw, E1000_MPC);
-}
diff --git a/drivers/net/e1000/base/e1000_base.h b/drivers/net/e1000/base/e1000_base.h
index 0d6172b6d8..16d7ca98a7 100644
--- a/drivers/net/e1000/base/e1000_base.h
+++ b/drivers/net/e1000/base/e1000_base.h
@@ -8,7 +8,6 @@
 /* forward declaration */
 s32 e1000_init_hw_base(struct e1000_hw *hw);
 void e1000_power_down_phy_copper_base(struct e1000_hw *hw);
-extern void e1000_rx_fifo_flush_base(struct e1000_hw *hw);
 s32 e1000_acquire_phy_base(struct e1000_hw *hw);
 void e1000_release_phy_base(struct e1000_hw *hw);
 
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 14f86b7bdc..4f9a7bc3f1 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -5467,60 +5467,6 @@ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 	return;
 }
 
-/**
- *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
- *  @hw: pointer to the HW structure
- *
- *  Workaround for 82566 power-down on D3 entry:
- *    1) disable gigabit link
- *    2) write VR power-down enable
- *    3) read it back
- *  Continue if successful, else issue LCD reset and repeat
- **/
-void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
-{
-	u32 reg;
-	u16 data;
-	u8  retry = 0;
-
-	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
-
-	if (hw->phy.type != e1000_phy_igp_3)
-		return;
-
-	/* Try the workaround twice (if needed) */
-	do {
-		/* Disable link */
-		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
-		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
-			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
-		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
-
-		/* Call gig speed drop workaround on Gig disable before
-		 * accessing any PHY registers
-		 */
-		if (hw->mac.type == e1000_ich8lan)
-			e1000_gig_downshift_workaround_ich8lan(hw);
-
-		/* Write VR power-down enable */
-		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
-		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
-		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
-				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
-
-		/* Read it back and test */
-		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
-		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
-		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
-			break;
-
-		/* Issue PHY reset and repeat at most one more time */
-		reg = E1000_READ_REG(hw, E1000_CTRL);
-		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
-		retry++;
-	} while (retry);
-}
-
 /**
  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
  *  @hw: pointer to the HW structure
@@ -5557,218 +5503,6 @@ void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
 				     reg_data);
 }
 
-/**
- *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
- *  @hw: pointer to the HW structure
- *
- *  During S0 to Sx transition, it is possible the link remains at gig
- *  instead of negotiating to a lower speed.  Before going to Sx, set
- *  'Gig Disable' to force link speed negotiation to a lower speed based on
- *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
- *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
- *  needs to be written.
- *  Parts that support (and are linked to a partner which support) EEE in
- *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
- *  than 10Mbps w/o EEE.
- **/
-void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
-{
-	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-	u32 phy_ctrl;
-	s32 ret_val;
-
-	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
-
-	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
-	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
-
-	if (hw->phy.type == e1000_phy_i217) {
-		u16 phy_reg, device_id = hw->device_id;
-
-		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
-		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
-		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
-		    (hw->mac.type >= e1000_pch_spt)) {
-			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
-
-			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
-					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
-		}
-
-		ret_val = hw->phy.ops.acquire(hw);
-		if (ret_val)
-			goto out;
-
-		if (!dev_spec->eee_disable) {
-			u16 eee_advert;
-
-			ret_val =
-			    e1000_read_emi_reg_locked(hw,
-						      I217_EEE_ADVERTISEMENT,
-						      &eee_advert);
-			if (ret_val)
-				goto release;
-
-			/* Disable LPLU if both link partners support 100BaseT
-			 * EEE and 100Full is advertised on both ends of the
-			 * link, and enable Auto Enable LPI since there will
-			 * be no driver to enable LPI while in Sx.
-			 */
-			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
-			    (dev_spec->eee_lp_ability &
-			     I82579_EEE_100_SUPPORTED) &&
-			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
-				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
-					      E1000_PHY_CTRL_NOND0A_LPLU);
-
-				/* Set Auto Enable LPI after link up */
-				hw->phy.ops.read_reg_locked(hw,
-							    I217_LPI_GPIO_CTRL,
-							    &phy_reg);
-				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
-				hw->phy.ops.write_reg_locked(hw,
-							     I217_LPI_GPIO_CTRL,
-							     phy_reg);
-			}
-		}
-
-		/* For i217 Intel Rapid Start Technology support,
-		 * when the system is going into Sx and no manageability engine
-		 * is present, the driver must configure proxy to reset only on
-		 * power good.  LPI (Low Power Idle) state must also reset only
-		 * on power good, as well as the MTA (Multicast table array).
-		 * The SMBus release must also be disabled on LCD reset.
-		 */
-		if (!(E1000_READ_REG(hw, E1000_FWSM) &
-		      E1000_ICH_FWSM_FW_VALID)) {
-			/* Enable proxy to reset only on power good. */
-			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
-						    &phy_reg);
-			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
-			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
-						     phy_reg);
-
-			/* Set bit enable LPI (EEE) to reset only on
-			 * power good.
-			*/
-			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
-			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
-			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
-
-			/* Disable the SMB release on LCD reset. */
-			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
-			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
-			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
-		}
-
-		/* Enable MTA to reset for Intel Rapid Start Technology
-		 * Support
-		 */
-		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
-		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
-		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
-
-release:
-		hw->phy.ops.release(hw);
-	}
-out:
-	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
-
-	if (hw->mac.type == e1000_ich8lan)
-		e1000_gig_downshift_workaround_ich8lan(hw);
-
-	if (hw->mac.type >= e1000_pchlan) {
-		e1000_oem_bits_config_ich8lan(hw, false);
-
-		/* Reset PHY to activate OEM bits on 82577/8 */
-		if (hw->mac.type == e1000_pchlan)
-			e1000_phy_hw_reset_generic(hw);
-
-		ret_val = hw->phy.ops.acquire(hw);
-		if (ret_val)
-			return;
-		e1000_write_smbus_addr(hw);
-		hw->phy.ops.release(hw);
-	}
-
-	return;
-}
-
-/**
- *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
- *  @hw: pointer to the HW structure
- *
- *  During Sx to S0 transitions on non-managed devices or managed devices
- *  on which PHY resets are not blocked, if the PHY registers cannot be
- *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
- *  the PHY.
- *  On i217, setup Intel Rapid Start Technology.
- **/
-u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
-{
-	s32 ret_val;
-
-	DEBUGFUNC("e1000_resume_workarounds_pchlan");
-	if (hw->mac.type < e1000_pch2lan)
-		return E1000_SUCCESS;
-
-	ret_val = e1000_init_phy_workarounds_pchlan(hw);
-	if (ret_val) {
-		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
-		return ret_val;
-	}
-
-	/* For i217 Intel Rapid Start Technology support when the system
-	 * is transitioning from Sx and no manageability engine is present
-	 * configure SMBus to restore on reset, disable proxy, and enable
-	 * the reset on MTA (Multicast table array).
-	 */
-	if (hw->phy.type == e1000_phy_i217) {
-		u16 phy_reg;
-
-		ret_val = hw->phy.ops.acquire(hw);
-		if (ret_val) {
-			DEBUGOUT("Failed to setup iRST\n");
-			return ret_val;
-		}
-
-		/* Clear Auto Enable LPI after link up */
-		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
-		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
-		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
-
-		if (!(E1000_READ_REG(hw, E1000_FWSM) &
-		    E1000_ICH_FWSM_FW_VALID)) {
-			/* Restore clear on SMB if no manageability engine
-			 * is present
-			 */
-			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
-							      &phy_reg);
-			if (ret_val)
-				goto release;
-			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
-			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
-
-			/* Disable Proxy */
-			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
-		}
-		/* Enable reset on MTA */
-		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
-						      &phy_reg);
-		if (ret_val)
-			goto release;
-		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
-		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
-release:
-		if (ret_val)
-			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
-		hw->phy.ops.release(hw);
-		return ret_val;
-	}
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_ich8lan.h b/drivers/net/e1000/base/e1000_ich8lan.h
index e456e5132e..e28ebb55ba 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.h
+++ b/drivers/net/e1000/base/e1000_ich8lan.h
@@ -281,10 +281,7 @@
 #define E1000_PCI_REVISION_ID_REG	0x08
 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
 						 bool state);
-void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time);
 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_mac.c b/drivers/net/e1000/base/e1000_mac.c
index d3b3a6bac9..fe1516bd92 100644
--- a/drivers/net/e1000/base/e1000_mac.c
+++ b/drivers/net/e1000/base/e1000_mac.c
@@ -124,20 +124,6 @@ void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
 	return;
 }
 
-/**
- *  e1000_null_rar_set - No-op function, return 0
- *  @hw: pointer to the HW structure
- *  @h: dummy variable
- *  @a: dummy variable
- **/
-int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
-			u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
-{
-	DEBUGFUNC("e1000_null_rar_set");
-	UNREFERENCED_3PARAMETER(hw, h, a);
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_mac.h b/drivers/net/e1000/base/e1000_mac.h
index 86fcad23bb..0abaf2f452 100644
--- a/drivers/net/e1000/base/e1000_mac.h
+++ b/drivers/net/e1000/base/e1000_mac.h
@@ -13,7 +13,6 @@ s32  e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
 bool e1000_null_mng_mode(struct e1000_hw *hw);
 void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
 void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
-int  e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
 s32  e1000_blink_led_generic(struct e1000_hw *hw);
 s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
 s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_manage.c b/drivers/net/e1000/base/e1000_manage.c
index 4b81028302..266bb9ec91 100644
--- a/drivers/net/e1000/base/e1000_manage.c
+++ b/drivers/net/e1000/base/e1000_manage.c
@@ -353,195 +353,3 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
 
 	return false;
 }
-
-/**
- *  e1000_host_interface_command - Writes buffer to host interface
- *  @hw: pointer to the HW structure
- *  @buffer: contains a command to write
- *  @length: the byte length of the buffer, must be multiple of 4 bytes
- *
- *  Writes a buffer to the Host Interface.  Upon success, returns E1000_SUCCESS
- *  else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
-	u32 hicr, i;
-
-	DEBUGFUNC("e1000_host_interface_command");
-
-	if (!(hw->mac.arc_subsystem_valid)) {
-		DEBUGOUT("Hardware doesn't support host interface command.\n");
-		return E1000_SUCCESS;
-	}
-
-	if (!hw->mac.asf_firmware_present) {
-		DEBUGOUT("Firmware is not present.\n");
-		return E1000_SUCCESS;
-	}
-
-	if (length == 0 || length & 0x3 ||
-	    length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
-		DEBUGOUT("Buffer length failure.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Check that the host interface is enabled. */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	if (!(hicr & E1000_HICR_EN)) {
-		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Calculate length in DWORDs */
-	length >>= 2;
-
-	/* The device driver writes the relevant command block
-	 * into the ram area.
-	 */
-	for (i = 0; i < length; i++)
-		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
-					    *((u32 *)buffer + i));
-
-	/* Setting this bit tells the ARC that a new command is pending. */
-	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
-	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-		hicr = E1000_READ_REG(hw, E1000_HICR);
-		if (!(hicr & E1000_HICR_C))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check command successful completion. */
-	if (i == E1000_HI_COMMAND_TIMEOUT ||
-	    (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
-		DEBUGOUT("Command has failed with no status valid.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	for (i = 0; i < length; i++)
-		*((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
-								  E1000_HOST_IF,
-								  i);
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_load_firmware - Writes proxy FW code buffer to host interface
- *                        and execute.
- *  @hw: pointer to the HW structure
- *  @buffer: contains a firmware to write
- *  @length: the byte length of the buffer, must be multiple of 4 bytes
- *
- *  Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled
- *  in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
-{
-	u32 hicr, hibba, fwsm, icr, i;
-
-	DEBUGFUNC("e1000_load_firmware");
-
-	if (hw->mac.type < e1000_i210) {
-		DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
-		return -E1000_ERR_CONFIG;
-	}
-
-	/* Check that the host interface is enabled. */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	if (!(hicr & E1000_HICR_EN)) {
-		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-		return -E1000_ERR_CONFIG;
-	}
-	if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
-		DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
-		return -E1000_ERR_CONFIG;
-	}
-
-	if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
-		DEBUGOUT("Buffer length failure.\n");
-		return -E1000_ERR_INVALID_ARGUMENT;
-	}
-
-	/* Clear notification from ROM-FW by reading ICR register */
-	icr = E1000_READ_REG(hw, E1000_ICR_V2);
-
-	/* Reset ROM-FW */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	hicr |= E1000_HICR_FW_RESET_ENABLE;
-	E1000_WRITE_REG(hw, E1000_HICR, hicr);
-	hicr |= E1000_HICR_FW_RESET;
-	E1000_WRITE_REG(hw, E1000_HICR, hicr);
-	E1000_WRITE_FLUSH(hw);
-
-	/* Wait till MAC notifies about its readiness after ROM-FW reset */
-	for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
-		icr = E1000_READ_REG(hw, E1000_ICR_V2);
-		if (icr & E1000_ICR_MNG)
-			break;
-		msec_delay(1);
-	}
-
-	/* Check for timeout */
-	if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("FW reset failed.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Wait till MAC is ready to accept new FW code */
-	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-		fwsm = E1000_READ_REG(hw, E1000_FWSM);
-		if ((fwsm & E1000_FWSM_FW_VALID) &&
-		    ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
-		    E1000_FWSM_HI_EN_ONLY_MODE))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check for timeout */
-	if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("FW reset failed.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Calculate length in DWORDs */
-	length >>= 2;
-
-	/* The device driver writes the relevant FW code block
-	 * into the ram area in DWORDs via 1kB ram addressing window.
-	 */
-	for (i = 0; i < length; i++) {
-		if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
-			/* Point to correct 1kB ram window */
-			hibba = E1000_HI_FW_BASE_ADDRESS +
-				((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
-				(i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
-
-			E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
-		}
-
-		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
-					    i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
-					    *((u32 *)buffer + i));
-	}
-
-	/* Setting this bit tells the ARC that a new FW is ready to execute. */
-	hicr = E1000_READ_REG(hw, E1000_HICR);
-	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
-
-	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
-		hicr = E1000_READ_REG(hw, E1000_HICR);
-		if (!(hicr & E1000_HICR_C))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check for successful FW start. */
-	if (i == E1000_HI_COMMAND_TIMEOUT) {
-		DEBUGOUT("New FW did not start within timeout period.\n");
-		return -E1000_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	return E1000_SUCCESS;
-}
diff --git a/drivers/net/e1000/base/e1000_manage.h b/drivers/net/e1000/base/e1000_manage.h
index 268a13381d..da0246b6a9 100644
--- a/drivers/net/e1000/base/e1000_manage.h
+++ b/drivers/net/e1000/base/e1000_manage.h
@@ -16,8 +16,6 @@ s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
 				       u8 *buffer, u16 length);
 bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 u8 e1000_calculate_checksum(u8 *buffer, u32 length);
-s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
-s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
 
 enum e1000_mng_mode {
 	e1000_mng_mode_none = 0,
diff --git a/drivers/net/e1000/base/e1000_nvm.c b/drivers/net/e1000/base/e1000_nvm.c
index 430fecaf6d..4b3ce7d634 100644
--- a/drivers/net/e1000/base/e1000_nvm.c
+++ b/drivers/net/e1000/base/e1000_nvm.c
@@ -947,135 +947,6 @@ s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
 	return E1000_SUCCESS;
 }
 
-
-/**
- *  e1000_read_pba_raw
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @max_pba_block_size: PBA block size limit
- *  @pba: pointer to output PBA structure
- *
- *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
- *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
- *
- **/
-s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-		       u32 eeprom_buf_size, u16 max_pba_block_size,
-		       struct e1000_pba *pba)
-{
-	s32 ret_val;
-	u16 pba_block_size;
-
-	if (pba == NULL)
-		return -E1000_ERR_PARAM;
-
-	if (eeprom_buf == NULL) {
-		ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2,
-					 &pba->word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
-			pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0];
-			pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1];
-		} else {
-			return -E1000_ERR_PARAM;
-		}
-	}
-
-	if (pba->word[0] == NVM_PBA_PTR_GUARD) {
-		if (pba->pba_block == NULL)
-			return -E1000_ERR_PARAM;
-
-		ret_val = e1000_get_pba_block_size(hw, eeprom_buf,
-						   eeprom_buf_size,
-						   &pba_block_size);
-		if (ret_val)
-			return ret_val;
-
-		if (pba_block_size > max_pba_block_size)
-			return -E1000_ERR_PARAM;
-
-		if (eeprom_buf == NULL) {
-			ret_val = e1000_read_nvm(hw, pba->word[1],
-						 pba_block_size,
-						 pba->pba_block);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > (u32)(pba->word[1] +
-					      pba_block_size)) {
-				memcpy(pba->pba_block,
-				       &eeprom_buf[pba->word[1]],
-				       pba_block_size * sizeof(u16));
-			} else {
-				return -E1000_ERR_PARAM;
-			}
-		}
-	}
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_pba_raw
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @pba: pointer to PBA structure
- *
- *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
- *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
- *
- **/
-s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-			u32 eeprom_buf_size, struct e1000_pba *pba)
-{
-	s32 ret_val;
-
-	if (pba == NULL)
-		return -E1000_ERR_PARAM;
-
-	if (eeprom_buf == NULL) {
-		ret_val = e1000_write_nvm(hw, NVM_PBA_OFFSET_0, 2,
-					  &pba->word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
-			eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0];
-			eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1];
-		} else {
-			return -E1000_ERR_PARAM;
-		}
-	}
-
-	if (pba->word[0] == NVM_PBA_PTR_GUARD) {
-		if (pba->pba_block == NULL)
-			return -E1000_ERR_PARAM;
-
-		if (eeprom_buf == NULL) {
-			ret_val = e1000_write_nvm(hw, pba->word[1],
-						  pba->pba_block[0],
-						  pba->pba_block);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > (u32)(pba->word[1] +
-					      pba->pba_block[0])) {
-				memcpy(&eeprom_buf[pba->word[1]],
-				       pba->pba_block,
-				       pba->pba_block[0] * sizeof(u16));
-			} else {
-				return -E1000_ERR_PARAM;
-			}
-		}
-	}
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_get_pba_block_size
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_nvm.h b/drivers/net/e1000/base/e1000_nvm.h
index 056f823537..e48d638795 100644
--- a/drivers/net/e1000/base/e1000_nvm.h
+++ b/drivers/net/e1000/base/e1000_nvm.h
@@ -40,11 +40,6 @@ s32  e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
 s32  e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
 				   u32 pba_num_size);
 s32  e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
-s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-		       u32 eeprom_buf_size, u16 max_pba_block_size,
-		       struct e1000_pba *pba);
-s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
-			u32 eeprom_buf_size, struct e1000_pba *pba);
 s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf,
 			     u32 eeprom_buf_size, u16 *pba_block_size);
 s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/e1000/base/e1000_phy.c b/drivers/net/e1000/base/e1000_phy.c
index 62d0be5080..b3be39f7bd 100644
--- a/drivers/net/e1000/base/e1000_phy.c
+++ b/drivers/net/e1000/base/e1000_phy.c
@@ -545,79 +545,6 @@ s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
 	return E1000_SUCCESS;
 }
 
-/**
- *  e1000_write_sfp_data_byte - Writes SFP module data.
- *  @hw: pointer to the HW structure
- *  @offset: byte location offset to write to
- *  @data: data to write
- *
- *  Writes one byte to SFP module data stored
- *  in SFP resided EEPROM memory or SFP diagnostic area.
- *  Function should be called with
- *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- *  E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- *  access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
-	u32 i = 0;
-	u32 i2ccmd = 0;
-	u32 data_local = 0;
-
-	DEBUGFUNC("e1000_write_sfp_data_byte");
-
-	if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
-		DEBUGOUT("I2CCMD command address exceeds upper limit\n");
-		return -E1000_ERR_PHY;
-	}
-	/* The programming interface is 16 bits wide
-	 * so we need to read the whole word first
-	 * then update appropriate byte lane and write
-	 * the updated word back.
-	 */
-	/* Set up Op-code, EEPROM Address,in the I2CCMD
-	 * register. The MAC will take care of interfacing
-	 * with an EEPROM to write the data given.
-	 */
-	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-		  E1000_I2CCMD_OPCODE_READ);
-	/* Set a command to read single word */
-	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-		usec_delay(50);
-		/* Poll the ready bit to see if lastly
-		 * launched I2C operation completed
-		 */
-		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
-		if (i2ccmd & E1000_I2CCMD_READY) {
-			/* Check if this is READ or WRITE phase */
-			if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
-			    E1000_I2CCMD_OPCODE_READ) {
-				/* Write the selected byte
-				 * lane and update whole word
-				 */
-				data_local = i2ccmd & 0xFF00;
-				data_local |= (u32)data;
-				i2ccmd = ((offset <<
-					E1000_I2CCMD_REG_ADDR_SHIFT) |
-					E1000_I2CCMD_OPCODE_WRITE | data_local);
-				E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-			} else {
-				break;
-			}
-		}
-	}
-	if (!(i2ccmd & E1000_I2CCMD_READY)) {
-		DEBUGOUT("I2CCMD Write did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (i2ccmd & E1000_I2CCMD_ERROR) {
-		DEBUGOUT("I2CCMD Error bit set\n");
-		return -E1000_ERR_PHY;
-	}
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_read_phy_reg_m88 - Read m88 PHY register
  *  @hw: pointer to the HW structure
@@ -4083,134 +4010,6 @@ s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
 	return ret_val;
 }
 
-/**
- *  e1000_read_phy_reg_mphy - Read mPHY control register
- *  @hw: pointer to the HW structure
- *  @address: address to be read
- *  @data: pointer to the read data
- *
- *  Reads the mPHY control register in the PHY at offset and stores the
- *  information read to data.
- **/
-s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
-{
-	u32 mphy_ctrl = 0;
-	bool locked = false;
-	bool ready;
-
-	DEBUGFUNC("e1000_read_phy_reg_mphy");
-
-	/* Check if mPHY is ready to read/write operations */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* Check if mPHY access is disabled and enable it if so */
-	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
-		locked = true;
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-	}
-
-	/* Set the address that we want to read */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* We mask address, because we want to use only current lane */
-	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
-		~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
-		(address & E1000_MPHY_ADDRESS_MASK);
-	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-
-	/* Read data from the address */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-	*data = E1000_READ_REG(hw, E1000_MPHY_DATA);
-
-	/* Disable access to mPHY if it was originally disabled */
-	if (locked) {
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
-				E1000_MPHY_DIS_ACCESS);
-	}
-
-	return E1000_SUCCESS;
-}
-
-/**
- *  e1000_write_phy_reg_mphy - Write mPHY control register
- *  @hw: pointer to the HW structure
- *  @address: address to write to
- *  @data: data to write to register at offset
- *  @line_override: used when we want to use different line than default one
- *
- *  Writes data to mPHY control register.
- **/
-s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
-			     bool line_override)
-{
-	u32 mphy_ctrl = 0;
-	bool locked = false;
-	bool ready;
-
-	DEBUGFUNC("e1000_write_phy_reg_mphy");
-
-	/* Check if mPHY is ready to read/write operations */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* Check if mPHY access is disabled and enable it if so */
-	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
-		locked = true;
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-	}
-
-	/* Set the address that we want to read */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-
-	/* We mask address, because we want to use only current lane */
-	if (line_override)
-		mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
-	else
-		mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
-	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
-		(address & E1000_MPHY_ADDRESS_MASK);
-	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-
-	/* Read data from the address */
-	ready = e1000_is_mphy_ready(hw);
-	if (!ready)
-		return -E1000_ERR_PHY;
-	E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
-
-	/* Disable access to mPHY if it was originally disabled */
-	if (locked) {
-		ready = e1000_is_mphy_ready(hw);
-		if (!ready)
-			return -E1000_ERR_PHY;
-		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
-				E1000_MPHY_DIS_ACCESS);
-	}
-
-	return E1000_SUCCESS;
-}
-
 /**
  *  e1000_is_mphy_ready - Check if mPHY control register is not busy
  *  @hw: pointer to the HW structure
diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h
index 81c5308589..fcd1e09f42 100644
--- a/drivers/net/e1000/base/e1000_phy.h
+++ b/drivers/net/e1000/base/e1000_phy.h
@@ -71,7 +71,6 @@ s32  e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
 s32  e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32  e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
 s32  e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
@@ -86,9 +85,6 @@ s32  e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
 s32  e1000_get_cable_length_82577(struct e1000_hw *hw);
 s32  e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
 s32  e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
-s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
-			     bool line_override);
 bool e1000_is_mphy_ready(struct e1000_hw *hw);
 
 s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
diff --git a/drivers/net/e1000/base/e1000_vf.c b/drivers/net/e1000/base/e1000_vf.c
index 44ebe07ee4..9b001f9c2e 100644
--- a/drivers/net/e1000/base/e1000_vf.c
+++ b/drivers/net/e1000/base/e1000_vf.c
@@ -411,25 +411,6 @@ void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
 	e1000_write_msg_read_ack(hw, msgbuf, E1000_VFMAILBOX_SIZE);
 }
 
-/**
- *  e1000_vfta_set_vf - Set/Unset vlan filter table address
- *  @hw: pointer to the HW structure
- *  @vid: determines the vfta register and bit to set/unset
- *  @set: if true then set bit, else clear bit
- **/
-void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
-{
-	u32 msgbuf[2];
-
-	msgbuf[0] = E1000_VF_SET_VLAN;
-	msgbuf[1] = vid;
-	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
-	if (set)
-		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
-
-	e1000_write_msg_read_ack(hw, msgbuf, 2);
-}
-
 /** e1000_rlpml_set_vf - Set the maximum receive packet length
  *  @hw: pointer to the HW structure
  *  @max_size: value to assign to max frame size
diff --git a/drivers/net/e1000/base/e1000_vf.h b/drivers/net/e1000/base/e1000_vf.h
index 4bec21c935..ff62970132 100644
--- a/drivers/net/e1000/base/e1000_vf.h
+++ b/drivers/net/e1000/base/e1000_vf.h
@@ -260,7 +260,6 @@ enum e1000_promisc_type {
 
 /* These functions must be implemented by drivers */
 s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
 #endif /* _E1000_VF_H_ */
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index aae68721fb..04fd15c998 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -1064,11 +1064,6 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 				      feature_ver);
 }
 
-int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->rss.hash_func;
-}
-
 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
@@ -1318,31 +1313,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
 	return 0;
 }
 
-static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
-						 u16 intr_delay_resolution)
-{
-	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
-
-	if (unlikely(!intr_delay_resolution)) {
-		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
-		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
-	}
-
-	/* update Rx */
-	ena_dev->intr_moder_rx_interval =
-		ena_dev->intr_moder_rx_interval *
-		prev_intr_delay_resolution /
-		intr_delay_resolution;
-
-	/* update Tx */
-	ena_dev->intr_moder_tx_interval =
-		ena_dev->intr_moder_tx_interval *
-		prev_intr_delay_resolution /
-		intr_delay_resolution;
-
-	ena_dev->intr_delay_resolution = intr_delay_resolution;
-}
-
 /*****************************************************************************/
 /*******************************      API       ******************************/
 /*****************************************************************************/
@@ -1703,17 +1673,6 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 	ena_dev->admin_queue.polling = polling;
 }
 
-bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->admin_queue.polling;
-}
-
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
-					 bool polling)
-{
-	ena_dev->admin_queue.auto_polling = polling;
-}
-
 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1942,12 +1901,6 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
 }
 
-int ena_com_get_link_params(struct ena_com_dev *ena_dev,
-			    struct ena_admin_get_feat_resp *resp)
-{
-	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
-}
-
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -2277,24 +2230,6 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
 	return ret;
 }
 
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
-				 struct ena_admin_feature_offload_desc *offload)
-{
-	int ret;
-	struct ena_admin_get_feat_resp resp;
-
-	ret = ena_com_get_feature(ena_dev, &resp,
-				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
-	if (unlikely(ret)) {
-		ena_trc_err("Failed to get offload capabilities %d\n", ret);
-		return ret;
-	}
-
-	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
-
-	return 0;
-}
-
 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -2416,44 +2351,6 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 	return rc;
 }
 
-int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
-			      enum ena_admin_hash_functions *func)
-{
-	struct ena_rss *rss = &ena_dev->rss;
-	struct ena_admin_get_feat_resp get_resp;
-	int rc;
-
-	if (unlikely(!func))
-		return ENA_COM_INVAL;
-
-	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
-				    ENA_ADMIN_RSS_HASH_FUNCTION,
-				    rss->hash_key_dma_addr,
-				    sizeof(*rss->hash_key), 0);
-	if (unlikely(rc))
-		return rc;
-
-	/* ENA_FFS() returns 1 in case the lsb is set */
-	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
-	if (rss->hash_func)
-		rss->hash_func--;
-
-	*func = rss->hash_func;
-
-	return 0;
-}
-
-int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
-{
-	struct ena_admin_feature_rss_flow_hash_control *hash_key =
-		ena_dev->rss.hash_key;
-
-	if (key)
-		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
-
-	return 0;
-}
-
 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
 			  enum ena_admin_flow_hash_proto proto,
 			  u16 *fields)
@@ -2582,43 +2479,6 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
 	return rc;
 }
 
-int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
-			   enum ena_admin_flow_hash_proto proto,
-			   u16 hash_fields)
-{
-	struct ena_rss *rss = &ena_dev->rss;
-	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
-	u16 supported_fields;
-	int rc;
-
-	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
-		ena_trc_err("Invalid proto num (%u)\n", proto);
-		return ENA_COM_INVAL;
-	}
-
-	/* Get the ctrl table */
-	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
-	if (unlikely(rc))
-		return rc;
-
-	/* Make sure all the fields are supported */
-	supported_fields = hash_ctrl->supported_fields[proto].fields;
-	if ((hash_fields & supported_fields) != hash_fields) {
-		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
-			    proto, hash_fields, supported_fields);
-	}
-
-	hash_ctrl->selected_fields[proto].fields = hash_fields;
-
-	rc = ena_com_set_hash_ctrl(ena_dev);
-
-	/* In case of failure, restore the old hash ctrl */
-	if (unlikely(rc))
-		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
-
-	return 0;
-}
-
 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
 				      u16 entry_idx, u16 entry_value)
 {
@@ -2874,88 +2734,6 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
 	return ret;
 }
 
-/* Interrupt moderation */
-bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
-{
-	return ena_com_check_supported_feature_id(ena_dev,
-						  ENA_ADMIN_INTERRUPT_MODERATION);
-}
-
-static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
-							  u32 intr_delay_resolution,
-							  u32 *intr_moder_interval)
-{
-	if (!intr_delay_resolution) {
-		ena_trc_err("Illegal interrupt delay granularity value\n");
-		return ENA_COM_FAULT;
-	}
-
-	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
-
-	return 0;
-}
-
-
-int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
-						      u32 tx_coalesce_usecs)
-{
-	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
-							      ena_dev->intr_delay_resolution,
-							      &ena_dev->intr_moder_tx_interval);
-}
-
-int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
-						      u32 rx_coalesce_usecs)
-{
-	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
-							      ena_dev->intr_delay_resolution,
-							      &ena_dev->intr_moder_rx_interval);
-}
-
-int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
-{
-	struct ena_admin_get_feat_resp get_resp;
-	u16 delay_resolution;
-	int rc;
-
-	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
-
-	if (rc) {
-		if (rc == ENA_COM_UNSUPPORTED) {
-			ena_trc_dbg("Feature %d isn't supported\n",
-				    ENA_ADMIN_INTERRUPT_MODERATION);
-			rc = 0;
-		} else {
-			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
-				    rc);
-		}
-
-		/* no moderation supported, disable adaptive support */
-		ena_com_disable_adaptive_moderation(ena_dev);
-		return rc;
-	}
-
-	/* if moderation is supported by device we set adaptive moderation */
-	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
-	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
-
-	/* Disable adaptive moderation by default - can be enabled later */
-	ena_com_disable_adaptive_moderation(ena_dev);
-
-	return 0;
-}
-
-unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->intr_moder_tx_interval;
-}
-
-unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->intr_moder_rx_interval;
-}
-
 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 			    struct ena_admin_feature_llq_desc *llq_features,
 			    struct ena_llq_configurations *llq_default_cfg)
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index 64d8f247cb..f82c9f1876 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -483,29 +483,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
  */
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
 
-/* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode
- * @ena_dev: ENA communication layer struct
- *
- * Get the admin completion mode.
- * If polling mode is on, ena_com_execute_admin_command will perform a
- * polling on the admin completion queue for the commands completion,
- * otherwise it will wait on wait event.
- *
- * @return state
- */
-bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev);
-
-/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
- * @ena_dev: ENA communication layer struct
- * @polling: Enable/Disable polling mode
- *
- * Set the autopolling mode.
- * If autopolling is on:
- * In case of missing interrupt when data is available switch to polling.
- */
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
-					 bool polling);
-
 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
  * @ena_dev: ENA communication layer struct
  *
@@ -552,18 +529,6 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
  */
 int ena_com_validate_version(struct ena_com_dev *ena_dev);
 
-/* ena_com_get_link_params - Retrieve physical link parameters.
- * @ena_dev: ENA communication layer struct
- * @resp: Link parameters
- *
- * Retrieve the physical link parameters,
- * like speed, auto-negotiation and full duplex support.
- *
- * @return - 0 on Success negative value otherwise.
- */
-int ena_com_get_link_params(struct ena_com_dev *ena_dev,
-			    struct ena_admin_get_feat_resp *resp);
-
 /* ena_com_get_dma_width - Retrieve physical dma address width the device
  * supports.
  * @ena_dev: ENA communication layer struct
@@ -619,15 +584,6 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
  */
 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
 
-/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
- * @ena_dev: ENA communication layer struct
- * @offlad: offload return value
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
-				 struct ena_admin_feature_offload_desc *offload);
-
 /* ena_com_rss_init - Init RSS
  * @ena_dev: ENA communication layer struct
  * @log_size: indirection log size
@@ -647,14 +603,6 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
  */
 void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
 
-/* ena_com_get_current_hash_function - Get RSS hash function
- * @ena_dev: ENA communication layer struct
- *
- * Return the current hash function.
- * @return: 0 or one of the ena_admin_hash_functions values.
- */
-int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
-
 /* ena_com_fill_hash_function - Fill RSS hash function
  * @ena_dev: ENA communication layer struct
  * @func: The hash function (Toeplitz or crc)
@@ -686,48 +634,6 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
  */
 int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
 
-/* ena_com_get_hash_function - Retrieve the hash function from the device.
- * @ena_dev: ENA communication layer struct
- * @func: hash function
- *
- * Retrieve the hash function from the device.
- *
- * @note: If the caller called ena_com_fill_hash_function but didn't flush
- * it to the device, the new configuration will be lost.
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
-			      enum ena_admin_hash_functions *func);
-
-/* ena_com_get_hash_key - Retrieve the hash key
- * @ena_dev: ENA communication layer struct
- * @key: hash key
- *
- * Retrieve the hash key.
- *
- * @note: If the caller called ena_com_fill_hash_key but didn't flush
- * it to the device, the new configuration will be lost.
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
-/* ena_com_fill_hash_ctrl - Fill RSS hash control
- * @ena_dev: ENA communication layer struct.
- * @proto: The protocol to configure.
- * @hash_fields: bit mask of ena_admin_flow_hash_fields
- *
- * Fill the ena_dev resources with the desire hash control (the ethernet
- * fields that take part of the hash) for a specific protocol.
- * To flush the hash control to the device, the caller should call
- * ena_com_set_hash_ctrl.
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
-			   enum ena_admin_flow_hash_proto proto,
-			   u16 hash_fields);
-
 /* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
  * @ena_dev: ENA communication layer struct
  *
@@ -884,56 +790,6 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
 				  struct ena_admin_acq_entry *cmd_comp,
 				  size_t cmd_comp_size);
 
-/* ena_com_init_interrupt_moderation - Init interrupt moderation
- * @ena_dev: ENA communication layer struct
- *
- * @return - 0 on success, negative value on failure.
- */
-int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
-
-/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
- * capability is supported by the device.
- *
- * @return - supported or not.
- */
-bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
-
-/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
- * non-adaptive interval in Tx direction.
- * @ena_dev: ENA communication layer struct
- * @tx_coalesce_usecs: Interval in usec.
- *
- * @return - 0 on success, negative value on failure.
- */
-int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
-						      u32 tx_coalesce_usecs);
-
-/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
- * non-adaptive interval in Rx direction.
- * @ena_dev: ENA communication layer struct
- * @rx_coalesce_usecs: Interval in usec.
- *
- * @return - 0 on success, negative value on failure.
- */
-int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
-						      u32 rx_coalesce_usecs);
-
-/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
- * non-adaptive interval in Tx direction.
- * @ena_dev: ENA communication layer struct
- *
- * @return - interval in usec
- */
-unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
-
-/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
- * non-adaptive interval in Rx direction.
- * @ena_dev: ENA communication layer struct
- *
- * @return - interval in usec
- */
-unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
-
 /* ena_com_config_dev_mode - Configure the placement policy of the device.
  * @ena_dev: ENA communication layer struct
  * @llq_features: LLQ feature descriptor, retrieve via
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
index a35d92fbd3..05ab030d07 100644
--- a/drivers/net/ena/base/ena_eth_com.c
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -613,14 +613,3 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 
 	return ena_com_sq_update_tail(io_sq);
 }
-
-bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
-{
-	struct ena_eth_io_rx_cdesc_base *cdesc;
-
-	cdesc = ena_com_get_next_rx_cdesc(io_cq);
-	if (cdesc)
-		return false;
-	else
-		return true;
-}
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
index 7dda16cd9f..3799f08bf4 100644
--- a/drivers/net/ena/base/ena_eth_com.h
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -64,8 +64,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 			       struct ena_com_buf *ena_buf,
 			       u16 req_id);
 
-bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
-
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
 				       struct ena_eth_io_intr_reg *intr_reg)
 {
diff --git a/drivers/net/fm10k/base/fm10k_api.c b/drivers/net/fm10k/base/fm10k_api.c
index dfb50a10d1..631babcdd6 100644
--- a/drivers/net/fm10k/base/fm10k_api.c
+++ b/drivers/net/fm10k/base/fm10k_api.c
@@ -140,34 +140,6 @@ s32 fm10k_start_hw(struct fm10k_hw *hw)
 			       FM10K_NOT_IMPLEMENTED);
 }
 
-/**
- *  fm10k_get_bus_info - Set PCI bus info
- *  @hw: pointer to hardware structure
- *
- *  Sets the PCI bus info (speed, width, type) within the fm10k_hw structure
- **/
-s32 fm10k_get_bus_info(struct fm10k_hw *hw)
-{
-	return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw),
-			       FM10K_NOT_IMPLEMENTED);
-}
-
-#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
-/**
- *  fm10k_is_slot_appropriate - Indicate appropriate slot for this SKU
- *  @hw: pointer to hardware structure
- *
- *  Looks at the PCIe bus info to confirm whether or not this slot can support
- *  the necessary bandwidth for this device.
- **/
-bool fm10k_is_slot_appropriate(struct fm10k_hw *hw)
-{
-	if (hw->mac.ops.is_slot_appropriate)
-		return hw->mac.ops.is_slot_appropriate(hw);
-	return true;
-}
-
-#endif
 /**
  *  fm10k_update_vlan - Clear VLAN ID to VLAN filter table
  *  @hw: pointer to hardware structure
@@ -233,36 +205,6 @@ void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats)
 	}
 }
 
-/**
- *  fm10k_configure_dglort_map - Configures GLORT entry and queues
- *  @hw: pointer to hardware structure
- *  @dglort: pointer to dglort configuration structure
- *
- *  Reads the configuration structure contained in dglort_cfg and uses
- *  that information to then populate a DGLORTMAP/DEC entry and the queues
- *  to which it has been assigned.
- **/
-s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
-			       struct fm10k_dglort_cfg *dglort)
-{
-	return fm10k_call_func(hw, hw->mac.ops.configure_dglort_map,
-			       (hw, dglort), FM10K_NOT_IMPLEMENTED);
-}
-
-/**
- *  fm10k_set_dma_mask - Configures PhyAddrSpace to limit DMA to system
- *  @hw: pointer to hardware structure
- *  @dma_mask: 64 bit DMA mask required for platform
- *
- *  This function configures the endpoint to limit the access to memory
- *  beyond what is physically in the system.
- **/
-void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask)
-{
-	if (hw->mac.ops.set_dma_mask)
-		hw->mac.ops.set_dma_mask(hw, dma_mask);
-}
-
 /**
  *  fm10k_get_fault - Record a fault in one of the interface units
  *  @hw: pointer to hardware structure
@@ -298,49 +240,3 @@ s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
 			       (hw, lport, mac, vid, add, flags),
 			       FM10K_NOT_IMPLEMENTED);
 }
-
-/**
- *  fm10k_update_mc_addr - Update device multicast address
- *  @hw: pointer to the HW structure
- *  @lport: logical port ID to update - unused
- *  @mac: MAC address to add/remove from table
- *  @vid: VLAN ID to add/remove from table
- *  @add: Indicates if this is an add or remove operation
- *
- *  This function is used to add or remove multicast MAC addresses
- **/
-s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
-			 const u8 *mac, u16 vid, bool add)
-{
-	return fm10k_call_func(hw, hw->mac.ops.update_mc_addr,
-			       (hw, lport, mac, vid, add),
-			       FM10K_NOT_IMPLEMENTED);
-}
-
-/**
- *  fm10k_adjust_systime - Adjust systime frequency
- *  @hw: pointer to hardware structure
- *  @ppb: adjustment rate in parts per billion
- *
- *  This function is meant to update the frequency of the clock represented
- *  by the SYSTIME register.
- **/
-s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb)
-{
-	return fm10k_call_func(hw, hw->mac.ops.adjust_systime,
-			       (hw, ppb), FM10K_NOT_IMPLEMENTED);
-}
-
-/**
- *  fm10k_notify_offset - Notify switch of change in PTP offset
- *  @hw: pointer to hardware structure
- *  @offset: 64bit unsigned offset from hardware SYSTIME value
- *
- *  This function is meant to notify switch of change in the PTP offset for
- *  the hardware SYSTIME registers.
- **/
-s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset)
-{
-	return fm10k_call_func(hw, hw->mac.ops.notify_offset,
-			       (hw, offset), FM10K_NOT_IMPLEMENTED);
-}
diff --git a/drivers/net/fm10k/base/fm10k_api.h b/drivers/net/fm10k/base/fm10k_api.h
index d9593bba00..4ffe41cd08 100644
--- a/drivers/net/fm10k/base/fm10k_api.h
+++ b/drivers/net/fm10k/base/fm10k_api.h
@@ -14,22 +14,11 @@ s32 fm10k_init_hw(struct fm10k_hw *hw);
 s32 fm10k_stop_hw(struct fm10k_hw *hw);
 s32 fm10k_start_hw(struct fm10k_hw *hw);
 s32 fm10k_init_shared_code(struct fm10k_hw *hw);
-s32 fm10k_get_bus_info(struct fm10k_hw *hw);
-#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
-bool fm10k_is_slot_appropriate(struct fm10k_hw *hw);
-#endif
 s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set);
 s32 fm10k_read_mac_addr(struct fm10k_hw *hw);
 void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
 void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats);
-s32 fm10k_configure_dglort_map(struct fm10k_hw *hw,
-			       struct fm10k_dglort_cfg *dglort);
-void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask);
 s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault);
 s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport,
 			  const u8 *mac, u16 vid, bool add, u8 flags);
-s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport,
-			 const u8 *mac, u16 vid, bool add);
-s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb);
-s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset);
 #endif /* _FM10K_API_H_ */
diff --git a/drivers/net/fm10k/base/fm10k_tlv.c b/drivers/net/fm10k/base/fm10k_tlv.c
index adffc1bcef..72b0ffd4cb 100644
--- a/drivers/net/fm10k/base/fm10k_tlv.c
+++ b/drivers/net/fm10k/base/fm10k_tlv.c
@@ -24,59 +24,6 @@ s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id)
 	return FM10K_SUCCESS;
 }
 
-/**
- *  fm10k_tlv_attr_put_null_string - Place null terminated string on message
- *  @msg: Pointer to message block
- *  @attr_id: Attribute ID
- *  @string: Pointer to string to be stored in attribute
- *
- *  This function will reorder a string to be CPU endian and store it in
- *  the attribute buffer.  It will return success if provided with a valid
- *  pointers.
- **/
-static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
-					  const unsigned char *string)
-{
-	u32 attr_data = 0, len = 0;
-	u32 *attr;
-
-	DEBUGFUNC("fm10k_tlv_attr_put_null_string");
-
-	/* verify pointers are not NULL */
-	if (!string || !msg)
-		return FM10K_ERR_PARAM;
-
-	attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
-
-	/* copy string into local variable and then write to msg */
-	do {
-		/* write data to message */
-		if (len && !(len % 4)) {
-			attr[len / 4] = attr_data;
-			attr_data = 0;
-		}
-
-		/* record character to offset location */
-		attr_data |= (u32)(*string) << (8 * (len % 4));
-		len++;
-
-		/* test for NULL and then increment */
-	} while (*(string++));
-
-	/* write last piece of data to message */
-	attr[(len + 3) / 4] = attr_data;
-
-	/* record attribute header, update message length */
-	len <<= FM10K_TLV_LEN_SHIFT;
-	attr[0] = len | attr_id;
-
-	/* add header length to length */
-	len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
-	*msg += FM10K_TLV_LEN_ALIGN(len);
-
-	return FM10K_SUCCESS;
-}
-
 /**
  *  fm10k_tlv_attr_get_null_string - Get null terminated string from attribute
  *  @attr: Pointer to attribute
@@ -346,68 +293,6 @@ s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len)
 	return FM10K_SUCCESS;
 }
 
-/**
- *  fm10k_tlv_attr_nest_start - Start a set of nested attributes
- *  @msg: Pointer to message block
- *  @attr_id: Attribute ID
- *
- *  This function will mark off a new nested region for encapsulating
- *  a given set of attributes.  The idea is if you wish to place a secondary
- *  structure within the message this mechanism allows for that.  The
- *  function will return NULL on failure, and a pointer to the start
- *  of the nested attributes on success.
- **/
-static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
-{
-	u32 *attr;
-
-	DEBUGFUNC("fm10k_tlv_attr_nest_start");
-
-	/* verify pointer is not NULL */
-	if (!msg)
-		return NULL;
-
-	attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
-
-	attr[0] = attr_id;
-
-	/* return pointer to nest header */
-	return attr;
-}
-
-/**
- *  fm10k_tlv_attr_nest_stop - Stop a set of nested attributes
- *  @msg: Pointer to message block
- *
- *  This function closes off an existing set of nested attributes.  The
- *  message pointer should be pointing to the parent of the nest.  So in
- *  the case of a nest within the nest this would be the outer nest pointer.
- *  This function will return success provided all pointers are valid.
- **/
-static s32 fm10k_tlv_attr_nest_stop(u32 *msg)
-{
-	u32 *attr;
-	u32 len;
-
-	DEBUGFUNC("fm10k_tlv_attr_nest_stop");
-
-	/* verify pointer is not NULL */
-	if (!msg)
-		return FM10K_ERR_PARAM;
-
-	/* locate the nested header and retrieve its length */
-	attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
-	len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT;
-
-	/* only include nest if data was added to it */
-	if (len) {
-		len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
-		*msg += len;
-	}
-
-	return FM10K_SUCCESS;
-}
-
 /**
  *  fm10k_tlv_attr_validate - Validate attribute metadata
  *  @attr: Pointer to attribute
@@ -661,74 +546,6 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
 	FM10K_TLV_ATTR_LAST
 };
 
-/**
- *  fm10k_tlv_msg_test_generate_data - Stuff message with data
- *  @msg: Pointer to message
- *  @attr_flags: List of flags indicating what attributes to add
- *
- *  This function is meant to load a message buffer with attribute data
- **/
-STATIC void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
-{
-	DEBUGFUNC("fm10k_tlv_msg_test_generate_data");
-
-	if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
-		fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
-					       test_str);
-	if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
-		fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
-					    test_mac, test_vlan);
-	if (attr_flags & BIT(FM10K_TEST_MSG_U8))
-		fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8,  test_u8);
-	if (attr_flags & BIT(FM10K_TEST_MSG_U16))
-		fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
-	if (attr_flags & BIT(FM10K_TEST_MSG_U32))
-		fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
-	if (attr_flags & BIT(FM10K_TEST_MSG_U64))
-		fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
-	if (attr_flags & BIT(FM10K_TEST_MSG_S8))
-		fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8,  test_s8);
-	if (attr_flags & BIT(FM10K_TEST_MSG_S16))
-		fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
-	if (attr_flags & BIT(FM10K_TEST_MSG_S32))
-		fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
-	if (attr_flags & BIT(FM10K_TEST_MSG_S64))
-		fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
-	if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
-		fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
-					     test_le, 8);
-}
-
-/**
- *  fm10k_tlv_msg_test_create - Create a test message testing all attributes
- *  @msg: Pointer to message
- *  @attr_flags: List of flags indicating what attributes to add
- *
- *  This function is meant to load a message buffer with all attribute types
- *  including a nested attribute.
- **/
-void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
-{
-	u32 *nest = NULL;
-
-	DEBUGFUNC("fm10k_tlv_msg_test_create");
-
-	fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST);
-
-	fm10k_tlv_msg_test_generate_data(msg, attr_flags);
-
-	/* check for nested attributes */
-	attr_flags >>= FM10K_TEST_MSG_NESTED;
-
-	if (attr_flags) {
-		nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED);
-
-		fm10k_tlv_msg_test_generate_data(nest, attr_flags);
-
-		fm10k_tlv_attr_nest_stop(msg);
-	}
-}
-
 /**
  *  fm10k_tlv_msg_test - Validate all results on test message receive
  *  @hw: Pointer to hardware structure
diff --git a/drivers/net/fm10k/base/fm10k_tlv.h b/drivers/net/fm10k/base/fm10k_tlv.h
index af2e4c76a3..1665709d3d 100644
--- a/drivers/net/fm10k/base/fm10k_tlv.h
+++ b/drivers/net/fm10k/base/fm10k_tlv.h
@@ -155,7 +155,6 @@ enum fm10k_tlv_test_attr_id {
 };
 
 extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[];
-void fm10k_tlv_msg_test_create(u32 *, u32);
 s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
 
 #define FM10K_TLV_MSG_TEST_HANDLER(func) \
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index e20bb9ac35..b93000a2aa 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -1115,32 +1115,6 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 	return status;
 }
 
-/**
- * i40e_get_port_mac_addr - get Port MAC address
- * @hw: pointer to the HW structure
- * @mac_addr: pointer to Port MAC address
- *
- * Reads the adapter's Port MAC address
- **/
-enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
-{
-	struct i40e_aqc_mac_address_read_data addrs;
-	enum i40e_status_code status;
-	u16 flags = 0;
-
-	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
-	if (status)
-		return status;
-
-	if (flags & I40E_AQC_PORT_ADDR_VALID)
-		i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
-			I40E_NONDMA_TO_NONDMA);
-	else
-		status = I40E_ERR_INVALID_MAC_ADDR;
-
-	return status;
-}
-
 /**
  * i40e_pre_tx_queue_cfg - pre tx queue configure
  * @hw: pointer to the HW structure
@@ -1173,92 +1147,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
 }
 
-/**
- * i40e_get_san_mac_addr - get SAN MAC address
- * @hw: pointer to the HW structure
- * @mac_addr: pointer to SAN MAC address
- *
- * Reads the adapter's SAN MAC address from NVM
- **/
-enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
-					    u8 *mac_addr)
-{
-	struct i40e_aqc_mac_address_read_data addrs;
-	enum i40e_status_code status;
-	u16 flags = 0;
-
-	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
-	if (status)
-		return status;
-
-	if (flags & I40E_AQC_SAN_ADDR_VALID)
-		i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac),
-			I40E_NONDMA_TO_NONDMA);
-	else
-		status = I40E_ERR_INVALID_MAC_ADDR;
-
-	return status;
-}
-
-/**
- *  i40e_read_pba_string - Reads part number string from EEPROM
- *  @hw: pointer to hardware structure
- *  @pba_num: stores the part number string from the EEPROM
- *  @pba_num_size: part number string buffer length
- *
- *  Reads the part number string from the EEPROM.
- **/
-enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
-					    u32 pba_num_size)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	u16 pba_word = 0;
-	u16 pba_size = 0;
-	u16 pba_ptr = 0;
-	u16 i = 0;
-
-	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
-	if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
-		DEBUGOUT("Failed to read PBA flags or flag is invalid.\n");
-		return status;
-	}
-
-	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
-	if (status != I40E_SUCCESS) {
-		DEBUGOUT("Failed to read PBA Block pointer.\n");
-		return status;
-	}
-
-	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
-	if (status != I40E_SUCCESS) {
-		DEBUGOUT("Failed to read PBA Block size.\n");
-		return status;
-	}
-
-	/* Subtract one to get PBA word count (PBA Size word is included in
-	 * total size)
-	 */
-	pba_size--;
-	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
-		DEBUGOUT("Buffer to small for PBA data.\n");
-		return I40E_ERR_PARAM;
-	}
-
-	for (i = 0; i < pba_size; i++) {
-		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
-		if (status != I40E_SUCCESS) {
-			DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
-			return status;
-		}
-
-		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
-		pba_num[(i * 2) + 1] = pba_word & 0xFF;
-	}
-	pba_num[(pba_size * 2)] = '\0';
-
-	return status;
-}
-
 /**
  * i40e_get_media_type - Gets media type
  * @hw: pointer to the hardware structure
@@ -1970,36 +1858,6 @@ enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_set_link_restart_an
- * @hw: pointer to the hw struct
- * @enable_link: if true: enable link, if false: disable link
- * @cmd_details: pointer to command details structure or NULL
- *
- * Sets up the link and restarts the Auto-Negotiation over the link.
- **/
-enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-		bool enable_link, struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_link_restart_an *cmd =
-		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_link_restart_an);
-
-	cmd->command = I40E_AQ_PHY_RESTART_AN;
-	if (enable_link)
-		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
-	else
-		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_get_link_info
  * @hw: pointer to the hw struct
@@ -2127,98 +1985,6 @@ enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_get_local_advt_reg
- * @hw: pointer to the hw struct
- * @advt_reg: local AN advertisement register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get the Local AN advertisement register value.
- **/
-enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
-				u64 *advt_reg,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_an_advt_reg *resp =
-		(struct i40e_aqc_an_advt_reg *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_get_local_advt_reg);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (status != I40E_SUCCESS)
-		goto aq_get_local_advt_reg_exit;
-
-	*advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
-	*advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
-
-aq_get_local_advt_reg_exit:
-	return status;
-}
-
-/**
- * i40e_aq_set_local_advt_reg
- * @hw: pointer to the hw struct
- * @advt_reg: local AN advertisement register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get the Local AN advertisement register value.
- **/
-enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
-				u64 advt_reg,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_an_advt_reg *cmd =
-		(struct i40e_aqc_an_advt_reg *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_get_local_advt_reg);
-
-	cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
-	cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_get_partner_advt
- * @hw: pointer to the hw struct
- * @advt_reg: AN partner advertisement register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get the link partner AN advertisement register value.
- **/
-enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
-				u64 *advt_reg,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_an_advt_reg *resp =
-		(struct i40e_aqc_an_advt_reg *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_get_partner_advt);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (status != I40E_SUCCESS)
-		goto aq_get_partner_advt_exit;
-
-	*advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
-	*advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
-
-aq_get_partner_advt_exit:
-	return status;
-}
-
 /**
  * i40e_aq_set_lb_modes
  * @hw: pointer to the hw struct
@@ -2246,32 +2012,6 @@ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_set_phy_debug
- * @hw: pointer to the hw struct
- * @cmd_flags: debug command flags
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_phy_debug *cmd =
-		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_phy_debug);
-
-	cmd->command_flags = cmd_flags;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_hw_ver_ge
  * @hw: pointer to the hw struct
@@ -2333,62 +2073,6 @@ enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_set_default_vsi
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
-				u16 seid,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
-		(struct i40e_aqc_set_vsi_promiscuous_modes *)
-		&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_vsi_promiscuous_modes);
-
-	cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
-	cmd->seid = CPU_TO_LE16(seid);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_clear_default_vsi
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
-				u16 seid,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
-		(struct i40e_aqc_set_vsi_promiscuous_modes *)
-		&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_vsi_promiscuous_modes);
-
-	cmd->promiscuous_flags = CPU_TO_LE16(0);
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
-	cmd->seid = CPU_TO_LE16(seid);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_set_vsi_unicast_promiscuous
  * @hw: pointer to the hw struct
@@ -2463,36 +2147,34 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
 }
 
 /**
-* i40e_aq_set_vsi_full_promiscuous
-* @hw: pointer to the hw struct
-* @seid: VSI number
-* @set: set promiscuous enable/disable
-* @cmd_details: pointer to command details structure or NULL
-**/
-enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
-				u16 seid, bool set,
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+				u16 seid, bool set_filter,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
 	enum i40e_status_code status;
-	u16 flags = 0;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-		i40e_aqc_opc_set_vsi_promiscuous_modes);
-
-	if (set)
-		flags = I40E_AQC_SET_VSI_PROMISC_UNICAST   |
-			I40E_AQC_SET_VSI_PROMISC_MULTICAST |
-			I40E_AQC_SET_VSI_PROMISC_BROADCAST;
-
-	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST   |
-				       I40E_AQC_SET_VSI_PROMISC_MULTICAST |
-				       I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	if (set_filter)
+		cmd->promiscuous_flags
+			    |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	else
+		cmd->promiscuous_flags
+			    &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
 
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
 	cmd->seid = CPU_TO_LE16(seid);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -2500,15 +2182,14 @@ enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
 }
 
 /**
- * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
  * @hw: pointer to the hw struct
  * @seid: vsi number
  * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
  * @cmd_details: pointer to command details structure or NULL
  **/
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
-				u16 seid, bool enable, u16 vid,
+enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+				u16 seid, bool enable,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -2519,14 +2200,12 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					i40e_aqc_opc_set_vsi_promiscuous_modes);
-
 	if (enable)
-		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
 
 	cmd->promiscuous_flags = CPU_TO_LE16(flags);
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
 	cmd->seid = CPU_TO_LE16(seid);
-	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -2534,166 +2213,26 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
 }
 
 /**
- * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * i40e_get_vsi_params - get VSI configuration info
  * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @vsi_ctx: pointer to a vsi context struct
  * @cmd_details: pointer to command details structure or NULL
  **/
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
-				u16 seid, bool enable, u16 vid,
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
-		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
 	enum i40e_status_code status;
-	u16 flags = 0;
 
+	UNREFERENCED_1PARAMETER(cmd_details);
 	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_vsi_promiscuous_modes);
-
-	if (enable) {
-		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-		if (i40e_hw_ver_ge(hw, 1, 5))
-			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
-	}
-
-	cmd->promiscuous_flags = CPU_TO_LE16(flags);
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-	if (i40e_hw_ver_ge(hw, 1, 5))
-		cmd->valid_flags |=
-			CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
-	cmd->seid = CPU_TO_LE16(seid);
-	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_set_vsi_bc_promisc_on_vlan
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set broadcast promiscuous enable/disable for a given VLAN
- * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
-				u16 seid, bool enable, u16 vid,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
-		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 flags = 0;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_vsi_promiscuous_modes);
-
-	if (enable)
-		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
-
-	cmd->promiscuous_flags = CPU_TO_LE16(flags);
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
-	cmd->seid = CPU_TO_LE16(seid);
-	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_set_vsi_broadcast
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @set_filter: true to set filter, false to clear filter
- * @cmd_details: pointer to command details structure or NULL
- *
- * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
- **/
-enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
-				u16 seid, bool set_filter,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
-		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_vsi_promiscuous_modes);
-
-	if (set_filter)
-		cmd->promiscuous_flags
-			    |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
-	else
-		cmd->promiscuous_flags
-			    &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
-
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
-	cmd->seid = CPU_TO_LE16(seid);
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
-				u16 seid, bool enable,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
-		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 flags = 0;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_vsi_promiscuous_modes);
-	if (enable)
-		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
-	cmd->promiscuous_flags = CPU_TO_LE16(flags);
-	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
-	cmd->seid = CPU_TO_LE16(seid);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_get_vsi_params - get VSI configuration info
- * @hw: pointer to the hw struct
- * @vsi_ctx: pointer to a vsi context struct
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
-				struct i40e_vsi_context *vsi_ctx,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_get_update_vsi *cmd =
-		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
-	struct i40e_aqc_add_get_update_vsi_completion *resp =
-		(struct i40e_aqc_add_get_update_vsi_completion *)
-		&desc.params.raw;
-	enum i40e_status_code status;
-
-	UNREFERENCED_1PARAMETER(cmd_details);
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_get_vsi_parameters);
+					  i40e_aqc_opc_get_vsi_parameters);
 
 	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
 
@@ -2867,73 +2406,6 @@ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_send_driver_version
- * @hw: pointer to the hw struct
- * @dv: driver's major, minor version
- * @cmd_details: pointer to command details structure or NULL
- *
- * Send the driver version to the firmware
- **/
-enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
-				struct i40e_driver_version *dv,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_driver_version *cmd =
-		(struct i40e_aqc_driver_version *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 len;
-
-	if (dv == NULL)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
-
-	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
-	cmd->driver_major_ver = dv->major_version;
-	cmd->driver_minor_ver = dv->minor_version;
-	cmd->driver_build_ver = dv->build_version;
-	cmd->driver_subbuild_ver = dv->subbuild_version;
-
-	len = 0;
-	while (len < sizeof(dv->driver_string) &&
-	       (dv->driver_string[len] < 0x80) &&
-	       dv->driver_string[len])
-		len++;
-	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
-				       len, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_get_link_status - get status of the HW network link
- * @hw: pointer to the hw struct
- * @link_up: pointer to bool (true/false = linkup/linkdown)
- *
- * Sets link_up to true if the link is up, false if it is down.
- * The value of link_up is invalid if the returned status != I40E_SUCCESS
- *
- * Side effect: LinkStatusEvent reporting becomes enabled
- **/
-enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-
-	if (hw->phy.get_link_info) {
-		status = i40e_update_link_info(hw);
-
-		if (status != I40E_SUCCESS)
-			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
-				   status);
-	}
-
-	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
-
-	return status;
-}
-
 /**
  * i40e_update_link_info - update status of the HW network link
  * @hw: pointer to the hw struct
@@ -2973,31 +2445,6 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
 	return status;
 }
 
-
-/**
- * i40e_get_link_speed
- * @hw: pointer to the hw struct
- *
- * Returns the link speed of the adapter.
- **/
-enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
-{
-	enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
-	enum i40e_status_code status = I40E_SUCCESS;
-
-	if (hw->phy.get_link_info) {
-		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
-
-		if (status != I40E_SUCCESS)
-			goto i40e_link_speed_exit;
-	}
-
-	speed = hw->phy.link_info.link_speed;
-
-i40e_link_speed_exit:
-	return speed;
-}
-
 /**
  * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
  * @hw: pointer to the hw struct
@@ -3204,134 +2651,6 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 	return status;
 }
 
-/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
-			u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
-			u16 count, __le16 *mr_list,
-			struct i40e_asq_cmd_details *cmd_details,
-			u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_delete_mirror_rule *cmd =
-		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
-	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
-	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 buf_size;
-
-	buf_size = count * sizeof(*mr_list);
-
-	/* prep the rest of the request */
-	i40e_fill_default_direct_cmd_desc(&desc, opcode);
-	cmd->seid = CPU_TO_LE16(sw_seid);
-	cmd->rule_type = CPU_TO_LE16(rule_type &
-				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
-	cmd->num_entries = CPU_TO_LE16(count);
-	/* Dest VSI for add, rule_id for delete */
-	cmd->destination = CPU_TO_LE16(id);
-	if (mr_list) {
-		desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
-						I40E_AQ_FLAG_RD));
-		if (buf_size > I40E_AQ_LARGE_BUF)
-			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-	}
-
-	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
-				       cmd_details);
-	if (status == I40E_SUCCESS ||
-	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
-		if (rule_id)
-			*rule_id = LE16_TO_CPU(resp->rule_id);
-		if (rules_used)
-			*rules_used = LE16_TO_CPU(resp->mirror_rules_used);
-		if (rules_free)
-			*rules_free = LE16_TO_CPU(resp->mirror_rules_free);
-	}
-	return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
-			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
-			struct i40e_asq_cmd_details *cmd_details,
-			u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
-	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
-	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
-		if (count == 0 || !mr_list)
-			return I40E_ERR_PARAM;
-	}
-
-	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
-				  rule_type, dest_vsi, count, mr_list,
-				  cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- *		add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
-			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
-			struct i40e_asq_cmd_details *cmd_details,
-			u16 *rules_used, u16 *rules_free)
-{
-	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
-	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
-		/* count and mr_list shall be valid for rule_type INGRESS VLAN
-		 * mirroring. For other rule_type, count and rule_type should
-		 * not matter.
-		 */
-		if (count == 0 || !mr_list)
-			return I40E_ERR_PARAM;
-	}
-
-	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
-				  rule_type, rule_id, count, mr_list,
-				  cmd_details, NULL, rules_used, rules_free);
-}
-
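For reference, the mirror-rule wrappers above all funnel into i40e_mirrorrule_op(): rule_type selects ingress/egress/VLAN mirroring, id is the destination VSI SEID on add and the rule ID on delete, and mr_list is only required for VLAN-style rules. A caller mirroring all ingress traffic of a switch element would have looked roughly like the sketch below; this is illustrative only, example_mirror_ingress is a hypothetical name, and it relies on the wrapper removed above.

static enum i40e_status_code
example_mirror_ingress(struct i40e_hw *hw, u16 sw_seid, u16 dst_vsi_seid,
		       u16 *rule_id)
{
	u16 rules_used = 0, rules_free = 0;

	/* ALL_INGRESS rules carry no VSI/VLAN list, so count is 0 and
	 * mr_list is NULL; firmware returns the allocated rule ID.
	 */
	return i40e_aq_add_mirrorrule(hw, sw_seid,
				      I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
				      dst_vsi_seid, 0, NULL, NULL,
				      rule_id, &rules_used, &rules_free);
}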
 /**
  * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
  * @hw: pointer to the hw struct
@@ -3638,196 +2957,41 @@ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
 }
 
 /**
- * i40e_aq_read_nvm_config - read an nvm config block
+ * i40e_aq_erase_nvm
  * @hw: pointer to the hw struct
- * @cmd_flags: NVM access admin command bits
- * @field_id: field or feature id
- * @data: buffer for result
- * @buf_size: buffer size
- * @element_count: pointer to count of elements read by FW
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
  * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
  **/
-enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
-				u8 cmd_flags, u32 field_id, void *data,
-				u16 buf_size, u16 *element_count,
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, bool last_command,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_nvm_config_read *cmd =
-		(struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
 	enum i40e_status_code status;
 
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
-	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
-	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	DEBUGFUNC("i40e_aq_erase_nvm");
 
-	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
-	cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
-	if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
-		cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
-	else
-		cmd->element_id_msw = 0;
+	/* In offset the highest byte must be zeroed. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_erase_nvm_exit;
+	}
 
-	status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
 
-	if (!status && element_count)
-		*element_count = LE16_TO_CPU(cmd->element_count);
-
-	return status;
-}
-
-/**
- * i40e_aq_write_nvm_config - write an nvm config block
- * @hw: pointer to the hw struct
- * @cmd_flags: NVM access admin command bits
- * @data: buffer for result
- * @buf_size: buffer size
- * @element_count: count of elements to be written
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
-				u8 cmd_flags, void *data, u16 buf_size,
-				u16 element_count,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_nvm_config_write *cmd =
-		(struct i40e_aqc_nvm_config_write *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
-	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
-	cmd->element_count = CPU_TO_LE16(element_count);
-	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
-	status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_nvm_update_in_process
- * @hw: pointer to the hw struct
- * @update_flow_state: True indicates that update flow starts, false that ends
- * @cmd_details: pointer to command details structure or NULL
- *
- * Indicate NVM update in process.
- **/
-enum i40e_status_code
-i40e_aq_nvm_update_in_process(struct i40e_hw *hw,
-			      bool update_flow_state,
-			      struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_nvm_update_in_process *cmd =
-		(struct i40e_aqc_nvm_update_in_process *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_nvm_update_in_process);
-
-	cmd->command = I40E_AQ_UPDATE_FLOW_END;
-
-	if (update_flow_state)
-		cmd->command |= I40E_AQ_UPDATE_FLOW_START;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_min_rollback_rev_update - triggers an ow after update
- * @hw: pointer to the hw struct
- * @mode: opt-in mode, 1b for single module update, 0b for bulk update
- * @module: module to be updated. Ignored if mode is 0b
- * @min_rrev: value of the new minimal version. Ignored if mode is 0b
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code
-i40e_aq_min_rollback_rev_update(struct i40e_hw *hw, u8 mode, u8 module,
-				u32 min_rrev,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_rollback_revision_update *cmd =
-		(struct i40e_aqc_rollback_revision_update *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-		i40e_aqc_opc_rollback_revision_update);
-	cmd->optin_mode = mode;
-	cmd->module_selected = module;
-	cmd->min_rrev = min_rrev;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_oem_post_update - triggers an OEM specific flow after update
- * @hw: pointer to the hw struct
- * @buff: buffer for result
- * @buff_size: buffer size
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
-				void *buff, u16 buff_size,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	enum i40e_status_code status;
-
-	UNREFERENCED_2PARAMETER(buff, buff_size);
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-	if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
-		status = I40E_ERR_NOT_IMPLEMENTED;
-
-	return status;
-}
-
-/**
- * i40e_aq_erase_nvm
- * @hw: pointer to the hw struct
- * @module_pointer: module pointer location in words from the NVM beginning
- * @offset: offset in the module (expressed in 4 KB from module's beginning)
- * @length: length of the section to be erased (expressed in 4 KB)
- * @last_command: tells if this is the last command in a series
- * @cmd_details: pointer to command details structure or NULL
- *
- * Erase the NVM sector using the admin queue commands
- **/
-enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
-				u32 offset, u16 length, bool last_command,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_nvm_update *cmd =
-		(struct i40e_aqc_nvm_update *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	DEBUGFUNC("i40e_aq_erase_nvm");
-
-	/* In offset the highest byte must be zeroed. */
-	if (offset & 0xFF000000) {
-		status = I40E_ERR_PARAM;
-		goto i40e_aq_erase_nvm_exit;
-	}
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
-
-	/* If this is the last command in a series, set the proper flag. */
-	if (last_command)
-		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
-	cmd->module_pointer = module_pointer;
-	cmd->offset = CPU_TO_LE32(offset);
-	cmd->length = CPU_TO_LE16(length);
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
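The erase helper kept above works on 4 KB NVM sectors: module_pointer selects the module, offset and length are expressed in 4 KB units, the top byte of offset must be zero, and last_command closes a multi-command series. A minimal illustrative caller follows; example_erase_one_sector is a hypothetical name and it assumes the NVM resource has already been acquired.

static enum i40e_status_code
example_erase_one_sector(struct i40e_hw *hw, u8 module, u32 sector)
{
	/* erase a single 4 KB sector and mark it as the last command
	 * in the series; NULL means no extra command details
	 */
	return i40e_aq_erase_nvm(hw, module, sector, 1, true, NULL);
}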
@@ -4302,43 +3466,6 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 	return status;
 }
 
-/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw,
-				u8 rearrange_nvm,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aqc_nvm_update *cmd;
-	enum i40e_status_code status;
-	struct i40e_aq_desc desc;
-
-	DEBUGFUNC("i40e_aq_rearrange_nvm");
-
-	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
-	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
-			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
-	if (!rearrange_nvm) {
-		status = I40E_ERR_PARAM;
-		goto i40e_aq_rearrange_nvm_exit;
-	}
-
-	cmd->command_flags |= rearrange_nvm;
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
-	return status;
-}
-
 /**
  * i40e_aq_get_lldp_mib
  * @hw: pointer to the hw struct
@@ -4459,44 +3586,6 @@ enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore is set to true. Otherwise,
- * only return the factory setting in the AQ response.
- **/
-enum i40e_status_code
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
-		     struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_lldp_restore *cmd =
-		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
-		i40e_debug(hw, I40E_DEBUG_ALL,
-			   "Restore LLDP not supported by current FW version.\n");
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
-	}
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
-	if (restore)
-		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (setting)
-		*setting = cmd->command & 1;
-
-	return status;
-}
-
 /**
  * i40e_aq_stop_lldp
  * @hw: pointer to the hw struct
@@ -4567,37 +3656,6 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_set_dcb_parameters
- * @hw: pointer to the hw struct
- * @cmd_details: pointer to command details structure or NULL
- * @dcb_enable: True if DCB configuration needs to be applied
- *
- **/
-enum i40e_status_code
-i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
-			   struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_dcb_parameters *cmd =
-		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_set_dcb_parameters);
-
-	if (dcb_enable) {
-		cmd->valid_flags = I40E_DCB_VALID;
-		cmd->command = I40E_AQ_DCB_SET_AGENT;
-	}
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_get_cee_dcb_config
  * @hw: pointer to the hw struct
@@ -4626,36 +3684,6 @@ enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
- * @hw: pointer to the hw struct
- * @start_agent: True if DCBx Agent needs to be Started
- *				False if DCBx Agent needs to be Stopped
- * @cmd_details: pointer to command details structure or NULL
- *
- * Start/Stop the embedded dcbx Agent
- **/
-enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
-				bool start_agent,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
-		(struct i40e_aqc_lldp_stop_start_specific_agent *)
-				&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-				i40e_aqc_opc_lldp_stop_start_spec_agent);
-
-	if (start_agent)
-		cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_add_udp_tunnel
  * @hw: pointer to the hw struct
@@ -4716,45 +3744,6 @@ enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
 	return status;
 }
 
-/**
- * i40e_aq_get_switch_resource_alloc (0x0204)
- * @hw: pointer to the hw struct
- * @num_entries: pointer to u8 to store the number of resource entries returned
- * @buf: pointer to a user supplied buffer.  This buffer must be large enough
- *        to store the resource information for all resource types.  Each
- *        resource type is a i40e_aqc_switch_resource_alloc_data structure.
- * @count: size, in bytes, of the buffer provided
- * @cmd_details: pointer to command details structure or NULL
- *
- * Query the resources allocated to a function.
- **/
-enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
-			u8 *num_entries,
-			struct i40e_aqc_switch_resource_alloc_element_resp *buf,
-			u16 count,
-			struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
-		(struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 length = count * sizeof(*buf);
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_get_switch_resource_alloc);
-
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
-	if (length > I40E_AQ_LARGE_BUF)
-		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
-	status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
-
-	if (!status && num_entries)
-		*num_entries = cmd_resp->num_entries;
-
-	return status;
-}
-
 /**
  * i40e_aq_delete_element - Delete switch element
  * @hw: pointer to the hw struct
@@ -4784,178 +3773,45 @@ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 }
 
 /**
- * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
- * @hw: pointer to the hw struct
- * @flags: component flags
- * @mac_seid: uplink seid (MAC SEID)
- * @vsi_seid: connected vsi seid
- * @ret_seid: seid of create pv component
- *
- * This instantiates an i40e port virtualizer with specified flags.
- * Depending on specified flags the port virtualizer can act as a
- * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
- */
-enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
-				       u16 mac_seid, u16 vsi_seid,
-				       u16 *ret_seid)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_update_pv *cmd =
-		(struct i40e_aqc_add_update_pv *)&desc.params.raw;
-	struct i40e_aqc_add_update_pv_completion *resp =
-		(struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (vsi_seid == 0)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
-	cmd->command_flags = CPU_TO_LE16(flags);
-	cmd->uplink_seid = CPU_TO_LE16(mac_seid);
-	cmd->connected_seid = CPU_TO_LE16(vsi_seid);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-	if (!status && ret_seid)
-		*ret_seid = LE16_TO_CPU(resp->pv_seid);
-
-	return status;
-}
-
-/**
- * i40e_aq_add_tag - Add an S/E-tag
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
  * @hw: pointer to the hw struct
- * @direct_to_queue: should s-tag direct flow to a specific queue
- * @vsi_seid: VSI SEID to use this tag
- * @tag: value of the tag
- * @queue_num: queue number, only valid if direct_to_queue is true
- * @tags_used: return value, number of tags in use by this PF
- * @tags_free: return value, number of unallocated tags
+ * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated M-tags
  * @cmd_details: pointer to command details structure or NULL
  *
- * This associates an S- or E-tag to a VSI in the switch complex.  It returns
+ * This associates a multicast E-tag to a port virtualizer.  It will return
  * the number of tags allocated by the PF, and the number of unallocated
  * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
  **/
-enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
-				u16 vsi_seid, u16 tag, u16 queue_num,
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+				u16 etag, u8 num_tags_in_buf, void *buf,
 				u16 *tags_used, u16 *tags_free,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_tag *cmd =
-		(struct i40e_aqc_add_tag *)&desc.params.raw;
-	struct i40e_aqc_add_remove_tag_completion *resp =
-		(struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+	struct i40e_aqc_add_remove_mcast_etag *cmd =
+		(struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+	struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+	   (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
 	enum i40e_status_code status;
+	u16 length = sizeof(u16) * num_tags_in_buf;
 
-	if (vsi_seid == 0)
+	if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
 		return I40E_ERR_PARAM;
 
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_multicast_etag);
 
-	cmd->seid = CPU_TO_LE16(vsi_seid);
-	cmd->tag = CPU_TO_LE16(tag);
-	if (direct_to_queue) {
-		cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
-		cmd->queue_number = CPU_TO_LE16(queue_num);
-	}
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (!status) {
-		if (tags_used != NULL)
-			*tags_used = LE16_TO_CPU(resp->tags_used);
-		if (tags_free != NULL)
-			*tags_free = LE16_TO_CPU(resp->tags_free);
-	}
-
-	return status;
-}
-
-/**
- * i40e_aq_remove_tag - Remove an S- or E-tag
- * @hw: pointer to the hw struct
- * @vsi_seid: VSI SEID this tag is associated with
- * @tag: value of the S-tag to delete
- * @tags_used: return value, number of tags in use by this PF
- * @tags_free: return value, number of unallocated tags
- * @cmd_details: pointer to command details structure or NULL
- *
- * This deletes an S- or E-tag from a VSI in the switch complex.  It returns
- * the number of tags allocated by the PF, and the number of unallocated
- * tags available.
- **/
-enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
-				u16 tag, u16 *tags_used, u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_remove_tag *cmd =
-		(struct i40e_aqc_remove_tag *)&desc.params.raw;
-	struct i40e_aqc_add_remove_tag_completion *resp =
-		(struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (vsi_seid == 0)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
-
-	cmd->seid = CPU_TO_LE16(vsi_seid);
-	cmd->tag = CPU_TO_LE16(tag);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (!status) {
-		if (tags_used != NULL)
-			*tags_used = LE16_TO_CPU(resp->tags_used);
-		if (tags_free != NULL)
-			*tags_free = LE16_TO_CPU(resp->tags_free);
-	}
-
-	return status;
-}
-
-/**
- * i40e_aq_add_mcast_etag - Add a multicast E-tag
- * @hw: pointer to the hw struct
- * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
- * @etag: value of E-tag to add
- * @num_tags_in_buf: number of unicast E-tags in indirect buffer
- * @buf: address of indirect buffer
- * @tags_used: return value, number of E-tags in use by this port
- * @tags_free: return value, number of unallocated M-tags
- * @cmd_details: pointer to command details structure or NULL
- *
- * This associates a multicast E-tag to a port virtualizer.  It will return
- * the number of tags allocated by the PF, and the number of unallocated
- * tags available.
- *
- * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
- * num_tags_in_buf long.
- **/
-enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
-				u16 etag, u8 num_tags_in_buf, void *buf,
-				u16 *tags_used, u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_remove_mcast_etag *cmd =
-		(struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
-	struct i40e_aqc_add_remove_mcast_etag_completion *resp =
-	   (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 length = sizeof(u16) * num_tags_in_buf;
-
-	if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_add_multicast_etag);
-
-	cmd->pv_seid = CPU_TO_LE16(pv_seid);
-	cmd->etag = CPU_TO_LE16(etag);
-	cmd->num_unicast_etags = num_tags_in_buf;
+	cmd->pv_seid = CPU_TO_LE16(pv_seid);
+	cmd->etag = CPU_TO_LE16(etag);
+	cmd->num_unicast_etags = num_tags_in_buf;
 
 	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 
@@ -4971,239 +3827,6 @@ enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
 	return status;
 }
 
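i40e_aq_add_mcast_etag(), kept above, takes its unicast E-tags through an indirect buffer: buf is an array of 16-bit E-tags and num_tags_in_buf is the element count, which the helper turns into the buffer length handed to the admin queue. A small illustrative caller, with placeholder tag values and a hypothetical example_ name:

static enum i40e_status_code
example_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid)
{
	u16 unicast_etags[2] = { 0x100, 0x101 };	/* placeholder E-tags */
	u16 tags_used = 0, tags_free = 0;

	/* associate multicast E-tag 0x200 with the two unicast E-tags above */
	return i40e_aq_add_mcast_etag(hw, pv_seid, 0x200,
				      2, unicast_etags,
				      &tags_used, &tags_free, NULL);
}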
-/**
- * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
- * @hw: pointer to the hw struct
- * @pv_seid: Port Virtualizer SEID this M-tag is associated with
- * @etag: value of the E-tag to remove
- * @tags_used: return value, number of tags in use by this port
- * @tags_free: return value, number of unallocated tags
- * @cmd_details: pointer to command details structure or NULL
- *
- * This deletes an E-tag from the port virtualizer.  It will return
- * the number of tags allocated by the port, and the number of unallocated
- * tags available.
- **/
-enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
-				u16 etag, u16 *tags_used, u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_remove_mcast_etag *cmd =
-		(struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
-	struct i40e_aqc_add_remove_mcast_etag_completion *resp =
-	   (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-
-
-	if (pv_seid == 0)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_remove_multicast_etag);
-
-	cmd->pv_seid = CPU_TO_LE16(pv_seid);
-	cmd->etag = CPU_TO_LE16(etag);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (!status) {
-		if (tags_used != NULL)
-			*tags_used = LE16_TO_CPU(resp->mcast_etags_used);
-		if (tags_free != NULL)
-			*tags_free = LE16_TO_CPU(resp->mcast_etags_free);
-	}
-
-	return status;
-}
-
-/**
- * i40e_aq_update_tag - Update an S/E-tag
- * @hw: pointer to the hw struct
- * @vsi_seid: VSI SEID using this S-tag
- * @old_tag: old tag value
- * @new_tag: new tag value
- * @tags_used: return value, number of tags in use by this PF
- * @tags_free: return value, number of unallocated tags
- * @cmd_details: pointer to command details structure or NULL
- *
- * This updates the value of the tag currently attached to this VSI
- * in the switch complex.  It will return the number of tags allocated
- * by the PF, and the number of unallocated tags available.
- **/
-enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
-				u16 old_tag, u16 new_tag, u16 *tags_used,
-				u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_update_tag *cmd =
-		(struct i40e_aqc_update_tag *)&desc.params.raw;
-	struct i40e_aqc_update_tag_completion *resp =
-		(struct i40e_aqc_update_tag_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (vsi_seid == 0)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
-
-	cmd->seid = CPU_TO_LE16(vsi_seid);
-	cmd->old_tag = CPU_TO_LE16(old_tag);
-	cmd->new_tag = CPU_TO_LE16(new_tag);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (!status) {
-		if (tags_used != NULL)
-			*tags_used = LE16_TO_CPU(resp->tags_used);
-		if (tags_free != NULL)
-			*tags_free = LE16_TO_CPU(resp->tags_free);
-	}
-
-	return status;
-}
-
-/**
- * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
- * @hw: pointer to the hw struct
- * @tcmap: TC map for request/release any ignore PFC condition
- * @request: request or release ignore PFC condition
- * @tcmap_ret: return TCs for which PFC is currently ignored
- * @cmd_details: pointer to command details structure or NULL
- *
- * This sends out request/release to ignore PFC condition for a TC.
- * It will return the TCs for which PFC is currently ignored.
- **/
-enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
-				bool request, u8 *tcmap_ret,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_pfc_ignore *cmd_resp =
-		(struct i40e_aqc_pfc_ignore *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
-
-	if (request)
-		cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
-
-	cmd_resp->tc_bitmap = tcmap;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (!status) {
-		if (tcmap_ret != NULL)
-			*tcmap_ret = cmd_resp->tc_bitmap;
-	}
-
-	return status;
-}
-
-/**
- * i40e_aq_dcb_updated - DCB Updated Command
- * @hw: pointer to the hw struct
- * @cmd_details: pointer to command details structure or NULL
- *
- * When LLDP is handled in PF this command is used by the PF
- * to notify EMP that a DCB setting is modified.
- * When LLDP is handled in EMP this command is used by the PF
- * to notify EMP whenever one of the following parameters get
- * modified:
- *   - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
- *   - PCIRTT in PRTDCB_GENC.PCIRTT
- *   - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
- * EMP will return when the shared RPB settings have been
- * recomputed and modified. The retval field in the descriptor
- * will be set to 0 when RPB is modified.
- **/
-enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
- * @hw: pointer to the hw struct
- * @seid: defines the SEID of the switch for which the stats are requested
- * @vlan_id: the VLAN ID for which the statistics are requested
- * @stat_index: index of the statistics counters block assigned to this VLAN
- * @cmd_details: pointer to command details structure or NULL
- *
- * XL710 supports 128 smonVlanStats counters. This command is used to
- * allocate a set of smonVlanStats counters to a specific VLAN in a specific
- * switch.
- **/
-enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
-				u16 vlan_id, u16 *stat_index,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_remove_statistics *cmd_resp =
-		(struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if ((seid == 0) || (stat_index == NULL))
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
-
-	cmd_resp->seid = CPU_TO_LE16(seid);
-	cmd_resp->vlan = CPU_TO_LE16(vlan_id);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (!status && stat_index)
-		*stat_index = LE16_TO_CPU(cmd_resp->stat_index);
-
-	return status;
-}
-
-/**
- * i40e_aq_remove_statistics - Remove a statistics block to a VLAN in a switch.
- * @hw: pointer to the hw struct
- * @seid: defines the SEID of the switch for which the stats are requested
- * @vlan_id: the VLAN ID for which the statistics are requested
- * @stat_index: index of the statistics counters block assigned to this VLAN
- * @cmd_details: pointer to command details structure or NULL
- *
- * XL710 supports 128 smonVlanStats counters. This command is used to
- * deallocate a set of smonVlanStats counters from a specific VLAN in a specific
- * switch.
- **/
-enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
-				u16 vlan_id, u16 stat_index,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_remove_statistics *cmd =
-		(struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (seid == 0)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_remove_statistics);
-
-	cmd->seid = CPU_TO_LE16(seid);
-	cmd->vlan  = CPU_TO_LE16(vlan_id);
-	cmd->stat_index = CPU_TO_LE16(stat_index);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_set_port_parameters - set physical port parameters.
  * @hw: pointer to the hw struct
@@ -5332,35 +3955,6 @@ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
- * @hw: pointer to the hw struct
- * @seid: switching component seid
- * @credit: BW limit credits (0 = disabled)
- * @max_bw: Max BW limit credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
-				u16 seid, u16 credit, u8 max_bw,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
-	  (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-				i40e_aqc_opc_configure_switching_comp_bw_limit);
-
-	cmd->seid = CPU_TO_LE16(seid);
-	cmd->credit = CPU_TO_LE16(credit);
-	cmd->max_bw = max_bw;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
  * @hw: pointer to the hw struct
@@ -5430,23 +4024,6 @@ enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
 			    cmd_details);
 }
 
-/**
- * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
- * @hw: pointer to the hw struct
- * @seid: seid of the switching component
- * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
-	struct i40e_hw *hw, u16 seid,
-	struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
-	struct i40e_asq_cmd_details *cmd_details)
-{
-	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
-			    i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
-			    cmd_details);
-}
-
 /**
  * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
  * @hw: pointer to the hw struct
@@ -5499,27 +4076,10 @@ enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
 }
 
 /**
- * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
  * @hw: pointer to the hw struct
- * @seid: seid of the VSI or switching component connected to Physical Port
- * @bw_data: Buffer to hold current ETS configuration for the Physical Port
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
-			u16 seid,
-			struct i40e_aqc_query_port_ets_config_resp *bw_data,
-			struct i40e_asq_cmd_details *cmd_details)
-{
-	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
-				    i40e_aqc_opc_query_port_ets_config,
-				    cmd_details);
-}
-
-/**
- * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
- * @hw: pointer to the hw struct
- * @seid: seid of the switching component
- * @bw_data: Buffer to hold switching component's BW configuration
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
@@ -5758,28 +4318,6 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 	return status;
 }
 
-/**
- * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control
- * @hw: pointer to the hw struct
- * @seid: VSI seid to add ethertype filter from
- **/
-void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
-						    u16 seid)
-{
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
-	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
-		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
-		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
-	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
-	enum i40e_status_code status;
-
-	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
-						       seid, 0, true, NULL,
-						       NULL);
-	if (status)
-		DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
-}
-
 /**
  * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
  * @filters: list of cloud filters
@@ -5900,649 +4438,195 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
 		}
 	}
 
-	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
-
-	return status;
-}
-
-/**
- * i40e_aq_rem_cloud_filters
- * @hw: pointer to the hardware structure
- * @seid: VSI seid to remove cloud filters from
- * @filters: Buffer which contains the filters to be removed
- * @filter_count: number of filters contained in the buffer
- *
- * Remove the cloud filters for a given VSI.  The contents of the
- * i40e_aqc_cloud_filters_element_data are filled in by the caller
- * of the function.
- *
- **/
-enum i40e_status_code
-i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
-			  struct i40e_aqc_cloud_filters_element_data *filters,
-			  u8 filter_count)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_remove_cloud_filters *cmd =
-	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 buff_len;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_remove_cloud_filters);
-
-	buff_len = filter_count * sizeof(*filters);
-	desc.datalen = CPU_TO_LE16(buff_len);
-	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	cmd->num_filters = filter_count;
-	cmd->seid = CPU_TO_LE16(seid);
-
-	i40e_fix_up_geneve_vni(filters, filter_count);
-
-	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
-
-	return status;
-}
-
-/**
- * i40e_aq_rem_cloud_filters_bb
- * @hw: pointer to the hardware structure
- * @seid: VSI seid to remove cloud filters from
- * @filters: Buffer which contains the filters in big buffer to be removed
- * @filter_count: number of filters contained in the buffer
- *
- * Remove the big buffer cloud filters for a given VSI.  The contents of the
- * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
- * function.
- *
- **/
-enum i40e_status_code
-i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
-			     struct i40e_aqc_cloud_filters_element_bb *filters,
-			     u8 filter_count)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_add_remove_cloud_filters *cmd =
-	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 buff_len;
-	int i;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_remove_cloud_filters);
-
-	buff_len = filter_count * sizeof(*filters);
-	desc.datalen = CPU_TO_LE16(buff_len);
-	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	cmd->num_filters = filter_count;
-	cmd->seid = CPU_TO_LE16(seid);
-	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
-
-	for (i = 0; i < filter_count; i++) {
-		u16 tnl_type;
-		u32 ti;
-
-		tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
-			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
-			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
-
-		/* Due to hardware eccentricities, the VNI for Geneve is shifted
-		 * one more byte further than normally used for Tenant ID in
-		 * other tunnel types.
-		 */
-		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
-			ti = LE32_TO_CPU(filters[i].element.tenant_id);
-			filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
-		}
-	}
-
-	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
-
-	return status;
-}
-
-/**
- * i40e_aq_replace_cloud_filters - Replace cloud filter command
- * @hw: pointer to the hw struct
- * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
- * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
- *
- **/
-enum
-i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
-	struct i40e_aqc_replace_cloud_filters_cmd *filters,
-	struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_replace_cloud_filters_cmd *cmd =
-		(struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
-	enum i40e_status_code status = I40E_SUCCESS;
-	int i = 0;
-
-	/* X722 doesn't support this command */
-	if (hw->mac.type == I40E_MAC_X722)
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
-
-	/* need FW version greater than 6.00 */
-	if (hw->aq.fw_maj_ver < 6)
-		return I40E_NOT_SUPPORTED;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_replace_cloud_filters);
-
-	desc.datalen = CPU_TO_LE16(32);
-	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	cmd->old_filter_type = filters->old_filter_type;
-	cmd->new_filter_type = filters->new_filter_type;
-	cmd->valid_flags = filters->valid_flags;
-	cmd->tr_bit = filters->tr_bit;
-	cmd->tr_bit2 = filters->tr_bit2;
-
-	status = i40e_asq_send_command(hw, &desc, cmd_buf,
-		sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf),  NULL);
-
-	/* for get cloud filters command */
-	for (i = 0; i < 32; i += 4) {
-		cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
-		cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
-		cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
-		cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
-	}
-
-	return status;
-}
-
-
-/**
- * i40e_aq_alternate_write
- * @hw: pointer to the hardware structure
- * @reg_addr0: address of first dword to be read
- * @reg_val0: value to be written under 'reg_addr0'
- * @reg_addr1: address of second dword to be read
- * @reg_val1: value to be written under 'reg_addr1'
- *
- * Write one or two dwords to alternate structure. Fields are indicated
- * by 'reg_addr0' and 'reg_addr1' register numbers.
- *
- **/
-enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
-				u32 reg_addr0, u32 reg_val0,
-				u32 reg_addr1, u32 reg_val1)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_write *cmd_resp =
-		(struct i40e_aqc_alternate_write *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
-	cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
-	cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
-	cmd_resp->data0 = CPU_TO_LE32(reg_val0);
-	cmd_resp->data1 = CPU_TO_LE32(reg_val1);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
-	return status;
-}
-
-/**
- * i40e_aq_alternate_write_indirect
- * @hw: pointer to the hardware structure
- * @addr: address of a first register to be modified
- * @dw_count: number of alternate structure fields to write
- * @buffer: pointer to the command buffer
- *
- * Write 'dw_count' dwords from 'buffer' to alternate structure
- * starting at 'addr'.
- *
- **/
-enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
-				u32 addr, u32 dw_count, void *buffer)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_ind_write *cmd_resp =
-		(struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (buffer == NULL)
-		return I40E_ERR_PARAM;
-
-	/* Indirect command */
-	i40e_fill_default_direct_cmd_desc(&desc,
-					 i40e_aqc_opc_alternate_write_indirect);
-
-	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
-	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
-	if (dw_count > (I40E_AQ_LARGE_BUF/4))
-		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
-	cmd_resp->address = CPU_TO_LE32(addr);
-	cmd_resp->length = CPU_TO_LE32(dw_count);
-
-	status = i40e_asq_send_command(hw, &desc, buffer,
-				       I40E_LO_DWORD(4*dw_count), NULL);
-
-	return status;
-}
-
-/**
- * i40e_aq_alternate_read
- * @hw: pointer to the hardware structure
- * @reg_addr0: address of first dword to be read
- * @reg_val0: pointer for data read from 'reg_addr0'
- * @reg_addr1: address of second dword to be read
- * @reg_val1: pointer for data read from 'reg_addr1'
- *
- * Read one or two dwords from alternate structure. Fields are indicated
- * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
- * is not passed then only register at 'reg_addr0' is read.
- *
- **/
-enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
-				u32 reg_addr0, u32 *reg_val0,
-				u32 reg_addr1, u32 *reg_val1)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_write *cmd_resp =
-		(struct i40e_aqc_alternate_write *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (reg_val0 == NULL)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
-	cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
-	cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
-	if (status == I40E_SUCCESS) {
-		*reg_val0 = LE32_TO_CPU(cmd_resp->data0);
-
-		if (reg_val1 != NULL)
-			*reg_val1 = LE32_TO_CPU(cmd_resp->data1);
-	}
-
-	return status;
-}
-
-/**
- * i40e_aq_alternate_read_indirect
- * @hw: pointer to the hardware structure
- * @addr: address of the alternate structure field
- * @dw_count: number of alternate structure fields to read
- * @buffer: pointer to the command buffer
- *
- * Read 'dw_count' dwords from alternate structure starting at 'addr' and
- * place them in 'buffer'. The buffer should be allocated by caller.
- *
- **/
-enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
-				u32 addr, u32 dw_count, void *buffer)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_ind_write *cmd_resp =
-		(struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (buffer == NULL)
-		return I40E_ERR_PARAM;
-
-	/* Indirect command */
-	i40e_fill_default_direct_cmd_desc(&desc,
-		i40e_aqc_opc_alternate_read_indirect);
-
-	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
-	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
-	if (dw_count > (I40E_AQ_LARGE_BUF/4))
-		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
-	cmd_resp->address = CPU_TO_LE32(addr);
-	cmd_resp->length = CPU_TO_LE32(dw_count);
-
-	status = i40e_asq_send_command(hw, &desc, buffer,
-				       I40E_LO_DWORD(4*dw_count), NULL);
-
-	return status;
-}
-
-/**
- *  i40e_aq_alternate_clear
- *  @hw: pointer to the HW structure.
- *
- *  Clear the alternate structures of the port from which the function
- *  is called.
- *
- **/
-enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
-{
-	struct i40e_aq_desc desc;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_alternate_clear_port);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
-	return status;
-}
-
-/**
- *  i40e_aq_alternate_write_done
- *  @hw: pointer to the HW structure.
- *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
- *  @reset_needed: indicates the SW should trigger GLOBAL reset
- *
- *  Indicates to the FW that alternate structures have been changed.
- *
- **/
-enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
-		u8 bios_mode, bool *reset_needed)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_write_done *cmd =
-		(struct i40e_aqc_alternate_write_done *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	if (reset_needed == NULL)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_alternate_write_done);
-
-	cmd->cmd_flags = CPU_TO_LE16(bios_mode);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-	if (!status && reset_needed)
-		*reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
-				 I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
-
-	return status;
-}
-
-/**
- *  i40e_aq_set_oem_mode
- *  @hw: pointer to the HW structure.
- *  @oem_mode: the OEM mode to be used
- *
- *  Sets the device to a specific operating mode. Currently the only supported
- *  mode is no_clp, which causes FW to refrain from using Alternate RAM.
- *
- **/
-enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
-		u8 oem_mode)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_alternate_write_done *cmd =
-		(struct i40e_aqc_alternate_write_done *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_alternate_set_mode);
-
-	cmd->cmd_flags = CPU_TO_LE16(oem_mode);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
 	return status;
 }
 
 /**
- * i40e_aq_resume_port_tx
+ * i40e_aq_rem_cloud_filters
  * @hw: pointer to the hardware structure
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
  *
- * Resume port's Tx traffic
  **/
-enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
-				struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+			  struct i40e_aqc_cloud_filters_element_data *filters,
+			  u8 filter_count)
 {
 	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
 	enum i40e_status_code status;
+	u16 buff_len;
 
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_remove_cloud_filters);
 
-	return status;
-}
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
 
-/**
- * i40e_set_pci_config_data - store PCI bus info
- * @hw: pointer to hardware structure
- * @link_status: the link status word from PCI config space
- *
- * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
- **/
-void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
-{
-	hw->bus.type = i40e_bus_type_pci_express;
+	i40e_fix_up_geneve_vni(filters, filter_count);
 
-	switch (link_status & I40E_PCI_LINK_WIDTH) {
-	case I40E_PCI_LINK_WIDTH_1:
-		hw->bus.width = i40e_bus_width_pcie_x1;
-		break;
-	case I40E_PCI_LINK_WIDTH_2:
-		hw->bus.width = i40e_bus_width_pcie_x2;
-		break;
-	case I40E_PCI_LINK_WIDTH_4:
-		hw->bus.width = i40e_bus_width_pcie_x4;
-		break;
-	case I40E_PCI_LINK_WIDTH_8:
-		hw->bus.width = i40e_bus_width_pcie_x8;
-		break;
-	default:
-		hw->bus.width = i40e_bus_width_unknown;
-		break;
-	}
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
-	switch (link_status & I40E_PCI_LINK_SPEED) {
-	case I40E_PCI_LINK_SPEED_2500:
-		hw->bus.speed = i40e_bus_speed_2500;
-		break;
-	case I40E_PCI_LINK_SPEED_5000:
-		hw->bus.speed = i40e_bus_speed_5000;
-		break;
-	case I40E_PCI_LINK_SPEED_8000:
-		hw->bus.speed = i40e_bus_speed_8000;
-		break;
-	default:
-		hw->bus.speed = i40e_bus_speed_unknown;
-		break;
-	}
+	return status;
 }
 
 /**
- * i40e_aq_debug_dump
+ * i40e_aq_rem_cloud_filters_bb
  * @hw: pointer to the hardware structure
- * @cluster_id: specific cluster to dump
- * @table_id: table id within cluster
- * @start_index: index of line in the block to read
- * @buff_size: dump buffer size
- * @buff: dump buffer
- * @ret_buff_size: actual buffer size returned
- * @ret_next_table: next block to read
- * @ret_next_index: next index to read
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
  *
- * Dump internal FW/HW data for debug purposes.
+ * Remove the big buffer cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
  *
  **/
-enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
-				u8 table_id, u32 start_index, u16 buff_size,
-				void *buff, u16 *ret_buff_size,
-				u8 *ret_next_table, u32 *ret_next_index,
-				struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+			     struct i40e_aqc_cloud_filters_element_bb *filters,
+			     u8 filter_count)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_debug_dump_internals *cmd =
-		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
-	struct i40e_aqc_debug_dump_internals *resp =
-		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
 	enum i40e_status_code status;
-
-	if (buff_size == 0 || !buff)
-		return I40E_ERR_PARAM;
+	u16 buff_len;
+	int i;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_debug_dump_internals);
-	/* Indirect Command */
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
-	if (buff_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+					  i40e_aqc_opc_remove_cloud_filters);
+
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
 
-	cmd->cluster_id = cluster_id;
-	cmd->table_id = table_id;
-	cmd->idx = CPU_TO_LE32(start_index);
+	for (i = 0; i < filter_count; i++) {
+		u16 tnl_type;
+		u32 ti;
 
-	desc.datalen = CPU_TO_LE16(buff_size);
+		tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
 
-	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-	if (!status) {
-		if (ret_buff_size != NULL)
-			*ret_buff_size = LE16_TO_CPU(desc.datalen);
-		if (ret_next_table != NULL)
-			*ret_next_table = resp->table_id;
-		if (ret_next_index != NULL)
-			*ret_next_index = LE32_TO_CPU(resp->idx);
+		/* Due to hardware eccentricities, the VNI for Geneve is shifted
+		 * one more byte further than normally used for Tenant ID in
+		 * other tunnel types.
+		 */
+		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+			ti = LE32_TO_CPU(filters[i].element.tenant_id);
+			filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+		}
 	}
 
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
 	return status;
 }
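
Aside (not part of the patch, do not apply): the Geneve tenant-ID adjustment commented in the big-buffer remove path above is easier to see in isolation from the byte-order macros. A minimal standalone sketch of the same shift, with illustrative names and no i40e dependencies:

	#include <stdint.h>
	#include <stdio.h>

	/* Firmware expects the 24-bit Geneve VNI one byte higher in the
	 * tenant_id dword than the Tenant ID used by other tunnel types,
	 * so the host-order value is shifted up by 8 bits before the
	 * little-endian conversion. */
	static uint32_t geneve_fixup_vni(uint32_t host_order_vni)
	{
		return host_order_vni << 8;
	}

	int main(void)
	{
		uint32_t vni = 0x00ABCDEF;	/* example 24-bit VNI */

		printf("0x%08X -> 0x%08X\n", vni, geneve_fixup_vni(vni));
		return 0;
	}
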
 
-
 /**
- * i40e_enable_eee
- * @hw: pointer to the hardware structure
- * @enable: state of Energy Efficient Ethernet mode to be set
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
  *
- * Enables or disables Energy Efficient Ethernet (EEE) mode
- * accordingly to @enable parameter.
  **/
-enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable)
+enum i40e_status_code
+i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+	struct i40e_aqc_replace_cloud_filters_cmd *filters,
+	struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
 {
-	struct i40e_aq_get_phy_abilities_resp abilities;
-	struct i40e_aq_set_phy_config config;
-	enum i40e_status_code status;
-	__le16 eee_capability;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+		(struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+	enum i40e_status_code status = I40E_SUCCESS;
+	int i = 0;
 
-	/* Get initial PHY capabilities */
-	status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
-					      NULL);
-	if (status)
-		goto err;
+	/* X722 doesn't support this command */
+	if (hw->mac.type == I40E_MAC_X722)
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 
-	/* Check whether NIC configuration is compatible with Energy Efficient
-	 * Ethernet (EEE) mode.
-	 */
-	if (abilities.eee_capability == 0) {
-		status = I40E_ERR_CONFIG;
-		goto err;
-	}
+	/* need FW version greater than 6.00 */
+	if (hw->aq.fw_maj_ver < 6)
+		return I40E_NOT_SUPPORTED;
 
-	/* Cache initial EEE capability */
-	eee_capability = abilities.eee_capability;
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_replace_cloud_filters);
 
-	/* Get current configuration */
-	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
-					      NULL);
-	if (status)
-		goto err;
+	desc.datalen = CPU_TO_LE16(32);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->old_filter_type = filters->old_filter_type;
+	cmd->new_filter_type = filters->new_filter_type;
+	cmd->valid_flags = filters->valid_flags;
+	cmd->tr_bit = filters->tr_bit;
+	cmd->tr_bit2 = filters->tr_bit2;
 
-	/* Cache current configuration */
-	config.phy_type = abilities.phy_type;
-	config.phy_type_ext = abilities.phy_type_ext;
-	config.link_speed = abilities.link_speed;
-	config.abilities = abilities.abilities |
-			   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
-	config.eeer = abilities.eeer_val;
-	config.low_power_ctrl = abilities.d3_lpan;
-	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
-			    I40E_AQ_PHY_FEC_CONFIG_MASK;
-
-	/* Set desired EEE state */
-	if (enable) {
-		config.eee_capability = eee_capability;
-		config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK;
-	} else {
-		config.eee_capability = 0;
-		config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK;
+	status = i40e_asq_send_command(hw, &desc, cmd_buf,
+		sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf),  NULL);
+
+	/* for get cloud filters command */
+	for (i = 0; i < 32; i += 4) {
+		cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+		cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+		cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+		cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
 	}
 
-	/* Save modified config */
-	status = i40e_aq_set_phy_config(hw, &config, NULL);
-err:
 	return status;
 }
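
Aside (not part of the patch, do not apply): the loop at the end of i40e_aq_replace_cloud_filters() repacks the raw data[] bytes of the command buffer into eight 4-byte records, one filter_type byte followed by three input bytes. A standalone sketch of that unpacking, with an illustrative stand-in struct:

	#include <stdint.h>
	#include <stdio.h>

	struct filter_rec {		/* illustrative stand-in for the AQ struct */
		uint8_t filter_type;
		uint8_t input[3];
	};

	/* Repack 32 raw bytes into 8 records of 4 bytes each. */
	static void unpack_filters(const uint8_t *data, struct filter_rec *out)
	{
		int i;

		for (i = 0; i < 32; i += 4) {
			out[i / 4].filter_type = data[i];
			out[i / 4].input[0] = data[i + 1];
			out[i / 4].input[1] = data[i + 2];
			out[i / 4].input[2] = data[i + 3];
		}
	}

	int main(void)
	{
		uint8_t raw[32] = {1, 0xAA, 0xBB, 0xCC};	/* rest zero */
		struct filter_rec recs[8];

		unpack_filters(raw, recs);
		printf("type=%u input0=0x%02X\n", recs[0].filter_type, recs[0].input[0]);
		return 0;
	}
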
 
 /**
- * i40e_read_bw_from_alt_ram
+ * i40e_aq_alternate_read
  * @hw: pointer to the hardware structure
- * @max_bw: pointer for max_bw read
- * @min_bw: pointer for min_bw read
- * @min_valid: pointer for bool that is true if min_bw is a valid value
- * @max_valid: pointer for bool that is true if max_bw is a valid value
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
  *
- * Read bw from the alternate ram for the given pf
- **/
-enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
-					u32 *max_bw, u32 *min_bw,
-					bool *min_valid, bool *max_valid)
-{
-	enum i40e_status_code status;
-	u32 max_bw_addr, min_bw_addr;
-
-	/* Calculate the address of the min/max bw registers */
-	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
-		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
-		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
-	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
-		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
-		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
-
-	/* Read the bandwidths from alt ram */
-	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
-					min_bw_addr, min_bw);
-
-	if (*min_bw & I40E_ALT_BW_VALID_MASK)
-		*min_valid = true;
-	else
-		*min_valid = false;
-
-	if (*max_bw & I40E_ALT_BW_VALID_MASK)
-		*max_valid = true;
-	else
-		*max_valid = false;
-
-	return status;
-}
-
-/**
- * i40e_aq_configure_partition_bw
- * @hw: pointer to the hardware structure
- * @bw_data: Buffer holding valid pfs and bw limits
- * @cmd_details: pointer to command details
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
  *
- * Configure partitions guaranteed/max bw
  **/
-enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
-			struct i40e_aqc_configure_partition_bw_data *bw_data,
-			struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+				u32 reg_addr0, u32 *reg_val0,
+				u32 reg_addr1, u32 *reg_val1)
 {
-	enum i40e_status_code status;
 	struct i40e_aq_desc desc;
-	u16 bwd_size = sizeof(*bw_data);
+	struct i40e_aqc_alternate_write *cmd_resp =
+		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+	enum i40e_status_code status;
 
-	i40e_fill_default_direct_cmd_desc(&desc,
-				i40e_aqc_opc_configure_partition_bw);
+	if (reg_val0 == NULL)
+		return I40E_ERR_PARAM;
 
-	/* Indirect command */
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+	cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+	cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 
-	desc.datalen = CPU_TO_LE16(bwd_size);
+	if (status == I40E_SUCCESS) {
+		*reg_val0 = LE32_TO_CPU(cmd_resp->data0);
 
-	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+		if (reg_val1 != NULL)
+			*reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+	}
 
 	return status;
 }
@@ -6758,93 +4842,18 @@ enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
 	status = I40E_ERR_TIMEOUT;
 	retry = 1000;
-	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
-	do {
-		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
-		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
-			status = I40E_SUCCESS;
-			break;
-		}
-		i40e_usec_delay(10);
-		retry--;
-	} while (retry);
-
-phy_write_end:
-	return status;
-}
-
-/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
-				u8 page, u16 reg, u8 phy_addr, u16 value)
-{
-	enum i40e_status_code status;
-
-	switch (hw->device_id) {
-	case I40E_DEV_ID_1G_BASE_T_X722:
-		status = i40e_write_phy_register_clause22(hw,
-			reg, phy_addr, value);
-		break;
-	case I40E_DEV_ID_10G_BASE_T:
-	case I40E_DEV_ID_10G_BASE_T4:
-	case I40E_DEV_ID_10G_BASE_T_BC:
-	case I40E_DEV_ID_5G_BASE_T_BC:
-	case I40E_DEV_ID_10G_BASE_T_X722:
-	case I40E_DEV_ID_25G_B:
-	case I40E_DEV_ID_25G_SFP28:
-		status = i40e_write_phy_register_clause45(hw,
-			page, reg, phy_addr, value);
-		break;
-	default:
-		status = I40E_ERR_UNKNOWN_PHY;
-		break;
-	}
-
-	return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
-				u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
-	enum i40e_status_code status;
-
-	switch (hw->device_id) {
-	case I40E_DEV_ID_1G_BASE_T_X722:
-		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
-							 value);
-		break;
-	case I40E_DEV_ID_10G_BASE_T:
-	case I40E_DEV_ID_10G_BASE_T4:
-	case I40E_DEV_ID_5G_BASE_T_BC:
-	case I40E_DEV_ID_10G_BASE_T_X722:
-	case I40E_DEV_ID_25G_B:
-	case I40E_DEV_ID_25G_SFP28:
-		status = i40e_read_phy_register_clause45(hw, page, reg,
-							 phy_addr, value);
-		break;
-	default:
-		status = I40E_ERR_UNKNOWN_PHY;
-		break;
-	}
+	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+	do {
+		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+			status = I40E_SUCCESS;
+			break;
+		}
+		i40e_usec_delay(10);
+		retry--;
+	} while (retry);
 
+phy_write_end:
 	return status;
 }
 
@@ -6863,80 +4872,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
 	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
 }
 
-/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time in seconds for which the LED will blink
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
-					      u32 time, u32 interval)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	u32 i;
-	u16 led_ctl = 0;
-	u16 gpio_led_port;
-	u16 led_reg;
-	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
-	u8 phy_addr = 0;
-	u8 port_num;
-
-	i = rd32(hw, I40E_PFGEN_PORTNUM);
-	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
-	phy_addr = i40e_get_phy_address(hw, port_num);
-
-	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
-	     led_addr++) {
-		status = i40e_read_phy_register_clause45(hw,
-							 I40E_PHY_COM_REG_PAGE,
-							 led_addr, phy_addr,
-							 &led_reg);
-		if (status)
-			goto phy_blinking_end;
-		led_ctl = led_reg;
-		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
-			led_reg = 0;
-			status = i40e_write_phy_register_clause45(hw,
-							 I40E_PHY_COM_REG_PAGE,
-							 led_addr, phy_addr,
-							 led_reg);
-			if (status)
-				goto phy_blinking_end;
-			break;
-		}
-	}
-
-	if (time > 0 && interval > 0) {
-		for (i = 0; i < time * 1000; i += interval) {
-			status = i40e_read_phy_register_clause45(hw,
-						I40E_PHY_COM_REG_PAGE,
-						led_addr, phy_addr, &led_reg);
-			if (status)
-				goto restore_config;
-			if (led_reg & I40E_PHY_LED_MANUAL_ON)
-				led_reg = 0;
-			else
-				led_reg = I40E_PHY_LED_MANUAL_ON;
-			status = i40e_write_phy_register_clause45(hw,
-						I40E_PHY_COM_REG_PAGE,
-						led_addr, phy_addr, led_reg);
-			if (status)
-				goto restore_config;
-			i40e_msec_delay(interval);
-		}
-	}
-
-restore_config:
-	status = i40e_write_phy_register_clause45(hw,
-						  I40E_PHY_COM_REG_PAGE,
-						  led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
-	return status;
-}
-
 /**
  * i40e_led_get_reg - read LED register
  * @hw: pointer to the HW structure
@@ -6995,153 +4930,7 @@ enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
 	return status;
 }
 
-/**
- * i40e_led_get_phy - return current on/off mode
- * @hw: pointer to the hw struct
- * @led_addr: address of led register to use
- * @val: original value of register to use
- *
- **/
-enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
-				       u16 *val)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	u16 gpio_led_port;
-	u32 reg_val_aq;
-	u16 temp_addr;
-	u8 phy_addr = 0;
-	u16 reg_val;
-
-	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
-		status = i40e_aq_get_phy_register(hw,
-						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
-						I40E_PHY_COM_REG_PAGE, true,
-						I40E_PHY_LED_PROV_REG_1,
-						&reg_val_aq, NULL);
-		if (status == I40E_SUCCESS)
-			*val = (u16)reg_val_aq;
-		return status;
-	}
-	temp_addr = I40E_PHY_LED_PROV_REG_1;
-	phy_addr = i40e_get_phy_address(hw, hw->port);
-	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
-	     temp_addr++) {
-		status = i40e_read_phy_register_clause45(hw,
-							 I40E_PHY_COM_REG_PAGE,
-							 temp_addr, phy_addr,
-							 &reg_val);
-		if (status)
-			return status;
-		*val = reg_val;
-		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
-			*led_addr = temp_addr;
-			break;
-		}
-	}
-	return status;
-}
-
-/**
- * i40e_led_set_phy
- * @hw: pointer to the HW structure
- * @on: true or false
- * @led_addr: address of led register to use
- * @mode: original val plus bit for set or ignore
- *
- * Set led's on or off when controlled by the PHY
- *
- **/
-enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
-				       u16 led_addr, u32 mode)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	u32 led_ctl = 0;
-	u32 led_reg = 0;
-
-	status = i40e_led_get_reg(hw, led_addr, &led_reg);
-	if (status)
-		return status;
-	led_ctl = led_reg;
-	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
-		led_reg = 0;
-		status = i40e_led_set_reg(hw, led_addr, led_reg);
-		if (status)
-			return status;
-	}
-	status = i40e_led_get_reg(hw, led_addr, &led_reg);
-	if (status)
-		goto restore_config;
-	if (on)
-		led_reg = I40E_PHY_LED_MANUAL_ON;
-	else
-		led_reg = 0;
-	status = i40e_led_set_reg(hw, led_addr, led_reg);
-	if (status)
-		goto restore_config;
-	if (mode & I40E_PHY_LED_MODE_ORIG) {
-		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
-		status = i40e_led_set_reg(hw, led_addr, led_ctl);
-	}
-	return status;
-
-restore_config:
-	status = i40e_led_set_reg(hw, led_addr, led_ctl);
-	return status;
-}
 #endif /* PF_DRIVER */
-/**
- * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register
- * @hw: pointer to the hw struct
- * @stat: pointer to structure with status of rx and tx lpi
- *
- * Read LPI state directly from external PHY register or from MAC
- * register, depending on device ID and current link speed.
- */
-enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw,
-					      struct i40e_hw_port_stats *stat)
-{
-	enum i40e_status_code ret = I40E_SUCCESS;
-	bool eee_mrvl_phy;
-	bool eee_bcm_phy;
-	u32 val;
-
-	stat->rx_lpi_status = 0;
-	stat->tx_lpi_status = 0;
-
-	eee_bcm_phy =
-		(hw->device_id == I40E_DEV_ID_10G_BASE_T_BC ||
-		 hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) &&
-		(hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB ||
-		 hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB);
-	eee_mrvl_phy =
-		hw->device_id == I40E_DEV_ID_1G_BASE_T_X722;
-
-	if (eee_bcm_phy || eee_mrvl_phy) {
-		/* read Clause 45 PCS Status 1 register */
-		ret = i40e_aq_get_phy_register(hw,
-					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
-					       I40E_BCM_PHY_PCS_STATUS1_PAGE,
-					       true,
-					       I40E_BCM_PHY_PCS_STATUS1_REG,
-					       &val, NULL);
-
-		if (ret != I40E_SUCCESS)
-			return ret;
-
-		stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI);
-		stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI);
-
-		return ret;
-	}
-
-	val = rd32(hw, I40E_PRTPM_EEE_STAT);
-	stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
-			       I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
-	stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
-			       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
-
-	return ret;
-}
 
 /**
  * i40e_get_lpi_counters - read LPI counters from EEE statistics
@@ -7185,108 +4974,6 @@ enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw,
 	return I40E_SUCCESS;
 }
 
-/**
- * i40e_get_lpi_duration - read LPI time duration from EEE statistics
- * @hw: pointer to the hw struct
- * @stat: pointer to structure with status of rx and tx lpi
- * @tx_duration: pointer to memory for TX LPI time duration
- * @rx_duration: pointer to memory for RX LPI time duration
- *
- * Read Low Power Idle (LPI) mode time duration from Energy Efficient
- * Ethernet (EEE) statistics.
- */
-enum i40e_status_code i40e_get_lpi_duration(struct i40e_hw *hw,
-					    struct i40e_hw_port_stats *stat,
-					    u64 *tx_duration, u64 *rx_duration)
-{
-	u32 tx_time_dur, rx_time_dur;
-	enum i40e_status_code retval;
-	u32 cmd_status;
-
-	if (hw->device_id != I40E_DEV_ID_10G_BASE_T_BC &&
-	    hw->device_id != I40E_DEV_ID_5G_BASE_T_BC)
-		return I40E_ERR_NOT_IMPLEMENTED;
-
-	retval = i40e_aq_run_phy_activity
-		(hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
-		I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR,
-		&cmd_status, &tx_time_dur, &rx_time_dur, NULL);
-
-	if (retval)
-		return retval;
-	if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) !=
-	    I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
-		return I40E_ERR_ADMIN_QUEUE_ERROR;
-
-	if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB &&
-	    !tx_time_dur && !rx_time_dur &&
-	    stat->tx_lpi_status && stat->rx_lpi_status) {
-		retval = i40e_aq_run_phy_activity
-			(hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND,
-			I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR,
-			&cmd_status,
-			&tx_time_dur, &rx_time_dur, NULL);
-
-		if (retval)
-			return retval;
-		if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) !=
-		    I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC)
-			return I40E_ERR_ADMIN_QUEUE_ERROR;
-		tx_time_dur = 0;
-		rx_time_dur = 0;
-	}
-
-	*tx_duration = tx_time_dur;
-	*rx_duration = rx_time_dur;
-
-	return retval;
-}
-
-/**
- * i40e_lpi_stat_update - update LPI counters with values relative to offset
- * @hw: pointer to the hw struct
- * @offset_loaded: flag indicating need of writing current value to offset
- * @tx_offset: pointer to offset of TX LPI counter
- * @tx_stat: pointer to value of TX LPI counter
- * @rx_offset: pointer to offset of RX LPI counter
- * @rx_stat: pointer to value of RX LPI counter
- *
- * Update Low Power Idle (LPI) mode counters while having regard to passed
- * offsets.
- **/
-enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw,
-					   bool offset_loaded, u64 *tx_offset,
-					   u64 *tx_stat, u64 *rx_offset,
-					   u64 *rx_stat)
-{
-	enum i40e_status_code retval;
-	u32 tx_counter, rx_counter;
-	bool is_clear;
-
-	retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter, &is_clear);
-	if (retval)
-		goto err;
-
-	if (is_clear) {
-		*tx_stat += tx_counter;
-		*rx_stat += rx_counter;
-	} else {
-		if (!offset_loaded) {
-			*tx_offset = tx_counter;
-			*rx_offset = rx_counter;
-		}
-
-		*tx_stat = (tx_counter >= *tx_offset) ?
-			(u32)(tx_counter - *tx_offset) :
-			(u32)((tx_counter + BIT_ULL(32)) - *tx_offset);
-		*rx_stat = (rx_counter >= *rx_offset) ?
-			(u32)(rx_counter - *rx_offset) :
-			(u32)((rx_counter + BIT_ULL(32)) - *rx_offset);
-	}
-err:
-	return retval;
-}
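
Aside (not part of the patch, do not apply): the removed i40e_lpi_stat_update() handles 32-bit counter rollover by adding 2^32 before subtracting the saved offset. A standalone sketch of that wrap-safe delta, names illustrative:

	#include <stdint.h>
	#include <stdio.h>

	/* Delta between the current 32-bit counter reading and a saved
	 * offset, tolerating one rollover between the two reads. */
	static uint64_t counter_delta32(uint32_t current, uint32_t offset)
	{
		if (current >= offset)
			return (uint64_t)current - offset;
		return ((uint64_t)current + (1ULL << 32)) - offset;
	}

	int main(void)
	{
		/* counter wrapped from 0xFFFFFFF0 past zero to 5: delta is 21 */
		printf("%llu\n", (unsigned long long)counter_delta32(5, 0xFFFFFFF0u));
		return 0;
	}
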
-
 /**
  * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
  * @hw: pointer to the hw struct
@@ -7674,195 +5361,6 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
 }
 #endif /* VF_DRIVER */
 
-/**
- * i40e_aq_set_arp_proxy_config
- * @hw: pointer to the HW structure
- * @proxy_config: pointer to proxy config command table struct
- * @cmd_details: pointer to command details
- *
- * Set ARP offload parameters from pre-populated
- * i40e_aqc_arp_proxy_data struct
- **/
-enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
-				struct i40e_aqc_arp_proxy_data *proxy_config,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	enum i40e_status_code status;
-
-	if (!proxy_config)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
-
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
-	desc.params.external.addr_high =
-				  CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
-	desc.params.external.addr_low =
-				  CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
-	desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
-
-	status = i40e_asq_send_command(hw, &desc, proxy_config,
-				       sizeof(struct i40e_aqc_arp_proxy_data),
-				       cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_opc_set_ns_proxy_table_entry
- * @hw: pointer to the HW structure
- * @ns_proxy_table_entry: pointer to NS table entry command struct
- * @cmd_details: pointer to command details
- *
- * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
- * from pre-populated i40e_aqc_ns_proxy_data struct
- **/
-enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
-			struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
-			struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	enum i40e_status_code status;
-
-	if (!ns_proxy_table_entry)
-		return I40E_ERR_PARAM;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-				i40e_aqc_opc_set_ns_proxy_table_entry);
-
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
-	desc.params.external.addr_high =
-		CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
-	desc.params.external.addr_low =
-		CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
-	desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
-
-	status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
-				       sizeof(struct i40e_aqc_ns_proxy_data),
-				       cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_set_clear_wol_filter
- * @hw: pointer to the hw struct
- * @filter_index: index of filter to modify (0-7)
- * @filter: buffer containing filter to be set
- * @set_filter: true to set filter, false to clear filter
- * @no_wol_tco: if true, pass through packets cannot cause wake-up
- *		if false, pass through packets may cause wake-up
- * @filter_valid: true if filter action is valid
- * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
- * @cmd_details: pointer to command details structure or NULL
- *
- * Set or clear WoL filter for port attached to the PF
- **/
-enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
-				u8 filter_index,
-				struct i40e_aqc_set_wol_filter_data *filter,
-				bool set_filter, bool no_wol_tco,
-				bool filter_valid, bool no_wol_tco_valid,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_set_wol_filter *cmd =
-		(struct i40e_aqc_set_wol_filter *)&desc.params.raw;
-	enum i40e_status_code status;
-	u16 cmd_flags = 0;
-	u16 valid_flags = 0;
-	u16 buff_len = 0;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
-
-	if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
-		return  I40E_ERR_PARAM;
-	cmd->filter_index = CPU_TO_LE16(filter_index);
-
-	if (set_filter) {
-		if (!filter)
-			return  I40E_ERR_PARAM;
-
-		cmd_flags |= I40E_AQC_SET_WOL_FILTER;
-		cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
-	}
-
-	if (no_wol_tco)
-		cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
-	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
-
-	if (filter_valid)
-		valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
-	if (no_wol_tco_valid)
-		valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
-	cmd->valid_flags = CPU_TO_LE16(valid_flags);
-
-	buff_len = sizeof(*filter);
-	desc.datalen = CPU_TO_LE16(buff_len);
-
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
-	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
-
-	cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
-	cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
-
-	status = i40e_asq_send_command(hw, &desc, filter,
-				       buff_len, cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_get_wake_event_reason
- * @hw: pointer to the hw struct
- * @wake_reason: return value, index of matching filter
- * @cmd_details: pointer to command details structure or NULL
- *
- * Get information for the reason of a Wake Up event
- **/
-enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
-				u16 *wake_reason,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_get_wake_reason_completion *resp =
-		(struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	if (status == I40E_SUCCESS)
-		*wake_reason = LE16_TO_CPU(resp->wake_reason);
-
-	return status;
-}
-
-/**
-* i40e_aq_clear_all_wol_filters
-* @hw: pointer to the hw struct
-* @cmd_details: pointer to command details structure or NULL
-*
-* Get information for the reason of a Wake Up event
-**/
-enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
-	struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	enum i40e_status_code status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_clear_all_wol_filters);
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
 /**
  * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
@@ -8243,42 +5741,3 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	}
 	return status;
 }
-
-/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-enum i40e_status_code
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
-		       struct i40e_profile_segment *profile,
-		       u8 *profile_info_sec, u32 track_id)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	struct i40e_profile_section_header *sec = NULL;
-	struct i40e_profile_info *pinfo;
-	u32 offset = 0, info = 0;
-
-	sec = (struct i40e_profile_section_header *)profile_info_sec;
-	sec->tbl_size = 1;
-	sec->data_end = sizeof(struct i40e_profile_section_header) +
-			sizeof(struct i40e_profile_info);
-	sec->section.type = SECTION_TYPE_INFO;
-	sec->section.offset = sizeof(struct i40e_profile_section_header);
-	sec->section.size = sizeof(struct i40e_profile_info);
-	pinfo = (struct i40e_profile_info *)(profile_info_sec +
-					     sec->section.offset);
-	pinfo->track_id = track_id;
-	pinfo->version = profile->version;
-	pinfo->op = I40E_DDP_ADD_TRACKID;
-	i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
-		    I40E_NONDMA_TO_NONDMA);
-
-	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
-				   track_id, &offset, &info, NULL);
-	return status;
-}
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 388af3d64d..ceb2f37927 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -932,49 +932,6 @@ enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 	return ret;
 }
 
-/**
- * i40e_get_fw_lldp_status
- * @hw: pointer to the hw struct
- * @lldp_status: pointer to the status enum
- *
- * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
- * Status of agent is reported via @lldp_status parameter.
- **/
-enum i40e_status_code
-i40e_get_fw_lldp_status(struct i40e_hw *hw,
-			enum i40e_get_fw_lldp_status_resp *lldp_status)
-{
-	enum i40e_status_code ret;
-	struct i40e_virt_mem mem;
-	u8 *lldpmib;
-
-	if (!lldp_status)
-		return I40E_ERR_PARAM;
-
-	/* Allocate buffer for the LLDPDU */
-	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
-	if (ret)
-		return ret;
-
-	lldpmib = (u8 *)mem.va;
-	ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
-				   I40E_LLDPDU_SIZE, NULL, NULL, NULL);
-
-	if (ret == I40E_SUCCESS) {
-		*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
-	} else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
-		/* MIB is not available yet but the agent is running */
-		*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
-		ret = I40E_SUCCESS;
-	} else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
-		*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
-		ret = I40E_SUCCESS;
-	}
-
-	i40e_free_virt_mem(hw, &mem);
-	return ret;
-}
-
 /**
  * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
  * @tlv: Fill the ETS config data in IEEE format
diff --git a/drivers/net/i40e/base/i40e_dcb.h b/drivers/net/i40e/base/i40e_dcb.h
index 0409fd3e1a..01c1d8af11 100644
--- a/drivers/net/i40e/base/i40e_dcb.h
+++ b/drivers/net/i40e/base/i40e_dcb.h
@@ -199,9 +199,6 @@ enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
 enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
 enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw,
 				    bool enable_mib_change);
-enum i40e_status_code
-i40e_get_fw_lldp_status(struct i40e_hw *hw,
-			enum i40e_get_fw_lldp_status_resp *lldp_status);
 enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
 enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
 					      struct i40e_dcbx_config *dcbcfg);
diff --git a/drivers/net/i40e/base/i40e_diag.c b/drivers/net/i40e/base/i40e_diag.c
deleted file mode 100644
index b3c4cfd3aa..0000000000
--- a/drivers/net/i40e/base/i40e_diag.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
- */
-
-#include "i40e_diag.h"
-#include "i40e_prototype.h"
-
-/**
- * i40e_diag_set_loopback
- * @hw: pointer to the hw struct
- * @mode: loopback mode
- *
- * Set chosen loopback mode
- **/
-enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
-					     enum i40e_lb_mode mode)
-{
-	enum i40e_status_code ret_code = I40E_SUCCESS;
-
-	if (i40e_aq_set_lb_modes(hw, mode, NULL))
-		ret_code = I40E_ERR_DIAG_TEST_FAILED;
-
-	return ret_code;
-}
-
-/**
- * i40e_diag_reg_pattern_test
- * @hw: pointer to the hw struct
- * @reg: reg to be tested
- * @mask: bits to be touched
- **/
-static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw,
-							u32 reg, u32 mask)
-{
-	const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
-	u32 pat, val, orig_val;
-	int i;
-
-	orig_val = rd32(hw, reg);
-	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
-		pat = patterns[i];
-		wr32(hw, reg, (pat & mask));
-		val = rd32(hw, reg);
-		if ((val & mask) != (pat & mask)) {
-			return I40E_ERR_DIAG_TEST_FAILED;
-		}
-	}
-
-	wr32(hw, reg, orig_val);
-	val = rd32(hw, reg);
-	if (val != orig_val) {
-		return I40E_ERR_DIAG_TEST_FAILED;
-	}
-
-	return I40E_SUCCESS;
-}
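
Aside (not part of the patch, do not apply): the removed pattern test writes each pattern under a mask, reads it back, and finally restores the original register value. A standalone sketch of the same walk against a plain variable instead of a device register, so it builds without the i40e register macros:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_reg;	/* stands in for rd32()/wr32() access */

	static bool reg_pattern_test(uint32_t mask)
	{
		const uint32_t patterns[] = {0x5A5A5A5A, 0xA5A5A5A5,
					     0x00000000, 0xFFFFFFFF};
		uint32_t orig = fake_reg;
		size_t i;

		for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
			fake_reg = patterns[i] & mask;		/* write pattern */
			if ((fake_reg & mask) != (patterns[i] & mask))
				return false;			/* read-back mismatch */
		}
		fake_reg = orig;				/* restore original value */
		return fake_reg == orig;
	}

	int main(void)
	{
		printf("pattern test %s\n",
		       reg_pattern_test(0x0000FFFF) ? "passed" : "failed");
		return 0;
	}
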
-
-static struct i40e_diag_reg_test_info i40e_reg_list[] = {
-	/* offset               mask         elements   stride */
-	{I40E_QTX_CTL(0),       0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
-	{I40E_PFINT_ITR0(0),    0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
-	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
-	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
-	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
-	{I40E_PFINT_STAT_CTL0,  0x0000000C, 1, 0},
-	{I40E_PFINT_LNKLST0,    0x00001FFF, 1, 0},
-	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
-	{I40E_QINT_TQCTL(0),    0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
-	{I40E_QINT_RQCTL(0),    0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
-	{I40E_PFINT_ICR0_ENA,   0xF7F20000, 1, 0},
-	{ 0 }
-};
-
-/**
- * i40e_diag_reg_test
- * @hw: pointer to the hw struct
- *
- * Perform registers diagnostic test
- **/
-enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw)
-{
-	enum i40e_status_code ret_code = I40E_SUCCESS;
-	u32 reg, mask;
-	u32 i, j;
-
-	for (i = 0; i40e_reg_list[i].offset != 0 &&
-					     ret_code == I40E_SUCCESS; i++) {
-
-		/* set actual reg range for dynamically allocated resources */
-		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
-		    hw->func_caps.num_tx_qp != 0)
-			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
-		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
-		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
-		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
-		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
-		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
-		    hw->func_caps.num_msix_vectors != 0)
-			i40e_reg_list[i].elements =
-				hw->func_caps.num_msix_vectors - 1;
-
-		/* test register access */
-		mask = i40e_reg_list[i].mask;
-		for (j = 0; j < i40e_reg_list[i].elements &&
-			    ret_code == I40E_SUCCESS; j++) {
-			reg = i40e_reg_list[i].offset
-				+ (j * i40e_reg_list[i].stride);
-			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
-		}
-	}
-
-	return ret_code;
-}
-
-/**
- * i40e_diag_eeprom_test
- * @hw: pointer to the hw struct
- *
- * Perform EEPROM diagnostic test
- **/
-enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw)
-{
-	enum i40e_status_code ret_code;
-	u16 reg_val;
-
-	/* read NVM control word and if NVM valid, validate EEPROM checksum*/
-	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
-	if ((ret_code == I40E_SUCCESS) &&
-	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-	     BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
-		return i40e_validate_nvm_checksum(hw, NULL);
-	else
-		return I40E_ERR_DIAG_TEST_FAILED;
-}
-
-/**
- * i40e_diag_fw_alive_test
- * @hw: pointer to the hw struct
- *
- * Perform FW alive diagnostic test
- **/
-enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw)
-{
-	UNREFERENCED_1PARAMETER(hw);
-	return I40E_SUCCESS;
-}
diff --git a/drivers/net/i40e/base/i40e_diag.h b/drivers/net/i40e/base/i40e_diag.h
deleted file mode 100644
index cb59285d9c..0000000000
--- a/drivers/net/i40e/base/i40e_diag.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2020 Intel Corporation
- */
-
-#ifndef _I40E_DIAG_H_
-#define _I40E_DIAG_H_
-
-#include "i40e_type.h"
-
-enum i40e_lb_mode {
-	I40E_LB_MODE_NONE       = 0x0,
-	I40E_LB_MODE_PHY_LOCAL  = I40E_AQ_LB_PHY_LOCAL,
-	I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
-	I40E_LB_MODE_MAC_LOCAL  = I40E_AQ_LB_MAC_LOCAL,
-};
-
-struct i40e_diag_reg_test_info {
-	u32 offset;	/* the base register */
-	u32 mask;	/* bits that can be tested */
-	u32 elements;	/* number of elements if array */
-	u32 stride;	/* bytes between each element */
-};
-
-enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
-					     enum i40e_lb_mode mode);
-enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw);
-enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw);
-enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw);
-
-#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c
index d3969396f0..5242ba8deb 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -914,228 +914,6 @@ static void i40e_write_qword(u8 *hmc_bits,
 	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
 }
 
-/**
- * i40e_read_byte - read HMC context byte into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_byte(u8 *hmc_bits,
-			   struct i40e_context_ele *ce_info,
-			   u8 *dest)
-{
-	u8 dest_byte, mask;
-	u8 *src, *target;
-	u16 shift_width;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-	mask = (u8)(BIT(ce_info->width) - 1);
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-
-	/* get the current bits from the src bit string */
-	src = hmc_bits + (ce_info->lsb / 8);
-
-	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
-
-	dest_byte &= ~(mask);
-
-	dest_byte >>= shift_width;
-
-	/* get the address from the struct field */
-	target = dest + ce_info->offset;
-
-	/* put it back in the struct */
-	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_read_word - read HMC context word into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_word(u8 *hmc_bits,
-			   struct i40e_context_ele *ce_info,
-			   u8 *dest)
-{
-	u16 dest_word, mask;
-	u8 *src, *target;
-	u16 shift_width;
-	__le16 src_word;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-	mask = BIT(ce_info->width) - 1;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-
-	/* get the current bits from the src bit string */
-	src = hmc_bits + (ce_info->lsb / 8);
-
-	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
-
-	/* the data in the memory is stored as little endian so mask it
-	 * correctly
-	 */
-	src_word &= ~(CPU_TO_LE16(mask));
-
-	/* get the data back into host order before shifting */
-	dest_word = LE16_TO_CPU(src_word);
-
-	dest_word >>= shift_width;
-
-	/* get the address from the struct field */
-	target = dest + ce_info->offset;
-
-	/* put it back in the struct */
-	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_read_dword - read HMC context dword into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_dword(u8 *hmc_bits,
-			    struct i40e_context_ele *ce_info,
-			    u8 *dest)
-{
-	u32 dest_dword, mask;
-	u8 *src, *target;
-	u16 shift_width;
-	__le32 src_dword;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 32 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 5 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 32)
-		mask = BIT(ce_info->width) - 1;
-	else
-		mask = ~(u32)0;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-
-	/* get the current bits from the src bit string */
-	src = hmc_bits + (ce_info->lsb / 8);
-
-	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
-
-	/* the data in the memory is stored as little endian so mask it
-	 * correctly
-	 */
-	src_dword &= ~(CPU_TO_LE32(mask));
-
-	/* get the data back into host order before shifting */
-	dest_dword = LE32_TO_CPU(src_dword);
-
-	dest_dword >>= shift_width;
-
-	/* get the address from the struct field */
-	target = dest + ce_info->offset;
-
-	/* put it back in the struct */
-	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
-		    I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_read_qword - read HMC context qword into struct
- * @hmc_bits: pointer to the HMC memory
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static void i40e_read_qword(u8 *hmc_bits,
-			    struct i40e_context_ele *ce_info,
-			    u8 *dest)
-{
-	u64 dest_qword, mask;
-	u8 *src, *target;
-	u16 shift_width;
-	__le64 src_qword;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 64 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 6 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 64)
-		mask = BIT_ULL(ce_info->width) - 1;
-	else
-		mask = ~(u64)0;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-
-	/* get the current bits from the src bit string */
-	src = hmc_bits + (ce_info->lsb / 8);
-
-	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
-
-	/* the data in the memory is stored as little endian so mask it
-	 * correctly
-	 */
-	src_qword &= ~(CPU_TO_LE64(mask));
-
-	/* get the data back into host order before shifting */
-	dest_qword = LE64_TO_CPU(src_qword);
-
-	dest_qword >>= shift_width;
-
-	/* get the address from the struct field */
-	target = dest + ce_info->offset;
-
-	/* put it back in the struct */
-	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
-		    I40E_NONDMA_TO_DMA);
-}
-
-/**
- * i40e_get_hmc_context - extract HMC context bits
- * @context_bytes: pointer to the context bit array
- * @ce_info: a description of the struct to be filled
- * @dest: the struct to be filled
- **/
-static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
-					struct i40e_context_ele *ce_info,
-					u8 *dest)
-{
-	int f;
-
-	for (f = 0; ce_info[f].width != 0; f++) {
-		switch (ce_info[f].size_of) {
-		case 1:
-			i40e_read_byte(context_bytes, &ce_info[f], dest);
-			break;
-		case 2:
-			i40e_read_word(context_bytes, &ce_info[f], dest);
-			break;
-		case 4:
-			i40e_read_dword(context_bytes, &ce_info[f], dest);
-			break;
-		case 8:
-			i40e_read_qword(context_bytes, &ce_info[f], dest);
-			break;
-		default:
-			/* nothing to do, just keep going */
-			break;
-		}
-	}
-
-	return I40E_SUCCESS;
-}
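
Aside (not part of the patch, do not apply): the removed read helpers are built around the same shift-and-mask idea for pulling a bit field out of the little-endian HMC context image. A simplified standalone sketch of such an extraction for a field narrower than 16 bits, assuming a little-endian host (the driver additionally byte-swaps via LE16_TO_CPU()); this is a model of the technique, not a drop-in for the driver helpers:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Extract 'width' bits starting at absolute bit 'lsb' from a
	 * little-endian context image. */
	static uint16_t read_ctx_field(const uint8_t *ctx,
				       unsigned int lsb, unsigned int width)
	{
		unsigned int shift = lsb % 8;
		uint16_t mask = (uint16_t)(((1u << width) - 1) << shift);
		uint16_t word;

		memcpy(&word, ctx + lsb / 8, sizeof(word));
		return (uint16_t)((word & mask) >> shift);
	}

	int main(void)
	{
		uint8_t ctx[4] = {0x34, 0x12, 0x00, 0x00};	/* 0x1234, little endian */

		printf("0x%X\n", read_ctx_field(ctx, 4, 8));	/* bits 4..11 -> 0x23 */
		return 0;
	}
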
-
 /**
  * i40e_clear_hmc_context - zero out the HMC context bits
  * @hw:       the hardware struct
@@ -1261,27 +1039,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
 	return ret_code;
 }
 
-/**
- * i40e_get_lan_tx_queue_context - return the HMC context for the queue
- * @hw:    the hardware struct
- * @queue: the queue we care about
- * @s:     the struct to be filled
- **/
-enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
-						    u16 queue,
-						    struct i40e_hmc_obj_txq *s)
-{
-	enum i40e_status_code err;
-	u8 *context_bytes;
-
-	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
-	if (err < 0)
-		return err;
-
-	return i40e_get_hmc_context(context_bytes,
-				    i40e_hmc_txq_ce_info, (u8 *)s);
-}
-
 /**
  * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
  * @hw:    the hardware struct
@@ -1321,27 +1078,6 @@ enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
 				    i40e_hmc_txq_ce_info, (u8 *)s);
 }
 
-/**
- * i40e_get_lan_rx_queue_context - return the HMC context for the queue
- * @hw:    the hardware struct
- * @queue: the queue we care about
- * @s:     the struct to be filled
- **/
-enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
-						    u16 queue,
-						    struct i40e_hmc_obj_rxq *s)
-{
-	enum i40e_status_code err;
-	u8 *context_bytes;
-
-	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
-	if (err < 0)
-		return err;
-
-	return i40e_get_hmc_context(context_bytes,
-				    i40e_hmc_rxq_ce_info, (u8 *)s);
-}
-
 /**
  * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
  * @hw:    the hardware struct
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.h b/drivers/net/i40e/base/i40e_lan_hmc.h
index aa5dceb792..1d2707e5ad 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.h
+++ b/drivers/net/i40e/base/i40e_lan_hmc.h
@@ -147,17 +147,11 @@ enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
 
 u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
 			      u32 fcoe_cntx_num, u32 fcoe_filt_num);
-enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
-						    u16 queue,
-						    struct i40e_hmc_obj_txq *s);
 enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
 						      u16 queue);
 enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
 						    u16 queue,
 						    struct i40e_hmc_obj_txq *s);
-enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
-						    u16 queue,
-						    struct i40e_hmc_obj_rxq *s);
 enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
 						      u16 queue);
 enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index 561ed21136..f1d1ff3685 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -599,61 +599,6 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 	return ret_code;
 }
 
-/**
- * __i40e_write_nvm_word - Writes Shadow RAM word
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to write
- * @data: word to write to the Shadow RAM
- *
- * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
- * NVM ownership has to be acquired and released (on ARQ completion event
- * reception) by the caller. To commit the Shadow RAM to NVM, the update
- * checksum function should be called.
- **/
-enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
-					    void *data)
-{
-	DEBUGFUNC("i40e_write_nvm_word");
-
-	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
-
-	/* Value 0x00 below means that we treat SR as a flat mem */
-	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
-}
-
-/**
- * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
- * @hw: pointer to the HW structure
- * @module_pointer: module pointer location in words from the NVM beginning
- * @offset: offset of the Shadow RAM buffer to write
- * @words: number of words to write
- * @data: words to write to the Shadow RAM
- *
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
- * NVM ownership must be acquired before calling this function and released
- * on ARQ completion event reception by caller. To commit SR to NVM update
- * checksum function should be called.
- **/
-enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
-					      u8 module_pointer, u32 offset,
-					      u16 words, void *data)
-{
-	__le16 *le_word_ptr = (__le16 *)data;
-	u16 *word_ptr = (u16 *)data;
-	u32 i = 0;
-
-	DEBUGFUNC("i40e_write_nvm_buffer");
-
-	for (i = 0; i < words; i++)
-		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
-
-	/* Here we will only write one buffer as the size of the modules
-	 * mirrored in the Shadow RAM is always less than 4K.
-	 */
-	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
-				 data, false);
-}
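
Aside (not part of the patch, do not apply): before calling i40e_write_nvm_aq(), the removed buffer writer converts every word in place from host order to little endian. A standalone sketch of that conversion, with CPU_TO_LE16 replaced by an explicit swap for big-endian hosts:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t to_le16(uint16_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return (uint16_t)((v << 8) | (v >> 8));	/* swap on big-endian hosts */
	#else
		return v;				/* little-endian host: identity */
	#endif
	}

	/* Convert a word buffer in place so it can be handed to a write
	 * command that expects little-endian data. */
	static void words_to_le(uint16_t *words, unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			words[i] = to_le16(words[i]);
	}

	int main(void)
	{
		uint16_t buf[2] = {0x1234, 0xABCD};

		words_to_le(buf, 2);
		printf("0x%04X 0x%04X\n", buf[0], buf[1]);
		return 0;
	}
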
-
 /**
  * i40e_calc_nvm_checksum - Calculates and returns the checksum
  * @hw: pointer to hardware structure
@@ -807,521 +752,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
 	return ret_code;
 }
 
-STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno);
-STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
-						   struct i40e_nvm_access *cmd,
-						   int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
-						   struct i40e_nvm_access *cmd,
-						   u8 *bytes, int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
-						  struct i40e_nvm_access *cmd,
-						  u8 *bytes, int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
-						 struct i40e_nvm_access *cmd,
-						 u8 *bytes, int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno);
-STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno);
-STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
-{
-	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
-}
-STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
-{
-	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
-}
-
-STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
-{
-	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
-		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
-}
-
-STATIC const char *i40e_nvm_update_state_str[] = {
-	"I40E_NVMUPD_INVALID",
-	"I40E_NVMUPD_READ_CON",
-	"I40E_NVMUPD_READ_SNT",
-	"I40E_NVMUPD_READ_LCB",
-	"I40E_NVMUPD_READ_SA",
-	"I40E_NVMUPD_WRITE_ERA",
-	"I40E_NVMUPD_WRITE_CON",
-	"I40E_NVMUPD_WRITE_SNT",
-	"I40E_NVMUPD_WRITE_LCB",
-	"I40E_NVMUPD_WRITE_SA",
-	"I40E_NVMUPD_CSUM_CON",
-	"I40E_NVMUPD_CSUM_SA",
-	"I40E_NVMUPD_CSUM_LCB",
-	"I40E_NVMUPD_STATUS",
-	"I40E_NVMUPD_EXEC_AQ",
-	"I40E_NVMUPD_GET_AQ_RESULT",
-	"I40E_NVMUPD_GET_AQ_EVENT",
-	"I40E_NVMUPD_GET_FEATURES",
-};
-
-/**
- * i40e_nvmupd_command - Process an NVM update command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * Dispatches command depending on what update state is current
- **/
-enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
-					  struct i40e_nvm_access *cmd,
-					  u8 *bytes, int *perrno)
-{
-	enum i40e_status_code status;
-	enum i40e_nvmupd_cmd upd_cmd;
-
-	DEBUGFUNC("i40e_nvmupd_command");
-
-	/* assume success */
-	*perrno = 0;
-
-	/* early check for status command and debug msgs */
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
-
-	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
-		   i40e_nvm_update_state_str[upd_cmd],
-		   hw->nvmupd_state,
-		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
-		   cmd->command, cmd->config, cmd->offset, cmd->data_size);
-
-	if (upd_cmd == I40E_NVMUPD_INVALID) {
-		*perrno = -EFAULT;
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_validate_command returns %d errno %d\n",
-			   upd_cmd, *perrno);
-	}
-
-	/* a status request returns immediately rather than
-	 * going into the state machine
-	 */
-	if (upd_cmd == I40E_NVMUPD_STATUS) {
-		if (!cmd->data_size) {
-			*perrno = -EFAULT;
-			return I40E_ERR_BUF_TOO_SHORT;
-		}
-
-		bytes[0] = hw->nvmupd_state;
-
-		if (cmd->data_size >= 4) {
-			bytes[1] = 0;
-			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
-		}
-
-		/* Clear error status on read */
-		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-
-		return I40E_SUCCESS;
-	}
-
-	/*
-	 * A supported features request returns immediately
-	 * rather than going into state machine
-	 */
-	if (upd_cmd == I40E_NVMUPD_FEATURES) {
-		if (cmd->data_size < hw->nvmupd_features.size) {
-			*perrno = -EFAULT;
-			return I40E_ERR_BUF_TOO_SHORT;
-		}
-
-		/*
-		 * If buffer is bigger than i40e_nvmupd_features structure,
-		 * make sure the trailing bytes are set to 0x0.
-		 */
-		if (cmd->data_size > hw->nvmupd_features.size)
-			i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
-				    cmd->data_size - hw->nvmupd_features.size,
-				    I40E_NONDMA_MEM);
-
-		i40e_memcpy(bytes, &hw->nvmupd_features,
-			    hw->nvmupd_features.size, I40E_NONDMA_MEM);
-
-		return I40E_SUCCESS;
-	}
-
-	/* Clear status even it is not read and log */
-	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
-		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-	}
-
-	/* Acquire lock to prevent race condition where adminq_task
-	 * can execute after i40e_nvmupd_nvm_read/write but before state
-	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
-	 *
-	 * During NVMUpdate, it is observed that lock could be held for
-	 * ~5ms for most commands. However lock is held for ~60ms for
-	 * NVMUPD_CSUM_LCB command.
-	 */
-	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
-	switch (hw->nvmupd_state) {
-	case I40E_NVMUPD_STATE_INIT:
-		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
-		break;
-
-	case I40E_NVMUPD_STATE_READING:
-		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
-		break;
-
-	case I40E_NVMUPD_STATE_WRITING:
-		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
-		break;
-
-	case I40E_NVMUPD_STATE_INIT_WAIT:
-	case I40E_NVMUPD_STATE_WRITE_WAIT:
-		/* if we need to stop waiting for an event, clear
-		 * the wait info and return before doing anything else
-		 */
-		if (cmd->offset == 0xffff) {
-			i40e_nvmupd_clear_wait_state(hw);
-			status = I40E_SUCCESS;
-			break;
-		}
-
-		status = I40E_ERR_NOT_READY;
-		*perrno = -EBUSY;
-		break;
-
-	default:
-		/* invalid state, should never happen */
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
-		status = I40E_NOT_SUPPORTED;
-		*perrno = -ESRCH;
-		break;
-	}
-
-	i40e_release_spinlock(&hw->aq.arq_spinlock);
-	return status;
-}
-
-/**
- * i40e_nvmupd_state_init - Handle NVM update state Init
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * Process legitimate commands of the Init state and conditionally set next
- * state. Reject all other commands.
- **/
-STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	enum i40e_nvmupd_cmd upd_cmd;
-
-	DEBUGFUNC("i40e_nvmupd_state_init");
-
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
-
-	switch (upd_cmd) {
-	case I40E_NVMUPD_READ_SA:
-		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-		if (status) {
-			*perrno = i40e_aq_rc_to_posix(status,
-						     hw->aq.asq_last_status);
-		} else {
-			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
-			i40e_release_nvm(hw);
-		}
-		break;
-
-	case I40E_NVMUPD_READ_SNT:
-		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-		if (status) {
-			*perrno = i40e_aq_rc_to_posix(status,
-						     hw->aq.asq_last_status);
-		} else {
-			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
-			if (status)
-				i40e_release_nvm(hw);
-			else
-				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
-		}
-		break;
-
-	case I40E_NVMUPD_WRITE_ERA:
-		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
-		if (status) {
-			*perrno = i40e_aq_rc_to_posix(status,
-						     hw->aq.asq_last_status);
-		} else {
-			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
-			if (status) {
-				i40e_release_nvm(hw);
-			} else {
-				hw->nvm_release_on_done = true;
-				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
-				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
-			}
-		}
-		break;
-
-	case I40E_NVMUPD_WRITE_SA:
-		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
-		if (status) {
-			*perrno = i40e_aq_rc_to_posix(status,
-						     hw->aq.asq_last_status);
-		} else {
-			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-			if (status) {
-				i40e_release_nvm(hw);
-			} else {
-				hw->nvm_release_on_done = true;
-				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
-			}
-		}
-		break;
-
-	case I40E_NVMUPD_WRITE_SNT:
-		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
-		if (status) {
-			*perrno = i40e_aq_rc_to_posix(status,
-						     hw->aq.asq_last_status);
-		} else {
-			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-			if (status) {
-				i40e_release_nvm(hw);
-			} else {
-				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
-			}
-		}
-		break;
-
-	case I40E_NVMUPD_CSUM_SA:
-		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
-		if (status) {
-			*perrno = i40e_aq_rc_to_posix(status,
-						     hw->aq.asq_last_status);
-		} else {
-			status = i40e_update_nvm_checksum(hw);
-			if (status) {
-				*perrno = hw->aq.asq_last_status ?
-				   i40e_aq_rc_to_posix(status,
-						       hw->aq.asq_last_status) :
-				   -EIO;
-				i40e_release_nvm(hw);
-			} else {
-				hw->nvm_release_on_done = true;
-				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
-			}
-		}
-		break;
-
-	case I40E_NVMUPD_EXEC_AQ:
-		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
-		break;
-
-	case I40E_NVMUPD_GET_AQ_RESULT:
-		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
-		break;
-
-	case I40E_NVMUPD_GET_AQ_EVENT:
-		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
-		break;
-
-	default:
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: bad cmd %s in init state\n",
-			   i40e_nvm_update_state_str[upd_cmd]);
-		status = I40E_ERR_NVM;
-		*perrno = -ESRCH;
-		break;
-	}
-	return status;
-}
-
-/**
- * i40e_nvmupd_state_reading - Handle NVM update state Reading
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * NVM ownership is already held.  Process legitimate commands and set any
- * change in state; reject all other commands.
- **/
-STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	enum i40e_nvmupd_cmd upd_cmd;
-
-	DEBUGFUNC("i40e_nvmupd_state_reading");
-
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
-
-	switch (upd_cmd) {
-	case I40E_NVMUPD_READ_SA:
-	case I40E_NVMUPD_READ_CON:
-		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
-		break;
-
-	case I40E_NVMUPD_READ_LCB:
-		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
-		i40e_release_nvm(hw);
-		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-		break;
-
-	default:
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: bad cmd %s in reading state.\n",
-			   i40e_nvm_update_state_str[upd_cmd]);
-		status = I40E_NOT_SUPPORTED;
-		*perrno = -ESRCH;
-		break;
-	}
-	return status;
-}
-
-/**
- * i40e_nvmupd_state_writing - Handle NVM update state Writing
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * NVM ownership is already held.  Process legitimate commands and set any
- * change in state; reject all other commands
- **/
-STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	enum i40e_nvmupd_cmd upd_cmd;
-	bool retry_attempt = false;
-
-	DEBUGFUNC("i40e_nvmupd_state_writing");
-
-	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
-
-retry:
-	switch (upd_cmd) {
-	case I40E_NVMUPD_WRITE_CON:
-		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-		if (!status) {
-			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
-		}
-		break;
-
-	case I40E_NVMUPD_WRITE_LCB:
-		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-		if (status) {
-			*perrno = hw->aq.asq_last_status ?
-				   i40e_aq_rc_to_posix(status,
-						       hw->aq.asq_last_status) :
-				   -EIO;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-		} else {
-			hw->nvm_release_on_done = true;
-			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
-		}
-		break;
-
-	case I40E_NVMUPD_CSUM_CON:
-		/* Assumes the caller has acquired the nvm */
-		status = i40e_update_nvm_checksum(hw);
-		if (status) {
-			*perrno = hw->aq.asq_last_status ?
-				   i40e_aq_rc_to_posix(status,
-						       hw->aq.asq_last_status) :
-				   -EIO;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-		} else {
-			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
-		}
-		break;
-
-	case I40E_NVMUPD_CSUM_LCB:
-		/* Assumes the caller has acquired the nvm */
-		status = i40e_update_nvm_checksum(hw);
-		if (status) {
-			*perrno = hw->aq.asq_last_status ?
-				   i40e_aq_rc_to_posix(status,
-						       hw->aq.asq_last_status) :
-				   -EIO;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-		} else {
-			hw->nvm_release_on_done = true;
-			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
-		}
-		break;
-
-	default:
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: bad cmd %s in writing state.\n",
-			   i40e_nvm_update_state_str[upd_cmd]);
-		status = I40E_NOT_SUPPORTED;
-		*perrno = -ESRCH;
-		break;
-	}
-
-	/* In some circumstances, a multi-write transaction takes longer
-	 * than the default 3 minute timeout on the write semaphore.  If
-	 * the write failed with an EBUSY status, this is likely the problem,
-	 * so here we try to reacquire the semaphore then retry the write.
-	 * We only do one retry, then give up.
-	 */
-	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
-	    !retry_attempt) {
-		enum i40e_status_code old_status = status;
-		u32 old_asq_status = hw->aq.asq_last_status;
-		u32 gtime;
-
-		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
-		if (gtime >= hw->nvm.hw_semaphore_timeout) {
-			i40e_debug(hw, I40E_DEBUG_ALL,
-				   "NVMUPD: write semaphore expired (%d >= %" PRIu64 "), retrying\n",
-				   gtime, hw->nvm.hw_semaphore_timeout);
-			i40e_release_nvm(hw);
-			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
-			if (status) {
-				i40e_debug(hw, I40E_DEBUG_ALL,
-					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
-					   hw->aq.asq_last_status);
-				status = old_status;
-				hw->aq.asq_last_status = old_asq_status;
-			} else {
-				retry_attempt = true;
-				goto retry;
-			}
-		}
-	}
-
-	return status;
-}
-
 /**
  * i40e_nvmupd_clear_wait_state - clear wait state on hw
  * @hw: pointer to the hardware structure
@@ -1374,421 +804,3 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
 		i40e_nvmupd_clear_wait_state(hw);
 	}
 }
-
-/**
- * i40e_nvmupd_validate_command - Validate given command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * Return one of the valid command types or I40E_NVMUPD_INVALID
- **/
-STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    int *perrno)
-{
-	enum i40e_nvmupd_cmd upd_cmd;
-	u8 module, transaction;
-
-	DEBUGFUNC("i40e_nvmupd_validate_command\n");
-
-	/* anything that doesn't match a recognized case is an error */
-	upd_cmd = I40E_NVMUPD_INVALID;
-
-	transaction = i40e_nvmupd_get_transaction(cmd->config);
-	module = i40e_nvmupd_get_module(cmd->config);
-
-	/* limits on data size */
-	if ((cmd->data_size < 1) ||
-	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_validate_command data_size %d\n",
-			   cmd->data_size);
-		*perrno = -EFAULT;
-		return I40E_NVMUPD_INVALID;
-	}
-
-	switch (cmd->command) {
-	case I40E_NVM_READ:
-		switch (transaction) {
-		case I40E_NVM_CON:
-			upd_cmd = I40E_NVMUPD_READ_CON;
-			break;
-		case I40E_NVM_SNT:
-			upd_cmd = I40E_NVMUPD_READ_SNT;
-			break;
-		case I40E_NVM_LCB:
-			upd_cmd = I40E_NVMUPD_READ_LCB;
-			break;
-		case I40E_NVM_SA:
-			upd_cmd = I40E_NVMUPD_READ_SA;
-			break;
-		case I40E_NVM_EXEC:
-			switch (module) {
-			case I40E_NVM_EXEC_GET_AQ_RESULT:
-				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
-				break;
-			case I40E_NVM_EXEC_FEATURES:
-				upd_cmd = I40E_NVMUPD_FEATURES;
-				break;
-			case I40E_NVM_EXEC_STATUS:
-				upd_cmd = I40E_NVMUPD_STATUS;
-				break;
-			default:
-				*perrno = -EFAULT;
-				return I40E_NVMUPD_INVALID;
-			}
-			break;
-		case I40E_NVM_AQE:
-			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
-			break;
-		}
-		break;
-
-	case I40E_NVM_WRITE:
-		switch (transaction) {
-		case I40E_NVM_CON:
-			upd_cmd = I40E_NVMUPD_WRITE_CON;
-			break;
-		case I40E_NVM_SNT:
-			upd_cmd = I40E_NVMUPD_WRITE_SNT;
-			break;
-		case I40E_NVM_LCB:
-			upd_cmd = I40E_NVMUPD_WRITE_LCB;
-			break;
-		case I40E_NVM_SA:
-			upd_cmd = I40E_NVMUPD_WRITE_SA;
-			break;
-		case I40E_NVM_ERA:
-			upd_cmd = I40E_NVMUPD_WRITE_ERA;
-			break;
-		case I40E_NVM_CSUM:
-			upd_cmd = I40E_NVMUPD_CSUM_CON;
-			break;
-		case (I40E_NVM_CSUM|I40E_NVM_SA):
-			upd_cmd = I40E_NVMUPD_CSUM_SA;
-			break;
-		case (I40E_NVM_CSUM|I40E_NVM_LCB):
-			upd_cmd = I40E_NVMUPD_CSUM_LCB;
-			break;
-		case I40E_NVM_EXEC:
-			if (module == 0)
-				upd_cmd = I40E_NVMUPD_EXEC_AQ;
-			break;
-		}
-		break;
-	}
-
-	return upd_cmd;
-}
-
-/**
- * i40e_nvmupd_exec_aq - Run an AQ command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
-						 struct i40e_nvm_access *cmd,
-						 u8 *bytes, int *perrno)
-{
-	struct i40e_asq_cmd_details cmd_details;
-	enum i40e_status_code status;
-	struct i40e_aq_desc *aq_desc;
-	u32 buff_size = 0;
-	u8 *buff = NULL;
-	u32 aq_desc_len;
-	u32 aq_data_len;
-
-	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-	if (cmd->offset == 0xffff)
-		return I40E_SUCCESS;
-
-	memset(&cmd_details, 0, sizeof(cmd_details));
-	cmd_details.wb_desc = &hw->nvm_wb_desc;
-
-	aq_desc_len = sizeof(struct i40e_aq_desc);
-	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
-
-	/* get the aq descriptor */
-	if (cmd->data_size < aq_desc_len) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
-			   cmd->data_size, aq_desc_len);
-		*perrno = -EINVAL;
-		return I40E_ERR_PARAM;
-	}
-	aq_desc = (struct i40e_aq_desc *)bytes;
-
-	/* if data buffer needed, make sure it's ready */
-	aq_data_len = cmd->data_size - aq_desc_len;
-	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
-	if (buff_size) {
-		if (!hw->nvm_buff.va) {
-			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
-							hw->aq.asq_buf_size);
-			if (status)
-				i40e_debug(hw, I40E_DEBUG_NVM,
-					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
-					   status);
-		}
-
-		if (hw->nvm_buff.va) {
-			buff = hw->nvm_buff.va;
-			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
-				I40E_NONDMA_TO_NONDMA);
-		}
-	}
-
-	if (cmd->offset)
-		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
-
-	/* and away we go! */
-	status = i40e_asq_send_command(hw, aq_desc, buff,
-				       buff_size, &cmd_details);
-	if (status) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
-			   i40e_stat_str(hw, status),
-			   i40e_aq_str(hw, hw->aq.asq_last_status));
-		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
-		return status;
-	}
-
-	/* should we wait for a followup event? */
-	if (cmd->offset) {
-		hw->nvm_wait_opcode = cmd->offset;
-		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
-	}
-
-	return status;
-}
-
-/**
- * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno)
-{
-	u32 aq_total_len;
-	u32 aq_desc_len;
-	int remainder;
-	u8 *buff;
-
-	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-
-	aq_desc_len = sizeof(struct i40e_aq_desc);
-	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
-
-	/* check offset range */
-	if (cmd->offset > aq_total_len) {
-		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
-			   __func__, cmd->offset, aq_total_len);
-		*perrno = -EINVAL;
-		return I40E_ERR_PARAM;
-	}
-
-	/* check copylength range */
-	if (cmd->data_size > (aq_total_len - cmd->offset)) {
-		int new_len = aq_total_len - cmd->offset;
-
-		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
-			   __func__, cmd->data_size, new_len);
-		cmd->data_size = new_len;
-	}
-
-	remainder = cmd->data_size;
-	if (cmd->offset < aq_desc_len) {
-		u32 len = aq_desc_len - cmd->offset;
-
-		len = min(len, cmd->data_size);
-		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
-			   __func__, cmd->offset, cmd->offset + len);
-
-		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
-		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
-
-		bytes += len;
-		remainder -= len;
-		buff = hw->nvm_buff.va;
-	} else {
-		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
-	}
-
-	if (remainder > 0) {
-		int start_byte = buff - (u8 *)hw->nvm_buff.va;
-
-		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
-			   __func__, start_byte, start_byte + remainder);
-		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
-	}
-
-	return I40E_SUCCESS;
-}
-
-/**
- * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
-						    struct i40e_nvm_access *cmd,
-						    u8 *bytes, int *perrno)
-{
-	u32 aq_total_len;
-	u32 aq_desc_len;
-
-	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-
-	aq_desc_len = sizeof(struct i40e_aq_desc);
-	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
-
-	/* check copylength range */
-	if (cmd->data_size > aq_total_len) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "%s: copy length %d too big, trimming to %d\n",
-			   __func__, cmd->data_size, aq_total_len);
-		cmd->data_size = aq_total_len;
-	}
-
-	i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
-		    I40E_NONDMA_TO_NONDMA);
-
-	return I40E_SUCCESS;
-}
-
-/**
- * i40e_nvmupd_nvm_read - Read NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
-						  struct i40e_nvm_access *cmd,
-						  u8 *bytes, int *perrno)
-{
-	struct i40e_asq_cmd_details cmd_details;
-	enum i40e_status_code status;
-	u8 module, transaction;
-	bool last;
-
-	transaction = i40e_nvmupd_get_transaction(cmd->config);
-	module = i40e_nvmupd_get_module(cmd->config);
-	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
-
-	memset(&cmd_details, 0, sizeof(cmd_details));
-	cmd_details.wb_desc = &hw->nvm_wb_desc;
-
-	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
-				  bytes, last, &cmd_details);
-	if (status) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
-			   module, cmd->offset, cmd->data_size);
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_nvm_read status %d aq %d\n",
-			   status, hw->aq.asq_last_status);
-		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
-	}
-
-	return status;
-}
-
-/**
- * i40e_nvmupd_nvm_erase - Erase an NVM module
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
- **/
-STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
-						   struct i40e_nvm_access *cmd,
-						   int *perrno)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	struct i40e_asq_cmd_details cmd_details;
-	u8 module, transaction;
-	bool last;
-
-	transaction = i40e_nvmupd_get_transaction(cmd->config);
-	module = i40e_nvmupd_get_module(cmd->config);
-	last = (transaction & I40E_NVM_LCB);
-
-	memset(&cmd_details, 0, sizeof(cmd_details));
-	cmd_details.wb_desc = &hw->nvm_wb_desc;
-
-	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
-				   last, &cmd_details);
-	if (status) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
-			   module, cmd->offset, cmd->data_size);
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
-			   status, hw->aq.asq_last_status);
-		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
-	}
-
-	return status;
-}
-
-/**
- * i40e_nvmupd_nvm_write - Write NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
- **/
-STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
-						   struct i40e_nvm_access *cmd,
-						   u8 *bytes, int *perrno)
-{
-	enum i40e_status_code status = I40E_SUCCESS;
-	struct i40e_asq_cmd_details cmd_details;
-	u8 module, transaction;
-	u8 preservation_flags;
-	bool last;
-
-	transaction = i40e_nvmupd_get_transaction(cmd->config);
-	module = i40e_nvmupd_get_module(cmd->config);
-	last = (transaction & I40E_NVM_LCB);
-	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
-
-	memset(&cmd_details, 0, sizeof(cmd_details));
-	cmd_details.wb_desc = &hw->nvm_wb_desc;
-
-	status = i40e_aq_update_nvm(hw, module, cmd->offset,
-				    (u16)cmd->data_size, bytes, last,
-				    preservation_flags, &cmd_details);
-	if (status) {
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
-			   module, cmd->offset, cmd->data_size);
-		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "i40e_nvmupd_nvm_write status %d aq %d\n",
-			   status, hw->aq.asq_last_status);
-		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
-	}
-
-	return status;
-}
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 124222e476..73ec0e340a 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -67,27 +67,12 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
-enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
-				       u16 led_addr, u32 mode);
-enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
-				       u16 *val);
-enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
-					      u32 time, u32 interval);
 enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
 				       u32 *reg_val);
 enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
 				       u32 reg_val);
-enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw,
-					      struct i40e_hw_port_stats *stats);
 enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter,
 					    u32 *rx_counter, bool *is_clear);
-enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw,
-					   bool offset_loaded, u64 *tx_offset,
-					   u64 *tx_stat, u64 *rx_offset,
-					   u64 *rx_stat);
-enum i40e_status_code i40e_get_lpi_duration(struct i40e_hw *hw,
-					    struct i40e_hw_port_stats *stat,
-					    u64 *tx_duration, u64 *rx_duration);
 /* admin send queue commands */
 
 enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
@@ -101,12 +86,6 @@ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
 enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
 				u32  reg_addr, u64 *reg_val,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 			bool qualified_modules, bool report_init,
 			struct i40e_aq_get_phy_abilities_resp *abilities,
@@ -122,27 +101,13 @@ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
 				u16 max_frame_size, bool crc_en, u16 pacing,
 				bool auto_drop_blocking_packets,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
-				u64 *advt_reg,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
-				u64 *advt_reg,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
 				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
 			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-		bool enable_link, struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
 				bool enable_lse, struct i40e_link_status *link,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
-				u64 advt_reg,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
-				struct i40e_driver_version *dv,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
 				struct i40e_vsi_context *vsi_ctx,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -154,18 +119,6 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 		bool rx_only_promisc);
 enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
 		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
-				u16 seid, bool set,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
-				u16 seid, bool enable, u16 vid,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
-				u16 seid, bool enable, u16 vid,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
-				u16 seid, bool enable, u16 vid,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
 				u16 seid, bool enable,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -191,15 +144,6 @@ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
 enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_remove_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
-			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
-			struct i40e_asq_cmd_details *cmd_details,
-			u16 *rule_id, u16 *rules_used, u16 *rules_free);
-enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
-			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
-			struct i40e_asq_cmd_details *cmd_details,
-			u16 *rules_used, u16 *rules_free);
-
 enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_add_remove_vlan_element_data *v_list,
 			u8 count, struct i40e_asq_cmd_details *cmd_details);
@@ -232,21 +176,6 @@ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
 enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
 				u32 offset, u16 length, bool last_command,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
-				u8 cmd_flags, u32 field_id, void *data,
-				u16 buf_size, u16 *element_count,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
-				u8 cmd_flags, void *data, u16 buf_size,
-				u16 element_count,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
-i40e_aq_min_rollback_rev_update(struct i40e_hw *hw, u8 mode, u8 module,
-				u32 min_rrev,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
-				void *buff, u16 buff_size,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
 				void *buff, u16 buff_size, u16 *data_size,
 				enum i40e_admin_queue_opc list_type_opc,
@@ -255,13 +184,6 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 				u32 offset, u16 length, void *data,
 				bool last_command, u8 preservation_flags,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw,
-				u8 rearrange_nvm,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
-i40e_aq_nvm_update_in_process(struct i40e_hw *hw,
-			      bool update_flow_state,
-			      struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
 				u8 mib_type, void *buff, u16 buff_size,
 				u16 *local_len, u16 *remote_len,
@@ -272,63 +194,25 @@ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
 enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 				bool enable_update,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
-		     struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 				bool persist,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
-						 bool dcb_enable,
-						 struct i40e_asq_cmd_details
-						 *cmd_details);
 enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
 				bool persist,
 				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
 				void *buff, u16 buff_size,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
-				bool start_agent,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
 				u16 udp_port, u8 protocol_index,
 				u8 *filter_index,
 				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
-			u8 *num_entries,
-			struct i40e_aqc_switch_resource_alloc_element_resp *buf,
-			u16 count,
-			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
-				       u16 mac_seid, u16 vsi_seid,
-				       u16 *ret_seid);
-enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
-				u16 vsi_seid, u16 tag, u16 queue_num,
-				u16 *tags_used, u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
-				u16 tag, u16 *tags_used, u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
 				u16 etag, u8 num_tags_in_buf, void *buf,
 				u16 *tags_used, u16 *tags_free,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
-				u16 etag, u16 *tags_used, u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
-				u16 old_tag, u16 new_tag, u16 *tags_used,
-				u16 *tags_free,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
-				u16 vlan_id, u16 *stat_index,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
-				u16 vlan_id, u16 stat_index,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
 				u16 bad_frame_vsi, bool save_bad_pac,
 				bool pad_short_pac, bool double_vlan,
@@ -341,22 +225,10 @@ enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
 enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
 				u16 seid, u16 credit, u8 max_credit,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
-				u8 tcmap, bool request, u8 *tcmap_ret,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
-	struct i40e_hw *hw, u16 seid,
-	struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
-	struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
 			u16 seid,
 			struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
 			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
-				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
-				u16 seid, u16 credit, u8 max_bw,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
 			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
 			struct i40e_asq_cmd_details *cmd_details);
@@ -381,16 +253,10 @@ enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
 		u16 seid,
 		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
 		struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
-		u16 seid,
-		struct i40e_aqc_query_port_ets_config_resp *bw_data,
-		struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
 		u16 seid,
 		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
 		struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
-				struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code
 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
 			     struct i40e_aqc_cloud_filters_element_bb *filters,
@@ -415,38 +281,15 @@ enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
 enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
 				u32 reg_addr0, u32 *reg_val0,
 				u32 reg_addr1, u32 *reg_val1);
-enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
-				u32 addr, u32 dw_count, void *buffer);
-enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
-				u32 reg_addr0, u32 reg_val0,
-				u32 reg_addr1, u32 reg_val1);
-enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
-				u32 addr, u32 dw_count, void *buffer);
-enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
-enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
-				u8 bios_mode, bool *reset_needed);
-enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
-				u8 oem_mode);
 
 /* i40e_common */
 enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
 enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
 void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
-enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
 enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
 enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
-		u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
-enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
-			struct i40e_aqc_configure_partition_bw_data *bw_data,
-			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
-					    u32 pba_num_size);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
-enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
 /* prototype for functions used for NVM access */
 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
@@ -466,24 +309,14 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 					   u16 *data);
 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 					     u16 *words, u16 *data);
-enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
-					  void *data);
-enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
-					    u32 offset, u16 words, void *data);
 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
 						 u16 *checksum);
-enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
-					  struct i40e_nvm_access *cmd,
-					  u8 *bytes, int *);
 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
 				  struct i40e_aq_desc *desc);
 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
-void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 #endif /* PF_DRIVER */
-enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable);
-
 enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -551,13 +384,6 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 				u16 vsi_seid, u16 queue, bool is_add,
 				struct i40e_control_filter_stats *stats,
 				struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
-				u8 table_id, u32 start_index, u16 buff_size,
-				void *buff, u16 *ret_buff_size,
-				u8 *ret_next_table, u32 *ret_next_index,
-				struct i40e_asq_cmd_details *cmd_details);
-void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
-						    u16 vsi_seid);
 enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
 				u32 reg_addr, u32 *reg_val,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -589,24 +415,6 @@ enum i40e_status_code
 i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode,
 			 u32 *cmd_status, u32 *data0, u32 *data1,
 			 struct i40e_asq_cmd_details *cmd_details);
-
-enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
-			struct i40e_aqc_arp_proxy_data *proxy_config,
-			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
-			struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
-			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
-			u8 filter_index,
-			struct i40e_aqc_set_wol_filter_data *filter,
-			bool set_filter, bool no_wol_tco,
-			bool filter_valid, bool no_wol_tco_valid,
-			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
-			u16 *wake_reason,
-			struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
-			struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
 					u16 reg, u8 phy_addr, u16 *value);
 enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
@@ -615,13 +423,7 @@ enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
 				u8 page, u16 reg, u8 phy_addr, u16 *value);
 enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
 				u8 page, u16 reg, u8 phy_addr, u16 value);
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
-				u8 page, u16 reg, u8 phy_addr, u16 *value);
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
-				u8 page, u16 reg, u8 phy_addr, u16 value);
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
-					      u32 time, u32 interval);
 enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
 					u16 buff_size, u32 track_id,
 					u32 *error_offset, u32 *error_info,
@@ -643,8 +445,4 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
 enum i40e_status_code
 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
 		      u32 track_id);
-enum i40e_status_code
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
-		       struct i40e_profile_segment *profile,
-		       u8 *profile_info_sec, u32 track_id);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/i40e/base/meson.build b/drivers/net/i40e/base/meson.build
index 8bc6a0fa0b..1a07449fa5 100644
--- a/drivers/net/i40e/base/meson.build
+++ b/drivers/net/i40e/base/meson.build
@@ -5,7 +5,6 @@ sources = [
 	'i40e_adminq.c',
 	'i40e_common.c',
 	'i40e_dcb.c',
-	'i40e_diag.c',
 	'i40e_hmc.c',
 	'i40e_lan_hmc.c',
 	'i40e_nvm.c'
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 6d5912d8c1..db3dbbda48 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -293,8 +293,6 @@ int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 		     bool rx, bool on);
 int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 		     bool rx, bool on);
-int iavf_enable_queues(struct iavf_adapter *adapter);
-int iavf_enable_queues_lv(struct iavf_adapter *adapter);
 int iavf_disable_queues(struct iavf_adapter *adapter);
 int iavf_disable_queues_lv(struct iavf_adapter *adapter);
 int iavf_configure_rss_lut(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 33d03af653..badcd312cc 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -521,34 +521,6 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	return 0;
 }
 
-int
-iavf_enable_queues(struct iavf_adapter *adapter)
-{
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct virtchnl_queue_select queue_select;
-	struct iavf_cmd_info args;
-	int err;
-
-	memset(&queue_select, 0, sizeof(queue_select));
-	queue_select.vsi_id = vf->vsi_res->vsi_id;
-
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
-
-	args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
-	args.in_args = (u8 *)&queue_select;
-	args.in_args_size = sizeof(queue_select);
-	args.out_buffer = vf->aq_resp;
-	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
-	if (err) {
-		PMD_DRV_LOG(ERR,
-			    "Failed to execute command of OP_ENABLE_QUEUES");
-		return err;
-	}
-	return 0;
-}
-
 int
 iavf_disable_queues(struct iavf_adapter *adapter)
 {
@@ -608,50 +580,6 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	return err;
 }
 
-int
-iavf_enable_queues_lv(struct iavf_adapter *adapter)
-{
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct virtchnl_del_ena_dis_queues *queue_select;
-	struct virtchnl_queue_chunk *queue_chunk;
-	struct iavf_cmd_info args;
-	int err, len;
-
-	len = sizeof(struct virtchnl_del_ena_dis_queues) +
-		  sizeof(struct virtchnl_queue_chunk) *
-		  (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
-	queue_select = rte_zmalloc("queue_select", len, 0);
-	if (!queue_select)
-		return -ENOMEM;
-
-	queue_chunk = queue_select->chunks.chunks;
-	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
-	queue_select->vport_id = vf->vsi_res->vsi_id;
-
-	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
-	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
-	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
-
-	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
-	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
-	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
-
-	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
-	args.in_args = (u8 *)queue_select;
-	args.in_args_size = len;
-	args.out_buffer = vf->aq_resp;
-	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
-	if (err) {
-		PMD_DRV_LOG(ERR,
-			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
-		return err;
-	}
-	return 0;
-}
-
 int
 iavf_disable_queues_lv(struct iavf_adapter *adapter)
 {
diff --git a/drivers/net/ice/base/ice_acl.c b/drivers/net/ice/base/ice_acl.c
index 763cd2af9e..0f73f4a0e7 100644
--- a/drivers/net/ice/base/ice_acl.c
+++ b/drivers/net/ice/base/ice_acl.c
@@ -115,79 +115,6 @@ ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
 				entry_idx, buf, cd);
 }
 
-/**
- * ice_aq_query_acl_entry - query ACL entry
- * @hw: pointer to the HW struct
- * @tcam_idx: Updated TCAM block index
- * @entry_idx: updated entry index
- * @buf: address of indirect data buffer
- * @cd: pointer to command details structure or NULL
- *
- * Query ACL entry (direct 0x0C24)
- *
- * NOTE: Caller of this API to parse 'buf' appropriately since it contains
- * response (key and key invert)
- */
-enum ice_status
-ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
-		       struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd)
-{
-	return ice_aq_acl_entry(hw, ice_aqc_opc_query_acl_entry, tcam_idx,
-				entry_idx, buf, cd);
-}
-
-/* Helper function to alloc/dealloc ACL action pair */
-static enum ice_status
-ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id,
-		   struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_acl_tbl_actpair *cmd;
-	struct ice_aq_desc desc;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
-	cmd = &desc.params.tbl_actpair;
-	cmd->alloc_id = CPU_TO_LE16(alloc_id);
-
-	return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd);
-}
-
-/**
- * ice_aq_alloc_actpair - allocate actionpair for specified ACL table
- * @hw: pointer to the HW struct
- * @alloc_id: allocation ID of the table being associated with the actionpair
- * @buf: address of indirect data buffer
- * @cd: pointer to command details structure or NULL
- *
- * Allocate ACL actionpair (direct 0x0C12)
- *
- * This command doesn't need and doesn't have its own command buffer
- * but for response format is as specified in 'struct ice_aqc_acl_generic'
- */
-enum ice_status
-ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
-		     struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
-{
-	return ice_aq_actpair_a_d(hw, ice_aqc_opc_alloc_acl_actpair, alloc_id,
-				  buf, cd);
-}
-
-/**
- * ice_aq_dealloc_actpair - dealloc actionpair for specified ACL table
- * @hw: pointer to the HW struct
- * @alloc_id: allocation ID of the table being associated with the actionpair
- * @buf: address of indirect data buffer
- * @cd: pointer to command details structure or NULL
- *
- *  Deallocate ACL actionpair (direct 0x0C13)
- */
-enum ice_status
-ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
-		       struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd)
-{
-	return ice_aq_actpair_a_d(hw, ice_aqc_opc_dealloc_acl_actpair, alloc_id,
-				  buf, cd);
-}
-
 /* Helper function to program/query ACL action pair */
 static enum ice_status
 ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx,
@@ -227,41 +154,6 @@ ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
 				  act_mem_idx, act_entry_idx, buf, cd);
 }
 
-/**
- * ice_aq_query_actpair - query ACL actionpair
- * @hw: pointer to the HW struct
- * @act_mem_idx: action memory index to program/update/query
- * @act_entry_idx: the entry index in action memory to be programmed/updated
- * @buf: address of indirect data buffer
- * @cd: pointer to command details structure or NULL
- *
- * Query ACL actionpair (indirect 0x0C25)
- */
-enum ice_status
-ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
-		     struct ice_aqc_actpair *buf, struct ice_sq_cd *cd)
-{
-	return ice_aq_actpair_p_q(hw, ice_aqc_opc_query_acl_actpair,
-				  act_mem_idx, act_entry_idx, buf, cd);
-}
-
-/**
- * ice_aq_dealloc_acl_res - deallocate ACL resources
- * @hw: pointer to the HW struct
- * @cd: pointer to command details structure or NULL
- *
- * De-allocate ACL resources (direct 0x0C1A). Used by SW to release all the
- * resources allocated for it using a single command
- */
-enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd)
-{
-	struct ice_aq_desc desc;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_res);
-
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
 /**
  * ice_acl_prof_aq_send - sending ACL profile AQ commands
  * @hw: pointer to the HW struct
diff --git a/drivers/net/ice/base/ice_acl.h b/drivers/net/ice/base/ice_acl.h
index 21aa5088f7..ef5a8245a3 100644
--- a/drivers/net/ice/base/ice_acl.h
+++ b/drivers/net/ice/base/ice_acl.h
@@ -142,22 +142,9 @@ enum ice_status
 ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
 			 struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd);
 enum ice_status
-ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx,
-		       struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id,
-		     struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id,
-		       struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd);
-enum ice_status
 ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
 		       struct ice_aqc_actpair *buf, struct ice_sq_cd *cd);
 enum ice_status
-ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx,
-		     struct ice_aqc_actpair *buf, struct ice_sq_cd *cd);
-enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
 ice_prgm_acl_prof_xtrct(struct ice_hw *hw, u8 prof_id,
 			struct ice_aqc_acl_prof_generic_frmt *buf,
 			struct ice_sq_cd *cd);
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 304e55e210..b6d80fd383 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -844,36 +844,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	return status;
 }
 
-/**
- * ice_deinit_hw - unroll initialization operations done by ice_init_hw
- * @hw: pointer to the hardware structure
- *
- * This should be called only during nominal operation, not as a result of
- * ice_init_hw() failing since ice_init_hw() will take care of unrolling
- * applicable initializations if it fails for any reason.
- */
-void ice_deinit_hw(struct ice_hw *hw)
-{
-	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
-	ice_cleanup_fltr_mgmt_struct(hw);
-
-	ice_sched_cleanup_all(hw);
-	ice_sched_clear_agg(hw);
-	ice_free_seg(hw);
-	ice_free_hw_tbls(hw);
-	ice_destroy_lock(&hw->tnl_lock);
-
-	if (hw->port_info) {
-		ice_free(hw, hw->port_info);
-		hw->port_info = NULL;
-	}
-
-	ice_destroy_all_ctrlq(hw);
-
-	/* Clear VSI contexts if not already cleared */
-	ice_clear_all_vsi_ctx(hw);
-}
-
 /**
  * ice_check_reset - Check to see if a global reset is complete
  * @hw: pointer to the hardware structure
@@ -1157,38 +1127,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
 	{ 0 }
 };
 
-/**
- * ice_copy_tx_cmpltnq_ctx_to_hw
- * @hw: pointer to the hardware structure
- * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
- * @tx_cmpltnq_index: the index of the completion queue
- *
- * Copies Tx completion queue context from dense structure to HW register space
- */
-static enum ice_status
-ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
-			      u32 tx_cmpltnq_index)
-{
-	u8 i;
-
-	if (!ice_tx_cmpltnq_ctx)
-		return ICE_ERR_BAD_PTR;
-
-	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
-		return ICE_ERR_PARAM;
-
-	/* Copy each dword separately to HW */
-	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
-		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
-		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
-
-		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
-			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
-	}
-
-	return ICE_SUCCESS;
-}
-
 /* LAN Tx Completion Queue Context */
 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
 				       /* Field			Width   LSB */
@@ -1205,80 +1143,6 @@ static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
 	{ 0 }
 };
 
-/**
- * ice_write_tx_cmpltnq_ctx
- * @hw: pointer to the hardware structure
- * @tx_cmpltnq_ctx: pointer to the completion queue context
- * @tx_cmpltnq_index: the index of the completion queue
- *
- * Converts completion queue context from sparse to dense structure and then
- * writes it to HW register space
- */
-enum ice_status
-ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
-			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
-			 u32 tx_cmpltnq_index)
-{
-	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
-
-	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
-	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
-}
-
-/**
- * ice_clear_tx_cmpltnq_ctx
- * @hw: pointer to the hardware structure
- * @tx_cmpltnq_index: the index of the completion queue to clear
- *
- * Clears Tx completion queue context in HW register space
- */
-enum ice_status
-ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
-{
-	u8 i;
-
-	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
-		return ICE_ERR_PARAM;
-
-	/* Clear each dword register separately */
-	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
-		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
-
-	return ICE_SUCCESS;
-}
-
-/**
- * ice_copy_tx_drbell_q_ctx_to_hw
- * @hw: pointer to the hardware structure
- * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
- * @tx_drbell_q_index: the index of the doorbell queue
- *
- * Copies doorbell queue context from dense structure to HW register space
- */
-static enum ice_status
-ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
-			       u32 tx_drbell_q_index)
-{
-	u8 i;
-
-	if (!ice_tx_drbell_q_ctx)
-		return ICE_ERR_BAD_PTR;
-
-	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
-		return ICE_ERR_PARAM;
-
-	/* Copy each dword separately to HW */
-	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
-		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
-		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
-
-		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
-			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
-	}
-
-	return ICE_SUCCESS;
-}
-
 /* LAN Tx Doorbell Queue Context info */
 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
 					/* Field		Width   LSB */
@@ -1296,49 +1160,6 @@ static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
 	{ 0 }
 };
 
-/**
- * ice_write_tx_drbell_q_ctx
- * @hw: pointer to the hardware structure
- * @tx_drbell_q_ctx: pointer to the doorbell queue context
- * @tx_drbell_q_index: the index of the doorbell queue
- *
- * Converts doorbell queue context from sparse to dense structure and then
- * writes it to HW register space
- */
-enum ice_status
-ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
-			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
-			  u32 tx_drbell_q_index)
-{
-	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
-
-	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
-		    ice_tx_drbell_q_ctx_info);
-	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
-}
-
-/**
- * ice_clear_tx_drbell_q_ctx
- * @hw: pointer to the hardware structure
- * @tx_drbell_q_index: the index of the doorbell queue to clear
- *
- * Clears doorbell queue context in HW register space
- */
-enum ice_status
-ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
-{
-	u8 i;
-
-	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
-		return ICE_ERR_PARAM;
-
-	/* Clear each dword register separately */
-	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
-		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
-
-	return ICE_SUCCESS;
-}
-
 /* FW Admin Queue command wrappers */
 
 /**
@@ -2238,69 +2059,6 @@ ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
 	return status;
 }
 
-/**
- * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
- * @hw: pointer to the hardware structure
- */
-void ice_set_safe_mode_caps(struct ice_hw *hw)
-{
-	struct ice_hw_func_caps *func_caps = &hw->func_caps;
-	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
-	struct ice_hw_common_caps cached_caps;
-	u32 num_funcs;
-
-	/* cache some func_caps values that should be restored after memset */
-	cached_caps = func_caps->common_cap;
-
-	/* unset func capabilities */
-	memset(func_caps, 0, sizeof(*func_caps));
-
-#define ICE_RESTORE_FUNC_CAP(name) \
-	func_caps->common_cap.name = cached_caps.name
-
-	/* restore cached values */
-	ICE_RESTORE_FUNC_CAP(valid_functions);
-	ICE_RESTORE_FUNC_CAP(txq_first_id);
-	ICE_RESTORE_FUNC_CAP(rxq_first_id);
-	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
-	ICE_RESTORE_FUNC_CAP(max_mtu);
-	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
-
-	/* one Tx and one Rx queue in safe mode */
-	func_caps->common_cap.num_rxq = 1;
-	func_caps->common_cap.num_txq = 1;
-
-	/* two MSIX vectors, one for traffic and one for misc causes */
-	func_caps->common_cap.num_msix_vectors = 2;
-	func_caps->guar_num_vsi = 1;
-
-	/* cache some dev_caps values that should be restored after memset */
-	cached_caps = dev_caps->common_cap;
-	num_funcs = dev_caps->num_funcs;
-
-	/* unset dev capabilities */
-	memset(dev_caps, 0, sizeof(*dev_caps));
-
-#define ICE_RESTORE_DEV_CAP(name) \
-	dev_caps->common_cap.name = cached_caps.name
-
-	/* restore cached values */
-	ICE_RESTORE_DEV_CAP(valid_functions);
-	ICE_RESTORE_DEV_CAP(txq_first_id);
-	ICE_RESTORE_DEV_CAP(rxq_first_id);
-	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
-	ICE_RESTORE_DEV_CAP(max_mtu);
-	ICE_RESTORE_DEV_CAP(nvm_unified_update);
-	dev_caps->num_funcs = num_funcs;
-
-	/* one Tx and one Rx queue per function in safe mode */
-	dev_caps->common_cap.num_rxq = num_funcs;
-	dev_caps->common_cap.num_txq = num_funcs;
-
-	/* two MSIX vectors per function */
-	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
-}
-
 /**
  * ice_get_caps - get info about the HW
  * @hw: pointer to the hardware structure
@@ -2370,182 +2128,6 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
 		ice_aq_clear_pxe_mode(hw);
 }
 
-/**
- * ice_get_link_speed_based_on_phy_type - returns link speed
- * @phy_type_low: lower part of phy_type
- * @phy_type_high: higher part of phy_type
- *
- * This helper function will convert an entry in PHY type structure
- * [phy_type_low, phy_type_high] to its corresponding link speed.
- * Note: In the structure of [phy_type_low, phy_type_high], there should
- * be one bit set, as this function will convert one PHY type to its
- * speed.
- * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
- */
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
-{
-	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
-	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
-
-	switch (phy_type_low) {
-	case ICE_PHY_TYPE_LOW_100BASE_TX:
-	case ICE_PHY_TYPE_LOW_100M_SGMII:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
-		break;
-	case ICE_PHY_TYPE_LOW_1000BASE_T:
-	case ICE_PHY_TYPE_LOW_1000BASE_SX:
-	case ICE_PHY_TYPE_LOW_1000BASE_LX:
-	case ICE_PHY_TYPE_LOW_1000BASE_KX:
-	case ICE_PHY_TYPE_LOW_1G_SGMII:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
-		break;
-	case ICE_PHY_TYPE_LOW_2500BASE_T:
-	case ICE_PHY_TYPE_LOW_2500BASE_X:
-	case ICE_PHY_TYPE_LOW_2500BASE_KX:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
-		break;
-	case ICE_PHY_TYPE_LOW_5GBASE_T:
-	case ICE_PHY_TYPE_LOW_5GBASE_KR:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
-		break;
-	case ICE_PHY_TYPE_LOW_10GBASE_T:
-	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
-	case ICE_PHY_TYPE_LOW_10GBASE_SR:
-	case ICE_PHY_TYPE_LOW_10GBASE_LR:
-	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
-	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
-		break;
-	case ICE_PHY_TYPE_LOW_25GBASE_T:
-	case ICE_PHY_TYPE_LOW_25GBASE_CR:
-	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
-	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
-	case ICE_PHY_TYPE_LOW_25GBASE_SR:
-	case ICE_PHY_TYPE_LOW_25GBASE_LR:
-	case ICE_PHY_TYPE_LOW_25GBASE_KR:
-	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
-	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
-	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
-		break;
-	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
-	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
-	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
-	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
-	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_40G_XLAUI:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
-		break;
-	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
-	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_50G_LAUI2:
-	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_50G_AUI2:
-	case ICE_PHY_TYPE_LOW_50GBASE_CP:
-	case ICE_PHY_TYPE_LOW_50GBASE_SR:
-	case ICE_PHY_TYPE_LOW_50GBASE_FR:
-	case ICE_PHY_TYPE_LOW_50GBASE_LR:
-	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
-	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_50G_AUI1:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
-		break;
-	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
-	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
-	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
-	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
-	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_100G_CAUI4:
-	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_100G_AUI4:
-	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
-	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
-	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
-	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
-	case ICE_PHY_TYPE_LOW_100GBASE_DR:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
-		break;
-	default:
-		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
-		break;
-	}
-
-	switch (phy_type_high) {
-	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
-	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
-	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
-	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
-	case ICE_PHY_TYPE_HIGH_100G_AUI2:
-		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
-		break;
-	default:
-		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
-		break;
-	}
-
-	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
-	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
-		return ICE_AQ_LINK_SPEED_UNKNOWN;
-	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
-		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
-		return ICE_AQ_LINK_SPEED_UNKNOWN;
-	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
-		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
-		return speed_phy_type_low;
-	else
-		return speed_phy_type_high;
-}
-
-/**
- * ice_update_phy_type
- * @phy_type_low: pointer to the lower part of phy_type
- * @phy_type_high: pointer to the higher part of phy_type
- * @link_speeds_bitmap: targeted link speeds bitmap
- *
- * Note: For the link_speeds_bitmap structure, you can check it at
- * [ice_aqc_get_link_status->link_speed]. Caller can pass in
- * link_speeds_bitmap include multiple speeds.
- *
- * Each entry in this [phy_type_low, phy_type_high] structure will
- * present a certain link speed. This helper function will turn on bits
- * in [phy_type_low, phy_type_high] structure based on the value of
- * link_speeds_bitmap input parameter.
- */
-void
-ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
-		    u16 link_speeds_bitmap)
-{
-	u64 pt_high;
-	u64 pt_low;
-	int index;
-	u16 speed;
-
-	/* We first check with low part of phy_type */
-	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
-		pt_low = BIT_ULL(index);
-		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
-
-		if (link_speeds_bitmap & speed)
-			*phy_type_low |= BIT_ULL(index);
-	}
-
-	/* We then check with high part of phy_type */
-	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
-		pt_high = BIT_ULL(index);
-		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
-
-		if (link_speeds_bitmap & speed)
-			*phy_type_high |= BIT_ULL(index);
-	}
-}
-
 /**
  * ice_aq_set_phy_cfg
  * @hw: pointer to the HW struct
@@ -2642,787 +2224,279 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
 }
 
 /**
- * ice_cache_phy_user_req
+ * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
  * @pi: port information structure
- * @cache_data: PHY logging data
- * @cache_mode: PHY logging mode
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
  *
- * Log the user request on (FC, FEC, SPEED) for later user.
+ * Helper function to copy AQC PHY get ability data to PHY set configuration
+ * data structure
  */
-static void
-ice_cache_phy_user_req(struct ice_port_info *pi,
-		       struct ice_phy_cache_mode_data cache_data,
-		       enum ice_phy_cache_mode cache_mode)
+void
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+			 struct ice_aqc_get_phy_caps_data *caps,
+			 struct ice_aqc_set_phy_cfg_data *cfg)
 {
-	if (!pi)
+	if (!pi || !caps || !cfg)
 		return;
 
-	switch (cache_mode) {
-	case ICE_FC_MODE:
-		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
-		break;
-	case ICE_SPEED_MODE:
-		pi->phy.curr_user_speed_req =
-			cache_data.data.curr_user_speed_req;
-		break;
-	case ICE_FEC_MODE:
-		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
-		break;
-	default:
-		break;
-	}
-}
-
-/**
- * ice_caps_to_fc_mode
- * @caps: PHY capabilities
- *
- * Convert PHY FC capabilities to ice FC mode
- */
-enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
-{
-	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
-	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
-		return ICE_FC_FULL;
+	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
+	cfg->phy_type_low = caps->phy_type_low;
+	cfg->phy_type_high = caps->phy_type_high;
+	cfg->caps = caps->caps;
+	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+	cfg->eee_cap = caps->eee_cap;
+	cfg->eeer_value = caps->eeer_value;
+	cfg->link_fec_opt = caps->link_fec_options;
+	cfg->module_compliance_enforcement =
+		caps->module_compliance_enforcement;
 
-	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
-		return ICE_FC_TX_PAUSE;
+	if (ice_fw_supports_link_override(pi->hw)) {
+		struct ice_link_default_override_tlv tlv;
 
-	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
-		return ICE_FC_RX_PAUSE;
+		if (ice_get_link_default_override(&tlv, pi))
+			return;
 
-	return ICE_FC_NONE;
+		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
+			cfg->module_compliance_enforcement |=
+				ICE_LINK_OVERRIDE_STRICT_MODE;
+	}
 }
 
 /**
- * ice_caps_to_fec_mode
- * @caps: PHY capabilities
- * @fec_options: Link FEC options
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
  *
- * Convert PHY FEC capabilities to ice FEC mode
+ * Set event mask (0x0613)
  */
-enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+		      struct ice_sq_cd *cd)
 {
-	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
-		return ICE_FEC_AUTO;
+	struct ice_aqc_set_event_mask *cmd;
+	struct ice_aq_desc desc;
+
+	cmd = &desc.params.set_event_mask;
 
-	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
-			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
-			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
-			   ICE_AQC_PHY_FEC_25G_KR_REQ))
-		return ICE_FEC_BASER;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
 
-	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
-			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
-			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
-		return ICE_FEC_RS;
+	cmd->lport_num = port_num;
 
-	return ICE_FEC_NONE;
+	cmd->event_mask = CPU_TO_LE16(mask);
+	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
 /**
- * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
- * @pi: port information structure
- * @cfg: PHY configuration data to set FC mode
- * @req_mode: FC mode to configure
+ * __ice_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @params: RSS LUT parameters
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
  */
 static enum ice_status
-ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
-	       enum ice_fc_mode req_mode)
+__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
 {
-	struct ice_phy_cache_mode_data cache_data;
-	u8 pause_mask = 0x0;
+	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
+	struct ice_aqc_get_set_rss_lut *cmd_resp;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+	u8 *lut;
 
-	if (!pi || !cfg)
-		return ICE_ERR_BAD_PTR;
+	if (!params)
+		return ICE_ERR_PARAM;
 
-	switch (req_mode) {
-	case ICE_FC_AUTO:
-	{
-		struct ice_aqc_get_phy_caps_data *pcaps;
-		enum ice_status status;
+	vsi_handle = params->vsi_handle;
+	lut = params->lut;
 
-		pcaps = (struct ice_aqc_get_phy_caps_data *)
-			ice_malloc(pi->hw, sizeof(*pcaps));
-		if (!pcaps)
-			return ICE_ERR_NO_MEMORY;
+	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+		return ICE_ERR_PARAM;
 
-		/* Query the value of FC that both the NIC and attached media
-		 * can do.
-		 */
-		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
-					     pcaps, NULL);
-		if (status) {
-			ice_free(pi->hw, pcaps);
-			return status;
-		}
+	lut_size = params->lut_size;
+	lut_type = params->lut_type;
+	glob_lut_idx = params->global_lut_id;
+	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 
-		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
-		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+	cmd_resp = &desc.params.get_set_rss_lut;
 
-		ice_free(pi->hw, pcaps);
-		break;
-	}
-	case ICE_FC_FULL:
-		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
-		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
-		break;
-	case ICE_FC_RX_PAUSE:
-		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
-		break;
-	case ICE_FC_TX_PAUSE:
-		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
-		break;
-	default:
-		break;
+	if (set) {
+		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+	} else {
+		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
 	}
 
-	/* clear the old pause settings */
-	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
-		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
-
-	/* set the new capabilities */
-	cfg->caps |= pause_mask;
-
-	/* Cache user FC request */
-	cache_data.data.curr_user_fc_req = req_mode;
-	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
+					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
+					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
+				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
 
-	return ICE_SUCCESS;
-}
-
-/**
- * ice_set_fc
- * @pi: port information structure
- * @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @ena_auto_link_update: enable automatic link update
- *
- * Set the requested flow control mode.
- */
-enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
-{
-	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
-	struct ice_aqc_get_phy_caps_data *pcaps;
-	enum ice_status status;
-	struct ice_hw *hw;
-
-	if (!pi || !aq_failures)
-		return ICE_ERR_BAD_PTR;
-
-	*aq_failures = 0;
-	hw = pi->hw;
-
-	pcaps = (struct ice_aqc_get_phy_caps_data *)
-		ice_malloc(hw, sizeof(*pcaps));
-	if (!pcaps)
-		return ICE_ERR_NO_MEMORY;
-
-	/* Get the current PHY config */
-	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
-				     NULL);
-	if (status) {
-		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
-		goto out;
+	switch (lut_type) {
+	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
+	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
+	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
+		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
+			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
+		break;
+	default:
+		status = ICE_ERR_PARAM;
+		goto ice_aq_get_set_rss_lut_exit;
 	}
 
-	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
-
-	/* Configure the set PHY data */
-	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
-	if (status) {
-		if (status != ICE_ERR_BAD_PTR)
-			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
+	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
+		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
+			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
 
-		goto out;
+		if (!set)
+			goto ice_aq_get_set_rss_lut_send;
+	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+		if (!set)
+			goto ice_aq_get_set_rss_lut_send;
+	} else {
+		goto ice_aq_get_set_rss_lut_send;
 	}
 
-	/* If the capabilities have changed, then set the new config */
-	if (cfg.caps != pcaps->caps) {
-		int retry_count, retry_max = 10;
-
-		/* Auto restart link so settings take effect */
-		if (ena_auto_link_update)
-			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
-
-		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
-		if (status) {
-			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
-			goto out;
-		}
-
-		/* Update the link info
-		 * It sometimes takes a really long time for link to
-		 * come back from the atomic reset. Thus, we wait a
-		 * little bit.
-		 */
-		for (retry_count = 0; retry_count < retry_max; retry_count++) {
-			status = ice_update_link_info(pi);
-
-			if (status == ICE_SUCCESS)
-				break;
-
-			ice_msec_delay(100, true);
+	/* LUT size is only valid for Global and PF table types */
+	switch (lut_size) {
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
+			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
+		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
+			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+			break;
 		}
-
-		if (status)
-			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
+		/* fall-through */
+	default:
+		status = ICE_ERR_PARAM;
+		goto ice_aq_get_set_rss_lut_exit;
 	}
 
-out:
-	ice_free(hw, pcaps);
+ice_aq_get_set_rss_lut_send:
+	cmd_resp->flags = CPU_TO_LE16(flags);
+	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
+
+ice_aq_get_set_rss_lut_exit:
 	return status;
 }
 
 /**
- * ice_phy_caps_equals_cfg
- * @phy_caps: PHY capabilities
- * @phy_cfg: PHY configuration
+ * ice_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @get_params: RSS LUT parameters used to specify which RSS LUT to get
  *
- * Helper function to determine if PHY capabilities matches PHY
- * configuration
+ * get the RSS lookup table, PF or VSI type
  */
-bool
-ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
-			struct ice_aqc_set_phy_cfg_data *phy_cfg)
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
 {
-	u8 caps_mask, cfg_mask;
-
-	if (!phy_caps || !phy_cfg)
-		return false;
-
-	/* These bits are not common between capabilities and configuration.
-	 * Do not use them to determine equality.
-	 */
-	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
-					      ICE_AQC_PHY_EN_MOD_QUAL);
-	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
-
-	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
-	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
-	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
-	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
-	    phy_caps->eee_cap != phy_cfg->eee_cap ||
-	    phy_caps->eeer_value != phy_cfg->eeer_value ||
-	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
-		return false;
-
-	return true;
+	return __ice_aq_get_set_rss_lut(hw, get_params, false);
 }
 
 /**
- * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
- * @pi: port information structure
- * @caps: PHY ability structure to copy date from
- * @cfg: PHY configuration structure to copy data to
+ * ice_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
  *
- * Helper function to copy AQC PHY get ability data to PHY set configuration
- * data structure
- */
-void
-ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
-			 struct ice_aqc_get_phy_caps_data *caps,
-			 struct ice_aqc_set_phy_cfg_data *cfg)
-{
-	if (!pi || !caps || !cfg)
-		return;
-
-	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
-	cfg->phy_type_low = caps->phy_type_low;
-	cfg->phy_type_high = caps->phy_type_high;
-	cfg->caps = caps->caps;
-	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
-	cfg->eee_cap = caps->eee_cap;
-	cfg->eeer_value = caps->eeer_value;
-	cfg->link_fec_opt = caps->link_fec_options;
-	cfg->module_compliance_enforcement =
-		caps->module_compliance_enforcement;
-
-	if (ice_fw_supports_link_override(pi->hw)) {
-		struct ice_link_default_override_tlv tlv;
-
-		if (ice_get_link_default_override(&tlv, pi))
-			return;
-
-		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
-			cfg->module_compliance_enforcement |=
-				ICE_LINK_OVERRIDE_STRICT_MODE;
-	}
-}
-
-/**
- * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
- * @pi: port information structure
- * @cfg: PHY configuration data to set FEC mode
- * @fec: FEC mode to configure
+ * set the RSS lookup table, PF or VSI type
  */
 enum ice_status
-ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
-		enum ice_fec_mode fec)
+ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps;
-	enum ice_status status = ICE_SUCCESS;
-	struct ice_hw *hw;
-
-	if (!pi || !cfg)
-		return ICE_ERR_BAD_PTR;
-
-	hw = pi->hw;
-
-	pcaps = (struct ice_aqc_get_phy_caps_data *)
-		ice_malloc(hw, sizeof(*pcaps));
-	if (!pcaps)
-		return ICE_ERR_NO_MEMORY;
-
-	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
-				     NULL);
-	if (status)
-		goto out;
-
-	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
-	cfg->link_fec_opt = pcaps->link_fec_options;
-
-	switch (fec) {
-	case ICE_FEC_BASER:
-		/* Clear RS bits, and AND BASE-R ability
-		 * bits and OR request bits.
-		 */
-		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
-			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
-		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
-			ICE_AQC_PHY_FEC_25G_KR_REQ;
-		break;
-	case ICE_FEC_RS:
-		/* Clear BASE-R bits, and AND RS ability
-		 * bits and OR request bits.
-		 */
-		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
-		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
-			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
-		break;
-	case ICE_FEC_NONE:
-		/* Clear all FEC option bits. */
-		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
-		break;
-	case ICE_FEC_AUTO:
-		/* AND auto FEC bit, and all caps bits. */
-		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
-		cfg->link_fec_opt |= pcaps->link_fec_options;
-		break;
-	default:
-		status = ICE_ERR_PARAM;
-		break;
-	}
-
-	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
-		struct ice_link_default_override_tlv tlv;
-
-		if (ice_get_link_default_override(&tlv, pi))
-			goto out;
-
-		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
-		    (tlv.options & ICE_LINK_OVERRIDE_EN))
-			cfg->link_fec_opt = tlv.fec_options;
-	}
-
-out:
-	ice_free(hw, pcaps);
-
-	return status;
+	return __ice_aq_get_set_rss_lut(hw, set_params, true);
 }
 
 /**
- * ice_get_link_status - get status of the HW network link
- * @pi: port information structure
- * @link_up: pointer to bool (true/false = linkup/linkdown)
+ * __ice_aq_get_set_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_id: VSI FW index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
  *
- * Variable link_up is true if link is up, false if link is down.
- * The variable link_up is invalid if status is non zero. As a
- * result of this call, link status reporting becomes enabled
+ * get (0x0B04) or set (0x0B02) the RSS key per VSI
  */
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+static enum
+ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+				    struct ice_aqc_get_set_rss_keys *key,
+				    bool set)
 {
-	struct ice_phy_info *phy_info;
-	enum ice_status status = ICE_SUCCESS;
-
-	if (!pi || !link_up)
-		return ICE_ERR_PARAM;
-
-	phy_info = &pi->phy;
+	struct ice_aqc_get_set_rss_key *cmd_resp;
+	u16 key_size = sizeof(*key);
+	struct ice_aq_desc desc;
 
-	if (phy_info->get_link_info) {
-		status = ice_update_link_info(pi);
+	cmd_resp = &desc.params.get_set_rss_key;
 
-		if (status)
-			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
-				  status);
+	if (set) {
+		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
+		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+	} else {
+		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
 	}
 
-	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
+	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
+					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
+					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
+				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
 
-	return status;
+	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
 }
 
 /**
- * ice_aq_set_link_restart_an
- * @pi: pointer to the port information structure
- * @ena_link: if true: enable link, if false: disable link
- * @cd: pointer to command details structure or NULL
+ * ice_aq_get_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @key: pointer to key info struct
  *
- * Sets up the link and restarts the Auto-Negotiation over the link.
+ * get the RSS key per VSI
  */
 enum ice_status
-ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
-			   struct ice_sq_cd *cd)
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
+		   struct ice_aqc_get_set_rss_keys *key)
 {
-	struct ice_aqc_restart_an *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.restart_an;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
-
-	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
-	cmd->lport_num = pi->lport;
-	if (ena_link)
-		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
-	else
-		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
+	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+		return ICE_ERR_PARAM;
 
-	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+					key, false);
 }
 
 /**
- * ice_aq_set_event_mask
+ * ice_aq_set_rss_key
  * @hw: pointer to the HW struct
- * @port_num: port number of the physical function
- * @mask: event mask to be set
- * @cd: pointer to command details structure or NULL
+ * @vsi_handle: software VSI handle
+ * @keys: pointer to key info struct
  *
- * Set event mask (0x0613)
+ * set the RSS key per VSI
  */
 enum ice_status
-ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
-		      struct ice_sq_cd *cd)
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
+		   struct ice_aqc_get_set_rss_keys *keys)
 {
-	struct ice_aqc_set_event_mask *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.set_event_mask;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
-
-	cmd->lport_num = port_num;
+	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+		return ICE_ERR_PARAM;
 
-	cmd->event_mask = CPU_TO_LE16(mask);
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+					keys, true);
 }
 
 /**
- * ice_aq_set_mac_loopback
- * @hw: pointer to the HW struct
- * @ena_lpbk: Enable or Disable loopback
- * @cd: pointer to command details structure or NULL
- *
- * Enable/disable loopback on a given port
- */
-enum ice_status
-ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_set_mac_lb *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.set_mac_lb;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
-	if (ena_lpbk)
-		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
-
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
- * ice_aq_set_port_id_led
- * @pi: pointer to the port information
- * @is_orig_mode: is this LED set to original mode (by the net-list)
- * @cd: pointer to command details structure or NULL
- *
- * Set LED value for the given port (0x06e9)
- */
-enum ice_status
-ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
-		       struct ice_sq_cd *cd)
-{
-	struct ice_aqc_set_port_id_led *cmd;
-	struct ice_hw *hw = pi->hw;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.set_port_id_led;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
-
-	if (is_orig_mode)
-		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
-	else
-		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
-
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
- * ice_aq_sff_eeprom
- * @hw: pointer to the HW struct
- * @lport: bits [7:0] = logical port, bit [8] = logical port valid
- * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
- * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
- * @page: QSFP page
- * @set_page: set or ignore the page
- * @data: pointer to data buffer to be read/written to the I2C device.
- * @length: 1-16 for read, 1 for write.
- * @write: 0 read, 1 for write.
- * @cd: pointer to command details structure or NULL
- *
- * Read/Write SFF EEPROM (0x06EE)
- */
-enum ice_status
-ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
-		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
-		  bool write, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_sff_eeprom *cmd;
-	struct ice_aq_desc desc;
-	enum ice_status status;
-
-	if (!data || (mem_addr & 0xff00))
-		return ICE_ERR_PARAM;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
-	cmd = &desc.params.read_write_sff_param;
-	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
-	cmd->lport_num = (u8)(lport & 0xff);
-	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
-	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
-					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
-					((set_page <<
-					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
-					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
-	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
-	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
-	if (write)
-		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
-
-	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
-	return status;
-}
-
-/**
- * __ice_aq_get_set_rss_lut
- * @hw: pointer to the hardware structure
- * @params: RSS LUT parameters
- * @set: set true to set the table, false to get the table
- *
- * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
- */
-static enum ice_status
-__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
-{
-	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
-	struct ice_aqc_get_set_rss_lut *cmd_resp;
-	struct ice_aq_desc desc;
-	enum ice_status status;
-	u8 *lut;
-
-	if (!params)
-		return ICE_ERR_PARAM;
-
-	vsi_handle = params->vsi_handle;
-	lut = params->lut;
-
-	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
-		return ICE_ERR_PARAM;
-
-	lut_size = params->lut_size;
-	lut_type = params->lut_type;
-	glob_lut_idx = params->global_lut_id;
-	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
-
-	cmd_resp = &desc.params.get_set_rss_lut;
-
-	if (set) {
-		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
-		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-	} else {
-		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
-	}
-
-	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
-					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
-					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
-				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
-
-	switch (lut_type) {
-	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
-	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
-	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
-		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
-			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
-		break;
-	default:
-		status = ICE_ERR_PARAM;
-		goto ice_aq_get_set_rss_lut_exit;
-	}
-
-	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
-		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
-			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
-
-		if (!set)
-			goto ice_aq_get_set_rss_lut_send;
-	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
-		if (!set)
-			goto ice_aq_get_set_rss_lut_send;
-	} else {
-		goto ice_aq_get_set_rss_lut_send;
-	}
-
-	/* LUT size is only valid for Global and PF table types */
-	switch (lut_size) {
-	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-		break;
-	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-		break;
-	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
-		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
-			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-			break;
-		}
-		/* fall-through */
-	default:
-		status = ICE_ERR_PARAM;
-		goto ice_aq_get_set_rss_lut_exit;
-	}
-
-ice_aq_get_set_rss_lut_send:
-	cmd_resp->flags = CPU_TO_LE16(flags);
-	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
-
-ice_aq_get_set_rss_lut_exit:
-	return status;
-}
-
-/**
- * ice_aq_get_rss_lut
- * @hw: pointer to the hardware structure
- * @get_params: RSS LUT parameters used to specify which RSS LUT to get
- *
- * get the RSS lookup table, PF or VSI type
- */
-enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
-{
-	return __ice_aq_get_set_rss_lut(hw, get_params, false);
-}
-
-/**
- * ice_aq_set_rss_lut
- * @hw: pointer to the hardware structure
- * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
- *
- * set the RSS lookup table, PF or VSI type
- */
-enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
-{
-	return __ice_aq_get_set_rss_lut(hw, set_params, true);
-}
-
-/**
- * __ice_aq_get_set_rss_key
- * @hw: pointer to the HW struct
- * @vsi_id: VSI FW index
- * @key: pointer to key info struct
- * @set: set true to set the key, false to get the key
- *
- * get (0x0B04) or set (0x0B02) the RSS key per VSI
- */
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
-				    struct ice_aqc_get_set_rss_keys *key,
-				    bool set)
-{
-	struct ice_aqc_get_set_rss_key *cmd_resp;
-	u16 key_size = sizeof(*key);
-	struct ice_aq_desc desc;
-
-	cmd_resp = &desc.params.get_set_rss_key;
-
-	if (set) {
-		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
-		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-	} else {
-		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
-	}
-
-	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
-					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
-					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
-				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
-
-	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
-}
-
-/**
- * ice_aq_get_rss_key
- * @hw: pointer to the HW struct
- * @vsi_handle: software VSI handle
- * @key: pointer to key info struct
- *
- * get the RSS key per VSI
- */
-enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
-		   struct ice_aqc_get_set_rss_keys *key)
-{
-	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
-		return ICE_ERR_PARAM;
-
-	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
-					key, false);
-}
-
-/**
- * ice_aq_set_rss_key
- * @hw: pointer to the HW struct
- * @vsi_handle: software VSI handle
- * @keys: pointer to key info struct
- *
- * set the RSS key per VSI
- */
-enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
-		   struct ice_aqc_get_set_rss_keys *keys)
-{
-	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
-		return ICE_ERR_PARAM;
-
-	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
-					keys, true);
-}
-
-/**
- * ice_aq_add_lan_txq
- * @hw: pointer to the hardware structure
- * @num_qgrps: Number of added queue groups
- * @qg_list: list of queue groups to be added
- * @buf_size: size of buffer for indirect command
+ * ice_aq_add_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: Number of added queue groups
+ * @qg_list: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
  * @cd: pointer to command details structure or NULL
  *
  * Add Tx LAN queue (0x0C30)
@@ -3567,400 +2641,107 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 	return status;
 }
 
-/**
- * ice_aq_move_recfg_lan_txq
- * @hw: pointer to the hardware structure
- * @num_qs: number of queues to move/reconfigure
- * @is_move: true if this operation involves node movement
- * @is_tc_change: true if this operation involves a TC change
- * @subseq_call: true if this operation is a subsequent call
- * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
- * @timeout: timeout in units of 100 usec (valid values 0-50)
- * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
- * @buf: struct containing src/dest TEID and per-queue info
- * @buf_size: size of buffer for indirect command
- * @txqs_moved: out param, number of queues successfully moved
- * @cd: pointer to command details structure or NULL
- *
- * Move / Reconfigure Tx LAN queues (0x0C32)
- */
-enum ice_status
-ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
-			  bool is_tc_change, bool subseq_call, bool flush_pipe,
-			  u8 timeout, u32 *blocked_cgds,
-			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
-			  u8 *txqs_moved, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_move_txqs *cmd;
-	struct ice_aq_desc desc;
-	enum ice_status status;
-
-	cmd = &desc.params.move_txqs;
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
-
-#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
-	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
-		return ICE_ERR_PARAM;
-
-	if (is_tc_change && !flush_pipe && !blocked_cgds)
-		return ICE_ERR_PARAM;
-
-	if (!is_move && !is_tc_change)
-		return ICE_ERR_PARAM;
-
-	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
-	if (is_move)
-		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
-
-	if (is_tc_change)
-		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
-
-	if (subseq_call)
-		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
-
-	if (flush_pipe)
-		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
-
-	cmd->num_qs = num_qs;
-	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
-			ICE_AQC_Q_CMD_TIMEOUT_M);
-
-	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-
-	if (!status && txqs_moved)
-		*txqs_moved = cmd->num_qs;
-
-	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
-	    is_tc_change && !flush_pipe)
-		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
-
-	return status;
-}
-
 /* End of FW Admin Queue command wrappers */
 
 /**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx:  the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info:  a description of the struct to be filled
- */
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
-{
-	u8 src_byte, dest_byte, mask;
-	u8 *from, *dest;
-	u16 shift_width;
-
-	/* copy from the next struct field */
-	from = src_ctx + ce_info->offset;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-	mask = (u8)(BIT(ce_info->width) - 1);
-
-	src_byte = *from;
-	src_byte &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-	src_byte <<= shift_width;
-
-	/* get the current bits from the target bit string */
-	dest = dest_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
-
-	dest_byte &= ~mask;	/* get the bits not changing */
-	dest_byte |= src_byte;	/* add in the new bits */
-
-	/* put it all back */
-	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
-}
-
-/**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx:  the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info:  a description of the struct to be filled
- */
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
-{
-	u16 src_word, mask;
-	__le16 dest_word;
-	u8 *from, *dest;
-	u16 shift_width;
-
-	/* copy from the next struct field */
-	from = src_ctx + ce_info->offset;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-	mask = BIT(ce_info->width) - 1;
-
-	/* don't swizzle the bits until after the mask because the mask bits
-	 * will be in a different bit position on big endian machines
-	 */
-	src_word = *(u16 *)from;
-	src_word &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-	src_word <<= shift_width;
-
-	/* get the current bits from the target bit string */
-	dest = dest_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
-
-	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
-	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
-
-	/* put it all back */
-	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
-}
-
-/**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx:  the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info:  a description of the struct to be filled
- */
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
-{
-	u32 src_dword, mask;
-	__le32 dest_dword;
-	u8 *from, *dest;
-	u16 shift_width;
-
-	/* copy from the next struct field */
-	from = src_ctx + ce_info->offset;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 32 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 5 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 32)
-		mask = BIT(ce_info->width) - 1;
-	else
-		mask = (u32)~0;
-
-	/* don't swizzle the bits until after the mask because the mask bits
-	 * will be in a different bit position on big endian machines
-	 */
-	src_dword = *(u32 *)from;
-	src_dword &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-	src_dword <<= shift_width;
-
-	/* get the current bits from the target bit string */
-	dest = dest_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
-
-	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
-	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
-
-	/* put it all back */
-	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
-}
-
-/**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx:  the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info:  a description of the struct to be filled
- */
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
-{
-	u64 src_qword, mask;
-	__le64 dest_qword;
-	u8 *from, *dest;
-	u16 shift_width;
-
-	/* copy from the next struct field */
-	from = src_ctx + ce_info->offset;
-
-	/* prepare the bits and mask */
-	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 64 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 6 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 64)
-		mask = BIT_ULL(ce_info->width) - 1;
-	else
-		mask = (u64)~0;
-
-	/* don't swizzle the bits until after the mask because the mask bits
-	 * will be in a different bit position on big endian machines
-	 */
-	src_qword = *(u64 *)from;
-	src_qword &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
-	src_qword <<= shift_width;
-
-	/* get the current bits from the target bit string */
-	dest = dest_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
-
-	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
-	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
-
-	/* put it all back */
-	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
-}
-
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx:  pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info:  a description of the structure to be transformed
- */
-enum ice_status
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
-	    const struct ice_ctx_ele *ce_info)
-{
-	int f;
-
-	for (f = 0; ce_info[f].width; f++) {
-		/* We have to deal with each element of the FW response
-		 * using the correct size so that we are correct regardless
-		 * of the endianness of the machine.
-		 */
-		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
-			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
-				  f, ce_info[f].width, ce_info[f].size_of);
-			continue;
-		}
-		switch (ce_info[f].size_of) {
-		case sizeof(u8):
-			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
-			break;
-		case sizeof(u16):
-			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
-			break;
-		case sizeof(u32):
-			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
-			break;
-		case sizeof(u64):
-			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
-			break;
-		default:
-			return ICE_ERR_INVAL_SIZE;
-		}
-	}
-
-	return ICE_SUCCESS;
-}
-
-/**
- * ice_read_byte - read context byte into struct
+ * ice_write_byte - write a byte to a packed context structure
  * @src_ctx:  the context structure to read from
  * @dest_ctx: the context to be written to
  * @ce_info:  a description of the struct to be filled
  */
 static void
-ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 {
-	u8 dest_byte, mask;
-	u8 *src, *target;
+	u8 src_byte, dest_byte, mask;
+	u8 *from, *dest;
 	u16 shift_width;
 
+	/* copy from the next struct field */
+	from = src_ctx + ce_info->offset;
+
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
 	mask = (u8)(BIT(ce_info->width) - 1);
 
+	src_byte = *from;
+	src_byte &= mask;
+
 	/* shift to correct alignment */
 	mask <<= shift_width;
+	src_byte <<= shift_width;
 
-	/* get the current bits from the src bit string */
-	src = src_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
-
-	dest_byte &= ~(mask);
+	/* get the current bits from the target bit string */
+	dest = dest_ctx + (ce_info->lsb / 8);
 
-	dest_byte >>= shift_width;
+	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
 
-	/* get the address from the struct field */
-	target = dest_ctx + ce_info->offset;
+	dest_byte &= ~mask;	/* get the bits not changing */
+	dest_byte |= src_byte;	/* add in the new bits */
 
-	/* put it back in the struct */
-	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+	/* put it all back */
+	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
 }
 
 /**
- * ice_read_word - read context word into struct
+ * ice_write_word - write a word to a packed context structure
  * @src_ctx:  the context structure to read from
  * @dest_ctx: the context to be written to
  * @ce_info:  a description of the struct to be filled
  */
 static void
-ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 {
-	u16 dest_word, mask;
-	u8 *src, *target;
-	__le16 src_word;
+	u16 src_word, mask;
+	__le16 dest_word;
+	u8 *from, *dest;
 	u16 shift_width;
 
+	/* copy from the next struct field */
+	from = src_ctx + ce_info->offset;
+
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
 	mask = BIT(ce_info->width) - 1;
 
+	/* don't swizzle the bits until after the mask because the mask bits
+	 * will be in a different bit position on big endian machines
+	 */
+	src_word = *(u16 *)from;
+	src_word &= mask;
+
 	/* shift to correct alignment */
 	mask <<= shift_width;
+	src_word <<= shift_width;
 
-	/* get the current bits from the src bit string */
-	src = src_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
-
-	/* the data in the memory is stored as little endian so mask it
-	 * correctly
-	 */
-	src_word &= ~(CPU_TO_LE16(mask));
-
-	/* get the data back into host order before shifting */
-	dest_word = LE16_TO_CPU(src_word);
+	/* get the current bits from the target bit string */
+	dest = dest_ctx + (ce_info->lsb / 8);
 
-	dest_word >>= shift_width;
+	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
 
-	/* get the address from the struct field */
-	target = dest_ctx + ce_info->offset;
+	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
+	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
 
-	/* put it back in the struct */
-	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+	/* put it all back */
+	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
 }
 
 /**
- * ice_read_dword - read context dword into struct
+ * ice_write_dword - write a dword to a packed context structure
  * @src_ctx:  the context structure to read from
  * @dest_ctx: the context to be written to
  * @ce_info:  a description of the struct to be filled
  */
 static void
-ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 {
-	u32 dest_dword, mask;
-	__le32 src_dword;
-	u8 *src, *target;
+	u32 src_dword, mask;
+	__le32 dest_dword;
+	u8 *from, *dest;
 	u16 shift_width;
 
+	/* copy from the next struct field */
+	from = src_ctx + ce_info->offset;
+
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
 
@@ -3973,45 +2754,45 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
 	else
 		mask = (u32)~0;
 
+	/* don't swizzle the bits until after the mask because the mask bits
+	 * will be in a different bit position on big endian machines
+	 */
+	src_dword = *(u32 *)from;
+	src_dword &= mask;
+
 	/* shift to correct alignment */
 	mask <<= shift_width;
+	src_dword <<= shift_width;
 
-	/* get the current bits from the src bit string */
-	src = src_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
-
-	/* the data in the memory is stored as little endian so mask it
-	 * correctly
-	 */
-	src_dword &= ~(CPU_TO_LE32(mask));
-
-	/* get the data back into host order before shifting */
-	dest_dword = LE32_TO_CPU(src_dword);
+	/* get the current bits from the target bit string */
+	dest = dest_ctx + (ce_info->lsb / 8);
 
-	dest_dword >>= shift_width;
+	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
 
-	/* get the address from the struct field */
-	target = dest_ctx + ce_info->offset;
+	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
+	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
 
-	/* put it back in the struct */
-	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+	/* put it all back */
+	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
 }
 
 /**
- * ice_read_qword - read context qword into struct
+ * ice_write_qword - write a qword to a packed context structure
  * @src_ctx:  the context structure to read from
  * @dest_ctx: the context to be written to
  * @ce_info:  a description of the struct to be filled
  */
 static void
-ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 {
-	u64 dest_qword, mask;
-	__le64 src_qword;
-	u8 *src, *target;
+	u64 src_qword, mask;
+	__le64 dest_qword;
+	u8 *from, *dest;
 	u16 shift_width;
 
+	/* copy from the next struct field */
+	from = src_ctx + ce_info->offset;
+
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
 
@@ -4024,59 +2805,66 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
 	else
 		mask = (u64)~0;
 
+	/* don't swizzle the bits until after the mask because the mask bits
+	 * will be in a different bit position on big endian machines
+	 */
+	src_qword = *(u64 *)from;
+	src_qword &= mask;
+
 	/* shift to correct alignment */
 	mask <<= shift_width;
+	src_qword <<= shift_width;
 
-	/* get the current bits from the src bit string */
-	src = src_ctx + (ce_info->lsb / 8);
-
-	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
-
-	/* the data in the memory is stored as little endian so mask it
-	 * correctly
-	 */
-	src_qword &= ~(CPU_TO_LE64(mask));
-
-	/* get the data back into host order before shifting */
-	dest_qword = LE64_TO_CPU(src_qword);
+	/* get the current bits from the target bit string */
+	dest = dest_ctx + (ce_info->lsb / 8);
 
-	dest_qword >>= shift_width;
+	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
 
-	/* get the address from the struct field */
-	target = dest_ctx + ce_info->offset;
+	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
+	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
 
-	/* put it back in the struct */
-	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+	/* put it all back */
+	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
 }
 
 /**
- * ice_get_ctx - extract context bits from a packed structure
- * @src_ctx:  pointer to a generic packed context structure
- * @dest_ctx: pointer to a generic non-packed context structure
- * @ce_info:  a description of the structure to be read from
+ * ice_set_ctx - set context bits in packed structure
+ * @hw: pointer to the hardware structure
+ * @src_ctx:  pointer to a generic non-packed context structure
+ * @dest_ctx: pointer to memory for the packed structure
+ * @ce_info:  a description of the structure to be transformed
  */
 enum ice_status
-ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+	    const struct ice_ctx_ele *ce_info)
 {
 	int f;
 
 	for (f = 0; ce_info[f].width; f++) {
+		/* We have to deal with each element of the FW response
+		 * using the correct size so that we are correct regardless
+		 * of the endianness of the machine.
+		 */
+		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
+			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+				  f, ce_info[f].width, ce_info[f].size_of);
+			continue;
+		}
 		switch (ce_info[f].size_of) {
-		case 1:
-			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
+		case sizeof(u8):
+			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
 			break;
-		case 2:
-			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
+		case sizeof(u16):
+			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
 			break;
-		case 4:
-			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
+		case sizeof(u32):
+			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
 			break;
-		case 8:
-			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
+		case sizeof(u64):
+			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
 			break;
 		default:
-			/* nothing to do, just keep going */
-			break;
+			return ICE_ERR_INVAL_SIZE;
 		}
 	}
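
For reference, the ice_write_byte/word/dword/qword helpers added above all
follow the same mask-shift-merge pattern that ice_set_ctx() dispatches on
field size. A minimal standalone sketch of that packing step (plain C,
little-endian host assumed; the 9-bit field at bit offset 3 is made up purely
for illustration, and the driver additionally byte-swaps with CPU_TO_LE16() so
the merge is also correct on big-endian hosts):

#include <stdint.h>
#include <string.h>

static void pack_field16(uint8_t *dest_ctx, uint16_t host_val,
			 uint16_t width, uint16_t lsb)
{
	uint16_t shift = lsb % 8;                      /* offset inside the byte */
	uint16_t mask = (uint16_t)((1u << width) - 1); /* width < 16 assumed */
	uint16_t word;

	host_val &= mask;               /* keep only the field bits */
	mask <<= shift;                 /* align mask and value to the */
	host_val <<= shift;             /* destination bit position */

	memcpy(&word, dest_ctx + lsb / 8, sizeof(word)); /* current bits */
	word &= (uint16_t)~mask;        /* clear the bits being replaced */
	word |= host_val;               /* merge in the new field */
	memcpy(dest_ctx + lsb / 8, &word, sizeof(word)); /* write back */
}

For example, pack_field16(buf, 0x1A5, 9, 3) places the 9-bit value 0x1A5 at
bits 3..11 of the packed buffer without disturbing the neighbouring bits,
which mirrors what ice_write_word() does for a struct field described by an
ice_ctx_ele entry.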
 
@@ -4350,224 +3138,6 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
 			      ICE_SCHED_NODE_OWNER_LAN);
 }
 
-/**
- * ice_is_main_vsi - checks whether the VSI is main VSI
- * @hw: pointer to the HW struct
- * @vsi_handle: VSI handle
- *
- * Checks whether the VSI is the main VSI (the first PF VSI created on
- * given PF).
- */
-static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
-	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
-}
-
-/**
- * ice_replay_pre_init - replay pre initialization
- * @hw: pointer to the HW struct
- * @sw: pointer to switch info struct for which function initializes filters
- *
- * Initializes required config data for VSI, FD, ACL, and RSS before replay.
- */
-static enum ice_status
-ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
-{
-	enum ice_status status;
-	u8 i;
-
-	/* Delete old entries from replay filter list head if there is any */
-	ice_rm_sw_replay_rule_info(hw, sw);
-	/* In start of replay, move entries into replay_rules list, it
-	 * will allow adding rules entries back to filt_rules list,
-	 * which is operational list.
-	 */
-	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
-		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
-				  &sw->recp_list[i].filt_replay_rules);
-	ice_sched_replay_agg_vsi_preinit(hw);
-
-	status = ice_sched_replay_root_node_bw(hw->port_info);
-	if (status)
-		return status;
-
-	return ice_sched_replay_tc_node_bw(hw->port_info);
-}
-
-/**
- * ice_replay_vsi - replay VSI configuration
- * @hw: pointer to the HW struct
- * @vsi_handle: driver VSI handle
- *
- * Restore all VSI configuration after reset. It is required to call this
- * function with main VSI first.
- */
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
-	struct ice_switch_info *sw = hw->switch_info;
-	struct ice_port_info *pi = hw->port_info;
-	enum ice_status status;
-
-	if (!ice_is_vsi_valid(hw, vsi_handle))
-		return ICE_ERR_PARAM;
-
-	/* Replay pre-initialization if there is any */
-	if (ice_is_main_vsi(hw, vsi_handle)) {
-		status = ice_replay_pre_init(hw, sw);
-		if (status)
-			return status;
-	}
-	/* Replay per VSI all RSS configurations */
-	status = ice_replay_rss_cfg(hw, vsi_handle);
-	if (status)
-		return status;
-	/* Replay per VSI all filters */
-	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
-	if (!status)
-		status = ice_replay_vsi_agg(hw, vsi_handle);
-	return status;
-}
-
-/**
- * ice_replay_post - post replay configuration cleanup
- * @hw: pointer to the HW struct
- *
- * Post replay cleanup.
- */
-void ice_replay_post(struct ice_hw *hw)
-{
-	/* Delete old entries from replay filter list head */
-	ice_rm_all_sw_replay_rule_info(hw);
-	ice_sched_replay_agg(hw);
-}
-
-/**
- * ice_stat_update40 - read 40 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @reg: offset of 64 bit HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-void
-ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
-		  u64 *prev_stat, u64 *cur_stat)
-{
-	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
-
-	/* device stats are not reset at PFR, they likely will not be zeroed
-	 * when the driver starts. Thus, save the value from the first read
-	 * without adding to the statistic value so that we report stats which
-	 * count up from zero.
-	 */
-	if (!prev_stat_loaded) {
-		*prev_stat = new_data;
-		return;
-	}
-
-	/* Calculate the difference between the new and old values, and then
-	 * add it to the software stat value.
-	 */
-	if (new_data >= *prev_stat)
-		*cur_stat += new_data - *prev_stat;
-	else
-		/* to manage the potential roll-over */
-		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
-
-	/* Update the previously stored value to prepare for next read */
-	*prev_stat = new_data;
-}
-
-/**
- * ice_stat_update32 - read 32 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @reg: offset of HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-void
-ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
-		  u64 *prev_stat, u64 *cur_stat)
-{
-	u32 new_data;
-
-	new_data = rd32(hw, reg);
-
-	/* device stats are not reset at PFR, they likely will not be zeroed
-	 * when the driver starts. Thus, save the value from the first read
-	 * without adding to the statistic value so that we report stats which
-	 * count up from zero.
-	 */
-	if (!prev_stat_loaded) {
-		*prev_stat = new_data;
-		return;
-	}
-
-	/* Calculate the difference between the new and old values, and then
-	 * add it to the software stat value.
-	 */
-	if (new_data >= *prev_stat)
-		*cur_stat += new_data - *prev_stat;
-	else
-		/* to manage the potential roll-over */
-		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
-
-	/* Update the previously stored value to prepare for next read */
-	*prev_stat = new_data;
-}
-
-/**
- * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
- * @hw: ptr to the hardware info
- * @vsi_handle: VSI handle
- * @prev_stat_loaded: bool to specify if the previous stat values are loaded
- * @cur_stats: ptr to current stats structure
- *
- * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
- * thus cannot be read using the normal ice_stat_update32 function.
- *
- * Read the GLV_REPC register associated with the given VSI, and update the
- * rx_no_desc and rx_error values in the ice_eth_stats structure.
- *
- * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
- * cleared each time it's read.
- *
- * Note that the GLV_RDPC register also counts the causes that would trigger
- * GLV_REPC. However, it does not give the finer grained detail about why the
- * packets are being dropped. The GLV_REPC values can be used to distinguish
- * whether Rx packets are dropped due to errors or due to no available
- * descriptors.
- */
-void
-ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
-		     struct ice_eth_stats *cur_stats)
-{
-	u16 vsi_num, no_desc, error_cnt;
-	u32 repc;
-
-	if (!ice_is_vsi_valid(hw, vsi_handle))
-		return;
-
-	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
-
-	/* If we haven't loaded stats yet, just clear the current value */
-	if (!prev_stat_loaded) {
-		wr32(hw, GLV_REPC(vsi_num), 0);
-		return;
-	}
-
-	repc = rd32(hw, GLV_REPC(vsi_num));
-	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
-	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
-
-	/* Clear the count by writing to the stats register */
-	wr32(hw, GLV_REPC(vsi_num), 0);
-
-	cur_stats->rx_no_desc += no_desc;
-	cur_stats->rx_errors += error_cnt;
-}
-
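
The removed ice_stat_update40()/ice_stat_update32() share one detail worth
keeping in mind if they are ever added back: the hardware counters are never
cleared, so the delta between two reads has to account for a wrap-around at
the counter width. A minimal standalone sketch of that arithmetic (names are
illustrative; width must be below 64 for the shift to be defined):

#include <stdbool.h>
#include <stdint.h>

static void stat_update(uint64_t new_data, unsigned int width,
			bool prev_loaded, uint64_t *prev_stat,
			uint64_t *cur_stat)
{
	uint64_t modulus = 1ULL << width;  /* counter wraps at 2^width */

	new_data &= modulus - 1;           /* e.g. keep the low 40 bits */

	if (!prev_loaded) {                /* first read only seeds the baseline */
		*prev_stat = new_data;
		return;
	}

	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else                               /* counter wrapped since the last read */
		*cur_stat += (new_data + modulus) - *prev_stat;

	*prev_stat = new_data;
}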
 /**
  * ice_sched_query_elem - query element information from HW
  * @hw: pointer to the HW struct
@@ -4711,21 +3281,6 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
 	return status;
 }
 
-/**
- * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
- * @caps: get PHY capability data
- */
-bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
-{
-	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
-	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
-				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
-				       ICE_AQC_PHY_AN_EN_CLAUSE37))
-		return true;
-
-	return false;
-}
-
 /**
  * ice_aq_set_lldp_mib - Set the LLDP MIB
  * @hw: pointer to the HW struct
@@ -4758,50 +3313,3 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
 
 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 }
-
-/**
- * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
- * @hw: pointer to HW struct
- */
-bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
-{
-	if (hw->mac_type != ICE_MAC_E810)
-		return false;
-
-	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
-		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
-			return true;
-		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
-		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
-			return true;
-	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
-		return true;
-	}
-	return false;
-}
-
-/**
- * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
- * @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
- * @add: boolean for if adding or removing a filter
- */
-enum ice_status
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
-{
-	struct ice_aqc_lldp_filter_ctrl *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.lldp_filter_ctrl;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
-
-	if (add)
-		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
-	else
-		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
-
-	cmd->vsi_num = CPU_TO_LE16(vsi_num);
-
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
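
The removed ice_fw_supports_lldp_fltr_ctrl() above gates the LLDP Rx filter
feature on the AdminQ API version reported by E810 firmware: a newer major
version always passes, while the same major version requires the minor/patch
pair to be at least the defined minimum. The same three-way comparison,
written generically as a standalone sketch (names are illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* Return true when (maj, min, patch) >= (req_maj, req_min, req_patch). */
static bool api_ver_at_least(uint8_t maj, uint8_t min, uint8_t patch,
			     uint8_t req_maj, uint8_t req_min,
			     uint8_t req_patch)
{
	if (maj != req_maj)
		return maj > req_maj;   /* different major decides outright */
	if (min != req_min)
		return min > req_min;   /* same major: compare minor */
	return patch >= req_patch;      /* same major and minor: compare patch */
}
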
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 8c16c7a024..1cf03e52e7 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -21,7 +21,6 @@ enum ice_fw_modes {
 enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw);
 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw);
 enum ice_status ice_init_hw(struct ice_hw *hw);
-void ice_deinit_hw(struct ice_hw *hw);
 enum ice_status ice_check_reset(struct ice_hw *hw);
 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
 
@@ -32,8 +31,6 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw);
 enum ice_status
 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		  struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
-ice_get_link_status(struct ice_port_info *pi, bool *link_up);
 enum ice_status ice_update_link_info(struct ice_port_info *pi);
 enum ice_status
 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
@@ -55,8 +52,6 @@ void ice_clear_pxe_mode(struct ice_hw *hw);
 
 enum ice_status ice_get_caps(struct ice_hw *hw);
 
-void ice_set_safe_mode_caps(struct ice_hw *hw);
-
 /* Define a macro that will align a pointer to point to the next memory address
  * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
  * example, given the variable pointer = 0x1006, then after the following call:
@@ -72,18 +67,6 @@ enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		  u32 rxq_index);
 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
-enum ice_status
-ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
-enum ice_status
-ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
-			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
-			 u32 tx_cmpltnq_index);
-enum ice_status
-ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
-enum ice_status
-ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
-			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
-			  u32 tx_drbell_q_index);
 
 enum ice_status
 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -99,13 +82,6 @@ enum ice_status
 ice_aq_add_lan_txq(struct ice_hw *hw, u8 count,
 		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
 		   struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
-			  bool is_tc_change, bool subseq_call, bool flush_pipe,
-			  u8 timeout, u32 *blocked_cgds,
-			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
-			  u8 *txqs_moved, struct ice_sq_cd *cd);
-
 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
@@ -126,9 +102,6 @@ enum ice_status
 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
 		    struct ice_aqc_get_phy_caps_data *caps,
 		    struct ice_sq_cd *cd);
-void
-ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
-		    u16 link_speeds_bitmap);
 enum ice_status
 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
 			struct ice_sq_cd *cd);
@@ -141,27 +114,11 @@ bool ice_fw_supports_link_override(struct ice_hw *hw);
 enum ice_status
 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
 			      struct ice_port_info *pi);
-bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
-
-enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
-enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
-	   bool ena_auto_link_update);
-bool
-ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
-			struct ice_aqc_set_phy_cfg_data *cfg);
 void
 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
 			 struct ice_aqc_get_phy_caps_data *caps,
 			 struct ice_aqc_set_phy_cfg_data *cfg);
 enum ice_status
-ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
-		enum ice_fec_mode fec);
-enum ice_status
-ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
-			   struct ice_sq_cd *cd);
-enum ice_status
 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
 enum ice_status
 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
@@ -170,19 +127,6 @@ enum ice_status
 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
 		      struct ice_sq_cd *cd);
 enum ice_status
-ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-
-enum ice_status
-ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
-		       struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
-		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
-		  bool write, struct ice_sq_cd *cd);
-
-enum ice_status
-ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info);
-enum ice_status
 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
 		u16 *q_handle, u16 *q_ids, u32 *q_teids,
 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -194,19 +138,8 @@ enum ice_status
 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
 		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
 		struct ice_sq_cd *cd);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
-void ice_replay_post(struct ice_hw *hw);
 struct ice_q_ctx *
 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-void
-ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
-		  u64 *prev_stat, u64 *cur_stat);
-void
-ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
-		  u64 *prev_stat, u64 *cur_stat);
-void
-ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
-		     struct ice_eth_stats *cur_stats);
 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
 void ice_print_rollback_msg(struct ice_hw *hw);
 enum ice_status
@@ -215,7 +148,4 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
 enum ice_status
 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
 		    struct ice_sq_cd *cd);
-bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
 #endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index 351038528b..09b5d89bc0 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -109,32 +109,6 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
-/**
- * ice_aq_start_lldp
- * @hw: pointer to the HW struct
- * @persist: True if Start of LLDP Agent needs to be persistent across reboots
- * @cd: pointer to command details structure or NULL
- *
- * Start the embedded LLDP Agent on all ports. (0x0A06)
- */
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_lldp_start *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.lldp_start;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
-
-	cmd->command = ICE_AQ_LLDP_AGENT_START;
-
-	if (persist)
-		cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA;
-
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
 /**
  * ice_get_dcbx_status
  * @hw: pointer to the HW struct
@@ -672,49 +646,6 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
 	return ret;
 }
 
-/**
- * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW
- * @hw: pointer to the HW struct
- * @start_dcbx_agent: True if DCBX Agent needs to be started
- *		      False if DCBX Agent needs to be stopped
- * @dcbx_agent_status: FW indicates back the DCBX agent status
- *		       True if DCBX Agent is active
- *		       False if DCBX Agent is stopped
- * @cd: pointer to command details structure or NULL
- *
- * Start/Stop the embedded DCBX Agent. If this wrapper function returns
- * ICE_SUCCESS, the caller will need to check whether FW reports back the same
- * value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
- */
-enum ice_status
-ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
-		       bool *dcbx_agent_status, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_lldp_stop_start_specific_agent *cmd;
-	enum ice_status status;
-	struct ice_aq_desc desc;
-	u16 opcode;
-
-	cmd = &desc.params.lldp_agent_ctrl;
-
-	opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
-
-	if (start_dcbx_agent)
-		cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX;
-
-	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-
-	*dcbx_agent_status = false;
-
-	if (status == ICE_SUCCESS &&
-	    cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
-		*dcbx_agent_status = true;
-
-	return status;
-}
-
 /**
  * ice_aq_get_cee_dcb_cfg
  * @hw: pointer to the HW struct
@@ -969,34 +900,6 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
 	return ret;
 }
 
-/**
- * ice_cfg_lldp_mib_change
- * @hw: pointer to the HW struct
- * @ena_mib: enable/disable MIB change event
- *
- * Configure (disable/enable) MIB
- */
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
-{
-	struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
-	enum ice_status ret;
-
-	if (!hw->func_caps.common_cap.dcb)
-		return ICE_ERR_NOT_SUPPORTED;
-
-	/* Get DCBX status */
-	qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
-
-	if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
-		return ICE_ERR_NOT_READY;
-
-	ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
-	if (!ret)
-		qos_cfg->is_sw_lldp = !ena_mib;
-
-	return ret;
-}
-
 /**
  * ice_add_ieee_ets_common_tlv
  * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
@@ -1269,45 +1172,6 @@ void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
 	*miblen = offset;
 }
 
-/**
- * ice_set_dcb_cfg - Set the local LLDP MIB to FW
- * @pi: port information structure
- *
- * Set DCB configuration to the Firmware
- */
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
-{
-	u8 mib_type, *lldpmib = NULL;
-	struct ice_dcbx_cfg *dcbcfg;
-	enum ice_status ret;
-	struct ice_hw *hw;
-	u16 miblen;
-
-	if (!pi)
-		return ICE_ERR_PARAM;
-
-	hw = pi->hw;
-
-	/* update the HW local config */
-	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
-	/* Allocate the LLDPDU */
-	lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
-	if (!lldpmib)
-		return ICE_ERR_NO_MEMORY;
-
-	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
-	if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
-		mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
-
-	ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
-	ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
-				  NULL);
-
-	ice_free(hw, lldpmib);
-
-	return ret;
-}
-
 /**
  * ice_aq_query_port_ets - query port ETS configuration
  * @pi: port information structure
@@ -1400,28 +1264,3 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
 	}
 	return status;
 }
-
-/**
- * ice_query_port_ets - query port ETS configuration
- * @pi: port information structure
- * @buf: pointer to buffer
- * @buf_size: buffer size in bytes
- * @cd: pointer to command details structure or NULL
- *
- * query current port ETS configuration and update the
- * SW DB with the TC changes
- */
-enum ice_status
-ice_query_port_ets(struct ice_port_info *pi,
-		   struct ice_aqc_port_ets_elem *buf, u16 buf_size,
-		   struct ice_sq_cd *cd)
-{
-	enum ice_status status;
-
-	ice_acquire_lock(&pi->sched_lock);
-	status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
-	if (!status)
-		status = ice_update_port_tc_tree_cfg(pi, buf);
-	ice_release_lock(&pi->sched_lock);
-	return status;
-}
diff --git a/drivers/net/ice/base/ice_dcb.h b/drivers/net/ice/base/ice_dcb.h
index 8f0e09d50a..157845d592 100644
--- a/drivers/net/ice/base/ice_dcb.h
+++ b/drivers/net/ice/base/ice_dcb.h
@@ -186,14 +186,9 @@ enum ice_status
 ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
 		   struct ice_dcbx_cfg *dcbcfg);
 enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
 enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
 void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
 enum ice_status
-ice_query_port_ets(struct ice_port_info *pi,
-		   struct ice_aqc_port_ets_elem *buf, u16 buf_size,
-		   struct ice_sq_cd *cmd_details);
-enum ice_status
 ice_aq_query_port_ets(struct ice_port_info *pi,
 		      struct ice_aqc_port_ets_elem *buf, u16 buf_size,
 		      struct ice_sq_cd *cd);
@@ -204,12 +199,6 @@ enum ice_status
 ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
 		 struct ice_sq_cd *cd);
 enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
-		       bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
-enum ice_status
 ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
 			   struct ice_sq_cd *cd);
 #endif /* _ICE_DCB_H_ */
diff --git a/drivers/net/ice/base/ice_fdir.c b/drivers/net/ice/base/ice_fdir.c
index aeff7af55d..dfc46ade5d 100644
--- a/drivers/net/ice/base/ice_fdir.c
+++ b/drivers/net/ice/base/ice_fdir.c
@@ -816,20 +816,6 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
 				  cntr_id);
 }
 
-/**
- * ice_free_fd_guar_item - Free flow director guaranteed entries
- * @hw: pointer to the hardware structure
- * @cntr_id: counter index that needs to be freed
- * @num_fltr: number of filters to be freed
- */
-enum ice_status
-ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr)
-{
-	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
-				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
-				 cntr_id);
-}
-
 /**
  * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries
  * @hw: pointer to the hardware structure
@@ -844,31 +830,6 @@ ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
 				  cntr_id);
 }
 
-/**
- * ice_free_fd_shrd_item - Free flow director shared entries
- * @hw: pointer to the hardware structure
- * @cntr_id: counter index that needs to be freed
- * @num_fltr: number of filters to be freed
- */
-enum ice_status
-ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr)
-{
-	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
-				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
-				 cntr_id);
-}
-
-/**
- * ice_get_fdir_cnt_all - get the number of Flow Director filters
- * @hw: hardware data structure
- *
- * Returns the number of filters available on device
- */
-int ice_get_fdir_cnt_all(struct ice_hw *hw)
-{
-	return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort;
-}
-
 /**
  * ice_pkt_insert_ipv6_addr - insert a be32 IPv6 address into a memory buffer.
  * @pkt: packet buffer
@@ -1254,226 +1215,3 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
 
 	return ICE_SUCCESS;
 }
-
-/**
- * ice_fdir_get_prgm_pkt - generate a training packet
- * @input: flow director filter data structure
- * @pkt: pointer to return filter packet
- * @frag: generate a fragment packet
- */
-enum ice_status
-ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag)
-{
-	return ice_fdir_get_gen_prgm_pkt(NULL, input, pkt, frag, false);
-}
-
-/**
- * ice_fdir_has_frag - does flow type have 2 ptypes
- * @flow: flow ptype
- *
- * returns true is there is a fragment packet for this ptype
- */
-bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
-{
-	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
-		return true;
-	else
-		return false;
-}
-
-/**
- * ice_fdir_find_fltr_by_idx - find filter with idx
- * @hw: pointer to hardware structure
- * @fltr_idx: index to find.
- *
- * Returns pointer to filter if found or null
- */
-struct ice_fdir_fltr *
-ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx)
-{
-	struct ice_fdir_fltr *rule;
-
-	LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr,
-			    fltr_node) {
-		/* rule ID found in the list */
-		if (fltr_idx == rule->fltr_id)
-			return rule;
-		if (fltr_idx < rule->fltr_id)
-			break;
-	}
-	return NULL;
-}
-
-/**
- * ice_fdir_list_add_fltr - add a new node to the flow director filter list
- * @hw: hardware structure
- * @fltr: filter node to add to structure
- */
-void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
-{
-	struct ice_fdir_fltr *rule, *parent = NULL;
-
-	LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr,
-			    fltr_node) {
-		/* rule ID found or pass its spot in the list */
-		if (rule->fltr_id >= fltr->fltr_id)
-			break;
-		parent = rule;
-	}
-
-	if (parent)
-		LIST_ADD_AFTER(&fltr->fltr_node, &parent->fltr_node);
-	else
-		LIST_ADD(&fltr->fltr_node, &hw->fdir_list_head);
-}
-
-/**
- * ice_fdir_update_cntrs - increment / decrement filter counter
- * @hw: pointer to hardware structure
- * @flow: filter flow type
- * @acl_fltr: true indicates an ACL filter
- * @add: true implies filters added
- */
-void
-ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow,
-		      bool acl_fltr, bool add)
-{
-	int incr;
-
-	incr = add ? 1 : -1;
-	hw->fdir_active_fltr += incr;
-	if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX) {
-		ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow);
-	} else {
-		if (acl_fltr)
-			hw->acl_fltr_cnt[flow] += incr;
-		else
-			hw->fdir_fltr_cnt[flow] += incr;
-	}
-}
-
-/**
- * ice_cmp_ipv6_addr - compare 2 IP v6 addresses
- * @a: IP v6 address
- * @b: IP v6 address
- *
- * Returns 0 on equal, returns non-0 if different
- */
-static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
-{
-	return memcmp(a, b, 4 * sizeof(__be32));
-}
-
-/**
- * ice_fdir_comp_rules - compare 2 filters
- * @a: a Flow Director filter data structure
- * @b: a Flow Director filter data structure
- * @v6: bool true if v6 filter
- *
- * Returns true if the filters match
- */
-static bool
-ice_fdir_comp_rules(struct ice_fdir_fltr *a,  struct ice_fdir_fltr *b, bool v6)
-{
-	enum ice_fltr_ptype flow_type = a->flow_type;
-
-	/* The calling function already checks that the two filters have the
-	 * same flow_type.
-	 */
-	if (!v6) {
-		if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
-			if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
-			    a->ip.v4.src_ip == b->ip.v4.src_ip &&
-			    a->ip.v4.dst_port == b->ip.v4.dst_port &&
-			    a->ip.v4.src_port == b->ip.v4.src_port)
-				return true;
-		} else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
-			if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
-			    a->ip.v4.src_ip == b->ip.v4.src_ip &&
-			    a->ip.v4.l4_header == b->ip.v4.l4_header &&
-			    a->ip.v4.proto == b->ip.v4.proto &&
-			    a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
-			    a->ip.v4.tos == b->ip.v4.tos)
-				return true;
-		}
-	} else {
-		if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
-			if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
-			    a->ip.v6.src_port == b->ip.v6.src_port &&
-			    !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
-					       b->ip.v6.dst_ip) &&
-			    !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
-					       b->ip.v6.src_ip))
-				return true;
-		} else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
-			if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
-			    a->ip.v6.src_port == b->ip.v6.src_port)
-				return true;
-		}
-	}
-
-	return false;
-}
-
-/**
- * ice_fdir_is_dup_fltr - test if filter is already in list for PF
- * @hw: hardware data structure
- * @input: Flow Director filter data structure
- *
- * Returns true if the filter is found in the list
- */
-bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
-{
-	struct ice_fdir_fltr *rule;
-	bool ret = false;
-
-	LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr,
-			    fltr_node) {
-		enum ice_fltr_ptype flow_type;
-
-		if (rule->flow_type != input->flow_type)
-			continue;
-
-		flow_type = input->flow_type;
-		if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
-		    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
-			ret = ice_fdir_comp_rules(rule, input, false);
-		else
-			ret = ice_fdir_comp_rules(rule, input, true);
-		if (ret) {
-			if (rule->fltr_id == input->fltr_id &&
-			    rule->q_index != input->q_index)
-				ret = false;
-			else
-				break;
-		}
-	}
-
-	return ret;
-}
-
-/**
- * ice_clear_pf_fd_table - admin command to clear FD table for PF
- * @hw: hardware data structure
- *
- * Clears FD table entries for a PF by issuing admin command (direct, 0x0B06)
- */
-enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw)
-{
-	struct ice_aqc_clear_fd_table *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.clear_fd_table;
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_fd_table);
-	cmd->clear_type = CL_FD_VM_VF_TYPE_PF_IDX;
-	/* vsi_index must be 0 to clear FD table for a PF */
-	cmd->vsi_index = CPU_TO_LE16(0);
-
-	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
diff --git a/drivers/net/ice/base/ice_fdir.h b/drivers/net/ice/base/ice_fdir.h
index d363de385d..1f0f5bda7d 100644
--- a/drivers/net/ice/base/ice_fdir.h
+++ b/drivers/net/ice/base/ice_fdir.h
@@ -234,27 +234,11 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
 enum ice_status
 ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
 enum ice_status
-ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr);
-enum ice_status
 ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
-ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr);
-enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw);
 void
 ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
 		       struct ice_fltr_desc *fdesc, bool add);
 enum ice_status
 ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
 			  u8 *pkt, bool frag, bool tun);
-enum ice_status
-ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag);
-int ice_get_fdir_cnt_all(struct ice_hw *hw);
-bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
-bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
-struct ice_fdir_fltr *
-ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx);
-void
-ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow,
-		      bool acl_fltr, bool add);
-void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
 #endif /* _ICE_FDIR_H_ */
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 7594df1696..aec2c63c30 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1950,54 +1950,6 @@ static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
 	return false;
 }
 
-/**
- * ice_tunnel_port_in_use
- * @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
- */
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
-{
-	bool res;
-
-	ice_acquire_lock(&hw->tnl_lock);
-	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
-	ice_release_lock(&hw->tnl_lock);
-
-	return res;
-}
-
-/**
- * ice_tunnel_get_type
- * @hw: pointer to the HW structure
- * @port: port to search for
- * @type: returns tunnel type
- *
- * For a given port number, this function will return the type of tunnel.
- */
-bool
-ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
-{
-	bool res = false;
-	u16 i;
-
-	ice_acquire_lock(&hw->tnl_lock);
-
-	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
-		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
-			*type = hw->tnl.tbl[i].type;
-			res = true;
-			break;
-		}
-
-	ice_release_lock(&hw->tnl_lock);
-
-	return res;
-}
-
 /**
  * ice_find_free_tunnel_entry
  * @hw: pointer to the HW structure
@@ -3797,61 +3749,6 @@ static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
 }
 
-/**
- * ice_clear_hw_tbls - clear HW tables and flow profiles
- * @hw: pointer to the hardware structure
- */
-void ice_clear_hw_tbls(struct ice_hw *hw)
-{
-	u8 i;
-
-	for (i = 0; i < ICE_BLK_COUNT; i++) {
-		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
-		struct ice_prof_tcam *prof = &hw->blk[i].prof;
-		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
-		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
-		struct ice_es *es = &hw->blk[i].es;
-
-		if (hw->blk[i].is_list_init) {
-			ice_free_prof_map(hw, i);
-			ice_free_flow_profs(hw, i);
-		}
-
-		ice_free_vsig_tbl(hw, (enum ice_block)i);
-
-		ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
-			   ICE_NONDMA_MEM);
-		ice_memset(xlt1->ptg_tbl, 0,
-			   ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
-			   ICE_NONDMA_MEM);
-		ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
-			   ICE_NONDMA_MEM);
-
-		ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
-			   ICE_NONDMA_MEM);
-		ice_memset(xlt2->vsig_tbl, 0,
-			   xlt2->count * sizeof(*xlt2->vsig_tbl),
-			   ICE_NONDMA_MEM);
-		ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
-			   ICE_NONDMA_MEM);
-
-		ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
-			   ICE_NONDMA_MEM);
-		ice_memset(prof_redir->t, 0,
-			   prof_redir->count * sizeof(*prof_redir->t),
-			   ICE_NONDMA_MEM);
-
-		ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
-			   ICE_NONDMA_MEM);
-		ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
-			   ICE_NONDMA_MEM);
-		ice_memset(es->written, 0, es->count * sizeof(*es->written),
-			   ICE_NONDMA_MEM);
-		ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
-			   ICE_NONDMA_MEM);
-	}
-}
-
 /**
  * ice_init_hw_tbls - init hardware table memory
  * @hw: pointer to the hardware structure
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 214c7a2837..257351adfe 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -44,9 +44,6 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
 enum ice_status
 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
-bool
-ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
 
 /* XLT2/VSI group functions */
 enum ice_status
@@ -71,7 +68,6 @@ ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
 enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
 void ice_free_seg(struct ice_hw *hw);
 void ice_fill_blk_tbls(struct ice_hw *hw);
-void ice_clear_hw_tbls(struct ice_hw *hw);
 void ice_free_hw_tbls(struct ice_hw *hw);
 enum ice_status
 ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index 1b36c2b897..312e9b1ba4 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -1576,26 +1576,6 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
 	return prof;
 }
 
-/**
- * ice_flow_find_prof - Look up a profile matching headers and matched fields
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @dir: flow direction
- * @segs: array of one or more packet segments that describe the flow
- * @segs_cnt: number of packet segments provided
- */
-u64
-ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
-		   struct ice_flow_seg_info *segs, u8 segs_cnt)
-{
-	struct ice_flow_prof *p;
-
-	p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
-				     ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
-
-	return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
-}
-
 /**
  * ice_flow_find_prof_id - Look up a profile with given profile ID
  * @hw: pointer to the HW struct
@@ -2087,34 +2067,6 @@ ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
 	return status;
 }
 
-/**
- * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
- * @hw: pointer to the hardware structure
- * @blk: classification stage
- * @vsi_handle: software VSI handle
- * @vsig: target VSI group
- *
- * Assumption: the caller has already verified that the VSI to
- * be added has the same characteristics as the VSIG and will
- * thereby have access to all resources added to that VSIG.
- */
-enum ice_status
-ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
-			u16 vsig)
-{
-	enum ice_status status;
-
-	if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
-		return ICE_ERR_PARAM;
-
-	ice_acquire_lock(&hw->fl_profs_locks[blk]);
-	status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
-				  vsig);
-	ice_release_lock(&hw->fl_profs_locks[blk]);
-
-	return status;
-}
-
 /**
  * ice_flow_assoc_prof - associate a VSI with a flow profile
  * @hw: pointer to the hardware structure
@@ -2256,44 +2208,6 @@ ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
 	return status;
 }
 
-/**
- * ice_flow_find_entry - look for a flow entry using its unique ID
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @entry_id: unique ID to identify this flow entry
- *
- * This function looks for the flow entry with the specified unique ID in all
- * flow profiles of the specified classification stage. If the entry is found,
- * it returns the handle to the flow entry. Otherwise, it returns
- * ICE_FLOW_ENTRY_HANDLE_INVAL.
- */
-u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
-{
-	struct ice_flow_entry *found = NULL;
-	struct ice_flow_prof *p;
-
-	ice_acquire_lock(&hw->fl_profs_locks[blk]);
-
-	LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
-		struct ice_flow_entry *e;
-
-		ice_acquire_lock(&p->entries_lock);
-		LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
-			if (e->id == entry_id) {
-				found = e;
-				break;
-			}
-		ice_release_lock(&p->entries_lock);
-
-		if (found)
-			break;
-	}
-
-	ice_release_lock(&hw->fl_profs_locks[blk]);
-
-	return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
-}
-
 /**
  * ice_flow_acl_check_actions - Checks the ACL rule's actions
  * @hw: pointer to the hardware structure
@@ -3162,71 +3076,6 @@ ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
 	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
 }
 
-/**
- * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
- * @seg: packet segment the field being set belongs to
- * @fld: field to be set
- * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
- *           entry's input buffer
- * @pref_loc: location of prefix value from entry's input buffer
- * @pref_sz: size of the location holding the prefix value
- *
- * This function specifies the locations, in the form of byte offsets from the
- * start of the input buffer for a flow entry, from where the value to match
- * and the IPv4 prefix value can be extracted. These locations are then stored
- * in the flow profile. When adding flow entries to the associated flow profile,
- * these locations can be used to quickly extract the values to create the
- * content of a match entry. This function should only be used for fixed-size
- * data structures.
- */
-void
-ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
-			u16 val_loc, u16 pref_loc, u8 pref_sz)
-{
-	/* For this type of field, the "mask" location is for the prefix value's
-	 * location and the "last" location is for the size of the location of
-	 * the prefix value.
-	 */
-	ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
-			     pref_loc, (u16)pref_sz);
-}
-
-/**
- * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
- * @seg: packet segment the field being set belongs to
- * @off: offset of the raw field from the beginning of the segment in bytes
- * @len: length of the raw pattern to be matched
- * @val_loc: location of the value to match from entry's input buffer
- * @mask_loc: location of mask value from entry's input buffer
- *
- * This function specifies the offset of the raw field to be matched from the
- * beginning of the specified packet segment, and the locations, in the form of
- * byte offsets from the start of the input buffer for a flow entry, from where
- * the value to match and the mask value are to be extracted. These locations are
- * then stored in the flow profile. When adding flow entries to the associated
- * flow profile, these locations can be used to quickly extract the values to
- * create the content of a match entry. This function should only be used for
- * fixed-size data structures.
- */
-void
-ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
-		     u16 val_loc, u16 mask_loc)
-{
-	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
-		seg->raws[seg->raws_cnt].off = off;
-		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
-		seg->raws[seg->raws_cnt].info.src.val = val_loc;
-		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
-		/* The "last" field is used to store the length of the field */
-		seg->raws[seg->raws_cnt].info.src.last = len;
-	}
-
-	/* Overflows of "raws" will be handled as an error condition later in
-	 * the flow when this information is processed.
-	 */
-	seg->raws_cnt++;
-}
-
 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
 
@@ -3293,31 +3142,6 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
 	return ICE_SUCCESS;
 }
 
-/**
- * ice_rem_vsi_rss_list - remove VSI from RSS list
- * @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- *
- * Remove the VSI from all RSS configurations in the list.
- */
-void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
-{
-	struct ice_rss_cfg *r, *tmp;
-
-	if (LIST_EMPTY(&hw->rss_list_head))
-		return;
-
-	ice_acquire_lock(&hw->rss_locks);
-	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
-				 ice_rss_cfg, l_entry)
-		if (ice_test_and_clear_bit(vsi_handle, r->vsis))
-			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
-				LIST_DEL(&r->l_entry);
-				ice_free(hw, r);
-			}
-	ice_release_lock(&hw->rss_locks);
-}
-
 /**
  * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
  * @hw: pointer to the hardware structure
@@ -3880,34 +3704,3 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
 
 	return status;
 }
-
-/**
- * ice_get_rss_cfg - returns hashed fields for the given header types
- * @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @hdrs: protocol header type
- *
- * This function will return the match fields of the first instance of flow
- * profile having the given header types and containing input VSI
- */
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
-{
-	u64 rss_hash = ICE_HASH_INVALID;
-	struct ice_rss_cfg *r;
-
-	/* verify if the protocol header is non zero and VSI is valid */
-	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
-		return ICE_HASH_INVALID;
-
-	ice_acquire_lock(&hw->rss_locks);
-	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
-			    ice_rss_cfg, l_entry)
-		if (ice_is_bit_set(r->vsis, vsi_handle) &&
-		    r->hash.addl_hdrs == hdrs) {
-			rss_hash = r->hash.hash_flds;
-			break;
-		}
-	ice_release_lock(&hw->rss_locks);
-
-	return rss_hash;
-}
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 2a9ae66454..2675202240 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -504,9 +504,6 @@ struct ice_flow_action {
 	} data;
 };
 
-u64
-ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
-		   struct ice_flow_seg_info *segs, u8 segs_cnt);
 enum ice_status
 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
 		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
@@ -518,13 +515,9 @@ enum ice_status
 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
 		    struct ice_flow_prof *prof, u16 vsi_handle);
 enum ice_status
-ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
-			u16 vsig);
-enum ice_status
 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 		     u8 *hw_prof);
 
-u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
 enum ice_status
 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
@@ -535,13 +528,6 @@ ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
 void
 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
 		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
-void
-ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
-			u16 val_loc, u16 prefix_loc, u8 prefix_sz);
-void
-ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
-		     u16 val_loc, u16 mask_loc);
-void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
 enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
 enum ice_status
 ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
@@ -552,5 +538,4 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
 enum ice_status
 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
 		const struct ice_rss_hash_cfg *cfg);
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
 #endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c
index 7b76af7b6f..75ff992b9c 100644
--- a/drivers/net/ice/base/ice_nvm.c
+++ b/drivers/net/ice/base/ice_nvm.c
@@ -145,39 +145,6 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
 	return ICE_SUCCESS;
 }
 
-/**
- * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
- *
- * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
- * taken before reading the buffer and later released.
- */
-static enum ice_status
-ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
-{
-	u32 bytes = *words * 2, i;
-	enum ice_status status;
-
-	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
-	/* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM
-	 * sector restrictions necessary when reading from the NVM.
-	 */
-	status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
-
-	/* Report the number of words successfully read */
-	*words = bytes / 2;
-
-	/* Byte swap the words up to the amount we actually read */
-	for (i = 0; i < *words; i++)
-		data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);
-
-	return status;
-}
-
 /**
  * ice_acquire_nvm - Generic request for acquiring the NVM ownership
  * @hw: pointer to the HW structure
@@ -400,65 +367,6 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
 	return ICE_ERR_DOES_NOT_EXIST;
 }
 
-/**
- * ice_read_pba_string - Reads part number string from NVM
- * @hw: pointer to hardware structure
- * @pba_num: stores the part number string from the NVM
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number string from the NVM.
- */
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
-	u16 pba_tlv, pba_tlv_len;
-	enum ice_status status;
-	u16 pba_word, pba_size;
-	u16 i;
-
-	status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
-					ICE_SR_PBA_BLOCK_PTR);
-	if (status != ICE_SUCCESS) {
-		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
-		return status;
-	}
-
-	/* pba_size is the next word */
-	status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
-	if (status != ICE_SUCCESS) {
-		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
-		return status;
-	}
-
-	if (pba_tlv_len < pba_size) {
-		ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
-		return ICE_ERR_INVAL_SIZE;
-	}
-
-	/* Subtract one to get PBA word count (PBA Size word is included in
-	 * total size)
-	 */
-	pba_size--;
-	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
-		ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
-		return ICE_ERR_PARAM;
-	}
-
-	for (i = 0; i < pba_size; i++) {
-		status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
-		if (status != ICE_SUCCESS) {
-			ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
-			return status;
-		}
-
-		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
-		pba_num[(i * 2) + 1] = pba_word & 0xFF;
-	}
-	pba_num[(pba_size * 2)] = '\0';
-
-	return status;
-}
-
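
The removed ice_read_pba_string() above decodes the PBA block as a sequence of
16-bit Shadow RAM words, two ASCII characters per word with the high byte
first, and terminates the string with a NUL. A standalone sketch of just that
unpacking step (the word values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t pba_words[] = { 0x4B32, 0x3334, 0x3536 };
	char pba_num[2 * 3 + 1];
	unsigned int i;

	for (i = 0; i < 3; i++) {
		pba_num[i * 2] = (pba_words[i] >> 8) & 0xFF; /* high byte first */
		pba_num[i * 2 + 1] = pba_words[i] & 0xFF;    /* then low byte */
	}
	pba_num[2 * 3] = '\0';

	printf("%s\n", pba_num);  /* prints "K23456" */
	return 0;
}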
 /**
  * ice_get_nvm_srev - Read the security revision from the NVM CSS header
  * @hw: pointer to the HW struct
@@ -884,62 +792,6 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
 	return ICE_SUCCESS;
 }
 
-/**
- * ice_read_sr_buf - Reads Shadow RAM buf and acquires lock if necessary
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
- *
- * Reads 16-bit words (data buf) from the SR using the ice_read_sr_buf_aq
- * method. The buf read is preceded by the NVM ownership take
- * and followed by the release.
- */
-enum ice_status
-ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
-{
-	enum ice_status status;
-
-	status = ice_acquire_nvm(hw, ICE_RES_READ);
-	if (!status) {
-		status = ice_read_sr_buf_aq(hw, offset, words, data);
-		ice_release_nvm(hw);
-	}
-
-	return status;
-}
-
-/**
- * ice_nvm_validate_checksum
- * @hw: pointer to the HW struct
- *
- * Verify NVM PFA checksum validity (0x0706)
- */
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
-{
-	struct ice_aqc_nvm_checksum *cmd;
-	struct ice_aq_desc desc;
-	enum ice_status status;
-
-	status = ice_acquire_nvm(hw, ICE_RES_READ);
-	if (status)
-		return status;
-
-	cmd = &desc.params.nvm_checksum;
-
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
-	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
-
-	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-	ice_release_nvm(hw);
-
-	if (!status)
-		if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
-			status = ICE_ERR_NVM_CHECKSUM;
-
-	return status;
-}
-
 /**
  * ice_nvm_access_get_features - Return the NVM access features structure
  * @cmd: NVM access command to process
@@ -1129,55 +981,3 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
 
 	return ICE_SUCCESS;
 }
-
-/**
- * ice_handle_nvm_access - Handle an NVM access request
- * @hw: pointer to the HW struct
- * @cmd: NVM access command info
- * @data: pointer to read or return data
- *
- * Process an NVM access request. Read the command structure information and
- * determine if it is valid. If not, report an error indicating the command
- * was invalid.
- *
- * For valid commands, perform the necessary function, copying the data into
- * the provided data buffer.
- */
-enum ice_status
-ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
-		      union ice_nvm_access_data *data)
-{
-	u32 module, flags, adapter_info;
-
-	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
-	/* Extended flags are currently reserved and must be zero */
-	if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0)
-		return ICE_ERR_PARAM;
-
-	/* Adapter info must match the HW device ID */
-	adapter_info = ice_nvm_access_get_adapter(cmd);
-	if (adapter_info != hw->device_id)
-		return ICE_ERR_PARAM;
-
-	switch (cmd->command) {
-	case ICE_NVM_CMD_READ:
-		module = ice_nvm_access_get_module(cmd);
-		flags = ice_nvm_access_get_flags(cmd);
-
-		/* Getting the driver's NVM features structure shares the same
-		 * command type as reading a register. Read the config field
-		 * to determine if this is a request to get features.
-		 */
-		if (module == ICE_NVM_GET_FEATURES_MODULE &&
-		    flags == ICE_NVM_GET_FEATURES_FLAGS &&
-		    cmd->offset == 0)
-			return ice_nvm_access_get_features(cmd, data);
-		else
-			return ice_nvm_access_read(hw, cmd, data);
-	case ICE_NVM_CMD_WRITE:
-		return ice_nvm_access_write(hw, cmd, data);
-	default:
-		return ICE_ERR_PARAM;
-	}
-}
diff --git a/drivers/net/ice/base/ice_nvm.h b/drivers/net/ice/base/ice_nvm.h
index 8e2eb4df1b..e46562f862 100644
--- a/drivers/net/ice/base/ice_nvm.h
+++ b/drivers/net/ice/base/ice_nvm.h
@@ -82,9 +82,6 @@ enum ice_status
 ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
 			    union ice_nvm_access_data *data);
 enum ice_status
-ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
-		      union ice_nvm_access_data *data);
-enum ice_status
 ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
 void ice_release_nvm(struct ice_hw *hw);
 enum ice_status
@@ -97,11 +94,6 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
 enum ice_status
 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
 		       u16 module_type);
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
 enum ice_status ice_init_nvm(struct ice_hw *hw);
 enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
-ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
 #endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index ac48bbe279..d7f0866dac 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -644,25 +644,6 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
 				 buf, buf_size, num_profiles_added, cd);
 }
 
-/**
- * ice_aq_query_rl_profile - query rate limiting profile(s)
- * @hw: pointer to the HW struct
- * @num_profiles: the number of profile(s) to query
- * @buf: pointer to buffer
- * @buf_size: buffer size in bytes
- * @cd: pointer to command details structure
- *
- * Query RL profile (0x0411)
- */
-enum ice_status
-ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
-			struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
-			struct ice_sq_cd *cd)
-{
-	return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
-				 num_profiles, buf, buf_size, NULL, cd);
-}
-
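
A hedged usage sketch for the removed query wrapper; the array length, the
NULL command-details pointer and the example_* name are illustrative
assumptions:

#include "ice_common.h"
#include "ice_sched.h"

/* Sketch: query up to 4 rate-limit profiles via AQ opcode 0x0411. */
static enum ice_status example_query_rl(struct ice_hw *hw)
{
	struct ice_aqc_rl_profile_elem buf[4];

	return ice_aq_query_rl_profile(hw, 4, buf, sizeof(buf), NULL);
}
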
 /**
  * ice_aq_remove_rl_profile - removes RL profile(s)
  * @hw: pointer to the HW struct
@@ -839,32 +820,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
 	hw->max_cgds = 0;
 }
 
-/**
- * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
- * @hw: pointer to the HW struct
- * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
- * @buf: pointer to buffer
- * @buf_size: buffer size in bytes
- * @cd: pointer to command details structure or NULL
- *
- * Configure L2 Node CGD (0x0414)
- */
-enum ice_status
-ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
-		       struct ice_aqc_cfg_l2_node_cgd_elem *buf,
-		       u16 buf_size, struct ice_sq_cd *cd)
-{
-	struct ice_aqc_cfg_l2_node_cgd *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.cfg_l2_node_cgd;
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
-	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
-	cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
-	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
 /**
  * ice_sched_add_elems - add nodes to HW and SW DB
  * @pi: port information structure
@@ -1959,137 +1914,6 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
 	return status;
 }
 
-/**
- * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- *
- * This function removes a single aggregator VSI info entry from the
- * aggregator list.
- */
-static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
-{
-	struct ice_sched_agg_info *agg_info;
-	struct ice_sched_agg_info *atmp;
-
-	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
-				 ice_sched_agg_info,
-				 list_entry) {
-		struct ice_sched_agg_vsi_info *agg_vsi_info;
-		struct ice_sched_agg_vsi_info *vtmp;
-
-		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
-					 &agg_info->agg_vsi_list,
-					 ice_sched_agg_vsi_info, list_entry)
-			if (agg_vsi_info->vsi_handle == vsi_handle) {
-				LIST_DEL(&agg_vsi_info->list_entry);
-				ice_free(pi->hw, agg_vsi_info);
-				return;
-			}
-	}
-}
-
-/**
- * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
- * @node: pointer to the sub-tree node
- *
- * This function checks for a leaf node presence in a given sub-tree node.
- */
-static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
-{
-	u8 i;
-
-	for (i = 0; i < node->num_children; i++)
-		if (ice_sched_is_leaf_node_present(node->children[i]))
-			return true;
-	/* check for a leaf node */
-	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
-}
-
-/**
- * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- * @owner: LAN or RDMA
- *
- * This function removes the VSI and its LAN or RDMA children nodes from the
- * scheduler tree.
- */
-static enum ice_status
-ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
-{
-	enum ice_status status = ICE_ERR_PARAM;
-	struct ice_vsi_ctx *vsi_ctx;
-	u8 i;
-
-	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
-	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
-		return status;
-	ice_acquire_lock(&pi->sched_lock);
-	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
-	if (!vsi_ctx)
-		goto exit_sched_rm_vsi_cfg;
-
-	ice_for_each_traffic_class(i) {
-		struct ice_sched_node *vsi_node, *tc_node;
-		u8 j = 0;
-
-		tc_node = ice_sched_get_tc_node(pi, i);
-		if (!tc_node)
-			continue;
-
-		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
-		if (!vsi_node)
-			continue;
-
-		if (ice_sched_is_leaf_node_present(vsi_node)) {
-			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
-			status = ICE_ERR_IN_USE;
-			goto exit_sched_rm_vsi_cfg;
-		}
-		while (j < vsi_node->num_children) {
-			if (vsi_node->children[j]->owner == owner) {
-				ice_free_sched_node(pi, vsi_node->children[j]);
-
-				/* reset the counter again since the num
-				 * children will be updated after node removal
-				 */
-				j = 0;
-			} else {
-				j++;
-			}
-		}
-		/* remove the VSI if it has no children */
-		if (!vsi_node->num_children) {
-			ice_free_sched_node(pi, vsi_node);
-			vsi_ctx->sched.vsi_node[i] = NULL;
-
-			/* clean up aggregator related VSI info if any */
-			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
-		}
-		if (owner == ICE_SCHED_NODE_OWNER_LAN)
-			vsi_ctx->sched.max_lanq[i] = 0;
-	}
-	status = ICE_SUCCESS;
-
-exit_sched_rm_vsi_cfg:
-	ice_release_lock(&pi->sched_lock);
-	return status;
-}
-
-/**
- * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- *
- * This function clears the VSI and its LAN children nodes from scheduler tree
- * for all TCs.
- */
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
-{
-	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
-}
-
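
A short sketch of the teardown path that used ice_rm_vsi_lan_cfg() (the
example_* wrapper is an illustrative assumption):

#include "ice_common.h"
#include "ice_sched.h"

/* Sketch: remove a VSI's LAN nodes from the scheduler tree.  The LAN queues
 * must already be freed, otherwise ICE_ERR_IN_USE is returned because leaf
 * nodes are still present (see the check above). */
static void example_rm_vsi(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status = ice_rm_vsi_lan_cfg(pi, vsi_handle);

	if (status == ICE_ERR_IN_USE)
		ice_debug(pi->hw, ICE_DBG_SCHED,
			  "VSI %d still has leaf nodes\n", vsi_handle);
}
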
 /**
  * ice_sched_is_tree_balanced - Check tree nodes are identical or not
  * @hw: pointer to the HW struct
@@ -2114,31 +1938,6 @@ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
 	return ice_sched_check_node(hw, node);
 }
 
-/**
- * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
- * @hw: pointer to the HW struct
- * @node_teid: node TEID
- * @buf: pointer to buffer
- * @buf_size: buffer size in bytes
- * @cd: pointer to command details structure or NULL
- *
- * This function retrieves the tree topology from the firmware for a given
- * node TEID to the root node.
- */
-enum ice_status
-ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
-			  struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
-			  struct ice_sq_cd *cd)
-{
-	struct ice_aqc_query_node_to_root *cmd;
-	struct ice_aq_desc desc;
-
-	cmd = &desc.params.query_node_to_root;
-	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
-	cmd->teid = CPU_TO_LE32(node_teid);
-	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
 /**
  * ice_get_agg_info - get the aggregator ID
  * @hw: pointer to the hardware structure
@@ -2526,29 +2325,6 @@ ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
 	return status;
 }
 
-/**
- * ice_save_agg_tc_bitmap - save aggregator TC bitmap
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @tc_bitmap: 8 bits TC bitmap
- *
- * Save aggregator TC bitmap. This function needs to be called with scheduler
- * lock held.
- */
-static enum ice_status
-ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
-		       ice_bitmap_t *tc_bitmap)
-{
-	struct ice_sched_agg_info *agg_info;
-
-	agg_info = ice_get_agg_info(pi->hw, agg_id);
-	if (!agg_info)
-		return ICE_ERR_PARAM;
-	ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
-		      ICE_MAX_TRAFFIC_CLASS);
-	return ICE_SUCCESS;
-}
-
 /**
  * ice_sched_add_agg_cfg - create an aggregator node
  * @pi: port information structure
@@ -2701,32 +2477,6 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
 	return status;
 }
 
-/**
- * ice_cfg_agg - config aggregator node
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @agg_type: aggregator type queue, VSI, or aggregator group
- * @tc_bitmap: bits TC bitmap
- *
- * This function configures aggregator node(s).
- */
-enum ice_status
-ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
-	    u8 tc_bitmap)
-{
-	ice_bitmap_t bitmap = tc_bitmap;
-	enum ice_status status;
-
-	ice_acquire_lock(&pi->sched_lock);
-	status = ice_sched_cfg_agg(pi, agg_id, agg_type,
-				   (ice_bitmap_t *)&bitmap);
-	if (!status)
-		status = ice_save_agg_tc_bitmap(pi, agg_id,
-						(ice_bitmap_t *)&bitmap);
-	ice_release_lock(&pi->sched_lock);
-	return status;
-}
-
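
A sketch of how the aggregator helpers fit together; it also uses
ice_move_vsi_to_agg(), which this patch drops further down.  The agg_id,
vsi_handle, TC bitmap value and example_* name are illustrative assumptions:

#include "ice_common.h"
#include "ice_sched.h"

/* Sketch: create an aggregator for TC0 and TC1, then move a VSI into it. */
static enum ice_status
example_agg_setup(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle)
{
	enum ice_status status;

	status = ice_cfg_agg(pi, agg_id, ICE_AGG_TYPE_AGG, 0x3);
	if (status)
		return status;

	return ice_move_vsi_to_agg(pi, agg_id, vsi_handle, 0x3);
}
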
 /**
  * ice_get_agg_vsi_info - get the aggregator ID
  * @agg_info: aggregator info
@@ -2773,35 +2523,6 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
 	return NULL;
 }
 
-/**
- * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @vsi_handle: software VSI handle
- * @tc_bitmap: TC bitmap of enabled TC(s)
- *
- * Save VSI to aggregator TC bitmap. This function needs to be called with
- * the scheduler lock held.
- */
-static enum ice_status
-ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
-			   ice_bitmap_t *tc_bitmap)
-{
-	struct ice_sched_agg_vsi_info *agg_vsi_info;
-	struct ice_sched_agg_info *agg_info;
-
-	agg_info = ice_get_agg_info(pi->hw, agg_id);
-	if (!agg_info)
-		return ICE_ERR_PARAM;
-	/* check if entry already exists */
-	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
-	if (!agg_vsi_info)
-		return ICE_ERR_PARAM;
-	ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
-		      ICE_MAX_TRAFFIC_CLASS);
-	return ICE_SUCCESS;
-}
-
 /**
  * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
  * @pi: port information structure
@@ -2959,124 +2680,75 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
 }
 
 /**
- * ice_move_vsi_to_agg - moves VSI to new or default aggregator
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @vsi_handle: software VSI handle
- * @tc_bitmap: TC bitmap of enabled TC(s)
- *
- * Move or associate VSI to a new or default aggregator node.
- */
-enum ice_status
-ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
-		    u8 tc_bitmap)
-{
-	ice_bitmap_t bitmap = tc_bitmap;
-	enum ice_status status;
-
-	ice_acquire_lock(&pi->sched_lock);
-	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
-					    (ice_bitmap_t *)&bitmap);
-	if (!status)
-		status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
-						    (ice_bitmap_t *)&bitmap);
-	ice_release_lock(&pi->sched_lock);
-	return status;
-}
-
-/**
- * ice_rm_agg_cfg - remove aggregator configuration
- * @pi: port information structure
- * @agg_id: aggregator ID
+ * ice_set_clear_cir_bw - set or clear CIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
  *
- * This function removes the aggregator's reference to the VSI and deletes
- * the aggregator ID info. It removes the aggregator configuration completely.
+ * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
  */
-enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
+static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 {
-	struct ice_sched_agg_info *agg_info;
-	enum ice_status status = ICE_SUCCESS;
-	u8 tc;
-
-	ice_acquire_lock(&pi->sched_lock);
-	agg_info = ice_get_agg_info(pi->hw, agg_id);
-	if (!agg_info) {
-		status = ICE_ERR_DOES_NOT_EXIST;
-		goto exit_ice_rm_agg_cfg;
-	}
-
-	ice_for_each_traffic_class(tc) {
-		status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
-		if (status)
-			goto exit_ice_rm_agg_cfg;
-	}
-
-	if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
-		status = ICE_ERR_IN_USE;
-		goto exit_ice_rm_agg_cfg;
+	if (bw == ICE_SCHED_DFLT_BW) {
+		ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+		bw_t_info->cir_bw.bw = 0;
+	} else {
+		/* Save type of BW information */
+		ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+		bw_t_info->cir_bw.bw = bw;
 	}
-
-	/* Safe to delete entry now */
-	LIST_DEL(&agg_info->list_entry);
-	ice_free(pi->hw, agg_info);
-
-	/* Remove unused RL profile IDs from HW and SW DB */
-	ice_sched_rm_unused_rl_prof(pi->hw);
-
-exit_ice_rm_agg_cfg:
-	ice_release_lock(&pi->sched_lock);
-	return status;
 }
 
 /**
- * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
+ * ice_set_clear_eir_bw - set or clear EIR BW
  * @bw_t_info: bandwidth type information structure
- * @bw_alloc: Bandwidth allocation information
+ * @bw: bandwidth in Kbps - Kilo bits per sec
  *
- * Save or clear CIR BW alloc information (bw_alloc) in the passed param
- * bw_t_info.
+ * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
  */
-static void
-ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
+static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 {
-	bw_t_info->cir_bw.bw_alloc = bw_alloc;
-	if (bw_t_info->cir_bw.bw_alloc)
-		ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
-	else
-		ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
+	if (bw == ICE_SCHED_DFLT_BW) {
+		ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+		bw_t_info->eir_bw.bw = 0;
+	} else {
+		/* save EIR BW information */
+		ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+		bw_t_info->eir_bw.bw = bw;
+	}
 }
 
 /**
- * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
+ * ice_set_clear_shared_bw - set or clear shared BW
  * @bw_t_info: bandwidth type information structure
- * @bw_alloc: Bandwidth allocation information
+ * @bw: bandwidth in Kbps - Kilo bits per sec
  *
- * Save or clear EIR BW alloc information (bw_alloc) in the passed param
- * bw_t_info.
+ * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
  */
-static void
-ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
+static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 {
-	bw_t_info->eir_bw.bw_alloc = bw_alloc;
-	if (bw_t_info->eir_bw.bw_alloc)
-		ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
-	else
-		ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
+	if (bw == ICE_SCHED_DFLT_BW) {
+		ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+		bw_t_info->shared_bw = 0;
+	} else {
+		/* save shared BW information */
+		ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+		bw_t_info->shared_bw = bw;
+	}
 }
 
 /**
- * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
+ * ice_sched_save_vsi_bw - save VSI node's BW information
  * @pi: port information structure
  * @vsi_handle: sw VSI handle
  * @tc: traffic class
- * @rl_type: rate limit type min or max
- * @bw_alloc: Bandwidth allocation information
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
  *
- * Save BW alloc information of VSI type node for post replay use.
+ * Save BW information of VSI type node for post replay use.
  */
 static enum ice_status
-ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
-			    enum ice_rl_type rl_type, u16 bw_alloc)
+ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+		      enum ice_rl_type rl_type, u32 bw)
 {
 	struct ice_vsi_ctx *vsi_ctx;
 
@@ -3087,100 +2759,7 @@ ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 		return ICE_ERR_PARAM;
 	switch (rl_type) {
 	case ICE_MIN_BW:
-		ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
-					   bw_alloc);
-		break;
-	case ICE_MAX_BW:
-		ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
-					   bw_alloc);
-		break;
-	default:
-		return ICE_ERR_PARAM;
-	}
-	return ICE_SUCCESS;
-}
-
-/**
- * ice_set_clear_cir_bw - set or clear CIR BW
- * @bw_t_info: bandwidth type information structure
- * @bw: bandwidth in Kbps - Kilo bits per sec
- *
- * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
- */
-static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
-{
-	if (bw == ICE_SCHED_DFLT_BW) {
-		ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
-		bw_t_info->cir_bw.bw = 0;
-	} else {
-		/* Save type of BW information */
-		ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
-		bw_t_info->cir_bw.bw = bw;
-	}
-}
-
-/**
- * ice_set_clear_eir_bw - set or clear EIR BW
- * @bw_t_info: bandwidth type information structure
- * @bw: bandwidth in Kbps - Kilo bits per sec
- *
- * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
- */
-static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
-{
-	if (bw == ICE_SCHED_DFLT_BW) {
-		ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
-		bw_t_info->eir_bw.bw = 0;
-	} else {
-		/* save EIR BW information */
-		ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
-		bw_t_info->eir_bw.bw = bw;
-	}
-}
-
-/**
- * ice_set_clear_shared_bw - set or clear shared BW
- * @bw_t_info: bandwidth type information structure
- * @bw: bandwidth in Kbps - Kilo bits per sec
- *
- * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
- */
-static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
-{
-	if (bw == ICE_SCHED_DFLT_BW) {
-		ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
-		bw_t_info->shared_bw = 0;
-	} else {
-		/* save shared BW information */
-		ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
-		bw_t_info->shared_bw = bw;
-	}
-}
-
-/**
- * ice_sched_save_vsi_bw - save VSI node's BW information
- * @pi: port information structure
- * @vsi_handle: sw VSI handle
- * @tc: traffic class
- * @rl_type: rate limit type min, max, or shared
- * @bw: bandwidth in Kbps - Kilo bits per sec
- *
- * Save BW information of VSI type node for post replay use.
- */
-static enum ice_status
-ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
-		      enum ice_rl_type rl_type, u32 bw)
-{
-	struct ice_vsi_ctx *vsi_ctx;
-
-	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
-		return ICE_ERR_PARAM;
-	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
-	if (!vsi_ctx)
-		return ICE_ERR_PARAM;
-	switch (rl_type) {
-	case ICE_MIN_BW:
-		ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+		ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
 		break;
 	case ICE_MAX_BW:
 		ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
@@ -3194,82 +2773,6 @@ ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 	return ICE_SUCCESS;
 }
 
-/**
- * ice_set_clear_prio - set or clear priority information
- * @bw_t_info: bandwidth type information structure
- * @prio: priority to save
- *
- * Save or clear priority (prio) in the passed param bw_t_info.
- */
-static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
-{
-	bw_t_info->generic = prio;
-	if (bw_t_info->generic)
-		ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
-	else
-		ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
-}
-
-/**
- * ice_sched_save_vsi_prio - save VSI node's priority information
- * @pi: port information structure
- * @vsi_handle: Software VSI handle
- * @tc: traffic class
- * @prio: priority to save
- *
- * Save priority information of VSI type node for post replay use.
- */
-static enum ice_status
-ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
-			u8 prio)
-{
-	struct ice_vsi_ctx *vsi_ctx;
-
-	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
-		return ICE_ERR_PARAM;
-	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
-	if (!vsi_ctx)
-		return ICE_ERR_PARAM;
-	if (tc >= ICE_MAX_TRAFFIC_CLASS)
-		return ICE_ERR_PARAM;
-	ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
-	return ICE_SUCCESS;
-}
-
-/**
- * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
- * @pi: port information structure
- * @agg_id: node aggregator ID
- * @tc: traffic class
- * @rl_type: rate limit type min or max
- * @bw_alloc: bandwidth alloc information
- *
- * Save BW alloc information of AGG type node for post replay use.
- */
-static enum ice_status
-ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
-			    enum ice_rl_type rl_type, u16 bw_alloc)
-{
-	struct ice_sched_agg_info *agg_info;
-
-	agg_info = ice_get_agg_info(pi->hw, agg_id);
-	if (!agg_info)
-		return ICE_ERR_PARAM;
-	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
-		return ICE_ERR_PARAM;
-	switch (rl_type) {
-	case ICE_MIN_BW:
-		ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
-		break;
-	case ICE_MAX_BW:
-		ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
-		break;
-	default:
-		return ICE_ERR_PARAM;
-	}
-	return ICE_SUCCESS;
-}
-
 /**
  * ice_sched_save_agg_bw - save aggregator node's BW information
  * @pi: port information structure
@@ -3284,490 +2787,27 @@ static enum ice_status
 ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
 		      enum ice_rl_type rl_type, u32 bw)
 {
-	struct ice_sched_agg_info *agg_info;
-
-	agg_info = ice_get_agg_info(pi->hw, agg_id);
-	if (!agg_info)
-		return ICE_ERR_PARAM;
-	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
-		return ICE_ERR_PARAM;
-	switch (rl_type) {
-	case ICE_MIN_BW:
-		ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
-		break;
-	case ICE_MAX_BW:
-		ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
-		break;
-	case ICE_SHARED_BW:
-		ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
-		break;
-	default:
-		return ICE_ERR_PARAM;
-	}
-	return ICE_SUCCESS;
-}
-
-/**
- * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- * @tc: traffic class
- * @rl_type: min or max
- * @bw: bandwidth in Kbps
- *
- * This function configures BW limit of VSI scheduling node based on TC
- * information.
- */
-enum ice_status
-ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
-			  enum ice_rl_type rl_type, u32 bw)
-{
-	enum ice_status status;
-
-	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
-						  ICE_AGG_TYPE_VSI,
-						  tc, rl_type, bw);
-	if (!status) {
-		ice_acquire_lock(&pi->sched_lock);
-		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
-		ice_release_lock(&pi->sched_lock);
-	}
-	return status;
-}
-
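
A sketch covering this helper and the default-limit variant that follows;
the 100000 Kbps value, the TC number and the example_* name are illustrative
assumptions:

#include "ice_common.h"
#include "ice_sched.h"

/* Sketch: cap TC 0 of a VSI to 100000 Kbps (EIR/max limit), then restore
 * the default, i.e. unlimited, max BW. */
static enum ice_status
example_vsi_bw_cap(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	status = ice_cfg_vsi_bw_lmt_per_tc(pi, vsi_handle, 0, ICE_MAX_BW,
					   100000);
	if (status)
		return status;

	return ice_cfg_vsi_bw_dflt_lmt_per_tc(pi, vsi_handle, 0, ICE_MAX_BW);
}
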
-/**
- * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- * @tc: traffic class
- * @rl_type: min or max
- *
- * This function configures default BW limit of VSI scheduling node based on TC
- * information.
- */
-enum ice_status
-ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
-			       enum ice_rl_type rl_type)
-{
-	enum ice_status status;
-
-	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
-						  ICE_AGG_TYPE_VSI,
-						  tc, rl_type,
-						  ICE_SCHED_DFLT_BW);
-	if (!status) {
-		ice_acquire_lock(&pi->sched_lock);
-		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
-					       ICE_SCHED_DFLT_BW);
-		ice_release_lock(&pi->sched_lock);
-	}
-	return status;
-}
-
-/**
- * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @tc: traffic class
- * @rl_type: min or max
- * @bw: bandwidth in Kbps
- *
- * This function applies BW limit to aggregator scheduling node based on TC
- * information.
- */
-enum ice_status
-ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
-			  enum ice_rl_type rl_type, u32 bw)
-{
-	enum ice_status status;
-
-	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
-						  tc, rl_type, bw);
-	if (!status) {
-		ice_acquire_lock(&pi->sched_lock);
-		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
-		ice_release_lock(&pi->sched_lock);
-	}
-	return status;
-}
-
-/**
- * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @tc: traffic class
- * @rl_type: min or max
- *
- * This function applies default BW limit to aggregator scheduling node based
- * on TC information.
- */
-enum ice_status
-ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
-			       enum ice_rl_type rl_type)
-{
-	enum ice_status status;
-
-	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
-						  tc, rl_type,
-						  ICE_SCHED_DFLT_BW);
-	if (!status) {
-		ice_acquire_lock(&pi->sched_lock);
-		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
-					       ICE_SCHED_DFLT_BW);
-		ice_release_lock(&pi->sched_lock);
-	}
-	return status;
-}
-
-/**
- * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- * @min_bw: minimum bandwidth in Kbps
- * @max_bw: maximum bandwidth in Kbps
- * @shared_bw: shared bandwidth in Kbps
- *
- * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
- * classes for VSI matching handle.
- */
-enum ice_status
-ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
-			  u32 max_bw, u32 shared_bw)
-{
-	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
-					       shared_bw);
-}
-
-/**
- * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
- * @pi: port information structure
- * @vsi_handle: software VSI handle
- *
- * This function removes the shared rate limiter(SRL) of all VSI type nodes
- * across all traffic classes for VSI matching handle.
- */
-enum ice_status
-ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
-{
-	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
-					       ICE_SCHED_DFLT_BW,
-					       ICE_SCHED_DFLT_BW,
-					       ICE_SCHED_DFLT_BW);
-}
-
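
A sketch pairing the two shared-limit helpers (the Kbps values and the
example_* name are illustrative assumptions):

#include "ice_common.h"
#include "ice_sched.h"

/* Sketch: apply a shared rate limiter (SRL) across all TCs of a VSI
 * (min 10000, max 500000, shared 200000 Kbps), then remove it again. */
static enum ice_status
example_vsi_srl(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	status = ice_cfg_vsi_bw_shared_lmt(pi, vsi_handle, 10000, 500000,
					   200000);
	if (status)
		return status;

	return ice_cfg_vsi_bw_no_shared_lmt(pi, vsi_handle);
}
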
-/**
- * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @min_bw: minimum bandwidth in Kbps
- * @max_bw: maximum bandwidth in Kbps
- * @shared_bw: shared bandwidth in Kbps
- *
- * This function configures the shared rate limiter(SRL) of all aggregator type
- * nodes across all traffic classes for aggregator matching agg_id.
- */
-enum ice_status
-ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
-			  u32 max_bw, u32 shared_bw)
-{
-	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw,
-					       shared_bw);
-}
-
-/**
- * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
- * @pi: port information structure
- * @agg_id: aggregator ID
- *
- * This function removes the shared rate limiter(SRL) of all aggregator type
- * nodes across all traffic classes for aggregator matching agg_id.
- */
-enum ice_status
-ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
-{
-	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
-					       ICE_SCHED_DFLT_BW,
-					       ICE_SCHED_DFLT_BW);
-}
-
-/**
- * ice_cfg_agg_bw_shared_lmt_per_tc - configure aggregator BW shared limit per tc
- * @pi: port information structure
- * @agg_id: aggregator ID
- * @tc: traffic class
- * @min_bw: minimum bandwidth in Kbps
- * @max_bw: maximum bandwidth in Kbps
- * @shared_bw: shared ba