provide RSS hash config/update and reta update/get ops.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 doc/guides/nics/features/zxdh.ini  |   3 +
 doc/guides/nics/zxdh.rst           |   1 +
 drivers/net/zxdh/zxdh_ethdev.c     |  51 ++++
 drivers/net/zxdh/zxdh_ethdev.h     |   4 +-
 drivers/net/zxdh/zxdh_ethdev_ops.c | 410 +++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev_ops.h |  26 ++
 drivers/net/zxdh/zxdh_msg.h        |  22 ++
 drivers/net/zxdh/zxdh_tables.c     |  82 ++++++
 drivers/net/zxdh/zxdh_tables.h     |   7 +
 9 files changed, 605 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 6fb006c2da..415ca547d0 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -19,3 +19,6 @@ Allmulticast mode    = Y
 VLAN filter          = Y
 VLAN offload         = Y
 QinQ offload         = Y
+RSS hash             = Y
+RSS reta update      = Y
+Inner RSS            = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 3a7585d123..3cc6a1d348 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -31,6 +31,7 @@ Features of the ZXDH PMD are:
 - VLAN filter and VLAN offload
 - VLAN stripping and inserting
 - QINQ stripping and inserting
+- Receive Side Scaling (RSS)
 
 
 Driver compilation and testing
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index cc32b467a9..17fca8e909 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -60,6 +60,8 @@ zxdh_dev_infos_get(struct rte_eth_dev *dev,
     dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_SCATTER);
     dev_info->rx_offload_capa |=  RTE_ETH_RX_OFFLOAD_TCP_LRO;
     dev_info->rx_offload_capa |=  RTE_ETH_RX_OFFLOAD_RSS_HASH;
+    dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_256;
+    dev_info->flow_type_rss_offloads = ZXDH_RSS_HF;
 
     dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS);
     dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
@@ -784,9 +786,48 @@ zxdh_dev_conf_offload(struct rte_eth_dev *dev)
         return ret;
     }
 
+    ret = zxdh_rss_configure(dev);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "rss configure failed");
+        return ret;
+    }
+
     return 0;
 }
 
+static int
+zxdh_rss_qid_config(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_port_attr_table port_attr = {0};
+    struct zxdh_msg_info msg_info = {0};
+    int ret = 0;
+
+    if (hw->is_pf) {
+        ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr);
+        port_attr.port_base_qid = hw->channel_context[0].ph_chno & 0xfff;
+
+        ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "PF:%d port_base_qid insert failed", hw->vfid);
+            return ret;
+        }
+    } else {
+        struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;
+
+        zxdh_msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
+        attr_msg->mode = ZXDH_PORT_BASE_QID_FLAG;
+        attr_msg->value = hw->channel_context[0].ph_chno & 0xfff;
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "Failed to send msg: port 0x%x msg type %d ",
+                    hw->vport.vport, ZXDH_PORT_BASE_QID_FLAG);
+            return ret;
+        }
+    }
+    return ret;
+}
+
 static int32_t
 zxdh_dev_configure(struct rte_eth_dev *dev)
 {
@@ -873,6 +914,12 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
         return -1;
     }
 
+    ret = zxdh_rss_qid_config(dev);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "Failed to configure base qid!");
+        return -1;
+    }
+
     zxdh_pci_reinit_complete(hw);
 
 end:
@@ -1099,6 +1146,10 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
     .allmulticast_disable     = zxdh_dev_allmulticast_disable,
     .vlan_filter_set         = zxdh_dev_vlan_filter_set,
     .vlan_offload_set         = zxdh_dev_vlan_offload_set,
+    .reta_update             = zxdh_dev_rss_reta_update,
+    .reta_query                 = zxdh_dev_rss_reta_query,
+    .rss_hash_update         = zxdh_rss_hash_update,
+    .rss_hash_conf_get         = zxdh_rss_hash_conf_get,
 };
 
 static int32_t
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 3cdac5de73..bd4e1587c8 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -82,7 +82,7 @@ struct zxdh_hw {
     uint16_t queue_num;
     uint16_t mc_num;
     uint16_t uc_num;
-
+    uint16_t *rss_reta;
     uint8_t *isr;
     uint8_t weak_barriers;
     uint8_t intr_enabled;
@@ -100,6 +100,8 @@ struct zxdh_hw {
     uint8_t admin_status;
     uint8_t promisc_status;
     uint8_t allmulti_status;
+    uint8_t rss_enable;
+    uint8_t rss_init;
 };
 
 struct zxdh_dtb_shared_data {
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 94c5e6dbc8..c12947cb4d 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -3,6 +3,7 @@
  */
 
 #include <rte_malloc.h>
+#include <rte_ether.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_pci.h"
@@ -12,6 +13,14 @@
 #include "zxdh_logs.h"
 
 #define ZXDH_VLAN_FILTER_GROUPS       64
+#define ZXDH_INVALID_LOGIC_QID        0xFFFFU
+
+/* Supported RSS */
+#define ZXDH_RSS_HF_MASK     (~(ZXDH_RSS_HF))
+#define ZXDH_HF_F5           1
+#define ZXDH_HF_F3           2
+#define ZXDH_HF_MAC_VLAN     4
+#define ZXDH_HF_ALL          0
 
 static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status)
 {
@@ -752,3 +761,404 @@ zxdh_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
     return ret;
 }
+
+int
+zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,
+             struct rte_eth_rss_reta_entry64 *reta_conf,
+             uint16_t reta_size)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg = {0};
+    uint16_t old_reta[RTE_ETH_RSS_RETA_SIZE_256];
+    uint16_t idx;
+    uint16_t i;
+    uint16_t pos;
+    int ret;
+
+    if (reta_size != RTE_ETH_RSS_RETA_SIZE_256) {
+        PMD_DRV_LOG(ERR, "reta_size is illegal(%u), reta_size should be 256", reta_size);
+        return -EINVAL;
+    }
+    if (!hw->rss_reta) {
+        hw->rss_reta = rte_zmalloc(NULL, RTE_ETH_RSS_RETA_SIZE_256 * sizeof(uint16_t), 4);
+        if (hw->rss_reta == NULL) {
+            PMD_DRV_LOG(ERR, "Failed to allocate RSS reta");
+            return -ENOMEM;
+        }
+    }
+    for (idx = 0, i = 0; (i < reta_size); ++i) {
+        idx = i / RTE_ETH_RETA_GROUP_SIZE;
+        pos = i % RTE_ETH_RETA_GROUP_SIZE;
+        if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
+            continue;
+        if (reta_conf[idx].reta[pos] >= dev->data->nb_rx_queues) {
+            PMD_DRV_LOG(ERR, "reta table value err(%u >= %u)",
+                reta_conf[idx].reta[pos], dev->data->nb_rx_queues);
+            return -EINVAL;
+        }
+        if (hw->rss_reta[i] != reta_conf[idx].reta[pos])
+            break;
+    }
+    if (i == reta_size) {
+        PMD_DRV_LOG(DEBUG, "reta table same with buffered table");
+        return 0;
+    }
+    memcpy(old_reta, hw->rss_reta, sizeof(old_reta));
+
+    for (idx = 0, i = 0; i < reta_size; ++i) {
+        idx = i / RTE_ETH_RETA_GROUP_SIZE;
+        pos = i % RTE_ETH_RETA_GROUP_SIZE;
+        if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
+            continue;
+        hw->rss_reta[i] = reta_conf[idx].reta[pos];
+    }
+
+    zxdh_msg_head_build(hw, ZXDH_RSS_RETA_SET, &msg);
+    for (i = 0; i < reta_size; i++)
+        msg.data.rss_reta.reta[i] =
+            (hw->channel_context[hw->rss_reta[i] * 2].ph_chno);
+
+
+    if (hw->is_pf) {
+        ret = zxdh_rss_table_set(hw->vport.vport, &msg.data.rss_reta);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "rss reta table set failed");
+            return -EINVAL;
+        }
+    } else {
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info), NULL, 0);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "vf rss reta table set failed");
+            return -EINVAL;
+        }
+    }
+    return ret;
+}
+
+static uint16_t
+zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid)
+{
+    struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+    uint16_t rx_queues = dev->data->nb_rx_queues;
+    uint16_t i;
+
+    for (i = 0; i < rx_queues; i++) {
+        if (qid == hw->channel_context[i * 2].ph_chno)
+            return i;
+    }
+    return ZXDH_INVALID_LOGIC_QID;
+}
+
+int
+zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
+            struct rte_eth_rss_reta_entry64 *reta_conf,
+            uint16_t reta_size)
+{
+    struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+    struct zxdh_msg_info msg = {0};
+    struct zxdh_msg_reply_info reply_msg = {0};
+    uint16_t idx;
+    uint16_t i;
+    int ret = 0;
+    uint16_t qid_logic;
+
+    ret = (!reta_size || reta_size > RTE_ETH_RSS_RETA_SIZE_256);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "request reta size(%u) not same with buffered(%u)",
+            reta_size, RTE_ETH_RSS_RETA_SIZE_256);
+        return -EINVAL;
+    }
+
+    /* Fill each entry of the table even if its bit is not set. */
+    for (idx = 0, i = 0; (i != reta_size); ++i) {
+        idx = i / RTE_ETH_RETA_GROUP_SIZE;
+        reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = hw->rss_reta[i];
+    }
+
+
+
+    zxdh_msg_head_build(hw, ZXDH_RSS_RETA_GET, &msg);
+
+    if (hw->is_pf) {
+        ret = zxdh_rss_table_get(hw->vport.vport, &reply_msg.reply_body.rss_reta);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "rss reta table get failed");
+            return -EINVAL;
+        }
+    } else {
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info),
+                    &reply_msg, sizeof(struct zxdh_msg_reply_info));
+        if (ret) {
+            PMD_DRV_LOG(ERR, "vf rss reta table get failed");
+            return -EINVAL;
+        }
+    }
+
+    struct zxdh_rss_reta *reta_table = &reply_msg.reply_body.rss_reta;
+
+    for (idx = 0, i = 0; i < reta_size; ++i) {
+        idx = i / RTE_ETH_RETA_GROUP_SIZE;
+
+        qid_logic = zxdh_hw_qid_to_logic_qid(dev, reta_table->reta[i]);
+        if (qid_logic == ZXDH_INVALID_LOGIC_QID) {
+            PMD_DRV_LOG(ERR, "rsp phy reta qid (%u) is illegal(%u)",
+                reta_table->reta[i], qid_logic);
+            return -EINVAL;
+        }
+        reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = qid_logic;
+    }
+    return 0;
+}
+
+static uint32_t
+zxdh_rss_hf_to_hw(uint64_t hf)
+{
+    uint32_t hw_hf = 0;
+
+    if (hf & ZXDH_HF_MAC_VLAN_ETH)
+        hw_hf |= ZXDH_HF_MAC_VLAN;
+    if (hf & ZXDH_HF_F3_ETH)
+        hw_hf |= ZXDH_HF_F3;
+    if (hf & ZXDH_HF_F5_ETH)
+        hw_hf |= ZXDH_HF_F5;
+
+    if (hw_hf == (ZXDH_HF_MAC_VLAN | ZXDH_HF_F3 | ZXDH_HF_F5))
+        hw_hf = ZXDH_HF_ALL;
+    return hw_hf;
+}
+
+static uint64_t
+zxdh_rss_hf_to_eth(uint32_t hw_hf)
+{
+    uint64_t hf = 0;
+
+    if (hw_hf == ZXDH_HF_ALL)
+        return (ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH);
+
+    if (hw_hf & ZXDH_HF_MAC_VLAN)
+        hf |= ZXDH_HF_MAC_VLAN_ETH;
+    if (hw_hf & ZXDH_HF_F3)
+        hf |= ZXDH_HF_F3_ETH;
+    if (hw_hf & ZXDH_HF_F5)
+        hf |= ZXDH_HF_F5_ETH;
+
+    return hf;
+}
+
+int
+zxdh_rss_hash_update(struct rte_eth_dev *dev,
+             struct rte_eth_rss_conf *rss_conf)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct rte_eth_rss_conf *old_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+    struct zxdh_msg_info msg = {0};
+    struct zxdh_port_attr_table port_attr = {0};
+    uint32_t hw_hf_new, hw_hf_old;
+    int need_update_hf = 0;
+    int ret = 0;
+
+    ret = rss_conf->rss_hf & ZXDH_RSS_HF_MASK;
+    if (ret) {
+        PMD_DRV_LOG(ERR, "Not support some hash function (%08" PRIx64 ")", rss_conf->rss_hf);
+        return -EINVAL;
+    }
+
+    hw_hf_new = zxdh_rss_hf_to_hw(rss_conf->rss_hf);
+    hw_hf_old = zxdh_rss_hf_to_hw(old_rss_conf->rss_hf);
+
+    if ((hw_hf_new != hw_hf_old || !!rss_conf->rss_hf))
+        need_update_hf = 1;
+
+    if (need_update_hf) {
+        if (hw->is_pf) {
+            ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr);
+            port_attr.rss_enable = !!rss_conf->rss_hf;
+            ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss enable set failed");
+                return -EINVAL;
+            }
+        } else {
+            msg.data.rss_enable.enable = !!rss_conf->rss_hf;
+            zxdh_msg_head_build(hw, ZXDH_RSS_ENABLE, &msg);
+            ret = zxdh_vf_send_msg_to_pf(dev, &msg,
+                        sizeof(struct zxdh_msg_info), NULL, 0);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss enable set failed");
+                return -EINVAL;
+            }
+        }
+        if (hw->is_pf) {
+            ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr);
+            port_attr.rss_hash_factor = hw_hf_new;
+            ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss hash factor set failed");
+                return -EINVAL;
+            }
+        } else {
+            msg.data.rss_hf.rss_hf = hw_hf_new;
+            zxdh_msg_head_build(hw, ZXDH_RSS_HF_SET, &msg);
+            ret = zxdh_vf_send_msg_to_pf(dev, &msg,
+                        sizeof(struct zxdh_msg_info), NULL, 0);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss hash factor set failed");
+                return -EINVAL;
+            }
+        }
+        old_rss_conf->rss_hf = rss_conf->rss_hf;
+    }
+
+    return 0;
+}
+
+int
+zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
+{
+    struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+    struct rte_eth_rss_conf *old_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+    struct zxdh_msg_info msg = {0};
+    struct zxdh_msg_reply_info reply_msg = {0};
+    struct zxdh_port_attr_table port_attr = {0};
+    int ret;
+    uint32_t hw_hf;
+
+    if (rss_conf == NULL) {
+        PMD_DRV_LOG(ERR, "rss conf is NULL");
+        return -EINVAL;
+    }
+
+    hw_hf = zxdh_rss_hf_to_hw(old_rss_conf->rss_hf);
+    rss_conf->rss_hf = zxdh_rss_hf_to_eth(hw_hf);
+
+    zxdh_msg_head_build(hw, ZXDH_RSS_HF_GET, &msg);
+    if (hw->is_pf) {
+        ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "rss hash factor get failed");
+            return -EINVAL;
+        }
+        reply_msg.reply_body.rss_hf.rss_hf = port_attr.rss_hash_factor;
+    } else {
+        zxdh_msg_head_build(hw, ZXDH_RSS_HF_GET, &msg);
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info),
+                &reply_msg, sizeof(struct zxdh_msg_reply_info));
+        if (ret) {
+            PMD_DRV_LOG(ERR, "rss hash factor get failed");
+            return -EINVAL;
+        }
+    }
+    rss_conf->rss_hf = zxdh_rss_hf_to_eth(reply_msg.reply_body.rss_hf.rss_hf);
+
+    return 0;
+}
+
+static int
+zxdh_get_rss_enable_conf(struct rte_eth_dev *dev)
+{
+    if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
+        return dev->data->nb_rx_queues == 1 ? 0 : 1;
+    else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
+        return 0;
+
+    return 0;
+}
+
+int
+zxdh_rss_configure(struct rte_eth_dev *dev)
+{
+    struct rte_eth_dev_data *dev_data = dev->data;
+    struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
+    struct zxdh_port_attr_table port_attr = {0};
+    struct zxdh_msg_info msg = {0};
+    int ret = 0;
+    uint32_t hw_hf;
+    uint32_t i;
+
+    if (dev->data->nb_rx_queues == 0) {
+        PMD_DRV_LOG(ERR, "port %u nb_rx_queues is 0", dev->data->port_id);
+        return -1;
+    }
+
+    /* config rss enable */
+    uint8_t curr_rss_enable = zxdh_get_rss_enable_conf(dev);
+
+    if (hw->rss_enable != curr_rss_enable) {
+        if (hw->is_pf) {
+            ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr);
+            port_attr.rss_enable = curr_rss_enable;
+            ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss enable set failed");
+                return -EINVAL;
+            }
+        } else {
+            msg.data.rss_enable.enable = curr_rss_enable;
+            zxdh_msg_head_build(hw, ZXDH_RSS_ENABLE, &msg);
+            ret = zxdh_vf_send_msg_to_pf(dev, &msg,
+                        sizeof(struct zxdh_msg_info), NULL, 0);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss enable set failed");
+                return -EINVAL;
+            }
+        }
+        hw->rss_enable = curr_rss_enable;
+    }
+
+    if (curr_rss_enable && hw->rss_init == 0) {
+        /* config hash factor */
+        dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = ZXDH_HF_F5_ETH;
+        hw_hf = zxdh_rss_hf_to_hw(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+        memset(&msg, 0, sizeof(msg));
+        if (hw->is_pf) {
+            ret = zxdh_get_port_attr(hw->vport.vfid, &port_attr);
+            port_attr.rss_hash_factor = hw_hf;
+            ret = zxdh_set_port_attr(hw->vport.vfid, &port_attr);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss hash factor set failed");
+                return -EINVAL;
+            }
+        } else {
+            msg.data.rss_hf.rss_hf = hw_hf;
+            zxdh_msg_head_build(hw, ZXDH_RSS_HF_SET, &msg);
+            ret = zxdh_vf_send_msg_to_pf(dev, &msg,
+                        sizeof(struct zxdh_msg_info), NULL, 0);
+            if (ret) {
+                PMD_DRV_LOG(ERR, "rss hash factor set failed");
+                return -EINVAL;
+            }
+        }
+        hw->rss_init = 1;
+    }
+
+    if (!hw->rss_reta) {
+        hw->rss_reta = rte_zmalloc(NULL, RTE_ETH_RSS_RETA_SIZE_256 * sizeof(uint16_t), 4);
+        if (hw->rss_reta == NULL) {
+            PMD_DRV_LOG(ERR, "alloc memory fail");
+            return -1;
+        }
+    }
+    for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_256; i++)
+        hw->rss_reta[i] = i % dev_data->nb_rx_queues;
+
+    /* hw config reta */
+    zxdh_msg_head_build(hw, ZXDH_RSS_RETA_SET, &msg);
+    for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_256; i++)
+        msg.data.rss_reta.reta[i] =
+            hw->channel_context[hw->rss_reta[i] * 2].ph_chno;
+
+    if (hw->is_pf) {
+        ret = zxdh_rss_table_set(hw->vport.vport, &msg.data.rss_reta);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "rss reta table set failed");
+            return -EINVAL;
+        }
+    } else {
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg, sizeof(struct zxdh_msg_info), NULL, 0);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "vf rss reta table set failed");
+            return -EINVAL;
+        }
+    }
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 058d271ab3..860716d079 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -5,8 +5,25 @@
 #ifndef ZXDH_ETHDEV_OPS_H
 #define ZXDH_ETHDEV_OPS_H
 
+#include <rte_ether.h>
+
 #include "zxdh_ethdev.h"
 
+#define ZXDH_ETH_RSS_L2  RTE_ETH_RSS_L2_PAYLOAD
+#define ZXDH_ETH_RSS_IP \
+            (RTE_ETH_RSS_IPV4 | \
+             RTE_ETH_RSS_FRAG_IPV4 | \
+             RTE_ETH_RSS_IPV6 | \
+             RTE_ETH_RSS_FRAG_IPV6)
+#define ZXDH_ETH_RSS_TCP    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+#define ZXDH_ETH_RSS_UDP    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+#define ZXDH_ETH_RSS_SCTP   (RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+
+#define ZXDH_HF_F5_ETH       (ZXDH_ETH_RSS_TCP | ZXDH_ETH_RSS_UDP | ZXDH_ETH_RSS_SCTP)
+#define ZXDH_HF_F3_ETH        ZXDH_ETH_RSS_IP
+#define ZXDH_HF_MAC_VLAN_ETH  ZXDH_ETH_RSS_L2
+#define ZXDH_RSS_HF  ((ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH))
+
 int zxdh_dev_set_link_up(struct rte_eth_dev *dev);
 int zxdh_dev_set_link_down(struct rte_eth_dev *dev);
 int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused);
@@ -20,5 +37,14 @@ int zxdh_dev_allmulticast_enable(struct rte_eth_dev *dev);
 int zxdh_dev_allmulticast_disable(struct rte_eth_dev *dev);
 int zxdh_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
 int zxdh_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+int zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,
+             struct rte_eth_rss_reta_entry64 *reta_conf,
+             uint16_t reta_size);
+int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
+            struct rte_eth_rss_reta_entry64 *reta_conf,
+            uint16_t reta_size);
+int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
+int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
+int zxdh_rss_configure(struct rte_eth_dev *dev);
 
 #endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index ec15388f7a..45a9b10aa4 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -182,6 +182,11 @@ enum zxdh_msg_type {
     ZXDH_VF_PORT_UNINIT = 2,
     ZXDH_MAC_ADD = 3,
     ZXDH_MAC_DEL = 4,
+    ZXDH_RSS_ENABLE = 7,
+    ZXDH_RSS_RETA_SET = 8,
+    ZXDH_RSS_RETA_GET = 9,
+    ZXDH_RSS_HF_SET = 15,
+    ZXDH_RSS_HF_GET = 16,
     ZXDH_VLAN_FILTER_SET = 17,
     ZXDH_VLAN_FILTER_ADD = 18,
     ZXDH_VLAN_FILTER_DEL = 19,
@@ -291,6 +296,14 @@ struct zxdh_link_info_msg {
     uint32_t speed;
 } __rte_packed;
 
+struct zxdh_rss_reta {
+    uint32_t reta[RTE_ETH_RSS_RETA_SIZE_256];
+};
+
+struct zxdh_rss_hf {
+    uint32_t rss_hf;
+};
+
 struct zxdh_msg_reply_head {
     uint8_t flag;
     uint16_t reps_len;
@@ -307,6 +320,8 @@ struct zxdh_msg_reply_body {
     union {
         uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)];
         struct zxdh_link_info_msg link_msg;
+        struct zxdh_rss_hf rss_hf;
+        struct zxdh_rss_reta rss_reta;
     } __rte_packed;
 } __rte_packed;
 
@@ -360,6 +375,10 @@ struct zxdh_vlan_offload {
     uint8_t type;
 } __rte_packed;
 
+struct zxdh_rss_enable {
+    uint8_t enable;
+};
+
 struct zxdh_agent_msg_head {
     enum zxdh_agent_msg_type msg_type;
     uint8_t panel_id;
@@ -385,6 +404,9 @@ struct zxdh_msg_info {
         struct zxdh_vlan_filter vlan_filter_msg;
         struct zxdh_vlan_filter_set vlan_filter_set_msg;
         struct zxdh_vlan_offload vlan_offload_msg;
+        struct zxdh_rss_reta rss_reta;
+        struct zxdh_rss_enable rss_enable;
+        struct zxdh_rss_hf rss_hf;
     } __rte_packed data;
 } __rte_packed;
 
diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c
index ca98b36da2..af148a974e 100644
--- a/drivers/net/zxdh/zxdh_tables.c
+++ b/drivers/net/zxdh/zxdh_tables.c
@@ -10,6 +10,7 @@
 
 #define ZXDH_SDT_VPORT_ATT_TABLE          1
 #define ZXDH_SDT_PANEL_ATT_TABLE          2
+#define ZXDH_SDT_RSS_ATT_TABLE            3
 #define ZXDH_SDT_VLAN_ATT_TABLE           4
 #define ZXDH_SDT_BROCAST_ATT_TABLE        6
 #define ZXDH_SDT_UNICAST_ATT_TABLE        10
@@ -668,3 +669,84 @@ zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable)
     }
     return 0;
 }
+
+int
+zxdh_rss_table_set(uint16_t vport, struct zxdh_rss_reta *rss_reta)
+{
+    struct zxdh_rss_to_vqid_table rss_vqid = {0};
+    union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
+    int ret = 0;
+
+    for (uint16_t i = 0; i < RTE_ETH_RSS_RETA_SIZE_256 / 8; i++) {
+        for (uint16_t j = 0; j < 8; j++) {
+        #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+            if (j % 2 == 0)
+                rss_vqid.vqm_qid[j + 1] =  rss_reta->reta[i * 8 + j];
+            else
+                rss_vqid.vqm_qid[j - 1] =  rss_reta->reta[i * 8 + j];
+        #else
+            rss_vqid.vqm_qid[j] = rss_reta->reta[i * 8 + j];
+        #endif
+        }
+
+        #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+            rss_vqid.vqm_qid[1] |= 0x8000;
+        #else
+            rss_vqid.vqm_qid[0] |= 0x8000;
+        #endif
+        ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {
+            .index = vport_num.vfid * 32 + i,
+            .p_data = (uint32_t *)&rss_vqid
+        };
+        ZXDH_DTB_USER_ENTRY_T user_entry_write = {
+            .sdt_no = ZXDH_SDT_RSS_ATT_TABLE,
+            .p_entry_data = &entry
+        };
+        ret = zxdh_np_dtb_table_entry_write(ZXDH_DEVICE_NO,
+                    g_dtb_data.queueid, 1, &user_entry_write);
+        if (ret != 0) {
+            PMD_DRV_LOG(ERR, "write rss base qid failed vfid:%d", vport_num.vfid);
+            return ret;
+        }
+    }
+    return 0;
+}
+
+int
+zxdh_rss_table_get(uint16_t vport, struct zxdh_rss_reta *rss_reta)
+{
+    struct zxdh_rss_to_vqid_table rss_vqid = {0};
+    union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
+    int ret = 0;
+
+    for (uint16_t i = 0; i < RTE_ETH_RSS_RETA_SIZE_256 / 8; i++) {
+        ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vport_num.vfid * 32 + i, (uint32_t *)&rss_vqid};
+        ZXDH_DTB_USER_ENTRY_T user_entry = {ZXDH_SDT_RSS_ATT_TABLE, &entry};
+
+        ret = zxdh_np_dtb_table_entry_get(ZXDH_DEVICE_NO,
+                    g_dtb_data.queueid, &user_entry, 1);
+        if (ret != 0) {
+            PMD_DRV_LOG(ERR, "get rss tbl failed, vfid:%d", vport_num.vfid);
+            return -1;
+        }
+
+        #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+                rss_vqid.vqm_qid[1] &= 0x7FFF;
+        #else
+                rss_vqid.vqm_qid[0] &= 0x7FFF;
+        #endif
+        uint8_t size = sizeof(struct zxdh_rss_to_vqid_table) / sizeof(uint16_t);
+
+        for (int j = 0; j < size; j++) {
+        #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+            if (j % 2 == 0)
+                rss_reta->reta[i * 8 + j] = rss_vqid.vqm_qid[j + 1];
+            else
+                rss_reta->reta[i * 8 + j] = rss_vqid.vqm_qid[j - 1];
+        #else
+            rss_reta->reta[i * 8 + j] = rss_vqid.vqm_qid[j];
+        #endif
+        }
+    }
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index 28d4f6f7cf..c8d1de3bbb 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -8,6 +8,7 @@
 #include <stdint.h>
 
 #define ZXDH_DEVICE_NO                    0
+#define ZXDH_PORT_BASE_QID_FLAG           10
 #define ZXDH_PORT_ATTR_IS_UP_FLAG         35
 
 extern struct zxdh_dtb_shared_data g_dtb_data;
@@ -198,6 +199,10 @@ struct zxdh_vlan_filter_table {
     uint32_t vlans[4];
 };
 
+struct zxdh_rss_to_vqid_table {
+    uint16_t vqm_qid[8];
+};
+
 int zxdh_port_attr_init(struct rte_eth_dev *dev);
 int zxdh_panel_table_init(struct rte_eth_dev *dev);
 int zxdh_set_port_attr(uint16_t vfid, struct zxdh_port_attr_table *port_attr);
@@ -211,5 +216,7 @@ int zxdh_dev_unicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable);
 int zxdh_dev_multicast_table_set(struct zxdh_hw *hw, uint16_t vport, bool enable);
 int zxdh_vlan_filter_table_init(struct rte_eth_dev *dev);
 int zxdh_vlan_filter_table_set(uint16_t vport, uint16_t vlan_id, uint8_t enable);
+int zxdh_rss_table_set(uint16_t vport, struct zxdh_rss_reta *rss_reta);
+int zxdh_rss_table_get(uint16_t vport, struct zxdh_rss_reta *rss_reta);
 
 #endif /* ZXDH_TABLES_H */
-- 
2.27.0