From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
 by inbox.dpdk.org (Postfix) with ESMTP id DD5D8461C1;
 Sat, 8 Feb 2025 03:46:18 +0100 (CET)
Received: from mails.dpdk.org (localhost [127.0.0.1])
 by mails.dpdk.org (Postfix) with ESMTP id C15204067E;
 Sat, 8 Feb 2025 03:45:03 +0100 (CET)
Received: from localhost.localdomain (unknown [103.233.162.252])
 by mails.dpdk.org (Postfix) with ESMTP id 9938740673
 for <dev@dpdk.org>; Sat, 8 Feb 2025 03:44:57 +0100 (CET)
Received: by localhost.localdomain (Postfix, from userid 0)
 id 6D2DDA3248; Sat, 8 Feb 2025 10:44:21 +0800 (CST)
From: Wenbo Cao <caowenbo@mucse.com>
To: thomas@monjalon.net, Wenbo Cao <caowenbo@mucse.com>
Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com,
 andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com
Subject: [PATCH v7 16/28] net/rnp: add MTU set operation
Date: Sat, 8 Feb 2025 10:43:53 +0800
Message-Id: <1738982645-34550-17-git-send-email-caowenbo@mucse.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>
References: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org

Add MTU set support. In multiple-port mode, the largest MTU among
the ports of one PF is used as the receive frame length limit.

Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
 doc/guides/nics/features/rnp.ini    |   1 +
 doc/guides/nics/rnp.rst             |   1 +
 drivers/net/rnp/base/rnp_eth_regs.h |   3 +
 drivers/net/rnp/rnp.h               |   3 +
 drivers/net/rnp/rnp_ethdev.c        | 144 +++++++++++++++++++++++++++++++++++-
 5 files changed, 151 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/rnp.ini b/doc/guides/nics/features/rnp.ini
index 695b9c0..6d13370 100644
--- a/doc/guides/nics/features/rnp.ini
+++ b/doc/guides/nics/features/rnp.ini
@@ -10,6 +10,7 @@ Link status event    = Y
 Queue start/stop     = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
+MTU update           = Y
 RSS hash             = Y
 RSS key update       = Y
 RSS reta update      = Y
diff --git a/doc/guides/nics/rnp.rst b/doc/guides/nics/rnp.rst
index 82dd2d8..9fa7ad9 100644
--- a/doc/guides/nics/rnp.rst
+++ b/doc/guides/nics/rnp.rst
@@ -16,6 +16,7 @@ Features
   Inner RSS is only support for vxlan/nvgre
 - Promiscuous mode
 - Link state information
+- MTU update
 
 Prerequisites
 -------------
diff --git a/drivers/net/rnp/base/rnp_eth_regs.h b/drivers/net/rnp/base/rnp_eth_regs.h
index c74886e..91a18dd 100644
--- a/drivers/net/rnp/base/rnp_eth_regs.h
+++ b/drivers/net/rnp/base/rnp_eth_regs.h
@@ -16,6 +16,9 @@
 #define RNP_RX_ETH_F_CTRL(n)	_ETH_(0x8070 + ((n) * 0x8))
 #define RNP_RX_ETH_F_OFF	(0x7ff)
 #define RNP_RX_ETH_F_ON		(0x270)
+/* max/min packet length receive limit ctrl */
+#define RNP_MIN_FRAME_CTRL	_ETH_(0x80f0)
+#define RNP_MAX_FRAME_CTRL	_ETH_(0x80f4)
 /* rx queue flow ctrl */
 #define RNP_RX_FC_ENABLE	_ETH_(0x8520)
 #define RNP_RING_FC_EN(n)	_ETH_(0x8524 + ((0x4) * ((n) / 32)))
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index 97222f3..054382e 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -120,6 +120,9 @@ struct rnp_eth_port {
 	bool hw_rss_en;
 	uint32_t indirtbl[RNP_RSS_INDIR_SIZE];
 
+	uint16_t cur_mtu;
+	bool jumbo_en;
+
 	rte_spinlock_t rx_mac_lock;
 	bool port_stopped;
 };
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index 11cf2eb..0fcb256 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -20,6 +20,7 @@
 #include "rnp_rss.h"
 #include "rnp_link.h"
 
+static int rnp_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static struct rte_eth_dev *
 rnp_alloc_eth_port(struct rte_pci_device *pci, char *name)
 {
@@ -140,6 +141,13 @@ static void rnp_mac_rx_enable(struct rte_eth_dev *dev)
 
 	mac_cfg = RNP_MAC_REG_RD(hw, lane, RNP_MAC_RX_CFG);
 	mac_cfg |= RNP_MAC_RE;
+	if (port->jumbo_en) {
+		mac_cfg |= RNP_MAC_JE;
+		mac_cfg |= RNP_MAC_GPSLCE | RNP_MAC_WD;
+	} else {
+		mac_cfg &= ~RNP_MAC_JE;
+		mac_cfg &= ~RNP_MAC_WD;
+	}
 	mac_cfg &= ~RNP_MAC_GPSL_MASK;
 	mac_cfg |= (RNP_MAC_MAX_GPSL << RNP_MAC_CPSL_SHIFT);
 	RNP_MAC_REG_WR(hw, lane, RNP_MAC_RX_CFG, mac_cfg);
@@ -209,6 +217,7 @@ static void rnp_mac_init(struct rte_eth_dev *dev)
 {
 	uint16_t max_pkt_size = dev->data->dev_conf.rxmode.mtu +
 		RNP_ETH_OVERHEAD;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
 	struct rnp_hw *hw = port->hw;
 	struct rnp_rx_queue *rxq;
@@ -234,6 +243,12 @@ static void rnp_mac_init(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	}
 	dma_buf_size = hw->min_dma_size;
+	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
+	    max_pkt_size > dma_buf_size ||
+	    dev->data->mtu + RNP_ETH_OVERHEAD > dma_buf_size)
+		dev->data->scattered_rx = 1;
+	else
+		dev->data->scattered_rx = 0;
 	/* Setup max dma scatter engine split size */
 	dma_ctrl = RNP_E_REG_RD(hw, RNP_DMA_CTRL);
 	if (max_pkt_size == dma_buf_size)
@@ -294,6 +309,7 @@ static int rnp_dev_start(struct rte_eth_dev *eth_dev)
 {
 	struct rnp_eth_port *port = RNP_DEV_TO_PORT(eth_dev);
 	struct rte_eth_dev_data *data = eth_dev->data;
+	uint16_t max_rx_pkt_len = eth_dev->data->mtu;
 	bool lsc = data->dev_conf.intr_conf.lsc;
 	struct rnp_hw *hw = port->hw;
 	uint16_t lane = 0;
@@ -316,6 +332,9 @@ static int rnp_dev_start(struct rte_eth_dev *eth_dev)
 	ret = rnp_rx_scattered_setup(eth_dev);
 	if (ret)
 		return ret;
+	ret = rnp_mtu_set(eth_dev, max_rx_pkt_len);
+	if (ret)
+		return ret;
 	ret = rnp_enable_all_tx_queue(eth_dev);
 	if (ret)
 		goto txq_start_failed;
@@ -628,6 +647,129 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
 	return rnp_update_mpfm(port, RNP_MPF_MODE_ALLMULTI, 0);
 }
 
+static bool
+rnp_verify_pf_scatter(struct rnp_eth_adapter *adapter)
+{
+	struct rnp_hw *hw = &adapter->hw;
+	struct rte_eth_dev *eth_dev;
+	uint8_t i = 0;
+
+	for (i = 0; i < hw->max_port_num; i++) {
+		eth_dev = adapter->ports[i]->eth_dev;
+		/* If a sub port of the PF has not been
+		 * started, its scattered_rx attribute is
+		 * not set up yet, so skip checking it.
+		 */
+		if (!eth_dev || !eth_dev->data->dev_started)
+			continue;
+		if (!eth_dev->data->scattered_rx)
+			return false;
+	}
+
+	return true;
+}
+
+static int
+rnp_update_vaild_mtu(struct rnp_eth_port *port, uint16_t *set_mtu)
+{
+	struct rnp_eth_adapter *adapter = port->hw->back;
+	struct rnp_eth_port *sub_port = NULL;
+	struct rnp_hw *hw = port->hw;
+	uint16_t origin_mtu = 0;
+	uint16_t mtu = 0;
+	uint8_t i = 0;
+
+	if (hw->max_port_num == 1) {
+		port->cur_mtu = *set_mtu;
+
+		return 0;
+	}
+	origin_mtu = port->cur_mtu;
+	port->cur_mtu = *set_mtu;
+	mtu = *set_mtu;
+	for (i = 0; i < hw->max_port_num; i++) {
+		sub_port = adapter->ports[i];
+		if (sub_port == NULL)
+			continue;
+		mtu = RTE_MAX(mtu, sub_port->cur_mtu);
+	}
+	if (hw->max_port_num > 1 &&
+	    mtu + RNP_ETH_OVERHEAD > hw->min_dma_size) {
+		if (!rnp_verify_pf_scatter(adapter)) {
+			RNP_PMD_ERR("single PF multi-port: max frame size "
+				    "exceeds min_dma_size, stop all ports "
+				    "of the PF before setting the MTU.");
+			port->cur_mtu = origin_mtu;
+			return -EINVAL;
+		}
+	}
+	*set_mtu = mtu;
+
+	return 0;
+}
+
+static int
+rnp_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	uint32_t frame_size = mtu + RNP_ETH_OVERHEAD;
+	uint16_t lane = port->attr.nr_lane;
+	struct rnp_hw *hw = port->hw;
+	bool jumbo_en = false;
+	uint32_t reg;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	/* check that the mtu is within the allowed range */
+	if (frame_size < RTE_ETHER_MIN_LEN ||
+	    frame_size > RNP_MAC_MAXFRM_SIZE)
+		return -EINVAL;
+	/*
+	 * Refuse an mtu that requires scattered packet support
+	 * when this feature has not been enabled before.
+	 */
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		RNP_PMD_ERR("port %d: this MTU requires scattered rx, "
+			    "stop the port before updating the MTU.",
+			    dev->data->port_id);
+
+		return -EBUSY;
+	}
+	if (frame_size < RTE_ETHER_MIN_LEN) {
+		RNP_PMD_ERR("valid packet length must be "
+			    "in the range %u to %u "
+			    "when the jumbo frame feature is disabled",
+			    (uint32_t)RTE_ETHER_MIN_LEN,
+			    (uint32_t)RTE_ETHER_MAX_LEN);
+		return -EINVAL;
+	}
+	/* When one PF drives multiple ports, the hardware has
+	 * only one frame-length control, so the largest MTU
+	 * among the PF's ports must be programmed.
+	 */
+	ret = rnp_update_vaild_mtu(port, &mtu);
+	if (ret < 0)
+		return ret;
+	frame_size = mtu + RNP_ETH_OVERHEAD;
+	if (frame_size > RTE_ETHER_MAX_LEN)
+		jumbo_en = true;
+	/* program the max/min frame length limits */
+	RNP_E_REG_WR(hw, RNP_MAX_FRAME_CTRL, frame_size);
+	RNP_E_REG_WR(hw, RNP_MIN_FRAME_CTRL, 60);
+	if (jumbo_en) {
+		/* protect the shared MAC rx config register */
+		rte_spinlock_lock(&port->rx_mac_lock);
+		reg = RNP_MAC_REG_RD(hw, lane, RNP_MAC_RX_CFG);
+		reg |= RNP_MAC_JE;
+		RNP_MAC_REG_WR(hw, lane, RNP_MAC_RX_CFG, reg);
+		rte_spinlock_unlock(&port->rx_mac_lock);
+	}
+	port->jumbo_en = jumbo_en;
+
+	return 0;
+}
+
 /* Features supported by this driver */
 static const struct eth_dev_ops rnp_eth_dev_ops = {
 	.dev_configure = rnp_dev_configure,
@@ -641,7 +783,7 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
 	.promiscuous_disable = rnp_promiscuous_disable,
 	.allmulticast_enable = rnp_allmulticast_enable,
 	.allmulticast_disable = rnp_allmulticast_disable,
-
+	.mtu_set = rnp_mtu_set,
 	.rx_queue_setup = rnp_rx_queue_setup,
 	.rx_queue_release = rnp_dev_rx_queue_release,
 	.tx_queue_setup = rnp_tx_queue_setup,
-- 
1.8.3.1
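
For context, a minimal sketch (not part of the patch) of how an application would exercise the new .mtu_set callback through the generic ethdev API. The port id 0 and the 9000-byte MTU below are illustrative values only; the call reaches rnp_mtu_set() via the registered dev op and may return -EBUSY or -EINVAL on the error paths added above.

  #include <stdio.h>
  #include <rte_ethdev.h>

  /* Illustrative usage only: request a jumbo MTU on an already
   * configured port. rte_eth_dev_set_mtu() dispatches to the
   * PMD's .mtu_set callback (rnp_mtu_set() for this driver).
   */
  static int
  example_set_jumbo_mtu(uint16_t port_id)
  {
  	int ret = rte_eth_dev_set_mtu(port_id, 9000);

  	if (ret < 0)
  		printf("failed to set MTU on port %u: %d\n", port_id, ret);
  	return ret;
  }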