From: David Marchand
To: dev@dpdk.org
Date: Tue, 17 Jun 2014 20:09:30 +0200
Message-Id: <1403028572-24794-6-git-send-email-david.marchand@6wind.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1403028572-24794-1-git-send-email-david.marchand@6wind.com>
References: <1403028572-24794-1-git-send-email-david.marchand@6wind.com>
Subject: [dpdk-dev] [PATCH v3 5/7] ethdev: add mtu accessors

From: Samuel Gauthier

This patch adds two new functions to the ethdev API to retrieve the current
MTU of a port and to change it.

Only the .mtu_set callback is PMD-specific; a PMD implementing it should
update its max_rx_pkt_len if needed.

This operation has been implemented for rte_em_pmd, rte_igb_pmd and
rte_ixgbe_pmd.

Signed-off-by: Samuel Gauthier
Signed-off-by: Ivan Boule
Signed-off-by: David Marchand
---
 lib/librte_ether/rte_ethdev.c       |   41 +++++++++++++++++++++++++--
 lib/librte_ether/rte_ethdev.h       |   34 +++++++++++++++++++++-
 lib/librte_ether/rte_ether.h        |    2 ++
 lib/librte_pmd_e1000/em_ethdev.c    |   42 +++++++++++++++++++++++++++
 lib/librte_pmd_e1000/igb_ethdev.c   |   53 +++++++++++++++++++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   50 +++++++++++++++++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |    2 +-
 7 files changed, 220 insertions(+), 4 deletions(-)
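
Note (not part of the patch): a minimal usage sketch of the two new accessors
from application code; the helper name, the port id and the 1400-byte MTU are
arbitrary example values.

	#include <rte_ethdev.h>

	static int
	example_update_mtu(uint8_t port_id)
	{
		uint16_t mtu;
		int ret;

		/* read the MTU currently recorded for this port */
		ret = rte_eth_dev_get_mtu(port_id, &mtu);
		if (ret != 0)
			return ret; /* -ENODEV: invalid port_id */

		/* ask the PMD to apply a new MTU; on success the ethdev
		 * layer also updates dev->data->mtu */
		ret = rte_eth_dev_set_mtu(port_id, 1400);
		if (ret != 0)
			return ret; /* -ENOTSUP, -ENODEV or -EINVAL */

		return 0;
	}
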
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 9061c7d..7256841 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -201,9 +201,9 @@ rte_eth_dev_init(struct rte_pci_driver *pci_drv,
 	TAILQ_INIT(&(eth_dev->callbacks));
 
 	/*
-	 * Set the default maximum frame size.
+	 * Set the default MTU.
 	 */
-	eth_dev->data->max_frame_size = ETHER_MAX_LEN;
+	eth_dev->data->mtu = ETHER_MTU;
 
 	/* Invoke PMD device initialization function */
 	diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
@@ -1234,6 +1234,43 @@ rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
 	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
 }
 
+
+int
+rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	*mtu = dev->data->mtu;
+	return 0;
+}
+
+int
+rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
+{
+	int ret;
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+
+	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
+	if (!ret)
+		dev->data->mtu = mtu;
+
+	return ret;
+}
+
 int
 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
 {
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 2b98700..2406e45 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1071,6 +1071,9 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
 /**< @Check DD bit of specific RX descriptor */
 
+typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
+/**< @internal Set MTU. */
+
 typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
 				  uint16_t vlan_id,
 				  int on);
@@ -1378,6 +1381,7 @@ struct eth_dev_ops {
 	eth_queue_stats_mapping_set_t queue_stats_mapping_set;
 	/**< Configure per queue stat counter mapping. */
 	eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
+	mtu_set_t mtu_set; /**< Set MTU. */
 	vlan_filter_set_t vlan_filter_set;  /**< Filter VLAN Setup. */
 	vlan_tpid_set_t vlan_tpid_set;      /**< Outer VLAN TPID Setup. */
 	vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
@@ -1514,7 +1518,7 @@ struct rte_eth_dev_data {
 	/**< Link-level information & status */
 
 	struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
-	uint16_t max_frame_size;      /**< Default is ETHER_MAX_LEN (1518). */
+	uint16_t mtu;                 /**< Maximum Transmission Unit. */
 
 	uint32_t min_rx_buf_size;
 	/**< Common rx buffer size handled by all queues */
@@ -2061,6 +2065,34 @@ extern void rte_eth_dev_info_get(uint8_t port_id,
 				 struct rte_eth_dev_info *dev_info);
 
 /**
+ * Retrieve the MTU of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param mtu
+ *   A pointer to a uint16_t where the retrieved MTU is to be stored.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port_id* invalid.
+ */
+extern int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
+
+/**
+ * Change the MTU of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param mtu
+ *   A uint16_t for the MTU to be applied.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if operation is not supported.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-EINVAL) if *mtu* invalid.
+ */
+extern int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
+
+/**
  * Enable/Disable hardware filtering by an Ethernet device of received
  * VLAN packets tagged with a given VLAN Tag Identifier.
  *
diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
index a25ca10..cd0c7c4 100644
--- a/lib/librte_ether/rte_ether.h
+++ b/lib/librte_ether/rte_ether.h
@@ -67,6 +67,8 @@ extern "C" {
 
 #define ETHER_MAX_VLAN_ID  4095 /**< Maximum VLAN ID. */
 
+#define ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
+
 /**
  * Ethernet address:
  * A universally administered address is uniquely assigned to a device by its
diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c
index 7913ff0..8b2a340 100644
--- a/lib/librte_pmd_e1000/em_ethdev.c
+++ b/lib/librte_pmd_e1000/em_ethdev.c
@@ -94,6 +94,8 @@ static void em_hw_control_release(struct e1000_hw *hw);
 static void em_init_manageability(struct e1000_hw *hw);
 static void em_release_manageability(struct e1000_hw *hw);
 
+static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
@@ -145,6 +147,7 @@ static struct eth_dev_ops eth_em_ops = {
 	.stats_get            = eth_em_stats_get,
 	.stats_reset          = eth_em_stats_reset,
 	.dev_infos_get        = eth_em_infos_get,
+	.mtu_set              = eth_em_mtu_set,
 	.vlan_filter_set      = eth_em_vlan_filter_set,
 	.vlan_offload_set     = eth_em_vlan_offload_set,
 	.rx_queue_setup       = eth_em_rx_queue_setup,
@@ -1487,6 +1490,45 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 	e1000_rar_set(hw, addr, index);
 }
 
+static int
+eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct rte_eth_dev_info dev_info;
+	struct e1000_hw *hw;
+	uint32_t frame_size;
+	uint32_t rctl;
+
+	eth_em_infos_get(dev, &dev_info);
+	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+
+	/* check that mtu is within the allowed range */
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+		return -EINVAL;
+
+	/* refuse mtu that requires the support of scattered packets when this
+	 * feature has not been enabled before.
+	 */
+	if (!dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+		return -EINVAL;
+
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+	/* switch to jumbo mode if needed */
+	if (frame_size > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		rctl |= E1000_RCTL_LPE;
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		rctl &= ~E1000_RCTL_LPE;
+	}
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+	return 0;
+}
+
 struct rte_driver em_pmd_drv = {
 	.type = PMD_PDEV,
 	.init = rte_em_pmd_init,
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index c92b737..e633a5d 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -87,6 +87,8 @@ static void igb_hw_control_release(struct e1000_hw *hw);
 static void igb_init_manageability(struct e1000_hw *hw);
 static void igb_release_manageability(struct e1000_hw *hw);
 
+static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int  eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
@@ -218,6 +220,7 @@ static struct eth_dev_ops eth_igb_ops = {
 	.stats_get            = eth_igb_stats_get,
 	.stats_reset          = eth_igb_stats_reset,
 	.dev_infos_get        = eth_igb_infos_get,
+	.mtu_set              = eth_igb_mtu_set,
 	.vlan_filter_set      = eth_igb_vlan_filter_set,
 	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
 	.vlan_offload_set     = eth_igb_vlan_offload_set,
@@ -3005,6 +3008,56 @@ eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
 	return -ENOENT;
 }
 
+static int
+eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	uint32_t rctl;
+	struct e1000_hw *hw;
+	struct rte_eth_dev_info dev_info;
+	uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
+				     VLAN_TAG_SIZE);
+
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_LIBRTE_82571_SUPPORT
+	/* XXX: not bigger than max_rx_pktlen */
+	if (hw->mac.type == e1000_82571)
+		return -ENOTSUP;
+#endif
+	eth_igb_infos_get(dev, &dev_info);
+
+	/* check that mtu is within the allowed range */
+	if ((mtu < ETHER_MIN_MTU) ||
+	    (frame_size > dev_info.max_rx_pktlen))
+		return -EINVAL;
+
+	/* refuse mtu that requires the support of scattered packets when this
+	 * feature has not been enabled before.
+	 */
+	if (!dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+		return -EINVAL;
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+	/* switch to jumbo mode if needed */
+	if (frame_size > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		rctl |= E1000_RCTL_LPE;
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		rctl &= ~E1000_RCTL_LPE;
+	}
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+	E1000_WRITE_REG(hw, E1000_RLPML,
+			dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	return 0;
+}
+
 static struct rte_driver pmd_igb_drv = {
 	.type = PMD_PDEV,
 	.init = rte_igb_pmd_init,
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 559d246..fca8fd7 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -119,6 +119,9 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 					     uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
 			       struct rte_eth_dev_info *dev_info);
+
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
@@ -293,6 +296,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.stats_reset          = ixgbe_dev_stats_reset,
 	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
 	.dev_infos_get        = ixgbe_dev_info_get,
+	.mtu_set              = ixgbe_dev_mtu_set,
 	.vlan_filter_set      = ixgbe_vlan_filter_set,
 	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
 	.vlan_offload_set     = ixgbe_vlan_offload_set,
@@ -2706,6 +2710,52 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
 	ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	uint32_t hlreg0;
+	uint32_t maxfrs;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev_info dev_info;
+	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	ixgbe_dev_info_get(dev, &dev_info);
+
+	/* check that mtu is within the allowed range */
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+		return -EINVAL;
+
+	/* refuse mtu that requires the support of scattered packets when this
+	 * feature has not been enabled before.
+	 */
+	if (!dev->data->scattered_rx &&
+	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+	/* switch to jumbo mode if needed */
+	if (frame_size > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+	maxfrs &= 0x0000FFFF;
+	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+	return 0;
+}
+
 /*
  * Virtual Function operations
  */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index f487859..9b640e5 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2886,7 +2886,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
 	uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
 	struct ixgbe_dcb_tc_config *tc;
-	uint32_t max_frame = dev->data->max_frame_size;
+	uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-- 
1.7.10.4
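
For reference (not part of the patch): how an MTU maps to the frame size that
the PMD callbacks compare against ETHER_MAX_LEN (1518) when deciding whether
to enable jumbo mode, worked through for the standard ETHER_MTU of 1500 with
the rte_ether.h constants ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4 and
VLAN_TAG_SIZE = 4:

	em/igb: frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE
	                   = 1500 + 14 + 4 + 4 = 1522
	ixgbe:  frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	                   = 1500 + 14 + 4 = 1518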