From: Thomas Monjalon
To: dev@dpdk.org
Date: Tue, 1 Sep 2015 23:30:54 +0200
Message-Id: <1441143062-2557-1-git-send-email-thomas.monjalon@6wind.com>
In-Reply-To: <1441138697-25157-1-git-send-email-thomas.monjalon@6wind.com>
References: <1441138697-25157-1-git-send-email-thomas.monjalon@6wind.com>
Subject: [dpdk-dev] [PATCH 1/9] ethdev: remove Rx interrupt switch

The Rx interrupt feature is now part of the standard ABI.
Because of changes in rte_intr_handle and struct rte_eth_conf,
the eal and ethdev library versions are bumped.

Signed-off-by: Thomas Monjalon
---
 doc/guides/rel_notes/deprecation.rst               |  4 --
 drivers/net/e1000/igb_ethdev.c                     | 26 -----------
 drivers/net/ixgbe/ixgbe_ethdev.c                   | 40 ----------------
 examples/l3fwd-power/main.c                        |  2 -
 lib/librte_eal/bsdapp/eal/Makefile                 |  2 +-
 .../bsdapp/eal/include/exec-env/rte_interrupts.h   |  2 -
 lib/librte_eal/linuxapp/eal/Makefile               |  2 +-
 lib/librte_eal/linuxapp/eal/eal_interrupts.c       | 53 ----------------------
 .../linuxapp/eal/include/exec-env/rte_interrupts.h |  2 -
 lib/librte_ether/Makefile                          |  2 +-
 lib/librte_ether/rte_ethdev.c                      | 40 ----------------
 lib/librte_ether/rte_ethdev.h                      |  4 --
 12 files changed, 3 insertions(+), 176 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index da17880..991a777 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -13,10 +13,6 @@ Deprecation Notices
   There is no backward compatibility planned from release 2.2.
   All binaries will need to be rebuilt from release 2.2.
 
-* ABI changes are planned for struct rte_intr_handle, struct rte_eth_conf
-  and struct eth_dev_ops to support interrupt mode feature from release 2.1.
-  Those changes may be enabled in the release 2.1 with CONFIG_RTE_NEXT_ABI.
-
 * The EAL function rte_eal_pci_close_one is deprecated because renamed to
   rte_eal_pci_detach.
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index c7e6d55..848ef6e 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -106,9 +106,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
 				struct rte_eth_fc_conf *fc_conf);
 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
-#ifdef RTE_NEXT_ABI
 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
-#endif
 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
@@ -232,7 +230,6 @@ static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 					  uint32_t flags);
 static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 					  struct timespec *timestamp);
-#ifdef RTE_NEXT_ABI
 static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
 					uint16_t queue_id);
 static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
@@ -241,7 +238,6 @@ static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
 				       uint8_t queue, uint8_t msix_vector);
 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
 			       uint8_t index, uint8_t offset);
-#endif
 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
 
 /*
@@ -303,10 +299,8 @@ static const struct eth_dev_ops eth_igb_ops = {
 	.vlan_tpid_set = eth_igb_vlan_tpid_set,
 	.vlan_offload_set = eth_igb_vlan_offload_set,
 	.rx_queue_setup = eth_igb_rx_queue_setup,
-#ifdef RTE_NEXT_ABI
 	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
 	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
-#endif
 	.rx_queue_release = eth_igb_rx_queue_release,
 	.rx_queue_count = eth_igb_rx_queue_count,
 	.rx_descriptor_done = eth_igb_rx_descriptor_done,
@@ -893,9 +887,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE(dev->data->dev_private);
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	int ret, mask;
-#ifdef RTE_NEXT_ABI
 	uint32_t intr_vector = 0;
-#endif
 	uint32_t ctrl_ext;
 
 	PMD_INIT_FUNC_TRACE();
@@ -936,7 +928,6 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/* configure PF module if SRIOV enabled */
 	igb_pf_host_configure(dev);
 
-#ifdef RTE_NEXT_ABI
 	/* check and configure queue intr-vector mapping */
 	if (dev->data->dev_conf.intr_conf.rxq != 0)
 		intr_vector = dev->data->nb_rx_queues;
@@ -954,7 +945,6 @@ eth_igb_start(struct rte_eth_dev *dev)
 			return -ENOMEM;
 		}
 	}
-#endif
 
 	/* confiugre msix for rx interrupt */
 	eth_igb_configure_msix_intr(dev);
@@ -1050,11 +1040,9 @@ eth_igb_start(struct rte_eth_dev *dev)
 				     " no intr multiplex\n");
 	}
 
-#ifdef RTE_NEXT_ABI
 	/* check if rxq interrupt is enabled */
 	if (dev->data->dev_conf.intr_conf.rxq != 0)
 		eth_igb_rxq_interrupt_setup(dev);
-#endif
 
 	/* enable uio/vfio intr/eventfd mapping */
 	rte_intr_enable(intr_handle);
@@ -1146,14 +1134,12 @@ eth_igb_stop(struct rte_eth_dev *dev)
 	}
 	filter_info->twotuple_mask = 0;
 
-#ifdef RTE_NEXT_ABI
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
 	if (intr_handle->intr_vec != NULL) {
 		rte_free(intr_handle->intr_vec);
 		intr_handle->intr_vec = NULL;
 	}
-#endif
 }
 
 static void
@@ -1163,9 +1149,7 @@ eth_igb_close(struct rte_eth_dev *dev)
 	struct e1000_adapter *adapter =
 		E1000_DEV_PRIVATE(dev->data->dev_private);
 	struct rte_eth_link link;
-#ifdef RTE_NEXT_ABI
 	struct rte_pci_device *pci_dev;
-#endif
 
 	eth_igb_stop(dev);
 	adapter->stopped = 1;
@@ -1185,13 +1169,11 @@ eth_igb_close(struct rte_eth_dev *dev)
 	igb_dev_free_queues(dev);
 
-#ifdef RTE_NEXT_ABI
 	pci_dev = dev->pci_dev;
 	if (pci_dev->intr_handle.intr_vec) {
 		rte_free(pci_dev->intr_handle.intr_vec);
 		pci_dev->intr_handle.intr_vec = NULL;
 	}
-#endif
 
 	memset(&link, 0, sizeof(link));
 	rte_igb_dev_atomic_write_link_status(dev, &link);
@@ -2017,7 +1999,6 @@ eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
-#ifdef RTE_NEXT_ABI
 /* It clears the interrupt causes and enables the interrupt.
  * It will be called once only during nic initialized.
  *
@@ -2044,7 +2025,6 @@ static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
-#endif
 
 /*
  * It reads ICR and gets interrupt causes, check it and set a bit flag
@@ -4144,7 +4124,6 @@ static struct rte_driver pmd_igbvf_drv = {
 	.init = rte_igbvf_pmd_init,
 };
 
-#ifdef RTE_NEXT_ABI
 static int
 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -4219,7 +4198,6 @@ eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
 				   8 * direction);
 	}
 }
-#endif
 
 /* Sets up the hardware to generate MSI-X interrupts properly
  * @hw
@@ -4228,13 +4206,11 @@ eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
 static void
 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 {
-#ifdef RTE_NEXT_ABI
 	int queue_id;
 	uint32_t tmpval, regval, intr_mask;
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t vec = 0;
-#endif
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 
 	/* won't configure msix register if no mapping is done
@@ -4243,7 +4219,6 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	if (!rte_intr_dp_is_en(intr_handle))
 		return;
 
-#ifdef RTE_NEXT_ABI
 	/* set interrupt vector for other causes */
 	if (hw->mac.type == e1000_82575) {
 		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -4299,7 +4274,6 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	}
 
 	E1000_WRITE_FLUSH(hw);
-#endif
 }
 
 PMD_REGISTER_DRIVER(pmd_igb_drv);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b8ee1e9..ec2918c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -190,9 +190,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
-#ifdef RTE_NEXT_ABI
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-#endif
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -227,14 +225,12 @@ static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
 					  void *param);
-#ifdef RTE_NEXT_ABI
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 					    uint16_t queue_id);
 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 					     uint16_t queue_id);
 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 				 uint8_t queue, uint8_t msix_vector);
-#endif
 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
@@ -252,14 +248,12 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 		uint8_t rule_id, uint8_t on);
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
 		uint8_t rule_id);
-#ifdef RTE_NEXT_ABI
 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 					  uint16_t queue_id);
 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 					   uint16_t queue_id);
 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
-#endif
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
@@ -420,10 +414,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.tx_queue_start = ixgbe_dev_tx_queue_start,
 	.tx_queue_stop = ixgbe_dev_tx_queue_stop,
 	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
-#ifdef RTE_NEXT_ABI
 	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
-#endif
 	.rx_queue_release = ixgbe_dev_rx_queue_release,
 	.rx_queue_count = ixgbe_dev_rx_queue_count,
 	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
@@ -497,10 +489,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
 	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
 	.tx_queue_release = ixgbe_dev_tx_queue_release,
-#ifdef RTE_NEXT_ABI
 	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
-#endif
 	.mac_addr_add = ixgbevf_add_mac_addr,
 	.mac_addr_remove = ixgbevf_remove_mac_addr,
 	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
@@ -1680,9 +1670,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
 	uint32_t intr_vector = 0;
-#endif
 	int err, link_up = 0, negotiate = 0;
 	uint32_t speed = 0;
 	int mask = 0;
@@ -1715,7 +1703,6 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	/* configure PF module if SRIOV enabled */
 	ixgbe_pf_host_configure(dev);
 
-#ifdef RTE_NEXT_ABI
 	/* check and configure queue intr-vector mapping */
 	if (dev->data->dev_conf.intr_conf.rxq != 0)
 		intr_vector = dev->data->nb_rx_queues;
@@ -1734,7 +1721,6 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 			return -ENOMEM;
 		}
 	}
-#endif
 
 	/* confiugre msix for sleep until rx interrupt */
 	ixgbe_configure_msix(dev);
@@ -1827,11 +1813,9 @@ skip_link_setup:
 				     " no intr multiplex\n");
 	}
 
-#ifdef RTE_NEXT_ABI
 	/* check if rxq interrupt is enabled */
 	if (dev->data->dev_conf.intr_conf.rxq != 0)
 		ixgbe_dev_rxq_interrupt_setup(dev);
-#endif
 
 	/* enable uio/vfio intr/eventfd mapping */
 	rte_intr_enable(intr_handle);
@@ -1942,14 +1926,12 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 	memset(filter_info->fivetuple_mask, 0,
 		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
 
-#ifdef RTE_NEXT_ABI
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
 	if (intr_handle->intr_vec != NULL) {
 		rte_free(intr_handle->intr_vec);
 		intr_handle->intr_vec = NULL;
 	}
-#endif
 }
 
 /*
@@ -2623,7 +2605,6 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
  * - On success, zero.
  * - On failure, a negative value.
  */
-#ifdef RTE_NEXT_ABI
 static int
 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 {
@@ -2634,7 +2615,6 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
-#endif
 
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
@@ -3435,9 +3415,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-#ifdef RTE_NEXT_ABI
 	uint32_t intr_vector = 0;
-#endif
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	int err, mask = 0;
@@ -3470,7 +3448,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
 	ixgbevf_dev_rxtx_start(dev);
 
-#ifdef RTE_NEXT_ABI
 	/* check and configure queue intr-vector mapping */
 	if (dev->data->dev_conf.intr_conf.rxq != 0)
 		intr_vector = dev->data->nb_rx_queues;
@@ -3488,7 +3465,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 			return -ENOMEM;
 		}
 	}
-#endif
 
 	ixgbevf_configure_msix(dev);
 
 	if (dev->data->dev_conf.intr_conf.lsc != 0) {
@@ -3534,23 +3510,19 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 	/* disable intr eventfd mapping */
 	rte_intr_disable(intr_handle);
 
-#ifdef RTE_NEXT_ABI
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
 	if (intr_handle->intr_vec != NULL) {
 		rte_free(intr_handle->intr_vec);
 		intr_handle->intr_vec = NULL;
 	}
-#endif
 }
 
 static void
 ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-#ifdef RTE_NEXT_ABI
 	struct rte_pci_device *pci_dev;
-#endif
 
 	PMD_INIT_FUNC_TRACE();
@@ -3563,13 +3535,11 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 	/* reprogram the RAR[0] in case user changed it. */
 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 
-#ifdef RTE_NEXT_ABI
 	pci_dev = dev->pci_dev;
 	if (pci_dev->intr_handle.intr_vec) {
 		rte_free(pci_dev->intr_handle.intr_vec);
 		pci_dev->intr_handle.intr_vec = NULL;
 	}
-#endif
 }
 
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -4087,7 +4057,6 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 	return 0;
 }
 
-#ifdef RTE_NEXT_ABI
 static int
 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -4240,18 +4209,15 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 			}
 		}
 	}
-#endif
 
 static void
 ixgbevf_configure_msix(struct rte_eth_dev *dev)
 {
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t q_idx;
 	uint32_t vector_idx = 0;
-#endif
 
 	/* won't configure msix register if no mapping is done
 	 * between intr vector and event fd.
@@ -4259,7 +4225,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 	if (!rte_intr_dp_is_en(intr_handle))
 		return;
 
-#ifdef RTE_NEXT_ABI
 	/* Configure all RX queues of VF */
 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
 		/* Force all queue use vector 0,
@@ -4271,7 +4236,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 
 	/* Configure VF Rx queue ivar */
 	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
-#endif
 }
 
 /**
@@ -4283,13 +4247,11 @@ static void
 ixgbe_configure_msix(struct rte_eth_dev *dev)
 {
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
-#ifdef RTE_NEXT_ABI
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t queue_id, vec = 0;
 	uint32_t mask;
 	uint32_t gpie;
-#endif
 
 	/* won't configure msix register if no mapping is done
 	 * between intr vector and event fd
@@ -4297,7 +4259,6 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
 	if (!rte_intr_dp_is_en(intr_handle))
 		return;
 
-#ifdef RTE_NEXT_ABI
 	/* setup GPIE for MSI-x mode */
 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
@@ -4347,7 +4308,6 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
 		IXGBE_EIMS_LSC);
 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
-#endif
 }
 
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 2f205ea..086f29b 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -239,9 +239,7 @@ static struct rte_eth_conf port_conf = {
 	},
 	.intr_conf = {
 		.lsc = 1,
-#ifdef RTE_NEXT_ABI
 		.rxq = 1,
-#endif
 	},
 };
 
diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index a969435..a49dcec 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -44,7 +44,7 @@ CFLAGS += $(WERROR_FLAGS) -O3
 
 EXPORT_MAP := rte_eal_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) := eal.c
diff --git a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
index bffa902..88d4ae1 100644
--- a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
+++ b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
@@ -50,11 +50,9 @@ struct rte_intr_handle {
 	int fd; /**< file descriptor */
 	int uio_cfg_fd; /**< UIO config file descriptor */
 	enum rte_intr_handle_type type; /**< handle type */
-#ifdef RTE_NEXT_ABI
 	int max_intr; /**< max interrupt requested */
 	uint32_t nb_efd; /**< number of available efds */
 	int *intr_vec; /**< intr vector number array */
-#endif
 };
 
 /**
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 376d275..d62196e 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -35,7 +35,7 @@ LIB = librte_eal.a
 
 EXPORT_MAP := rte_eal_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 VPATH += $(RTE_SDK)/lib/librte_eal/common
 
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 3f87875..66e1fe3 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -290,26 +290,18 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
 	irq_set = (struct vfio_irq_set *) irq_set_buf;
 	irq_set->argsz = len;
-#ifdef RTE_NEXT_ABI
 	if (!intr_handle->max_intr)
 		intr_handle->max_intr = 1;
 	else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
 		intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;
 	irq_set->count = intr_handle->max_intr;
-#else
-	irq_set->count = 1;
-#endif
 	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 	irq_set->start = 0;
 	fd_ptr = (int *) &irq_set->data;
-#ifdef RTE_NEXT_ABI
 	memcpy(fd_ptr, intr_handle->efds, sizeof(intr_handle->efds));
 	fd_ptr[intr_handle->max_intr - 1] = intr_handle->fd;
-#else
-	fd_ptr[0] = intr_handle->fd;
-#endif
 
 	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
@@ -886,7 +878,6 @@ rte_eal_intr_init(void)
 	return -ret;
 }
 
-#ifdef RTE_NEXT_ABI
 static void
 eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
 {
@@ -929,7 +920,6 @@ eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
 			return;
 	} while (1);
 }
-#endif
 
 static int
 eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
@@ -1068,7 +1058,6 @@ rte_epoll_ctl(int epfd, int op, int fd,
 	return 0;
 }
 
-#ifdef RTE_NEXT_ABI
 int
 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
 		int op, unsigned int vec, void *data)
@@ -1192,45 +1181,3 @@ rte_intr_allow_others(struct rte_intr_handle *intr_handle)
 {
 	return !!(intr_handle->max_intr - intr_handle->nb_efd);
 }
-
-#else
-int
-rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
-		int epfd, int op, unsigned int vec, void *data)
-{
-	RTE_SET_USED(intr_handle);
-	RTE_SET_USED(epfd);
-	RTE_SET_USED(op);
-	RTE_SET_USED(vec);
-	RTE_SET_USED(data);
-	return -ENOTSUP;
-}
-
-int
-rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
-{
-	RTE_SET_USED(intr_handle);
-	RTE_SET_USED(nb_efd);
-	return 0;
-}
-
-void
-rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
-{
-	RTE_SET_USED(intr_handle);
-}
-
-int
-rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
-{
-	RTE_SET_USED(intr_handle);
-	return 0;
-}
-
-int
-rte_intr_allow_others(struct rte_intr_handle *intr_handle)
-{
-	RTE_SET_USED(intr_handle);
-	return 1;
-}
-#endif
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
index b05f4c8..45071b7 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
@@ -86,14 +86,12 @@ struct rte_intr_handle {
 	};
 	int fd; /**< interrupt event file descriptor */
 	enum rte_intr_handle_type type; /**< handle type */
-#ifdef RTE_NEXT_ABI
 	uint32_t max_intr; /**< max interrupt requested */
 	uint32_t nb_efd; /**< number of available efd(event fd) */
 	int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */
 	struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vector epoll event */
 	int *intr_vec; /**< intr vector number array */
-#endif
 };
 
 #define RTE_EPOLL_PER_THREAD -1 /**< to hint using per thread epfd */
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index fc45a71..3e81a0e 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -41,7 +41,7 @@ CFLAGS += $(WERROR_FLAGS)
 
 EXPORT_MAP := rte_ether_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 SRCS-y += rte_ethdev.c
 
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 6b2400c..b309309 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -3033,7 +3033,6 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
 }
 
-#ifdef RTE_NEXT_ABI
 int
 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
 {
@@ -3139,45 +3138,6 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id,
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
 	return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
 }
-#else
-int
-rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id)
-{
-	RTE_SET_USED(port_id);
-	RTE_SET_USED(queue_id);
-	return -ENOTSUP;
-}
-
-int
-rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id)
-{
-	RTE_SET_USED(port_id);
-	RTE_SET_USED(queue_id);
-	return -ENOTSUP;
-}
-
-int
-rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
-{
-	RTE_SET_USED(port_id);
-	RTE_SET_USED(epfd);
-	RTE_SET_USED(op);
-	RTE_SET_USED(data);
-	return -1;
-}
-
-int
-rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
-		int epfd, int op, void *data)
-{
-	RTE_SET_USED(port_id);
-	RTE_SET_USED(queue_id);
-	RTE_SET_USED(epfd);
-	RTE_SET_USED(op);
-	RTE_SET_USED(data);
-	return -1;
-}
-#endif
 
 #ifdef RTE_NIC_BYPASS
 int rte_eth_dev_bypass_init(uint8_t port_id)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 544afe0..fa06554 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -845,10 +845,8 @@ struct rte_eth_fdir {
 struct rte_intr_conf {
 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
 	uint16_t lsc;
-#ifdef RTE_NEXT_ABI
 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
 	uint16_t rxq;
-#endif
 };
 
 /**
@@ -1392,12 +1390,10 @@ struct eth_dev_ops {
 	eth_queue_release_t rx_queue_release;/**< Release RX queue.*/
 	eth_rx_queue_count_t rx_queue_count; /**< Get Rx queue count. */
 	eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit */
-#ifdef RTE_NEXT_ABI
 	/**< Enable Rx queue interrupt. */
 	eth_rx_enable_intr_t rx_queue_intr_enable;
 	/**< Disable Rx queue interrupt.*/
 	eth_rx_disable_intr_t rx_queue_intr_disable;
-#endif
 	eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/
 	eth_queue_release_t tx_queue_release;/**< Release TX queue.*/
 	eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
-- 
2.5.1
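
For context, the application-side usage that this series makes unconditional looks roughly like the sketch below. It is modeled loosely on examples/l3fwd-power and is only an illustration: the helpers register_rxq_intr() and wait_for_rx() are hypothetical names, and the event-loop details (rte_eth_dev_rx_intr_ctl_q() with RTE_INTR_EVENT_ADD, rte_epoll_wait() on the per-thread epoll fd) are assumptions based on the 2.1-era API rather than something shown in this patch.

/* Hypothetical usage sketch (not part of this patch), assuming the
 * Rx-interrupt API exported here: rte_eth_dev_rx_intr_ctl_q(),
 * rte_eth_dev_rx_intr_enable()/disable() and rte_epoll_wait().
 */
#include <rte_ethdev.h>
#include <rte_interrupts.h>

static struct rte_eth_conf port_conf = {
	.intr_conf = {
		.lsc = 1,
		.rxq = 1, /* Rx queue interrupts, now part of the standard ABI */
	},
};

/* Register one Rx queue with the per-thread epoll instance; called once
 * per queue after rte_eth_dev_start(). */
static int
register_rxq_intr(uint8_t port_id, uint16_t queue_id)
{
	return rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
					 RTE_EPOLL_PER_THREAD,
					 RTE_INTR_EVENT_ADD, NULL);
}

/* In the polling loop: when a burst returns no packets, arm the queue
 * interrupt and sleep until the NIC signals new descriptors, then go
 * back to pure polling. */
static void
wait_for_rx(uint8_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1 /* block */);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}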