From: Vlad Zolotarov <vladz@cloudius-systems.com>
To: dev@dpdk.org
Date: Wed, 29 Apr 2015 11:38:13 +0300
Message-Id: <1430296697-32650-2-git-send-email-vladz@cloudius-systems.com>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1430296697-32650-1-git-send-email-vladz@cloudius-systems.com>
References: <1430296697-32650-1-git-send-email-vladz@cloudius-systems.com>
Subject: [dpdk-dev] [PATCH v2 1/5] ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter

Move the rx_bulk_alloc_allowed and rx_vec_allowed fields from struct
ixgbe_hw to struct ixgbe_adapter.

Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
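Note (below the cut line, so `git am' will not apply it): the two flags
now live in the per-port private data rather than in the shared-code
struct ixgbe_hw. A minimal sketch of the resulting access pattern,
assuming the usual ixgbe layout where struct ixgbe_adapter embeds
struct ixgbe_hw as its "hw" member and dev->data->dev_private points at
the adapter:

	/* Per-port PMD state: cast the private data to the adapter. */
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;

	/* The shared-code HW struct is still reachable from it, so the
	 * existing IXGBE_DEV_PRIVATE_TO_HW() macro keeps working. */
	struct ixgbe_hw *hw = &adapter->hw;

	/* The moved flags are accessed through the adapter: */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;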
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h |  2 --
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c     |  8 +++----
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h     |  3 +++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c       | 38 +++++++++++++++++++--------------
 4 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
index 9a66370..c67d462 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -3657,8 +3657,6 @@ struct ixgbe_hw {
 	bool force_full_reset;
 	bool allow_unsupported_sfp;
 	bool wol_enabled;
-	bool rx_bulk_alloc_allowed;
-	bool rx_vec_allowed;
 };
 
 #define ixgbe_call_func(hw, func, params, error) \
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 366aa45..aec1de9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1428,8 +1428,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1440,8 +1440,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
 	 * allocation or vector Rx preconditions we will reset it.
 	 */
-	hw->rx_bulk_alloc_allowed = true;
-	hw->rx_vec_allowed = true;
+	adapter->rx_bulk_alloc_allowed = true;
+	adapter->rx_vec_allowed = true;
 
 	return 0;
 }
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index e45e727..5b90115 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -265,6 +265,9 @@ struct ixgbe_adapter {
 	struct ixgbe_bypass_info    bps;
 #endif /* RTE_NIC_BYPASS */
 	struct ixgbe_filter_info    filter;
+
+	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
 };
 
 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 3c61d1c..60344a9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2442,7 +2442,7 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 
 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 {
 	static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
 	unsigned i;
@@ -2458,7 +2458,7 @@ ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
 	 * constraints here to see if we need to zero out memory after the end
 	 * of the H/W descriptor ring.
 	 */
-	if (hw->rx_bulk_alloc_allowed)
+	if (adapter->rx_bulk_alloc_allowed)
 		/* zero out extra memory */
 		len += RTE_PMD_IXGBE_RX_MAX_BURST;
 
@@ -2504,6 +2504,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_rx_queue *rxq;
 	struct ixgbe_hw     *hw;
 	uint16_t len;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info = { 0 };
 	struct rte_eth_rxmode *dev_rx_mode = &dev->data->dev_conf.rxmode;
 	bool rsc_requested = false;
@@ -2602,7 +2604,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			    "preconditions - canceling the feature for "
 			    "the whole port[%d]",
 			    rxq->queue_id, rxq->port_id);
-		hw->rx_bulk_alloc_allowed = false;
+		adapter->rx_bulk_alloc_allowed = false;
 	}
 
 	/*
@@ -2611,7 +2613,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * function does not access an invalid memory region.
 	 */
 	len = nb_desc;
-	if (hw->rx_bulk_alloc_allowed)
+	if (adapter->rx_bulk_alloc_allowed)
 		len += RTE_PMD_IXGBE_RX_MAX_BURST;
 
 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
@@ -2644,13 +2646,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			    "preconditions - canceling the feature for "
 			    "the whole port[%d]",
 			    rxq->queue_id, rxq->port_id);
-		hw->rx_vec_allowed = false;
+		adapter->rx_vec_allowed = false;
 	} else
 		ixgbe_rxq_vec_setup(rxq);
 
 	dev->data->rx_queues[queue_idx] = rxq;
 
-	ixgbe_reset_rx_queue(hw, rxq);
+	ixgbe_reset_rx_queue(adapter, rxq);
 
 	return 0;
 }
@@ -2704,7 +2706,8 @@ void
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
 	unsigned i;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2720,7 +2723,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			ixgbe_rx_queue_release_mbufs(rxq);
-			ixgbe_reset_rx_queue(hw, rxq);
+			ixgbe_reset_rx_queue(adapter, rxq);
 		}
 	}
 }
@@ -3969,20 +3972,21 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
 
 void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 {
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 
 	/*
 	 * In order to allow Vector Rx there are a few configuration
 	 * conditions to be met and Rx Bulk Allocation should be allowed.
 	 */
 	if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
-	    !hw->rx_bulk_alloc_allowed) {
+	    !adapter->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
 				    "preconditions or RTE_IXGBE_INC_VECTOR is "
 				    "not enabled",
 			     dev->data->port_id);
 
-		hw->rx_vec_allowed = false;
+		adapter->rx_vec_allowed = false;
 	}
 
 	/*
@@ -3993,7 +3997,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 * Otherwise use a single allocation version.
 	 */
 	if (dev->data->lro) {
-		if (hw->rx_bulk_alloc_allowed) {
+		if (adapter->rx_bulk_alloc_allowed) {
 			PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
 					   "allocation version");
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
@@ -4007,7 +4011,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		 * Set the non-LRO scattered callback: there are Vector and
 		 * single allocation versions.
 		 */
-		if (hw->rx_vec_allowed) {
+		if (adapter->rx_vec_allowed) {
 			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
 					    "callback (port=%d).",
 				     dev->data->port_id);
@@ -4029,12 +4033,12 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 *    - Bulk Allocation
 	 *    - Single buffer allocation (the simplest one)
 	 */
-	} else if (hw->rx_vec_allowed) {
+	} else if (adapter->rx_vec_allowed) {
 		PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
 				    "burst size no less than 32.");
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
-	} else if (hw->rx_bulk_alloc_allowed) {
+	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 				    "satisfied. Rx Burst Bulk Alloc function "
 				    "will be used on port=%d.",
@@ -4594,6 +4598,8 @@ int
 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct ixgbe_hw     *hw;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct ixgbe_rx_queue *rxq;
 	uint32_t rxdctl;
 	int poll_ms;
@@ -4621,7 +4627,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
 		ixgbe_rx_queue_release_mbufs(rxq);
-		ixgbe_reset_rx_queue(hw, rxq);
+		ixgbe_reset_rx_queue(adapter, rxq);
 	} else
 		return -1;
 
-- 
2.1.0