From: Wei Dai <wei.dai@intel.com>
To: wenzhuo.lu@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, Wei Dai <wei.dai@intel.com>
Date: Tue, 3 Apr 2018 10:54:56 +0800
Message-Id: <20180403025456.735-3-wei.dai@intel.com>
X-Mailer: git-send-email 2.9.5
In-Reply-To: <20180403025456.735-1-wei.dai@intel.com>
References: <20180301185431.26204-1-wei.dai@intel.com>
 <20180403025456.735-1-wei.dai@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/2] net/e1000: convert to new Tx offloads API

Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit supports the new Tx offloads API.

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/e1000/e1000_ethdev.h |  6 ++++
 drivers/net/e1000/em_ethdev.c    | 16 +++++++----
 drivers/net/e1000/em_rxtx.c      | 62 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/e1000/igb_ethdev.c   | 15 +++++----
 drivers/net/e1000/igb_rxtx.c     | 58 +++++++++++++++++++++++++++++++++++++
 5 files changed, 145 insertions(+), 12 deletions(-)
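Under the new API, applications request Tx offloads through DEV_TX_OFFLOAD_* bits
instead of the legacy txq_flags, at both port level (rte_eth_conf.txmode.offloads)
and queue level (rte_eth_txconf.offloads). As context, here is a minimal sketch of
the application side; the function name setup_port_tx, the queue counts and the
chosen offload bits are illustrative, not values mandated by this patch:

#include <rte_ethdev.h>

static int
setup_port_tx(uint16_t port_id, uint16_t nb_txd, unsigned int socket_id)
{
	struct rte_eth_conf port_conf = {
		.txmode = {
			/* port-wide Tx offloads requested by the app */
			.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM,
		},
	};
	struct rte_eth_txconf txconf = {
		/* tell the ethdev layer to honour .offloads rather than
		 * the legacy txq_flags */
		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
		/* per-queue request; for e1000 it must agree with the
		 * port-level request, see the check functions below */
		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			    DEV_TX_OFFLOAD_TCP_CKSUM,
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id,
				      &txconf);
}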
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 17b5806..6354b89 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -373,6 +373,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
@@ -447,6 +450,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index e2ec4b1..d37df8a 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -453,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
@@ -465,6 +466,13 @@ eth_em_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1066,11 +1074,6 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1095,6 +1098,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3291b5e..2b3c63e 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -164,6 +164,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
+	uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1152,6 +1153,52 @@ em_reset_tx_queue(struct em_tx_queue *txq)
 	memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
 }
 
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	/*
+	 * As only one Tx queue can be used, let the per-queue offload
+	 * capability be the same as the per-port offload capability
+	 * for convenience.
+	 */
+	tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
+static int
+em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
+	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1167,6 +1214,19 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			em_get_tx_port_offloads_capa(dev),
+			em_get_tx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -1270,6 +1330,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 	return 0;
 }
@@ -1982,4 +2043,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.offloads = txq->offloads;
 }
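em_check_tx_queue_offloads() (and its igb twin below) accepts a per-queue
request only if every requested bit is supported at queue or port level, and
the request agrees bit-for-bit with the configured port offloads on all
port-level capabilities. The following self-contained sketch restates that
rule; check() mirrors the driver logic, while OFFLOAD_A/OFFLOAD_B are flag
values made up for the example:

#include <stdint.h>
#include <stdio.h>

#define OFFLOAD_A 0x1ULL	/* example flag, port- and queue-level */
#define OFFLOAD_B 0x2ULL	/* example flag, port- and queue-level */

static int
check(uint64_t port_offloads, uint64_t port_supported,
      uint64_t queue_supported, uint64_t requested)
{
	/* every requested bit must be supported somewhere */
	if ((requested & (queue_supported | port_supported)) != requested)
		return 0;
	/* on port-level bits the queue request must match the port config */
	if ((port_offloads ^ requested) & port_supported)
		return 0;
	return 1;
}

int
main(void)
{
	uint64_t supported = OFFLOAD_A | OFFLOAD_B;

	/* queue repeats the port configuration: accepted (prints 1) */
	printf("%d\n", check(OFFLOAD_A, supported, supported, OFFLOAD_A));
	/* queue drops a port-configured offload: rejected (prints 0) */
	printf("%d\n", check(OFFLOAD_A | OFFLOAD_B, supported, supported,
			     OFFLOAD_A));
	/* queue asks for an unsupported bit: rejected (prints 0) */
	printf("%d\n", check(OFFLOAD_A, supported, supported, 0x4ULL));
	return 0;
}

Because em and igb report identical port-level and queue-level capability
sets here, in practice a queue request passes only when it equals the
configured port offloads exactly.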
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 97c7089..8d42266 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2151,13 +2151,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2230,6 +2226,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -2299,6 +2296,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -2318,6 +2318,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
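With eth_igb_infos_get() and eth_igbvf_infos_get() now filling
tx_offload_capa and tx_queue_offload_capa from the shared helpers, an
application can discover the supported set before configuring the port. A
small sketch of such a query; the helper name clamp_tx_offloads and the
"wanted" parameter are illustrative:

#include <rte_ethdev.h>

/* Return only those wanted Tx offload bits the port can actually do. */
static uint64_t
clamp_tx_offloads(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* tx_offload_capa is the union of port-level and per-queue
	 * capabilities filled in by the PMD */
	return wanted & dev_info.tx_offload_capa;
}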
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 450beea..323913f 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -181,6 +181,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
+	uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1448,6 +1449,48 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 	igb_reset_tx_queue_stat(txq);
 }
 
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM |
+			  DEV_TX_OFFLOAD_UDP_CKSUM |
+			  DEV_TX_OFFLOAD_TCP_CKSUM |
+			  DEV_TX_OFFLOAD_SCTP_CKSUM |
+			  DEV_TX_OFFLOAD_TCP_TSO;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
+static int
+igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
+	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1460,6 +1503,19 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t size;
 
+	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			igb_get_tx_port_offloads_capa(dev),
+			igb_get_tx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
@@ -1543,6 +1599,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 
 	return 0;
 }
@@ -2837,6 +2894,7 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.offloads = txq->offloads;
 }
 
 int
-- 
2.9.5
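With txq->offloads recorded at queue setup and exposed through
em_txq_info_get()/igb_txq_info_get(), the configured per-queue offloads can
be read back via the standard ethdev query. An illustrative sketch; the
wrapper name tx_queue_offloads is an example, not part of the patch:

#include <rte_ethdev.h>

/* Read back the DEV_TX_OFFLOAD_* bits a queue was set up with. */
static uint64_t
tx_queue_offloads(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return 0;
	return qinfo.conf.offloads;
}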