From mboxrd@z Thu Jan 1 00:00:00 1970
From: Wenzhuo Lu
To: dev@dpdk.org
Cc: Wenzhuo Lu, Qiming Yang, Xiaoyun Li, Jingjing Wu
Date: Fri, 23 Nov 2018 14:56:18 +0800
Message-Id: <1542956179-80951-19-git-send-email-wenzhuo.lu@intel.com>
X-Mailer: git-send-email 1.9.3
In-Reply-To: <1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>
References: <1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH 18/19] net/ice: support descriptor ops
List-Id: DPDK patches and discussions

Add the below ops:

rx_descriptor_done
rx_descriptor_status
tx_descriptor_status

Signed-off-by: Wenzhuo Lu
Signed-off-by: Qiming Yang
Signed-off-by: Xiaoyun Li
Signed-off-by: Jingjing Wu
---
 drivers/net/ice/ice_ethdev.c   |  3 ++
 drivers/net/ice/ice_lan_rxtx.c | 84 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h     |  3 ++
 3 files changed, 90 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 21a251f..c9dca15 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -112,6 +112,9 @@ static int ice_xstats_get_names(struct rte_eth_dev *dev,
 	.get_eeprom_length = ice_get_eeprom_length,
 	.get_eeprom = ice_get_eeprom,
 	.rx_queue_count = ice_rx_queue_count,
+	.rx_descriptor_done = ice_rx_descriptor_done,
+	.rx_descriptor_status = ice_rx_descriptor_status,
+	.tx_descriptor_status = ice_tx_descriptor_status,
 	.stats_get = ice_stats_get,
 	.stats_reset = ice_stats_reset,
 	.xstats_get = ice_xstats_get,
diff --git a/drivers/net/ice/ice_lan_rxtx.c b/drivers/net/ice/ice_lan_rxtx.c
index 07ab677..4e6c0ff 100644
--- a/drivers/net/ice/ice_lan_rxtx.c
+++ b/drivers/net/ice/ice_lan_rxtx.c
@@ -1514,6 +1514,90 @@
 	return desc;
 }
 
+int
+ice_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	volatile union ice_rx_desc *rxdp;
+	struct ice_rx_queue *rxq = rx_queue;
+	uint16_t desc;
+	int ret;
+
+	if (unlikely(offset >= rxq->nb_rx_desc)) {
+		PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
+		return 0;
+	}
+
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	rxdp = &rxq->rx_ring[desc];
+
+	ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+		   ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
+		 (1 << ICE_RX_DESC_STATUS_DD_S));
+
+	return ret;
+}
+
+int
+ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct ice_rx_queue *rxq = rx_queue;
+	volatile uint64_t *status;
+	uint64_t mask;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return -EINVAL;
+
+	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+	mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
+				ICE_RXD_QW1_STATUS_S);
+	if (*status & mask)
+		return RTE_ETH_RX_DESC_DONE;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct ice_tx_queue *txq = tx_queue;
+	volatile uint64_t *status;
+	uint64_t mask, expect;
+	uint32_t desc;
+
+	if (unlikely(offset >= txq->nb_tx_desc))
+		return -EINVAL;
+
+	desc = txq->tx_tail + offset;
+	/* go to next desc that has the RS bit */
+	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+		txq->tx_rs_thresh;
+	if (desc >= txq->nb_tx_desc) {
+		desc -= txq->nb_tx_desc;
+		if (desc >= txq->nb_tx_desc)
+			desc -= txq->nb_tx_desc;
+	}
+
+	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+	mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
+	expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
+				  ICE_TXD_QW1_DTYPE_S);
+	if ((*status & mask) == expect)
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
 void
 ice_clear_queues(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e0218b3..12ad383 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -143,6 +143,9 @@ uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts);
 void ice_set_tx_function(struct rte_eth_dev *dev);
 uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
 void ice_set_default_ptype_table(struct rte_eth_dev *dev);
 const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
-- 
1.9.3
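
For context, applications do not call these ops directly; they reach them through the
generic ethdev descriptor-status API. A minimal polling sketch is below, assuming an
already-started port; the port id, queue id, and offsets are illustrative assumptions,
not part of this patch.

#include <stdio.h>
#include <rte_ethdev.h>

/*
 * Sketch: query Rx/Tx descriptor status via the generic ethdev API,
 * which dispatches to the rx/tx_descriptor_status ops added above.
 */
static void
check_descriptor_status(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* Rx: has the descriptor 16 slots past the current tail been written back? */
	ret = rte_eth_rx_descriptor_status(port_id, queue_id, 16);
	if (ret == RTE_ETH_RX_DESC_DONE)
		printf("Rx descriptor done, a packet is ready\n");
	else if (ret == RTE_ETH_RX_DESC_AVAIL)
		printf("Rx descriptor still owned by the NIC\n");
	else if (ret == RTE_ETH_RX_DESC_UNAVAIL)
		printf("Rx descriptor outside the usable ring range\n");

	/* Tx: has the descriptor 32 slots past the current tail completed? */
	ret = rte_eth_tx_descriptor_status(port_id, queue_id, 32);
	if (ret == RTE_ETH_TX_DESC_DONE)
		printf("Tx descriptor done, its mbuf can be reclaimed\n");
	else if (ret == RTE_ETH_TX_DESC_FULL)
		printf("Tx descriptor still in flight\n");
}

Note that the Tx op rounds the requested offset up to the next descriptor carrying the
RS bit, so the result reflects the completion state of that whole tx_rs_thresh group
rather than the single requested slot.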