From: Michal Krawczyk
To: Marcin Wojtas, Michal Krawczyk, Guy Tzalik, Evgeny Schemeilin
Cc: dev@dpdk.org, matua@amazon.com, Rafal Kozik
Date: Wed, 9 May 2018 14:47:10 +0200
Message-Id: <20180509124714.23305-11-mk@semihalf.com>
X-Mailer: git-send-email 2.14.1
In-Reply-To: <20180509124714.23305-1-mk@semihalf.com>
References: <20180509124714.23305-1-mk@semihalf.com>
Subject: [dpdk-dev] [PATCH v1 20/24] net/ena: adjust error checking and cleaning

From: Rafal Kozik

Adjust error checking and cleaning to match the Linux driver:

* add a check for an MTU that is too small,
* fix mismatched Rx/Tx error messages,
* return the error received from the base driver, or a proper error
  code, instead of -1,
* in case of error, release the already occupied resources (a short
  illustrative sketch of this unwind pattern follows),
* in case of an Rx error, trigger a NIC reset.
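For illustration only (not part of the patch): a minimal, self-contained
sketch of the goto-style unwind pattern the setup and init paths switch to.
The function names below (acquire_first() etc.) are hypothetical
placeholders; the real calls are ena_com_destroy_io_queue(), rte_free(),
ena_com_delete_debug_area() and friends, as shown in the diff.

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical stand-ins for the real resources (IO queue, buffer
     * info, request ids). The last one fails to demonstrate the unwind.
     */
    static int acquire_first(void)   { return 0; }
    static int acquire_second(void)  { return 0; }
    static int acquire_third(void)   { return -ENOMEM; }
    static void release_second(void) { puts("release second"); }
    static void release_first(void)  { puts("release first"); }

    /* Each failure jumps to a label that releases everything acquired so
     * far, and the original error code is returned instead of a bare -1.
     */
    static int setup_example(void)
    {
            int rc;

            rc = acquire_first();
            if (rc)
                    return rc;              /* nothing to unwind yet */

            rc = acquire_second();
            if (rc)
                    goto err_release_first;

            rc = acquire_third();
            if (rc)
                    goto err_release_second;

            return 0;

    err_release_second:
            release_second();
    err_release_first:
            release_first();
            return rc;
    }

    int main(void)
    {
            printf("setup_example() = %d\n", setup_example());
            return 0;
    }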
Fixes: 1173fca25af9 ("ena: add polling-mode driver")
Fixes: 1daff5260ff8 ("net/ena: use unmasked head and tail")
Signed-off-by: Rafal Kozik
Acked-by: Michal Krawczyk
---
 drivers/net/ena/ena_ethdev.c | 158 ++++++++++++++++++++++++++++---------------
 drivers/net/ena/ena_ethdev.h |   2 +
 2 files changed, 104 insertions(+), 56 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index b3aa751e9..6a2c4699a 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -440,9 +440,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
         rc = ena_com_set_host_attributes(ena_dev);
         if (rc) {
-                RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-                if (rc != -ENA_COM_UNSUPPORTED)
-                        goto err;
+                if (rc == -ENA_COM_UNSUPPORTED)
+                        RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+                else
+                        RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+                goto err;
         }
 
         return;
@@ -493,9 +496,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
 
         rc = ena_com_set_host_attributes(&adapter->ena_dev);
         if (rc) {
-                RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
-                if (rc != -ENA_COM_UNSUPPORTED)
-                        goto err;
+                if (rc == -ENA_COM_UNSUPPORTED)
+                        RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+                else
+                        RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+                goto err;
         }
 
         return;
@@ -538,7 +544,9 @@ ena_dev_reset(struct rte_eth_dev *dev)
 
         ena_com_set_admin_running_state(ena_dev, false);
 
-        ena_com_dev_reset(ena_dev, adapter->reset_reason);
+        rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+        if (rc)
+                RTE_LOG(ERR, PMD, "Device reset failed\n");
 
         for (i = 0; i < nb_queues; i++)
                 mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
@@ -583,7 +591,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
         struct ena_adapter *adapter =
                 (struct ena_adapter *)(dev->data->dev_private);
         struct ena_com_dev *ena_dev = &adapter->ena_dev;
-        int ret, i;
+        int rc, i;
         u16 entry_value;
         int conf_idx;
         int idx;
@@ -595,8 +603,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
                 RTE_LOG(WARNING, PMD,
                         "indirection table %d is bigger than supported (%d)\n",
                         reta_size, ENA_RX_RSS_TABLE_SIZE);
-                ret = -EINVAL;
-                goto err;
+                return -EINVAL;
         }
 
         for (i = 0 ; i < reta_size ; i++) {
@@ -608,29 +615,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
                 if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
                         entry_value =
                                 ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
-                        ret = ena_com_indirect_table_fill_entry(ena_dev,
-                                                                i,
-                                                                entry_value);
-                        if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+
+                        rc = ena_com_indirect_table_fill_entry(ena_dev,
+                                                               i,
+                                                               entry_value);
+                        if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                                 RTE_LOG(ERR, PMD,
                                         "Cannot fill indirect table\n");
-                                ret = -ENOTSUP;
-                                goto err;
+                                return rc;
                         }
                 }
         }
 
-        ret = ena_com_indirect_table_set(ena_dev);
-        if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+        rc = ena_com_indirect_table_set(ena_dev);
+        if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                 RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
-                ret = -ENOTSUP;
-                goto err;
+                return rc;
         }
 
         RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
                 __func__, reta_size, adapter->rte_dev->data->port_id);
-err:
-        return ret;
+
+        return 0;
 }
 
 /* Query redirection table. */
@@ -641,7 +647,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
         struct ena_adapter *adapter =
                 (struct ena_adapter *)(dev->data->dev_private);
         struct ena_com_dev *ena_dev = &adapter->ena_dev;
-        int ret;
+        int rc;
         int i;
         u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
         int reta_conf_idx;
@@ -651,11 +657,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
             (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
                 return -EINVAL;
 
-        ret = ena_com_indirect_table_get(ena_dev, indirect_table);
-        if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+        rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+        if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                 RTE_LOG(ERR, PMD, "cannot get indirect table\n");
-                ret = -ENOTSUP;
-                goto err;
+                return -ENOTSUP;
         }
 
         for (i = 0 ; i < reta_size ; i++) {
@@ -665,8 +670,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
                 reta_conf[reta_conf_idx].reta[reta_idx] =
                         ENA_IO_RXQ_IDX_REV(indirect_table[i]);
         }
-err:
-        return ret;
+
+        return 0;
 }
 
 static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -888,7 +893,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
                                 PMD_INIT_LOG(ERR,
                                              "failed to restart queue %d type(%d)",
                                              i, ring_type);
-                                return -1;
+                                return rc;
                         }
                 }
         }
@@ -912,9 +917,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 {
         uint32_t max_frame_len = ena_get_mtu_conf(adapter);
 
-        if (max_frame_len > adapter->max_mtu) {
-                PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
-                return -1;
+        if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+                PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+                             "max mtu: %d, min mtu: %d\n",
+                             max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+                return ENA_COM_UNSUPPORTED;
         }
 
         return 0;
@@ -1012,12 +1019,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
         ena_dev = &adapter->ena_dev;
         ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
-        if (mtu > ena_get_mtu_conf(adapter)) {
+        if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
                 RTE_LOG(ERR, PMD,
-                        "Given MTU (%d) exceeds maximum MTU supported (%d)\n",
-                        mtu, ena_get_mtu_conf(adapter));
-                rc = -EINVAL;
-                goto err;
+                        "Invalid MTU setting. new_mtu: %d "
+                        "max mtu: %d min mtu: %d\n",
+                        mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+                return -EINVAL;
         }
 
         rc = ena_com_set_dev_mtu(ena_dev, mtu);
@@ -1026,7 +1033,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
         else
                 RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
 
-err:
         return rc;
 }
 
@@ -1097,7 +1103,7 @@ static int ena_queue_restart(struct ena_ring *ring)
         rc = ena_populate_rx_queue(ring, bufs_num);
         if (rc != bufs_num) {
                 PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
-                return (-1);
+                return ENA_COM_FAULT;
         }
 
         return 0;
@@ -1127,12 +1133,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                 RTE_LOG(CRIT, PMD,
                         "API violation. Queue %d is already configured\n",
                         queue_idx);
-                return -1;
+                return ENA_COM_FAULT;
         }
 
         if (!rte_is_power_of_2(nb_desc)) {
                 RTE_LOG(ERR, PMD,
-                        "Unsupported size of RX queue: %d is not a power of 2.",
+                        "Unsupported size of TX queue: %d is not a power of 2.",
                         nb_desc);
                 return -EINVAL;
         }
@@ -1167,6 +1173,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                 RTE_LOG(ERR, PMD,
                         "failed to create io TX queue #%d (qid:%d) rc: %d\n",
                         queue_idx, ena_qid, rc);
+                return rc;
         }
         txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
         txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1178,8 +1185,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                 RTE_LOG(ERR, PMD,
                         "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
                         queue_idx, rc);
-                ena_com_destroy_io_queue(ena_dev, ena_qid);
-                goto err;
+                goto err_destroy_io_queue;
         }
 
         txq->port_id = dev->data->port_id;
@@ -1193,7 +1199,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                                           RTE_CACHE_LINE_SIZE);
         if (!txq->tx_buffer_info) {
                 RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
-                return -ENOMEM;
+                rc = -ENOMEM;
+                goto err_destroy_io_queue;
         }
 
         txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
@@ -1201,9 +1208,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                                          RTE_CACHE_LINE_SIZE);
         if (!txq->empty_tx_reqs) {
                 RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
-                rte_free(txq->tx_buffer_info);
-                return -ENOMEM;
+                rc = -ENOMEM;
+                goto err_free;
         }
+
         for (i = 0; i < txq->ring_size; i++)
                 txq->empty_tx_reqs[i] = i;
 
@@ -1213,7 +1221,14 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
         /* Store pointer to this queue in upper layer */
         txq->configured = 1;
         dev->data->tx_queues[queue_idx] = txq;
-err:
+
+        return 0;
+
+err_free:
+        rte_free(txq->tx_buffer_info);
+
+err_destroy_io_queue:
+        ena_com_destroy_io_queue(ena_dev, ena_qid);
         return rc;
 }
 
@@ -1240,12 +1255,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                 RTE_LOG(CRIT, PMD,
                         "API violation. Queue %d is already configured\n",
                         queue_idx);
-                return -1;
+                return ENA_COM_FAULT;
         }
 
         if (!rte_is_power_of_2(nb_desc)) {
                 RTE_LOG(ERR, PMD,
-                        "Unsupported size of TX queue: %d is not a power of 2.",
+                        "Unsupported size of RX queue: %d is not a power of 2.",
                         nb_desc);
                 return -EINVAL;
         }
@@ -1275,9 +1290,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
         ctx.numa_node = ena_cpu_to_node(queue_idx);
 
         rc = ena_com_create_io_queue(ena_dev, &ctx);
-        if (rc)
+        if (rc) {
                 RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
                         queue_idx, rc);
+                return rc;
+        }
 
         rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
         rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1290,6 +1307,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                         "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
                         queue_idx, rc);
                 ena_com_destroy_io_queue(ena_dev, ena_qid);
+                return rc;
         }
 
         rxq->port_id = dev->data->port_id;
@@ -1303,6 +1321,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                                           RTE_CACHE_LINE_SIZE);
         if (!rxq->rx_buffer_info) {
                 RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+                ena_com_destroy_io_queue(ena_dev, ena_qid);
                 return -ENOMEM;
         }
 
@@ -1313,6 +1332,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                 RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
                 rte_free(rxq->rx_buffer_info);
                 rxq->rx_buffer_info = NULL;
+                ena_com_destroy_io_queue(ena_dev, ena_qid);
                 return -ENOMEM;
         }
 
@@ -1363,6 +1383,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
                 rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
 
                 req_id = rxq->empty_rx_reqs[next_to_use_masked];
+                rc = validate_rx_req_id(rxq, req_id);
+                if (unlikely(rc < 0))
+                        break;
+
                 /* prepare physical address for DMA transaction */
                 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
                 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
@@ -1378,9 +1402,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
                 next_to_use++;
         }
 
+        if (unlikely(i < count))
+                RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+                        "buffers (from %d)\n", rxq->id, i, count);
+
         /* When we submitted free recources to device... */
         if (likely(i > 0)) {
-                /* ...let HW know that it can fill buffers with data */
+                /* ...let HW know that it can fill buffers with data
+                 *
+                 * Add memory barrier to make sure the desc were written before
+                 * issue a doorbell
+                 */
                 rte_wmb();
                 ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
 
@@ -1608,7 +1640,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
         rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
         if (rc) {
                 PMD_INIT_LOG(CRIT, "Failed to init ENA device");
-                return -1;
+                goto err;
         }
 
         adapter->wd_state = wd_state;
@@ -1617,8 +1649,10 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
                                                     &get_feat_ctx);
 
         queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
-        if ((queue_size <= 0) || (adapter->num_queues <= 0))
-                return -EFAULT;
+        if (queue_size <= 0 || adapter->num_queues <= 0) {
+                rc = -EFAULT;
+                goto err_device_destroy;
+        }
 
         adapter->tx_ring_size = queue_size;
         adapter->rx_ring_size = queue_size;
@@ -1647,7 +1681,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
                                        RTE_CACHE_LINE_SIZE);
         if (!adapter->drv_stats) {
                 RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
-                return -ENOMEM;
+                rc = -ENOMEM;
+                goto err_delete_debug_area;
         }
 
         rte_intr_callback_register(intr_handle,
@@ -1665,6 +1700,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
         adapter->state = ENA_ADAPTER_STATE_INIT;
 
         return 0;
+
+err_delete_debug_area:
+        ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+        ena_com_delete_host_info(ena_dev);
+        ena_com_admin_destroy(ena_dev);
+
+err:
+        return rc;
 }
 
 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
@@ -1900,6 +1945,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                     &ena_rx_ctx);
                 if (unlikely(rc)) {
                         RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+                        rx_ring->adapter->trigger_reset = true;
                         return 0;
                 }
 
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 73c110ab9..2dc8129e0 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -50,6 +50,8 @@
 #define ENA_NAME_MAX_LEN        20
 #define ENA_PKT_MAX_BUFS        17
 
+#define ENA_MIN_MTU             128
+
 #define ENA_MMIO_DISABLE_REG_READ       BIT(0)
 
 #define ENA_WD_TIMEOUT_SEC      3
-- 
2.14.1