From mboxrd@z Thu Jan  1 00:00:00 1970
From: Rasesh Mody 
To: 
CC: Shahed Shaikh , , , 
Date: Thu, 11 Apr 2019 18:47:39 -0700
Message-ID: <1555033662-22935-3-git-send-email-rmody@marvell.com>
X-Mailer: git-send-email 1.7.10.3
In-Reply-To: <1555033662-22935-1-git-send-email-rmody@marvell.com>
References: <1555033662-22935-1-git-send-email-rmody@marvell.com>
MIME-Version: 1.0
Content-Type: text/plain
Subject: [dpdk-dev] [PATCH 3/6] net/bnx2x: fix ramrod timeout

From: Shahed Shaikh 

There is a race condition when RAMROD completions on a fast path queue
are processed both through the interrupt handler and through the
polling method. Because the interrupt handler is invoked from the
actual interrupt event as well as from the RAMROD-processing polling
flow, one flow may read and clear a fastpath interrupt without actually
processing the RAMROD completion. This causes a RAMROD timeout even
though the HW sent a completion event.

Fix this by introducing an atomic variable which is set only when the
interrupt handler needs to process a RAMROD completion.
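The synchronization pattern is roughly the following standalone sketch
(illustrative only, not the driver code itself; process_fastpath() and
wait_for_state() are hypothetical stand-ins for the fastpath handling
in bnx2x_intr_legacy() and for ecore_state_wait()):

/* Illustrative sketch of the scan_fp flag pattern, assuming DPDK's
 * legacy rte_atomic API; process_fastpath() and wait_for_state() are
 * hypothetical stand-ins for the real driver functions.
 */
#include <stdbool.h>
#include <rte_atomic.h>

static rte_atomic32_t scan_fp;	/* mirrors sc->scan_fp in the patch */

static void process_fastpath(void)
{
	/* Scan the fastpath ring only while a RAMROD is pending, so a
	 * stray interrupt cannot clear the status block without also
	 * consuming the completion.
	 */
	if (rte_atomic32_read(&scan_fp) == 1) {
		/* ... process RX work / RAMROD completion here ... */
	}
}

static bool wait_for_state(int tries, bool (*state_reached)(void))
{
	rte_atomic32_set(&scan_fp, 1);	/* ask the handler to scan */

	while (tries--) {
		process_fastpath();
		if (state_reached()) {
			rte_atomic32_set(&scan_fp, 0);
			return true;
		}
	}

	rte_atomic32_set(&scan_fp, 0);	/* stop scanning on timeout */
	return false;
}

The point of the flag is that whichever context runs the interrupt
handler while a RAMROD is outstanding actually processes the completion
instead of merely acknowledging the interrupt.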
Fixes: 540a211084a7 ("bnx2x: driver core")
Cc: stable@dpdk.org

Signed-off-by: Shahed Shaikh 
---
 drivers/net/bnx2x/bnx2x.c        | 18 ++++++++++++------
 drivers/net/bnx2x/bnx2x.h        |  4 ++--
 drivers/net/bnx2x/bnx2x_ethdev.c |  2 +-
 drivers/net/bnx2x/ecore_sp.c     | 12 ++++++++++--
 4 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 298bc41..f32db6d 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -124,7 +124,7 @@ static void bnx2x_update_rx_prod(struct bnx2x_softc *sc,
 int bnx2x_nic_load(struct bnx2x_softc *sc);
 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp);
 static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
 			 uint8_t storm, uint16_t index, uint8_t op,
 			 uint8_t update);
@@ -1114,6 +1114,12 @@ static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
 		    atomic_load_acq_long(&sc->cq_spq_left),
 		    atomic_load_acq_long(&sc->eq_spq_left));

+	/* RAMROD completion is processed in bnx2x_intr_legacy()
+	 * which can run from different contexts.
+	 * Ask bnx2x_intr_legacy() to process RAMROD
+	 * completion whenever it gets scheduled.
+	 */
+	rte_atomic32_set(&sc->scan_fp, 1);
 	bnx2x_sp_prod_update(sc);

 	return 0;
@@ -4539,7 +4545,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
 	return rc;
 }

-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
 {
 	struct bnx2x_softc *sc = fp->sc;
 	uint8_t more_rx = FALSE;
@@ -4554,14 +4560,14 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
 	/* update the fastpath index */
 	bnx2x_update_fp_sb_idx(fp);

-	if (scan_fp) {
+	if (rte_atomic32_read(&sc->scan_fp) == 1) {
 		if (bnx2x_has_rx_work(fp)) {
 			more_rx = bnx2x_rxeof(sc, fp);
 		}

 		if (more_rx) {
 			/* still more work to do */
-			bnx2x_handle_fp_tq(fp, scan_fp);
+			bnx2x_handle_fp_tq(fp);
 			return;
 		}
 	}
@@ -4577,7 +4583,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
  * then calls a separate routine to handle the various
  * interrupt causes: link, RX, and TX.
  */
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
+int bnx2x_intr_legacy(struct bnx2x_softc *sc)
 {
 	struct bnx2x_fastpath *fp;
 	uint32_t status, mask;
@@ -4609,7 +4615,7 @@ int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
 			/* acknowledge and disable further fastpath interrupts */
 			bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
-			bnx2x_handle_fp_tq(fp, scan_fp);
+			bnx2x_handle_fp_tq(fp);
 			status &= ~mask;
 		}
 	}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9e82a89..bb80310 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1090,7 +1090,7 @@ struct bnx2x_softc {
 #define PERIODIC_STOP 0
 #define PERIODIC_GO 1
 	volatile unsigned long periodic_flags;
-
+	rte_atomic32_t scan_fp;
 	struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
 	struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
@@ -1939,7 +1939,7 @@ void ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr,
 uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
 void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
 void bnx2x_print_device_info(struct bnx2x_softc *sc);
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
+int bnx2x_intr_legacy(struct bnx2x_softc *sc);
 void bnx2x_link_status_update(struct bnx2x_softc *sc);
 int bnx2x_complete_sp(struct bnx2x_softc *sc);
 int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 5b4c5cf..bcb899a 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -113,7 +113,7 @@ struct rte_bnx2x_xstats_name_off {
 	struct bnx2x_softc *sc = dev->data->dev_private;
 	uint32_t link_status;

-	bnx2x_intr_legacy(sc, 0);
+	bnx2x_intr_legacy(sc);

 	if (sc->periodic_flags & PERIODIC_GO)
 		bnx2x_periodic_callout(sc);
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 6d2bb81..4319409 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -291,25 +291,33 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
 	cnt *= 20;

 	ECORE_MSG(sc, "waiting for state to become %d", state);

+	/* being over protective to remind bnx2x_intr_legacy() to
+	 * process RAMROD
+	 */
+	rte_atomic32_set(&sc->scan_fp, 1);
 	ECORE_MIGHT_SLEEP();
 	while (cnt--) {
-		bnx2x_intr_legacy(sc, 1);
+		bnx2x_intr_legacy(sc);
 		if (!ECORE_TEST_BIT(state, pstate)) {
 #ifdef ECORE_STOP_ON_ERROR
 			ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
 #endif
+			rte_atomic32_set(&sc->scan_fp, 0);
 			return ECORE_SUCCESS;
 		}

 		ECORE_WAIT(sc, delay_us);

-		if (sc->panic)
+		if (sc->panic) {
+			rte_atomic32_set(&sc->scan_fp, 0);
 			return ECORE_IO;
+		}
 	}

 	/* timeout! */
 	PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
+	rte_atomic32_set(&sc->scan_fp, 0);
 #ifdef ECORE_STOP_ON_ERROR
 	ecore_panic();
 #endif
-- 
1.7.10.3