From mboxrd@z Thu Jan 1 00:00:00 1970
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
	harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Tue, 13 Apr 2021 15:14:46 -0500
Message-Id: <1618344896-2090-17-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1618344896-2090-1-git-send-email-timothy.mcdaniel@intel.com>
References: <20210316221857.2254-2-timothy.mcdaniel@intel.com>
	<1618344896-2090-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v3 16/26] event/dlb2: add v2.5 finish map/unmap
List-Id: DPDK patches and discussions <dev.dpdk.org>

Update the low-level hardware functions with the map/unmap interfaces,
accounting for the new combined register file and hardware access macros.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb2/pf/base/dlb2_resource.c    | 1054 -----------------
 .../event/dlb2/pf/base/dlb2_resource_new.c    |   50 +
 2 files changed, 50 insertions(+), 1054 deletions(-)
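
Note for reviewers: the 1054 lines removed below are the dynamic QID
map/unmap completion machinery, carried over to dlb2_resource_new.c on
top of the combined DLB v2.0/v2.5 register file. The mechanical change
is in register access style, roughly as follows (illustrative sketch
only, not taken verbatim from this patch; the DLB2_BIT_SET()/hw->ver
pattern follows the combined register file introduced earlier in this
series):

/* Old style (dlb2_resource.c): per-version register unions */
static void cq_disable_old(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	reg.field.disabled = 1;
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
}

/* New style (dlb2_resource_new.c): plain u32 values plus bit-field
 * macros, with the register offset selected by device version
 * (hw->ver) so one source file serves both v2.0 and v2.5.
 */
static void cq_disable_new(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
}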
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index 8c1d8c782..f05f750f5 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -54,1060 +54,6 @@ void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
 }
 
-/*
- * The PF driver cannot assume that a register write will affect subsequent HCW
- * writes. To ensure a write completes, the driver must read back a CSR. This
- * function only need be called for configuration that can occur after the
- * domain has started; prior to starting, applications can't send HCWs.
- */
-static inline void dlb2_flush_csr(struct dlb2_hw *hw)
-{
-	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
-}
-
-static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
-				    struct dlb2_ldb_port *port)
-{
-	union dlb2_lsp_cq_ldb_dsbl reg;
-
-	/*
-	 * Don't re-enable the port if a removal is pending. The caller should
-	 * mark this port as enabled (if it isn't already), and when the
-	 * removal completes the port will be enabled.
-	 */
-	if (port->num_pending_removals)
-		return;
-
-	reg.field.disabled = 0;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
-
-	dlb2_flush_csr(hw);
-}
-
-static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
-				     struct dlb2_ldb_port *port)
-{
-	union dlb2_lsp_cq_ldb_dsbl reg;
-
-	reg.field.disabled = 1;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
-
-	dlb2_flush_csr(hw);
-}
-
-static struct dlb2_ldb_queue *
-dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
-			   u32 id,
-			   bool vdev_req,
-			   unsigned int vdev_id)
-{
-	struct dlb2_list_entry *iter1;
-	struct dlb2_list_entry *iter2;
-	struct dlb2_function_resources *rsrcs;
-	struct dlb2_hw_domain *domain;
-	struct dlb2_ldb_queue *queue;
-	RTE_SET_USED(iter1);
-	RTE_SET_USED(iter2);
-
-	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
-		return NULL;
-
-	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
-
-	if (!vdev_req)
-		return &hw->rsrcs.ldb_queues[id];
-
-	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
-			if (queue->id.virt_id == id)
-				return queue;
-	}
-
-	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
-		if (queue->id.virt_id == id)
-			return queue;
-
-	return NULL;
-}
-
-static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
-						      u32 id,
-						      bool vdev_req,
-						      unsigned int vdev_id)
-{
-	struct dlb2_list_entry *iteration;
-	struct dlb2_function_resources *rsrcs;
-	struct dlb2_hw_domain *domain;
-	RTE_SET_USED(iteration);
-
-	if (id >= DLB2_MAX_NUM_DOMAINS)
-		return NULL;
-
-	if (!vdev_req)
-		return &hw->domains[id];
-
-	rsrcs = &hw->vdev[vdev_id];
-
-	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
-		if (domain->id.virt_id == id)
-			return domain;
-
-	return NULL;
-}
-
-static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
-					   struct dlb2_ldb_port *port,
-					   struct dlb2_ldb_queue *queue,
-					   int slot,
-					   enum dlb2_qid_map_state new_state)
-{
-	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
-	struct dlb2_hw_domain *domain;
-	int domain_id;
-
-	domain_id = port->domain_id.phys_id;
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
-	if (domain == NULL) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: unable to find domain %d\n",
-			    __func__, domain_id);
-		return -EINVAL;
-	}
-
-	switch (curr_state) {
-	case DLB2_QUEUE_UNMAPPED:
-		switch (new_state) {
-		case DLB2_QUEUE_MAPPED:
-			queue->num_mappings++;
-			port->num_mappings++;
-			break;
-		case DLB2_QUEUE_MAP_IN_PROG:
-			queue->num_pending_additions++;
-			domain->num_pending_additions++;
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB2_QUEUE_MAPPED:
-		switch (new_state) {
-		case DLB2_QUEUE_UNMAPPED:
-			queue->num_mappings--;
-			port->num_mappings--;
-			break;
-		case DLB2_QUEUE_UNMAP_IN_PROG:
-			port->num_pending_removals++;
-			domain->num_pending_removals++;
-			break;
-		case DLB2_QUEUE_MAPPED:
-			/* Priority change, nothing to update */
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB2_QUEUE_MAP_IN_PROG:
-		switch (new_state) {
-		case DLB2_QUEUE_UNMAPPED:
-			queue->num_pending_additions--;
-			domain->num_pending_additions--;
-			break;
-		case DLB2_QUEUE_MAPPED:
-			queue->num_mappings++;
-			port->num_mappings++;
-			queue->num_pending_additions--;
-			domain->num_pending_additions--;
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB2_QUEUE_UNMAP_IN_PROG:
-		switch (new_state) {
-		case DLB2_QUEUE_UNMAPPED:
-			port->num_pending_removals--;
-			domain->num_pending_removals--;
-			queue->num_mappings--;
-			port->num_mappings--;
-			break;
-		case DLB2_QUEUE_MAPPED:
-			port->num_pending_removals--;
-			domain->num_pending_removals--;
-			break;
-		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
-			/* Nothing to update */
-			break;
-		default:
-			goto error;
-		}
-		break;
-	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
-		switch (new_state) {
-		case DLB2_QUEUE_UNMAP_IN_PROG:
-			/* Nothing to update */
-			break;
-		case DLB2_QUEUE_UNMAPPED:
-			/*
-			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
-			 * becomes UNMAPPED before it transitions to
-			 * MAP_IN_PROG.
-			 */
-			queue->num_mappings--;
-			port->num_mappings--;
-			port->num_pending_removals--;
-			domain->num_pending_removals--;
-			break;
-		default:
-			goto error;
-		}
-		break;
-	default:
-		goto error;
-	}
-
-	port->qid_map[slot].state = new_state;
-
-	DLB2_HW_DBG(hw,
-		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
-		    __func__, queue->id.phys_id, port->id.phys_id,
-		    curr_state, new_state);
-	return 0;
-
-error:
-	DLB2_HW_ERR(hw,
-		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
-		    __func__, queue->id.phys_id, port->id.phys_id,
-		    curr_state, new_state);
-	return -EFAULT;
-}
-
-static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
-				enum dlb2_qid_map_state state,
-				int *slot)
-{
-	int i;
-
-	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (port->qid_map[i].state == state)
-			break;
-	}
-
-	*slot = i;
-
-	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
-}
-
-static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
-				      enum dlb2_qid_map_state state,
-				      struct dlb2_ldb_queue *queue,
-				      int *slot)
-{
-	int i;
-
-	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		if (port->qid_map[i].state == state &&
-		    port->qid_map[i].qid == queue->id.phys_id)
-			break;
-	}
-
-	*slot = i;
-
-	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
-}
-
-/*
- * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
- * their function names imply, and should only be called by the dynamic CQ
- * mapping code.
- */
-static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
-					      struct dlb2_hw_domain *domain,
-					      struct dlb2_ldb_queue *queue)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int slot, i;
-	RTE_SET_USED(iter);
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
-
-			if (!dlb2_port_find_slot_queue(port, state,
-						       queue, &slot))
-				continue;
-
-			if (port->enabled)
-				dlb2_ldb_port_cq_disable(hw, port);
-		}
-	}
-}
-
-static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
-					     struct dlb2_hw_domain *domain,
-					     struct dlb2_ldb_queue *queue)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int slot, i;
-	RTE_SET_USED(iter);
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
-
-			if (!dlb2_port_find_slot_queue(port, state,
-						       queue, &slot))
-				continue;
-
-			if (port->enabled)
-				dlb2_ldb_port_cq_enable(hw, port);
-		}
-	}
-}
-
-static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
-						struct dlb2_ldb_port *port,
-						int slot)
-{
-	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
-
-	r0.field.cq = port->id.phys_id;
-	r0.field.qidix = slot;
-	r0.field.value = 0;
-	r0.field.inflight_ok_v = 1;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
-
-	dlb2_flush_csr(hw);
-}
-
-static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
-					      struct dlb2_ldb_port *port,
-					      int slot)
-{
-	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
-
-	r0.field.cq = port->id.phys_id;
-	r0.field.qidix = slot;
-	r0.field.value = 1;
-	r0.field.inflight_ok_v = 1;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
-
-	dlb2_flush_csr(hw);
-}
-
-static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
-					struct dlb2_ldb_port *p,
-					struct dlb2_ldb_queue *q,
-					u8 priority)
-{
-	union dlb2_lsp_cq2priov r0;
-	union dlb2_lsp_cq2qid0 r1;
-	union dlb2_atm_qid2cqidix_00 r2;
-	union dlb2_lsp_qid2cqidix_00 r3;
-	union dlb2_lsp_qid2cqidix2_00 r4;
-	enum dlb2_qid_map_state state;
-	int i;
-
-	/* Look for a pending or already mapped slot, else an unused slot */
-	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
-	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
-	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: port slot tracking failed\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/* Read-modify-write the priority and valid bit register */
-	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
-
-	r0.field.v |= 1 << i;
-	r0.field.prio |= (priority & 0x7) << i * 3;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
-
-	/* Read-modify-write the QID map register */
-	if (i < 4)
-		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
-	else
-		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
-
-	if (i == 0 || i == 4)
-		r1.field.qid_p0 = q->id.phys_id;
-	if (i == 1 || i == 5)
-		r1.field.qid_p1 = q->id.phys_id;
-	if (i == 2 || i == 6)
-		r1.field.qid_p2 = q->id.phys_id;
-	if (i == 3 || i == 7)
-		r1.field.qid_p3 = q->id.phys_id;
-
-	if (i < 4)
-		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
-	else
-		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
-
-	r2.val = DLB2_CSR_RD(hw,
-			     DLB2_ATM_QID2CQIDIX(q->id.phys_id,
-						 p->id.phys_id / 4));
-
-	r3.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID2CQIDIX(q->id.phys_id,
-						 p->id.phys_id / 4));
-
-	r4.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
-						  p->id.phys_id / 4));
-
-	switch (p->id.phys_id % 4) {
-	case 0:
-		r2.field.cq_p0 |= 1 << i;
-		r3.field.cq_p0 |= 1 << i;
-		r4.field.cq_p0 |= 1 << i;
-		break;
-
-	case 1:
-		r2.field.cq_p1 |= 1 << i;
-		r3.field.cq_p1 |= 1 << i;
-		r4.field.cq_p1 |= 1 << i;
-		break;
-
-	case 2:
-		r2.field.cq_p2 |= 1 << i;
-		r3.field.cq_p2 |= 1 << i;
-		r4.field.cq_p2 |= 1 << i;
-		break;
-
-	case 3:
-		r2.field.cq_p3 |= 1 << i;
-		r3.field.cq_p3 |= 1 << i;
-		r4.field.cq_p3 |= 1 << i;
-		break;
-	}
-
-	DLB2_CSR_WR(hw,
-		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
-		    r2.val);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
-		    r3.val);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
-		    r4.val);
-
-	dlb2_flush_csr(hw);
-
-	p->qid_map[i].qid = q->id.phys_id;
-	p->qid_map[i].priority = priority;
-
-	state = DLB2_QUEUE_MAPPED;
-
-	return dlb2_port_slot_state_transition(hw, p, q, i, state);
-}
-
-static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
-					   struct dlb2_ldb_port *port,
-					   struct dlb2_ldb_queue *queue,
-					   int slot)
-{
-	union dlb2_lsp_qid_aqed_active_cnt r0;
-	union dlb2_lsp_qid_ldb_enqueue_cnt r1;
-	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
-
-	/* Set the atomic scheduling haswork bit */
-	r0.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
-
-	r2.field.cq = port->id.phys_id;
-	r2.field.qidix = slot;
-	r2.field.value = 1;
-	r2.field.rlist_haswork_v = r0.field.count > 0;
-
-	/* Set the non-atomic scheduling haswork bit */
-	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
-
-	r1.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
-
-	memset(&r2, 0, sizeof(r2));
-
-	r2.field.cq = port->id.phys_id;
-	r2.field.qidix = slot;
-	r2.field.value = 1;
-	r2.field.nalb_haswork_v = (r1.field.count > 0);
-
-	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
-
-	dlb2_flush_csr(hw);
-
-	return 0;
-}
-
-static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
-					      struct dlb2_ldb_port *port,
-					      u8 slot)
-{
-	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
-
-	r2.field.cq = port->id.phys_id;
-	r2.field.qidix = slot;
-	r2.field.value = 0;
-	r2.field.rlist_haswork_v = 1;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
-
-	memset(&r2, 0, sizeof(r2));
-
-	r2.field.cq = port->id.phys_id;
-	r2.field.qidix = slot;
-	r2.field.value = 0;
-	r2.field.nalb_haswork_v = 1;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
-
-	dlb2_flush_csr(hw);
-}
-
-static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
-					      struct dlb2_ldb_queue *queue)
-{
-	union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
-
-	r0.field.limit = queue->num_qid_inflights;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
-}
-
-static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
-						struct dlb2_ldb_queue *queue)
-{
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
-		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
-}
-
-static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
-						struct dlb2_hw_domain *domain,
-						struct dlb2_ldb_port *port,
-						struct dlb2_ldb_queue *queue)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_lsp_qid_ldb_infl_cnt r0;
-	enum dlb2_qid_map_state state;
-	int slot, ret, i;
-	u8 prio;
-	RTE_SET_USED(iter);
-
-	r0.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
-
-	if (r0.field.count) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: non-zero QID inflight count\n",
-			    __func__);
-		return -EINVAL;
-	}
-
-	/*
-	 * Static map the port and set its corresponding has_work bits.
-	 */
-	state = DLB2_QUEUE_MAP_IN_PROG;
-	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
-		return -EINVAL;
-
-	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: port slot tracking failed\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	prio = port->qid_map[slot].priority;
-
-	/*
-	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
-	 * the port's qid_map state.
-	 */
-	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
-	if (ret)
-		return ret;
-
-	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
-	if (ret)
-		return ret;
-
-	/*
-	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
-	 * prevent spurious schedules to cause the queue's inflight
-	 * count to increase.
-	 */
-	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
-
-	/* Reset the queue's inflight status */
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			state = DLB2_QUEUE_MAPPED;
-			if (!dlb2_port_find_slot_queue(port, state,
-						       queue, &slot))
-				continue;
-
-			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
-		}
-	}
-
-	dlb2_ldb_queue_set_inflight_limit(hw, queue);
-
-	/* Re-enable CQs mapped to this queue */
-	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
-
-	/* If this queue has other mappings pending, clear its inflight limit */
-	if (queue->num_pending_additions > 0)
-		dlb2_ldb_queue_clear_inflight_limit(hw, queue);
-
-	return 0;
-}
-
-/**
- * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
- * @hw: dlb2_hw handle for a particular device.
- * @port: load-balanced port
- * @queue: load-balanced queue
- * @priority: queue servicing priority
- *
- * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
- * at a later point, and <0 if an error occurred.
- */
-static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
-					 struct dlb2_ldb_port *port,
-					 struct dlb2_ldb_queue *queue,
-					 u8 priority)
-{
-	union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
-	enum dlb2_qid_map_state state;
-	struct dlb2_hw_domain *domain;
-	int domain_id, slot, ret;
-
-	domain_id = port->domain_id.phys_id;
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
-	if (domain == NULL) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: unable to find domain %d\n",
-			    __func__, port->domain_id.phys_id);
-		return -EINVAL;
-	}
-
-	/*
-	 * Set the QID inflight limit to 0 to prevent further scheduling of the
-	 * queue.
-	 */
-	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
-
-	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
-		DLB2_HW_ERR(hw,
-			    "Internal error: No available unmapped slots\n");
-		return -EFAULT;
-	}
-
-	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: port slot tracking failed\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	port->qid_map[slot].qid = queue->id.phys_id;
-	port->qid_map[slot].priority = priority;
-
-	state = DLB2_QUEUE_MAP_IN_PROG;
-	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
-	if (ret)
-		return ret;
-
-	r0.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
-
-	if (r0.field.count) {
-		/*
-		 * The queue is owed completions so it's not safe to map it
-		 * yet. Schedule a kernel thread to complete the mapping later,
-		 * once software has completed all the queue's inflight events.
-		 */
-		if (!os_worker_active(hw))
-			os_schedule_work(hw);
-
-		return 1;
-	}
-
-	/*
-	 * Disable the affected CQ, and the CQs already mapped to the QID,
-	 * before reading the QID's inflight count a second time. There is an
-	 * unlikely race in which the QID may schedule one more QE after we
-	 * read an inflight count of 0, and disabling the CQs guarantees that
-	 * the race will not occur after a re-read of the inflight count
-	 * register.
-	 */
-	if (port->enabled)
-		dlb2_ldb_port_cq_disable(hw, port);
-
-	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
-
-	r0.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
-
-	if (r0.field.count) {
-		if (port->enabled)
-			dlb2_ldb_port_cq_enable(hw, port);
-
-		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
-
-		/*
-		 * The queue is owed completions so it's not safe to map it
-		 * yet. Schedule a kernel thread to complete the mapping later,
-		 * once software has completed all the queue's inflight events.
-		 */
-		if (!os_worker_active(hw))
-			os_schedule_work(hw);
-
-		return 1;
-	}
-
-	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
-}
-
-static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain,
-					struct dlb2_ldb_port *port)
-{
-	int i;
-
-	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		union dlb2_lsp_qid_ldb_infl_cnt r0;
-		struct dlb2_ldb_queue *queue;
-		int qid;
-
-		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
-			continue;
-
-		qid = port->qid_map[i].qid;
-
-		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
-
-		if (queue == NULL) {
-			DLB2_HW_ERR(hw,
-				    "[%s()] Internal error: unable to find queue %d\n",
-				    __func__, qid);
-			continue;
-		}
-
-		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
-
-		if (r0.field.count)
-			continue;
-
-		/*
-		 * Disable the affected CQ, and the CQs already mapped to the
-		 * QID, before reading the QID's inflight count a second time.
-		 * There is an unlikely race in which the QID may schedule one
-		 * more QE after we read an inflight count of 0, and disabling
-		 * the CQs guarantees that the race will not occur after a
-		 * re-read of the inflight count register.
-		 */
-		if (port->enabled)
-			dlb2_ldb_port_cq_disable(hw, port);
-
-		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
-
-		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
-
-		if (r0.field.count) {
-			if (port->enabled)
-				dlb2_ldb_port_cq_enable(hw, port);
-
-			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
-
-			continue;
-		}
-
-		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
-	}
-}
-
-static unsigned int
-dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
-				      struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	if (!domain->configured || domain->num_pending_additions == 0)
-		return 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
-			dlb2_domain_finish_map_port(hw, domain, port);
-	}
-
-	return domain->num_pending_additions;
-}
-
-static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
-				   struct dlb2_ldb_port *port,
-				   struct dlb2_ldb_queue *queue)
-{
-	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
-	union dlb2_lsp_cq2priov r0;
-	union dlb2_atm_qid2cqidix_00 r1;
-	union dlb2_lsp_qid2cqidix_00 r2;
-	union dlb2_lsp_qid2cqidix2_00 r3;
-	u32 queue_id;
-	u32 port_id;
-	int i;
-
-	/* Find the queue's slot */
-	mapped = DLB2_QUEUE_MAPPED;
-	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
-	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
-
-	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
-	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
-	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: QID %d isn't mapped\n",
-			    __func__, __LINE__, queue->id.phys_id);
-		return -EFAULT;
-	}
-
-	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: port slot tracking failed\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	port_id = port->id.phys_id;
-	queue_id = queue->id.phys_id;
-
-	/* Read-modify-write the priority and valid bit register */
-	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
-
-	r0.field.v &= ~(1 << i);
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
-
-	r1.val = DLB2_CSR_RD(hw,
-			     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
-
-	r2.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
-
-	r3.val = DLB2_CSR_RD(hw,
-			     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
-
-	switch (port_id % 4) {
-	case 0:
-		r1.field.cq_p0 &= ~(1 << i);
-		r2.field.cq_p0 &= ~(1 << i);
-		r3.field.cq_p0 &= ~(1 << i);
-		break;
-
-	case 1:
-		r1.field.cq_p1 &= ~(1 << i);
-		r2.field.cq_p1 &= ~(1 << i);
-		r3.field.cq_p1 &= ~(1 << i);
-		break;
-
-	case 2:
-		r1.field.cq_p2 &= ~(1 << i);
-		r2.field.cq_p2 &= ~(1 << i);
-		r3.field.cq_p2 &= ~(1 << i);
-		break;
-
-	case 3:
-		r1.field.cq_p3 &= ~(1 << i);
-		r2.field.cq_p3 &= ~(1 << i);
-		r3.field.cq_p3 &= ~(1 << i);
-		break;
-	}
-
-	DLB2_CSR_WR(hw,
-		    DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
-		    r1.val);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
-		    r2.val);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
-		    r3.val);
-
-	dlb2_flush_csr(hw);
-
-	unmapped = DLB2_QUEUE_UNMAPPED;
-
-	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
-}
-
-static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
-				 struct dlb2_hw_domain *domain,
-				 struct dlb2_ldb_port *port,
-				 struct dlb2_ldb_queue *queue,
-				 u8 prio)
-{
-	if (domain->started)
-		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
-	else
-		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
-}
-
-static void
-dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
-				   struct dlb2_hw_domain *domain,
-				   struct dlb2_ldb_port *port,
-				   int slot)
-{
-	enum dlb2_qid_map_state state;
-	struct dlb2_ldb_queue *queue;
-
-	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
-
-	state = port->qid_map[slot].state;
-
-	/* Update the QID2CQIDX and CQ2QID vectors */
-	dlb2_ldb_port_unmap_qid(hw, port, queue);
-
-	/*
-	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
-	 * the has_work bits
-	 */
-	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
-
-	/* Reset the {CQ, slot} to its default state */
-	dlb2_ldb_port_set_queue_if_status(hw, port, slot);
-
-	/* Re-enable the CQ if it wasn't manually disabled by the user */
-	if (port->enabled)
-		dlb2_ldb_port_cq_enable(hw, port);
-
-	/*
-	 * If there is a mapping that is pending this slot's removal, perform
-	 * the mapping now.
-	 */
-	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
-		struct dlb2_ldb_port_qid_map *map;
-		struct dlb2_ldb_queue *map_queue;
-		u8 prio;
-
-		map = &port->qid_map[slot];
-
-		map->qid = map->pending_qid;
-		map->priority = map->pending_priority;
-
-		map_queue = &hw->rsrcs.ldb_queues[map->qid];
-		prio = map->priority;
-
-		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
-	}
-}
-
-static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
-					  struct dlb2_hw_domain *domain,
-					  struct dlb2_ldb_port *port)
-{
-	union dlb2_lsp_cq_ldb_infl_cnt r0;
-	int i;
-
-	if (port->num_pending_removals == 0)
-		return false;
-
-	/*
-	 * The unmap requires all the CQ's outstanding inflights to be
-	 * completed.
-	 */
-	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
-	if (r0.field.count > 0)
-		return false;
-
-	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
-		struct dlb2_ldb_port_qid_map *map;
-
-		map = &port->qid_map[i];
-
-		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
-		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
-			continue;
-
-		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
-	}
-
-	return true;
-}
-
-static unsigned int
-dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	if (!domain->configured || domain->num_pending_removals == 0)
-		return 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
-			dlb2_domain_finish_unmap_port(hw, domain, port);
-	}
-
-	return domain->num_pending_removals;
-}
-
-unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
-{
-	int i, num = 0;
-
-	/* Finish queue unmap jobs for any domain that needs it */
-	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
-		struct dlb2_hw_domain *domain = &hw->domains[i];
-
-		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
-	}
-
-	return num;
-}
-
-unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
-{
-	int i, num = 0;
-
-	/* Finish queue map jobs for any domain that needs it */
-	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
-		struct dlb2_hw_domain *domain = &hw->domains[i];
-
-		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
-	}
-
-	return num;
-}
-
 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
 {
 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
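
For reference, the two public entry points removed above are re-added
in dlb2_resource_new.c below, now with kernel-doc headers. A minimal
sketch of how the OS worker might drive them (the dlb2_service_poll()
wrapper is hypothetical; os_worker_active() and os_schedule_work() are
the OS-dependent helpers already used in the code above):

static void dlb2_service_poll(struct dlb2_hw *hw)
{
	unsigned int pending;

	/* Unmaps first: a pending map may be waiting on a slot that
	 * only becomes free once its unmap completes
	 * (DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP).
	 */
	pending = dlb2_finish_unmap_qid_procedures(hw);
	pending += dlb2_finish_map_qid_procedures(hw);

	/* Anything still outstanding is owed CQ inflights; re-arm the
	 * worker and try again later.
	 */
	if (pending > 0 && !os_worker_active(hw))
		os_schedule_work(hw);
}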
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource_new.c b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
index 6a5af0c1e..8cd1762cf 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource_new.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
@@ -6039,3 +6039,53 @@ int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
 
 	return 0;
 }
+
+/**
+ * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function attempts to finish any outstanding unmap procedures.
+ * This function should be called by the kernel thread responsible for
+ * finishing map/unmap procedures.
+ *
+ * Return:
+ * Returns the number of procedures that weren't completed.
+ */
+unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
+{
+	int i, num = 0;
+
+	/* Finish queue unmap jobs for any domain that needs it */
+	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
+		struct dlb2_hw_domain *domain = &hw->domains[i];
+
+		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
+	}
+
+	return num;
+}
+
+/**
+ * dlb2_finish_map_qid_procedures() - finish any pending map procedures
+ * @hw: dlb2_hw handle for a particular device.
+ *
+ * This function attempts to finish any outstanding map procedures.
+ * This function should be called by the kernel thread responsible for
+ * finishing map/unmap procedures.
+ *
+ * Return:
+ * Returns the number of procedures that weren't completed.
+ */
+unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
+{
+	int i, num = 0;
+
+	/* Finish queue map jobs for any domain that needs it */
+	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
+		struct dlb2_hw_domain *domain = &hw->domains[i];
+
+		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
+	}
+
+	return num;
+}
-- 
2.23.0
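
For reference, the per-slot state machine enforced by
dlb2_port_slot_state_transition() (relocated by this patch) permits the
following transitions, restated here as a compact validity check
(illustrative sketch only; the state names are the driver's
enum dlb2_qid_map_state values):

static bool dlb2_qid_map_transition_is_valid(enum dlb2_qid_map_state from,
					     enum dlb2_qid_map_state to)
{
	switch (from) {
	case DLB2_QUEUE_UNMAPPED:
		return to == DLB2_QUEUE_MAPPED ||
		       to == DLB2_QUEUE_MAP_IN_PROG;
	case DLB2_QUEUE_MAPPED:
		/* MAPPED -> MAPPED is a priority change */
		return to == DLB2_QUEUE_UNMAPPED ||
		       to == DLB2_QUEUE_UNMAP_IN_PROG ||
		       to == DLB2_QUEUE_MAPPED;
	case DLB2_QUEUE_MAP_IN_PROG:
		return to == DLB2_QUEUE_UNMAPPED ||
		       to == DLB2_QUEUE_MAPPED;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		return to == DLB2_QUEUE_UNMAPPED ||
		       to == DLB2_QUEUE_MAPPED ||
		       to == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		/* Briefly passes through UNMAPPED before MAP_IN_PROG */
		return to == DLB2_QUEUE_UNMAP_IN_PROG ||
		       to == DLB2_QUEUE_UNMAPPED;
	default:
		return false;
	}
}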