From mboxrd@z Thu Jan 1 00:00:00 1970
From: Timothy McDaniel
To:
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com
Date: Fri, 11 Sep 2020 14:18:29 -0500
Message-Id: <1599851920-16802-12-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v4 11/22] event/dlb: add port setup

Configure the load balanced (ldb) or directed (dir) port. The consumer
queue (CQ) and producer port (PP) are also set up here.

Signed-off-by: Timothy McDaniel
---
 drivers/event/dlb/dlb.c                  |  539 +++++++++++
 drivers/event/dlb/dlb_iface.c            |   11 +
 drivers/event/dlb/dlb_iface.h            |   14 +
 drivers/event/dlb/pf/base/dlb_resource.c | 1430 ++++++++++++++++++++++++++++++
 drivers/event/dlb/pf/dlb_pf.c            |  204 +++++
 5 files changed, 2198 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 0b474a5..e90a088 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -157,6 +157,75 @@ dlb_free_qe_mem(struct dlb_port *qm_port)
 	}
 }
 
+static int
+dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
+{
+	struct dlb_cq_pop_qe *qe;
+
+	qe = rte_malloc(mz_name,
+			DLB_NUM_QES_PER_CACHE_LINE *
+				sizeof(struct dlb_cq_pop_qe),
+			RTE_CACHE_LINE_SIZE);
+
+	if (qe == NULL) {
+		DLB_LOG_ERR("dlb: no memory for consume_qe\n");
+		return -ENOMEM;
+	}
+
+	qm_port->consume_qe = qe;
+
+	memset(qe, 0, DLB_NUM_QES_PER_CACHE_LINE *
+	       sizeof(struct dlb_cq_pop_qe));
+
+	qe->qe_valid = 0;
+	qe->qe_frag = 0;
+	qe->qe_comp = 0;
+	qe->cq_token = 1;
+	/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
+	 * and so on.
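For illustration, a minimal sketch of how a caller could use the 0-based
tokens field described in the comment above; the helper name is an
assumption and is not part of this patch.

/* Illustrative only: encode an N-token CQ pop (N >= 1) using the 0-based
 * tokens convention noted above. Helper name is hypothetical.
 */
static inline void
dlb_encode_token_pop(struct dlb_cq_pop_qe *qe, uint16_t num_tokens)
{
	qe->tokens = num_tokens - 1;	/* 0 pops 1 token, 1 pops 2, ... */
}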
+ */ + qe->tokens = 0; /* set at run time */ + qe->meas_lat = 0; + qe->no_dec = 0; + /* Completion IDs are disabled */ + qe->cmp_id = 0; + + return 0; +} + +int +dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name) +{ + int ret, sz; + + sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe); + + qm_port->qe4 = rte_malloc(mz_name, sz, RTE_CACHE_LINE_SIZE); + + if (qm_port->qe4 == NULL) { + DLB_LOG_ERR("dlb: no qe4 memory\n"); + ret = -ENOMEM; + goto error_exit; + } + + memset(qm_port->qe4, 0, sz); + + ret = dlb_init_consume_qe(qm_port, mz_name); + if (ret < 0) { + DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", + ret); + goto error_exit; + } + + return 0; + +error_exit: + + dlb_free_qe_mem(qm_port); + + return ret; +} + /* Wrapper for string to int conversion. Substituted for atoi(...), which is * unsafe. */ @@ -662,6 +731,348 @@ dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev, queue_conf->priority = 0; } +static int +dlb_hw_create_ldb_port(struct dlb_eventdev *dlb, + struct dlb_eventdev_port *ev_port, + uint32_t dequeue_depth, + uint32_t cq_depth, + uint32_t enqueue_depth, + uint16_t rsvd_tokens, + bool use_rsvd_token_scheme) +{ + struct dlb_hw_dev *handle = &dlb->qm_instance; + struct dlb_create_ldb_port_args cfg = {0}; + struct dlb_cmd_response response = {0}; + int ret; + struct dlb_port *qm_port = NULL; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t qm_port_id; + + if (handle == NULL) + return -EINVAL; + + if (cq_depth < DLB_MIN_LDB_CQ_DEPTH || + cq_depth > DLB_MAX_INPUT_QUEUE_DEPTH) { + DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n", + DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH); + return -EINVAL; + } + + if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) { + DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n", + DLB_MIN_ENQUEUE_DEPTH); + return -EINVAL; + } + + rte_spinlock_lock(&handle->resource_lock); + + cfg.response = (uintptr_t)&response; + + /* We round up to the next power of 2 if necessary */ + cfg.cq_depth = rte_align32pow2(cq_depth); + cfg.cq_depth_threshold = rsvd_tokens; + + cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; + + /* User controls the LDB high watermark via enqueue depth. The DIR high + * watermark is equal, unless the directed credit pool is too small. + */ + cfg.ldb_credit_high_watermark = enqueue_depth; + + /* If there are no directed ports, the kernel driver will ignore this + * port's directed credit settings. Don't use enqueue_depth if it would + * require more directed credits than are available. 
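A worked example of the watermark arithmetic used in this function, with
assumed input values (enqueue depth 128, 1024 directed credits shared by
32 ports); it simply restates the assignments that follow.

/* Worked example of the credit watermark derivation (values are assumed). */
static void
dlb_watermark_example(void)
{
	uint32_t enqueue_depth = 128;
	uint32_t num_dir_credits = 1024, num_ports = 32;
	uint32_t ldb_hwm, dir_hwm, dir_quantum, dir_lwm;

	ldb_hwm = enqueue_depth;				/* 128 */
	dir_hwm = RTE_MIN(enqueue_depth,
			  num_dir_credits / num_ports);		/* min(128, 32) = 32 */
	dir_quantum = dir_hwm / 2;				/* 16 */
	dir_lwm = RTE_MIN(16, dir_quantum);			/* 16 */

	RTE_SET_USED(ldb_hwm);
	RTE_SET_USED(dir_lwm);
}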
+ */ + cfg.dir_credit_high_watermark = + RTE_MIN(enqueue_depth, + handle->cfg.num_dir_credits / dlb->num_ports); + + cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2; + cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum); + + cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2; + cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum); + + /* Per QM values */ + + cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id; + cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id; + + ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode); + if (ret < 0) { + DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n", + ret, dlb_error_strings[response.status]); + goto error_exit; + } + + qm_port_id = response.id; + + DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n", + ev_port->id, qm_port_id); + + qm_port = &ev_port->qm_port; + qm_port->ev_port = ev_port; /* back ptr */ + qm_port->dlb = dlb; /* back ptr */ + + /* + * Allocate and init local qe struct(s). + * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned. + */ + + snprintf(mz_name, sizeof(mz_name), "%s_ldb_port%d", + handle->device_name, + ev_port->id); + + ret = dlb_init_qe_mem(qm_port, mz_name); + if (ret < 0) { + DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret); + goto error_exit; + } + + qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id; + qm_port->id = qm_port_id; + + /* The credit window is one high water mark of QEs */ + qm_port->ldb_pushcount_at_credit_expiry = 0; + qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark; + /* The credit window is one high water mark of QEs */ + qm_port->dir_pushcount_at_credit_expiry = 0; + qm_port->cached_dir_credits = cfg.dir_credit_high_watermark; + qm_port->cq_depth = cfg.cq_depth; + /* CQs with depth < 8 use an 8-entry queue, but withhold credits so + * the effective depth is smaller. + */ + qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth; + qm_port->cq_idx = 0; + qm_port->cq_idx_unmasked = 0; + if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE) + qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1; + else + qm_port->cq_depth_mask = qm_port->cq_depth - 1; + + qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask); + /* starting value of gen bit - it toggles at wrap time */ + qm_port->gen_bit = 1; + + qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme; + qm_port->cq_rsvd_token_deficit = rsvd_tokens; + qm_port->int_armed = false; + + /* Save off for later use in info and lookup APIs. */ + qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0]; + + /* When using the reserved token scheme, token_pop_thresh is + * initially 2 * dequeue_depth. Once the tokens are reserved, + * the enqueue code re-assigns it to dequeue_depth. + */ + qm_port->dequeue_depth = dequeue_depth; + qm_port->token_pop_thresh = cq_depth; + + /* When the deferred scheduling vdev arg is selected, use deferred pop + * for all single-entry CQs. + */ + if (cfg.cq_depth == 1 || (cfg.cq_depth == 2 && use_rsvd_token_scheme)) { + if (dlb->defer_sched) + qm_port->token_pop_mode = DEFERRED_POP; + } + + qm_port->owed_tokens = 0; + qm_port->issued_releases = 0; + + /* Save config message too. 
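As context for the cq_depth_mask, gen_bit_shift and gen_bit fields
initialized above, a sketch of how a dequeue path typically advances
them. The actual dequeue code is not part of this patch, so this is
illustrative only.

/* Illustrative only (dequeue is not in this patch): advance the CQ index
 * and generation bit initialized above. The gen bit flips each time the
 * unmasked index crosses a ring-size boundary.
 */
static inline void
dlb_cq_advance(struct dlb_port *qm_port, uint16_t num_qes)
{
	uint16_t idx = qm_port->cq_idx_unmasked + num_qes;

	qm_port->cq_idx_unmasked = idx;
	qm_port->cq_idx = idx & qm_port->cq_depth_mask;
	qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
}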
*/ + rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(cfg)); + + /* update state */ + qm_port->state = PORT_STARTED; /* enabled at create time */ + qm_port->config_state = DLB_CONFIGURED; + + qm_port->dir_credits = cfg.dir_credit_high_watermark; + qm_port->ldb_credits = cfg.ldb_credit_high_watermark; + + DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n", + qm_port_id, + cq_depth, + qm_port->ldb_credits, + qm_port->dir_credits); + + rte_spinlock_unlock(&handle->resource_lock); + + return 0; + +error_exit: + if (qm_port) { + dlb_free_qe_mem(qm_port); + qm_port->pp_mmio_base = 0; + } + + rte_spinlock_unlock(&handle->resource_lock); + + DLB_LOG_ERR("dlb: create ldb port failed!\n"); + + return ret; +} + +static int +dlb_hw_create_dir_port(struct dlb_eventdev *dlb, + struct dlb_eventdev_port *ev_port, + uint32_t dequeue_depth, + uint32_t cq_depth, + uint32_t enqueue_depth, + uint16_t rsvd_tokens, + bool use_rsvd_token_scheme) +{ + struct dlb_hw_dev *handle = &dlb->qm_instance; + struct dlb_create_dir_port_args cfg = {0}; + struct dlb_cmd_response response = {0}; + int ret; + struct dlb_port *qm_port = NULL; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t qm_port_id; + + if (dlb == NULL || handle == NULL) + return -EINVAL; + + if (cq_depth < DLB_MIN_DIR_CQ_DEPTH || + cq_depth > DLB_MAX_INPUT_QUEUE_DEPTH) { + DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n", + DLB_MIN_DIR_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH); + return -EINVAL; + } + + rte_spinlock_lock(&handle->resource_lock); + + /* Directed queues are configured at link time. */ + cfg.queue_id = -1; + + cfg.response = (uintptr_t)&response; + + /* We round up to the next power of 2 if necessary */ + cfg.cq_depth = rte_align32pow2(cq_depth); + cfg.cq_depth_threshold = rsvd_tokens; + + /* User controls the LDB high watermark via enqueue depth. The DIR high + * watermark is equal, unless the directed credit pool is too small. + */ + cfg.ldb_credit_high_watermark = enqueue_depth; + + /* Don't use enqueue_depth if it would require more directed credits + * than are available. + */ + cfg.dir_credit_high_watermark = + RTE_MIN(enqueue_depth, + handle->cfg.num_dir_credits / dlb->num_ports); + + cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2; + cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum); + + cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2; + cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum); + + /* Per QM values */ + + cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id; + cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id; + + ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode); + if (ret < 0) { + DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n", + ret, dlb_error_strings[response.status]); + goto error_exit; + } + + qm_port_id = response.id; + + DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n", + ev_port->id, qm_port_id); + + qm_port = &ev_port->qm_port; + qm_port->ev_port = ev_port; /* back ptr */ + qm_port->dlb = dlb; /* back ptr */ + + /* + * Init local qe struct(s). 
+ * Note: MOVDIR64 requires the enqueue QE to be aligned + */ + + snprintf(mz_name, sizeof(mz_name), "%s_dir_port%d", + handle->device_name, + ev_port->id); + + ret = dlb_init_qe_mem(qm_port, mz_name); + + if (ret < 0) { + DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret); + goto error_exit; + } + + qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id; + qm_port->id = qm_port_id; + + /* The credit window is one high water mark of QEs */ + qm_port->ldb_pushcount_at_credit_expiry = 0; + qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark; + /* The credit window is one high water mark of QEs */ + qm_port->dir_pushcount_at_credit_expiry = 0; + qm_port->cached_dir_credits = cfg.dir_credit_high_watermark; + qm_port->cq_depth = cfg.cq_depth; + qm_port->cq_idx = 0; + qm_port->cq_idx_unmasked = 0; + if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE) + qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1; + else + qm_port->cq_depth_mask = cfg.cq_depth - 1; + + qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask); + /* starting value of gen bit - it toggles at wrap time */ + qm_port->gen_bit = 1; + + qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme; + qm_port->cq_rsvd_token_deficit = rsvd_tokens; + qm_port->int_armed = false; + + /* Save off for later use in info and lookup APIs. */ + qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0]; + + qm_port->dequeue_depth = dequeue_depth; + + /* Directed ports are auto-pop, by default. */ + qm_port->token_pop_mode = AUTO_POP; + qm_port->owed_tokens = 0; + qm_port->issued_releases = 0; + + /* Save config message too. */ + rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(cfg)); + + /* update state */ + qm_port->state = PORT_STARTED; /* enabled at create time */ + qm_port->config_state = DLB_CONFIGURED; + + qm_port->dir_credits = cfg.dir_credit_high_watermark; + qm_port->ldb_credits = cfg.ldb_credit_high_watermark; + + DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n", + qm_port_id, + cq_depth, + cfg.dir_credit_high_watermark, + cfg.ldb_credit_high_watermark); + + rte_spinlock_unlock(&handle->resource_lock); + + return 0; + +error_exit: + if (qm_port) { + qm_port->pp_mmio_base = 0; + dlb_free_qe_mem(qm_port); + } + + rte_spinlock_unlock(&handle->resource_lock); + + DLB_LOG_ERR("dlb: create dir port failed!\n"); + + return ret; +} + static int32_t dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb, struct dlb_queue *queue, @@ -955,6 +1366,133 @@ dlb_eventdev_queue_setup(struct rte_eventdev *dev, return ret; } +static void +dlb_port_link_teardown(struct dlb_eventdev *dlb, + struct dlb_eventdev_port *ev_port) +{ + struct dlb_eventdev_queue *ev_queue; + int i; + + for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + if (!ev_port->link[i].valid) + continue; + + ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id]; + + ev_port->link[i].valid = false; + ev_port->num_links--; + ev_queue->num_links--; + } +} + +static int +dlb_eventdev_port_setup(struct rte_eventdev *dev, + uint8_t ev_port_id, + const struct rte_event_port_conf *port_conf) +{ + struct dlb_eventdev *dlb; + struct dlb_eventdev_port *ev_port; + bool use_rsvd_token_scheme; + uint32_t adj_cq_depth; + uint16_t rsvd_tokens; + int ret; + + if (dev == NULL || port_conf == NULL) { + DLB_LOG_ERR("Null parameter\n"); + return -EINVAL; + } + + dlb = dlb_pmd_priv(dev); + + if (ev_port_id >= DLB_MAX_NUM_PORTS) + return -EINVAL; + + if (port_conf->dequeue_depth > + evdev_dlb_default_info.max_event_port_dequeue_depth || + port_conf->enqueue_depth > + 
evdev_dlb_default_info.max_event_port_enqueue_depth) + return -EINVAL; + + ev_port = &dlb->ev_ports[ev_port_id]; + /* configured? */ + if (ev_port->setup_done) { + DLB_LOG_ERR("evport %d is already configured\n", ev_port_id); + return -EINVAL; + } + + /* The reserved token interrupt arming scheme requires that one or more + * CQ tokens be reserved by the PMD. This limits the amount of CQ space + * usable by the DLB, so in order to give an *effective* CQ depth equal + * to the user-requested value, we double CQ depth and reserve half of + * its tokens. If the user requests the max CQ depth (256) then we + * cannot double it, so we reserve one token and give an effective + * depth of 255 entries. + */ + use_rsvd_token_scheme = true; + rsvd_tokens = 1; + adj_cq_depth = port_conf->dequeue_depth; + + if (use_rsvd_token_scheme && adj_cq_depth < 256) { + rsvd_tokens = adj_cq_depth; + adj_cq_depth *= 2; + } + + ev_port->qm_port.is_directed = port_conf->event_port_cfg & + RTE_EVENT_PORT_CFG_SINGLE_LINK; + + if (!ev_port->qm_port.is_directed) { + ret = dlb_hw_create_ldb_port(dlb, + ev_port, + port_conf->dequeue_depth, + adj_cq_depth, + port_conf->enqueue_depth, + rsvd_tokens, + use_rsvd_token_scheme); + if (ret < 0) { + DLB_LOG_ERR("Failed to create the lB port ve portId=%d\n", + ev_port_id); + return ret; + } + } else { + ret = dlb_hw_create_dir_port(dlb, + ev_port, + port_conf->dequeue_depth, + adj_cq_depth, + port_conf->enqueue_depth, + rsvd_tokens, + use_rsvd_token_scheme); + if (ret < 0) { + DLB_LOG_ERR("Failed to create the DIR port\n"); + return ret; + } + } + + /* Save off port config for reconfig */ + dlb->ev_ports[ev_port_id].conf = *port_conf; + + dlb->ev_ports[ev_port_id].id = ev_port_id; + dlb->ev_ports[ev_port_id].enq_configured = true; + dlb->ev_ports[ev_port_id].setup_done = true; + dlb->ev_ports[ev_port_id].inflight_max = + port_conf->new_event_threshold; + dlb->ev_ports[ev_port_id].implicit_release = + !(port_conf->event_port_cfg & + RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL); + dlb->ev_ports[ev_port_id].outstanding_releases = 0; + dlb->ev_ports[ev_port_id].inflight_credits = 0; + dlb->ev_ports[ev_port_id].credit_update_quanta = + RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA; + dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */ + + /* Tear down pre-existing port->queue links */ + if (dlb->run_state == DLB_RUN_STATE_STOPPED) + dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]); + + dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id]; + + return 0; +} + static int set_dev_id(const char *key __rte_unused, const char *value, @@ -1034,6 +1572,7 @@ dlb_entry_points_init(struct rte_eventdev *dev) .queue_def_conf = dlb_eventdev_queue_default_conf_get, .port_def_conf = dlb_eventdev_port_default_conf_get, .queue_setup = dlb_eventdev_queue_setup, + .port_setup = dlb_eventdev_port_setup, .dump = dlb_eventdev_dump, .xstats_get = dlb_eventdev_xstats_get, .xstats_get_names = dlb_eventdev_xstats_get_names, diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c index b5757c9..0302be4 100644 --- a/drivers/event/dlb/dlb_iface.c +++ b/drivers/event/dlb/dlb_iface.c @@ -47,9 +47,20 @@ int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle, int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle, struct dlb_create_dir_pool_args *cfg); +int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle, + struct dlb_create_dir_queue_args *cfg); + int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle, struct dlb_create_ldb_queue_args *cfg); +int 
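From the application side, the reserved-token path above is reached
through rte_event_port_setup(). A hedged usage sketch follows; the device
and port IDs, depths and threshold are assumptions. With dequeue_depth =
32 the PMD reserves 32 tokens and programs a 64-entry hardware CQ, so the
effective depth seen by the application is still 32.

#include <rte_eventdev.h>

/* Application-side sketch (IDs, depths and threshold are assumptions). */
static int
setup_dlb_ldb_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf = {
		.new_event_threshold = 1024,
		.dequeue_depth = 32,
		.enqueue_depth = 64,
		.event_port_cfg = 0,	/* load-balanced, implicit release */
	};

	return rte_event_port_setup(dev_id, port_id, &conf);
}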
(*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle, + struct dlb_create_ldb_port_args *cfg, + enum dlb_cq_poll_modes poll_mode); + +int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle, + struct dlb_create_dir_port_args *cfg, + enum dlb_cq_poll_modes poll_mode); + int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle, enum dlb_cq_poll_modes *mode); diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h index af1416d..d578185 100644 --- a/drivers/event/dlb/dlb_iface.h +++ b/drivers/event/dlb/dlb_iface.h @@ -35,6 +35,20 @@ extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle, extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle, struct dlb_create_ldb_queue_args *cfg); +extern int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle, + struct dlb_create_dir_queue_args *cfg); + +extern int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle, + struct dlb_create_ldb_port_args *cfg, + enum dlb_cq_poll_modes poll_mode); + +extern int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle, + struct dlb_create_dir_port_args *cfg, + enum dlb_cq_poll_modes poll_mode); + +extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle, + struct dlb_create_ldb_queue_args *cfg); + extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle, enum dlb_cq_poll_modes *mode); diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c index 2b80e03..a4d9e4a 100644 --- a/drivers/event/dlb/pf/base/dlb_resource.c +++ b/drivers/event/dlb/pf/base/dlb_resource.c @@ -4598,3 +4598,1433 @@ int dlb_hw_create_ldb_queue(struct dlb_hw *hw, return 0; } + + +static void +dlb_log_create_dir_queue_args(struct dlb_hw *hw, + u32 domain_id, + struct dlb_create_dir_queue_args *args) +{ + DLB_HW_INFO(hw, "DLB create directed queue arguments:\n"); + DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id); + DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id); +} + +static struct dlb_dir_pq_pair * +dlb_get_domain_used_dir_pq(u32 id, struct dlb_domain *domain) +{ + struct dlb_list_entry *iter; + struct dlb_dir_pq_pair *port; + RTE_SET_USED(iter); + + if (id >= DLB_MAX_NUM_DIR_PORTS) + return NULL; + + DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) + if (port->id == id) + return port; + + return NULL; +} + +static int +dlb_verify_create_dir_queue_args(struct dlb_hw *hw, + u32 domain_id, + struct dlb_create_dir_queue_args *args, + struct dlb_cmd_response *resp) +{ + struct dlb_domain *domain; + + domain = dlb_get_domain_from_id(hw, domain_id); + + if (!domain) { + resp->status = DLB_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = DLB_ST_DOMAIN_STARTED; + return -1; + } + + /* If the user claims the port is already configured, validate the port + * ID, its domain, and whether the port is configured. + */ + if (args->port_id != -1) { + struct dlb_dir_pq_pair *port; + + port = dlb_get_domain_used_dir_pq(args->port_id, domain); + + if (!port || port->domain_id != domain->id || + !port->port_configured) { + resp->status = DLB_ST_INVALID_PORT_ID; + return -1; + } + } + + /* If the queue's port is not configured, validate that a free + * port-queue pair is available. 
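Returning briefly to the dlb_iface function pointers added above: a
sketch of how a backend is expected to wire them to its implementations.
The registration function shown here is an assumption; this patch only
declares the pointers and adds the PF implementations in dlb_pf.c.

/* Hypothetical wiring of the new entry points by the PF backend. */
static void
dlb_pf_iface_wiring_example(void)
{
	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
	dlb_iface_ldb_port_create  = dlb_pf_ldb_port_create;
	dlb_iface_dir_port_create  = dlb_pf_dir_port_create;
}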
+ */ + if (args->port_id == -1 && + dlb_list_empty(&domain->avail_dir_pq_pairs)) { + resp->status = DLB_ST_DIR_QUEUES_UNAVAILABLE; + return -1; + } + + return 0; +} + +static void dlb_configure_dir_queue(struct dlb_hw *hw, + struct dlb_domain *domain, + struct dlb_dir_pq_pair *queue) +{ + union dlb_sys_dir_vasqid_v r0 = { {0} }; + union dlb_sys_dir_qid_v r1 = { {0} }; + unsigned int offs; + + /* QID write permissions are turned on when the domain is started */ + r0.field.vasqid_v = 0; + + offs = (domain->id * DLB_MAX_NUM_DIR_PORTS) + queue->id; + + DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val); + + r1.field.qid_v = 1; + + DLB_CSR_WR(hw, DLB_SYS_DIR_QID_V(queue->id), r1.val); + + queue->queue_configured = true; +} + +/** + * dlb_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue. + * @hw: Contains the current state of the DLB hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int dlb_hw_create_dir_queue(struct dlb_hw *hw, + u32 domain_id, + struct dlb_create_dir_queue_args *args, + struct dlb_cmd_response *resp) +{ + struct dlb_dir_pq_pair *queue; + struct dlb_domain *domain; + + dlb_log_create_dir_queue_args(hw, domain_id, args); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (dlb_verify_create_dir_queue_args(hw, domain_id, args, resp)) + return -EINVAL; + + domain = dlb_get_domain_from_id(hw, domain_id); + if (!domain) { + DLB_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + if (args->port_id != -1) + queue = dlb_get_domain_used_dir_pq(args->port_id, domain); + else + queue = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs, + typeof(*queue)); + + /* Verification should catch this. */ + if (!queue) { + DLB_HW_ERR(hw, + "[%s():%d] Internal error: no available dir queues\n", + __func__, __LINE__); + return -EFAULT; + } + + dlb_configure_dir_queue(hw, domain, queue); + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list (if it's not already there). 
+ */ + if (args->port_id == -1) { + dlb_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list); + + dlb_list_add(&domain->used_dir_pq_pairs, &queue->domain_list); + } + + resp->status = 0; + + resp->id = queue->id; + + return 0; +} + +static void dlb_log_create_ldb_port_args(struct dlb_hw *hw, + u32 domain_id, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_ldb_port_args *args) +{ + DLB_HW_INFO(hw, "DLB create load-balanced port arguments:\n"); + DLB_HW_INFO(hw, "\tDomain ID: %d\n", + domain_id); + DLB_HW_INFO(hw, "\tLDB credit pool ID: %d\n", + args->ldb_credit_pool_id); + DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n", + args->ldb_credit_high_watermark); + DLB_HW_INFO(hw, "\tLDB credit low watermark: %d\n", + args->ldb_credit_low_watermark); + DLB_HW_INFO(hw, "\tLDB credit quantum: %d\n", + args->ldb_credit_quantum); + DLB_HW_INFO(hw, "\tDIR credit pool ID: %d\n", + args->dir_credit_pool_id); + DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n", + args->dir_credit_high_watermark); + DLB_HW_INFO(hw, "\tDIR credit low watermark: %d\n", + args->dir_credit_low_watermark); + DLB_HW_INFO(hw, "\tDIR credit quantum: %d\n", + args->dir_credit_quantum); + DLB_HW_INFO(hw, "\tpop_count_address: 0x%"PRIx64"\n", + pop_count_dma_base); + DLB_HW_INFO(hw, "\tCQ depth: %d\n", + args->cq_depth); + DLB_HW_INFO(hw, "\tCQ hist list size: %d\n", + args->cq_history_list_size); + DLB_HW_INFO(hw, "\tCQ base address: 0x%"PRIx64"\n", + cq_dma_base); +} + +static struct dlb_credit_pool * +dlb_get_domain_ldb_pool(u32 id, struct dlb_domain *domain) +{ + struct dlb_list_entry *iter; + struct dlb_credit_pool *pool; + RTE_SET_USED(iter); + + if (id >= DLB_MAX_NUM_LDB_CREDIT_POOLS) + return NULL; + + DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) + if (pool->id == id) + return pool; + + return NULL; +} + +static struct dlb_credit_pool * +dlb_get_domain_dir_pool(u32 id, struct dlb_domain *domain) +{ + struct dlb_list_entry *iter; + struct dlb_credit_pool *pool; + RTE_SET_USED(iter); + + if (id >= DLB_MAX_NUM_DIR_CREDIT_POOLS) + return NULL; + + DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) + if (pool->id == id) + return pool; + + return NULL; +} + +static int +dlb_verify_create_ldb_port_args(struct dlb_hw *hw, + u32 domain_id, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_ldb_port_args *args, + struct dlb_cmd_response *resp) +{ + struct dlb_domain *domain; + struct dlb_credit_pool *pool; + + domain = dlb_get_domain_from_id(hw, domain_id); + + if (!domain) { + resp->status = DLB_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = DLB_ST_DOMAIN_STARTED; + return -1; + } + + if (dlb_list_empty(&domain->avail_ldb_ports)) { + resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE; + return -1; + } + + /* If the scheduling domain has no LDB queues, we configure the + * hardware to not supply the port with any LDB credits. In that + * case, ignore the LDB credit arguments. 
+ */ + if (!dlb_list_empty(&domain->used_ldb_queues) || + !dlb_list_empty(&domain->avail_ldb_queues)) { + pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id, + domain); + + if (!pool || !pool->configured || + pool->domain_id != domain->id) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID; + return -1; + } + + if (args->ldb_credit_high_watermark > pool->avail_credits) { + resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->ldb_credit_low_watermark >= + args->ldb_credit_high_watermark) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->ldb_credit_quantum >= + args->ldb_credit_high_watermark) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + + if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + } + + /* Likewise, if the scheduling domain has no DIR queues, we configure + * the hardware to not supply the port with any DIR credits. In that + * case, ignore the DIR credit arguments. + */ + if (!dlb_list_empty(&domain->used_dir_pq_pairs) || + !dlb_list_empty(&domain->avail_dir_pq_pairs)) { + pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, + domain); + + if (!pool || !pool->configured || + pool->domain_id != domain->id) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID; + return -1; + } + + if (args->dir_credit_high_watermark > pool->avail_credits) { + resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->dir_credit_low_watermark >= + args->dir_credit_high_watermark) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->dir_credit_quantum >= + args->dir_credit_high_watermark) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + + if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + } + + /* Check cache-line alignment */ + if ((pop_count_dma_base & 0x3F) != 0) { + resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR; + return -1; + } + + if ((cq_dma_base & 0x3F) != 0) { + resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR; + return -1; + } + + if (args->cq_depth != 1 && + args->cq_depth != 2 && + args->cq_depth != 4 && + args->cq_depth != 8 && + args->cq_depth != 16 && + args->cq_depth != 32 && + args->cq_depth != 64 && + args->cq_depth != 128 && + args->cq_depth != 256 && + args->cq_depth != 512 && + args->cq_depth != 1024) { + resp->status = DLB_ST_INVALID_CQ_DEPTH; + return -1; + } + + /* The history list size must be >= 1 */ + if (!args->cq_history_list_size) { + resp->status = DLB_ST_INVALID_HIST_LIST_DEPTH; + return -1; + } + + if (args->cq_history_list_size > domain->avail_hist_list_entries) { + resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE; + return -1; + } + + return 0; +} + +static void dlb_ldb_pool_update_credit_count(struct dlb_hw *hw, + u32 pool_id, + u32 count) +{ + hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count; +} + +static void dlb_dir_pool_update_credit_count(struct dlb_hw *hw, + u32 pool_id, + u32 count) +{ + hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count; +} + +static int dlb_ldb_port_configure_pp(struct dlb_hw *hw, + struct dlb_domain *domain, + struct dlb_ldb_port *port, + struct dlb_create_ldb_port_args *args) +{ + union dlb_sys_ldb_pp2ldbpool r0 = { {0} }; + union dlb_sys_ldb_pp2dirpool r1 = { {0} }; + union dlb_sys_ldb_pp2vf_pf r2 = { {0} }; + union dlb_sys_ldb_pp2vas r3 = { {0} }; + union dlb_sys_ldb_pp_v r4 
= { {0} }; + union dlb_chp_ldb_pp_ldb_crd_hwm r6 = { {0} }; + union dlb_chp_ldb_pp_dir_crd_hwm r7 = { {0} }; + union dlb_chp_ldb_pp_ldb_crd_lwm r8 = { {0} }; + union dlb_chp_ldb_pp_dir_crd_lwm r9 = { {0} }; + union dlb_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} }; + union dlb_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} }; + union dlb_chp_ldb_pp_ldb_crd_cnt r12 = { {0} }; + union dlb_chp_ldb_pp_dir_crd_cnt r13 = { {0} }; + union dlb_chp_ldb_ldb_pp2pool r14 = { {0} }; + union dlb_chp_ldb_dir_pp2pool r15 = { {0} }; + union dlb_chp_ldb_pp_crd_req_state r16 = { {0} }; + union dlb_chp_ldb_pp_ldb_push_ptr r17 = { {0} }; + union dlb_chp_ldb_pp_dir_push_ptr r18 = { {0} }; + + struct dlb_credit_pool *ldb_pool = NULL; + struct dlb_credit_pool *dir_pool = NULL; + + if (port->ldb_pool_used) { + ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id, + domain); + if (!ldb_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + if (port->dir_pool_used) { + dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, + domain); + if (!dir_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0; + + DLB_CSR_WR(hw, DLB_SYS_LDB_PP2LDBPOOL(port->id), r0.val); + + r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0; + + DLB_CSR_WR(hw, DLB_SYS_LDB_PP2DIRPOOL(port->id), r1.val); + + r2.field.is_pf = 1; + + DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VF_PF(port->id), r2.val); + + r3.field.vas = domain->id; + + DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VAS(port->id), r3.val); + + r6.field.hwm = args->ldb_credit_high_watermark; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id), r6.val); + + r7.field.hwm = args->dir_credit_high_watermark; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id), r7.val); + + r8.field.lwm = args->ldb_credit_low_watermark; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id), r8.val); + + r9.field.lwm = args->dir_credit_low_watermark; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id), r9.val); + + r10.field.quanta = args->ldb_credit_quantum; + + DLB_CSR_WR(hw, + DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id), + r10.val); + + r11.field.quanta = args->dir_credit_quantum; + + DLB_CSR_WR(hw, + DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id), + r11.val); + + r12.field.count = args->ldb_credit_high_watermark; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_CNT(port->id), r12.val); + + r13.field.count = args->dir_credit_high_watermark; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_CNT(port->id), r13.val); + + r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0; + + DLB_CSR_WR(hw, DLB_CHP_LDB_LDB_PP2POOL(port->id), r14.val); + + r15.field.pool = (port->dir_pool_used) ? 
dir_pool->id : 0; + + DLB_CSR_WR(hw, DLB_CHP_LDB_DIR_PP2POOL(port->id), r15.val); + + r16.field.no_pp_credit_update = 0; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id), r16.val); + + r17.field.push_pointer = 0; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id), r17.val); + + r18.field.push_pointer = 0; + + DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id), r18.val); + + r4.field.pp_v = 1; + + DLB_CSR_WR(hw, + DLB_SYS_LDB_PP_V(port->id), + r4.val); + + return 0; +} + +static int dlb_ldb_port_configure_cq(struct dlb_hw *hw, + struct dlb_ldb_port *port, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_ldb_port_args *args) +{ + int i; + + union dlb_sys_ldb_cq_addr_l r0 = { {0} }; + union dlb_sys_ldb_cq_addr_u r1 = { {0} }; + union dlb_sys_ldb_cq2vf_pf r2 = { {0} }; + union dlb_chp_ldb_cq_tkn_depth_sel r3 = { {0} }; + union dlb_chp_hist_list_lim r4 = { {0} }; + union dlb_chp_hist_list_base r5 = { {0} }; + union dlb_lsp_cq_ldb_infl_lim r6 = { {0} }; + union dlb_lsp_cq2priov r7 = { {0} }; + union dlb_chp_hist_list_push_ptr r8 = { {0} }; + union dlb_chp_hist_list_pop_ptr r9 = { {0} }; + union dlb_lsp_cq_ldb_tkn_depth_sel r10 = { {0} }; + union dlb_sys_ldb_pp_addr_l r11 = { {0} }; + union dlb_sys_ldb_pp_addr_u r12 = { {0} }; + + /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */ + r0.field.addr_l = cq_dma_base >> 6; + + DLB_CSR_WR(hw, + DLB_SYS_LDB_CQ_ADDR_L(port->id), + r0.val); + + r1.field.addr_u = cq_dma_base >> 32; + + DLB_CSR_WR(hw, + DLB_SYS_LDB_CQ_ADDR_U(port->id), + r1.val); + + r2.field.is_pf = 1; + + DLB_CSR_WR(hw, + DLB_SYS_LDB_CQ2VF_PF(port->id), + r2.val); + + if (args->cq_depth <= 8) { + r3.field.token_depth_select = 1; + } else if (args->cq_depth == 16) { + r3.field.token_depth_select = 2; + } else if (args->cq_depth == 32) { + r3.field.token_depth_select = 3; + } else if (args->cq_depth == 64) { + r3.field.token_depth_select = 4; + } else if (args->cq_depth == 128) { + r3.field.token_depth_select = 5; + } else if (args->cq_depth == 256) { + r3.field.token_depth_select = 6; + } else if (args->cq_depth == 512) { + r3.field.token_depth_select = 7; + } else if (args->cq_depth == 1024) { + r3.field.token_depth_select = 8; + } else { + DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n", + __func__, __LINE__); + return -EFAULT; + } + + DLB_CSR_WR(hw, + DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id), + r3.val); + + r10.field.token_depth_select = r3.field.token_depth_select; + r10.field.ignore_depth = 0; + /* TDT algorithm: DLB must be able to write CQs with depth < 4 */ + r10.field.enab_shallow_cq = 1; + + DLB_CSR_WR(hw, + DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id), + r10.val); + + /* To support CQs with depth less than 8, program the token count + * register with a non-zero initial value. Operations such as domain + * reset must take this initial value into account when quiescing the + * CQ. 
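The if/else ladder above admits a compact closed form, noted here for
reviewers; the helper is illustrative and assumes the power-of-two depths
already enforced by the argument checks. LDB CQs shallower than eight
entries additionally pre-load 8 - cq_depth tokens, as done just below.

/* Illustrative closed form for the token_depth_select ladder above:
 * 8 -> 1, 16 -> 2, 32 -> 3, ..., 1024 -> 8; depths of 8 or less use 1.
 */
static inline u32
dlb_token_depth_select(u32 cq_depth)
{
	return (cq_depth <= 8) ? 1 : rte_log2_u32(cq_depth) - 2;
}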
+ */ + port->init_tkn_cnt = 0; + + if (args->cq_depth < 8) { + union dlb_lsp_cq_ldb_tkn_cnt r12 = { {0} }; + + port->init_tkn_cnt = 8 - args->cq_depth; + + r12.field.token_count = port->init_tkn_cnt; + + DLB_CSR_WR(hw, + DLB_LSP_CQ_LDB_TKN_CNT(port->id), + r12.val); + } + + r4.field.limit = port->hist_list_entry_limit - 1; + + DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_LIM(port->id), r4.val); + + r5.field.base = port->hist_list_entry_base; + + DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_BASE(port->id), r5.val); + + r8.field.push_ptr = r5.field.base; + r8.field.generation = 0; + + DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_PUSH_PTR(port->id), r8.val); + + r9.field.pop_ptr = r5.field.base; + r9.field.generation = 0; + + DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_POP_PTR(port->id), r9.val); + + /* The inflight limit sets a cap on the number of QEs for which this CQ + * can owe completions at one time. + */ + r6.field.limit = args->cq_history_list_size; + + DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_INFL_LIM(port->id), r6.val); + + /* Disable the port's QID mappings */ + r7.field.v = 0; + + DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r7.val); + + /* Two cache lines (128B) are dedicated for the port's pop counts */ + r11.field.addr_l = pop_count_dma_base >> 7; + + DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_L(port->id), r11.val); + + r12.field.addr_u = pop_count_dma_base >> 32; + + DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_U(port->id), r12.val); + + for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) + port->qid_map[i].state = DLB_QUEUE_UNMAPPED; + + return 0; +} + +static void dlb_update_ldb_arb_threshold(struct dlb_hw *hw) +{ + union dlb_lsp_ctrl_config_0 r0 = { {0} }; + + /* From the hardware spec: + * "The optimal value for ldb_arb_threshold is in the region of {8 * + * #CQs}. It is expected therefore that the PF will change this value + * dynamically as the number of active ports changes." 
+ */ + r0.val = DLB_CSR_RD(hw, DLB_LSP_CTRL_CONFIG_0); + + r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8; + r0.field.ldb_arb_ignore_empty = 1; + r0.field.ldb_arb_mode = 1; + + DLB_CSR_WR(hw, DLB_LSP_CTRL_CONFIG_0, r0.val); + + dlb_flush_csr(hw); +} + +static int dlb_configure_ldb_port(struct dlb_hw *hw, + struct dlb_domain *domain, + struct dlb_ldb_port *port, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_ldb_port_args *args) +{ + struct dlb_credit_pool *ldb_pool, *dir_pool; + int ret; + + port->hist_list_entry_base = domain->hist_list_entry_base + + domain->hist_list_entry_offset; + port->hist_list_entry_limit = port->hist_list_entry_base + + args->cq_history_list_size; + + domain->hist_list_entry_offset += args->cq_history_list_size; + domain->avail_hist_list_entries -= args->cq_history_list_size; + + port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) || + !dlb_list_empty(&domain->avail_ldb_queues); + port->dir_pool_used = !dlb_list_empty(&domain->used_dir_pq_pairs) || + !dlb_list_empty(&domain->avail_dir_pq_pairs); + + if (port->ldb_pool_used) { + u32 cnt = args->ldb_credit_high_watermark; + + ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id, + domain); + if (!ldb_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt); + } else { + args->ldb_credit_high_watermark = 0; + args->ldb_credit_low_watermark = 0; + args->ldb_credit_quantum = 0; + } + + if (port->dir_pool_used) { + u32 cnt = args->dir_credit_high_watermark; + + dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, + domain); + if (!dir_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + dlb_dir_pool_update_credit_count(hw, dir_pool->id, cnt); + } else { + args->dir_credit_high_watermark = 0; + args->dir_credit_low_watermark = 0; + args->dir_credit_quantum = 0; + } + + ret = dlb_ldb_port_configure_cq(hw, + port, + pop_count_dma_base, + cq_dma_base, + args); + if (ret < 0) + return ret; + + ret = dlb_ldb_port_configure_pp(hw, domain, port, args); + if (ret < 0) + return ret; + + dlb_ldb_port_cq_enable(hw, port); + + port->num_mappings = 0; + + port->enabled = true; + + hw->pf.num_enabled_ldb_ports++; + + dlb_update_ldb_arb_threshold(hw); + + port->configured = true; + + return 0; +} + +/** + * dlb_hw_create_ldb_port() - Allocate and initialize a load-balanced port and + * its resources. + * @hw: Contains the current state of the DLB hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int dlb_hw_create_ldb_port(struct dlb_hw *hw, + u32 domain_id, + struct dlb_create_ldb_port_args *args, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_cmd_response *resp) +{ + struct dlb_ldb_port *port; + struct dlb_domain *domain; + int ret; + + dlb_log_create_ldb_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
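A worked example of the history list carving performed in
dlb_configure_ldb_port() above, with assumed numbers.

/* Worked example (values assumed): the domain starts with
 * hist_list_entry_base = 0 and hist_list_entry_offset = 0, and two LDB
 * ports are created with cq_history_list_size = 64 each.
 *
 *   port 0: base = 0,  limit = 64   -> DLB_CHP_HIST_LIST_LIM written with 63
 *   port 1: base = 64, limit = 128  -> DLB_CHP_HIST_LIST_LIM written with 127
 *
 * Afterwards hist_list_entry_offset = 128 and avail_hist_list_entries has
 * been reduced by 128.
 */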
+ */ + if (dlb_verify_create_ldb_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args, + resp)) + return -EINVAL; + + domain = dlb_get_domain_from_id(hw, domain_id); + if (!domain) { + DLB_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + port = DLB_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port)); + + /* Verification should catch this. */ + if (!port) { + DLB_HW_ERR(hw, + "[%s():%d] Internal error: no available ldb ports\n", + __func__, __LINE__); + return -EFAULT; + } + + if (port->configured) { + DLB_HW_ERR(hw, + "[%s()] Internal error: avail_ldb_ports contains configured ports.\n", + __func__); + return -EFAULT; + } + + ret = dlb_configure_ldb_port(hw, + domain, + port, + pop_count_dma_base, + cq_dma_base, + args); + if (ret < 0) + return ret; + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list. + */ + dlb_list_del(&domain->avail_ldb_ports, &port->domain_list); + + dlb_list_add(&domain->used_ldb_ports, &port->domain_list); + + resp->status = 0; + resp->id = port->id; + + return 0; +} + +static void dlb_log_create_dir_port_args(struct dlb_hw *hw, + u32 domain_id, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_dir_port_args *args) +{ + DLB_HW_INFO(hw, "DLB create directed port arguments:\n"); + DLB_HW_INFO(hw, "\tDomain ID: %d\n", + domain_id); + DLB_HW_INFO(hw, "\tLDB credit pool ID: %d\n", + args->ldb_credit_pool_id); + DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n", + args->ldb_credit_high_watermark); + DLB_HW_INFO(hw, "\tLDB credit low watermark: %d\n", + args->ldb_credit_low_watermark); + DLB_HW_INFO(hw, "\tLDB credit quantum: %d\n", + args->ldb_credit_quantum); + DLB_HW_INFO(hw, "\tDIR credit pool ID: %d\n", + args->dir_credit_pool_id); + DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n", + args->dir_credit_high_watermark); + DLB_HW_INFO(hw, "\tDIR credit low watermark: %d\n", + args->dir_credit_low_watermark); + DLB_HW_INFO(hw, "\tDIR credit quantum: %d\n", + args->dir_credit_quantum); + DLB_HW_INFO(hw, "\tpop_count_address: 0x%"PRIx64"\n", + pop_count_dma_base); + DLB_HW_INFO(hw, "\tCQ depth: %d\n", + args->cq_depth); + DLB_HW_INFO(hw, "\tCQ base address: 0x%"PRIx64"\n", + cq_dma_base); +} + +static int +dlb_verify_create_dir_port_args(struct dlb_hw *hw, + u32 domain_id, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_dir_port_args *args, + struct dlb_cmd_response *resp) +{ + struct dlb_domain *domain; + struct dlb_credit_pool *pool; + + domain = dlb_get_domain_from_id(hw, domain_id); + + if (!domain) { + resp->status = DLB_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = DLB_ST_DOMAIN_STARTED; + return -1; + } + + /* If the user claims the queue is already configured, validate + * the queue ID, its domain, and whether the queue is configured. + */ + if (args->queue_id != -1) { + struct dlb_dir_pq_pair *queue; + + queue = dlb_get_domain_used_dir_pq(args->queue_id, + domain); + + if (!queue || queue->domain_id != domain->id || + !queue->queue_configured) { + resp->status = DLB_ST_INVALID_DIR_QUEUE_ID; + return -1; + } + } + + /* If the port's queue is not configured, validate that a free + * port-queue pair is available. 
+ */ + if (args->queue_id == -1 && + dlb_list_empty(&domain->avail_dir_pq_pairs)) { + resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE; + return -1; + } + + /* If the scheduling domain has no LDB queues, we configure the + * hardware to not supply the port with any LDB credits. In that + * case, ignore the LDB credit arguments. + */ + if (!dlb_list_empty(&domain->used_ldb_queues) || + !dlb_list_empty(&domain->avail_ldb_queues)) { + pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id, + domain); + + if (!pool || !pool->configured || + pool->domain_id != domain->id) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID; + return -1; + } + + if (args->ldb_credit_high_watermark > pool->avail_credits) { + resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->ldb_credit_low_watermark >= + args->ldb_credit_high_watermark) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->ldb_credit_quantum >= + args->ldb_credit_high_watermark) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + + if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) { + resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + } + + pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, + domain); + + if (!pool || !pool->configured || + pool->domain_id != domain->id) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID; + return -1; + } + + if (args->dir_credit_high_watermark > pool->avail_credits) { + resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->dir_credit_quantum >= args->dir_credit_high_watermark) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + + if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) { + resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + + /* Check cache-line alignment */ + if ((pop_count_dma_base & 0x3F) != 0) { + resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR; + return -1; + } + + if ((cq_dma_base & 0x3F) != 0) { + resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR; + return -1; + } + + if (args->cq_depth != 8 && + args->cq_depth != 16 && + args->cq_depth != 32 && + args->cq_depth != 64 && + args->cq_depth != 128 && + args->cq_depth != 256 && + args->cq_depth != 512 && + args->cq_depth != 1024) { + resp->status = DLB_ST_INVALID_CQ_DEPTH; + return -1; + } + + return 0; +} + +static int dlb_dir_port_configure_pp(struct dlb_hw *hw, + struct dlb_domain *domain, + struct dlb_dir_pq_pair *port, + struct dlb_create_dir_port_args *args) +{ + union dlb_sys_dir_pp2ldbpool r0 = { {0} }; + union dlb_sys_dir_pp2dirpool r1 = { {0} }; + union dlb_sys_dir_pp2vf_pf r2 = { {0} }; + union dlb_sys_dir_pp2vas r3 = { {0} }; + union dlb_sys_dir_pp_v r4 = { {0} }; + union dlb_chp_dir_pp_ldb_crd_hwm r6 = { {0} }; + union dlb_chp_dir_pp_dir_crd_hwm r7 = { {0} }; + union dlb_chp_dir_pp_ldb_crd_lwm r8 = { {0} }; + union dlb_chp_dir_pp_dir_crd_lwm r9 = { {0} }; + union dlb_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} }; + union dlb_chp_dir_pp_dir_min_crd_qnt r11 = { {0} }; + union dlb_chp_dir_pp_ldb_crd_cnt r12 = { {0} }; + union dlb_chp_dir_pp_dir_crd_cnt r13 = { {0} }; + union dlb_chp_dir_ldb_pp2pool r14 = { {0} }; + union dlb_chp_dir_dir_pp2pool r15 = { {0} }; + union dlb_chp_dir_pp_crd_req_state r16 = { {0} }; + union dlb_chp_dir_pp_ldb_push_ptr r17 = { {0} }; + union dlb_chp_dir_pp_dir_push_ptr r18 = { {0} }; + + 
struct dlb_credit_pool *ldb_pool = NULL; + struct dlb_credit_pool *dir_pool = NULL; + + if (port->ldb_pool_used) { + ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id, + domain); + if (!ldb_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + if (port->dir_pool_used) { + dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, + domain); + if (!dir_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0; + + DLB_CSR_WR(hw, + DLB_SYS_DIR_PP2LDBPOOL(port->id), + r0.val); + + r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0; + + DLB_CSR_WR(hw, + DLB_SYS_DIR_PP2DIRPOOL(port->id), + r1.val); + + r2.field.is_pf = 1; + r2.field.is_hw_dsi = 0; + + DLB_CSR_WR(hw, + DLB_SYS_DIR_PP2VF_PF(port->id), + r2.val); + + r3.field.vas = domain->id; + + DLB_CSR_WR(hw, + DLB_SYS_DIR_PP2VAS(port->id), + r3.val); + + r6.field.hwm = args->ldb_credit_high_watermark; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id), + r6.val); + + r7.field.hwm = args->dir_credit_high_watermark; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id), + r7.val); + + r8.field.lwm = args->ldb_credit_low_watermark; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id), + r8.val); + + r9.field.lwm = args->dir_credit_low_watermark; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id), + r9.val); + + r10.field.quanta = args->ldb_credit_quantum; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id), + r10.val); + + r11.field.quanta = args->dir_credit_quantum; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id), + r11.val); + + r12.field.count = args->ldb_credit_high_watermark; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_LDB_CRD_CNT(port->id), + r12.val); + + r13.field.count = args->dir_credit_high_watermark; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_DIR_CRD_CNT(port->id), + r13.val); + + r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_LDB_PP2POOL(port->id), + r14.val); + + r15.field.pool = (port->dir_pool_used) ? 
dir_pool->id : 0; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_DIR_PP2POOL(port->id), + r15.val); + + r16.field.no_pp_credit_update = 0; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id), + r16.val); + + r17.field.push_pointer = 0; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id), + r17.val); + + r18.field.push_pointer = 0; + + DLB_CSR_WR(hw, + DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id), + r18.val); + + r4.field.pp_v = 1; + r4.field.mb_dm = 0; + + DLB_CSR_WR(hw, DLB_SYS_DIR_PP_V(port->id), r4.val); + + return 0; +} + +static int dlb_dir_port_configure_cq(struct dlb_hw *hw, + struct dlb_dir_pq_pair *port, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_dir_port_args *args) +{ + union dlb_sys_dir_cq_addr_l r0 = { {0} }; + union dlb_sys_dir_cq_addr_u r1 = { {0} }; + union dlb_sys_dir_cq2vf_pf r2 = { {0} }; + union dlb_chp_dir_cq_tkn_depth_sel r3 = { {0} }; + union dlb_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} }; + union dlb_sys_dir_pp_addr_l r5 = { {0} }; + union dlb_sys_dir_pp_addr_u r6 = { {0} }; + + /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */ + r0.field.addr_l = cq_dma_base >> 6; + + DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_L(port->id), r0.val); + + r1.field.addr_u = cq_dma_base >> 32; + + DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_U(port->id), r1.val); + + r2.field.is_pf = 1; + + DLB_CSR_WR(hw, DLB_SYS_DIR_CQ2VF_PF(port->id), r2.val); + + if (args->cq_depth == 8) { + r3.field.token_depth_select = 1; + } else if (args->cq_depth == 16) { + r3.field.token_depth_select = 2; + } else if (args->cq_depth == 32) { + r3.field.token_depth_select = 3; + } else if (args->cq_depth == 64) { + r3.field.token_depth_select = 4; + } else if (args->cq_depth == 128) { + r3.field.token_depth_select = 5; + } else if (args->cq_depth == 256) { + r3.field.token_depth_select = 6; + } else if (args->cq_depth == 512) { + r3.field.token_depth_select = 7; + } else if (args->cq_depth == 1024) { + r3.field.token_depth_select = 8; + } else { + DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n", + __func__, __LINE__); + return -EFAULT; + } + + DLB_CSR_WR(hw, + DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id), + r3.val); + + r4.field.token_depth_select = r3.field.token_depth_select; + r4.field.disable_wb_opt = 0; + + DLB_CSR_WR(hw, + DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id), + r4.val); + + /* Two cache lines (128B) are dedicated for the port's pop counts */ + r5.field.addr_l = pop_count_dma_base >> 7; + + DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_L(port->id), r5.val); + + r6.field.addr_u = pop_count_dma_base >> 32; + + DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_U(port->id), r6.val); + + return 0; +} + +static int dlb_configure_dir_port(struct dlb_hw *hw, + struct dlb_domain *domain, + struct dlb_dir_pq_pair *port, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_create_dir_port_args *args) +{ + struct dlb_credit_pool *ldb_pool, *dir_pool; + int ret; + + port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) || + !dlb_list_empty(&domain->avail_ldb_queues); + + /* Each directed port has a directed queue, hence this port requires + * directed credits. 
+ */ + port->dir_pool_used = true; + + if (port->ldb_pool_used) { + u32 cnt = args->ldb_credit_high_watermark; + + ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id, + domain); + if (!ldb_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt); + } else { + args->ldb_credit_high_watermark = 0; + args->ldb_credit_low_watermark = 0; + args->ldb_credit_quantum = 0; + } + + dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, domain); + if (!dir_pool) { + DLB_HW_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + dlb_dir_pool_update_credit_count(hw, + dir_pool->id, + args->dir_credit_high_watermark); + + ret = dlb_dir_port_configure_cq(hw, + port, + pop_count_dma_base, + cq_dma_base, + args); + + if (ret < 0) + return ret; + + ret = dlb_dir_port_configure_pp(hw, domain, port, args); + if (ret < 0) + return ret; + + dlb_dir_port_cq_enable(hw, port); + + port->enabled = true; + + port->port_configured = true; + + return 0; +} + +/** + * dlb_hw_create_dir_port() - Allocate and initialize a DLB directed port and + * queue. The port/queue pair have the same ID and name. + * @hw: Contains the current state of the DLB hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int dlb_hw_create_dir_port(struct dlb_hw *hw, + u32 domain_id, + struct dlb_create_dir_port_args *args, + u64 pop_count_dma_base, + u64 cq_dma_base, + struct dlb_cmd_response *resp) +{ + struct dlb_dir_pq_pair *port; + struct dlb_domain *domain; + int ret; + + dlb_log_create_dir_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (dlb_verify_create_dir_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args, + resp)) + return -EINVAL; + + domain = dlb_get_domain_from_id(hw, domain_id); + if (!domain) { + DLB_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + if (args->queue_id != -1) + port = dlb_get_domain_used_dir_pq(args->queue_id, + domain); + else + port = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs, + typeof(*port)); + + /* Verification should catch this. */ + if (!port) { + DLB_HW_ERR(hw, + "[%s():%d] Internal error: no available dir ports\n", + __func__, __LINE__); + return -EFAULT; + } + + ret = dlb_configure_dir_port(hw, + domain, + port, + pop_count_dma_base, + cq_dma_base, + args); + if (ret < 0) + return ret; + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list (if it's not already there). 
+ */ + if (args->queue_id == -1) { + dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list); + + dlb_list_add(&domain->used_dir_pq_pairs, &port->domain_list); + } + + resp->status = 0; + resp->id = port->id; + + return 0; +} + diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c index fffb88b..cd766d3 100644 --- a/drivers/event/dlb/pf/dlb_pf.c +++ b/drivers/event/dlb/pf/dlb_pf.c @@ -221,6 +221,207 @@ dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle, } static int +dlb_pf_dir_queue_create(struct dlb_hw_dev *handle, + struct dlb_create_dir_queue_args *cfg) +{ + struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev; + struct dlb_cmd_response response = {0}; + int ret; + + DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__); + + ret = dlb_hw_create_dir_queue(&dlb_dev->hw, + handle->domain_id, + cfg, + &response); + + *(struct dlb_cmd_response *)cfg->response = response; + + DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret); + + return ret; +} + +static void * +dlb_alloc_coherent_aligned(rte_iova_t *phys, size_t size, int align) +{ + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t core_id = rte_lcore_id(); + unsigned int socket_id; + + snprintf(mz_name, sizeof(mz_name) - 1, "%lx", + (unsigned long)rte_get_timer_cycles()); + if (core_id == (unsigned int)LCORE_ID_ANY) + core_id = rte_get_master_lcore(); + socket_id = rte_lcore_to_socket_id(core_id); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); + if (!mz) { + DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n", + size); + *phys = 0; + return NULL; + } + *phys = mz->iova; + return mz->addr; +} + +static int +dlb_pf_ldb_port_create(struct dlb_hw_dev *handle, + struct dlb_create_ldb_port_args *cfg, + enum dlb_cq_poll_modes poll_mode) +{ + struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev; + struct dlb_cmd_response response = {0}; + int ret; + uint8_t *port_base; + int alloc_sz, qe_sz, cq_alloc_depth; + rte_iova_t pp_dma_base; + rte_iova_t pc_dma_base; + rte_iova_t cq_dma_base; + int is_dir = false; + + DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__); + + if (poll_mode == DLB_CQ_POLL_MODE_STD) + qe_sz = sizeof(struct dlb_dequeue_qe); + else + qe_sz = RTE_CACHE_LINE_SIZE; + + /* The hardware always uses a CQ depth of at least + * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user + * perspective we support a depth as low as 1 for LDB ports. + */ + cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH); + + /* Calculate the port memory required, including two cache lines for + * credit pop counts. Round up to the nearest cache line. 
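+ * For example, assuming 64B cache lines and (in standard poll mode)
+ * 16B dequeue QEs, a 64-entry CQ needs 2 * 64 + 64 * 16 = 1152 bytes,
+ * already a cache-line multiple; in the non-standard poll mode each QE
+ * is padded to a full cache line.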
+ */ + alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz; + alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz); + + port_base = dlb_alloc_coherent_aligned(&pc_dma_base, + alloc_sz, + PAGE_SIZE); + if (port_base == NULL) + return -ENOMEM; + + /* Lock the page in memory */ + ret = rte_mem_lock_page(port_base); + if (ret < 0) { + DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n"); + goto create_port_err; + } + + memset(port_base, 0, alloc_sz); + cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE)); + + ret = dlb_hw_create_ldb_port(&dlb_dev->hw, + handle->domain_id, + cfg, + pc_dma_base, + cq_dma_base, + &response); + if (ret) + goto create_port_err; + + pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir); + dlb_port[response.id][DLB_LDB].pp_addr = + (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id)); + + dlb_port[response.id][DLB_LDB].cq_base = + (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE)); + + dlb_port[response.id][DLB_LDB].ldb_popcount = + (void *)(uintptr_t)port_base; + dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t) + (port_base + RTE_CACHE_LINE_SIZE); + + *(struct dlb_cmd_response *)cfg->response = response; + + DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret); + +create_port_err: + + return ret; +} + +static int +dlb_pf_dir_port_create(struct dlb_hw_dev *handle, + struct dlb_create_dir_port_args *cfg, + enum dlb_cq_poll_modes poll_mode) +{ + struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev; + struct dlb_cmd_response response = {0}; + int ret; + uint8_t *port_base; + int alloc_sz, qe_sz; + rte_iova_t pp_dma_base; + rte_iova_t pc_dma_base; + rte_iova_t cq_dma_base; + int is_dir = true; + + DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__); + + if (poll_mode == DLB_CQ_POLL_MODE_STD) + qe_sz = sizeof(struct dlb_dequeue_qe); + else + qe_sz = RTE_CACHE_LINE_SIZE; + + /* Calculate the port memory required, including two cache lines for + * credit pop counts. Round up to the nearest cache line. 
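+ * The two pop-count cache lines sit at the start of the allocation and
+ * the CQ begins immediately after them (see cq_dma_base below).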
+ */ + alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz; + alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz); + + port_base = dlb_alloc_coherent_aligned(&pc_dma_base, + alloc_sz, + PAGE_SIZE); + if (port_base == NULL) + return -ENOMEM; + + /* Lock the page in memory */ + ret = rte_mem_lock_page(port_base); + if (ret < 0) { + DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n"); + goto create_port_err; + } + + memset(port_base, 0, alloc_sz); + cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE)); + + ret = dlb_hw_create_dir_port(&dlb_dev->hw, + handle->domain_id, + cfg, + pc_dma_base, + cq_dma_base, + &response); + if (ret) + goto create_port_err; + + pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir); + dlb_port[response.id][DLB_DIR].pp_addr = + (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id)); + + dlb_port[response.id][DLB_DIR].cq_base = + (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE)); + + dlb_port[response.id][DLB_DIR].ldb_popcount = + (void *)(uintptr_t)port_base; + dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t) + (port_base + RTE_CACHE_LINE_SIZE); + + *(struct dlb_cmd_response *)cfg->response = response; + + DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret); + +create_port_err: + + return ret; +} + +static int dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle, struct dlb_get_sn_allocation_args *args) { @@ -287,6 +488,9 @@ dlb_pf_iface_fn_ptrs_init(void) dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create; dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create; dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create; + dlb_iface_dir_queue_create = dlb_pf_dir_queue_create; + dlb_iface_ldb_port_create = dlb_pf_ldb_port_create; + dlb_iface_dir_port_create = dlb_pf_dir_port_create; dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode; dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation; dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation; -- 2.6.4