From mboxrd@z Thu Jan  1 00:00:00 1970
From: Tim McDaniel <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
	harry.van.haaren@intel.com,
	"McDaniel, Timothy" <timothy.mcdaniel@intel.com>
Date: Fri, 26 Jun 2020 23:37:42 -0500
Message-Id: <1593232671-5690-19-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 18/27] event/dlb: add queue setup

From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>

Add the queue setup eventdev entry point. Load-balanced queues are
configured in hardware at setup time, while directed queues are only
validated here and are configured at link time, once their directed
port ID is known.

Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
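Notes:

As a review aid, a minimal application-side sketch of how the
load-balanced path added below is exercised through the public
eventdev API. The device/queue IDs and the sequence-number count are
arbitrary illustrative values, not part of this patch:

    #include <rte_eventdev.h>

    static int
    setup_ordered_queue(uint8_t dev_id, uint8_t queue_id)
    {
    	struct rte_event_queue_conf conf;
    	int ret;

    	/* Start from the PMD's default queue configuration. */
    	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
    	if (ret < 0)
    		return ret;

    	/* A fixed schedule type is honored as-is; if
    	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES were set instead, the PMD
    	 * would pick ORDERED or PARALLEL based on whether
    	 * nb_atomic_order_sequences is nonzero.
    	 */
    	conf.schedule_type = RTE_SCHED_TYPE_ORDERED;
    	conf.nb_atomic_order_sequences = 64;

    	return rte_event_queue_setup(dev_id, queue_id, &conf);
    }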
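The sequence-number (SN) slot arithmetic in dlb_program_sn_allocation()
can be checked with a standalone sketch. DLB_MAX_LDB_SN_ALLOC is not
defined in this patch; the 1024 below is an assumed value used for
illustration only:

    #include <stdio.h>

    #define MAX_LDB_SN_ALLOC 1024	/* assumed per-group SN budget */

    int
    main(void)
    {
    	int grp_alloc = 64;	/* group configured for 64-SN queues */
    	int total_slots = MAX_LDB_SN_ALLOC / grp_alloc;	/* 16 slots */
    	int grp_occupancy = 10;	/* queues currently using the group */

    	/* Mirrors the driver's check: a request for 64 sequence
    	 * numbers fits in this group while at least one slot is free.
    	 */
    	if (grp_occupancy < total_slots)
    		printf("fits: %d of %d slots used\n",
    		       grp_occupancy, total_slots);

    	return 0;
    }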
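And a sketch of requesting a directed queue, whose hardware
configuration this patch defers until link time (IDs again arbitrary):

    #include <rte_eventdev.h>

    static int
    setup_directed_queue(uint8_t dev_id, uint8_t queue_id)
    {
    	struct rte_event_queue_conf conf = {
    		/* SINGLE_LINK marks the queue as directed; setup only
    		 * records and validates it, and the queue is configured
    		 * when it is linked to its port.
    		 */
    		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
    		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
    	};

    	return rte_event_queue_setup(dev_id, queue_id, &conf);
    }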
 drivers/event/dlb/dlb.c | 295 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 295 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index b527f2c..ded3b18 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -221,6 +221,65 @@ int dlb_string_to_int(int *result, const char *str)
 	return 0;
 }
 
+static int32_t
+dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
+			struct dlb_queue *queue,
+			const struct rte_event_queue_conf *evq_conf)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_create_ldb_queue_args cfg;
+	struct dlb_cmd_response response;
+	int32_t ret;
+	uint32_t qm_qid;
+	int sched_type = -1;
+
+	if (evq_conf == NULL)
+		return -EINVAL;
+
+	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
+		if (evq_conf->nb_atomic_order_sequences != 0)
+			sched_type = RTE_SCHED_TYPE_ORDERED;
+		else
+			sched_type = RTE_SCHED_TYPE_PARALLEL;
+	} else {
+		sched_type = evq_conf->schedule_type;
+	}
+
+	cfg.response = (uintptr_t)&response;
+	cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
+	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
+	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
+
+	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
+		cfg.num_sequence_numbers = 0;
+		cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
+	}
+
+	ret = dlb_iface_ldb_queue_create(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return -EINVAL;
+	}
+
+	qm_qid = response.id;
+
+	/* Save off queue config for debug, resource lookups, and reconfig */
+	queue->num_qid_inflights = cfg.num_qid_inflights;
+	queue->num_atm_inflights = cfg.num_atomic_inflights;
+
+	queue->sched_type = sched_type;
+	queue->config_state = DLB_CONFIGURED;
+
+	DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
+		    qm_qid,
+		    cfg.num_atomic_inflights,
+		    cfg.num_sequence_numbers,
+		    cfg.num_qid_inflights);
+
+	return qm_qid;
+}
+
 /* VDEV-only notes:
  *	This function first unmaps all memory mappings and closes the
  *	domain's file descriptor, which causes the driver to reset the
@@ -442,6 +501,7 @@ int dlb_string_to_int(int *result, const char *str)
 }
 /* End HW specific */
+
 static void
 dlb_eventdev_info_get(struct rte_eventdev *dev,
 		      struct rte_event_dev_info *dev_info)
@@ -640,6 +700,240 @@ int dlb_string_to_int(int *result, const char *str)
 	queue_conf->priority = 0;
 }
 
+static int32_t
+dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_sn_allocation_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_sn_allocation(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+static int
+dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_set_sn_allocation_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.num = num;
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_set_sn_allocation(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int32_t
+dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_sn_occupancy_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_sn_occupancy(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+/* Query the current sequence number allocations and, if they conflict with the
+ * requested LDB queue configuration, attempt to re-allocate sequence numbers.
+ * This is best-effort; if it fails, the PMD will attempt to configure the
+ * load-balanced queue and return an error.
+ */
+static void
+dlb_program_sn_allocation(struct dlb_eventdev *dlb,
+			  const struct rte_event_queue_conf *queue_conf)
+{
+	int grp_occupancy[DLB_NUM_SN_GROUPS];
+	int grp_alloc[DLB_NUM_SN_GROUPS];
+	int i, sequence_numbers;
+
+	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
+
+	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+		int total_slots;
+
+		grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
+		if (grp_alloc[i] < 0)
+			return;
+
+		total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
+
+		grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
+		if (grp_occupancy[i] < 0)
+			return;
+
+		/* DLB has at least one available slot for the requested
+		 * sequence numbers, so no further configuration is required.
+		 */
+		if (grp_alloc[i] == sequence_numbers &&
+		    grp_occupancy[i] < total_slots)
+			return;
+	}
+
+	/* None of the sequence number groups are configured for the requested
+	 * sequence numbers, so we have to reconfigure one of them. This is
+	 * only possible if a group is not in use.
+	 */
+	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+		if (grp_occupancy[i] == 0)
+			break;
+	}
+
+	if (i == DLB_NUM_SN_GROUPS) {
+		DLB_LOG_ERR("[%s()] No groups with %d sequence numbers are available or have free slots\n",
+			    __func__, sequence_numbers);
+		return;
+	}
+
+	/* Attempt to configure group i with the requested number of sequence
+	 * numbers. Ignore the return value -- if this fails, the error will be
+	 * caught during subsequent queue configuration.
+	 */
+	dlb_set_sn_allocation(dlb, i, sequence_numbers);
+}
+
+static int
+dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
+			     struct dlb_eventdev_queue *ev_queue,
+			     const struct rte_event_queue_conf *queue_conf)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int32_t qm_qid;
+
+	if (queue_conf->nb_atomic_order_sequences)
+		dlb_program_sn_allocation(dlb, queue_conf);
+
+	qm_qid = dlb_hw_create_ldb_queue(dlb,
+					 &ev_queue->qm_queue,
+					 queue_conf);
+	if (qm_qid < 0) {
+		DLB_LOG_ERR("Failed to create the load-balanced queue\n");
+
+		return qm_qid;
+	}
+
+	dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+	ev_queue->qm_queue.id = qm_qid;
+
+	return 0;
+}
+
+static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
+{
+	int i, num = 0;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (dlb->ev_queues[i].setup_done &&
+		    dlb->ev_queues[i].qm_queue.is_directed)
+			num++;
+	}
+
+	return num;
+}
+
+static void
+dlb_queue_link_teardown(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *ev_queue)
+{
+	struct dlb_eventdev_port *ev_port;
+	int i, j;
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		ev_port = &dlb->ev_ports[i];
+
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+			if (!ev_port->link[j].valid ||
+			    ev_port->link[j].queue_id != ev_queue->id)
+				continue;
+
+			ev_port->link[j].valid = false;
+			ev_port->num_links--;
+		}
+	}
+
+	ev_queue->num_links = 0;
+}
+
+static int
+dlb_eventdev_queue_setup(struct rte_eventdev *dev,
+			 uint8_t ev_qid,
+			 const struct rte_event_queue_conf *queue_conf)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_eventdev_queue *ev_queue;
+	int ret;
+
+	if (!queue_conf)
+		return -EINVAL;
+
+	if (ev_qid >= dlb->num_queues)
+		return -EINVAL;
+
+	ev_queue = &dlb->ev_queues[ev_qid];
+
+	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
+		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+	ev_queue->id = ev_qid;
+	ev_queue->conf = *queue_conf;
+
+	if (!ev_queue->qm_queue.is_directed) {
+		ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
+	} else {
+		/* The directed queue isn't set up until link time, at which
+		 * point we know its directed port ID. Directed queue setup
+		 * will only fail if this queue is already set up or there are
+		 * no directed queues left to configure.
+		 */
+		ret = 0;
+
+		ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
+
+		if (ev_queue->setup_done ||
+		    dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
+			ret = -EINVAL;
+	}
+
+	/* Tear down pre-existing port->queue links */
+	if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
+		dlb_queue_link_teardown(dlb, ev_queue);
+
+	if (!ret)
+		ev_queue->setup_done = true;
+
+	return ret;
+}
+
 static int
 set_dev_id(const char *key __rte_unused,
 	   const char *value,
@@ -717,6 +1011,7 @@ int dlb_string_to_int(int *result, const char *str)
 	.dev_infos_get = dlb_eventdev_info_get,
 	.dev_configure = dlb_eventdev_configure,
 	.queue_def_conf = dlb_eventdev_queue_default_conf_get,
+	.queue_setup = dlb_eventdev_queue_setup,
 	.port_def_conf = dlb_eventdev_port_default_conf_get,
 };
-- 
1.7.10