* [PATCH 0/2] Rebase DLB2 port_cos and cq_weight patches @ 2022-06-16 14:23 Timothy McDaniel 2022-06-16 14:23 ` [PATCH 1/2] event/dlb2: add CQ weight support Timothy McDaniel 2022-06-16 14:23 ` [PATCH 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 0 siblings, 2 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 14:23 UTC (permalink / raw) To: jerinj; +Cc: dev This patch simply rebases the following previously submitted patches against the latest dpdk-next-eventdev (for-main) branch. Timothy McDaniel (2): event/dlb2: add CQ weight support event/dlb2: add ldb port specific COS support drivers/event/dlb2/dlb2.c | 323 ++++++++++++++++----- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 7 + drivers/event/dlb2/dlb2_iface.h | 8 + drivers/event/dlb2/dlb2_priv.h | 29 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 ++ drivers/event/dlb2/pf/base/dlb2_resource.c | 286 ++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 +++ drivers/event/dlb2/pf/dlb2_pf.c | 90 +++++- 10 files changed, 745 insertions(+), 76 deletions(-) -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 1/2] event/dlb2: add CQ weight support 2022-06-16 14:23 [PATCH 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel @ 2022-06-16 14:23 ` Timothy McDaniel 2022-06-16 14:23 ` [PATCH 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 1 sibling, 0 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 14:23 UTC (permalink / raw) To: jerinj; +Cc: dev Enabling the weight limit on a CQ allows the enqueued QEs' 2-bit weight value (representing weights of 1, 2, 4, and 8) to factor into whether a CQ is full. If the sum of the weights of the QEs in the CQ meet or exceed its weight limit, DLB will stop scheduling QEs to it (until software pops enough QEs from the CQ to reverse that). CQ weight support is enabled via the command line, and applies to DLB 2.5 (and above) load balanced ports. The DLB2 documentation will be updated with further details. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V3: Rebased against dpdk-next-eventdev V2: Added patch dependency line in commit message --- drivers/event/dlb2/dlb2.c | 99 +++++++++- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 4 + drivers/event/dlb2/dlb2_iface.h | 5 + drivers/event/dlb2/dlb2_priv.h | 10 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 +++ drivers/event/dlb2/pf/base/dlb2_resource.c | 220 +++++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 ++++ drivers/event/dlb2/pf/dlb2_pf.c | 69 +++++++ 10 files changed, 479 insertions(+), 6 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index 5b0b33bc77..e1687e3d63 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -107,6 +107,63 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2, } } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_cq_weight(struct dlb2_eventdev *dlb2, int *cq_weight) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) + dlb2->ev_ports[q].cq_weight = cq_weight[q]; +} + +static int +set_cq_weight(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_cq_weight *cq_weight = opaque; + int first, last, weight, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); + return -EINVAL; + } + + /* command line override may take one of the following 3 forms: + * qid_depth_thresh=all:<threshold_value> ... all queues + * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues + * qid_depth_thresh=qid:<threshold_value> ... just one queue + */ + if (sscanf(value, "all:%d", &weight) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &weight) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &weight) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg. 
Should be all:val, qid-qid:val, or qid:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port qe weight arg, invalid port value\n"); + return -EINVAL; + } + + if (weight < 0 || weight > DLB2_MAX_CQ_DEPTH_OVERRIDE) { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg, must be < cq depth\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + cq_weight->limit[i] = weight; /* indexed by qid */ + + return 0; +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -1372,13 +1429,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, return -EINVAL; if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n", DLB2_MIN_CQ_DEPTH); return -EINVAL; } - if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 && + ev_port->cq_weight > dequeue_depth) { + DLB2_LOG_ERR("dlb2: invalid cq depth, must be >= cq weight%d\n", DLB2_MIN_ENQUEUE_DEPTH); return -EINVAL; } @@ -1450,8 +1508,24 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, if (dlb2->version == DLB2_HW_V2) { qm_port->cached_ldb_credits = 0; qm_port->cached_dir_credits = 0; - } else + if (ev_port->cq_weight) { + struct dlb2_enable_cq_weight_args cq_weight_args = {0}; + + cq_weight_args.port_id = qm_port->id; + cq_weight_args.limit = ev_port->cq_weight; + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); + if (ret < 0) { + DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", + ret, + dlb2_error_strings[cfg.response. status]); + goto error_exit; + } + } + qm_port->cq_weight = ev_port->cq_weight; + } else { qm_port->cached_credits = 0; + qm_port->cq_weight = 0; + } /* CQs with depth < 8 use an 8-entry queue, but withhold credits so * the effective depth is smaller. 
@@ -4435,6 +4509,9 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_queue_depth_thresholds(dlb2, dlb2_args->qid_depth_thresholds.val); + dlb2_init_cq_weight(dlb2, + dlb2_args->cq_weight.limit); + return 0; } @@ -4489,6 +4566,7 @@ dlb2_parse_params(const char *params, DLB2_DEPTH_THRESH_ARG, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, + DLB2_CQ_WEIGHT, NULL }; if (params != NULL && params[0] != '\0') { @@ -4629,7 +4707,18 @@ dlb2_parse_params(const char *params, set_max_cq_depth, &dlb2_args->max_cq_depth); if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing vector opts enabled", + DLB2_LOG_ERR("%s: Error parsing max cq depth", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_CQ_WEIGHT, + set_cq_weight, + &dlb2_args->cq_weight); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cq weight on", name); rte_kvargs_free(kvlist); return ret; diff --git a/drivers/event/dlb2/dlb2_avx512.c b/drivers/event/dlb2/dlb2_avx512.c index d4aaa04a01..3c8906af9d 100644 --- a/drivers/event/dlb2/dlb2_avx512.c +++ b/drivers/event/dlb2/dlb2_avx512.c @@ -237,6 +237,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[3].data = ev[3].u64; } + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index 5471dd8da7..b77faa967c 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -72,3 +72,7 @@ int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index b508eb0936..fef717392f 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -71,4 +71,9 @@ extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index df69d57b83..63b092fc47 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -44,6 +44,7 @@ #define DLB2_DEPTH_THRESH_ARG "default_depth_thresh" #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" +#define DLB2_CQ_WEIGHT "cq_weight" /* Begin HW related defines and structs */ @@ -249,7 +250,7 @@ struct dlb2_enqueue_qe { /* Word 4 */ uint16_t lock_id; uint8_t meas_lat:1; - uint8_t rsvd1:2; + uint8_t weight:2; /* DLB 2.5 and above */ uint8_t no_dec:1; uint8_t cmp_id:4; union { @@ -378,6 +379,7 @@ struct dlb2_port { bool use_scalar; /* force usage of scalar code */ uint16_t hw_credit_quanta; bool use_avx512; + uint32_t cq_weight; }; /* Per-process per-port mmio and memory pointers */ @@ -526,6 +528,7 @@ struct dlb2_eventdev_port { /* enq_configured is set when the qm port is created */ bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ + uint32_t 
cq_weight; /* DLB2.5 and above ldb ports only */ } __rte_cache_aligned; struct dlb2_queue { @@ -627,6 +630,10 @@ struct dlb2_qid_depth_thresholds { int val[DLB2_MAX_NUM_QUEUES_ALL]; }; +struct dlb2_cq_weight { + int limit[DLB2_MAX_NUM_LDB_PORTS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -640,6 +647,7 @@ struct dlb2_devargs { int default_depth_thresh; bool vector_opts_enabled; int max_cq_depth; + struct dlb2_cq_weight cq_weight; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/dlb2_sse.c b/drivers/event/dlb2/dlb2_sse.c index 8fc12d47f7..248d7519d5 100644 --- a/drivers/event/dlb2/dlb2_sse.c +++ b/drivers/event/dlb2/dlb2_sse.c @@ -189,6 +189,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[2].data = ev[2].u64; qe[3].data = ev[3].u64; + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_user.h b/drivers/event/dlb2/dlb2_user.h index 9760e9bda6..901e2e0c66 100644 --- a/drivers/event/dlb2/dlb2_user.h +++ b/drivers/event/dlb2/dlb2_user.h @@ -47,6 +47,8 @@ enum dlb2_error { DLB2_ST_NO_MEMORY, DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL, DLB2_ST_INVALID_COS_ID, + DLB2_ST_INVALID_CQ_WEIGHT_LIMIT, + DLB2_ST_FEATURE_UNAVAILABLE, }; static const char dlb2_error_strings[][128] = { @@ -87,6 +89,8 @@ static const char dlb2_error_strings[][128] = { "DLB2_ST_NO_MEMORY", "DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL", "DLB2_ST_INVALID_COS_ID", + "DLB2_ST_INVALID_CQ_WEIGHT_LIMIT", + "DLB2_ST_FEATURE_UNAVAILABLE", }; struct dlb2_cmd_response { @@ -687,6 +691,31 @@ struct dlb2_pending_port_unmaps_args { __u32 padding0; }; +/* + * DLB2_DOMAIN_CMD_ENABLE_CQ_WEIGHT: Enable QE-weight based scheduling on a + * load-balanced port's CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. The QE weight limit must be non-zero and cannot exceed the + * CQ's depth. + * + * Input parameters: + * - port_id: Load-balanced port ID. + * - limit: QE weight limit. + * + * Output parameters: + * - response.status: Detailed error code. In certain cases, such as if the + * ioctl request arg is invalid, the driver won't set status. + * - response.id: number of unmaps in progress. + */ +struct dlb2_enable_cq_weight_args { + /* Output parameters */ + struct dlb2_cmd_response response; + /* Input parameters */ + __u32 port_id; + __u32 limit; +}; + /* * Mapping sizes for memory mapping the consumer queue (CQ) memory space, and * producer port (PP) MMIO space. diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index 4011c24aef..e52a896bad 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6273,3 +6273,223 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw, return 0; } +/** + * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. + * + * weight[N] programs priority N's weight. 
In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3); + DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg); +} + +/** + * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. 
+ * + * weight[N] programs priority N's weight. In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg); +} + +static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + bool vdev_req, + unsigned int vdev_id) +{ + DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n"); + if (vdev_req) + DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id); + DLB2_HW_DBG(hw, "\tDomain ID: %d\n", + domain_id); + DLB2_HW_DBG(hw, "\tPort ID: %d\n", + args->port_id); + DLB2_HW_DBG(hw, "\tLimit: %d\n", + args->limit); +} + +static int +dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + + if (hw->ver == DLB2_HW_V2) { + resp->status = DLB2_ST_FEATURE_UNAVAILABLE; + return -EINVAL; + } + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + + if (!domain) { + resp->status = DLB2_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + if (!domain->configured) { + resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED; + return -EINVAL; + } + + if (domain->started) { + resp->status = DLB2_ST_DOMAIN_STARTED; + return -EINVAL; + } + + port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain); + if (!port || !port->configured) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + if (args->limit == 0 || args->limit > port->cq_depth) { + resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT; + return -EINVAL; + } + + return 0; +} + +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + int ret, id; + u32 reg = 0; + + dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id); + + /* + * Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
+ */ + ret = dlb2_verify_enable_cq_weight_args(hw, + domain_id, + args, + resp, + vdev_req, + vdev_id); + if (ret) + return ret; + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + if (!domain) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); + if (!port) { + DLB2_HW_ERR(hw, + "[%s(): %d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V); + DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT); + + DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg); + + resp->status = 0; + + return 0; +} diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.h b/drivers/event/dlb2/pf/base/dlb2_resource.h index 684049cd6e..a7e6c90888 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.h +++ b/drivers/event/dlb2/pf/base/dlb2_resource.h @@ -1910,4 +1910,37 @@ void dlb2_hw_dir_cq_interrupt_set_mode(struct dlb2_hw *hw, int port_id, int mode); +/** + * dlb2_hw_enable_cq_weight() - Enable QE-weight based scheduling on an LDB port. + * @hw: dlb2_hw handle for a particular device. + * @domain_id: domain ID. + * @args: CQ weight enablement arguments. + * @resp: response structure. + * @vdev_request: indicates whether this request came from a vdev. + * @vdev_id: If vdev_request is true, this contains the vdev's ID. + * + * This function enables QE-weight based scheduling on a load-balanced port's + * CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum dlb2_error. If successful, resp->id + * contains the queue ID. + * + * Errors: + * EINVAL - The domain or port is not configured, the domainhas already been + * started, the requested limit exceeds the port's CQ depth, or this + * feature is unavailable on the device. + * EFAULT - Internal error (resp->status not set). 
+ */ +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_request, + unsigned int vdev_id); + #endif /* __DLB2_RESOURCE_H */ diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 5c80c724f1..1d0415e46f 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -41,6 +41,8 @@ #include "base/dlb2_resource.h" static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD); +static unsigned int dlb2_qe_sa_pct = 1; +static unsigned int dlb2_qid_sa_pct; static void dlb2_pf_low_level_io_init(void) @@ -80,6 +82,27 @@ dlb2_pf_get_device_version(struct dlb2_hw_dev *handle, return 0; } +static void dlb2_pf_calc_arbiter_weights(u8 *weight, + unsigned int pct) +{ + int val, i; + + /* Largest possible weight (100% SA case): 32 */ + val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS; + + /* Scale val according to the starvation avoidance percentage */ + val = (val * pct) / 100; + if (val == 0 && pct != 0) + val = 1; + + /* Prio 7 always has weight 0xff */ + weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT; + + for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--) + weight[i] = weight[i + 1] - val; +} + + static void dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) { @@ -87,6 +110,27 @@ dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw); dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw); + + /* Configure arbitration weights for QE selection */ + if (dlb2_qe_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qe_sa_pct); + + dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight); + } + + /* Configure arbitration weights for QID selection */ + if (dlb2_qid_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qid_sa_pct); + + dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight); + } + } static int @@ -578,6 +622,30 @@ dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + struct dlb2_cmd_response response = {0}; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw, + handle->domain_id, + args, + &response, + false, + 0); + args->response = response; + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -602,6 +670,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation; dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; + dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; } /* PCI DEV HOOKS */ -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
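The enqueue-side hook above is small: when a port has a CQ weight limit, dlb2_event_build_hcws() copies the low two bits of each event's impl_opaque into the QE's weight field, and the commit message maps codes 0-3 onto weights 1, 2, 4 and 8. Below is a minimal sketch of how an application could tag events with a weight code, plus a software model of the CQ-full rule; the helper names are hypothetical and not part of the patch.

#include <stdint.h>
#include <rte_eventdev.h>

/*
 * Hypothetical helper: place a 2-bit weight code in impl_opaque, where
 * dlb2_event_build_hcws() reads it (ev->impl_opaque & 3) on ports that
 * have a CQ weight limit. Codes 0..3 correspond to QE weights 1, 2, 4, 8.
 */
static inline void
app_set_qe_weight_code(struct rte_event *ev, uint8_t code)
{
	ev->impl_opaque = (uint8_t)((ev->impl_opaque & ~0x3) | (code & 0x3));
}

/*
 * Software model of the scheduling rule from the commit message: the
 * device stops scheduling to a CQ once the summed weights of the QEs it
 * holds meet or exceed the CQ's weight limit.
 */
static inline int
cq_is_weight_full(const uint8_t *codes, unsigned int n_qes, uint32_t limit)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < n_qes; i++)
		sum += 1u << (codes[i] & 0x3); /* 1, 2, 4 or 8 */

	return sum >= limit;
}

The limit itself comes from the cq_weight devarg (the same all:/range:/single syntax as qid_depth_thresh), and dlb2_verify_enable_cq_weight_args() rejects a limit of zero or one larger than the port's CQ depth.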
* [PATCH 2/2] event/dlb2: add ldb port specific COS support 2022-06-16 14:23 [PATCH 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2022-06-16 14:23 ` [PATCH 1/2] event/dlb2: add CQ weight support Timothy McDaniel @ 2022-06-16 14:23 ` Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel ` (2 more replies) 1 sibling, 3 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 14:23 UTC (permalink / raw) To: jerinj; +Cc: dev DLB supports 4 class of service domains, to aid in managing the device bandwidth across ldb ports. This commit allows specifying which ldb ports will participate in the COS scheme, which class they are a part of, and the specific bandwidth percentage associated with each class. The cumulative bandwidth associated with the 4 classes must not exceed 100%. This feature is enabled on the command line, and will be documented in the DLB2 programmers guide. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V2: Rebased against dpdk-next-eventdev --- drivers/event/dlb2/dlb2.c | 224 +++++++++++++++------ drivers/event/dlb2/dlb2_iface.c | 3 + drivers/event/dlb2/dlb2_iface.h | 3 + drivers/event/dlb2/dlb2_priv.h | 19 +- drivers/event/dlb2/pf/base/dlb2_resource.c | 66 ++++++ drivers/event/dlb2/pf/dlb2_pf.c | 21 +- 6 files changed, 266 insertions(+), 70 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index e1687e3d63..cf20f15911 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -164,6 +164,28 @@ set_cq_weight(const char *key __rte_unused, return 0; } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_port_cos(struct dlb2_eventdev *dlb2, int *port_cos) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) { + dlb2->ev_ports[q].cos_id = port_cos[q]; + dlb2->cos_ports[port_cos[q]]++; + } +} + +static void +dlb2_init_cos_bw(struct dlb2_eventdev *dlb2, + struct dlb2_cos_bw *cos_bw) +{ + int q; + for (q = 0; q < DLB2_COS_NUM_VALS; q++) + dlb2->cos_bw[q] = cos_bw->val[q]; + +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -379,12 +401,11 @@ set_dev_id(const char *key __rte_unused, } static int -set_cos(const char *key __rte_unused, +set_poll_interval(const char *key __rte_unused, const char *value, void *opaque) { - enum dlb2_cos *cos_id = opaque; - int x = 0; + int *poll_interval = opaque; int ret; if (value == NULL || opaque == NULL) { @@ -392,38 +413,83 @@ set_cos(const char *key __rte_unused, return -EINVAL; } - ret = dlb2_string_to_int(&x, value); + ret = dlb2_string_to_int(poll_interval, value); if (ret < 0) return ret; - if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) { - DLB2_LOG_ERR( - "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n", - x); + return 0; +} + +static int +set_port_cos(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_port_cos *port_cos = opaque; + int first, last, cos_id, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - *cos_id = x; + /* command line override may take one of the following 3 forms: + * port_cos=all:<cos_id> ... all ports + * port_cos=port-port:<cos_id> ... a range of ports + * port_cos=port:<cos_id> ... 
just one port + */ + if (sscanf(value, "all:%d", &cos_id) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &cos_id) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be all:val, port-port:val, or port:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n"); + return -EINVAL; + } + + if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) { + DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + port_cos->cos_id[i] = cos_id; /* indexed by port */ return 0; } static int -set_poll_interval(const char *key __rte_unused, - const char *value, - void *opaque) +set_cos_bw(const char *key __rte_unused, + const char *value, + void *opaque) { - int *poll_interval = opaque; - int ret; + struct dlb2_cos_bw *cos_bw = opaque; - if (value == NULL || opaque == NULL) { + if (opaque == NULL) { DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - ret = dlb2_string_to_int(poll_interval, value); - if (ret < 0) - return ret; + /* format must be %d,%d,%d,%d */ + + if (sscanf(value, "%d,%d,%d,%d", &cos_bw->val[0], &cos_bw->val[1], + &cos_bw->val[2], &cos_bw->val[3]) != 4) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } + if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[4] > 100) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } return 0; } @@ -653,11 +719,13 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev, } static int -dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, +dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + struct dlb2_hw_dev *handle, const struct dlb2_hw_rsrcs *resources_asked, uint8_t device_version) { int ret = 0; + uint32_t cos_ports = 0; struct dlb2_create_sched_domain_args *cfg; if (resources_asked == NULL) { @@ -683,38 +751,22 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, /* LDB ports */ - cfg->cos_strict = 0; /* Best effort */ - cfg->num_cos_ldb_ports[0] = 0; - cfg->num_cos_ldb_ports[1] = 0; - cfg->num_cos_ldb_ports[2] = 0; - cfg->num_cos_ldb_ports[3] = 0; - - switch (handle->cos_id) { - case DLB2_COS_0: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[0] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_1: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_2: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_3: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[3] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_DEFAULT: - /* all ldb ports are don't care ports from a cos perspective */ - cfg->num_ldb_ports = - resources_asked->num_ldb_ports; - break; + /* tally of ports with non default COS */ + cos_ports = dlb2->cos_ports[1] + dlb2->cos_ports[2] + + dlb2->cos_ports[3]; + + if (cos_ports > resources_asked->num_ldb_ports) { + DLB2_LOG_ERR("dlb2: num_ldb_ports < nonzero cos_ports\n"); + ret = EINVAL; + goto 
error_exit; } + cfg->cos_strict = 0; /* Best effort */ + cfg->num_cos_ldb_ports[0] = resources_asked->num_ldb_ports - cos_ports; + cfg->num_cos_ldb_ports[1] = dlb2->cos_ports[1]; + cfg->num_cos_ldb_ports[2] = dlb2->cos_ports[2]; + cfg->num_cos_ldb_ports[3] = dlb2->cos_ports[3]; + if (device_version == DLB2_HW_V2) cfg->num_ldb_credits = resources_asked->num_ldb_credits; @@ -892,7 +944,8 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev) rsrcs->num_dir_credits = dlb2->num_dir_credits_override; } - if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) { + if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs, + dlb2->version) < 0) { DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n"); return -ENODEV; } @@ -1449,12 +1502,8 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; - if (handle->cos_id == DLB2_COS_DEFAULT) - cfg.cos_id = 0; - else - cfg.cos_id = handle->cos_id; - - cfg.cos_strict = 0; + cfg.cos_id = ev_port->cos_id; + cfg.cos_strict = 0;/* best effots */ /* User controls the LDB high watermark via enqueue depth. The DIR high * watermark is equal, unless the directed credit pool is too small. @@ -4450,7 +4499,6 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2->max_num_events_override = dlb2_args->max_num_events; dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override; - dlb2->qm_instance.cos_id = dlb2_args->cos_id; dlb2->poll_interval = dlb2_args->poll_interval; dlb2->sw_credit_quanta = dlb2_args->sw_credit_quanta; dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta; @@ -4482,6 +4530,27 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_iface_hardware_init(&dlb2->qm_instance); + /* configure class of service */ + { + struct dlb2_set_cos_bw_args set_cos_bw_args = {0}; + int id; + int ret = 0; + + for (id = 0; id < DLB2_COS_NUM_VALS; id++) { + set_cos_bw_args.cos_id = id; + set_cos_bw_args.cos_id = dlb2->cos_bw[id]; + ret = dlb2_iface_set_cos_bw(&dlb2->qm_instance, + &set_cos_bw_args); + if (ret != 0) + break; + } + if (ret) { + DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n", + err); + return err; + } + } + err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode); if (err < 0) { DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n", @@ -4512,6 +4581,12 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_cq_weight(dlb2, dlb2_args->cq_weight.limit); + dlb2_init_port_cos(dlb2, + dlb2_args->port_cos.cos_id); + + dlb2_init_cos_bw(dlb2, + &dlb2_args->cos_bw); + return 0; } @@ -4567,6 +4642,8 @@ dlb2_parse_params(const char *params, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, DLB2_CQ_WEIGHT, + DLB2_PORT_COS, + DLB2_COS_BW, NULL }; if (params != NULL && params[0] != '\0') { @@ -4639,16 +4716,6 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process(kvlist, DLB2_COS_ARG, - set_cos, - &dlb2_args->cos_id); - if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing cos parameter", - name); - rte_kvargs_free(kvlist); - return ret; - } - ret = rte_kvargs_process(kvlist, DLB2_POLL_INTERVAL_ARG, set_poll_interval, &dlb2_args->poll_interval); @@ -4724,6 +4791,29 @@ dlb2_parse_params(const char *params, return ret; } + ret = rte_kvargs_process(kvlist, + DLB2_PORT_COS, + set_port_cos, + &dlb2_args->port_cos); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing port cos", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_COS_BW, + set_cos_bw, + 
&dlb2_args->cos_bw); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cos_bw", + name); + rte_kvargs_free(kvlist); + return ret; + } + + rte_kvargs_free(kvlist); } } diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index b77faa967c..100db434d0 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -76,3 +76,6 @@ int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); + +int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index fef717392f..dc0c446ce8 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -76,4 +76,7 @@ extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); +extern int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index 63b092fc47..528e2ede61 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -45,6 +45,8 @@ #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" #define DLB2_CQ_WEIGHT "cq_weight" +#define DLB2_PORT_COS "port_cos" +#define DLB2_COS_BW "cos_bw" /* Begin HW related defines and structs */ @@ -416,7 +418,8 @@ enum dlb2_cos { DLB2_COS_0 = 0, DLB2_COS_1, DLB2_COS_2, - DLB2_COS_3 + DLB2_COS_3, + DLB2_COS_NUM_VALS }; struct dlb2_hw_dev { @@ -424,7 +427,6 @@ struct dlb2_hw_dev { struct dlb2_hw_resource_info info; void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */ uint32_t domain_id; - enum dlb2_cos cos_id; rte_spinlock_t resource_lock; /* for MP support */ } __rte_cache_aligned; @@ -529,6 +531,7 @@ struct dlb2_eventdev_port { bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ uint32_t cq_weight; /* DLB2.5 and above ldb ports only */ + int cos_id; /*ldb port class of service */ } __rte_cache_aligned; struct dlb2_queue { @@ -623,6 +626,8 @@ struct dlb2_eventdev { uint32_t credit_pool __rte_cache_aligned; }; }; + uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */ + uint32_t cos_bw[DLB2_COS_NUM_VALS]; /* bandwidth per cos domain */ }; /* used for collecting and passing around the dev args */ @@ -634,6 +639,14 @@ struct dlb2_cq_weight { int limit[DLB2_MAX_NUM_LDB_PORTS]; }; +struct dlb2_port_cos { + int cos_id[DLB2_MAX_NUM_LDB_PORTS]; +}; + +struct dlb2_cos_bw { + int val[DLB2_COS_NUM_VALS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -648,6 +661,8 @@ struct dlb2_devargs { bool vector_opts_enabled; int max_cq_depth; struct dlb2_cq_weight cq_weight; + struct dlb2_port_cos port_cos; + struct dlb2_cos_bw cos_bw; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index e52a896bad..d4471de5a0 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6493,3 +6493,69 @@ int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, return 0; } + +static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw) +{ + DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n"); 
+ DLB2_HW_DBG(hw, "\tCoS ID: %u\n", cos_id); + DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw); +} + +#define DLB2_MAX_BW_PCT 100 + +/** + * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a + * port class-of-service. + * @hw: dlb2_hw handle for a particular device. + * @cos_id: class-of-service ID. + * @bandwidth: class-of-service bandwidth. + * + * Return: + * Returns 0 upon success, < 0 otherwise. + * + * Errors: + * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would + * cause the total bandwidth across all classes of service to exceed + * 100%. + */ +int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth) +{ + unsigned int i; + u32 reg; + u8 total; + + if (cos_id >= DLB2_NUM_COS_DOMAINS) + return -EINVAL; + + if (bandwidth > DLB2_MAX_BW_PCT) + return -EINVAL; + + total = 0; + + for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) + total += (i == cos_id) ? bandwidth : hw->cos_reservation[i]; + + if (total > DLB2_MAX_BW_PCT) + return -EINVAL; + + reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id)); + + /* + * Normalize the bandwidth to a value in the range 0-255. Integer + * division may leave unreserved scheduling slots; these will be + * divided among the 4 classes of service. + */ + DLB2_BITS_SET(reg, (bandwidth * 256) / 100, DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg); + + reg = 0; + DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER); + /* Atomically transfer the newly configured service weight */ + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg); + + dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth); + + hw->cos_reservation[cos_id] = bandwidth; + + return 0; +} diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 1d0415e46f..0627f06a6e 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -646,6 +646,25 @@ dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw, + args->cos_id, + args->bandwidth); + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -671,6 +690,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; + dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth; } /* PCI DEV HOOKS */ @@ -684,7 +704,6 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) .max_num_events = DLB2_MAX_NUM_LDB_CREDITS, .num_dir_credits_override = -1, .qid_depth_thresholds = { {0} }, - .cos_id = DLB2_COS_DEFAULT, .poll_interval = DLB2_POLL_INTERVAL_DEFAULT, .sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT, .hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ, -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
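For reference, here is a stand-alone sketch of the cos_bw check described above: four comma-separated class-of-service bandwidth percentages whose total must not exceed 100. The function name is assumed, not the driver's; note that it sums val[0]..val[3], whereas the set_cos_bw() hunk above adds cos_bw->val[4], which appears to be the out-of-array access that the v2 cover letter below says was fixed.

#include <stdio.h>

#define DLB2_COS_NUM_VALS 4
#define DLB2_MAX_BW_PCT   100

/* Validate a "bw0,bw1,bw2,bw3" string; each value 0-100, sum <= 100. */
static int
parse_cos_bw(const char *value, int val[DLB2_COS_NUM_VALS])
{
	int i, total = 0;

	if (sscanf(value, "%d,%d,%d,%d",
		   &val[0], &val[1], &val[2], &val[3]) != DLB2_COS_NUM_VALS)
		return -1;

	for (i = 0; i < DLB2_COS_NUM_VALS; i++) {
		if (val[i] < 0 || val[i] > DLB2_MAX_BW_PCT)
			return -1;
		total += val[i];
	}

	return (total <= DLB2_MAX_BW_PCT) ? 0 : -1;
}

As an illustrative (unverified) devargs string, port_cos=0-15:1,cos_bw=20,30,30,20 would place ports 0-15 in class 1 and split the device bandwidth 20/30/30/20 across the four classes, using the all:/range:/single port_cos forms parsed by set_port_cos().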
* [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches 2022-06-16 14:23 ` [PATCH 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel @ 2022-06-16 19:55 ` Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 1/2] event/dlb2: add CQ weight support Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 2022-06-16 20:36 ` [PATCH v3 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2022-06-16 22:21 ` [PATCH v4 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2 siblings, 2 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 19:55 UTC (permalink / raw) To: jerinj; +Cc: dev This patch simply rebases the following previously submitted patches against the latest dpdk-next-eventdev (for-main) branch. Rebase DLB2 port_cos and cq_weight patches This patch simply rebases the following previously submitted patches against the latest dpdk-next-eventdev (for-main) branch. Changes since V1: Fixed a bug in the port-specific cos patch where we were accessing beyond the end of the cos_bw array. Timothy McDaniel (2): event/dlb2: add CQ weight support event/dlb2: add ldb port specific COS support drivers/event/dlb2/dlb2.c | 323 ++++++++++++++++----- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 7 + drivers/event/dlb2/dlb2_iface.h | 8 + drivers/event/dlb2/dlb2_priv.h | 29 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 ++ drivers/event/dlb2/pf/base/dlb2_resource.c | 286 ++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 +++ drivers/event/dlb2/pf/dlb2_pf.c | 90 +++++- 10 files changed, 745 insertions(+), 76 deletions(-) -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
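Both revisions of patch 1 (above and below) also seed the QE and QID arbiters from a starvation-avoidance percentage in dlb2_pf_hardware_init(). The following worked example mirrors dlb2_pf_calc_arbiter_weights() with the default dlb2_qe_sa_pct = 1; the constant values are inferred from the in-line comments ("Largest possible weight (100% SA case): 32", "Prio 7 always has weight 0xff") rather than taken from the headers.

#include <stdio.h>

#define DLB2_MAX_WEIGHT      0xff /* priority 7 weight, per the comment */
#define DLB2_NUM_ARB_WEIGHTS 8    /* inferred: (0xff + 1) / 8 == 32 */

/* Mirror of dlb2_pf_calc_arbiter_weights(), for illustration only. */
static void
calc_arbiter_weights(unsigned char *weight, unsigned int pct)
{
	int val, i;

	val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS; /* 32 at 100% SA */
	val = (val * pct) / 100;
	if (val == 0 && pct != 0)
		val = 1;

	weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT;
	for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--)
		weight[i] = weight[i + 1] - val;
}

int main(void)
{
	unsigned char w[DLB2_NUM_ARB_WEIGHTS];
	int i;

	calc_arbiter_weights(w, 1); /* default dlb2_qe_sa_pct */
	for (i = 0; i < DLB2_NUM_ARB_WEIGHTS; i++)
		printf("prio %d -> weight %d\n", i, w[i]);
	/* yields 248, 249, ..., 255: a step of 1 per priority at 1% SA */
	return 0;
}

At the other extreme, dlb2_qid_sa_pct defaults to 0, which leaves the step at 0 and gives every priority the same weight of 0xff for QID arbitration.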
* [PATCH v2 1/2] event/dlb2: add CQ weight support 2022-06-16 19:55 ` [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel @ 2022-06-16 19:55 ` Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 1 sibling, 0 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 19:55 UTC (permalink / raw) To: jerinj; +Cc: dev Enabling the weight limit on a CQ allows the enqueued QEs' 2-bit weight value (representing weights of 1, 2, 4, and 8) to factor into whether a CQ is full. If the sum of the weights of the QEs in the CQ meet or exceed its weight limit, DLB will stop scheduling QEs to it (until software pops enough QEs from the CQ to reverse that). CQ weight support is enabled via the command line, and applies to DLB 2.5 (and above) load balanced ports. The DLB2 documentation will be updated with further details. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V3: Rebased against dpdk-next-eventdev V2: Added patch dependency line in commit message --- drivers/event/dlb2/dlb2.c | 99 +++++++++- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 4 + drivers/event/dlb2/dlb2_iface.h | 5 + drivers/event/dlb2/dlb2_priv.h | 10 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 +++ drivers/event/dlb2/pf/base/dlb2_resource.c | 220 +++++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 ++++ drivers/event/dlb2/pf/dlb2_pf.c | 69 +++++++ 10 files changed, 479 insertions(+), 6 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index 5b0b33bc77..e1687e3d63 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -107,6 +107,63 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2, } } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_cq_weight(struct dlb2_eventdev *dlb2, int *cq_weight) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) + dlb2->ev_ports[q].cq_weight = cq_weight[q]; +} + +static int +set_cq_weight(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_cq_weight *cq_weight = opaque; + int first, last, weight, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); + return -EINVAL; + } + + /* command line override may take one of the following 3 forms: + * qid_depth_thresh=all:<threshold_value> ... all queues + * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues + * qid_depth_thresh=qid:<threshold_value> ... just one queue + */ + if (sscanf(value, "all:%d", &weight) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &weight) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &weight) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg. 
Should be all:val, qid-qid:val, or qid:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port qe weight arg, invalid port value\n"); + return -EINVAL; + } + + if (weight < 0 || weight > DLB2_MAX_CQ_DEPTH_OVERRIDE) { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg, must be < cq depth\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + cq_weight->limit[i] = weight; /* indexed by qid */ + + return 0; +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -1372,13 +1429,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, return -EINVAL; if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n", DLB2_MIN_CQ_DEPTH); return -EINVAL; } - if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 && + ev_port->cq_weight > dequeue_depth) { + DLB2_LOG_ERR("dlb2: invalid cq depth, must be >= cq weight%d\n", DLB2_MIN_ENQUEUE_DEPTH); return -EINVAL; } @@ -1450,8 +1508,24 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, if (dlb2->version == DLB2_HW_V2) { qm_port->cached_ldb_credits = 0; qm_port->cached_dir_credits = 0; - } else + if (ev_port->cq_weight) { + struct dlb2_enable_cq_weight_args cq_weight_args = {0}; + + cq_weight_args.port_id = qm_port->id; + cq_weight_args.limit = ev_port->cq_weight; + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); + if (ret < 0) { + DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", + ret, + dlb2_error_strings[cfg.response. status]); + goto error_exit; + } + } + qm_port->cq_weight = ev_port->cq_weight; + } else { qm_port->cached_credits = 0; + qm_port->cq_weight = 0; + } /* CQs with depth < 8 use an 8-entry queue, but withhold credits so * the effective depth is smaller. 
@@ -4435,6 +4509,9 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_queue_depth_thresholds(dlb2, dlb2_args->qid_depth_thresholds.val); + dlb2_init_cq_weight(dlb2, + dlb2_args->cq_weight.limit); + return 0; } @@ -4489,6 +4566,7 @@ dlb2_parse_params(const char *params, DLB2_DEPTH_THRESH_ARG, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, + DLB2_CQ_WEIGHT, NULL }; if (params != NULL && params[0] != '\0') { @@ -4629,7 +4707,18 @@ dlb2_parse_params(const char *params, set_max_cq_depth, &dlb2_args->max_cq_depth); if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing vector opts enabled", + DLB2_LOG_ERR("%s: Error parsing max cq depth", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_CQ_WEIGHT, + set_cq_weight, + &dlb2_args->cq_weight); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cq weight on", name); rte_kvargs_free(kvlist); return ret; diff --git a/drivers/event/dlb2/dlb2_avx512.c b/drivers/event/dlb2/dlb2_avx512.c index d4aaa04a01..3c8906af9d 100644 --- a/drivers/event/dlb2/dlb2_avx512.c +++ b/drivers/event/dlb2/dlb2_avx512.c @@ -237,6 +237,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[3].data = ev[3].u64; } + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index 5471dd8da7..b77faa967c 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -72,3 +72,7 @@ int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index b508eb0936..fef717392f 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -71,4 +71,9 @@ extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index df69d57b83..63b092fc47 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -44,6 +44,7 @@ #define DLB2_DEPTH_THRESH_ARG "default_depth_thresh" #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" +#define DLB2_CQ_WEIGHT "cq_weight" /* Begin HW related defines and structs */ @@ -249,7 +250,7 @@ struct dlb2_enqueue_qe { /* Word 4 */ uint16_t lock_id; uint8_t meas_lat:1; - uint8_t rsvd1:2; + uint8_t weight:2; /* DLB 2.5 and above */ uint8_t no_dec:1; uint8_t cmp_id:4; union { @@ -378,6 +379,7 @@ struct dlb2_port { bool use_scalar; /* force usage of scalar code */ uint16_t hw_credit_quanta; bool use_avx512; + uint32_t cq_weight; }; /* Per-process per-port mmio and memory pointers */ @@ -526,6 +528,7 @@ struct dlb2_eventdev_port { /* enq_configured is set when the qm port is created */ bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ + uint32_t 
cq_weight; /* DLB2.5 and above ldb ports only */ } __rte_cache_aligned; struct dlb2_queue { @@ -627,6 +630,10 @@ struct dlb2_qid_depth_thresholds { int val[DLB2_MAX_NUM_QUEUES_ALL]; }; +struct dlb2_cq_weight { + int limit[DLB2_MAX_NUM_LDB_PORTS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -640,6 +647,7 @@ struct dlb2_devargs { int default_depth_thresh; bool vector_opts_enabled; int max_cq_depth; + struct dlb2_cq_weight cq_weight; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/dlb2_sse.c b/drivers/event/dlb2/dlb2_sse.c index 8fc12d47f7..248d7519d5 100644 --- a/drivers/event/dlb2/dlb2_sse.c +++ b/drivers/event/dlb2/dlb2_sse.c @@ -189,6 +189,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[2].data = ev[2].u64; qe[3].data = ev[3].u64; + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_user.h b/drivers/event/dlb2/dlb2_user.h index 9760e9bda6..901e2e0c66 100644 --- a/drivers/event/dlb2/dlb2_user.h +++ b/drivers/event/dlb2/dlb2_user.h @@ -47,6 +47,8 @@ enum dlb2_error { DLB2_ST_NO_MEMORY, DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL, DLB2_ST_INVALID_COS_ID, + DLB2_ST_INVALID_CQ_WEIGHT_LIMIT, + DLB2_ST_FEATURE_UNAVAILABLE, }; static const char dlb2_error_strings[][128] = { @@ -87,6 +89,8 @@ static const char dlb2_error_strings[][128] = { "DLB2_ST_NO_MEMORY", "DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL", "DLB2_ST_INVALID_COS_ID", + "DLB2_ST_INVALID_CQ_WEIGHT_LIMIT", + "DLB2_ST_FEATURE_UNAVAILABLE", }; struct dlb2_cmd_response { @@ -687,6 +691,31 @@ struct dlb2_pending_port_unmaps_args { __u32 padding0; }; +/* + * DLB2_DOMAIN_CMD_ENABLE_CQ_WEIGHT: Enable QE-weight based scheduling on a + * load-balanced port's CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. The QE weight limit must be non-zero and cannot exceed the + * CQ's depth. + * + * Input parameters: + * - port_id: Load-balanced port ID. + * - limit: QE weight limit. + * + * Output parameters: + * - response.status: Detailed error code. In certain cases, such as if the + * ioctl request arg is invalid, the driver won't set status. + * - response.id: number of unmaps in progress. + */ +struct dlb2_enable_cq_weight_args { + /* Output parameters */ + struct dlb2_cmd_response response; + /* Input parameters */ + __u32 port_id; + __u32 limit; +}; + /* * Mapping sizes for memory mapping the consumer queue (CQ) memory space, and * producer port (PP) MMIO space. diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index 4011c24aef..e52a896bad 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6273,3 +6273,223 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw, return 0; } +/** + * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. + * + * weight[N] programs priority N's weight. 
In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3); + DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg); +} + +/** + * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. 
+ * + * weight[N] programs priority N's weight. In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg); +} + +static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + bool vdev_req, + unsigned int vdev_id) +{ + DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n"); + if (vdev_req) + DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id); + DLB2_HW_DBG(hw, "\tDomain ID: %d\n", + domain_id); + DLB2_HW_DBG(hw, "\tPort ID: %d\n", + args->port_id); + DLB2_HW_DBG(hw, "\tLimit: %d\n", + args->limit); +} + +static int +dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + + if (hw->ver == DLB2_HW_V2) { + resp->status = DLB2_ST_FEATURE_UNAVAILABLE; + return -EINVAL; + } + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + + if (!domain) { + resp->status = DLB2_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + if (!domain->configured) { + resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED; + return -EINVAL; + } + + if (domain->started) { + resp->status = DLB2_ST_DOMAIN_STARTED; + return -EINVAL; + } + + port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain); + if (!port || !port->configured) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + if (args->limit == 0 || args->limit > port->cq_depth) { + resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT; + return -EINVAL; + } + + return 0; +} + +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + int ret, id; + u32 reg = 0; + + dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id); + + /* + * Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
+ */ + ret = dlb2_verify_enable_cq_weight_args(hw, + domain_id, + args, + resp, + vdev_req, + vdev_id); + if (ret) + return ret; + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + if (!domain) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); + if (!port) { + DLB2_HW_ERR(hw, + "[%s(): %d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V); + DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT); + + DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg); + + resp->status = 0; + + return 0; +} diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.h b/drivers/event/dlb2/pf/base/dlb2_resource.h index 684049cd6e..a7e6c90888 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.h +++ b/drivers/event/dlb2/pf/base/dlb2_resource.h @@ -1910,4 +1910,37 @@ void dlb2_hw_dir_cq_interrupt_set_mode(struct dlb2_hw *hw, int port_id, int mode); +/** + * dlb2_hw_enable_cq_weight() - Enable QE-weight based scheduling on an LDB port. + * @hw: dlb2_hw handle for a particular device. + * @domain_id: domain ID. + * @args: CQ weight enablement arguments. + * @resp: response structure. + * @vdev_request: indicates whether this request came from a vdev. + * @vdev_id: If vdev_request is true, this contains the vdev's ID. + * + * This function enables QE-weight based scheduling on a load-balanced port's + * CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum dlb2_error. If successful, resp->id + * contains the queue ID. + * + * Errors: + * EINVAL - The domain or port is not configured, the domainhas already been + * started, the requested limit exceeds the port's CQ depth, or this + * feature is unavailable on the device. + * EFAULT - Internal error (resp->status not set). 
+ */ +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_request, + unsigned int vdev_id); + #endif /* __DLB2_RESOURCE_H */ diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 5c80c724f1..1d0415e46f 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -41,6 +41,8 @@ #include "base/dlb2_resource.h" static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD); +static unsigned int dlb2_qe_sa_pct = 1; +static unsigned int dlb2_qid_sa_pct; static void dlb2_pf_low_level_io_init(void) @@ -80,6 +82,27 @@ dlb2_pf_get_device_version(struct dlb2_hw_dev *handle, return 0; } +static void dlb2_pf_calc_arbiter_weights(u8 *weight, + unsigned int pct) +{ + int val, i; + + /* Largest possible weight (100% SA case): 32 */ + val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS; + + /* Scale val according to the starvation avoidance percentage */ + val = (val * pct) / 100; + if (val == 0 && pct != 0) + val = 1; + + /* Prio 7 always has weight 0xff */ + weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT; + + for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--) + weight[i] = weight[i + 1] - val; +} + + static void dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) { @@ -87,6 +110,27 @@ dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw); dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw); + + /* Configure arbitration weights for QE selection */ + if (dlb2_qe_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qe_sa_pct); + + dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight); + } + + /* Configure arbitration weights for QID selection */ + if (dlb2_qid_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qid_sa_pct); + + dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight); + } + } static int @@ -578,6 +622,30 @@ dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + struct dlb2_cmd_response response = {0}; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw, + handle->domain_id, + args, + &response, + false, + 0); + args->response = response; + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -602,6 +670,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation; dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; + dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; } /* PCI DEV HOOKS */ -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
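For readers checking the starvation-avoidance arithmetic in dlb2_pf_calc_arbiter_weights() above, the standalone sketch below reproduces the same stepping scheme outside the driver. The constants 0xff and 8 stand in for DLB2_MAX_WEIGHT and DLB2_NUM_ARB_WEIGHTS, as implied by the "Prio 7 always has weight 0xff" and "Largest possible weight ... 32" comments; they, and the snippet as a whole, are illustrative assumptions rather than part of the patch.

    #include <stdio.h>

    #define MAX_WEIGHT      0xff  /* assumed value of DLB2_MAX_WEIGHT */
    #define NUM_ARB_WEIGHTS 8     /* assumed value of DLB2_NUM_ARB_WEIGHTS */

    /* Same stepping scheme as dlb2_pf_calc_arbiter_weights() */
    static void calc_arbiter_weights(unsigned char *weight, unsigned int pct)
    {
            int val, i;

            /* Largest possible step (100% SA case): (255 + 1) / 8 = 32 */
            val = (MAX_WEIGHT + 1) / NUM_ARB_WEIGHTS;

            /* Scale the step by the starvation-avoidance percentage */
            val = (val * pct) / 100;
            if (val == 0 && pct != 0)
                    val = 1;

            /* Highest priority always gets the maximum weight */
            weight[NUM_ARB_WEIGHTS - 1] = MAX_WEIGHT;

            /* Each lower priority steps down by val */
            for (i = NUM_ARB_WEIGHTS - 2; i >= 0; i--)
                    weight[i] = weight[i + 1] - val;
    }

    int main(void)
    {
            unsigned char w[NUM_ARB_WEIGHTS];
            int i;

            /* pct = 1 mirrors the default dlb2_qe_sa_pct above */
            calc_arbiter_weights(w, 1);

            for (i = 0; i < NUM_ARB_WEIGHTS; i++)
                    printf("weight[%d] = %u\n", i, w[i]);

            return 0;
    }

With pct = 1 this prints 248 through 255 (priority 7 keeps the maximum weight and each lower priority trails by one unit); with pct = 100 the step is 32 and the weights become 31, 63, ..., 223, 255, matching the 100% SA comment.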
* [PATCH v2 2/2] event/dlb2: add ldb port specific COS support 2022-06-16 19:55 ` [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 1/2] event/dlb2: add CQ weight support Timothy McDaniel @ 2022-06-16 19:55 ` Timothy McDaniel 1 sibling, 0 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 19:55 UTC (permalink / raw) To: jerinj; +Cc: dev DLB supports 4 class of service domains, to aid in managing the device bandwidth across ldb ports. This commit allows specifying which ldb ports will participate in the COS scheme, which class they are a part of, and the specific bandwidth percentage associated with each class. The cumulative bandwidth associated with the 4 classes must not exceed 100%. This feature is enabled on the command line, and will be documented in the DLB2 programmers guide. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V3: fixed a typo/bug that caused us to read beyond the end of an array V2: Rebased against dpdk-next-eventdev --- drivers/event/dlb2/dlb2.c | 224 +++++++++++++++------ drivers/event/dlb2/dlb2_iface.c | 3 + drivers/event/dlb2/dlb2_iface.h | 3 + drivers/event/dlb2/dlb2_priv.h | 19 +- drivers/event/dlb2/pf/base/dlb2_resource.c | 66 ++++++ drivers/event/dlb2/pf/dlb2_pf.c | 21 +- 6 files changed, 266 insertions(+), 70 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index e1687e3d63..f3382b5d2a 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -164,6 +164,28 @@ set_cq_weight(const char *key __rte_unused, return 0; } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_port_cos(struct dlb2_eventdev *dlb2, int *port_cos) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) { + dlb2->ev_ports[q].cos_id = port_cos[q]; + dlb2->cos_ports[port_cos[q]]++; + } +} + +static void +dlb2_init_cos_bw(struct dlb2_eventdev *dlb2, + struct dlb2_cos_bw *cos_bw) +{ + int q; + for (q = 0; q < DLB2_COS_NUM_VALS; q++) + dlb2->cos_bw[q] = cos_bw->val[q]; + +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -379,12 +401,11 @@ set_dev_id(const char *key __rte_unused, } static int -set_cos(const char *key __rte_unused, +set_poll_interval(const char *key __rte_unused, const char *value, void *opaque) { - enum dlb2_cos *cos_id = opaque; - int x = 0; + int *poll_interval = opaque; int ret; if (value == NULL || opaque == NULL) { @@ -392,38 +413,83 @@ set_cos(const char *key __rte_unused, return -EINVAL; } - ret = dlb2_string_to_int(&x, value); + ret = dlb2_string_to_int(poll_interval, value); if (ret < 0) return ret; - if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) { - DLB2_LOG_ERR( - "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n", - x); + return 0; +} + +static int +set_port_cos(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_port_cos *port_cos = opaque; + int first, last, cos_id, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - *cos_id = x; + /* command line override may take one of the following 3 forms: + * port_cos=all:<cos_id> ... all ports + * port_cos=port-port:<cos_id> ... a range of ports + * port_cos=port:<cos_id> ... 
just one port + */ + if (sscanf(value, "all:%d", &cos_id) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &cos_id) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be all:val, port-port:val, or port:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n"); + return -EINVAL; + } + + if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) { + DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + port_cos->cos_id[i] = cos_id; /* indexed by port */ return 0; } static int -set_poll_interval(const char *key __rte_unused, - const char *value, - void *opaque) +set_cos_bw(const char *key __rte_unused, + const char *value, + void *opaque) { - int *poll_interval = opaque; - int ret; + struct dlb2_cos_bw *cos_bw = opaque; - if (value == NULL || opaque == NULL) { + if (opaque == NULL) { DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - ret = dlb2_string_to_int(poll_interval, value); - if (ret < 0) - return ret; + /* format must be %d,%d,%d,%d */ + + if (sscanf(value, "%d,%d,%d,%d", &cos_bw->val[0], &cos_bw->val[1], + &cos_bw->val[2], &cos_bw->val[3]) != 4) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } + if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } return 0; } @@ -653,11 +719,13 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev, } static int -dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, +dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + struct dlb2_hw_dev *handle, const struct dlb2_hw_rsrcs *resources_asked, uint8_t device_version) { int ret = 0; + uint32_t cos_ports = 0; struct dlb2_create_sched_domain_args *cfg; if (resources_asked == NULL) { @@ -683,38 +751,22 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, /* LDB ports */ - cfg->cos_strict = 0; /* Best effort */ - cfg->num_cos_ldb_ports[0] = 0; - cfg->num_cos_ldb_ports[1] = 0; - cfg->num_cos_ldb_ports[2] = 0; - cfg->num_cos_ldb_ports[3] = 0; - - switch (handle->cos_id) { - case DLB2_COS_0: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[0] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_1: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_2: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_3: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[3] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_DEFAULT: - /* all ldb ports are don't care ports from a cos perspective */ - cfg->num_ldb_ports = - resources_asked->num_ldb_ports; - break; + /* tally of ports with non default COS */ + cos_ports = dlb2->cos_ports[1] + dlb2->cos_ports[2] + + dlb2->cos_ports[3]; + + if (cos_ports > resources_asked->num_ldb_ports) { + DLB2_LOG_ERR("dlb2: num_ldb_ports < nonzero cos_ports\n"); + ret = EINVAL; + goto 
error_exit; } + cfg->cos_strict = 0; /* Best effort */ + cfg->num_cos_ldb_ports[0] = resources_asked->num_ldb_ports - cos_ports; + cfg->num_cos_ldb_ports[1] = dlb2->cos_ports[1]; + cfg->num_cos_ldb_ports[2] = dlb2->cos_ports[2]; + cfg->num_cos_ldb_ports[3] = dlb2->cos_ports[3]; + if (device_version == DLB2_HW_V2) cfg->num_ldb_credits = resources_asked->num_ldb_credits; @@ -892,7 +944,8 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev) rsrcs->num_dir_credits = dlb2->num_dir_credits_override; } - if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) { + if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs, + dlb2->version) < 0) { DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n"); return -ENODEV; } @@ -1449,12 +1502,8 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; - if (handle->cos_id == DLB2_COS_DEFAULT) - cfg.cos_id = 0; - else - cfg.cos_id = handle->cos_id; - - cfg.cos_strict = 0; + cfg.cos_id = ev_port->cos_id; + cfg.cos_strict = 0;/* best effots */ /* User controls the LDB high watermark via enqueue depth. The DIR high * watermark is equal, unless the directed credit pool is too small. @@ -4450,7 +4499,6 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2->max_num_events_override = dlb2_args->max_num_events; dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override; - dlb2->qm_instance.cos_id = dlb2_args->cos_id; dlb2->poll_interval = dlb2_args->poll_interval; dlb2->sw_credit_quanta = dlb2_args->sw_credit_quanta; dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta; @@ -4482,6 +4530,27 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_iface_hardware_init(&dlb2->qm_instance); + /* configure class of service */ + { + struct dlb2_set_cos_bw_args set_cos_bw_args = {0}; + int id; + int ret = 0; + + for (id = 0; id < DLB2_COS_NUM_VALS; id++) { + set_cos_bw_args.cos_id = id; + set_cos_bw_args.cos_id = dlb2->cos_bw[id]; + ret = dlb2_iface_set_cos_bw(&dlb2->qm_instance, + &set_cos_bw_args); + if (ret != 0) + break; + } + if (ret) { + DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n", + err); + return err; + } + } + err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode); if (err < 0) { DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n", @@ -4512,6 +4581,12 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_cq_weight(dlb2, dlb2_args->cq_weight.limit); + dlb2_init_port_cos(dlb2, + dlb2_args->port_cos.cos_id); + + dlb2_init_cos_bw(dlb2, + &dlb2_args->cos_bw); + return 0; } @@ -4567,6 +4642,8 @@ dlb2_parse_params(const char *params, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, DLB2_CQ_WEIGHT, + DLB2_PORT_COS, + DLB2_COS_BW, NULL }; if (params != NULL && params[0] != '\0') { @@ -4639,16 +4716,6 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process(kvlist, DLB2_COS_ARG, - set_cos, - &dlb2_args->cos_id); - if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing cos parameter", - name); - rte_kvargs_free(kvlist); - return ret; - } - ret = rte_kvargs_process(kvlist, DLB2_POLL_INTERVAL_ARG, set_poll_interval, &dlb2_args->poll_interval); @@ -4724,6 +4791,29 @@ dlb2_parse_params(const char *params, return ret; } + ret = rte_kvargs_process(kvlist, + DLB2_PORT_COS, + set_port_cos, + &dlb2_args->port_cos); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing port cos", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_COS_BW, + set_cos_bw, + 
&dlb2_args->cos_bw); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cos_bw", + name); + rte_kvargs_free(kvlist); + return ret; + } + + rte_kvargs_free(kvlist); } } diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index b77faa967c..100db434d0 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -76,3 +76,6 @@ int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); + +int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index fef717392f..dc0c446ce8 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -76,4 +76,7 @@ extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); +extern int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index 63b092fc47..528e2ede61 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -45,6 +45,8 @@ #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" #define DLB2_CQ_WEIGHT "cq_weight" +#define DLB2_PORT_COS "port_cos" +#define DLB2_COS_BW "cos_bw" /* Begin HW related defines and structs */ @@ -416,7 +418,8 @@ enum dlb2_cos { DLB2_COS_0 = 0, DLB2_COS_1, DLB2_COS_2, - DLB2_COS_3 + DLB2_COS_3, + DLB2_COS_NUM_VALS }; struct dlb2_hw_dev { @@ -424,7 +427,6 @@ struct dlb2_hw_dev { struct dlb2_hw_resource_info info; void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */ uint32_t domain_id; - enum dlb2_cos cos_id; rte_spinlock_t resource_lock; /* for MP support */ } __rte_cache_aligned; @@ -529,6 +531,7 @@ struct dlb2_eventdev_port { bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ uint32_t cq_weight; /* DLB2.5 and above ldb ports only */ + int cos_id; /*ldb port class of service */ } __rte_cache_aligned; struct dlb2_queue { @@ -623,6 +626,8 @@ struct dlb2_eventdev { uint32_t credit_pool __rte_cache_aligned; }; }; + uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */ + uint32_t cos_bw[DLB2_COS_NUM_VALS]; /* bandwidth per cos domain */ }; /* used for collecting and passing around the dev args */ @@ -634,6 +639,14 @@ struct dlb2_cq_weight { int limit[DLB2_MAX_NUM_LDB_PORTS]; }; +struct dlb2_port_cos { + int cos_id[DLB2_MAX_NUM_LDB_PORTS]; +}; + +struct dlb2_cos_bw { + int val[DLB2_COS_NUM_VALS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -648,6 +661,8 @@ struct dlb2_devargs { bool vector_opts_enabled; int max_cq_depth; struct dlb2_cq_weight cq_weight; + struct dlb2_port_cos port_cos; + struct dlb2_cos_bw cos_bw; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index e52a896bad..d4471de5a0 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6493,3 +6493,69 @@ int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, return 0; } + +static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw) +{ + DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n"); 
+ DLB2_HW_DBG(hw, "\tCoS ID: %u\n", cos_id); + DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw); +} + +#define DLB2_MAX_BW_PCT 100 + +/** + * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a + * port class-of-service. + * @hw: dlb2_hw handle for a particular device. + * @cos_id: class-of-service ID. + * @bandwidth: class-of-service bandwidth. + * + * Return: + * Returns 0 upon success, < 0 otherwise. + * + * Errors: + * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would + * cause the total bandwidth across all classes of service to exceed + * 100%. + */ +int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth) +{ + unsigned int i; + u32 reg; + u8 total; + + if (cos_id >= DLB2_NUM_COS_DOMAINS) + return -EINVAL; + + if (bandwidth > DLB2_MAX_BW_PCT) + return -EINVAL; + + total = 0; + + for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) + total += (i == cos_id) ? bandwidth : hw->cos_reservation[i]; + + if (total > DLB2_MAX_BW_PCT) + return -EINVAL; + + reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id)); + + /* + * Normalize the bandwidth to a value in the range 0-255. Integer + * division may leave unreserved scheduling slots; these will be + * divided among the 4 classes of service. + */ + DLB2_BITS_SET(reg, (bandwidth * 256) / 100, DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg); + + reg = 0; + DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER); + /* Atomically transfer the newly configured service weight */ + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg); + + dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth); + + hw->cos_reservation[cos_id] = bandwidth; + + return 0; +} diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 1d0415e46f..0627f06a6e 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -646,6 +646,25 @@ dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw, + args->cos_id, + args->bandwidth); + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -671,6 +690,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; + dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth; } /* PCI DEV HOOKS */ @@ -684,7 +704,6 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) .max_num_events = DLB2_MAX_NUM_LDB_CREDITS, .num_dir_credits_override = -1, .qid_depth_thresholds = { {0} }, - .cos_id = DLB2_COS_DEFAULT, .poll_interval = DLB2_POLL_INTERVAL_DEFAULT, .sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT, .hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ, -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
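The port_cos and cos_bw devargs added above take the formats documented in set_port_cos() and set_cos_bw(); for instance, port_cos=0-3:1,cos_bw=20,30,30,20 would place ports 0-3 in class of service 1 and split the device bandwidth 20/30/30/20 across the four classes (the port range and percentages are examples only, not values from the patch). The snippet below is a hedged, standalone sketch of the range normalization that dlb2_hw_set_cos_bandwidth() performs before writing DLB2_LSP_CFG_SHDW_RANGE_COS; it is not driver code and only mirrors the (bandwidth * 256) / 100 arithmetic.

    #include <stdio.h>

    /* Mirror of the percent-to-0..255 conversion in dlb2_hw_set_cos_bandwidth().
     * Integer division truncates, so the four classes may not cover all 256
     * scheduling slots; per the patch comment, hardware divides the unreserved
     * slots among the classes.
     */
    static unsigned int cos_bw_range(unsigned int bandwidth_pct)
    {
            return (bandwidth_pct * 256) / 100;
    }

    int main(void)
    {
            static const unsigned int bw[4] = { 20, 30, 30, 20 }; /* example only */
            unsigned int i, total = 0;

            for (i = 0; i < 4; i++) {
                    total += bw[i];
                    printf("cos %u: %u%% -> range %u\n",
                           i, bw[i], cos_bw_range(bw[i]));
            }

            /* set_cos_bw() rejects configurations whose sum exceeds 100 */
            printf("total = %u%%\n", total);

            return 0;
    }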
* [PATCH v3 0/2] Rebase DLB2 port_cos and cq_weight patches 2022-06-16 14:23 ` [PATCH 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel @ 2022-06-16 20:36 ` Timothy McDaniel 2022-06-16 20:36 ` [PATCH v3 1/2] event/dlb2: add CQ weight support Timothy McDaniel 2022-06-16 20:36 ` [PATCH v3 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 2022-06-16 22:21 ` [PATCH v4 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2 siblings, 2 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 20:36 UTC (permalink / raw) To: jerinj; +Cc: dev This patch rebases the following previously submitted patches against the latest dpdk-next-eventdev (for-main) branch. Rebase DLB2 port_cos and cq_weight patches Changes since V2: Fix a misleading if statement (guard error) Changes since V1: Fixed a bug in the port-specific cos patch where we were accessing beyond the end of the cos_bw array. Timothy McDaniel (2): event/dlb2: add CQ weight support event/dlb2: add ldb port specific COS support drivers/event/dlb2/dlb2.c | 323 ++++++++++++++++----- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 7 + drivers/event/dlb2/dlb2_iface.h | 8 + drivers/event/dlb2/dlb2_priv.h | 29 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 ++ drivers/event/dlb2/pf/base/dlb2_resource.c | 282 ++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 +++ drivers/event/dlb2/pf/dlb2_pf.c | 90 +++++- 10 files changed, 741 insertions(+), 76 deletions(-) -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v3 1/2] event/dlb2: add CQ weight support 2022-06-16 20:36 ` [PATCH v3 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel @ 2022-06-16 20:36 ` Timothy McDaniel 2022-06-16 20:36 ` [PATCH v3 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 1 sibling, 0 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 20:36 UTC (permalink / raw) To: jerinj; +Cc: dev Enabling the weight limit on a CQ allows the enqueued QEs' 2-bit weight value (representing weights of 1, 2, 4, and 8) to factor into whether a CQ is full. If the sum of the weights of the QEs in the CQ meet or exceed its weight limit, DLB will stop scheduling QEs to it (until software pops enough QEs from the CQ to reverse that). CQ weight support is enabled via the command line, and applies to DLB 2.5 (and above) load balanced ports. The DLB2 documentation will be updated with further details. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V4: Fix a ler detected guard error on a debug log message V3: Rebased against dpdk-next-eventdev V2: Added patch dependency line in commit message --- drivers/event/dlb2/dlb2.c | 99 +++++++++- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 4 + drivers/event/dlb2/dlb2_iface.h | 5 + drivers/event/dlb2/dlb2_priv.h | 10 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 +++ drivers/event/dlb2/pf/base/dlb2_resource.c | 216 +++++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 ++++ drivers/event/dlb2/pf/dlb2_pf.c | 69 +++++++ 10 files changed, 475 insertions(+), 6 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index 5b0b33bc77..e1687e3d63 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -107,6 +107,63 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2, } } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_cq_weight(struct dlb2_eventdev *dlb2, int *cq_weight) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) + dlb2->ev_ports[q].cq_weight = cq_weight[q]; +} + +static int +set_cq_weight(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_cq_weight *cq_weight = opaque; + int first, last, weight, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); + return -EINVAL; + } + + /* command line override may take one of the following 3 forms: + * qid_depth_thresh=all:<threshold_value> ... all queues + * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues + * qid_depth_thresh=qid:<threshold_value> ... just one queue + */ + if (sscanf(value, "all:%d", &weight) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &weight) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &weight) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg. 
Should be all:val, qid-qid:val, or qid:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port qe weight arg, invalid port value\n"); + return -EINVAL; + } + + if (weight < 0 || weight > DLB2_MAX_CQ_DEPTH_OVERRIDE) { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg, must be < cq depth\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + cq_weight->limit[i] = weight; /* indexed by qid */ + + return 0; +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -1372,13 +1429,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, return -EINVAL; if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n", DLB2_MIN_CQ_DEPTH); return -EINVAL; } - if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 && + ev_port->cq_weight > dequeue_depth) { + DLB2_LOG_ERR("dlb2: invalid cq depth, must be >= cq weight%d\n", DLB2_MIN_ENQUEUE_DEPTH); return -EINVAL; } @@ -1450,8 +1508,24 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, if (dlb2->version == DLB2_HW_V2) { qm_port->cached_ldb_credits = 0; qm_port->cached_dir_credits = 0; - } else + if (ev_port->cq_weight) { + struct dlb2_enable_cq_weight_args cq_weight_args = {0}; + + cq_weight_args.port_id = qm_port->id; + cq_weight_args.limit = ev_port->cq_weight; + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); + if (ret < 0) { + DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", + ret, + dlb2_error_strings[cfg.response. status]); + goto error_exit; + } + } + qm_port->cq_weight = ev_port->cq_weight; + } else { qm_port->cached_credits = 0; + qm_port->cq_weight = 0; + } /* CQs with depth < 8 use an 8-entry queue, but withhold credits so * the effective depth is smaller. 
@@ -4435,6 +4509,9 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_queue_depth_thresholds(dlb2, dlb2_args->qid_depth_thresholds.val); + dlb2_init_cq_weight(dlb2, + dlb2_args->cq_weight.limit); + return 0; } @@ -4489,6 +4566,7 @@ dlb2_parse_params(const char *params, DLB2_DEPTH_THRESH_ARG, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, + DLB2_CQ_WEIGHT, NULL }; if (params != NULL && params[0] != '\0') { @@ -4629,7 +4707,18 @@ dlb2_parse_params(const char *params, set_max_cq_depth, &dlb2_args->max_cq_depth); if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing vector opts enabled", + DLB2_LOG_ERR("%s: Error parsing max cq depth", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_CQ_WEIGHT, + set_cq_weight, + &dlb2_args->cq_weight); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cq weight on", name); rte_kvargs_free(kvlist); return ret; diff --git a/drivers/event/dlb2/dlb2_avx512.c b/drivers/event/dlb2/dlb2_avx512.c index d4aaa04a01..3c8906af9d 100644 --- a/drivers/event/dlb2/dlb2_avx512.c +++ b/drivers/event/dlb2/dlb2_avx512.c @@ -237,6 +237,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[3].data = ev[3].u64; } + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index 5471dd8da7..b77faa967c 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -72,3 +72,7 @@ int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index b508eb0936..fef717392f 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -71,4 +71,9 @@ extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index df69d57b83..63b092fc47 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -44,6 +44,7 @@ #define DLB2_DEPTH_THRESH_ARG "default_depth_thresh" #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" +#define DLB2_CQ_WEIGHT "cq_weight" /* Begin HW related defines and structs */ @@ -249,7 +250,7 @@ struct dlb2_enqueue_qe { /* Word 4 */ uint16_t lock_id; uint8_t meas_lat:1; - uint8_t rsvd1:2; + uint8_t weight:2; /* DLB 2.5 and above */ uint8_t no_dec:1; uint8_t cmp_id:4; union { @@ -378,6 +379,7 @@ struct dlb2_port { bool use_scalar; /* force usage of scalar code */ uint16_t hw_credit_quanta; bool use_avx512; + uint32_t cq_weight; }; /* Per-process per-port mmio and memory pointers */ @@ -526,6 +528,7 @@ struct dlb2_eventdev_port { /* enq_configured is set when the qm port is created */ bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ + uint32_t 
cq_weight; /* DLB2.5 and above ldb ports only */ } __rte_cache_aligned; struct dlb2_queue { @@ -627,6 +630,10 @@ struct dlb2_qid_depth_thresholds { int val[DLB2_MAX_NUM_QUEUES_ALL]; }; +struct dlb2_cq_weight { + int limit[DLB2_MAX_NUM_LDB_PORTS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -640,6 +647,7 @@ struct dlb2_devargs { int default_depth_thresh; bool vector_opts_enabled; int max_cq_depth; + struct dlb2_cq_weight cq_weight; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/dlb2_sse.c b/drivers/event/dlb2/dlb2_sse.c index 8fc12d47f7..248d7519d5 100644 --- a/drivers/event/dlb2/dlb2_sse.c +++ b/drivers/event/dlb2/dlb2_sse.c @@ -189,6 +189,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[2].data = ev[2].u64; qe[3].data = ev[3].u64; + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_user.h b/drivers/event/dlb2/dlb2_user.h index 9760e9bda6..901e2e0c66 100644 --- a/drivers/event/dlb2/dlb2_user.h +++ b/drivers/event/dlb2/dlb2_user.h @@ -47,6 +47,8 @@ enum dlb2_error { DLB2_ST_NO_MEMORY, DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL, DLB2_ST_INVALID_COS_ID, + DLB2_ST_INVALID_CQ_WEIGHT_LIMIT, + DLB2_ST_FEATURE_UNAVAILABLE, }; static const char dlb2_error_strings[][128] = { @@ -87,6 +89,8 @@ static const char dlb2_error_strings[][128] = { "DLB2_ST_NO_MEMORY", "DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL", "DLB2_ST_INVALID_COS_ID", + "DLB2_ST_INVALID_CQ_WEIGHT_LIMIT", + "DLB2_ST_FEATURE_UNAVAILABLE", }; struct dlb2_cmd_response { @@ -687,6 +691,31 @@ struct dlb2_pending_port_unmaps_args { __u32 padding0; }; +/* + * DLB2_DOMAIN_CMD_ENABLE_CQ_WEIGHT: Enable QE-weight based scheduling on a + * load-balanced port's CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. The QE weight limit must be non-zero and cannot exceed the + * CQ's depth. + * + * Input parameters: + * - port_id: Load-balanced port ID. + * - limit: QE weight limit. + * + * Output parameters: + * - response.status: Detailed error code. In certain cases, such as if the + * ioctl request arg is invalid, the driver won't set status. + * - response.id: number of unmaps in progress. + */ +struct dlb2_enable_cq_weight_args { + /* Output parameters */ + struct dlb2_cmd_response response; + /* Input parameters */ + __u32 port_id; + __u32 limit; +}; + /* * Mapping sizes for memory mapping the consumer queue (CQ) memory space, and * producer port (PP) MMIO space. diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index 4011c24aef..24a8215452 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6273,3 +6273,219 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw, return 0; } +/** + * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. + * + * weight[N] programs priority N's weight. 
In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3); + DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg); +} + +/** + * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. 
+ * + * weight[N] programs priority N's weight. In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg); +} + +static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + bool vdev_req, + unsigned int vdev_id) +{ + DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n"); + DLB2_HW_DBG(hw, "\tvdev_req %d, vdev_id %d\n", vdev_req, vdev_id); + DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id); + DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id); + DLB2_HW_DBG(hw, "\tLimit: %d\n", args->limit); +} + +static int +dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + + if (hw->ver == DLB2_HW_V2) { + resp->status = DLB2_ST_FEATURE_UNAVAILABLE; + return -EINVAL; + } + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + + if (!domain) { + resp->status = DLB2_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + if (!domain->configured) { + resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED; + return -EINVAL; + } + + if (domain->started) { + resp->status = DLB2_ST_DOMAIN_STARTED; + return -EINVAL; + } + + port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain); + if (!port || !port->configured) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + if (args->limit == 0 || args->limit > port->cq_depth) { + resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT; + return -EINVAL; + } + + return 0; +} + +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + int ret, id; + u32 reg = 0; + + dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id); + + /* + * Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
+ */ + ret = dlb2_verify_enable_cq_weight_args(hw, + domain_id, + args, + resp, + vdev_req, + vdev_id); + if (ret) + return ret; + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + if (!domain) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); + if (!port) { + DLB2_HW_ERR(hw, + "[%s(): %d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V); + DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT); + + DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg); + + resp->status = 0; + + return 0; +} diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.h b/drivers/event/dlb2/pf/base/dlb2_resource.h index 684049cd6e..a7e6c90888 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.h +++ b/drivers/event/dlb2/pf/base/dlb2_resource.h @@ -1910,4 +1910,37 @@ void dlb2_hw_dir_cq_interrupt_set_mode(struct dlb2_hw *hw, int port_id, int mode); +/** + * dlb2_hw_enable_cq_weight() - Enable QE-weight based scheduling on an LDB port. + * @hw: dlb2_hw handle for a particular device. + * @domain_id: domain ID. + * @args: CQ weight enablement arguments. + * @resp: response structure. + * @vdev_request: indicates whether this request came from a vdev. + * @vdev_id: If vdev_request is true, this contains the vdev's ID. + * + * This function enables QE-weight based scheduling on a load-balanced port's + * CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum dlb2_error. If successful, resp->id + * contains the queue ID. + * + * Errors: + * EINVAL - The domain or port is not configured, the domainhas already been + * started, the requested limit exceeds the port's CQ depth, or this + * feature is unavailable on the device. + * EFAULT - Internal error (resp->status not set). 
+ */ +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_request, + unsigned int vdev_id); + #endif /* __DLB2_RESOURCE_H */ diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 5c80c724f1..1d0415e46f 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -41,6 +41,8 @@ #include "base/dlb2_resource.h" static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD); +static unsigned int dlb2_qe_sa_pct = 1; +static unsigned int dlb2_qid_sa_pct; static void dlb2_pf_low_level_io_init(void) @@ -80,6 +82,27 @@ dlb2_pf_get_device_version(struct dlb2_hw_dev *handle, return 0; } +static void dlb2_pf_calc_arbiter_weights(u8 *weight, + unsigned int pct) +{ + int val, i; + + /* Largest possible weight (100% SA case): 32 */ + val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS; + + /* Scale val according to the starvation avoidance percentage */ + val = (val * pct) / 100; + if (val == 0 && pct != 0) + val = 1; + + /* Prio 7 always has weight 0xff */ + weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT; + + for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--) + weight[i] = weight[i + 1] - val; +} + + static void dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) { @@ -87,6 +110,27 @@ dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw); dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw); + + /* Configure arbitration weights for QE selection */ + if (dlb2_qe_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qe_sa_pct); + + dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight); + } + + /* Configure arbitration weights for QID selection */ + if (dlb2_qid_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qid_sa_pct); + + dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight); + } + } static int @@ -578,6 +622,30 @@ dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + struct dlb2_cmd_response response = {0}; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw, + handle->domain_id, + args, + &response, + false, + 0); + args->response = response; + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -602,6 +670,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation; dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; + dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; } /* PCI DEV HOOKS */ -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
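A usage note on the CQ weight feature above: the PMD reads the per-event weight from the low two bits of rte_event.impl_opaque (see dlb2_event_build_hcws()), and the commit message maps the 2-bit codes onto effective weights of 1, 2, 4 and 8. The cq_weight devarg itself follows the all:<limit>, first-last:<limit> or port:<limit> forms parsed by set_cq_weight(), e.g. cq_weight=all:8 (the limit must not exceed the port's CQ depth). The helpers below are a hypothetical sketch, not part of the patch, of how an application might tag events before enqueue; only impl_opaque and rte_event_enqueue_burst() come from the rte_eventdev API, and the helper names are invented here.

    #include <stdint.h>
    #include <rte_eventdev.h>

    /* Hypothetical helper: store a 2-bit weight code (0..3) in the event.
     * The DLB2 PMD consumes only the low two bits on DLB 2.5 load-balanced
     * ports; other impl_opaque bits are preserved.
     */
    static inline void
    dlb2_example_set_event_weight(struct rte_event *ev, uint8_t weight_code)
    {
            ev->impl_opaque = (uint8_t)((ev->impl_opaque & ~0x3) | (weight_code & 0x3));
    }

    /* Hypothetical usage: mark an event as heavy (code 3) before enqueuing
     * it on a load-balanced port.
     */
    static inline uint16_t
    dlb2_example_enqueue_weighted(uint8_t dev_id, uint8_t port_id,
                                  struct rte_event *ev)
    {
            dlb2_example_set_event_weight(ev, 3);
            return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
    }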
* [PATCH v3 2/2] event/dlb2: add ldb port specific COS support 2022-06-16 20:36 ` [PATCH v3 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2022-06-16 20:36 ` [PATCH v3 1/2] event/dlb2: add CQ weight support Timothy McDaniel @ 2022-06-16 20:36 ` Timothy McDaniel 1 sibling, 0 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 20:36 UTC (permalink / raw) To: jerinj; +Cc: dev DLB supports 4 class of service domains, to aid in managing the device bandwidth across ldb ports. This commit allows specifying which ldb ports will participate in the COS scheme, which class they are a part of, and the specific bandwidth percentage associated with each class. The cumulative bandwidth associated with the 4 classes must not exceed 100%. This feature is enabled on the command line, and will be documented in the DLB2 programmers guide. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V3: fixed a typo/bug that caused us to read beyond the end of an array V2: Rebased against dpdk-next-eventdev --- drivers/event/dlb2/dlb2.c | 224 +++++++++++++++------ drivers/event/dlb2/dlb2_iface.c | 3 + drivers/event/dlb2/dlb2_iface.h | 3 + drivers/event/dlb2/dlb2_priv.h | 19 +- drivers/event/dlb2/pf/base/dlb2_resource.c | 66 ++++++ drivers/event/dlb2/pf/dlb2_pf.c | 21 +- 6 files changed, 266 insertions(+), 70 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index e1687e3d63..f3382b5d2a 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -164,6 +164,28 @@ set_cq_weight(const char *key __rte_unused, return 0; } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_port_cos(struct dlb2_eventdev *dlb2, int *port_cos) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) { + dlb2->ev_ports[q].cos_id = port_cos[q]; + dlb2->cos_ports[port_cos[q]]++; + } +} + +static void +dlb2_init_cos_bw(struct dlb2_eventdev *dlb2, + struct dlb2_cos_bw *cos_bw) +{ + int q; + for (q = 0; q < DLB2_COS_NUM_VALS; q++) + dlb2->cos_bw[q] = cos_bw->val[q]; + +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -379,12 +401,11 @@ set_dev_id(const char *key __rte_unused, } static int -set_cos(const char *key __rte_unused, +set_poll_interval(const char *key __rte_unused, const char *value, void *opaque) { - enum dlb2_cos *cos_id = opaque; - int x = 0; + int *poll_interval = opaque; int ret; if (value == NULL || opaque == NULL) { @@ -392,38 +413,83 @@ set_cos(const char *key __rte_unused, return -EINVAL; } - ret = dlb2_string_to_int(&x, value); + ret = dlb2_string_to_int(poll_interval, value); if (ret < 0) return ret; - if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) { - DLB2_LOG_ERR( - "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n", - x); + return 0; +} + +static int +set_port_cos(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_port_cos *port_cos = opaque; + int first, last, cos_id, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - *cos_id = x; + /* command line override may take one of the following 3 forms: + * port_cos=all:<cos_id> ... all ports + * port_cos=port-port:<cos_id> ... a range of ports + * port_cos=port:<cos_id> ... 
just one port + */ + if (sscanf(value, "all:%d", &cos_id) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &cos_id) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be all:val, port-port:val, or port:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n"); + return -EINVAL; + } + + if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) { + DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + port_cos->cos_id[i] = cos_id; /* indexed by port */ return 0; } static int -set_poll_interval(const char *key __rte_unused, - const char *value, - void *opaque) +set_cos_bw(const char *key __rte_unused, + const char *value, + void *opaque) { - int *poll_interval = opaque; - int ret; + struct dlb2_cos_bw *cos_bw = opaque; - if (value == NULL || opaque == NULL) { + if (opaque == NULL) { DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - ret = dlb2_string_to_int(poll_interval, value); - if (ret < 0) - return ret; + /* format must be %d,%d,%d,%d */ + + if (sscanf(value, "%d,%d,%d,%d", &cos_bw->val[0], &cos_bw->val[1], + &cos_bw->val[2], &cos_bw->val[3]) != 4) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } + if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } return 0; } @@ -653,11 +719,13 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev, } static int -dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, +dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + struct dlb2_hw_dev *handle, const struct dlb2_hw_rsrcs *resources_asked, uint8_t device_version) { int ret = 0; + uint32_t cos_ports = 0; struct dlb2_create_sched_domain_args *cfg; if (resources_asked == NULL) { @@ -683,38 +751,22 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, /* LDB ports */ - cfg->cos_strict = 0; /* Best effort */ - cfg->num_cos_ldb_ports[0] = 0; - cfg->num_cos_ldb_ports[1] = 0; - cfg->num_cos_ldb_ports[2] = 0; - cfg->num_cos_ldb_ports[3] = 0; - - switch (handle->cos_id) { - case DLB2_COS_0: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[0] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_1: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_2: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_3: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[3] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_DEFAULT: - /* all ldb ports are don't care ports from a cos perspective */ - cfg->num_ldb_ports = - resources_asked->num_ldb_ports; - break; + /* tally of ports with non default COS */ + cos_ports = dlb2->cos_ports[1] + dlb2->cos_ports[2] + + dlb2->cos_ports[3]; + + if (cos_ports > resources_asked->num_ldb_ports) { + DLB2_LOG_ERR("dlb2: num_ldb_ports < nonzero cos_ports\n"); + ret = EINVAL; + goto 
error_exit; } + cfg->cos_strict = 0; /* Best effort */ + cfg->num_cos_ldb_ports[0] = resources_asked->num_ldb_ports - cos_ports; + cfg->num_cos_ldb_ports[1] = dlb2->cos_ports[1]; + cfg->num_cos_ldb_ports[2] = dlb2->cos_ports[2]; + cfg->num_cos_ldb_ports[3] = dlb2->cos_ports[3]; + if (device_version == DLB2_HW_V2) cfg->num_ldb_credits = resources_asked->num_ldb_credits; @@ -892,7 +944,8 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev) rsrcs->num_dir_credits = dlb2->num_dir_credits_override; } - if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) { + if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs, + dlb2->version) < 0) { DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n"); return -ENODEV; } @@ -1449,12 +1502,8 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; - if (handle->cos_id == DLB2_COS_DEFAULT) - cfg.cos_id = 0; - else - cfg.cos_id = handle->cos_id; - - cfg.cos_strict = 0; + cfg.cos_id = ev_port->cos_id; + cfg.cos_strict = 0;/* best effots */ /* User controls the LDB high watermark via enqueue depth. The DIR high * watermark is equal, unless the directed credit pool is too small. @@ -4450,7 +4499,6 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2->max_num_events_override = dlb2_args->max_num_events; dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override; - dlb2->qm_instance.cos_id = dlb2_args->cos_id; dlb2->poll_interval = dlb2_args->poll_interval; dlb2->sw_credit_quanta = dlb2_args->sw_credit_quanta; dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta; @@ -4482,6 +4530,27 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_iface_hardware_init(&dlb2->qm_instance); + /* configure class of service */ + { + struct dlb2_set_cos_bw_args set_cos_bw_args = {0}; + int id; + int ret = 0; + + for (id = 0; id < DLB2_COS_NUM_VALS; id++) { + set_cos_bw_args.cos_id = id; + set_cos_bw_args.cos_id = dlb2->cos_bw[id]; + ret = dlb2_iface_set_cos_bw(&dlb2->qm_instance, + &set_cos_bw_args); + if (ret != 0) + break; + } + if (ret) { + DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n", + err); + return err; + } + } + err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode); if (err < 0) { DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n", @@ -4512,6 +4581,12 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_cq_weight(dlb2, dlb2_args->cq_weight.limit); + dlb2_init_port_cos(dlb2, + dlb2_args->port_cos.cos_id); + + dlb2_init_cos_bw(dlb2, + &dlb2_args->cos_bw); + return 0; } @@ -4567,6 +4642,8 @@ dlb2_parse_params(const char *params, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, DLB2_CQ_WEIGHT, + DLB2_PORT_COS, + DLB2_COS_BW, NULL }; if (params != NULL && params[0] != '\0') { @@ -4639,16 +4716,6 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process(kvlist, DLB2_COS_ARG, - set_cos, - &dlb2_args->cos_id); - if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing cos parameter", - name); - rte_kvargs_free(kvlist); - return ret; - } - ret = rte_kvargs_process(kvlist, DLB2_POLL_INTERVAL_ARG, set_poll_interval, &dlb2_args->poll_interval); @@ -4724,6 +4791,29 @@ dlb2_parse_params(const char *params, return ret; } + ret = rte_kvargs_process(kvlist, + DLB2_PORT_COS, + set_port_cos, + &dlb2_args->port_cos); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing port cos", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_COS_BW, + set_cos_bw, + 
&dlb2_args->cos_bw); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cos_bw", + name); + rte_kvargs_free(kvlist); + return ret; + } + + rte_kvargs_free(kvlist); } } diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index b77faa967c..100db434d0 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -76,3 +76,6 @@ int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); + +int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index fef717392f..dc0c446ce8 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -76,4 +76,7 @@ extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); +extern int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index 63b092fc47..528e2ede61 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -45,6 +45,8 @@ #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" #define DLB2_CQ_WEIGHT "cq_weight" +#define DLB2_PORT_COS "port_cos" +#define DLB2_COS_BW "cos_bw" /* Begin HW related defines and structs */ @@ -416,7 +418,8 @@ enum dlb2_cos { DLB2_COS_0 = 0, DLB2_COS_1, DLB2_COS_2, - DLB2_COS_3 + DLB2_COS_3, + DLB2_COS_NUM_VALS }; struct dlb2_hw_dev { @@ -424,7 +427,6 @@ struct dlb2_hw_dev { struct dlb2_hw_resource_info info; void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */ uint32_t domain_id; - enum dlb2_cos cos_id; rte_spinlock_t resource_lock; /* for MP support */ } __rte_cache_aligned; @@ -529,6 +531,7 @@ struct dlb2_eventdev_port { bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ uint32_t cq_weight; /* DLB2.5 and above ldb ports only */ + int cos_id; /*ldb port class of service */ } __rte_cache_aligned; struct dlb2_queue { @@ -623,6 +626,8 @@ struct dlb2_eventdev { uint32_t credit_pool __rte_cache_aligned; }; }; + uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */ + uint32_t cos_bw[DLB2_COS_NUM_VALS]; /* bandwidth per cos domain */ }; /* used for collecting and passing around the dev args */ @@ -634,6 +639,14 @@ struct dlb2_cq_weight { int limit[DLB2_MAX_NUM_LDB_PORTS]; }; +struct dlb2_port_cos { + int cos_id[DLB2_MAX_NUM_LDB_PORTS]; +}; + +struct dlb2_cos_bw { + int val[DLB2_COS_NUM_VALS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -648,6 +661,8 @@ struct dlb2_devargs { bool vector_opts_enabled; int max_cq_depth; struct dlb2_cq_weight cq_weight; + struct dlb2_port_cos port_cos; + struct dlb2_cos_bw cos_bw; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index 24a8215452..da1949c763 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6489,3 +6489,69 @@ int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, return 0; } + +static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw) +{ + DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n"); 
+ DLB2_HW_DBG(hw, "\tCoS ID: %u\n", cos_id); + DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw); +} + +#define DLB2_MAX_BW_PCT 100 + +/** + * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a + * port class-of-service. + * @hw: dlb2_hw handle for a particular device. + * @cos_id: class-of-service ID. + * @bandwidth: class-of-service bandwidth. + * + * Return: + * Returns 0 upon success, < 0 otherwise. + * + * Errors: + * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would + * cause the total bandwidth across all classes of service to exceed + * 100%. + */ +int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth) +{ + unsigned int i; + u32 reg; + u8 total; + + if (cos_id >= DLB2_NUM_COS_DOMAINS) + return -EINVAL; + + if (bandwidth > DLB2_MAX_BW_PCT) + return -EINVAL; + + total = 0; + + for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) + total += (i == cos_id) ? bandwidth : hw->cos_reservation[i]; + + if (total > DLB2_MAX_BW_PCT) + return -EINVAL; + + reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id)); + + /* + * Normalize the bandwidth to a value in the range 0-255. Integer + * division may leave unreserved scheduling slots; these will be + * divided among the 4 classes of service. + */ + DLB2_BITS_SET(reg, (bandwidth * 256) / 100, DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg); + + reg = 0; + DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER); + /* Atomically transfer the newly configured service weight */ + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg); + + dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth); + + hw->cos_reservation[cos_id] = bandwidth; + + return 0; +} diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 1d0415e46f..0627f06a6e 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -646,6 +646,25 @@ dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw, + args->cos_id, + args->bandwidth); + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -671,6 +690,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; + dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth; } /* PCI DEV HOOKS */ @@ -684,7 +704,6 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) .max_num_events = DLB2_MAX_NUM_LDB_CREDITS, .num_dir_credits_override = -1, .qid_depth_thresholds = { {0} }, - .cos_id = DLB2_COS_DEFAULT, .poll_interval = DLB2_POLL_INTERVAL_DEFAULT, .sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT, .hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ, -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
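A usage sketch for the two new devargs (the PCI address and the port split below are placeholders, not values taken from the patches): following the formats accepted by set_port_cos() and set_cos_bw() above, load-balanced ports 0-7 could be pinned to class of service 1 and the device bandwidth divided across the four classes with an EAL allow-list argument along the lines of

    --allow ea:00.0,port_cos=0-7:1,cos_bw=10,60,15,15

The four cos_bw values correspond to COS 0-3 and must not sum to more than 100. Internally, dlb2_hw_set_cos_bandwidth() maps each percentage onto 256 scheduling slots, so the 60% reservation for COS 1 in this sketch would program (60 * 256) / 100 = 153 of those slots.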
* [PATCH v4 0/2] Rebase DLB2 port_cos and cq_weight patches 2022-06-16 14:23 ` [PATCH 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 2022-06-16 19:55 ` [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2022-06-16 20:36 ` [PATCH v3 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel @ 2022-06-16 22:21 ` Timothy McDaniel 2022-06-16 22:21 ` [PATCH v4 1/2] event/dlb2: add CQ weight support Timothy McDaniel 2022-06-16 22:21 ` [PATCH v4 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 2 siblings, 2 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 22:21 UTC (permalink / raw) To: jerinj; +Cc: dev This patch rebases the following previously submitted patches against the latest dpdk-next-eventdev (for-main) branch, and organizes them in a patch series instead of individually. Changes since V3: Fix a CentOS79-64 build error Changes since V2: Fix a misleading if statement (guard error) Changes since V1: Fixed a bug in the port-specific cos patch where we were accessing beyond the end of the cos_bw array. Timothy McDaniel (2): event/dlb2: add CQ weight support event/dlb2: add ldb port specific COS support drivers/event/dlb2/dlb2.c | 325 ++++++++++++++++----- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 7 + drivers/event/dlb2/dlb2_iface.h | 8 + drivers/event/dlb2/dlb2_priv.h | 29 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 ++ drivers/event/dlb2/pf/base/dlb2_resource.c | 282 ++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 +++ drivers/event/dlb2/pf/dlb2_pf.c | 90 +++++- 10 files changed, 743 insertions(+), 76 deletions(-) -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v4 1/2] event/dlb2: add CQ weight support 2022-06-16 22:21 ` [PATCH v4 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel @ 2022-06-16 22:21 ` Timothy McDaniel 2022-06-20 17:33 ` Jerin Jacob 2022-06-16 22:21 ` [PATCH v4 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel 1 sibling, 1 reply; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 22:21 UTC (permalink / raw) To: jerinj; +Cc: dev Enabling the weight limit on a CQ allows the enqueued QEs' 2-bit weight value (representing weights of 1, 2, 4, and 8) to factor into whether a CQ is full. If the sum of the weights of the QEs in the CQ meet or exceed its weight limit, DLB will stop scheduling QEs to it (until software pops enough QEs from the CQ to reverse that). CQ weight support is enabled via the command line, and applies to DLB 2.5 (and above) load balanced ports. The DLB2 documentation will be updated with further details. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V4: Fix a ler detected guard error on a debug log message V3: Rebased against dpdk-next-eventdev V2: Added patch dependency line in commit message --- drivers/event/dlb2/dlb2.c | 99 +++++++++- drivers/event/dlb2/dlb2_avx512.c | 8 + drivers/event/dlb2/dlb2_iface.c | 4 + drivers/event/dlb2/dlb2_iface.h | 5 + drivers/event/dlb2/dlb2_priv.h | 10 +- drivers/event/dlb2/dlb2_sse.c | 8 + drivers/event/dlb2/dlb2_user.h | 29 +++ drivers/event/dlb2/pf/base/dlb2_resource.c | 216 +++++++++++++++++++++ drivers/event/dlb2/pf/base/dlb2_resource.h | 33 ++++ drivers/event/dlb2/pf/dlb2_pf.c | 69 +++++++ 10 files changed, 475 insertions(+), 6 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index 5b0b33bc77..e1687e3d63 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -107,6 +107,63 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2, } } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_cq_weight(struct dlb2_eventdev *dlb2, int *cq_weight) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) + dlb2->ev_ports[q].cq_weight = cq_weight[q]; +} + +static int +set_cq_weight(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_cq_weight *cq_weight = opaque; + int first, last, weight, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); + return -EINVAL; + } + + /* command line override may take one of the following 3 forms: + * qid_depth_thresh=all:<threshold_value> ... all queues + * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues + * qid_depth_thresh=qid:<threshold_value> ... just one queue + */ + if (sscanf(value, "all:%d", &weight) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &weight) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &weight) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg. 
Should be all:val, qid-qid:val, or qid:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port qe weight arg, invalid port value\n"); + return -EINVAL; + } + + if (weight < 0 || weight > DLB2_MAX_CQ_DEPTH_OVERRIDE) { + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg, must be < cq depth\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + cq_weight->limit[i] = weight; /* indexed by qid */ + + return 0; +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -1372,13 +1429,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, return -EINVAL; if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n", DLB2_MIN_CQ_DEPTH); return -EINVAL; } - if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) { - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", + if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 && + ev_port->cq_weight > dequeue_depth) { + DLB2_LOG_ERR("dlb2: invalid cq depth, must be >= cq weight%d\n", DLB2_MIN_ENQUEUE_DEPTH); return -EINVAL; } @@ -1450,8 +1508,24 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, if (dlb2->version == DLB2_HW_V2) { qm_port->cached_ldb_credits = 0; qm_port->cached_dir_credits = 0; - } else + if (ev_port->cq_weight) { + struct dlb2_enable_cq_weight_args cq_weight_args = {0}; + + cq_weight_args.port_id = qm_port->id; + cq_weight_args.limit = ev_port->cq_weight; + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); + if (ret < 0) { + DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", + ret, + dlb2_error_strings[cfg.response. status]); + goto error_exit; + } + } + qm_port->cq_weight = ev_port->cq_weight; + } else { qm_port->cached_credits = 0; + qm_port->cq_weight = 0; + } /* CQs with depth < 8 use an 8-entry queue, but withhold credits so * the effective depth is smaller. 
@@ -4435,6 +4509,9 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_queue_depth_thresholds(dlb2, dlb2_args->qid_depth_thresholds.val); + dlb2_init_cq_weight(dlb2, + dlb2_args->cq_weight.limit); + return 0; } @@ -4489,6 +4566,7 @@ dlb2_parse_params(const char *params, DLB2_DEPTH_THRESH_ARG, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, + DLB2_CQ_WEIGHT, NULL }; if (params != NULL && params[0] != '\0') { @@ -4629,7 +4707,18 @@ dlb2_parse_params(const char *params, set_max_cq_depth, &dlb2_args->max_cq_depth); if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing vector opts enabled", + DLB2_LOG_ERR("%s: Error parsing max cq depth", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_CQ_WEIGHT, + set_cq_weight, + &dlb2_args->cq_weight); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cq weight on", name); rte_kvargs_free(kvlist); return ret; diff --git a/drivers/event/dlb2/dlb2_avx512.c b/drivers/event/dlb2/dlb2_avx512.c index d4aaa04a01..3c8906af9d 100644 --- a/drivers/event/dlb2/dlb2_avx512.c +++ b/drivers/event/dlb2/dlb2_avx512.c @@ -237,6 +237,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[3].data = ev[3].u64; } + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index 5471dd8da7..b77faa967c 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -72,3 +72,7 @@ int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index b508eb0936..fef717392f 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -71,4 +71,9 @@ extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, struct dlb2_get_dir_queue_depth_args *args); + + +extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index df69d57b83..63b092fc47 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -44,6 +44,7 @@ #define DLB2_DEPTH_THRESH_ARG "default_depth_thresh" #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" +#define DLB2_CQ_WEIGHT "cq_weight" /* Begin HW related defines and structs */ @@ -249,7 +250,7 @@ struct dlb2_enqueue_qe { /* Word 4 */ uint16_t lock_id; uint8_t meas_lat:1; - uint8_t rsvd1:2; + uint8_t weight:2; /* DLB 2.5 and above */ uint8_t no_dec:1; uint8_t cmp_id:4; union { @@ -378,6 +379,7 @@ struct dlb2_port { bool use_scalar; /* force usage of scalar code */ uint16_t hw_credit_quanta; bool use_avx512; + uint32_t cq_weight; }; /* Per-process per-port mmio and memory pointers */ @@ -526,6 +528,7 @@ struct dlb2_eventdev_port { /* enq_configured is set when the qm port is created */ bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ + uint32_t 
cq_weight; /* DLB2.5 and above ldb ports only */ } __rte_cache_aligned; struct dlb2_queue { @@ -627,6 +630,10 @@ struct dlb2_qid_depth_thresholds { int val[DLB2_MAX_NUM_QUEUES_ALL]; }; +struct dlb2_cq_weight { + int limit[DLB2_MAX_NUM_LDB_PORTS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -640,6 +647,7 @@ struct dlb2_devargs { int default_depth_thresh; bool vector_opts_enabled; int max_cq_depth; + struct dlb2_cq_weight cq_weight; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/dlb2_sse.c b/drivers/event/dlb2/dlb2_sse.c index 8fc12d47f7..248d7519d5 100644 --- a/drivers/event/dlb2/dlb2_sse.c +++ b/drivers/event/dlb2/dlb2_sse.c @@ -189,6 +189,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, qe[2].data = ev[2].u64; qe[3].data = ev[3].u64; + /* will only be set for DLB 2.5 + */ + if (qm_port->cq_weight) { + qe[0].weight = ev[0].impl_opaque & 3; + qe[1].weight = ev[1].impl_opaque & 3; + qe[2].weight = ev[2].impl_opaque & 3; + qe[3].weight = ev[3].impl_opaque & 3; + } + break; case 3: case 2: diff --git a/drivers/event/dlb2/dlb2_user.h b/drivers/event/dlb2/dlb2_user.h index 9760e9bda6..901e2e0c66 100644 --- a/drivers/event/dlb2/dlb2_user.h +++ b/drivers/event/dlb2/dlb2_user.h @@ -47,6 +47,8 @@ enum dlb2_error { DLB2_ST_NO_MEMORY, DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL, DLB2_ST_INVALID_COS_ID, + DLB2_ST_INVALID_CQ_WEIGHT_LIMIT, + DLB2_ST_FEATURE_UNAVAILABLE, }; static const char dlb2_error_strings[][128] = { @@ -87,6 +89,8 @@ static const char dlb2_error_strings[][128] = { "DLB2_ST_NO_MEMORY", "DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL", "DLB2_ST_INVALID_COS_ID", + "DLB2_ST_INVALID_CQ_WEIGHT_LIMIT", + "DLB2_ST_FEATURE_UNAVAILABLE", }; struct dlb2_cmd_response { @@ -687,6 +691,31 @@ struct dlb2_pending_port_unmaps_args { __u32 padding0; }; +/* + * DLB2_DOMAIN_CMD_ENABLE_CQ_WEIGHT: Enable QE-weight based scheduling on a + * load-balanced port's CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. The QE weight limit must be non-zero and cannot exceed the + * CQ's depth. + * + * Input parameters: + * - port_id: Load-balanced port ID. + * - limit: QE weight limit. + * + * Output parameters: + * - response.status: Detailed error code. In certain cases, such as if the + * ioctl request arg is invalid, the driver won't set status. + * - response.id: number of unmaps in progress. + */ +struct dlb2_enable_cq_weight_args { + /* Output parameters */ + struct dlb2_cmd_response response; + /* Input parameters */ + __u32 port_id; + __u32 limit; +}; + /* * Mapping sizes for memory mapping the consumer queue (CQ) memory space, and * producer port (PP) MMIO space. diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index 4011c24aef..24a8215452 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6273,3 +6273,219 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw, return 0; } +/** + * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. + * + * weight[N] programs priority N's weight. 
In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3); + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3); + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0); + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1); + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2); + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3); + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0); + DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1); + DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2); + DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3); + DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg); +} + +/** + * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights + * @hw: dlb2_hw handle for a particular device. + * @weight: 8-entry array of arbiter weights. 
+ * + * weight[N] programs priority N's weight. In cases where the 8 priorities are + * reduced to 4 bins, the mapping is: + * - weight[1] programs bin 0 + * - weight[3] programs bin 1 + * - weight[5] programs bin 2 + * - weight[7] programs bin 3 + */ +void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) +{ + u32 reg = 0; + + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg); + + reg = 0; + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT); + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT); + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT); + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg); +} + +static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + bool vdev_req, + unsigned int vdev_id) +{ + DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n"); + DLB2_HW_DBG(hw, "\tvdev_req %d, vdev_id %d\n", vdev_req, vdev_id); + DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id); + DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id); + DLB2_HW_DBG(hw, "\tLimit: %d\n", args->limit); +} + +static int +dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + + if (hw->ver == DLB2_HW_V2) { + resp->status = DLB2_ST_FEATURE_UNAVAILABLE; + return -EINVAL; + } + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + + if (!domain) { + resp->status = DLB2_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + if (!domain->configured) { + resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED; + return -EINVAL; + } + + if (domain->started) { + resp->status = DLB2_ST_DOMAIN_STARTED; + return -EINVAL; + } + + port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain); + if (!port || !port->configured) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + if (args->limit == 0 || args->limit > port->cq_depth) { + resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT; + return -EINVAL; + } + + return 0; +} + +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + int ret, id; + u32 reg = 0; + + dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id); + + /* + * Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
+ */ + ret = dlb2_verify_enable_cq_weight_args(hw, + domain_id, + args, + resp, + vdev_req, + vdev_id); + if (ret) + return ret; + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + if (!domain) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); + if (!port) { + DLB2_HW_ERR(hw, + "[%s(): %d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V); + DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT); + + DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg); + + resp->status = 0; + + return 0; +} diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.h b/drivers/event/dlb2/pf/base/dlb2_resource.h index 684049cd6e..a7e6c90888 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.h +++ b/drivers/event/dlb2/pf/base/dlb2_resource.h @@ -1910,4 +1910,37 @@ void dlb2_hw_dir_cq_interrupt_set_mode(struct dlb2_hw *hw, int port_id, int mode); +/** + * dlb2_hw_enable_cq_weight() - Enable QE-weight based scheduling on an LDB port. + * @hw: dlb2_hw handle for a particular device. + * @domain_id: domain ID. + * @args: CQ weight enablement arguments. + * @resp: response structure. + * @vdev_request: indicates whether this request came from a vdev. + * @vdev_id: If vdev_request is true, this contains the vdev's ID. + * + * This function enables QE-weight based scheduling on a load-balanced port's + * CQ and configures the CQ's weight limit. + * + * This must be called after creating the port but before starting the + * domain. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum dlb2_error. If successful, resp->id + * contains the queue ID. + * + * Errors: + * EINVAL - The domain or port is not configured, the domainhas already been + * started, the requested limit exceeds the port's CQ depth, or this + * feature is unavailable on the device. + * EFAULT - Internal error (resp->status not set). 
+ */ +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_enable_cq_weight_args *args, + struct dlb2_cmd_response *resp, + bool vdev_request, + unsigned int vdev_id); + #endif /* __DLB2_RESOURCE_H */ diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 5c80c724f1..1d0415e46f 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -41,6 +41,8 @@ #include "base/dlb2_resource.h" static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD); +static unsigned int dlb2_qe_sa_pct = 1; +static unsigned int dlb2_qid_sa_pct; static void dlb2_pf_low_level_io_init(void) @@ -80,6 +82,27 @@ dlb2_pf_get_device_version(struct dlb2_hw_dev *handle, return 0; } +static void dlb2_pf_calc_arbiter_weights(u8 *weight, + unsigned int pct) +{ + int val, i; + + /* Largest possible weight (100% SA case): 32 */ + val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS; + + /* Scale val according to the starvation avoidance percentage */ + val = (val * pct) / 100; + if (val == 0 && pct != 0) + val = 1; + + /* Prio 7 always has weight 0xff */ + weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT; + + for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--) + weight[i] = weight[i + 1] - val; +} + + static void dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) { @@ -87,6 +110,27 @@ dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw); dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw); + + /* Configure arbitration weights for QE selection */ + if (dlb2_qe_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qe_sa_pct); + + dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight); + } + + /* Configure arbitration weights for QID selection */ + if (dlb2_qid_sa_pct <= 100) { + u8 weight[DLB2_NUM_ARB_WEIGHTS]; + + dlb2_pf_calc_arbiter_weights(weight, + dlb2_qid_sa_pct); + + dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight); + } + } static int @@ -578,6 +622,30 @@ dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, + struct dlb2_enable_cq_weight_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + struct dlb2_cmd_response response = {0}; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw, + handle->domain_id, + args, + &response, + false, + 0); + args->response = response; + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -602,6 +670,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation; dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; + dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; } /* PCI DEV HOOKS */ -- 2.25.1 ^ permalink raw reply [flat|nested] 13+ messages in thread
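On the data path side, dlb2_event_build_hcws() copies the low two bits of each event's impl_opaque into the QE weight field, and only on ports where a weight limit was configured (qm_port->cq_weight non-zero). The fragment below is a minimal sketch rather than part of the patch: the helper name is made up, dev_id/port_id/queue_id/obj are application placeholders, it assumes the ports were created with a limit via the new devarg (for example cq_weight=all:8) on a DLB 2.5 device, and it assumes the 2-bit code follows the natural 2^code encoding of the weights 1, 2, 4 and 8 named in the commit message.

    #include <stdint.h>
    #include <rte_eventdev.h>

    static inline uint16_t
    enqueue_weighted(uint8_t dev_id, uint8_t port_id, uint8_t queue_id, void *obj)
    {
            struct rte_event ev = {0};

            ev.queue_id = queue_id;
            ev.op = RTE_EVENT_OP_NEW;
            ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
            ev.event_ptr = obj;     /* application payload */
            /* Low two bits become the QE weight (see dlb2_event_build_hcws());
             * 3 is assumed to request the heaviest of the weights 1, 2, 4, 8.
             */
            ev.impl_opaque = 3;

            return rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
    }

Once the summed weights of the QEs resident in the CQ reach the configured limit, the device stops scheduling to that CQ until enough of them are dequeued.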
* Re: [PATCH v4 1/2] event/dlb2: add CQ weight support 2022-06-16 22:21 ` [PATCH v4 1/2] event/dlb2: add CQ weight support Timothy McDaniel @ 2022-06-20 17:33 ` Jerin Jacob 0 siblings, 0 replies; 13+ messages in thread From: Jerin Jacob @ 2022-06-20 17:33 UTC (permalink / raw) To: Timothy McDaniel; +Cc: Jerin Jacob, dpdk-dev On Fri, Jun 17, 2022 at 3:51 AM Timothy McDaniel <timothy.mcdaniel@intel.com> wrote: > > Enabling the weight limit on a CQ allows the enqueued QEs' 2-bit weight > value (representing weights of 1, 2, 4, and 8) to factor into whether a > CQ is full. If the sum of the weights of the QEs in the CQ meet or exceed > its weight limit, DLB will stop scheduling QEs to it (until software pops > enough QEs from the CQ to reverse that). > > CQ weight support is enabled via the command line, and applies to > DLB 2.5 (and above) load balanced ports. The DLB2 documentation will > be updated with further details. > > Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> Series applied to dpdk-next-net-eventdev/for-main. Thanks > > --- > > V4: Fix a ler detected guard error on a debug log message > > V3: Rebased against dpdk-next-eventdev > > V2: Added patch dependency line in commit message > --- > drivers/event/dlb2/dlb2.c | 99 +++++++++- > drivers/event/dlb2/dlb2_avx512.c | 8 + > drivers/event/dlb2/dlb2_iface.c | 4 + > drivers/event/dlb2/dlb2_iface.h | 5 + > drivers/event/dlb2/dlb2_priv.h | 10 +- > drivers/event/dlb2/dlb2_sse.c | 8 + > drivers/event/dlb2/dlb2_user.h | 29 +++ > drivers/event/dlb2/pf/base/dlb2_resource.c | 216 +++++++++++++++++++++ > drivers/event/dlb2/pf/base/dlb2_resource.h | 33 ++++ > drivers/event/dlb2/pf/dlb2_pf.c | 69 +++++++ > 10 files changed, 475 insertions(+), 6 deletions(-) > > diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c > index 5b0b33bc77..e1687e3d63 100644 > --- a/drivers/event/dlb2/dlb2.c > +++ b/drivers/event/dlb2/dlb2.c > @@ -107,6 +107,63 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2, > } > } > > +/* override defaults with value(s) provided on command line */ > +static void > +dlb2_init_cq_weight(struct dlb2_eventdev *dlb2, int *cq_weight) > +{ > + int q; > + > + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) > + dlb2->ev_ports[q].cq_weight = cq_weight[q]; > +} > + > +static int > +set_cq_weight(const char *key __rte_unused, > + const char *value, > + void *opaque) > +{ > + struct dlb2_cq_weight *cq_weight = opaque; > + int first, last, weight, i; > + > + if (value == NULL || opaque == NULL) { > + DLB2_LOG_ERR("NULL pointer\n"); > + return -EINVAL; > + } > + > + /* command line override may take one of the following 3 forms: > + * qid_depth_thresh=all:<threshold_value> ... all queues > + * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues > + * qid_depth_thresh=qid:<threshold_value> ... just one queue > + */ > + if (sscanf(value, "all:%d", &weight) == 1) { > + first = 0; > + last = DLB2_MAX_NUM_LDB_PORTS - 1; > + } else if (sscanf(value, "%d-%d:%d", &first, &last, &weight) == 3) { > + /* we have everything we need */ > + } else if (sscanf(value, "%d:%d", &first, &weight) == 2) { > + last = first; > + } else { > + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg. 
Should be all:val, qid-qid:val, or qid:val\n"); > + return -EINVAL; > + } > + > + if (first > last || first < 0 || > + last >= DLB2_MAX_NUM_LDB_PORTS) { > + DLB2_LOG_ERR("Error parsing ldb port qe weight arg, invalid port value\n"); > + return -EINVAL; > + } > + > + if (weight < 0 || weight > DLB2_MAX_CQ_DEPTH_OVERRIDE) { > + DLB2_LOG_ERR("Error parsing ldb port qe weight devarg, must be < cq depth\n"); > + return -EINVAL; > + } > + > + for (i = first; i <= last; i++) > + cq_weight->limit[i] = weight; /* indexed by qid */ > + > + return 0; > +} > + > static int > dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) > { > @@ -1372,13 +1429,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, > return -EINVAL; > > if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { > - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", > + DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n", > DLB2_MIN_CQ_DEPTH); > return -EINVAL; > } > > - if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) { > - DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", > + if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 && > + ev_port->cq_weight > dequeue_depth) { > + DLB2_LOG_ERR("dlb2: invalid cq depth, must be >= cq weight%d\n", > DLB2_MIN_ENQUEUE_DEPTH); > return -EINVAL; > } > @@ -1450,8 +1508,24 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, > if (dlb2->version == DLB2_HW_V2) { > qm_port->cached_ldb_credits = 0; > qm_port->cached_dir_credits = 0; > - } else > + if (ev_port->cq_weight) { > + struct dlb2_enable_cq_weight_args cq_weight_args = {0}; > + > + cq_weight_args.port_id = qm_port->id; > + cq_weight_args.limit = ev_port->cq_weight; > + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); > + if (ret < 0) { > + DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", > + ret, > + dlb2_error_strings[cfg.response. status]); > + goto error_exit; > + } > + } > + qm_port->cq_weight = ev_port->cq_weight; > + } else { > qm_port->cached_credits = 0; > + qm_port->cq_weight = 0; > + } > > /* CQs with depth < 8 use an 8-entry queue, but withhold credits so > * the effective depth is smaller. 
> @@ -4435,6 +4509,9 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, > dlb2_init_queue_depth_thresholds(dlb2, > dlb2_args->qid_depth_thresholds.val); > > + dlb2_init_cq_weight(dlb2, > + dlb2_args->cq_weight.limit); > + > return 0; > } > > @@ -4489,6 +4566,7 @@ dlb2_parse_params(const char *params, > DLB2_DEPTH_THRESH_ARG, > DLB2_VECTOR_OPTS_ENAB_ARG, > DLB2_MAX_CQ_DEPTH, > + DLB2_CQ_WEIGHT, > NULL }; > > if (params != NULL && params[0] != '\0') { > @@ -4629,7 +4707,18 @@ dlb2_parse_params(const char *params, > set_max_cq_depth, > &dlb2_args->max_cq_depth); > if (ret != 0) { > - DLB2_LOG_ERR("%s: Error parsing vector opts enabled", > + DLB2_LOG_ERR("%s: Error parsing max cq depth", > + name); > + rte_kvargs_free(kvlist); > + return ret; > + } > + > + ret = rte_kvargs_process(kvlist, > + DLB2_CQ_WEIGHT, > + set_cq_weight, > + &dlb2_args->cq_weight); > + if (ret != 0) { > + DLB2_LOG_ERR("%s: Error parsing cq weight on", > name); > rte_kvargs_free(kvlist); > return ret; > diff --git a/drivers/event/dlb2/dlb2_avx512.c b/drivers/event/dlb2/dlb2_avx512.c > index d4aaa04a01..3c8906af9d 100644 > --- a/drivers/event/dlb2/dlb2_avx512.c > +++ b/drivers/event/dlb2/dlb2_avx512.c > @@ -237,6 +237,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, > qe[3].data = ev[3].u64; > } > > + /* will only be set for DLB 2.5 + */ > + if (qm_port->cq_weight) { > + qe[0].weight = ev[0].impl_opaque & 3; > + qe[1].weight = ev[1].impl_opaque & 3; > + qe[2].weight = ev[2].impl_opaque & 3; > + qe[3].weight = ev[3].impl_opaque & 3; > + } > + > break; > case 3: > case 2: > diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c > index 5471dd8da7..b77faa967c 100644 > --- a/drivers/event/dlb2/dlb2_iface.c > +++ b/drivers/event/dlb2/dlb2_iface.c > @@ -72,3 +72,7 @@ int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, > > int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, > struct dlb2_get_dir_queue_depth_args *args); > + > + > +int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, > + struct dlb2_enable_cq_weight_args *args); > diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h > index b508eb0936..fef717392f 100644 > --- a/drivers/event/dlb2/dlb2_iface.h > +++ b/drivers/event/dlb2/dlb2_iface.h > @@ -71,4 +71,9 @@ extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle, > > extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, > struct dlb2_get_dir_queue_depth_args *args); > + > + > +extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, > + struct dlb2_enable_cq_weight_args *args); > + > #endif /* _DLB2_IFACE_H_ */ > diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h > index df69d57b83..63b092fc47 100644 > --- a/drivers/event/dlb2/dlb2_priv.h > +++ b/drivers/event/dlb2/dlb2_priv.h > @@ -44,6 +44,7 @@ > #define DLB2_DEPTH_THRESH_ARG "default_depth_thresh" > #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" > #define DLB2_MAX_CQ_DEPTH "max_cq_depth" > +#define DLB2_CQ_WEIGHT "cq_weight" > > /* Begin HW related defines and structs */ > > @@ -249,7 +250,7 @@ struct dlb2_enqueue_qe { > /* Word 4 */ > uint16_t lock_id; > uint8_t meas_lat:1; > - uint8_t rsvd1:2; > + uint8_t weight:2; /* DLB 2.5 and above */ > uint8_t no_dec:1; > uint8_t cmp_id:4; > union { > @@ -378,6 +379,7 @@ struct dlb2_port { > bool use_scalar; /* force usage of scalar code */ > uint16_t hw_credit_quanta; > bool use_avx512; > + uint32_t cq_weight; > }; > > /* Per-process per-port mmio 
and memory pointers */ > @@ -526,6 +528,7 @@ struct dlb2_eventdev_port { > /* enq_configured is set when the qm port is created */ > bool enq_configured; > uint8_t implicit_release; /* release events before dequeuing */ > + uint32_t cq_weight; /* DLB2.5 and above ldb ports only */ > } __rte_cache_aligned; > > struct dlb2_queue { > @@ -627,6 +630,10 @@ struct dlb2_qid_depth_thresholds { > int val[DLB2_MAX_NUM_QUEUES_ALL]; > }; > > +struct dlb2_cq_weight { > + int limit[DLB2_MAX_NUM_LDB_PORTS]; > +}; > + > struct dlb2_devargs { > int socket_id; > int max_num_events; > @@ -640,6 +647,7 @@ struct dlb2_devargs { > int default_depth_thresh; > bool vector_opts_enabled; > int max_cq_depth; > + struct dlb2_cq_weight cq_weight; > }; > > /* End Eventdev related defines and structs */ > diff --git a/drivers/event/dlb2/dlb2_sse.c b/drivers/event/dlb2/dlb2_sse.c > index 8fc12d47f7..248d7519d5 100644 > --- a/drivers/event/dlb2/dlb2_sse.c > +++ b/drivers/event/dlb2/dlb2_sse.c > @@ -189,6 +189,14 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port, > qe[2].data = ev[2].u64; > qe[3].data = ev[3].u64; > > + /* will only be set for DLB 2.5 + */ > + if (qm_port->cq_weight) { > + qe[0].weight = ev[0].impl_opaque & 3; > + qe[1].weight = ev[1].impl_opaque & 3; > + qe[2].weight = ev[2].impl_opaque & 3; > + qe[3].weight = ev[3].impl_opaque & 3; > + } > + > break; > case 3: > case 2: > diff --git a/drivers/event/dlb2/dlb2_user.h b/drivers/event/dlb2/dlb2_user.h > index 9760e9bda6..901e2e0c66 100644 > --- a/drivers/event/dlb2/dlb2_user.h > +++ b/drivers/event/dlb2/dlb2_user.h > @@ -47,6 +47,8 @@ enum dlb2_error { > DLB2_ST_NO_MEMORY, > DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL, > DLB2_ST_INVALID_COS_ID, > + DLB2_ST_INVALID_CQ_WEIGHT_LIMIT, > + DLB2_ST_FEATURE_UNAVAILABLE, > }; > > static const char dlb2_error_strings[][128] = { > @@ -87,6 +89,8 @@ static const char dlb2_error_strings[][128] = { > "DLB2_ST_NO_MEMORY", > "DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL", > "DLB2_ST_INVALID_COS_ID", > + "DLB2_ST_INVALID_CQ_WEIGHT_LIMIT", > + "DLB2_ST_FEATURE_UNAVAILABLE", > }; > > struct dlb2_cmd_response { > @@ -687,6 +691,31 @@ struct dlb2_pending_port_unmaps_args { > __u32 padding0; > }; > > +/* > + * DLB2_DOMAIN_CMD_ENABLE_CQ_WEIGHT: Enable QE-weight based scheduling on a > + * load-balanced port's CQ and configures the CQ's weight limit. > + * > + * This must be called after creating the port but before starting the > + * domain. The QE weight limit must be non-zero and cannot exceed the > + * CQ's depth. > + * > + * Input parameters: > + * - port_id: Load-balanced port ID. > + * - limit: QE weight limit. > + * > + * Output parameters: > + * - response.status: Detailed error code. In certain cases, such as if the > + * ioctl request arg is invalid, the driver won't set status. > + * - response.id: number of unmaps in progress. > + */ > +struct dlb2_enable_cq_weight_args { > + /* Output parameters */ > + struct dlb2_cmd_response response; > + /* Input parameters */ > + __u32 port_id; > + __u32 limit; > +}; > + > /* > * Mapping sizes for memory mapping the consumer queue (CQ) memory space, and > * producer port (PP) MMIO space. 
> diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c > index 4011c24aef..24a8215452 100644 > --- a/drivers/event/dlb2/pf/base/dlb2_resource.c > +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c > @@ -6273,3 +6273,219 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw, > return 0; > } > > +/** > + * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights > + * @hw: dlb2_hw handle for a particular device. > + * @weight: 8-entry array of arbiter weights. > + * > + * weight[N] programs priority N's weight. In cases where the 8 priorities are > + * reduced to 4 bins, the mapping is: > + * - weight[1] programs bin 0 > + * - weight[3] programs bin 1 > + * - weight[5] programs bin 2 > + * - weight[7] programs bin 3 > + */ > +void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) > +{ > + u32 reg = 0; > + > + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0); > + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1); > + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2); > + DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3); > + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0); > + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1); > + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2); > + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3); > + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); > + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); > + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); > + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); > + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0); > + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1); > + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2); > + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3); > + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0); > + DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1); > + DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2); > + DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3); > + DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0); > + DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1); > + DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2); > + DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3); > + DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0); > + DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1); > + DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2); > + DLB2_BITS_SET(reg, weight[7], 
DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3); > + DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0); > + DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1); > + DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2); > + DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3); > + DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg); > +} > + > +/** > + * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights > + * @hw: dlb2_hw handle for a particular device. > + * @weight: 8-entry array of arbiter weights. > + * > + * weight[N] programs priority N's weight. In cases where the 8 priorities are > + * reduced to 4 bins, the mapping is: > + * - weight[1] programs bin 0 > + * - weight[3] programs bin 1 > + * - weight[5] programs bin 2 > + * - weight[7] programs bin 3 > + */ > +void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8]) > +{ > + u32 reg = 0; > + > + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT); > + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT); > + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT); > + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT); > + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg); > + > + reg = 0; > + DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT); > + DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT); > + DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT); > + DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT); > + DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg); > +} > + > +static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw, > + u32 domain_id, > + struct dlb2_enable_cq_weight_args *args, > + bool vdev_req, > + unsigned int vdev_id) > +{ > + DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n"); > + DLB2_HW_DBG(hw, "\tvdev_req %d, vdev_id %d\n", vdev_req, vdev_id); > + DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id); > + DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id); > + DLB2_HW_DBG(hw, "\tLimit: %d\n", args->limit); > +} > + > +static int > +dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw, > + u32 domain_id, > + struct dlb2_enable_cq_weight_args *args, > + struct dlb2_cmd_response *resp, > + bool vdev_req, > + unsigned int vdev_id) > +{ > + struct dlb2_hw_domain *domain; > + struct dlb2_ldb_port *port; > + > + if (hw->ver == DLB2_HW_V2) { > + resp->status = DLB2_ST_FEATURE_UNAVAILABLE; > + return -EINVAL; > + } > + > + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); > + > + if (!domain) { > + resp->status = DLB2_ST_INVALID_DOMAIN_ID; > + return -EINVAL; > + } > + > + if (!domain->configured) { > + resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED; > + return -EINVAL; > + } > + > + if (domain->started) { > + resp->status = DLB2_ST_DOMAIN_STARTED; > + return -EINVAL; > + } > + > + port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain); > + if (!port || !port->configured) { > + resp->status = DLB2_ST_INVALID_PORT_ID; > + return -EINVAL; > + } > + > + if (args->limit == 0 || args->limit > port->cq_depth) { > + resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT; > + return -EINVAL; > + } > + > + return 0; > +} > + > +int dlb2_hw_enable_cq_weight(struct 
dlb2_hw *hw, > + u32 domain_id, > + struct dlb2_enable_cq_weight_args *args, > + struct dlb2_cmd_response *resp, > + bool vdev_req, > + unsigned int vdev_id) > +{ > + struct dlb2_hw_domain *domain; > + struct dlb2_ldb_port *port; > + int ret, id; > + u32 reg = 0; > + > + dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id); > + > + /* > + * Verify that hardware resources are available before attempting to > + * satisfy the request. This simplifies the error unwinding code. > + */ > + ret = dlb2_verify_enable_cq_weight_args(hw, > + domain_id, > + args, > + resp, > + vdev_req, > + vdev_id); > + if (ret) > + return ret; > + > + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); > + if (!domain) { > + DLB2_HW_ERR(hw, > + "[%s():%d] Internal error: domain not found\n", > + __func__, __LINE__); > + return -EFAULT; > + } > + > + id = args->port_id; > + > + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); > + if (!port) { > + DLB2_HW_ERR(hw, > + "[%s(): %d] Internal error: port not found\n", > + __func__, __LINE__); > + return -EFAULT; > + } > + > + DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V); > + DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT); > + > + DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg); > + > + resp->status = 0; > + > + return 0; > +} > diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.h b/drivers/event/dlb2/pf/base/dlb2_resource.h > index 684049cd6e..a7e6c90888 100644 > --- a/drivers/event/dlb2/pf/base/dlb2_resource.h > +++ b/drivers/event/dlb2/pf/base/dlb2_resource.h > @@ -1910,4 +1910,37 @@ void dlb2_hw_dir_cq_interrupt_set_mode(struct dlb2_hw *hw, > int port_id, > int mode); > > +/** > + * dlb2_hw_enable_cq_weight() - Enable QE-weight based scheduling on an LDB port. > + * @hw: dlb2_hw handle for a particular device. > + * @domain_id: domain ID. > + * @args: CQ weight enablement arguments. > + * @resp: response structure. > + * @vdev_request: indicates whether this request came from a vdev. > + * @vdev_id: If vdev_request is true, this contains the vdev's ID. > + * > + * This function enables QE-weight based scheduling on a load-balanced port's > + * CQ and configures the CQ's weight limit. > + * > + * This must be called after creating the port but before starting the > + * domain. > + * > + * Return: > + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is > + * assigned a detailed error code from enum dlb2_error. If successful, resp->id > + * contains the queue ID. > + * > + * Errors: > + * EINVAL - The domain or port is not configured, the domainhas already been > + * started, the requested limit exceeds the port's CQ depth, or this > + * feature is unavailable on the device. > + * EFAULT - Internal error (resp->status not set). 
> + */ > +int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, > + u32 domain_id, > + struct dlb2_enable_cq_weight_args *args, > + struct dlb2_cmd_response *resp, > + bool vdev_request, > + unsigned int vdev_id); > + > #endif /* __DLB2_RESOURCE_H */ > diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c > index 5c80c724f1..1d0415e46f 100644 > --- a/drivers/event/dlb2/pf/dlb2_pf.c > +++ b/drivers/event/dlb2/pf/dlb2_pf.c > @@ -41,6 +41,8 @@ > #include "base/dlb2_resource.h" > > static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD); > +static unsigned int dlb2_qe_sa_pct = 1; > +static unsigned int dlb2_qid_sa_pct; > > static void > dlb2_pf_low_level_io_init(void) > @@ -80,6 +82,27 @@ dlb2_pf_get_device_version(struct dlb2_hw_dev *handle, > return 0; > } > > +static void dlb2_pf_calc_arbiter_weights(u8 *weight, > + unsigned int pct) > +{ > + int val, i; > + > + /* Largest possible weight (100% SA case): 32 */ > + val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS; > + > + /* Scale val according to the starvation avoidance percentage */ > + val = (val * pct) / 100; > + if (val == 0 && pct != 0) > + val = 1; > + > + /* Prio 7 always has weight 0xff */ > + weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT; > + > + for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--) > + weight[i] = weight[i + 1] - val; > +} > + > + > static void > dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) > { > @@ -87,6 +110,27 @@ dlb2_pf_hardware_init(struct dlb2_hw_dev *handle) > > dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw); > dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw); > + > + /* Configure arbitration weights for QE selection */ > + if (dlb2_qe_sa_pct <= 100) { > + u8 weight[DLB2_NUM_ARB_WEIGHTS]; > + > + dlb2_pf_calc_arbiter_weights(weight, > + dlb2_qe_sa_pct); > + > + dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight); > + } > + > + /* Configure arbitration weights for QID selection */ > + if (dlb2_qid_sa_pct <= 100) { > + u8 weight[DLB2_NUM_ARB_WEIGHTS]; > + > + dlb2_pf_calc_arbiter_weights(weight, > + dlb2_qid_sa_pct); > + > + dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight); > + } > + > } > > static int > @@ -578,6 +622,30 @@ dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle, > return ret; > } > > +static int > +dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, > + struct dlb2_enable_cq_weight_args *args) > +{ > + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; > + struct dlb2_cmd_response response = {0}; > + int ret = 0; > + > + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); > + > + ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw, > + handle->domain_id, > + args, > + &response, > + false, > + 0); > + args->response = response; > + > + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", > + __func__, ret); > + > + return ret; > +} > + > static void > dlb2_pf_iface_fn_ptrs_init(void) > { > @@ -602,6 +670,7 @@ dlb2_pf_iface_fn_ptrs_init(void) > dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation; > dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; > dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; > + dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; > } > > /* PCI DEV HOOKS */ > -- > 2.25.1 > ^ permalink raw reply [flat|nested] 13+ messages in thread
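The 1% default starvation-avoidance setting above (dlb2_qe_sa_pct) translates into a steeply sloped arbiter weight table. The following standalone sketch re-implements the calculation done by dlb2_pf_calc_arbiter_weights() so the resulting table can be inspected without the driver; MAX_WEIGHT (0xff) and NUM_ARB_WEIGHTS (8) are assumed stand-ins for DLB2_MAX_WEIGHT and DLB2_NUM_ARB_WEIGHTS, inferred from the comments in the patch rather than taken from the driver headers.

/*
 * Standalone sketch of the starvation-avoidance weight table computed by
 * dlb2_pf_calc_arbiter_weights() in the patch above.  MAX_WEIGHT and
 * NUM_ARB_WEIGHTS are assumptions inferred from the patch comments.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_WEIGHT      255
#define NUM_ARB_WEIGHTS 8

static void calc_arbiter_weights(uint8_t *weight, unsigned int pct)
{
	int val, i;

	/* Largest possible step, i.e. the 100% SA case: 256 / 8 = 32 */
	val = (MAX_WEIGHT + 1) / NUM_ARB_WEIGHTS;

	/* Scale the step by the starvation-avoidance percentage */
	val = (val * (int)pct) / 100;
	if (val == 0 && pct != 0)
		val = 1;

	/* weight[7] always keeps the maximum weight of 0xff ... */
	weight[NUM_ARB_WEIGHTS - 1] = MAX_WEIGHT;

	/* ... and each lower-indexed entry steps down by 'val' */
	for (i = NUM_ARB_WEIGHTS - 2; i >= 0; i--)
		weight[i] = weight[i + 1] - val;
}

int main(void)
{
	uint8_t w[NUM_ARB_WEIGHTS];
	int i;

	/* 1% is the module default for QE selection (dlb2_qe_sa_pct) */
	calc_arbiter_weights(w, 1);

	for (i = 0; i < NUM_ARB_WEIGHTS; i++)
		printf("weight[%d] = %d\n", i, w[i]);

	return 0;
}

With the default 1% setting, weight[7] stays at 255 and each lower-indexed entry drops by one (248 through 255 across weight[0]..weight[7]); at 100% the step grows to 32 and the table spreads from 31 to 255.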
* [PATCH v4 2/2] event/dlb2: add ldb port specific COS support 2022-06-16 22:21 ` [PATCH v4 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel 2022-06-16 22:21 ` [PATCH v4 1/2] event/dlb2: add CQ weight support Timothy McDaniel @ 2022-06-16 22:21 ` Timothy McDaniel 1 sibling, 0 replies; 13+ messages in thread From: Timothy McDaniel @ 2022-06-16 22:21 UTC (permalink / raw) To: jerinj; +Cc: dev DLB supports 4 class of service domains, to aid in managing the device bandwidth across ldb ports. This commit allows specifying which ldb ports will participate in the COS scheme, which class they are a part of, and the specific bandwidth percentage associated with each class. The cumulative bandwidth associated with the 4 classes must not exceed 100%. This feature is enabled on the command line, and will be documented in the DLB2 programmers guide. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> --- V4: Fixed a build error on CentOS79-64 V3: fixed a typo/bug that caused us to read beyond the end of an array V2: Rebased against dpdk-next-eventdev --- drivers/event/dlb2/dlb2.c | 228 +++++++++++++++------ drivers/event/dlb2/dlb2_iface.c | 3 + drivers/event/dlb2/dlb2_iface.h | 3 + drivers/event/dlb2/dlb2_priv.h | 19 +- drivers/event/dlb2/pf/base/dlb2_resource.c | 66 ++++++ drivers/event/dlb2/pf/dlb2_pf.c | 21 +- 6 files changed, 269 insertions(+), 71 deletions(-) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index e1687e3d63..78433d2fe0 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -164,6 +164,28 @@ set_cq_weight(const char *key __rte_unused, return 0; } +/* override defaults with value(s) provided on command line */ +static void +dlb2_init_port_cos(struct dlb2_eventdev *dlb2, int *port_cos) +{ + int q; + + for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) { + dlb2->ev_ports[q].cos_id = port_cos[q]; + dlb2->cos_ports[port_cos[q]]++; + } +} + +static void +dlb2_init_cos_bw(struct dlb2_eventdev *dlb2, + struct dlb2_cos_bw *cos_bw) +{ + int q; + for (q = 0; q < DLB2_COS_NUM_VALS; q++) + dlb2->cos_bw[q] = cos_bw->val[q]; + +} + static int dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { @@ -379,12 +401,11 @@ set_dev_id(const char *key __rte_unused, } static int -set_cos(const char *key __rte_unused, +set_poll_interval(const char *key __rte_unused, const char *value, void *opaque) { - enum dlb2_cos *cos_id = opaque; - int x = 0; + int *poll_interval = opaque; int ret; if (value == NULL || opaque == NULL) { @@ -392,38 +413,83 @@ set_cos(const char *key __rte_unused, return -EINVAL; } - ret = dlb2_string_to_int(&x, value); + ret = dlb2_string_to_int(poll_interval, value); if (ret < 0) return ret; - if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) { - DLB2_LOG_ERR( - "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n", - x); + return 0; +} + +static int +set_port_cos(const char *key __rte_unused, + const char *value, + void *opaque) +{ + struct dlb2_port_cos *port_cos = opaque; + int first, last, cos_id, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - *cos_id = x; + /* command line override may take one of the following 3 forms: + * port_cos=all:<cos_id> ... all ports + * port_cos=port-port:<cos_id> ... a range of ports + * port_cos=port:<cos_id> ... 
just one port + */ + if (sscanf(value, "all:%d", &cos_id) == 1) { + first = 0; + last = DLB2_MAX_NUM_LDB_PORTS - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &cos_id) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be all:val, port-port:val, or port:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { + DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n"); + return -EINVAL; + } + + if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) { + DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n"); + return -EINVAL; + } + + for (i = first; i <= last; i++) + port_cos->cos_id[i] = cos_id; /* indexed by port */ return 0; } static int -set_poll_interval(const char *key __rte_unused, - const char *value, - void *opaque) +set_cos_bw(const char *key __rte_unused, + const char *value, + void *opaque) { - int *poll_interval = opaque; - int ret; + struct dlb2_cos_bw *cos_bw = opaque; - if (value == NULL || opaque == NULL) { + if (opaque == NULL) { DLB2_LOG_ERR("NULL pointer\n"); return -EINVAL; } - ret = dlb2_string_to_int(poll_interval, value); - if (ret < 0) - return ret; + /* format must be %d,%d,%d,%d */ + + if (sscanf(value, "%d,%d,%d,%d", &cos_bw->val[0], &cos_bw->val[1], + &cos_bw->val[2], &cos_bw->val[3]) != 4) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } + if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) { + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0,bw1,bw2,bw3 where all values combined are <= 100\n"); + return -EINVAL; + } return 0; } @@ -653,11 +719,13 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev, } static int -dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, +dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + struct dlb2_hw_dev *handle, const struct dlb2_hw_rsrcs *resources_asked, uint8_t device_version) { int ret = 0; + uint32_t cos_ports = 0; struct dlb2_create_sched_domain_args *cfg; if (resources_asked == NULL) { @@ -683,38 +751,22 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, /* LDB ports */ - cfg->cos_strict = 0; /* Best effort */ - cfg->num_cos_ldb_ports[0] = 0; - cfg->num_cos_ldb_ports[1] = 0; - cfg->num_cos_ldb_ports[2] = 0; - cfg->num_cos_ldb_ports[3] = 0; - - switch (handle->cos_id) { - case DLB2_COS_0: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[0] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_1: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_2: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports; - break; - case DLB2_COS_3: - cfg->num_ldb_ports = 0; /* no don't care ports */ - cfg->num_cos_ldb_ports[3] = - resources_asked->num_ldb_ports; - break; - case DLB2_COS_DEFAULT: - /* all ldb ports are don't care ports from a cos perspective */ - cfg->num_ldb_ports = - resources_asked->num_ldb_ports; - break; + /* tally of ports with non default COS */ + cos_ports = dlb2->cos_ports[1] + dlb2->cos_ports[2] + + dlb2->cos_ports[3]; + + if (cos_ports > resources_asked->num_ldb_ports) { + DLB2_LOG_ERR("dlb2: num_ldb_ports < nonzero cos_ports\n"); + ret = EINVAL; + goto 
error_exit; } + cfg->cos_strict = 0; /* Best effort */ + cfg->num_cos_ldb_ports[0] = resources_asked->num_ldb_ports - cos_ports; + cfg->num_cos_ldb_ports[1] = dlb2->cos_ports[1]; + cfg->num_cos_ldb_ports[2] = dlb2->cos_ports[2]; + cfg->num_cos_ldb_ports[3] = dlb2->cos_ports[3]; + if (device_version == DLB2_HW_V2) cfg->num_ldb_credits = resources_asked->num_ldb_credits; @@ -892,7 +944,8 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev) rsrcs->num_dir_credits = dlb2->num_dir_credits_override; } - if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) { + if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs, + dlb2->version) < 0) { DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n"); return -ENODEV; } @@ -1449,12 +1502,8 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; - if (handle->cos_id == DLB2_COS_DEFAULT) - cfg.cos_id = 0; - else - cfg.cos_id = handle->cos_id; - - cfg.cos_strict = 0; + cfg.cos_id = ev_port->cos_id; + cfg.cos_strict = 0;/* best effots */ /* User controls the LDB high watermark via enqueue depth. The DIR high * watermark is equal, unless the directed credit pool is too small. @@ -1509,7 +1558,8 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, qm_port->cached_ldb_credits = 0; qm_port->cached_dir_credits = 0; if (ev_port->cq_weight) { - struct dlb2_enable_cq_weight_args cq_weight_args = {0}; + struct dlb2_enable_cq_weight_args + cq_weight_args = { {0} }; cq_weight_args.port_id = qm_port->id; cq_weight_args.limit = ev_port->cq_weight; @@ -4450,7 +4500,6 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2->max_num_events_override = dlb2_args->max_num_events; dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override; - dlb2->qm_instance.cos_id = dlb2_args->cos_id; dlb2->poll_interval = dlb2_args->poll_interval; dlb2->sw_credit_quanta = dlb2_args->sw_credit_quanta; dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta; @@ -4482,6 +4531,28 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_iface_hardware_init(&dlb2->qm_instance); + /* configure class of service */ + { + struct dlb2_set_cos_bw_args + set_cos_bw_args = { {0} }; + int id; + int ret = 0; + + for (id = 0; id < DLB2_COS_NUM_VALS; id++) { + set_cos_bw_args.cos_id = id; + set_cos_bw_args.cos_id = dlb2->cos_bw[id]; + ret = dlb2_iface_set_cos_bw(&dlb2->qm_instance, + &set_cos_bw_args); + if (ret != 0) + break; + } + if (ret) { + DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n", + err); + return err; + } + } + err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode); if (err < 0) { DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n", @@ -4512,6 +4583,12 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, dlb2_init_cq_weight(dlb2, dlb2_args->cq_weight.limit); + dlb2_init_port_cos(dlb2, + dlb2_args->port_cos.cos_id); + + dlb2_init_cos_bw(dlb2, + &dlb2_args->cos_bw); + return 0; } @@ -4567,6 +4644,8 @@ dlb2_parse_params(const char *params, DLB2_VECTOR_OPTS_ENAB_ARG, DLB2_MAX_CQ_DEPTH, DLB2_CQ_WEIGHT, + DLB2_PORT_COS, + DLB2_COS_BW, NULL }; if (params != NULL && params[0] != '\0') { @@ -4639,16 +4718,6 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process(kvlist, DLB2_COS_ARG, - set_cos, - &dlb2_args->cos_id); - if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing cos parameter", - name); - rte_kvargs_free(kvlist); - return ret; - } - ret = rte_kvargs_process(kvlist, DLB2_POLL_INTERVAL_ARG, set_poll_interval, 
&dlb2_args->poll_interval); @@ -4724,6 +4793,29 @@ dlb2_parse_params(const char *params, return ret; } + ret = rte_kvargs_process(kvlist, + DLB2_PORT_COS, + set_port_cos, + &dlb2_args->port_cos); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing port cos", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, + DLB2_COS_BW, + set_cos_bw, + &dlb2_args->cos_bw); + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing cos_bw", + name); + rte_kvargs_free(kvlist); + return ret; + } + + rte_kvargs_free(kvlist); } } diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index b77faa967c..100db434d0 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ b/drivers/event/dlb2/dlb2_iface.c @@ -76,3 +76,6 @@ int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); + +int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index fef717392f..dc0c446ce8 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -76,4 +76,7 @@ extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle, struct dlb2_enable_cq_weight_args *args); +extern int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args); + #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index 63b092fc47..528e2ede61 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -45,6 +45,8 @@ #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable" #define DLB2_MAX_CQ_DEPTH "max_cq_depth" #define DLB2_CQ_WEIGHT "cq_weight" +#define DLB2_PORT_COS "port_cos" +#define DLB2_COS_BW "cos_bw" /* Begin HW related defines and structs */ @@ -416,7 +418,8 @@ enum dlb2_cos { DLB2_COS_0 = 0, DLB2_COS_1, DLB2_COS_2, - DLB2_COS_3 + DLB2_COS_3, + DLB2_COS_NUM_VALS }; struct dlb2_hw_dev { @@ -424,7 +427,6 @@ struct dlb2_hw_dev { struct dlb2_hw_resource_info info; void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */ uint32_t domain_id; - enum dlb2_cos cos_id; rte_spinlock_t resource_lock; /* for MP support */ } __rte_cache_aligned; @@ -529,6 +531,7 @@ struct dlb2_eventdev_port { bool enq_configured; uint8_t implicit_release; /* release events before dequeuing */ uint32_t cq_weight; /* DLB2.5 and above ldb ports only */ + int cos_id; /*ldb port class of service */ } __rte_cache_aligned; struct dlb2_queue { @@ -623,6 +626,8 @@ struct dlb2_eventdev { uint32_t credit_pool __rte_cache_aligned; }; }; + uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */ + uint32_t cos_bw[DLB2_COS_NUM_VALS]; /* bandwidth per cos domain */ }; /* used for collecting and passing around the dev args */ @@ -634,6 +639,14 @@ struct dlb2_cq_weight { int limit[DLB2_MAX_NUM_LDB_PORTS]; }; +struct dlb2_port_cos { + int cos_id[DLB2_MAX_NUM_LDB_PORTS]; +}; + +struct dlb2_cos_bw { + int val[DLB2_COS_NUM_VALS]; +}; + struct dlb2_devargs { int socket_id; int max_num_events; @@ -648,6 +661,8 @@ struct dlb2_devargs { bool vector_opts_enabled; int max_cq_depth; struct dlb2_cq_weight cq_weight; + struct dlb2_port_cos port_cos; + struct dlb2_cos_bw cos_bw; }; /* End Eventdev related defines and structs */ diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c 
b/drivers/event/dlb2/pf/base/dlb2_resource.c index 24a8215452..da1949c763 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -6489,3 +6489,69 @@ int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw, return 0; } + +static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw) +{ + DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n"); + DLB2_HW_DBG(hw, "\tCoS ID: %u\n", cos_id); + DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw); +} + +#define DLB2_MAX_BW_PCT 100 + +/** + * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a + * port class-of-service. + * @hw: dlb2_hw handle for a particular device. + * @cos_id: class-of-service ID. + * @bandwidth: class-of-service bandwidth. + * + * Return: + * Returns 0 upon success, < 0 otherwise. + * + * Errors: + * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would + * cause the total bandwidth across all classes of service to exceed + * 100%. + */ +int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth) +{ + unsigned int i; + u32 reg; + u8 total; + + if (cos_id >= DLB2_NUM_COS_DOMAINS) + return -EINVAL; + + if (bandwidth > DLB2_MAX_BW_PCT) + return -EINVAL; + + total = 0; + + for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) + total += (i == cos_id) ? bandwidth : hw->cos_reservation[i]; + + if (total > DLB2_MAX_BW_PCT) + return -EINVAL; + + reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id)); + + /* + * Normalize the bandwidth to a value in the range 0-255. Integer + * division may leave unreserved scheduling slots; these will be + * divided among the 4 classes of service. + */ + DLB2_BITS_SET(reg, (bandwidth * 256) / 100, DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE); + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg); + + reg = 0; + DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER); + /* Atomically transfer the newly configured service weight */ + DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg); + + dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth); + + hw->cos_reservation[cos_id] = bandwidth; + + return 0; +} diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 1d0415e46f..0627f06a6e 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -646,6 +646,25 @@ dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle, + struct dlb2_set_cos_bw_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + int ret = 0; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw, + args->cos_id, + args->bandwidth); + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -671,6 +690,7 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight; + dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth; } /* PCI DEV HOOKS */ @@ -684,7 +704,6 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) .max_num_events = DLB2_MAX_NUM_LDB_CREDITS, .num_dir_credits_override = -1, .qid_depth_thresholds = { {0} }, - .cos_id = DLB2_COS_DEFAULT, .poll_interval = DLB2_POLL_INTERVAL_DEFAULT, .sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT, .hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ, -- 
2.25.1
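The two new devargs introduced by this patch, "port_cos" and "cos_bw", are parsed with plain sscanf() patterns, so their accepted value formats can be checked in isolation. The sketch below mirrors the logic of set_port_cos() and set_cos_bw() from the patch above; MAX_LDB_PORTS is an illustrative stand-in for DLB2_MAX_NUM_LDB_PORTS, and the sample strings in main() are examples only, not taken from the patch.

/*
 * Standalone sketch of the value formats accepted by the new "port_cos" and
 * "cos_bw" devargs, mirroring the sscanf() logic of set_port_cos() and
 * set_cos_bw().  MAX_LDB_PORTS is an assumption for illustration only.
 */
#include <stdio.h>

#define MAX_LDB_PORTS 64
#define NUM_COS       4

/* Accepts "all:<cos>", "<first>-<last>:<cos>" or "<port>:<cos>" */
static int parse_port_cos(const char *value, int *cos_of_port)
{
	int first, last, cos_id, i;

	if (sscanf(value, "all:%d", &cos_id) == 1) {
		first = 0;
		last = MAX_LDB_PORTS - 1;
	} else if (sscanf(value, "%d-%d:%d", &first, &last, &cos_id) == 3) {
		/* range form: nothing more to parse */
	} else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) {
		last = first;
	} else {
		return -1;
	}

	if (first > last || first < 0 || last >= MAX_LDB_PORTS)
		return -1;

	if (cos_id < 0 || cos_id >= NUM_COS)
		return -1;

	for (i = first; i <= last; i++)
		cos_of_port[i] = cos_id;

	return 0;
}

/* Accepts "bw0,bw1,bw2,bw3"; the four percentages must sum to <= 100 */
static int parse_cos_bw(const char *value, int bw[NUM_COS])
{
	if (sscanf(value, "%d,%d,%d,%d",
		   &bw[0], &bw[1], &bw[2], &bw[3]) != 4)
		return -1;

	if (bw[0] + bw[1] + bw[2] + bw[3] > 100)
		return -1;

	return 0;
}

int main(void)
{
	int cos_of_port[MAX_LDB_PORTS] = {0};
	int bw[NUM_COS];

	/* e.g. port_cos=0-15:1 together with cos_bw=40,40,10,10 */
	if (parse_port_cos("0-15:1", cos_of_port) == 0 &&
	    parse_cos_bw("40,40,10,10", bw) == 0)
		printf("port 0 -> CoS %d, CoS 1 bandwidth %d%%\n",
		       cos_of_port[0], bw[1]);

	return 0;
}

As in the driver, a port range outside the valid port IDs, a class-of-service ID outside 0-3, or a bandwidth split summing to more than 100% is rejected during devarg parsing, before anything is programmed into hardware.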
Thread overview: 13+ messages

2022-06-16 14:23 [PATCH 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel
2022-06-16 14:23 ` [PATCH 1/2] event/dlb2: add CQ weight support Timothy McDaniel
2022-06-16 14:23 ` [PATCH 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel
2022-06-16 19:55 ` [PATCH v2 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel
2022-06-16 19:55 ` [PATCH v2 1/2] event/dlb2: add CQ weight support Timothy McDaniel
2022-06-16 19:55 ` [PATCH v2 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel
2022-06-16 20:36 ` [PATCH v3 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel
2022-06-16 20:36 ` [PATCH v3 1/2] event/dlb2: add CQ weight support Timothy McDaniel
2022-06-16 20:36 ` [PATCH v3 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel
2022-06-16 22:21 ` [PATCH v4 0/2] Rebase DLB2 port_cos and cq_weight patches Timothy McDaniel
2022-06-16 22:21 ` [PATCH v4 1/2] event/dlb2: add CQ weight support Timothy McDaniel
2022-06-20 17:33 ` Jerin Jacob
2022-06-16 22:21 ` [PATCH v4 2/2] event/dlb2: add ldb port specific COS support Timothy McDaniel