This patch adds support for the counter, connection tracking, meter
and age actions in non-template mode. For the CT, counter and meter
actions: if no previous allocation was handled by the HW configure
routine, half of the maximum supported number of objects is
allocated. For the AGE action, if no counters were allocated, half of
the maximum number of counters is allocated first, and then the same
number of AGE objects is allocated.

Also extract the shared host handling into the configure function,
and align all ASO actions to have an init function for future code
improvement.

This patch does not affect the SW Steering flow engine.

Signed-off-by: Maayan Kashani
Acked-by: Dariusz Sosnowski
---
 drivers/net/mlx5/mlx5.h         |   7 ++
 drivers/net/mlx5/mlx5_flow_hw.c | 187 +++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_hws_cnt.c |  47 ++++----
 drivers/net/mlx5/mlx5_hws_cnt.h |  10 +-
 4 files changed, 192 insertions(+), 59 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 69e0dcff4c..8940f7c3a2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -812,6 +812,13 @@ struct mlx5_dev_shared_port {
 /* Only yellow color valid. */
 #define MLX5_MTR_POLICY_MODE_OY 3
 
+/* Max number of meters allocated in non-template mode. */
+#define MLX5_MTR_NT_MAX (1 << 23) /* TODO: verify number. */
+/* Max number of connection tracking objects allocated in non-template mode. */
+#define MLX5_CT_NT_MAX (1 << 23) /* TODO: verify number. */
+/* Max number of counters allocated in non-template mode. */
+#define MLX5_CNT_MAX (1 << 23) /* TODO: verify number. */
+
 enum mlx5_meter_domain {
 	MLX5_MTR_DOMAIN_INGRESS,
 	MLX5_MTR_DOMAIN_EGRESS,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b8f8c70c42..85da86167f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -302,6 +302,11 @@ static void flow_hw_construct_quota(struct mlx5_priv *priv,
 				    struct mlx5dr_rule_action *rule_act,
 				    uint32_t qid);
 
+static int
+mlx5_flow_ct_init(struct rte_eth_dev *dev,
+		  uint32_t nb_conn_tracks,
+		  uint16_t nb_queue);
+
 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
 
@@ -1623,7 +1628,7 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
 	}
 	if (meter_mark->profile == NULL)
 		return NULL;
-	aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id);
+	aso_mtr = mlx5_ipool_malloc(pool->idx_pool, &mtr_id);
 	if (!aso_mtr)
 		return NULL;
 	/* Fill the flow meter parameters. */
@@ -2443,8 +2448,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
-			flow_hw_translate_group(dev, cfg, attr->group,
+			ret = flow_hw_translate_group(dev, cfg, attr->group,
 						&target_grp, error);
+			if (ret)
+				return ret;
 			if (target_grp == 0) {
 				__flow_hw_action_template_destroy(dev, acts);
 				return rte_flow_error_set(error, ENOTSUP,
@@ -2491,8 +2498,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
 				goto err;
 			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
-			flow_hw_translate_group(dev, cfg, attr->group,
+			ret = flow_hw_translate_group(dev, cfg, attr->group,
 						&target_grp, error);
+			if (ret)
+				return ret;
 			if (target_grp == 0) {
 				__flow_hw_action_template_destroy(dev, acts);
 				return rte_flow_error_set(error, ENOTSUP,
@@ -2507,8 +2516,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
 				goto err;
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			flow_hw_translate_group(dev, cfg, attr->group,
+			ret = flow_hw_translate_group(dev, cfg, attr->group,
 						&target_grp, error);
+			if (ret)
+				return ret;
 			if (target_grp == 0) {
 				__flow_hw_action_template_destroy(dev, acts);
 				return rte_flow_error_set(error, ENOTSUP,
@@ -10285,12 +10296,12 @@ flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,
 
 static struct mlx5_aso_ct_pool *
 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
-		       const struct rte_flow_port_attr *port_attr)
+		       uint32_t nb_conn_tracks)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_aso_ct_pool *pool;
 	struct mlx5_devx_obj *obj;
-	uint32_t nb_cts = rte_align32pow2(port_attr->nb_conn_tracks);
+	uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);
 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
 	struct mlx5_indexed_pool_config cfg = {
 		.size = sizeof(struct mlx5_aso_ct_action),
@@ -10342,7 +10353,7 @@ flow_hw_ct_pool_create(struct rte_eth_dev *dev,
 		pool->devx_obj = host_priv->hws_ctpool->devx_obj;
 		pool->cts = host_priv->hws_ctpool->cts;
 		MLX5_ASSERT(pool->cts);
-		MLX5_ASSERT(!port_attr->nb_conn_tracks);
+		MLX5_ASSERT(!nb_conn_tracks);
 	}
 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
 	flags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
@@ -10362,6 +10373,46 @@ flow_hw_ct_pool_create(struct rte_eth_dev *dev,
 	return NULL;
 }
 
+static int
+mlx5_flow_ct_init(struct rte_eth_dev *dev,
+		  uint32_t nb_conn_tracks,
+		  uint16_t nb_queue)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t mem_size;
+	int ret = -ENOMEM;
+
+	if (!priv->shared_host) {
+		mem_size = sizeof(struct mlx5_aso_sq) * nb_queue +
+			   sizeof(*priv->ct_mng);
+		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
+					   RTE_CACHE_LINE_SIZE,
+					   SOCKET_ID_ANY);
+		if (!priv->ct_mng)
+			goto err;
+		ret = mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng,
+					     nb_queue);
+		if (ret)
+			goto err;
+	}
+	priv->hws_ctpool = flow_hw_ct_pool_create(dev, nb_conn_tracks);
+	if (!priv->hws_ctpool)
+		goto err;
+	priv->sh->ct_aso_en = 1;
+	return 0;
+
+err:
+	if (priv->hws_ctpool) {
+		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
+		priv->hws_ctpool = NULL;
+	}
+	if (priv->ct_mng) {
+		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
+		priv->ct_mng = NULL;
+	}
+	return ret;
+}
+
 static void
 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
 {
@@ -11009,6 +11060,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
 	int ret = 0;
 	uint32_t action_flags;
+	bool strict_queue = false;
 
 	if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) {
 		rte_errno = EINVAL;
@@ -11250,25 +11302,13 @@ flow_hw_configure(struct rte_eth_dev *dev,
 	if (!priv->shared_host)
 		flow_hw_create_send_to_kernel_actions(priv);
 	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
-		if (!priv->shared_host) {
-			mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
-				   sizeof(*priv->ct_mng);
-			priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
-						   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-			if (!priv->ct_mng)
-				goto err;
-			if (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))
-				goto err;
-		}
-		priv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);
-		if (!priv->hws_ctpool)
+		if (mlx5_flow_ct_init(dev, port_attr->nb_conn_tracks, nb_q_updated))
 			goto err;
-		priv->sh->ct_aso_en = 1;
 	}
 	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
-		priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
-							   nb_queue);
-		if (priv->hws_cpool == NULL)
+		if (mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters,
+					     nb_queue,
+					     (host_priv ? host_priv->hws_cpool : NULL)))
 			goto err;
 	}
 	if (port_attr->nb_aging_objects) {
@@ -11285,12 +11325,17 @@ flow_hw_configure(struct rte_eth_dev *dev,
 			rte_errno = EINVAL;
 			goto err;
 		}
-		ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);
-		if (ret < 0) {
-			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "Failed to init age pool.");
+		if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+			DRV_LOG(ERR, "Aging is not supported "
+				"in cross vHCA sharing mode");
+			ret = -ENOTSUP;
 			goto err;
 		}
+		strict_queue = !!(port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
+		ret = mlx5_hws_age_pool_init(dev, port_attr->nb_aging_objects,
+					     nb_queue, strict_queue);
+		if (ret < 0)
+			goto err;
 	}
 	ret = flow_hw_create_vlan(dev);
 	if (ret) {
@@ -12914,7 +12959,78 @@ static int flow_hw_register_matcher(struct rte_eth_dev *dev,
 	}
 }
 
-static int flow_hw_apply(struct rte_eth_dev *dev __rte_unused, /* TODO: remove if not used */
+static int flow_hw_allocate_actions(struct rte_eth_dev *dev,
+				    const struct rte_flow_action actions[],
+				    struct rte_flow_error *error)
+{
+	bool actions_end = false;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int ret;
+
+	for (; !actions_end; actions++) {
+		switch ((int)actions->type) {
+		case RTE_FLOW_ACTION_TYPE_AGE:
+			/* If no age objects were previously allocated. */
+			if (!priv->hws_age_req) {
+				/* If no counters were previously allocated. */
+				if (!priv->hws_cpool) {
+					ret = mlx5_hws_cnt_pool_create(dev, MLX5_CNT_MAX,
+								       priv->nb_queue, NULL);
+					if (ret)
+						goto err;
+				}
+				if (priv->hws_cpool) {
+					/* Allocate the same number of counters. */
+					ret = mlx5_hws_age_pool_init(dev,
+								     priv->hws_cpool->cfg.request_num,
+								     priv->nb_queue, false);
+					if (ret)
+						goto err;
+				}
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			/* If no counters were previously allocated. */
+			if (!priv->hws_cpool) {
+				ret = mlx5_hws_cnt_pool_create(dev, MLX5_CNT_MAX,
+							       priv->nb_queue, NULL);
+				if (ret)
+					goto err;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			/* If no CT objects were previously allocated. */
+			if (!priv->hws_ctpool) {
+				ret = mlx5_flow_ct_init(dev, MLX5_CT_NT_MAX, priv->nb_queue);
+				if (ret)
+					goto err;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_METER_MARK:
+			/* If no meters were previously allocated. */
+			if (!priv->hws_mpool) {
+				ret = mlx5_flow_meter_init(dev, MLX5_MTR_NT_MAX, 0, 0,
+							   priv->nb_queue);
+				if (ret)
+					goto err;
+			}
+			break;
+		case RTE_FLOW_ACTION_TYPE_END:
+			actions_end = true;
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+err:
+	return rte_flow_error_set(error, ret,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "failed to allocate actions");
+}
+
+/* TODO: remove dev if not used. */
+static int flow_hw_apply(struct rte_eth_dev *dev __rte_unused,
 			 const struct rte_flow_item items[],
 			 struct mlx5dr_rule_action rule_actions[],
 			 struct rte_flow_hw *flow,
@@ -13024,11 +13140,26 @@ static int flow_hw_create_flow(struct rte_eth_dev *dev,
 	if (ret)
 		goto error;
 
+	/*
+	 * ASO allocation: iterate over the action list to allocate any
+	 * missing resources. In the future, when a validate function is
+	 * added to HWS, its output actions bit mask can be used instead
+	 * of looping over the actions array twice.
+	 */
+	ret = flow_hw_allocate_actions(dev, actions, error);
+	if (ret)
+		goto error;
+	/* Note: the actions should be saved in the sub-flow rule itself for reference. */
 	ret = flow_hw_translate_actions(dev, attr, actions, *flow, &hw_act,
 					external, error);
 	if (ret)
 		goto error;
 
+	/*
+	 * TODO: check regarding release: the CT index is not saved per rule,
+	 * the index is in the conf of the given action.
+	 */
+
 	/*
 	 * If the flow is external (from application) OR device is started,
 	 * OR mreg discover, then apply immediately.
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index 1b625e07bd..36d422bdfa 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -443,7 +443,7 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
 			(uint32_t)cnt_num, SOCKET_ID_ANY,
 			RING_F_MP_HTS_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (cntp->wait_reset_list == NULL) {
-		DRV_LOG(ERR, "failed to create free list ring");
+		DRV_LOG(ERR, "failed to create wait reset list ring");
 		goto error;
 	}
 	snprintf(mz_name, sizeof(mz_name), "%s_U_RING", pcfg->name);
@@ -631,16 +631,17 @@ mlx5_hws_cnt_pool_action_create(struct mlx5_priv *priv,
 	return ret;
 }
 
-struct mlx5_hws_cnt_pool *
+int
 mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
-			 const struct rte_flow_port_attr *pattr, uint16_t nb_queue)
+			 uint32_t nb_counters, uint16_t nb_queue,
+			 struct mlx5_hws_cnt_pool *chost)
 {
 	struct mlx5_hws_cnt_pool *cpool = NULL;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hws_cache_param cparam = {0};
 	struct mlx5_hws_cnt_pool_cfg pcfg = {0};
 	char *mp_name;
-	int ret = 0;
+	int ret = -1;
 	size_t sz;
 
 	mp_name = mlx5_malloc(MLX5_MEM_ZERO, RTE_MEMZONE_NAMESIZE, 0, SOCKET_ID_ANY);
@@ -648,13 +649,9 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 		goto error;
 	snprintf(mp_name, RTE_MEMZONE_NAMESIZE, "MLX5_HWS_CNT_P_%x", dev->data->port_id);
 	pcfg.name = mp_name;
-	pcfg.request_num = pattr->nb_counters;
+	pcfg.request_num = nb_counters;
 	pcfg.alloc_factor = HWS_CNT_ALLOC_FACTOR_DEFAULT;
-	if (pattr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
-		struct mlx5_priv *host_priv =
-				priv->shared_host->data->dev_private;
-		struct mlx5_hws_cnt_pool *chost = host_priv->hws_cpool;
-
+	if (chost) {
 		pcfg.host_cpool = chost;
 		cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam);
 		if (cpool == NULL)
@@ -662,13 +659,13 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 		ret = mlx5_hws_cnt_pool_action_create(priv, cpool);
 		if (ret != 0)
 			goto error;
-		return cpool;
+		goto success;
 	}
 	/* init cnt service if not. */
 	if (priv->sh->cnt_svc == NULL) {
 		ret = mlx5_hws_cnt_svc_init(priv->sh);
-		if (ret != 0)
-			return NULL;
+		if (ret)
+			return ret;
 	}
 	cparam.fetch_sz = HWS_CNT_CACHE_FETCH_DEFAULT;
 	cparam.preload_sz = HWS_CNT_CACHE_PRELOAD_DEFAULT;
@@ -701,10 +698,13 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 	rte_spinlock_lock(&priv->sh->cpool_lock);
 	LIST_INSERT_HEAD(&priv->sh->hws_cpool_list, cpool, next);
 	rte_spinlock_unlock(&priv->sh->cpool_lock);
-	return cpool;
+success:
+	priv->hws_cpool = cpool;
+	return 0;
 error:
 	mlx5_hws_cnt_pool_destroy(priv->sh, cpool);
-	return NULL;
+	priv->hws_cpool = NULL;
+	return ret;
 }
 
 void
@@ -1217,8 +1217,9 @@ mlx5_hws_age_info_destroy(struct mlx5_priv *priv)
  */
 int
 mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
-		       const struct rte_flow_port_attr *attr,
-		       uint16_t nb_queues)
+		       uint32_t nb_aging_objects,
+		       uint16_t nb_queues,
+		       bool strict_queue)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
@@ -1233,28 +1234,20 @@ mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
 		.free = mlx5_free,
 		.type = "mlx5_hws_age_pool",
 	};
-	bool strict_queue = false;
 	uint32_t nb_alloc_cnts;
 	uint32_t rsize;
 	uint32_t nb_ages_updated;
 	int ret;
 
-	strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
 	MLX5_ASSERT(priv->hws_cpool);
-	if (attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
-		DRV_LOG(ERR, "Aging sn not supported "
-			"in cross vHCA sharing mode");
-		rte_errno = ENOTSUP;
-		return -ENOTSUP;
-	}
 	nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
 	if (strict_queue) {
 		rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,
							  nb_queues);
-		nb_ages_updated = rsize * nb_queues + attr->nb_aging_objects;
+		nb_ages_updated = rsize * nb_queues + nb_aging_objects;
 	} else {
 		rsize = mlx5_hws_aged_out_ring_size_get(nb_alloc_cnts);
-		nb_ages_updated = rsize + attr->nb_aging_objects;
+		nb_ages_updated = rsize + nb_aging_objects;
 	}
 	ret = mlx5_hws_age_info_init(dev, nb_queues, strict_queue, rsize);
 	if (ret < 0)
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index db4e99e37c..996ac8dd9a 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -712,9 +712,10 @@ mlx5_hws_cnt_service_thread_create(struct mlx5_dev_ctx_shared *sh);
 void
 mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh);
 
-struct mlx5_hws_cnt_pool *
+int
 mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
-			 const struct rte_flow_port_attr *pattr, uint16_t nb_queue);
+			 uint32_t nb_counters, uint16_t nb_queue,
+			 struct mlx5_hws_cnt_pool *chost);
 
 void
 mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh,
@@ -744,8 +745,9 @@ mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);
 
 int
 mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
-		       const struct rte_flow_port_attr *attr,
-		       uint16_t nb_queues);
+		       uint32_t nb_aging_objects,
+		       uint16_t nb_queues,
+		       bool strict_queue);
 
 void
 mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);
-- 
2.21.0
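
[Editor's note] For reviewers unfamiliar with the path this patch extends: after
this change, an application can attach ASO actions through the classic rte_flow
API without reserving resources via rte_flow_configure() first;
flow_hw_create_flow() then calls flow_hw_allocate_actions() to create any
missing pools on demand. Below is a minimal sketch of such a caller, not part
of the patch; the helper name, the trivial pattern and the assumption of an
already started port are placeholders.

#include <rte_flow.h>

/*
 * Hypothetical caller: create a counted flow rule without any prior
 * rte_flow_configure() counter reservation. With this patch, the mlx5
 * PMD is expected to allocate the counter pool lazily (MLX5_CNT_MAX
 * objects) when it first sees the COUNT action in non-template mode.
 */
static struct rte_flow *
create_counted_flow(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_count count_conf = { 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* No template/table setup: the PMD allocates ASO objects on demand. */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}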