From mboxrd@z Thu Jan 1 00:00:00 1970
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com
Date: Wed, 2 Aug 2017 16:10:16 +0200
Message-Id: <625f46a92fc14c1aa52ccaf1ebbcbf9f488f4122.1501681927.git.nelio.laranjeiro@6wind.com>
Subject: [dpdk-dev] [PATCH v1 01/21] net/mlx5: merge action and flow parser structure

mlx5_flow_create() and mlx5_flow_validate() perform common checks; to let
them share the same state, merge the parsed action result into the flow
parser structure.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 151 +++++++++++++++++++++----------------------
 1 file changed, 72 insertions(+), 79 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7dd3ebb..00355f4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -271,12 +271,23 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 	},
 };
 
+/* Structure to parse actions. */
+struct mlx5_flow_action {
+	uint32_t queue:1; /**< Target is a receive queue. */
+	uint32_t drop:1; /**< Target is a drop queue. */
+	uint32_t mark:1; /**< Mark is present in the flow. */
+	uint32_t mark_id; /**< Mark identifier. */
+	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
+	uint16_t queues_n; /**< Number of entries in queue[]. */
+};
+
 /** Structure to pass to the conversion function. */
-struct mlx5_flow {
+struct mlx5_flow_parse {
 	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
 	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
 	uint32_t inner; /**< Set once VXLAN is encountered. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
+	struct mlx5_flow_action actions; /**< Parsed action result. */
 };
 
 /** Structure for Drop queue. */
@@ -287,15 +298,6 @@ struct rte_flow_drop {
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 };
 
-struct mlx5_flow_action {
-	uint32_t queue:1; /**< Target is a receive queue. */
-	uint32_t drop:1; /**< Target is a drop queue. */
-	uint32_t mark:1; /**< Mark is present in the flow. */
-	uint32_t mark_id; /**< Mark identifier. */
-	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
-	uint16_t queues_n; /**< Number of entries in queue[]. */
-};
-
 /**
  * Check support for a given item.
  *
@@ -374,8 +376,6 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
  *   Perform verbose error reporting if not NULL.
  * @param[in, out] flow
  *   Flow structure to update.
- * @param[in, out] action
- *   Action structure to update.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
@@ -386,8 +386,7 @@ priv_flow_validate(struct priv *priv,
 		   const struct rte_flow_item items[],
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error,
-		   struct mlx5_flow *flow,
-		   struct mlx5_flow_action *action)
+		   struct mlx5_flow_parse *flow)
 {
 	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
 
@@ -469,7 +468,7 @@ priv_flow_validate(struct priv *priv,
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
 			continue;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
-			action->drop = 1;
+			flow->actions.drop = 1;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
 			const struct rte_flow_action_queue *queue =
 				(const struct rte_flow_action_queue *)
@@ -479,13 +478,13 @@ priv_flow_validate(struct priv *priv,
 
 			if (!queue || (queue->index > (priv->rxqs_n - 1)))
 				goto exit_action_not_supported;
-			for (n = 0; n < action->queues_n; ++n) {
-				if (action->queues[n] == queue->index) {
+			for (n = 0; n < flow->actions.queues_n; ++n) {
+				if (flow->actions.queues[n] == queue->index) {
 					found = 1;
 					break;
 				}
 			}
-			if (action->queues_n > 1 && !found) {
+			if (flow->actions.queues_n > 1 && !found) {
 				rte_flow_error_set(error, ENOTSUP,
 					   RTE_FLOW_ERROR_TYPE_ACTION,
 					   actions,
@@ -493,9 +492,9 @@ priv_flow_validate(struct priv *priv,
 				return -rte_errno;
 			}
 			if (!found) {
-				action->queue = 1;
-				action->queues_n = 1;
-				action->queues[0] = queue->index;
+				flow->actions.queue = 1;
+				flow->actions.queues_n = 1;
+				flow->actions.queues[0] = queue->index;
 			}
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
 			const struct rte_flow_action_rss *rss =
@@ -510,12 +509,12 @@ priv_flow_validate(struct priv *priv,
 						   "no valid queues");
 				return -rte_errno;
 			}
-			if (action->queues_n == 1) {
+			if (flow->actions.queues_n == 1) {
 				uint16_t found = 0;
 
-				assert(action->queues_n);
+				assert(flow->actions.queues_n);
 				for (n = 0; n < rss->num; ++n) {
-					if (action->queues[0] ==
+					if (flow->actions.queues[0] ==
 					    rss->queue[n]) {
 						found = 1;
 						break;
@@ -540,10 +539,10 @@ priv_flow_validate(struct priv *priv,
 					return -rte_errno;
 				}
 			}
-			action->queue = 1;
+			flow->actions.queue = 1;
 			for (n = 0; n < rss->num; ++n)
-				action->queues[n] = rss->queue[n];
-			action->queues_n = rss->num;
+				flow->actions.queues[n] = rss->queue[n];
+			flow->actions.queues_n = rss->num;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
 			const struct rte_flow_action_mark *mark =
 				(const struct rte_flow_action_mark *)
@@ -563,19 +562,19 @@ priv_flow_validate(struct priv *priv,
 						   " and 16777199");
 				return -rte_errno;
 			}
-			action->mark = 1;
-			action->mark_id = mark->id;
+			flow->actions.mark = 1;
+			flow->actions.mark_id = mark->id;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
-			action->mark = 1;
+			flow->actions.mark = 1;
 		} else {
 			goto exit_action_not_supported;
 		}
 	}
-	if (action->mark && !flow->ibv_attr && !action->drop)
+	if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
 		flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
-	if (!flow->ibv_attr && action->drop)
+	if (!flow->ibv_attr && flow->actions.drop)
 		flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-	if (!action->queue && !action->drop) {
+	if (!flow->actions.queue && !flow->actions.drop) {
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "no valid action");
 		return -rte_errno;
@@ -606,18 +605,16 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 {
 	struct priv *priv = dev->data->dev_private;
 	int ret;
-	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
-	struct mlx5_flow_action action = {
-		.queue = 0,
-		.drop = 0,
-		.mark = 0,
-		.mark_id = MLX5_FLOW_MARK_DEFAULT,
-		.queues_n = 0,
+	struct mlx5_flow_parse flow = {
+		.offset = sizeof(struct ibv_exp_flow_attr),
+		.actions = {
+			.mark_id = MLX5_FLOW_MARK_DEFAULT,
+			.queues_n = 0,
+		},
 	};
 
 	priv_lock(priv);
-	ret = priv_flow_validate(priv, attr, items, actions, error, &flow,
-				 &action);
+	ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
 	priv_unlock(priv);
 	return ret;
 }
@@ -639,7 +636,7 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_eth *spec = item->spec;
 	const struct rte_flow_item_eth *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_eth *eth;
 	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
 	unsigned int i;
@@ -688,7 +685,7 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_vlan *spec = item->spec;
 	const struct rte_flow_item_vlan *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_eth *eth;
 	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
 
@@ -720,7 +717,7 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_ipv4 *spec = item->spec;
 	const struct rte_flow_item_ipv4 *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_ipv4_ext *ipv4;
 	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
 
@@ -774,7 +771,7 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_ipv6 *spec = item->spec;
 	const struct rte_flow_item_ipv6 *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_ipv6_ext *ipv6;
 	unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);
 	unsigned int i;
@@ -831,7 +828,7 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_udp *spec = item->spec;
 	const struct rte_flow_item_udp *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_tcp_udp *udp;
 	unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
 
@@ -875,7 +872,7 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_tcp *spec = item->spec;
 	const struct rte_flow_item_tcp *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_tcp_udp *tcp;
 	unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
 
@@ -919,7 +916,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_vxlan *spec = item->spec;
 	const struct rte_flow_item_vxlan *mask = item->mask;
-	struct mlx5_flow *flow = (struct mlx5_flow *)data;
+	struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
 	struct ibv_exp_flow_spec_tunnel *vxlan;
 	unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
 	union vni {
@@ -958,7 +955,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
  *   Mark identifier.
  */
 static int
-mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
 {
 	struct ibv_exp_flow_spec_action_tag *tag;
 	unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
@@ -988,7 +985,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
  */
 static struct rte_flow *
 priv_flow_create_action_queue_drop(struct priv *priv,
-				   struct mlx5_flow *flow,
+				   struct mlx5_flow_parse *flow,
 				   struct rte_flow_error *error)
 {
 	struct rte_flow *rte_flow;
@@ -1036,8 +1033,6 @@ priv_flow_create_action_queue_drop(struct priv *priv,
  *   Pointer to private structure.
  * @param flow
  *   MLX5 flow attributes (filled by mlx5_flow_validate()).
- * @param action
- *   Target action structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
@@ -1046,50 +1041,50 @@ priv_flow_create_action_queue_drop(struct priv *priv,
  */
 static struct rte_flow *
 priv_flow_create_action_queue(struct priv *priv,
-			      struct mlx5_flow *flow,
-			      struct mlx5_flow_action *action,
+			      struct mlx5_flow_parse *flow,
 			      struct rte_flow_error *error)
 {
 	struct rte_flow *rte_flow;
 	unsigned int i;
 	unsigned int j;
-	const unsigned int wqs_n = 1 << log2above(action->queues_n);
+	const unsigned int wqs_n = 1 << log2above(flow->actions.queues_n);
 	struct ibv_exp_wq *wqs[wqs_n];
 
 	assert(priv->pd);
 	assert(priv->ctx);
-	assert(!action->drop);
+	assert(!flow->actions.drop);
 	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) +
-			      sizeof(*rte_flow->rxqs) * action->queues_n, 0);
+			      sizeof(*rte_flow->rxqs) * flow->actions.queues_n,
+			      0);
 	if (!rte_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "cannot allocate flow memory");
 		return NULL;
 	}
-	for (i = 0; i < action->queues_n; ++i) {
+	for (i = 0; i < flow->actions.queues_n; ++i) {
 		struct rxq_ctrl *rxq;
 
-		rxq = container_of((*priv->rxqs)[action->queues[i]],
+		rxq = container_of((*priv->rxqs)[flow->actions.queues[i]],
 				   struct rxq_ctrl, rxq);
 		wqs[i] = rxq->wq;
 		rte_flow->rxqs[i] = &rxq->rxq;
 		++rte_flow->rxqs_n;
-		rxq->rxq.mark |= action->mark;
+		rxq->rxq.mark |= flow->actions.mark;
 	}
 	/* finalise indirection table. */
 	for (j = 0; i < wqs_n; ++i, ++j) {
 		wqs[i] = wqs[j];
-		if (j == action->queues_n)
+		if (j == flow->actions.queues_n)
 			j = 0;
 	}
-	rte_flow->mark = action->mark;
+	rte_flow->mark = flow->actions.mark;
 	rte_flow->ibv_attr = flow->ibv_attr;
 	rte_flow->hash_fields = flow->hash_fields;
 	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
 		priv->ctx,
 		&(struct ibv_exp_rwq_ind_table_init_attr){
 			.pd = priv->pd,
-			.log_ind_tbl_size = log2above(action->queues_n),
+			.log_ind_tbl_size = log2above(flow->actions.queues_n),
 			.ind_tbl = wqs,
 			.comp_mask = 0,
 		});
@@ -1167,18 +1162,17 @@ priv_flow_create(struct priv *priv,
 		 struct rte_flow_error *error)
 {
 	struct rte_flow *rte_flow;
-	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
-	struct mlx5_flow_action action = {
-		.queue = 0,
-		.drop = 0,
-		.mark = 0,
-		.mark_id = MLX5_FLOW_MARK_DEFAULT,
-		.queues_n = 0,
+	struct mlx5_flow_parse flow = {
+		.offset = sizeof(struct ibv_exp_flow_attr),
+		.actions = {
+			.mark_id = MLX5_FLOW_MARK_DEFAULT,
+			.queues = { 0 },
+			.queues_n = 0,
+		},
 	};
 	int err;
 
-	err = priv_flow_validate(priv, attr, items, actions, error, &flow,
-				 &action);
+	err = priv_flow_validate(priv, attr, items, actions, error, &flow);
 	if (err)
 		goto exit;
 	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
@@ -1200,17 +1194,16 @@ priv_flow_create(struct priv *priv,
 	flow.inner = 0;
 	flow.hash_fields = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
-				      error, &flow, &action));
-	if (action.mark && !action.drop) {
-		mlx5_flow_create_flag_mark(&flow, action.mark_id);
+				      error, &flow));
+	if (flow.actions.mark && !flow.actions.drop) {
+		mlx5_flow_create_flag_mark(&flow, flow.actions.mark_id);
 		flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
 	}
-	if (action.drop)
+	if (flow.actions.drop)
 		rte_flow = priv_flow_create_action_queue_drop(priv, &flow,
 							      error);
 	else
-		rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
-							 error);
+		rte_flow = priv_flow_create_action_queue(priv, &flow, error);
 	if (!rte_flow)
 		goto exit;
 	return rte_flow;
-- 
2.1.4
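
[Editorial note] For readers skimming the series, the net effect of this patch is that the action state produced by priv_flow_validate() now lives inside the parser object itself (struct mlx5_flow_parse.actions) rather than in a separate struct mlx5_flow_action passed alongside it, so validation and creation share a single argument. The stand-alone C sketch below illustrates that calling pattern only; the example_* types and helpers are invented for this illustration and are not mlx5 driver symbols.

/*
 * Illustrative sketch only -- simplified stand-ins for the driver types.
 * The "example_*" names are invented for this demo and are not mlx5 symbols.
 */
#include <stdint.h>
#include <stdio.h>

struct example_actions {
	uint32_t queue:1;   /* target is a receive queue */
	uint32_t drop:1;    /* target is a drop queue */
	uint32_t mark:1;    /* mark requested */
	uint32_t mark_id;   /* mark identifier */
	uint16_t queues[4]; /* queue indexes */
	uint16_t queues_n;  /* number of entries in queues[] */
};

/* After the merge, the parse result carries its action state with it... */
struct example_parse {
	unsigned int offset;            /* bytes consumed in the spec buffer */
	struct example_actions actions; /* parsed action result */
};

/* ...so validation fills a single object... */
static int example_validate(struct example_parse *parser, uint16_t queue)
{
	parser->actions.queue = 1;
	parser->actions.queues[0] = queue;
	parser->actions.queues_n = 1;
	return 0;
}

/* ...and creation reads the same object; no second parameter is needed. */
static void example_create(const struct example_parse *parser)
{
	printf("flow targets %u queue(s), first index %u\n",
	       (unsigned int)parser->actions.queues_n,
	       (unsigned int)parser->actions.queues[0]);
}

int main(void)
{
	struct example_parse parser = { .offset = 0 };

	if (example_validate(&parser, 3) == 0)
		example_create(&parser);
	return 0;
}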