Test-Label: Intel-compilation
Test-Status: FAILURE
http://dpdk.org/patch/40466

_apply issues_

Submitter: Nélio Laranjeiro
Date: 2018-05-28 11:21:34
DPDK git baseline:
	Repo:dpdk-master, CommitID: 830410b2657d2af0f5a4422139d715c04fc54419
	Repo:dpdk-next-eventdev, CommitID: 3c955492b601d65402c02ac81dfdc7eeb62f23c9
	Repo:dpdk-next-net, CommitID: c94cd308f343568d512f55b899b7685da0096fb2
	Repo:dpdk-next-crypto, CommitID: 830410b2657d2af0f5a4422139d715c04fc54419
	Repo:dpdk-next-virtio, CommitID: e282c7ebd5c70350346b3e13b32d70a2b4541d5b

*Repo: dpdk-master
Checking patch drivers/net/mlx5/mlx5_flow.c...
error: while searching for:
#include "mlx5_prm.h"
#include "mlx5_glue.h"

/* Flow priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 1
--
/** Structure give to the conversion functions. */
struct mlx5_flow_data {
	struct rte_eth_dev *dev; /** Ethernet device. */
	struct mlx5_flow_parse *parser; /** Parser context. */
	struct rte_flow_error *error; /** Error context. */
};

static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
--
	const uint8_t *spec = item->spec;
	const uint8_t *last = item->last;
	const uint8_t *m = item->mask ? item->mask : mask;

	if (!spec && (item->mask || last))
		goto error;
	if (!spec)
		return 0;
	/*
	 * Single-pass check to make sure that:
	 * - item->mask is supported, no bits are set outside mask.
--
	 */
	for (i = 0; i < size; i++) {
		if (!m[i])
			continue;
		if ((m[i] | mask[i]) != mask[i])
			goto error;
		if (last && ((spec[i] & m[i]) != (last[i] & m[i])))
			goto error;
	}
	return 0;
error:
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Extract attribute to the parser.
 *
 * @param[in] attr
 *   Flow rule attributes.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_convert_attributes(const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (attr->transfer) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				   NULL,
				   "transfer is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
--
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_convert_actions(struct rte_eth_dev *dev,
			  const struct rte_flow_action actions[],
			  struct rte_flow_error *error,
			  struct mlx5_flow_parse *parser)
{
	enum { FATE = 1, MARK = 2, COUNT = 4, };
	uint32_t overlap = 0;
	struct priv *priv = dev->data->dev_private;
--
			if (overlap & FATE)
				goto exit_action_overlap;
			overlap |= FATE;
			if (rss->func &&
			    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "the only supported RSS hash"
						   " function is Toeplitz");
				return -rte_errno;
			}
#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			if (parser->rss_conf.level > 1) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "a nonzero RSS encapsulation"
						   " level is not supported");
				return -rte_errno;
			}
#endif
			if (parser->rss_conf.level > 2) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "RSS encapsulation level"
						   " > 1 is not supported");
				return -rte_errno;
			}
			if (rss->types & MLX5_RSS_HF_MASK) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "unsupported RSS type"
						   " requested");
				return -rte_errno;
--
			} else {
				rss_key_len = rss_hash_default_key_len;
				rss_key = rss_hash_default_key;
			}
			if (rss_key_len != RTE_DIM(parser->rss_key)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "RSS hash key must be"
						   " exactly 40 bytes long");
				return -rte_errno;
			}
			if (!rss->queue_num) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "no valid queues");
				return -rte_errno;
			}
			if (rss->queue_num > RTE_DIM(parser->queues)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "too many queues for RSS"
						   " context");
				return -rte_errno;
			}
			for (n = 0; n < rss->queue_num; ++n) {
				if (rss->queue[n] >= priv->rxqs_n) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ACTION,
							   actions,
							   "queue id > number of"
							   " queues");
					return -rte_errno;
--
			if (overlap & MARK)
				goto exit_action_overlap;
			overlap |= MARK;
			if (!mark) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "mark must be defined");
				return -rte_errno;
			} else if (mark->id >= MLX5_FLOW_MARK_MAX) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "mark must be between 0"
						   " and 16777199");
				return -rte_errno;
--
	if (!(overlap & FATE))
		parser->drop = 1;
	if (parser->drop && parser->mark)
		parser->mark = 0;
	if (!parser->rss_conf.queue_num && !parser->drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	return 0;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
	return -rte_errno;
exit_action_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "overlapping actions are not supported");
	return -rte_errno;
}

/**
 * Validate items.
 *
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
				 const struct rte_flow_item items[],
				 struct rte_flow_error *error,
				 struct mlx5_flow_parse *parser)
{
	struct priv *priv = dev->data->dev_private;
	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
	unsigned int i;
--
					      cur_item->mask_sz);
		if (ret)
			goto exit_item_not_supported;
		if (IS_TUNNEL(items->type)) {
			if (parser->tunnel) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   items,
						   "Cannot recognize multiple"
						   " tunnel encapsulations.");
				return -rte_errno;
			}
			if (!priv->config.tunnel_en &&
			    parser->rss_conf.level > 1) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   items,
						   "RSS on tunnel is not supported");
				return -rte_errno;
			}
--
		for (i = 0; i != hash_rxq_init_n; ++i)
			parser->queue[i].offset += size;
	}
	return 0;
exit_item_not_supported:
	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
				  items, "item not supported");
}

/**
 * Allocate memory space to store verbs flow attributes.
 *
 * @param[in] size
 *   Amount of byte to allocate.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A verbs flow attribute on success, NULL otherwise and rte_errno is set.
 */
static struct ibv_flow_attr *
mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error)
{
	struct ibv_flow_attr *ibv_attr;

	ibv_attr = rte_calloc(__func__, 1, size, 0);
	if (!ibv_attr) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate verbs spec attributes");
		return NULL;
	}
--
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
--
static int
mlx5_flow_convert(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct mlx5_flow_parse *parser)
{
	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
	unsigned int i;
	int ret;
--
	*parser = (struct mlx5_flow_parse){
		.create = parser->create,
		.layer = HASH_RXQ_ETH,
		.mark_id = MLX5_FLOW_MARK_DEFAULT,
	};
	ret = mlx5_flow_convert_attributes(attr, error);
	if (ret)
		return ret;
	ret = mlx5_flow_convert_actions(dev, actions, error, parser);
	if (ret)
		return ret;
	ret = mlx5_flow_convert_items_validate(dev, items, error, parser);
	if (ret)
		return ret;
	mlx5_flow_convert_finalise(parser);
	/*
	 * Second step.
--
	 */
	if (parser->drop) {
		unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;

		parser->queue[HASH_RXQ_ETH].ibv_attr =
			mlx5_flow_convert_allocate(offset, error);
		if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
			goto exit_enomem;
		parser->queue[HASH_RXQ_ETH].offset =
			sizeof(struct ibv_flow_attr);
	} else {
		for (i = 0; i != hash_rxq_init_n; ++i) {
			unsigned int offset;

			offset = parser->queue[i].offset;
			parser->queue[i].ibv_attr =
				mlx5_flow_convert_allocate(offset, error);
			if (!parser->queue[i].ibv_attr)
				goto exit_enomem;
			parser->queue[i].offset = sizeof(struct ibv_flow_attr);
		}
	}
--
	parser->layer = HASH_RXQ_ETH;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		struct mlx5_flow_data data = {
			.dev = dev,
			.parser = parser,
			.error = error,
		};

		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		cur_item = &mlx5_flow_items[items->type];
--
	if (parser->mark)
		mlx5_flow_create_flag_mark(parser, parser->mark_id);
	if (parser->count && parser->create) {
		mlx5_flow_create_count(dev, parser);
		if (!parser->cs)
			goto exit_count_error;
	}
exit_free:
	/* Only verification is expected, all resources should be released. */
	if (!parser->create) {
		for (i = 0; i != hash_rxq_init_n; ++i) {
--
		if (parser->queue[i].ibv_attr) {
			rte_free(parser->queue[i].ibv_attr);
			parser->queue[i].ibv_attr = NULL;
		}
	}
	rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, "cannot allocate verbs spec attributes");
	return -rte_errno;
exit_count_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, "cannot create counter");
	return -rte_errno;
}

/**
--
			/*
			 * From verbs perspective an empty VLAN is equivalent
			 * to a packet without VLAN layer.
			 */
			if (!eth->mask.vlan_tag)
				goto error;
			/* Outer TPID cannot be matched. */
			if (eth->mask.ether_type) {
				msg = "VLAN TPID matching is not supported";
				goto error;
			}
			eth->val.ether_type = spec->inner_type;
			eth->mask.ether_type = mask->inner_type;
			eth->val.ether_type &= eth->mask.ether_type;
		}
		return 0;
	}
error:
	return rte_flow_error_set(data->error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Convert IPv4 item to Verbs specification.
--
	};

	if (parser->layer == HASH_RXQ_TUNNEL &&
	    parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&
	    !priv->config.l3_vxlan_en)
		return rte_flow_error_set(data->error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 VXLAN not enabled by device"
					  " parameter and/or not configured"
					  " in firmware");
--
	};

	if (parser->layer == HASH_RXQ_TUNNEL &&
	    parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&
	    !priv->config.l3_vxlan_en)
		return rte_flow_error_set(data->error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 VXLAN not enabled by device"
					  " parameter and/or not configured"
					  " in firmware");
--
	 * before will also match this rule.
	 * To avoid such situation, VNI 0 is currently refused.
	 */
	/* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
	if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
		return rte_flow_error_set(data->error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "VxLAN vni cannot be 0");
	mlx5_flow_create_copy(parser, &vxlan, size);
	return 0;
--
		uint32_t vlan_id;
		uint8_t vni[4];
	} id;

	if (!priv->config.l3_vxlan_en)
		return rte_flow_error_set(data->error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 VXLAN not enabled by device"
					  " parameter and/or not configured"
					  " in firmware");
--
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		if (spec->protocol)
			return rte_flow_error_set(data->error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VxLAN-GPE protocol not"
						  " supported");
		/* Remove unwanted bits from values. */
--
	 * before will also match this rule.
	 * To avoid such situation, VNI 0 is currently refused.
	 */
	/* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
	if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
		return rte_flow_error_set(data->error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "VxLAN-GPE vni cannot be 0");
	mlx5_flow_create_copy(parser, &vxlan, size);
	return 0;
--
			ipv6->val.next_hdr = MLX5_GRE;
			ipv6->mask.next_hdr = 0xff;
		}
	}
	if (i != hash_rxq_init_n)
		return rte_flow_error_set(data->error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "IP protocol of GRE must be 47");
	mlx5_flow_create_copy(parser, &tunnel, size);
	return 0;
--
 *   Pointer to Ethernet device.
 * @param parser
 *   Internal parser structure.
 * @param flow
 *   Pointer to the rte_flow.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
				   struct mlx5_flow_parse *parser,
				   struct rte_flow *flow,
				   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct ibv_flow_spec_action_drop *drop;
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
--
	parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
	flow->frxq[HASH_RXQ_ETH].ibv_flow =
		mlx5_glue->create_flow(priv->flow_drop_queue->qp,
				       flow->frxq[HASH_RXQ_ETH].ibv_attr);
	if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return 0;
error:
	assert(flow);
	if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
		claim_zero(mlx5_glue->destroy_flow
			   (flow->frxq[HASH_RXQ_ETH].ibv_flow));
		flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
--
 *   Pointer to Ethernet device.
 * @param parser
 *   Internal parser structure.
 * @param flow
 *   Pointer to the rte_flow.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
				  struct mlx5_flow_parse *parser,
				  struct rte_flow *flow,
				  struct rte_flow_error *error)
{
	unsigned int i;

	for (i = 0; i != hash_rxq_init_n; ++i) {
		if (!parser->queue[i].ibv_attr)
--
					      parser->rss_conf.queue,
					      parser->rss_conf.queue_num,
					      parser->tunnel,
					      parser->rss_conf.level);
		if (!flow->frxq[i].hrxq) {
			return rte_flow_error_set(error, ENOMEM,
						  RTE_FLOW_ERROR_TYPE_HANDLE,
						  NULL,
						  "cannot create hash rxq");
		}
	}
--
 *   Pointer to Ethernet device.
 * @param parser
 *   Internal parser structure.
 * @param flow
 *   Pointer to the rte_flow.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
			      struct mlx5_flow_parse *parser,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	struct priv *priv __rte_unused = dev->data->dev_private;
	int ret;
	unsigned int i;
	unsigned int flows_n = 0;

	assert(priv->pd);
	assert(priv->ctx);
	assert(!parser->drop);
	ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
	if (ret)
		goto error;
	if (parser->count)
		flow->cs = parser->cs;
	if (!dev->data->dev_started)
		return 0;
	for (i = 0; i != hash_rxq_init_n; ++i) {
--
		flow->frxq[i].ibv_flow =
			mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
					       flow->frxq[i].ibv_attr);
		mlx5_flow_dump(dev, flow, i);
		if (!flow->frxq[i].ibv_flow) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "flow rule creation failure");
			goto error;
		}
		++flows_n;
	}
	if (!flows_n) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "internal error in flow creation");
		goto error;
	}
	mlx5_flow_create_update_rxqs(dev, flow);
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	assert(flow);
	for (i = 0; i != hash_rxq_init_n; ++i) {
		if (flow->frxq[i].ibv_flow) {
			struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
--
}

/**
 * Convert a flow.

error: patch failed: drivers/net/mlx5/mlx5_flow.c:31
error: drivers/net/mlx5/mlx5_flow.c: patch does not apply

*Repo: dpdk-next-eventdev
Checking patch drivers/net/mlx5/mlx5_flow.c...
error: patch failed: drivers/net/mlx5/mlx5_flow.c:31
error: drivers/net/mlx5/mlx5_flow.c: patch does not apply

*Repo: dpdk-next-net
Checking patch drivers/net/mlx5/mlx5_flow.c...
error: patch failed: drivers/net/mlx5/mlx5_flow.c:31
error: drivers/net/mlx5/mlx5_flow.c: patch does not apply

*Repo: dpdk-next-crypto
Checking patch drivers/net/mlx5/mlx5_flow.c...
error: patch failed: drivers/net/mlx5/mlx5_flow.c:31
error: drivers/net/mlx5/mlx5_flow.c: patch does not apply
*/ static int mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { struct priv *priv __rte_unused = dev->data->dev_private; int ret; unsigned int i; unsigned int flows_n = 0; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); if (ret) goto error; if (parser->count) flow->cs = parser->cs; if (!dev->data->dev_started) return 0; for (i = 0; i != hash_rxq_init_n; ++i) { -- flow->frxq[i].ibv_flow = mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, flow->frxq[i].ibv_attr); mlx5_flow_dump(dev, flow, i); if (!flow->frxq[i].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); goto error; } ++flows_n; } if (!flows_n) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "internal error in flow creation"); goto error; } mlx5_flow_create_update_rxqs(dev, flow); return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ assert(flow); for (i = 0; i != hash_rxq_init_n; ++i) { if (flow->frxq[i].ibv_flow) { struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow; -- } /** * Convert a flow. error: patch failed: drivers/net/mlx5/mlx5_flow.c:31 error: drivers/net/mlx5/mlx5_flow.c: patch does not apply *Repo: dpdk-next-virtio Checking patch drivers/net/mlx5/mlx5_flow.c... error: while searching for: #include "mlx5_prm.h" #include "mlx5_glue.h" /* Flow priority for control plane flows. */ #define MLX5_CTRL_FLOW_PRIORITY 1 -- /** Structure give to the conversion functions. */ struct mlx5_flow_data { struct rte_eth_dev *dev; /** Ethernet device. */ struct mlx5_flow_parse *parser; /** Parser context. */ struct rte_flow_error *error; /** Error context. */ }; static int mlx5_flow_create_eth(const struct rte_flow_item *item, const void *default_mask, -- const uint8_t *spec = item->spec; const uint8_t *last = item->last; const uint8_t *m = item->mask ? item->mask : mask; if (!spec && (item->mask || last)) goto error; if (!spec) return 0; /* * Single-pass check to make sure that: * - item->mask is supported, no bits are set outside mask. -- */ for (i = 0; i < size; i++) { if (!m[i]) continue; if ((m[i] | mask[i]) != mask[i]) goto error; if (last && ((spec[i] & m[i]) != (last[i] & m[i]))) goto error; } return 0; error: rte_errno = ENOTSUP; return -rte_errno; } /** * Extract attribute to the parser. * * @param[in] attr * Flow rule attributes. * @param[out] error * Perform verbose error reporting if not NULL. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, struct rte_flow_error *error) { if (attr->group) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, "groups are not supported"); return -rte_errno; } if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL, "priorities are not supported"); return -rte_errno; } if (attr->egress) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); return -rte_errno; } if (attr->transfer) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, "transfer is not supported"); return -rte_errno; } if (!attr->ingress) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL, "only ingress is supported"); return -rte_errno; } -- * * @param dev * Pointer to Ethernet device. * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] error * Perform verbose error reporting if not NULL. * @param[in, out] parser * Internal parser structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_convert_actions(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { enum { FATE = 1, MARK = 2, COUNT = 4, }; uint32_t overlap = 0; struct priv *priv = dev->data->dev_private; -- if (overlap & FATE) goto exit_action_overlap; overlap |= FATE; if (rss->func && rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "the only supported RSS hash" " function is Toeplitz"); return -rte_errno; } #ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT if (parser->rss_conf.level > 1) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "a nonzero RSS encapsulation" " level is not supported"); return -rte_errno; } #endif if (parser->rss_conf.level > 2) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "RSS encapsulation level" " > 1 is not supported"); return -rte_errno; } if (rss->types & MLX5_RSS_HF_MASK) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "unsupported RSS type" " requested"); return -rte_errno; -- } else { rss_key_len = rss_hash_default_key_len; rss_key = rss_hash_default_key; } if (rss_key_len != RTE_DIM(parser->rss_key)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "RSS hash key must be" " exactly 40 bytes long"); return -rte_errno; } if (!rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "no valid queues"); return -rte_errno; } if (rss->queue_num > RTE_DIM(parser->queues)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "too many queues for RSS" " context"); return -rte_errno; } for (n = 0; n < rss->queue_num; ++n) { if (rss->queue[n] >= priv->rxqs_n) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "queue id > number of" " queues"); return -rte_errno; -- if (overlap & MARK) goto exit_action_overlap; overlap |= MARK; if (!mark) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "mark must be defined"); return -rte_errno; } else if (mark->id >= MLX5_FLOW_MARK_MAX) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, "mark must be between 0" " and 16777199"); return -rte_errno; -- if (!(overlap & FATE)) parser->drop = 1; 
if (parser->drop && parser->mark) parser->mark = 0; if (!parser->rss_conf.queue_num && !parser->drop) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "no valid action"); return -rte_errno; } return 0; exit_action_not_supported: rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, "action not supported"); return -rte_errno; exit_action_overlap: rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, "overlapping actions are not supported"); return -rte_errno; } /** * Validate items. * * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[out] error * Perform verbose error reporting if not NULL. * @param[in, out] parser * Internal parser structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_convert_items_validate(struct rte_eth_dev *dev, const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { struct priv *priv = dev->data->dev_private; const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; -- cur_item->mask_sz); if (ret) goto exit_item_not_supported; if (IS_TUNNEL(items->type)) { if (parser->tunnel) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, items, "Cannot recognize multiple" " tunnel encapsulations."); return -rte_errno; } if (!priv->config.tunnel_en && parser->rss_conf.level > 1) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, items, "RSS on tunnel is not supported"); return -rte_errno; } -- for (i = 0; i != hash_rxq_init_n; ++i) parser->queue[i].offset += size; } return 0; exit_item_not_supported: return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM, items, "item not supported"); } /** * Allocate memory space to store verbs flow attributes. * * @param[in] size * Amount of byte to allocate. * @param[out] error * Perform verbose error reporting if not NULL. * * @return * A verbs flow attribute on success, NULL otherwise and rte_errno is set. */ static struct ibv_flow_attr * mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error) { struct ibv_flow_attr *ibv_attr; ibv_attr = rte_calloc(__func__, 1, size, 0); if (!ibv_attr) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate verbs spec attributes"); return NULL; } -- * Flow rule attributes. * @param[in] pattern * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] error * Perform verbose error reporting if not NULL. * @param[in, out] parser * Internal parser structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. -- static int mlx5_flow_convert(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; int ret; -- *parser = (struct mlx5_flow_parse){ .create = parser->create, .layer = HASH_RXQ_ETH, .mark_id = MLX5_FLOW_MARK_DEFAULT, }; ret = mlx5_flow_convert_attributes(attr, error); if (ret) return ret; ret = mlx5_flow_convert_actions(dev, actions, error, parser); if (ret) return ret; ret = mlx5_flow_convert_items_validate(dev, items, error, parser); if (ret) return ret; mlx5_flow_convert_finalise(parser); /* * Second step. 
-- */ if (parser->drop) { unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; parser->queue[HASH_RXQ_ETH].ibv_attr = mlx5_flow_convert_allocate(offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) goto exit_enomem; parser->queue[HASH_RXQ_ETH].offset = sizeof(struct ibv_flow_attr); } else { for (i = 0; i != hash_rxq_init_n; ++i) { unsigned int offset; offset = parser->queue[i].offset; parser->queue[i].ibv_attr = mlx5_flow_convert_allocate(offset, error); if (!parser->queue[i].ibv_attr) goto exit_enomem; parser->queue[i].offset = sizeof(struct ibv_flow_attr); } } -- parser->layer = HASH_RXQ_ETH; for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { struct mlx5_flow_data data = { .dev = dev, .parser = parser, .error = error, }; if (items->type == RTE_FLOW_ITEM_TYPE_VOID) continue; cur_item = &mlx5_flow_items[items->type]; -- if (parser->mark) mlx5_flow_create_flag_mark(parser, parser->mark_id); if (parser->count && parser->create) { mlx5_flow_create_count(dev, parser); if (!parser->cs) goto exit_count_error; } exit_free: /* Only verification is expected, all resources should be released. */ if (!parser->create) { for (i = 0; i != hash_rxq_init_n; ++i) { -- if (parser->queue[i].ibv_attr) { rte_free(parser->queue[i].ibv_attr); parser->queue[i].ibv_attr = NULL; } } rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate verbs spec attributes"); return -rte_errno; exit_count_error: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create counter"); return -rte_errno; } /** -- /* * From verbs perspective an empty VLAN is equivalent * to a packet without VLAN layer. */ if (!eth->mask.vlan_tag) goto error; /* Outer TPID cannot be matched. */ if (eth->mask.ether_type) { msg = "VLAN TPID matching is not supported"; goto error; } eth->val.ether_type = spec->inner_type; eth->mask.ether_type = mask->inner_type; eth->val.ether_type &= eth->mask.ether_type; } return 0; } error: return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, msg); } /** * Convert IPv4 item to Verbs specification. -- }; if (parser->layer == HASH_RXQ_TUNNEL && parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && !priv->config.l3_vxlan_en) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN not enabled by device" " parameter and/or not configured" " in firmware"); -- }; if (parser->layer == HASH_RXQ_TUNNEL && parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && !priv->config.l3_vxlan_en) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN not enabled by device" " parameter and/or not configured" " in firmware"); -- * before will also match this rule. * To avoid such situation, VNI 0 is currently refused. */ /* Only allow tunnel w/o tunnel id pattern after proper outer spec. 
*/ if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "VxLAN vni cannot be 0"); mlx5_flow_create_copy(parser, &vxlan, size); return 0; -- uint32_t vlan_id; uint8_t vni[4]; } id; if (!priv->config.l3_vxlan_en) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "L3 VXLAN not enabled by device" " parameter and/or not configured" " in firmware"); -- memcpy(&id.vni[1], spec->vni, 3); vxlan.val.tunnel_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); vxlan.mask.tunnel_id = id.vlan_id; if (spec->protocol) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "VxLAN-GPE protocol not" " supported"); /* Remove unwanted bits from values. */ -- * before will also match this rule. * To avoid such situation, VNI 0 is currently refused. */ /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */ if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "VxLAN-GPE vni cannot be 0"); mlx5_flow_create_copy(parser, &vxlan, size); return 0; -- ipv6->val.next_hdr = MLX5_GRE; ipv6->mask.next_hdr = 0xff; } } if (i != hash_rxq_init_n) return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "IP protocol of GRE must be 47"); mlx5_flow_create_copy(parser, &tunnel, size); return 0; -- * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow * Pointer to the rte_flow. * @param[out] error * Perform verbose error reporting if not NULL. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; struct ibv_flow_spec_action_drop *drop; unsigned int size = sizeof(struct ibv_flow_spec_action_drop); -- parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; flow->frxq[HASH_RXQ_ETH].ibv_flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp, flow->frxq[HASH_RXQ_ETH].ibv_attr); if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); goto error; } return 0; error: assert(flow); if (flow->frxq[HASH_RXQ_ETH].ibv_flow) { claim_zero(mlx5_glue->destroy_flow (flow->frxq[HASH_RXQ_ETH].ibv_flow)); flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; -- * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow * Pointer to the rte_flow. * @param[out] error * Perform verbose error reporting if not NULL. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { unsigned int i; for (i = 0; i != hash_rxq_init_n; ++i) { if (!parser->queue[i].ibv_attr) -- parser->rss_conf.queue, parser->rss_conf.queue_num, parser->tunnel, parser->rss_conf.level); if (!flow->frxq[i].hrxq) { return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "cannot create hash rxq"); } } -- * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow * Pointer to the rte_flow. * @param[out] error * Perform verbose error reporting if not NULL. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { struct priv *priv __rte_unused = dev->data->dev_private; int ret; unsigned int i; unsigned int flows_n = 0; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); if (ret) goto error; if (parser->count) flow->cs = parser->cs; if (!dev->data->dev_started) return 0; for (i = 0; i != hash_rxq_init_n; ++i) { -- flow->frxq[i].ibv_flow = mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, flow->frxq[i].ibv_attr); mlx5_flow_dump(dev, flow, i); if (!flow->frxq[i].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); goto error; } ++flows_n; } if (!flows_n) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "internal error in flow creation"); goto error; } mlx5_flow_create_update_rxqs(dev, flow); return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ assert(flow); for (i = 0; i != hash_rxq_init_n; ++i) { if (flow->frxq[i].ibv_flow) { struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow; -- } /** * Convert a flow. error: patch failed: drivers/net/mlx5/mlx5_flow.c:31 error: drivers/net/mlx5/mlx5_flow.c: patch does not apply

DPDK STV team
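The context quoted throughout this report is mlx5's rte_flow conversion layer, and its checks explain most of the error strings above: only ingress rules are accepted (no group, priority, egress or transfer attributes), RSS must use the default/Toeplitz hash function with a 40-byte (or omitted) key and a valid queue list, and MARK ids must stay below 16777200. For anyone triaging the failure, here is a minimal caller-side sketch against the 18.05-era rte_flow API that satisfies all of those checks; the helper name is mine, and port 0 with at least one configured Rx queue is assumed.

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: validate an ingress rule that marks all traffic
 * and RSS-hashes it to queue 0. */
static int
validate_mark_rss_rule(uint16_t port_id)
{
        static const uint16_t queues[] = { 0 };
        struct rte_flow_attr attr = { .ingress = 1 }; /* anything else is rejected */
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_mark mark = { .id = 42 }; /* must stay below 16777200 */
        struct rte_flow_action_rss rss = {
                /* .func = 0 (DEFAULT) passes the Toeplitz-only check;
                 * .key = NULL / .key_len = 0 selects the 40-byte default key. */
                .types = ETH_RSS_IP,
                .queue_num = 1,
                .queue = queues,
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss }, /* the fate action */
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error = { 0 };
        int ret = rte_flow_validate(port_id, &attr, pattern, actions, &error);

        if (ret)
                /* On failure the PMD filled error.message via
                 * rte_flow_error_set() and set rte_errno. */
                fprintf(stderr, "flow rejected: %s\n",
                        error.message ? error.message : "(no message)");
        return ret;
}

Note that passing these software checks does not guarantee acceptance: creation can still fail on device capabilities, e.g. the HAVE_IBV_DEVICE_TUNNEL_SUPPORT and l3_vxlan_en gates quoted above.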
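The "single-pass check" quoted in the search context enforces two invariants at once: the user-supplied mask may not set any bit outside the driver-supported mask, and when a spec..last range is given, spec and last must agree on every masked bit (ranges are otherwise unsupported). Restated as a standalone function with a tiny demo; all names here are mine:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Returns 0 when user_mask stays within supported bits and, if a range
 * end (last) is given, spec and last agree on every masked bit. */
static int
check_mask_range(const uint8_t *spec, const uint8_t *last,
                 const uint8_t *user_mask, const uint8_t *supported,
                 size_t size)
{
        for (size_t i = 0; i < size; i++) {
                if (!user_mask[i])
                        continue;
                if ((user_mask[i] | supported[i]) != supported[i])
                        return -1; /* bit requested outside supported mask */
                if (last && (spec[i] & user_mask[i]) != (last[i] & user_mask[i]))
                        return -1; /* spec..last range crosses a masked bit */
        }
        return 0;
}

int
main(void)
{
        const uint8_t supported[] = { 0xff, 0x0f };
        const uint8_t mask_ok[]   = { 0xff, 0x0f };
        const uint8_t mask_bad[]  = { 0xff, 0xf0 }; /* high nibble unsupported */
        const uint8_t spec[]      = { 0x12, 0x03 };

        printf("ok:  %d\n", check_mask_range(spec, NULL, mask_ok, supported, 2));
        printf("bad: %d\n", check_mask_range(spec, NULL, mask_bad, supported, 2));
        return 0;
}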
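The VXLAN hunks pack the 24-bit VNI into the 32-bit Verbs tunnel id through a union (named vlan_id/vni in the quoted code), copying into bytes 1..3 so byte 0 stays zero; they then refuse VNI 0 whenever the outer spec is bare Ethernet, for the reason spelled out in the quoted comment (such a rule would also match unrelated tunnels). An isolated sketch of just the packing; the helper name and demo are mine:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pack a 3-byte VXLAN VNI the way the quoted conversion does:
 * the VNI occupies bytes 1..3 of a 4-byte overlay, byte 0 stays 0. */
static uint32_t
vni_to_tunnel_id(const uint8_t vni[3])
{
        union {
                uint32_t tunnel_id;
                uint8_t bytes[4];
        } id = { .tunnel_id = 0 };

        memcpy(&id.bytes[1], vni, 3);
        return id.tunnel_id; /* byte order follows the host representation */
}

int
main(void)
{
        const uint8_t vni[3] = { 0x12, 0x34, 0x56 };

        /* On a little-endian host this prints 0x56341200. */
        printf("tunnel_id = 0x%08x\n", vni_to_tunnel_id(vni));
        return 0;
}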
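Finally, the creation path quoted last shows a small but easy-to-miss discipline: before tearing down partially created Verbs flows it snapshots rte_errno ("Save rte_errno before cleanup"), because the destroy calls in the cleanup loop may clobber it. A toy reconstruction of that pattern; everything except the snapshot/restore lines is a stand-in of mine:

#include <errno.h>
#include <stdio.h>
#include <rte_errno.h>

/* Stand-in for the per-hash-Rx-queue Verbs flows of the real code. */
struct toy_flow { int created; };

static int
create_all(struct toy_flow *f)
{
        f->created = 0;
        rte_errno = ENOMEM;     /* pretend one of the flows failed */
        return -rte_errno;
}

static void
destroy_partial(struct toy_flow *f)
{
        (void)f;
        rte_errno = 0;          /* cleanup may clobber rte_errno */
}

static int
create_with_cleanup(struct toy_flow *f)
{
        int ret;

        if (!create_all(f))
                return 0;
        ret = rte_errno;        /* snapshot first, as the driver does */
        destroy_partial(f);
        rte_errno = ret;        /* report the original failure cause */
        return -ret;
}

int
main(void)
{
        struct toy_flow f;
        int ret = create_with_cleanup(&f);

        printf("ret=%d rte_errno=%d\n", ret, rte_errno);
        return 0;
}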