Test-Label: Intel-compilation
Test-Status: FAILURE
http://dpdk.org/patch/44899

_apply issues_

Submitter: Yongseok Koh
Date: 2018-09-19 07:21:56
DPDK git baseline:
	Repo:dpdk-master, CommitID: 7abb521d2c4489bfb4fd0c2bd37e3050b06a9a8a
	Repo:dpdk-next-eventdev, CommitID: 76b9d9de5c7d747c381027156aac07735cb1bc0c
	Repo:dpdk-next-net, CommitID: c5738cd95205210398170e39b071d9a0f180f7d9
	Repo:dpdk-next-crypto, CommitID: 7ee5afa5db967a211cad28e83ab589d188cd3ec4
	Repo:dpdk-next-virtio, CommitID: dead0602d14c6d6e7ef152989c58c1b58473c3dd

*Repo: dpdk-master
Checking patch drivers/net/mlx5/Makefile...
error: while searching for:
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c

ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)

error: patch failed: drivers/net/mlx5/Makefile:35
error: drivers/net/mlx5/Makefile: patch does not apply
Checking patch drivers/net/mlx5/mlx5.c...
Hunk #1 succeeded at 282 (offset -4 lines).
Hunk #2 succeeded at 1128 (offset -7 lines).
Hunk #3 succeeded at 1180 (offset -7 lines).
Checking patch drivers/net/mlx5/mlx5.h...

*Repo: dpdk-next-eventdev
Checking patch drivers/net/mlx5/Makefile...
error: while searching for:
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c

ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)

error: patch failed: drivers/net/mlx5/Makefile:35
error: drivers/net/mlx5/Makefile: patch does not apply
Checking patch drivers/net/mlx5/mlx5.c...
Hunk #1 succeeded at 282 (offset -4 lines).
Hunk #2 succeeded at 1126 (offset -9 lines).
Hunk #3 succeeded at 1178 (offset -9 lines).
Checking patch drivers/net/mlx5/mlx5.h...
Hunk #1 succeeded at 156 (offset -1 lines).
Hunk #2 succeeded at 390 (offset -2 lines).
Checking patch drivers/net/mlx5/mlx5_nl_flow.c...
error: while searching for:
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */
--
 *   Default mask for pattern item as specified by the flow API.
 * @param[in] mask_supported
 *   Mask fields supported by the implementation.
 * @param[in] mask_empty
 *   Empty mask to return when there is no specification.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   Either @p item->mask or one of the mask parameters on success, NULL
 *   otherwise and rte_errno is set.
 */
--
mlx5_nl_flow_item_mask(const struct rte_flow_item *item,
		       const void *mask_default,
		       const void *mask_supported,
		       const void *mask_empty,
		       size_t mask_size,
		       struct rte_flow_error *error)
{
	const uint8_t *mask;
	size_t i;

	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last)) {
		rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
		return NULL;
	}
	/* No spec, no mask, no problem.
	 */
--
	for (i = 0; i != mask_size; ++i) {
		if (!mask[i])
			continue;
		if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
		    ((const uint8_t *)mask_supported)[i]) {
			rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask, "unsupported field found in \"mask\"");
			return NULL;
		}
		if (item->last &&
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i])) {
			rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ITEM_LAST, item->last,
				 "range between \"spec\" and \"last\" not"
				 " comprised in \"mask\"");
			return NULL;
		}
--
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification.
 * @param[in] actions
 *   Associated actions.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the exact size of the message in bytes
 *   regardless of the @p size parameter on success, a negative errno value
 *   otherwise and rte_errno is set.
--
		       size_t size,
		       const struct mlx5_nl_flow_ptoi *ptoi,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item *pattern,
		       const struct rte_flow_action *actions,
		       struct rte_flow_error *error)
{
	alignas(struct nlmsghdr)
	uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)];
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
--
	struct nlattr *na_vlan_priority;
	const enum mlx5_nl_flow_trans *trans;
	const enum mlx5_nl_flow_trans *back;

	if (!size)
		goto error_nobufs;
init:
	item = pattern;
	action = actions;
	n = 0;
	act_index_cur = 0;
--
		struct nlattr *act;
		unsigned int i;

	case INVALID:
		if (item->type)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported pattern item combination");
		else if (action->type)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 action, "unsupported action combination");
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "flow rule lacks some kind of fate action");
	case BACK:
		trans = back;
		n = 0;
		goto trans;
--
		 * Supported attributes: no groups, some priorities and
		 * ingress only. Don't care about transfer as it is the
		 * caller's problem.
		 */
		if (attr->group)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				 attr, "groups are not supported");
		if (attr->priority > 0xfffe)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				 attr, "lowest priority level is 0xfffe");
		if (!attr->ingress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				 attr, "only ingress is supported");
		if (attr->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				 attr, "egress is not supported");
		if (size < mnl_nlmsg_size(sizeof(*tcm)))
			goto error_nobufs;
		nlh = mnl_nlmsg_put_header(buf);
		nlh->nlmsg_type = 0;
		nlh->nlmsg_flags = 0;
		nlh->nlmsg_seq = 0;
		tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
--
		tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
					  RTE_BE16(ETH_P_ALL));
		break;
	case PATTERN:
		if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower"))
			goto error_nobufs;
		na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS);
		if (!na_flower)
			goto error_nobufs;
		if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS,
					    TCA_CLS_FLAGS_SKIP_SW))
			goto error_nobufs;
		break;
	case ITEM_VOID:
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			goto trans;
		++item;
--
			goto trans;
		mask.port_id = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_port_id_mask,
			 &mlx5_nl_flow_mask_supported.port_id,
			 &mlx5_nl_flow_mask_empty.port_id,
			 sizeof(mlx5_nl_flow_mask_supported.port_id), error);
		if (!mask.port_id)
			return -rte_errno;
		if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) {
			in_port_id_set = 1;
			++item;
			break;
		}
		spec.port_id = item->spec;
		if (mask.port_id->id && mask.port_id->id != 0xffffffff)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.port_id,
				 "no support for partial mask on"
				 " \"id\" field");
		if (!mask.port_id->id)
			i = 0;
		else
			for (i = 0; ptoi[i].ifindex; ++i)
				if (ptoi[i].port_id == spec.port_id->id)
					break;
		if (!ptoi[i].ifindex)
			return rte_flow_error_set
				(error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				 spec.port_id,
				 "missing data to convert port ID to ifindex");
		tcm = mnl_nlmsg_get_payload(buf);
		if (in_port_id_set &&
		    ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				 spec.port_id,
				 "cannot match traffic for several port IDs"
				 " through a single flow rule");
		tcm->tcm_ifindex = ptoi[i].ifindex;
		in_port_id_set = 1;
--
			goto trans;
		mask.eth = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_eth_mask,
			 &mlx5_nl_flow_mask_supported.eth,
			 &mlx5_nl_flow_mask_empty.eth,
			 sizeof(mlx5_nl_flow_mask_supported.eth), error);
		if (!mask.eth)
			return -rte_errno;
		if (mask.eth == &mlx5_nl_flow_mask_empty.eth) {
			++item;
			break;
		}
		spec.eth = item->spec;
		if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.eth,
				 "no support for partial mask on"
				 " \"type\" field");
		if (mask.eth->type) {
			if (!mnl_attr_put_u16_check(buf, size,
						    TCA_FLOWER_KEY_ETH_TYPE,
						    spec.eth->type))
				goto error_nobufs;
			eth_type_set = 1;
		}
		if ((!is_zero_ether_addr(&mask.eth->dst) &&
		     (!mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_ETH_DST,
--
					  spec.eth->src.addr_bytes) ||
		      !mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_ETH_SRC_MASK,
					  ETHER_ADDR_LEN,
					  mask.eth->src.addr_bytes))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_VLAN:
		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN)
			goto trans;
		mask.vlan = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_vlan_mask,
			 &mlx5_nl_flow_mask_supported.vlan,
			 &mlx5_nl_flow_mask_empty.vlan,
			 sizeof(mlx5_nl_flow_mask_supported.vlan),
			 error);
		if (!mask.vlan)
			return -rte_errno;
		if (!eth_type_set &&
		    !mnl_attr_put_u16_check(buf, size,
					    TCA_FLOWER_KEY_ETH_TYPE,
					    RTE_BE16(ETH_P_8021Q)))
			goto error_nobufs;
		eth_type_set = 1;
		vlan_present = 1;
		if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) {
			++item;
			break;
--
		     (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) ||
		    (mask.vlan->tci & RTE_BE16(0x0fff) &&
		     (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) ||
		    (mask.vlan->inner_type &&
		     mask.vlan->inner_type != RTE_BE16(0xffff)))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.vlan,
				 "no support for partial masks on"
				 " \"tci\" (PCP and VID parts) and"
				 " \"inner_type\" fields");
		if (mask.vlan->inner_type) {
			if (!mnl_attr_put_u16_check
			    (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			     spec.vlan->inner_type))
				goto error_nobufs;
			vlan_eth_type_set = 1;
		}
		if ((mask.vlan->tci & RTE_BE16(0xe000) &&
		     !mnl_attr_put_u8_check
		     (buf, size, TCA_FLOWER_KEY_VLAN_PRIO,
		      (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) ||
		    (mask.vlan->tci & RTE_BE16(0x0fff) &&
		     !mnl_attr_put_u16_check
		     (buf, size, TCA_FLOWER_KEY_VLAN_ID,
		      rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff)))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_IPV4:
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
			goto trans;
		mask.ipv4 = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_ipv4_mask,
			 &mlx5_nl_flow_mask_supported.ipv4,
			 &mlx5_nl_flow_mask_empty.ipv4,
			 sizeof(mlx5_nl_flow_mask_supported.ipv4), error);
		if (!mask.ipv4)
			return -rte_errno;
		if ((!eth_type_set || !vlan_eth_type_set) &&
		    !mnl_attr_put_u16_check(buf, size,
					    vlan_present ?
					    TCA_FLOWER_KEY_VLAN_ETH_TYPE :
					    TCA_FLOWER_KEY_ETH_TYPE,
					    RTE_BE16(ETH_P_IP)))
			goto error_nobufs;
		eth_type_set = 1;
		vlan_eth_type_set = 1;
		if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) {
			++item;
			break;
		}
		spec.ipv4 = item->spec;
		if (mask.ipv4->hdr.next_proto_id &&
		    mask.ipv4->hdr.next_proto_id != 0xff)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.ipv4,
				 "no support for partial mask on"
				 " \"hdr.next_proto_id\" field");
		if (mask.ipv4->hdr.next_proto_id) {
			if (!mnl_attr_put_u8_check
			    (buf, size, TCA_FLOWER_KEY_IP_PROTO,
			     spec.ipv4->hdr.next_proto_id))
				goto error_nobufs;
			ip_proto_set = 1;
		}
		if ((mask.ipv4->hdr.src_addr &&
		     (!mnl_attr_put_u32_check(buf, size,
					      TCA_FLOWER_KEY_IPV4_SRC,
--
					      TCA_FLOWER_KEY_IPV4_DST,
					      spec.ipv4->hdr.dst_addr) ||
		      !mnl_attr_put_u32_check(buf, size,
					      TCA_FLOWER_KEY_IPV4_DST_MASK,
					      mask.ipv4->hdr.dst_addr))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_IPV6:
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV6)
			goto trans;
		mask.ipv6 = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_ipv6_mask,
			 &mlx5_nl_flow_mask_supported.ipv6,
			 &mlx5_nl_flow_mask_empty.ipv6,
			 sizeof(mlx5_nl_flow_mask_supported.ipv6), error);
		if (!mask.ipv6)
			return -rte_errno;
		if ((!eth_type_set || !vlan_eth_type_set) &&
		    !mnl_attr_put_u16_check(buf, size,
					    vlan_present ?
					    TCA_FLOWER_KEY_VLAN_ETH_TYPE :
					    TCA_FLOWER_KEY_ETH_TYPE,
					    RTE_BE16(ETH_P_IPV6)))
			goto error_nobufs;
		eth_type_set = 1;
		vlan_eth_type_set = 1;
		if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) {
			++item;
			break;
		}
		spec.ipv6 = item->spec;
		if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.ipv6,
				 "no support for partial mask on"
				 " \"hdr.proto\" field");
		if (mask.ipv6->hdr.proto) {
			if (!mnl_attr_put_u8_check
			    (buf, size, TCA_FLOWER_KEY_IP_PROTO,
			     spec.ipv6->hdr.proto))
				goto error_nobufs;
			ip_proto_set = 1;
		}
		if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) &&
		     (!mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_IPV6_SRC,
--
					  spec.ipv6->hdr.dst_addr) ||
		      !mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_IPV6_DST_MASK,
					  sizeof(mask.ipv6->hdr.dst_addr),
					  mask.ipv6->hdr.dst_addr))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_TCP:
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
			goto trans;
		mask.tcp = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_tcp_mask,
			 &mlx5_nl_flow_mask_supported.tcp,
			 &mlx5_nl_flow_mask_empty.tcp,
			 sizeof(mlx5_nl_flow_mask_supported.tcp), error);
		if (!mask.tcp)
			return -rte_errno;
		if (!ip_proto_set &&
		    !mnl_attr_put_u8_check(buf, size,
					   TCA_FLOWER_KEY_IP_PROTO,
					   IPPROTO_TCP))
			goto error_nobufs;
		if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) {
			++item;
			break;
		}
		spec.tcp = item->spec;
--
					      TCA_FLOWER_KEY_TCP_DST,
					      spec.tcp->hdr.dst_port) ||
		      !mnl_attr_put_u16_check(buf, size,
					      TCA_FLOWER_KEY_TCP_DST_MASK,
					      mask.tcp->hdr.dst_port))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_UDP:
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
			goto trans;
		mask.udp = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_udp_mask,
			 &mlx5_nl_flow_mask_supported.udp,
			 &mlx5_nl_flow_mask_empty.udp,
			 sizeof(mlx5_nl_flow_mask_supported.udp), error);
		if (!mask.udp)
			return -rte_errno;
		if (!ip_proto_set &&
		    !mnl_attr_put_u8_check(buf, size,
					   TCA_FLOWER_KEY_IP_PROTO,
					   IPPROTO_UDP))
			goto error_nobufs;
		if (mask.udp == &mlx5_nl_flow_mask_empty.udp) {
			++item;
			break;
		}
		spec.udp = item->spec;
--
					      TCA_FLOWER_KEY_UDP_DST,
					      spec.udp->hdr.dst_port) ||
		      !mnl_attr_put_u16_check(buf, size,
					      TCA_FLOWER_KEY_UDP_DST_MASK,
					      mask.udp->hdr.dst_port))))
			goto error_nobufs;
		++item;
		break;
	case ACTIONS:
		if (item->type != RTE_FLOW_ITEM_TYPE_END)
			goto trans;
		assert(na_flower);
		assert(!na_flower_act);
		na_flower_act =
			mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT);
		if (!na_flower_act)
			goto error_nobufs;
		act_index_cur = 1;
		break;
	case ACTION_VOID:
		if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
			goto trans;
--
		else
			for (i = 0; ptoi[i].ifindex; ++i)
				if (ptoi[i].port_id == conf.port_id->id)
					break;
		if (!ptoi[i].ifindex)
			return rte_flow_error_set
				(error, ENODEV,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 conf.port_id,
				 "missing data to convert port ID to ifindex");
		act_index =
			mnl_attr_nest_start_check(buf, size, act_index_cur++);
		if (!act_index ||
		    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred"))
			goto error_nobufs;
		act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
		if (!act)
			goto error_nobufs;
		if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS,
					sizeof(struct tc_mirred),
					&(struct tc_mirred){
						.action = TC_ACT_STOLEN,
						.eaction = TCA_EGRESS_REDIR,
						.ifindex = ptoi[i].ifindex,
					}))
			goto error_nobufs;
		mnl_attr_nest_end(buf, act);
		mnl_attr_nest_end(buf, act_index);
		++action;
		break;
	case ACTION_DROP:
--
			goto trans;
		act_index =
			mnl_attr_nest_start_check(buf, size, act_index_cur++);
		if (!act_index ||
		    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact"))
			goto error_nobufs;
		act =
			mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
		if (!act)
			goto error_nobufs;
		if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS,
					sizeof(struct tc_gact),
					&(struct tc_gact){
						.action = TC_ACT_SHOT,
					}))
			goto error_nobufs;
		mnl_attr_nest_end(buf, act);
		mnl_attr_nest_end(buf, act_index);
		++action;
		break;
	case ACTION_OF_POP_VLAN:
--
action_of_vlan:
		act_index =
			mnl_attr_nest_start_check(buf, size, act_index_cur++);
		if (!act_index ||
		    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan"))
			goto error_nobufs;
		act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
		if (!act)
			goto error_nobufs;
		if (!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS,
					sizeof(struct tc_vlan),
					&(struct tc_vlan){
						.action = TC_ACT_PIPE,
						.v_action = i,
					}))
			goto error_nobufs;
		if (i == TCA_VLAN_ACT_POP) {
			mnl_attr_nest_end(buf, act);
			mnl_attr_nest_end(buf, act_index);
			++action;
			break;
		}
		if (i == TCA_VLAN_ACT_PUSH &&
		    !mnl_attr_put_u16_check(buf, size,
					    TCA_VLAN_PUSH_VLAN_PROTOCOL,
					    conf.of_push_vlan->ethertype))
			goto error_nobufs;
		na_vlan_id = mnl_nlmsg_get_payload_tail(buf);
		if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0))
			goto error_nobufs;
		na_vlan_priority = mnl_nlmsg_get_payload_tail(buf);
		if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0))
			goto error_nobufs;
		mnl_attr_nest_end(buf, act);
		mnl_attr_nest_end(buf, act_index);
		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
override_na_vlan_id:
			na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
--
	}
	back = trans;
	trans = mlx5_nl_flow_trans[trans[n - 1]];
	n = 0;
	goto trans;
error_nobufs:
	if (buf != buf_tmp) {
		buf = buf_tmp;
		size = sizeof(buf_tmp);
		goto init;
	}
	return rte_flow_error_set
		(error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "generated TC message is too large");
}

/**
 * Brand rtnetlink buffer with unique handle.
--
 *
 * @param nl
 *   Libmnl socket to use.
 * @param buf
 *   Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
		    struct rte_flow_error *error)
{
	struct nlmsghdr *nlh = buf;

	nlh->nlmsg_type = RTM_NEWTFILTER;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	if (!mlx5_nl_flow_nl_ack(nl, nlh))
		return 0;
	return rte_flow_error_set
		(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "netlink: failed to create TC flow rule");
}

/**
 * Destroy a Netlink flow rule.
 *
 * @param nl
 *   Libmnl socket to use.
 * @param buf
 *   Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
		     struct rte_flow_error *error)
{
	struct nlmsghdr *nlh = buf;

	nlh->nlmsg_type = RTM_DELTFILTER;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	if (!mlx5_nl_flow_nl_ack(nl, nlh))
		return 0;
	return rte_flow_error_set
		(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "netlink: failed to destroy TC flow rule");
}

/**
 * Initialize ingress qdisc of a given network interface.
 *
 * @param nl
 *   Libmnl socket of the @p NETLINK_ROUTE kind.
 * @param ifindex
 *   Index of network interface to initialize.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
		  struct rte_flow_error *error)
{
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	alignas(struct nlmsghdr)
	uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
--
	tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = ifindex;
	tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
	tcm->tcm_parent = TC_H_INGRESS;
	/* Ignore errors when qdisc is already absent. */
	if (mlx5_nl_flow_nl_ack(nl, nlh) &&
	    rte_errno != EINVAL && rte_errno != ENOENT)
		return rte_flow_error_set
			(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "netlink: failed to remove ingress qdisc");
	/* Create fresh ingress qdisc. */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWQDISC;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
--
	tcm->tcm_ifindex = ifindex;
	tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
	tcm->tcm_parent = TC_H_INGRESS;
	mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
	if (mlx5_nl_flow_nl_ack(nl, nlh))
		return rte_flow_error_set
			(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "netlink: failed to create ingress qdisc");
	return 0;
}

/**
 * Create and configure a libmnl socket for Netlink flow rules.
--
mlx5_nl_flow_socket_destroy(struct mnl_socket *nl)
{
	mnl_socket_close(nl);
}

error: patch failed: drivers/net/mlx5/mlx5_nl_flow.c:1
error: drivers/net/mlx5/mlx5_nl_flow.c: patch does not apply

*Repo: dpdk-next-net
Checking patch drivers/net/mlx5/Makefile...
error: while searching for:
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c

ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)

error: patch failed: drivers/net/mlx5/Makefile:35
error: drivers/net/mlx5/Makefile: patch does not apply
Checking patch drivers/net/mlx5/mlx5.c...
Hunk #1 succeeded at 282 (offset -4 lines).
Hunk #2 succeeded at 1128 (offset -7 lines).
Hunk #3 succeeded at 1180 (offset -7 lines).
Checking patch drivers/net/mlx5/mlx5.h...

*Repo: dpdk-next-crypto
Checking patch drivers/net/mlx5/Makefile...
error: while searching for:
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c

ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)

error: patch failed: drivers/net/mlx5/Makefile:35
error: drivers/net/mlx5/Makefile: patch does not apply
Checking patch drivers/net/mlx5/mlx5.c...
Hunk #1 succeeded at 282 (offset -4 lines).
Hunk #2 succeeded at 1126 (offset -9 lines).
Hunk #3 succeeded at 1178 (offset -9 lines).
Checking patch drivers/net/mlx5/mlx5.h...
Hunk #1 succeeded at 156 (offset -1 lines).
Hunk #2 succeeded at 390 (offset -2 lines).
Checking patch drivers/net/mlx5/mlx5_nl_flow.c...
error: while searching for:
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */
--
 *   Default mask for pattern item as specified by the flow API.
 * @param[in] mask_supported
 *   Mask fields supported by the implementation.
 * @param[in] mask_empty
 *   Empty mask to return when there is no specification.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   Either @p item->mask or one of the mask parameters on success, NULL
 *   otherwise and rte_errno is set.
 */
--
mlx5_nl_flow_item_mask(const struct rte_flow_item *item,
		       const void *mask_default,
		       const void *mask_supported,
		       const void *mask_empty,
		       size_t mask_size,
		       struct rte_flow_error *error)
{
	const uint8_t *mask;
	size_t i;

	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last)) {
		rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
		return NULL;
	}
	/* No spec, no mask, no problem. */
--
	for (i = 0; i != mask_size; ++i) {
		if (!mask[i])
			continue;
		if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
		    ((const uint8_t *)mask_supported)[i]) {
			rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask, "unsupported field found in \"mask\"");
			return NULL;
		}
		if (item->last &&
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i])) {
			rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ITEM_LAST, item->last,
				 "range between \"spec\" and \"last\" not"
				 " comprised in \"mask\"");
			return NULL;
		}
--
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification.
 * @param[in] actions
 *   Associated actions.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the exact size of the message in bytes
 *   regardless of the @p size parameter on success, a negative errno value
 *   otherwise and rte_errno is set.
--
		       size_t size,
		       const struct mlx5_nl_flow_ptoi *ptoi,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item *pattern,
		       const struct rte_flow_action *actions,
		       struct rte_flow_error *error)
{
	alignas(struct nlmsghdr)
	uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)];
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
--
	struct nlattr *na_vlan_priority;
	const enum mlx5_nl_flow_trans *trans;
	const enum mlx5_nl_flow_trans *back;

	if (!size)
		goto error_nobufs;
init:
	item = pattern;
	action = actions;
	n = 0;
	act_index_cur = 0;
--
		struct nlattr *act;
		unsigned int i;

	case INVALID:
		if (item->type)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported pattern item combination");
		else if (action->type)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 action, "unsupported action combination");
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "flow rule lacks some kind of fate action");
	case BACK:
		trans = back;
		n = 0;
		goto trans;
--
		 * Supported attributes: no groups, some priorities and
		 * ingress only. Don't care about transfer as it is the
		 * caller's problem.
		 */
		if (attr->group)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				 attr, "groups are not supported");
		if (attr->priority > 0xfffe)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				 attr, "lowest priority level is 0xfffe");
		if (!attr->ingress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				 attr, "only ingress is supported");
		if (attr->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				 attr, "egress is not supported");
		if (size < mnl_nlmsg_size(sizeof(*tcm)))
			goto error_nobufs;
		nlh = mnl_nlmsg_put_header(buf);
		nlh->nlmsg_type = 0;
		nlh->nlmsg_flags = 0;
		nlh->nlmsg_seq = 0;
		tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
--
		tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
					  RTE_BE16(ETH_P_ALL));
		break;
	case PATTERN:
		if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower"))
			goto error_nobufs;
		na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS);
		if (!na_flower)
			goto error_nobufs;
		if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS,
					    TCA_CLS_FLAGS_SKIP_SW))
			goto error_nobufs;
		break;
	case ITEM_VOID:
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			goto trans;
		++item;
--
			goto trans;
		mask.port_id = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_port_id_mask,
			 &mlx5_nl_flow_mask_supported.port_id,
			 &mlx5_nl_flow_mask_empty.port_id,
			 sizeof(mlx5_nl_flow_mask_supported.port_id), error);
		if (!mask.port_id)
			return -rte_errno;
		if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) {
			in_port_id_set = 1;
			++item;
			break;
		}
		spec.port_id = item->spec;
		if (mask.port_id->id && mask.port_id->id != 0xffffffff)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.port_id,
				 "no support for partial mask on"
				 " \"id\" field");
		if (!mask.port_id->id)
			i = 0;
		else
			for (i = 0; ptoi[i].ifindex; ++i)
				if (ptoi[i].port_id == spec.port_id->id)
					break;
		if (!ptoi[i].ifindex)
			return rte_flow_error_set
				(error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				 spec.port_id,
				 "missing data to convert port ID to ifindex");
		tcm = mnl_nlmsg_get_payload(buf);
		if (in_port_id_set &&
		    ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				 spec.port_id,
				 "cannot match traffic for several port IDs"
				 " through a single flow rule");
		tcm->tcm_ifindex = ptoi[i].ifindex;
		in_port_id_set = 1;
--
			goto trans;
		mask.eth = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_eth_mask,
			 &mlx5_nl_flow_mask_supported.eth,
			 &mlx5_nl_flow_mask_empty.eth,
			 sizeof(mlx5_nl_flow_mask_supported.eth), error);
		if (!mask.eth)
			return -rte_errno;
		if (mask.eth == &mlx5_nl_flow_mask_empty.eth) {
			++item;
			break;
		}
		spec.eth = item->spec;
		if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.eth,
				 "no support for partial mask on"
				 " \"type\" field");
		if (mask.eth->type) {
			if (!mnl_attr_put_u16_check(buf, size,
						    TCA_FLOWER_KEY_ETH_TYPE,
						    spec.eth->type))
				goto error_nobufs;
			eth_type_set = 1;
		}
		if ((!is_zero_ether_addr(&mask.eth->dst) &&
		     (!mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_ETH_DST,
--
					  spec.eth->src.addr_bytes) ||
		      !mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_ETH_SRC_MASK,
					  ETHER_ADDR_LEN,
					  mask.eth->src.addr_bytes))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_VLAN:
		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN)
			goto trans;
		mask.vlan = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_vlan_mask,
			 &mlx5_nl_flow_mask_supported.vlan,
			 &mlx5_nl_flow_mask_empty.vlan,
			 sizeof(mlx5_nl_flow_mask_supported.vlan),
			 error);
		if (!mask.vlan)
			return -rte_errno;
		if (!eth_type_set &&
		    !mnl_attr_put_u16_check(buf, size,
					    TCA_FLOWER_KEY_ETH_TYPE,
					    RTE_BE16(ETH_P_8021Q)))
			goto error_nobufs;
		eth_type_set = 1;
		vlan_present = 1;
		if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) {
			++item;
			break;
--
		     (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) ||
		    (mask.vlan->tci & RTE_BE16(0x0fff) &&
		     (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) ||
		    (mask.vlan->inner_type &&
		     mask.vlan->inner_type != RTE_BE16(0xffff)))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.vlan,
				 "no support for partial masks on"
				 " \"tci\" (PCP and VID parts) and"
				 " \"inner_type\" fields");
		if (mask.vlan->inner_type) {
			if (!mnl_attr_put_u16_check
			    (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			     spec.vlan->inner_type))
				goto error_nobufs;
			vlan_eth_type_set = 1;
		}
		if ((mask.vlan->tci & RTE_BE16(0xe000) &&
		     !mnl_attr_put_u8_check
		     (buf, size, TCA_FLOWER_KEY_VLAN_PRIO,
		      (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) ||
		    (mask.vlan->tci & RTE_BE16(0x0fff) &&
		     !mnl_attr_put_u16_check
		     (buf, size, TCA_FLOWER_KEY_VLAN_ID,
		      rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff)))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_IPV4:
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
			goto trans;
		mask.ipv4 = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_ipv4_mask,
			 &mlx5_nl_flow_mask_supported.ipv4,
			 &mlx5_nl_flow_mask_empty.ipv4,
			 sizeof(mlx5_nl_flow_mask_supported.ipv4), error);
		if (!mask.ipv4)
			return -rte_errno;
		if ((!eth_type_set || !vlan_eth_type_set) &&
		    !mnl_attr_put_u16_check(buf, size,
					    vlan_present ?
					    TCA_FLOWER_KEY_VLAN_ETH_TYPE :
					    TCA_FLOWER_KEY_ETH_TYPE,
					    RTE_BE16(ETH_P_IP)))
			goto error_nobufs;
		eth_type_set = 1;
		vlan_eth_type_set = 1;
		if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) {
			++item;
			break;
		}
		spec.ipv4 = item->spec;
		if (mask.ipv4->hdr.next_proto_id &&
		    mask.ipv4->hdr.next_proto_id != 0xff)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.ipv4,
				 "no support for partial mask on"
				 " \"hdr.next_proto_id\" field");
		if (mask.ipv4->hdr.next_proto_id) {
			if (!mnl_attr_put_u8_check
			    (buf, size, TCA_FLOWER_KEY_IP_PROTO,
			     spec.ipv4->hdr.next_proto_id))
				goto error_nobufs;
			ip_proto_set = 1;
		}
		if ((mask.ipv4->hdr.src_addr &&
		     (!mnl_attr_put_u32_check(buf, size,
					      TCA_FLOWER_KEY_IPV4_SRC,
--
					      TCA_FLOWER_KEY_IPV4_DST,
					      spec.ipv4->hdr.dst_addr) ||
		      !mnl_attr_put_u32_check(buf, size,
					      TCA_FLOWER_KEY_IPV4_DST_MASK,
					      mask.ipv4->hdr.dst_addr))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_IPV6:
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV6)
			goto trans;
		mask.ipv6 = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_ipv6_mask,
			 &mlx5_nl_flow_mask_supported.ipv6,
			 &mlx5_nl_flow_mask_empty.ipv6,
			 sizeof(mlx5_nl_flow_mask_supported.ipv6), error);
		if (!mask.ipv6)
			return -rte_errno;
		if ((!eth_type_set || !vlan_eth_type_set) &&
		    !mnl_attr_put_u16_check(buf, size,
					    vlan_present ?
					    TCA_FLOWER_KEY_VLAN_ETH_TYPE :
					    TCA_FLOWER_KEY_ETH_TYPE,
					    RTE_BE16(ETH_P_IPV6)))
			goto error_nobufs;
		eth_type_set = 1;
		vlan_eth_type_set = 1;
		if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) {
			++item;
			break;
		}
		spec.ipv6 = item->spec;
		if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
				 mask.ipv6,
				 "no support for partial mask on"
				 " \"hdr.proto\" field");
		if (mask.ipv6->hdr.proto) {
			if (!mnl_attr_put_u8_check
			    (buf, size, TCA_FLOWER_KEY_IP_PROTO,
			     spec.ipv6->hdr.proto))
				goto error_nobufs;
			ip_proto_set = 1;
		}
		if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) &&
		     (!mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_IPV6_SRC,
--
					  spec.ipv6->hdr.dst_addr) ||
		      !mnl_attr_put_check(buf, size,
					  TCA_FLOWER_KEY_IPV6_DST_MASK,
					  sizeof(mask.ipv6->hdr.dst_addr),
					  mask.ipv6->hdr.dst_addr))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_TCP:
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
			goto trans;
		mask.tcp = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_tcp_mask,
			 &mlx5_nl_flow_mask_supported.tcp,
			 &mlx5_nl_flow_mask_empty.tcp,
			 sizeof(mlx5_nl_flow_mask_supported.tcp), error);
		if (!mask.tcp)
			return -rte_errno;
		if (!ip_proto_set &&
		    !mnl_attr_put_u8_check(buf, size,
					   TCA_FLOWER_KEY_IP_PROTO,
					   IPPROTO_TCP))
			goto error_nobufs;
		if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) {
			++item;
			break;
		}
		spec.tcp = item->spec;
--
					      TCA_FLOWER_KEY_TCP_DST,
					      spec.tcp->hdr.dst_port) ||
		      !mnl_attr_put_u16_check(buf, size,
					      TCA_FLOWER_KEY_TCP_DST_MASK,
					      mask.tcp->hdr.dst_port))))
			goto error_nobufs;
		++item;
		break;
	case ITEM_UDP:
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
			goto trans;
		mask.udp = mlx5_nl_flow_item_mask
			(item, &rte_flow_item_udp_mask,
			 &mlx5_nl_flow_mask_supported.udp,
			 &mlx5_nl_flow_mask_empty.udp,
			 sizeof(mlx5_nl_flow_mask_supported.udp), error);
		if (!mask.udp)
			return -rte_errno;
		if (!ip_proto_set &&
		    !mnl_attr_put_u8_check(buf, size,
					   TCA_FLOWER_KEY_IP_PROTO,
					   IPPROTO_UDP))
			goto error_nobufs;
		if (mask.udp == &mlx5_nl_flow_mask_empty.udp) {
			++item;
			break;
		}
		spec.udp = item->spec;
--
					      TCA_FLOWER_KEY_UDP_DST,
					      spec.udp->hdr.dst_port) ||
		      !mnl_attr_put_u16_check(buf, size,
					      TCA_FLOWER_KEY_UDP_DST_MASK,
					      mask.udp->hdr.dst_port))))
			goto error_nobufs;
		++item;
		break;
	case ACTIONS:
		if (item->type != RTE_FLOW_ITEM_TYPE_END)
			goto trans;
		assert(na_flower);
		assert(!na_flower_act);
		na_flower_act =
			mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT);
		if (!na_flower_act)
			goto error_nobufs;
		act_index_cur = 1;
		break;
	case ACTION_VOID:
		if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
			goto trans;
--
		else
			for (i = 0; ptoi[i].ifindex; ++i)
				if (ptoi[i].port_id == conf.port_id->id)
					break;
		if (!ptoi[i].ifindex)
			return rte_flow_error_set
				(error, ENODEV,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 conf.port_id,
				 "missing data to convert port ID to ifindex");
		act_index =
			mnl_attr_nest_start_check(buf, size, act_index_cur++);
		if (!act_index ||
		    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred"))
			goto error_nobufs;
		act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
		if (!act)
			goto error_nobufs;
		if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS,
					sizeof(struct tc_mirred),
					&(struct tc_mirred){
						.action = TC_ACT_STOLEN,
						.eaction = TCA_EGRESS_REDIR,
						.ifindex = ptoi[i].ifindex,
					}))
			goto error_nobufs;
		mnl_attr_nest_end(buf, act);
		mnl_attr_nest_end(buf, act_index);
		++action;
		break;
	case ACTION_DROP:
--
			goto trans;
		act_index =
			mnl_attr_nest_start_check(buf, size, act_index_cur++);
		if (!act_index ||
		    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact"))
			goto error_nobufs;
		act =
			mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
		if (!act)
			goto error_nobufs;
		if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS,
					sizeof(struct tc_gact),
					&(struct tc_gact){
						.action = TC_ACT_SHOT,
					}))
			goto error_nobufs;
		mnl_attr_nest_end(buf, act);
		mnl_attr_nest_end(buf, act_index);
		++action;
		break;
	case ACTION_OF_POP_VLAN:
--
action_of_vlan:
		act_index =
			mnl_attr_nest_start_check(buf, size, act_index_cur++);
		if (!act_index ||
		    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan"))
			goto error_nobufs;
		act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
		if (!act)
			goto error_nobufs;
		if (!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS,
					sizeof(struct tc_vlan),
					&(struct tc_vlan){
						.action = TC_ACT_PIPE,
						.v_action = i,
					}))
			goto error_nobufs;
		if (i == TCA_VLAN_ACT_POP) {
			mnl_attr_nest_end(buf, act);
			mnl_attr_nest_end(buf, act_index);
			++action;
			break;
		}
		if (i == TCA_VLAN_ACT_PUSH &&
		    !mnl_attr_put_u16_check(buf, size,
					    TCA_VLAN_PUSH_VLAN_PROTOCOL,
					    conf.of_push_vlan->ethertype))
			goto error_nobufs;
		na_vlan_id = mnl_nlmsg_get_payload_tail(buf);
		if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0))
			goto error_nobufs;
		na_vlan_priority = mnl_nlmsg_get_payload_tail(buf);
		if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0))
			goto error_nobufs;
		mnl_attr_nest_end(buf, act);
		mnl_attr_nest_end(buf, act_index);
		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
override_na_vlan_id:
			na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
--
	}
	back = trans;
	trans = mlx5_nl_flow_trans[trans[n - 1]];
	n = 0;
	goto trans;
error_nobufs:
	if (buf != buf_tmp) {
		buf = buf_tmp;
		size = sizeof(buf_tmp);
		goto init;
	}
	return rte_flow_error_set
		(error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "generated TC message is too large");
}

/**
 * Brand rtnetlink buffer with unique handle.
--
 *
 * @param nl
 *   Libmnl socket to use.
 * @param buf
 *   Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
		    struct rte_flow_error *error)
{
	struct nlmsghdr *nlh = buf;

	nlh->nlmsg_type = RTM_NEWTFILTER;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	if (!mlx5_nl_flow_nl_ack(nl, nlh))
		return 0;
	return rte_flow_error_set
		(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "netlink: failed to create TC flow rule");
}

/**
 * Destroy a Netlink flow rule.
 *
 * @param nl
 *   Libmnl socket to use.
 * @param buf
 *   Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
		     struct rte_flow_error *error)
{
	struct nlmsghdr *nlh = buf;

	nlh->nlmsg_type = RTM_DELTFILTER;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	if (!mlx5_nl_flow_nl_ack(nl, nlh))
		return 0;
	return rte_flow_error_set
		(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "netlink: failed to destroy TC flow rule");
}

/**
 * Initialize ingress qdisc of a given network interface.
 *
 * @param nl
 *   Libmnl socket of the @p NETLINK_ROUTE kind.
 * @param ifindex
 *   Index of network interface to initialize.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
		  struct rte_flow_error *error)
{
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	alignas(struct nlmsghdr)
	uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
--
	tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = ifindex;
	tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
	tcm->tcm_parent = TC_H_INGRESS;
	/* Ignore errors when qdisc is already absent. */
	if (mlx5_nl_flow_nl_ack(nl, nlh) &&
	    rte_errno != EINVAL && rte_errno != ENOENT)
		return rte_flow_error_set
			(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "netlink: failed to remove ingress qdisc");
	/* Create fresh ingress qdisc. */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWQDISC;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
--
	tcm->tcm_ifindex = ifindex;
	tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
	tcm->tcm_parent = TC_H_INGRESS;
	mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
	if (mlx5_nl_flow_nl_ack(nl, nlh))
		return rte_flow_error_set
			(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "netlink: failed to create ingress qdisc");
	return 0;
}

/**
 * Create and configure a libmnl socket for Netlink flow rules.
--
mlx5_nl_flow_socket_destroy(struct mnl_socket *nl)
{
	mnl_socket_close(nl);
}

error: patch failed: drivers/net/mlx5/mlx5_nl_flow.c:1
error: drivers/net/mlx5/mlx5_nl_flow.c: patch does not apply

*Repo: dpdk-next-virtio
Checking patch drivers/net/mlx5/Makefile...
error: while searching for:
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c

ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)

error: patch failed: drivers/net/mlx5/Makefile:35
error: drivers/net/mlx5/Makefile: patch does not apply
Checking patch drivers/net/mlx5/mlx5.c...
Hunk #1 succeeded at 282 (offset -4 lines).
Hunk #2 succeeded at 1128 (offset -7 lines).
Hunk #3 succeeded at 1180 (offset -7 lines).
Checking patch drivers/net/mlx5/mlx5.h...

DPDK STV team