From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail-wm0-f47.google.com (mail-wm0-f47.google.com [74.125.82.47]) by dpdk.org (Postfix) with ESMTP id 0CBB89253 for ; Fri, 30 Oct 2015 19:56:01 +0100 (CET) Received: by wmff134 with SMTP id f134so18769999wmf.0 for ; Fri, 30 Oct 2015 11:56:01 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=6wind_com.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=DHho7DTtig6e25Yog91JJExjDkrHek/SPRDUiAF7qKk=; b=mhmRKkY71wbfBeRSJISToQgwHNkWDmjiweGjZG22zFD0UUC60waMChcbVYl52ByUlk aFNf8YxQugK15G1r6vSYSk/Ju4Av20XzahHwic37KY4/FMPP4BkbG8eBgRv2rX6uH5sF 8bdR4a9Dabqt8drPQQLoENXI0s4xQNDzLP7Q6R3TyShIcVWRM3UA9fSXMyqCxV14vtEI Qk4kE1ueTwqw+NOm/AyPB2J+gc/LerOQD0wDa8n8r2zOrFK+yR2afOWq12l+oRbSGF2y qH2XmQvu+EKjDBWx0k/dmkKQDxtFd2Q/7RnCMZGsnMuKFybdk12XnujoD7M7gsLN/GaB AOkA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20130820; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=DHho7DTtig6e25Yog91JJExjDkrHek/SPRDUiAF7qKk=; b=lTR/oZ0bl4qB7ufNnHuZnzOU7DmFINkGtAgEAparU1WQ4OLKpVzMxjpxLgL0yMna/I OVbv3bxJFWp7jJhtyrtqQe8pMnCZNhzDx/CiZk0yktu5ksjX65YcE5kIWc/Z9k/Fuk7G yUDH1DTG8Pzb60GxGnigPvVZkeopuuxO9nA2SoPnQBVye/79FI8MOofGAFihtMCBzVlK r35ZELYLmFWWtEk1W0lH8MnRSqki432YyVk2G/FMusvM/iaZivj0bAClNpsznHHM8Vdj 2w/ngCygMbQiGDeafFVotnfRsHvwzSSFjGBzPy7iz2Bn3IIMlFa4ZBZcbQVDXchDYhnK 2LMw== X-Gm-Message-State: ALoCoQm64Iv8n4+lXiIFjkLjzIBLL0JI/7vN9oGAXY+2h5eTbGMI6GXx6An90IeLDgiDtMdcsjhC X-Received: by 10.28.137.211 with SMTP id l202mr5315288wmd.90.1446231360920; Fri, 30 Oct 2015 11:56:00 -0700 (PDT) Received: from 6wind.com (guy78-3-82-239-227-177.fbx.proxad.net. 
[82.239.227.177]) by smtp.gmail.com with ESMTPSA id gl4sm8369510wjd.49.2015.10.30.11.55.59 (version=TLSv1.2 cipher=RC4-SHA bits=128/128); Fri, 30 Oct 2015 11:56:00 -0700 (PDT) From: Adrien Mazarguil To: dev@dpdk.org Date: Fri, 30 Oct 2015 19:55:09 +0100 Message-Id: <1446231319-8185-7-git-send-email-adrien.mazarguil@6wind.com> X-Mailer: git-send-email 2.1.0 In-Reply-To: <1446231319-8185-1-git-send-email-adrien.mazarguil@6wind.com> References: <1444067692-29645-1-git-send-email-adrien.mazarguil@6wind.com> <1446231319-8185-1-git-send-email-adrien.mazarguil@6wind.com> Subject: [dpdk-dev] [PATCH v2 06/16] mlx5: define specific flow steering rules for each hash RX QP X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 30 Oct 2015 18:56:01 -0000 From: Olga Shern All hash RX QPs currently use the same flow steering rule (L2 MAC filtering) regardless of their type (TCP, UDP, IPv4, IPv6), which prevents them from being dispatched properly. This is fixed by adding flow information to the hash RX queue initialization data and generating specific flow steering rules for each of them. 
Signed-off-by: Olga Shern Signed-off-by: Adrien Mazarguil Signed-off-by: Nelio Laranjeiro --- drivers/net/mlx5/mlx5_mac.c | 19 ++++------- drivers/net/mlx5/mlx5_rxq.c | 77 ++++++++++++++++++++++++++++++++++++++++++++ drivers/net/mlx5/mlx5_rxtx.h | 21 ++++++++++++ 3 files changed, 105 insertions(+), 12 deletions(-) diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index b580494..d3ab5b9 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -242,12 +242,9 @@ hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index, const uint8_t (*mac)[ETHER_ADDR_LEN] = (const uint8_t (*)[ETHER_ADDR_LEN]) priv->mac[mac_index].addr_bytes; - struct __attribute__((packed)) { - struct ibv_flow_attr attr; - struct ibv_flow_spec_eth spec; - } data; - struct ibv_flow_attr *attr = &data.attr; - struct ibv_flow_spec_eth *spec = &data.spec; + FLOW_ATTR_SPEC_ETH(data, hash_rxq_flow_attr(hash_rxq, NULL, 0)); + struct ibv_flow_attr *attr = &data->attr; + struct ibv_flow_spec_eth *spec = &data->spec; unsigned int vlan_enabled = !!priv->vlan_filter_n; unsigned int vlan_id = priv->vlan_filter[vlan_index]; @@ -260,12 +257,10 @@ hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index, * This layout is expected by libibverbs. */ assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec); - *attr = (struct ibv_flow_attr){ - .type = IBV_FLOW_ATTR_NORMAL, - .num_of_specs = 1, - .port = priv->port, - .flags = 0 - }; + hash_rxq_flow_attr(hash_rxq, attr, sizeof(data)); + /* The first specification must be Ethernet. 
*/ + assert(spec->type == IBV_FLOW_SPEC_ETH); + assert(spec->size == sizeof(*spec)); *spec = (struct ibv_flow_spec_eth){ .type = IBV_FLOW_SPEC_ETH, .size = sizeof(*spec), diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 41f8811..1e15ff9 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -71,19 +71,43 @@ static const struct hash_rxq_init hash_rxq_init[] = { IBV_EXP_RX_HASH_DST_IPV4 | IBV_EXP_RX_HASH_SRC_PORT_TCP | IBV_EXP_RX_HASH_DST_PORT_TCP), + .flow_priority = 0, + .flow_spec.tcp_udp = { + .type = IBV_FLOW_SPEC_TCP, + .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_IPV4], }, [HASH_RXQ_UDPV4] = { .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | IBV_EXP_RX_HASH_DST_IPV4 | IBV_EXP_RX_HASH_SRC_PORT_UDP | IBV_EXP_RX_HASH_DST_PORT_UDP), + .flow_priority = 0, + .flow_spec.tcp_udp = { + .type = IBV_FLOW_SPEC_UDP, + .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_IPV4], }, [HASH_RXQ_IPV4] = { .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | IBV_EXP_RX_HASH_DST_IPV4), + .flow_priority = 1, + .flow_spec.ipv4 = { + .type = IBV_FLOW_SPEC_IPV4, + .size = sizeof(hash_rxq_init[0].flow_spec.ipv4), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_ETH], }, [HASH_RXQ_ETH] = { .hash_fields = 0, + .flow_priority = 2, + .flow_spec.eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(hash_rxq_init[0].flow_spec.eth), + }, + .underlayer = NULL, }, }; @@ -125,6 +149,59 @@ static uint8_t hash_rxq_default_key[] = { }; /** + * Populate flow steering rule for a given hash RX queue type using + * information from hash_rxq_init[]. Nothing is written to flow_attr when + * flow_attr_size is not large enough, but the required size is still returned. + * + * @param[in] hash_rxq + * Pointer to hash RX queue. + * @param[out] flow_attr + * Pointer to flow attribute structure to fill. 
Note that the allocated + * area must be large enough to hold all flow specifications. + * @param flow_attr_size + * Entire size of flow_attr and trailing room for flow specifications. + * + * @return + * Total size of the flow attribute buffer. No errors are defined. + */ +size_t +hash_rxq_flow_attr(const struct hash_rxq *hash_rxq, + struct ibv_flow_attr *flow_attr, + size_t flow_attr_size) +{ + size_t offset = sizeof(*flow_attr); + enum hash_rxq_type type = hash_rxq->type; + const struct hash_rxq_init *init = &hash_rxq_init[type]; + + assert(hash_rxq->priv != NULL); + assert((size_t)type < RTE_DIM(hash_rxq_init)); + do { + offset += init->flow_spec.hdr.size; + init = init->underlayer; + } while (init != NULL); + if (offset > flow_attr_size) + return offset; + flow_attr_size = offset; + init = &hash_rxq_init[type]; + *flow_attr = (struct ibv_flow_attr){ + .type = IBV_FLOW_ATTR_NORMAL, + .priority = init->flow_priority, + .num_of_specs = 0, + .port = hash_rxq->priv->port, + .flags = 0, + }; + do { + offset -= init->flow_spec.hdr.size; + memcpy((void *)((uintptr_t)flow_attr + offset), + &init->flow_spec, + init->flow_spec.hdr.size); + ++flow_attr->num_of_specs; + init = init->underlayer; + } while (init != NULL); + return flow_attr_size; +} + +/** * Return nearest power of two above input value. * * @param v diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index f89d3ec..c31fa8e 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -34,6 +34,7 @@ #ifndef RTE_PMD_MLX5_RXTX_H_ #define RTE_PMD_MLX5_RXTX_H_ +#include #include /* Verbs header. */ @@ -126,9 +127,27 @@ enum hash_rxq_type { HASH_RXQ_ETH, }; +/* Flow structure with Ethernet specification. It is packed to prevent padding + * between attr and spec as this layout is expected by libibverbs. 
*/ +struct flow_attr_spec_eth { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth spec; +} __attribute__((packed)); + +/* Define a struct flow_attr_spec_eth object as an array of at least + * "size" bytes. Room after the first index is normally used to store + * extra flow specifications. */ +#define FLOW_ATTR_SPEC_ETH(name, size) \ + struct flow_attr_spec_eth name \ + [((size) / sizeof(struct flow_attr_spec_eth)) + \ + !!((size) % sizeof(struct flow_attr_spec_eth))] + /* Initialization data for hash RX queue. */ struct hash_rxq_init { uint64_t hash_fields; /* Fields that participate in the hash. */ + unsigned int flow_priority; /* Flow priority to use. */ + struct ibv_flow_spec flow_spec; /* Flow specification template. */ + const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */ }; /* Initialization data for indirection table. */ @@ -193,6 +212,8 @@ struct txq { /* mlx5_rxq.c */ +size_t hash_rxq_flow_attr(const struct hash_rxq *, struct ibv_flow_attr *, + size_t); int priv_create_hash_rxqs(struct priv *); void priv_destroy_hash_rxqs(struct priv *); void rxq_cleanup(struct rxq *); -- 2.1.0