From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id AE7C51BA2B for ; Tue, 10 Apr 2018 15:34:31 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from xuemingl@mellanox.com) with ESMTPS (AES256-SHA encrypted); 10 Apr 2018 16:35:36 +0300 Received: from dev-r630-06.mtbc.labs.mlnx (dev-r630-06.mtbc.labs.mlnx [10.12.205.180]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id w3ADYQrG023171; Tue, 10 Apr 2018 16:34:27 +0300 Received: from dev-r630-06.mtbc.labs.mlnx (localhost [127.0.0.1]) by dev-r630-06.mtbc.labs.mlnx (8.14.7/8.14.7) with ESMTP id w3ADYQrj189968; Tue, 10 Apr 2018 21:34:26 +0800 Received: (from xuemingl@localhost) by dev-r630-06.mtbc.labs.mlnx (8.14.7/8.14.7/Submit) id w3ADYQF5189967; Tue, 10 Apr 2018 21:34:26 +0800 From: Xueming Li To: Nelio Laranjeiro , Shahaf Shuler Cc: Xueming Li , dev@dpdk.org Date: Tue, 10 Apr 2018 21:34:01 +0800 Message-Id: <20180410133415.189905-2-xuemingl@mellanox.com> X-Mailer: git-send-email 2.13.3 In-Reply-To: <20180410133415.189905-1-xuemingl@mellanox.com> References: <20180410133415.189905-1-xuemingl@mellanox.com> Subject: [dpdk-dev] [PATCH v2 01/15] net/mlx5: support 16 hardware priorities X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 10 Apr 2018 13:34:32 -0000 Adjust the flow priority mapping to adapt to the new hardware's 16 Verbs flow priorities support: 0-3: RTE FLOW tunnel rule 4-7: RTE FLOW non-tunnel rule 8-15: PMD control flow Signed-off-by: Xueming Li --- drivers/net/mlx5/mlx5.c | 10 ++++ drivers/net/mlx5/mlx5.h | 8 +++ drivers/net/mlx5/mlx5_flow.c | 107 ++++++++++++++++++++++++++++++---------- drivers/net/mlx5/mlx5_trigger.c | 8 --- 4 files changed, 100 insertions(+), 33 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c 
index cfab55897..a1f2799e5 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -197,6 +197,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) priv->txqs_n = 0; priv->txqs = NULL; } + mlx5_flow_delete_drop_queue(dev); if (priv->pd != NULL) { assert(priv->ctx != NULL); claim_zero(mlx5_glue->dealloc_pd(priv->pd)); @@ -993,6 +994,15 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, mlx5_set_link_up(eth_dev); /* Store device configuration on private structure. */ priv->config = config; + /* Create drop queue. */ + err = mlx5_flow_create_drop_queue(eth_dev); + if (err) { + DRV_LOG(ERR, "port %u drop queue allocation failed: %s", + eth_dev->data->port_id, strerror(rte_errno)); + goto port_error; + } + /* Supported flow priority number detection. */ + mlx5_flow_priorities_detect(eth_dev); continue; port_error: if (priv) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 63b24e6bb..708272f6d 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -89,6 +89,8 @@ struct mlx5_dev_config { unsigned int rx_vec_en:1; /* Rx vector is enabled. */ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */ + unsigned int flow_priority_shift; /* Non-tunnel flow priority shift. */ + unsigned int control_flow_priority; /* Control flow priority. */ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */ unsigned int ind_table_max_size; /* Maximum indirection table size. */ int txq_inline; /* Maximum packet size for inlining. */ @@ -105,6 +107,11 @@ enum mlx5_verbs_alloc_type { MLX5_VERBS_ALLOC_TYPE_RX_QUEUE, }; +/* 8 Verbs priorities per flow. */ +#define MLX5_VERBS_FLOW_PRIO_8 8 +/* 4 Verbs priorities per flow. */ +#define MLX5_VERBS_FLOW_PRIO_4 4 + /** * Verbs allocator needs a context to know in the callback which kind of * resources it is allocating. 
@@ -253,6 +260,7 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ +void mlx5_flow_priorities_detect(struct rte_eth_dev *dev); int mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 288610620..394760418 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -32,9 +32,6 @@ #include "mlx5_prm.h" #include "mlx5_glue.h" -/* Define minimal priority for control plane flows. */ -#define MLX5_CTRL_FLOW_PRIORITY 4 - /* Internet Protocol versions. */ #define MLX5_IPV4 4 #define MLX5_IPV6 6 @@ -129,7 +126,7 @@ const struct hash_rxq_init hash_rxq_init[] = { IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP), .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP, - .flow_priority = 1, + .flow_priority = 0, .ip_version = MLX5_IPV4, }, [HASH_RXQ_UDPV4] = { @@ -138,7 +135,7 @@ const struct hash_rxq_init hash_rxq_init[] = { IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP), .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP, - .flow_priority = 1, + .flow_priority = 0, .ip_version = MLX5_IPV4, }, [HASH_RXQ_IPV4] = { @@ -146,7 +143,7 @@ const struct hash_rxq_init hash_rxq_init[] = { IBV_RX_HASH_DST_IPV4), .dpdk_rss_hf = (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4), - .flow_priority = 2, + .flow_priority = 1, .ip_version = MLX5_IPV4, }, [HASH_RXQ_TCPV6] = { @@ -155,7 +152,7 @@ const struct hash_rxq_init hash_rxq_init[] = { IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP), .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP, - .flow_priority = 1, + .flow_priority = 0, .ip_version = MLX5_IPV6, }, [HASH_RXQ_UDPV6] = { @@ -164,7 +161,7 @@ const struct hash_rxq_init hash_rxq_init[] = { IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP), .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP, - .flow_priority = 1, + .flow_priority = 0, .ip_version = MLX5_IPV6, }, [HASH_RXQ_IPV6] = { @@ -172,13 +169,13 @@ const struct hash_rxq_init hash_rxq_init[] = { 
IBV_RX_HASH_DST_IPV6), .dpdk_rss_hf = (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6), - .flow_priority = 2, + .flow_priority = 1, .ip_version = MLX5_IPV6, }, [HASH_RXQ_ETH] = { .hash_fields = 0, .dpdk_rss_hf = 0, - .flow_priority = 3, + .flow_priority = 2, }, }; @@ -536,6 +533,8 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, /** * Extract attribute to the parser. * + * @param dev + * Pointer to Ethernet device. * @param[in] attr * Flow rule attributes. * @param[out] error @@ -545,9 +544,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, +mlx5_flow_convert_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; + if (attr->group) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -555,7 +557,7 @@ mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, "groups are not supported"); return -rte_errno; } - if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) { + if (attr->priority > priv->config.control_flow_priority) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL, @@ -900,30 +902,38 @@ mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error) * Make inner packet matching with an higher priority from the non Inner * matching. * + * @param dev + * Pointer to Ethernet device. * @param[in, out] parser * Internal parser structure. * @param attr * User flow attribute. 
*/ static void -mlx5_flow_update_priority(struct mlx5_flow_parse *parser, +mlx5_flow_update_priority(struct rte_eth_dev *dev, + struct mlx5_flow_parse *parser, const struct rte_flow_attr *attr) { + struct priv *priv = dev->data->dev_private; unsigned int i; + uint16_t priority; + if (priv->config.flow_priority_shift == 1) + priority = attr->priority * MLX5_VERBS_FLOW_PRIO_4; + else + priority = attr->priority * MLX5_VERBS_FLOW_PRIO_8; + if (!parser->inner) + priority += priv->config.flow_priority_shift; if (parser->drop) { - parser->queue[HASH_RXQ_ETH].ibv_attr->priority = - attr->priority + - hash_rxq_init[HASH_RXQ_ETH].flow_priority; + parser->queue[HASH_RXQ_ETH].ibv_attr->priority = priority + + hash_rxq_init[HASH_RXQ_ETH].flow_priority; return; } for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - parser->queue[i].ibv_attr->priority = - attr->priority + - hash_rxq_init[i].flow_priority - - (parser->inner ? 1 : 0); - } + if (!parser->queue[i].ibv_attr) + continue; + parser->queue[i].ibv_attr->priority = priority + + hash_rxq_init[i].flow_priority; } } @@ -1087,7 +1097,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, .layer = HASH_RXQ_ETH, .mark_id = MLX5_FLOW_MARK_DEFAULT, }; - ret = mlx5_flow_convert_attributes(attr, error); + ret = mlx5_flow_convert_attributes(dev, attr, error); if (ret) return ret; ret = mlx5_flow_convert_actions(dev, actions, error, parser); @@ -1158,7 +1168,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, */ if (!parser->drop) mlx5_flow_convert_finalise(parser); - mlx5_flow_update_priority(parser, attr); + mlx5_flow_update_priority(dev, parser, attr); exit_free: /* Only verification is expected, all resources should be released. 
*/ if (!parser->create) { @@ -2450,7 +2460,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, - .priority = MLX5_CTRL_FLOW_PRIORITY, + .priority = priv->config.control_flow_priority, }; struct rte_flow_item items[] = { { @@ -3161,3 +3171,50 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, } return 0; } + +/** + * Detect number of Verbs flow priorities supported. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_priorities_detect(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + uint32_t verb_priorities = MLX5_VERBS_FLOW_PRIO_8 * 2; + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + .priority = verb_priorities - 1, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + + if (priv->config.control_flow_priority) + return; + flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp, + &flow_attr.attr); + if (flow) { + priv->config.flow_priority_shift = MLX5_VERBS_FLOW_PRIO_8 / 2; + claim_zero(mlx5_glue->destroy_flow(flow)); + } else { + priv->config.flow_priority_shift = 1; + verb_priorities = verb_priorities / 2; + } + priv->config.control_flow_priority = 1; + DRV_LOG(INFO, "port %u Verbs flow priorities: %d", + dev->data->port_id, verb_priorities); +} diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 6bb4ffb14..d80a2e688 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -148,12 +148,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) int ret; dev->data->dev_started = 1; - ret = mlx5_flow_create_drop_queue(dev); - if (ret) { - DRV_LOG(ERR, "port %u drop queue allocation failed: 
%s", - dev->data->port_id, strerror(rte_errno)); - goto error; - } DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues", dev->data->port_id); rte_mempool_walk(mlx5_mp2mr_iter, priv); @@ -202,7 +196,6 @@ mlx5_dev_start(struct rte_eth_dev *dev) mlx5_traffic_disable(dev); mlx5_txq_stop(dev); mlx5_rxq_stop(dev); - mlx5_flow_delete_drop_queue(dev); rte_errno = ret; /* Restore rte_errno. */ return -rte_errno; } @@ -237,7 +230,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev) mlx5_rxq_stop(dev); for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) mlx5_mr_release(mr); - mlx5_flow_delete_drop_queue(dev); } /** -- 2.13.3