From mboxrd@z Thu Jan  1 00:00:00 1970
From: Nelio Laranjeiro
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com
Date: Wed, 2 Aug 2017 16:10:19 +0200
Message-Id: <1336626caaf2e333d895bf26dc065a7f4c97a3ca.1501681927.git.nelio.laranjeiro@6wind.com>
X-Mailer: git-send-email 2.1.4
Subject: [dpdk-dev] [PATCH v1 03/21] net/mlx5: prefix Rx queue control structures

Prefix struct rxq and struct rxq_ctrl with mlx5: rename them to struct
mlx5_rxq_data and struct mlx5_rxq_ctrl respectively, and rename their
helpers rxq_cleanup() and rxq_ctrl_setup() to mlx5_rxq_cleanup() and
mlx5_rxq_ctrl_setup() to match.

Signed-off-by: Nelio Laranjeiro
---
 drivers/net/mlx5/mlx5.c              |  8 ++---
 drivers/net/mlx5/mlx5.h              |  4 +--
 drivers/net/mlx5/mlx5_flow.c         | 12 +++---
 drivers/net/mlx5/mlx5_rxq.c          | 61 +++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_rxtx.c         | 14 ++++-----
 drivers/net/mlx5/mlx5_rxtx.h         | 17 +++++-----
 drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 21 +++++++------
 drivers/net/mlx5/mlx5_stats.c        |  2 +-
 drivers/net/mlx5/mlx5_vlan.c         |  5 +--
 9 files changed, 75 insertions(+), 69 deletions(-)
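
The rename has to cover both structures at once because of the layout
pattern the driver relies on: the data path structure is embedded inside
the control structure, the burst functions are handed only the embedded
member, and every control path entry point recovers its owner with
container_of(rxq, struct mlx5_rxq_ctrl, rxq), which names the control
type explicitly. A minimal, self-contained sketch of that pattern
follows; the struct bodies and their fields (elts_n, socket) are trimmed
stand-ins for illustration, not the real PMD definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel/DPDK-style container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Trimmed stand-in: data path fields only. */
struct mlx5_rxq_data {
	unsigned int elts_n;
};

/* Trimmed stand-in: control path fields plus the embedded data path. */
struct mlx5_rxq_ctrl {
	unsigned int socket;
	struct mlx5_rxq_data rxq; /* Data path structure. */
};

int
main(void)
{
	struct mlx5_rxq_ctrl ctrl = { .socket = 0, .rxq = { .elts_n = 9 } };
	/* The data path only ever sees a pointer to the embedded member... */
	struct mlx5_rxq_data *rxq = &ctrl.rxq;
	/* ...and the control path maps it back without a back pointer. */
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	printf("ctrl=%p recovered=%p\n", (void *)&ctrl, (void *)rxq_ctrl);
	return 0;
}

Keeping the back mapping in container_of() instead of a pointer field
keeps struct mlx5_rxq_data free of control path state, which matters
because it is the cache-aligned structure the burst functions touch per
packet.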

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ef10a22..d2fa8b1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -157,14 +157,14 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	/* XXX race condition if mlx5_rx_burst() is still running. */
 	usleep(1000);
 	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		struct rxq_ctrl *rxq_ctrl;
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl;
 
 		if (rxq == NULL)
 			continue;
-		rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+		rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 		(*priv->rxqs)[i] = NULL;
-		rxq_cleanup(rxq_ctrl);
+		mlx5_rxq_cleanup(rxq_ctrl);
 		rte_free(rxq_ctrl);
 	}
 	priv->rxqs_n = 0;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c7194de..55cea6f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -129,7 +129,7 @@ struct priv {
 	/* RX/TX queues. */
 	unsigned int rxqs_n; /* RX queues array size. */
 	unsigned int txqs_n; /* TX queues array size. */
-	struct rxq *(*rxqs)[]; /* RX queues. */
+	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
 	struct txq *(*txqs)[]; /* TX queues. */
 	/* Indirection tables referencing all RX WQs. */
 	struct ibv_exp_rwq_ind_table *(*ind_tables)[];
@@ -287,6 +287,6 @@ int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
 int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
 int priv_flow_start(struct priv *);
 void priv_flow_stop(struct priv *);
-int priv_flow_rxq_in_use(struct priv *, struct rxq *);
+int priv_flow_rxq_in_use(struct priv *, struct mlx5_rxq_data *);
 
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 545bc8f..77b85a6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -99,7 +99,7 @@ struct rte_flow {
 	uint32_t mark:1; /**< Set if the flow is marked. */
 	uint32_t drop:1; /**< Drop queue. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
-	struct rxq *rxqs[]; /**< Pointer to the queues array. */
+	struct mlx5_rxq_data *rxqs[]; /**< Pointer to the queues array. */
 };
 
 /** Static initializer for items. */
@@ -1106,10 +1106,10 @@ priv_flow_create_action_queue(struct priv *priv,
 		return NULL;
 	}
 	for (i = 0; i < flow->actions.queues_n; ++i) {
-		struct rxq_ctrl *rxq;
+		struct mlx5_rxq_ctrl *rxq;
 
 		rxq = container_of((*priv->rxqs)[flow->actions.queues[i]],
-				   struct rxq_ctrl, rxq);
+				   struct mlx5_rxq_ctrl, rxq);
 		wqs[i] = rxq->wq;
 		rte_flow->rxqs[i] = &rxq->rxq;
 		++rte_flow->rxqs_n;
@@ -1305,7 +1305,7 @@ priv_flow_destroy(struct priv *priv,
 		claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
 	if (flow->mark) {
 		struct rte_flow *tmp;
-		struct rxq *rxq;
+		struct mlx5_rxq_data *rxq;
 		uint32_t mark_n = 0;
 		uint32_t queue_n;
 
@@ -1325,7 +1325,7 @@ priv_flow_destroy(struct priv *priv,
 			for (tqueue_n = 0;
 			     tqueue_n < tmp->rxqs_n;
 			     ++tqueue_n) {
-				struct rxq *trxq;
+				struct mlx5_rxq_data *trxq;
 
 				trxq = tmp->rxqs[tqueue_n];
 				if (rxq == trxq)
@@ -1594,7 +1594,7 @@ priv_flow_start(struct priv *priv)
  *   Nonzero if the queue is used by a flow.
  */
 int
-priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+priv_flow_rxq_in_use(struct priv *priv, struct mlx5_rxq_data *rxq)
 {
 	struct rte_flow *flow;
 
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index bbb914a..c09a554 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -374,10 +374,10 @@ priv_create_hash_rxqs(struct priv *priv)
 		      priv->reta_idx_n);
 	}
 	for (i = 0; (i != priv->reta_idx_n); ++i) {
-		struct rxq_ctrl *rxq_ctrl;
+		struct mlx5_rxq_ctrl *rxq_ctrl;
 
 		rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
-					struct rxq_ctrl, rxq);
+					struct mlx5_rxq_ctrl, rxq);
 		wqs[i] = rxq_ctrl->wq;
 	}
 	/* Get number of hash RX queues to configure. */
@@ -638,7 +638,7 @@ priv_rehash_flows(struct priv *priv)
  *   0 on success, errno value on failure.
  */
 static int
-rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)
 {
 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
 	unsigned int i;
@@ -679,7 +679,7 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
 	if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
-		struct rxq *rxq = &rxq_ctrl->rxq;
+		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
 		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
 
 		assert(rxq->elts_n == rxq->cqe_n);
@@ -721,9 +721,9 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
  *   Pointer to RX queue structure.
  */
 static void
-rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct rxq *rxq = &rxq_ctrl->rxq;
+	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
 	const uint16_t q_n = (1 << rxq->elts_n);
 	const uint16_t q_mask = q_n - 1;
 	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
@@ -757,7 +757,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
  *   Pointer to RX queue structure.
  */
 void
-rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
+mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DEBUG("cleaning up %p", (void *)rxq_ctrl);
 	rxq_free_elts(rxq_ctrl);
@@ -782,7 +782,7 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
  *   0 on success, errno value on failure.
  */
 static inline int
-rxq_setup(struct rxq_ctrl *tmpl)
+rxq_setup(struct mlx5_rxq_ctrl *tmpl)
 {
 	struct ibv_cq *ibcq = tmpl->cq;
 	struct ibv_mlx5_cq_info cq_info;
@@ -839,12 +839,12 @@ rxq_setup(struct rxq_ctrl *tmpl)
  *   0 on success, errno value on failure.
  */
 int
-rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
-	       uint16_t desc, unsigned int socket,
-	       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
+mlx5_rxq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+		    uint16_t desc, unsigned int socket,
+		    const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rxq_ctrl tmpl = {
+	struct mlx5_rxq_ctrl tmpl = {
 		.priv = priv,
 		.socket = socket,
 		.rxq = {
@@ -1062,7 +1062,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	}
 	/* Clean up rxq in case we're reinitializing it. */
 	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
-	rxq_cleanup(rxq_ctrl);
+	mlx5_rxq_cleanup(rxq_ctrl);
 	/* Move mbuf pointers to dedicated storage area in RX queue. */
 	elts = (void *)(rxq_ctrl + 1);
 	rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
@@ -1081,7 +1081,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	return 0;
 error:
 	elts = tmpl.rxq.elts;
-	rxq_cleanup(&tmpl);
+	mlx5_rxq_cleanup(&tmpl);
 	rte_free(elts);
 	assert(ret > 0);
 	return ret;
@@ -1112,8 +1112,9 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		    struct rte_mempool *mp)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rxq *rxq = (*priv->rxqs)[idx];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	const uint16_t desc_n =
 		desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 	int ret;
@@ -1144,7 +1145,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -EEXIST;
 		}
 		(*priv->rxqs)[idx] = NULL;
-		rxq_cleanup(rxq_ctrl);
+		mlx5_rxq_cleanup(rxq_ctrl);
 		/* Resize if rxq size is changed. */
 		if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
 			rxq_ctrl = rte_realloc(rxq_ctrl,
@@ -1170,7 +1171,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -ENOMEM;
 		}
 	}
-	ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
+	ret = mlx5_rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
 	if (ret)
 		rte_free(rxq_ctrl);
 	else {
@@ -1192,8 +1193,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 void
 mlx5_rx_queue_release(void *dpdk_rxq)
 {
-	struct rxq *rxq = (struct rxq *)dpdk_rxq;
-	struct rxq_ctrl *rxq_ctrl;
+	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
 	struct priv *priv;
 	unsigned int i;
 
@@ -1202,7 +1203,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 
 	if (rxq == NULL)
 		return;
-	rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
 	priv_lock(priv);
 	if (priv_flow_rxq_in_use(priv, rxq))
@@ -1215,7 +1216,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 			(*priv->rxqs)[i] = NULL;
 			break;
 		}
-	rxq_cleanup(rxq_ctrl);
+	mlx5_rxq_cleanup(rxq_ctrl);
 	rte_free(rxq_ctrl);
 	priv_unlock(priv);
 }
@@ -1249,9 +1250,9 @@ priv_rx_intr_vec_enable(struct priv *priv)
 	}
 	intr_handle->type = RTE_INTR_HANDLE_EXT;
 	for (i = 0; i != n; ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-		struct rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct rxq_ctrl, rxq);
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_ctrl *rxq_ctrl =
+			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 		int fd;
 		int flags;
 		int rc;
@@ -1325,8 +1326,9 @@ int
 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	int ret;
 
 	if (!rxq || !rxq_ctrl->channel) {
@@ -1355,8 +1357,9 @@ int
 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	struct ibv_cq *ev_cq;
 	void *ev_ctx;
 	int ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 59d9ce0..cd5182c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -66,11 +66,11 @@ static __rte_always_inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
 
 static __rte_always_inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_cnt, uint32_t *rss_hash);
 
 static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
 
 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
@@ -282,7 +282,7 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 int
 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
-	struct rxq *rxq = rx_queue;
+	struct mlx5_rxq_data *rxq = rx_queue;
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -1619,7 +1619,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
  *   with error.
  */
 static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_cnt, uint32_t *rss_hash)
 {
 	struct rxq_zip *zip = &rxq->zip;
@@ -1730,7 +1730,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
 	uint16_t flags = ntohs(cqe->hdr_type_etc);
@@ -1769,7 +1769,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 uint16_t
 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct rxq *rxq = dpdk_rxq;
+	struct mlx5_rxq_data *rxq = dpdk_rxq;
 	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
 	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
 	const unsigned int sges_n = rxq->sges_n;
@@ -2008,7 +2008,7 @@ priv_check_vec_tx_support(struct priv *priv)
 }
 
 int __attribute__((weak))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 {
 	(void)rxq;
 	return -ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 91ff780..bd07b5d 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -89,7 +89,7 @@ struct rxq_zip {
 };
 
 /* RX queue descriptor. */
-struct rxq {
+struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
 	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
 	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
@@ -118,14 +118,14 @@ struct rxq {
 } __rte_cache_aligned;
 
 /* RX queue control descriptor. */
-struct rxq_ctrl {
+struct mlx5_rxq_ctrl {
 	struct priv *priv; /* Back pointer to private data. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_exp_wq *wq; /* Work Queue. */
 	struct ibv_mr *mr; /* Memory Region (for mp). */
 	struct ibv_comp_channel *channel;
 	unsigned int socket; /* CPU socket ID for allocations. */
-	struct rxq rxq; /* Data path structure. */
+	struct mlx5_rxq_data rxq; /* Data path structure. */
 };
 
 /* Hash RX queue types. */
@@ -289,10 +289,11 @@ int priv_create_hash_rxqs(struct priv *);
 void priv_destroy_hash_rxqs(struct priv *);
 int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
 int priv_rehash_flows(struct priv *);
-void rxq_cleanup(struct rxq_ctrl *);
-int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
-		   unsigned int, const struct rte_eth_rxconf *,
-		   struct rte_mempool *);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
+int mlx5_rxq_rehash(struct rte_eth_dev *, struct mlx5_rxq_ctrl *);
+int mlx5_rxq_ctrl_setup(struct rte_eth_dev *, struct mlx5_rxq_ctrl *,
+			uint16_t, unsigned int, const struct rte_eth_rxconf *,
+			struct rte_mempool *);
 int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 			const struct rte_eth_rxconf *, struct rte_mempool *);
 void mlx5_rx_queue_release(void *);
@@ -330,7 +331,7 @@ int mlx5_tx_descriptor_status(void *, uint16_t);
 /* Vectorized version of mlx5_rxtx.c */
 int priv_check_raw_vec_tx_support(struct priv *);
 int priv_check_vec_tx_support(struct priv *);
-int rxq_check_vec_support(struct rxq *);
+int rxq_check_vec_support(struct mlx5_rxq_data *);
 int priv_check_vec_rx_support(struct priv *);
 uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index 290d6cf..245a58e 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -516,7 +516,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets to be stored.
  */
 static inline void
-rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
 {
 	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
 	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
@@ -542,7 +542,7 @@ rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
  *   Number of buffers to be replenished.
  */
 static inline void
-rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
+rxq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
 {
 	const uint16_t q_n = 1 << rxq->elts_n;
 	const uint16_t q_mask = q_n - 1;
@@ -580,7 +580,7 @@ rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
  *   the title completion descriptor to be copied to the rest of mbufs.
  */
 static inline void
-rxq_cq_decompress_v(struct rxq *rxq,
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq,
 		    volatile struct mlx5_cqe *cq,
 		    struct rte_mbuf **elts)
 {
@@ -739,7 +739,7 @@ rxq_cq_decompress_v(struct rxq *rxq,
  *   Pointer to array of packets to be filled.
  */
 static inline void
-rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], __m128i op_err,
 			 struct rte_mbuf **pkts)
 {
 	__m128i pinfo0, pinfo1;
@@ -873,7 +873,7 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
  *   Number of packets successfully received (<= pkts_n).
  */
 static uint16_t
-rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
+rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 			 uint16_t pkts_n)
 {
 	uint16_t n = 0;
@@ -908,7 +908,7 @@ rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
  *   Number of packets received including errors (<= pkts_n).
  */
 static inline uint16_t
-rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
@@ -1254,7 +1254,7 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 uint16_t
 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
-	struct rxq *rxq = dpdk_rxq;
+	struct mlx5_rxq_data *rxq = dpdk_rxq;
 	uint16_t nb_rx;
 
 	nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
@@ -1320,9 +1320,10 @@ priv_check_vec_tx_support(struct priv *priv)
  *   1 if supported, negative errno value if not.
  */
 int __attribute__((cold))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 {
-	struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_ctrl *ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
 	if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
@@ -1347,7 +1348,7 @@ priv_check_vec_rx_support(struct priv *priv)
 		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
 
 		if (rxq_check_vec_support(rxq) < 0)
 			break;
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 7b45c8c..3c3db24 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -329,7 +329,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	priv_lock(priv);
 	/* Add software counters. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
 
 		if (rxq == NULL)
 			continue;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 353ae49..512052a 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -137,8 +137,9 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 static void
 priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
 {
-	struct rxq *rxq = (*priv->rxqs)[idx];
-	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	struct ibv_exp_wq_attr mod;
 	uint16_t vlan_offloads =
 		(on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) |
-- 
2.1.4