From: "Jiawen Wu"
To: "'Andrew Rybchenko'"
Date: Mon, 5 Jul 2021 16:36:23 +0800
Subject: Re: [dpdk-dev] [PATCH v6 12/19] net/ngbe: add Rx queue setup and release
In-Reply-To: <6d34d736-eaf6-a69a-47b5-9f42e0c09b5c@oktetlabs.ru>
References: <20210617110005.4132926-1-jiawenwu@trustnetic.com> <20210617110005.4132926-13-jiawenwu@trustnetic.com> <6d34d736-eaf6-a69a-47b5-9f42e0c09b5c@oktetlabs.ru>

On July 3, 2021 12:36 AM, Andrew Rybchenko wrote:
> On 6/17/21 1:59 PM, Jiawen Wu wrote:
> > Setup device Rx queue and release Rx queue.
> >
> > Signed-off-by: Jiawen Wu
> > ---
> >  drivers/net/ngbe/meson.build   |   1 +
> >  drivers/net/ngbe/ngbe_ethdev.c |  37 +++-
> >  drivers/net/ngbe/ngbe_ethdev.h |  16 ++
> >  drivers/net/ngbe/ngbe_rxtx.c   | 308 +++++++++++++++++++++++++++++++++
> >  drivers/net/ngbe/ngbe_rxtx.h   |  96 ++++++++++
> >  5 files changed, 457 insertions(+), 1 deletion(-)
> >  create mode 100644 drivers/net/ngbe/ngbe_rxtx.c
> >  create mode 100644 drivers/net/ngbe/ngbe_rxtx.h
> >
> > diff --git a/drivers/net/ngbe/meson.build b/drivers/net/ngbe/meson.build
> > index 81173fa7f0..9e75b82f1c 100644
> > --- a/drivers/net/ngbe/meson.build
> > +++ b/drivers/net/ngbe/meson.build
> > @@ -12,6 +12,7 @@ objs = [base_objs]
> >
> >  sources = files(
> >          'ngbe_ethdev.c',
> > +        'ngbe_rxtx.c',
> >  )
> >
> >  includes += include_directories('base')
> > diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> > index c952023e8b..e73606c5f3 100644
> > --- a/drivers/net/ngbe/ngbe_ethdev.c
> > +++ b/drivers/net/ngbe/ngbe_ethdev.c
> > @@ -12,6 +12,7 @@
> >  #include "ngbe_logs.h"
> >  #include "base/ngbe.h"
> >  #include "ngbe_ethdev.h"
> > +#include "ngbe_rxtx.h"
> >
> >  static int ngbe_dev_close(struct rte_eth_dev *dev);
> >
> > @@ -37,6 +38,12 @@ static const struct rte_pci_id pci_id_ngbe_map[] = {
> >          { .vendor_id = 0, /* sentinel */ },
> >  };
> >
> > +static const struct rte_eth_desc_lim rx_desc_lim = {
> > +        .nb_max = NGBE_RING_DESC_MAX,
> > +        .nb_min = NGBE_RING_DESC_MIN,
> > +        .nb_align = NGBE_RXD_ALIGN,
> > +};
> > +
> >  static const struct eth_dev_ops ngbe_eth_dev_ops;
> >
> >  static inline void
> > @@ -241,12 +248,19 @@ static int
> >  ngbe_dev_configure(struct rte_eth_dev *dev)
> >  {
> >          struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
> > +        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
> >
> >          PMD_INIT_FUNC_TRACE();
> >
> >          /* set flag to update link status after init */
> >          intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
> >
> > +        /*
> > +         * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
> > +         * allocation Rx preconditions we will reset it.
> > +         */
> > +        adapter->rx_bulk_alloc_allowed = true;
> > +
> >          return 0;
> >  }
> >
> > @@ -266,11 +280,30 @@ ngbe_dev_close(struct rte_eth_dev *dev)
> >  static int
> >  ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> >  {
> > -        RTE_SET_USED(dev);
> > +        struct ngbe_hw *hw = ngbe_dev_hw(dev);
> > +
> > +        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
> > +
> > +        dev_info->default_rxconf = (struct rte_eth_rxconf) {
> > +                .rx_thresh = {
> > +                        .pthresh = NGBE_DEFAULT_RX_PTHRESH,
> > +                        .hthresh = NGBE_DEFAULT_RX_HTHRESH,
> > +                        .wthresh = NGBE_DEFAULT_RX_WTHRESH,
> > +                },
> > +                .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
> > +                .rx_drop_en = 0,
> > +                .offloads = 0,
> > +        };
> > +
> > +        dev_info->rx_desc_lim = rx_desc_lim;
> >
> >          dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
> >                                  ETH_LINK_SPEED_10M;
> >
> > +        /* Driver-preferred Rx/Tx parameters */
> > +        dev_info->default_rxportconf.nb_queues = 1;
> > +        dev_info->default_rxportconf.ring_size = 256;
> > +
> >          return 0;
> >  }
> >
> > @@ -570,6 +603,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
> >          .dev_configure        = ngbe_dev_configure,
> >          .dev_infos_get        = ngbe_dev_info_get,
> >          .link_update          = ngbe_dev_link_update,
> > +        .rx_queue_setup       = ngbe_dev_rx_queue_setup,
> > +        .rx_queue_release     = ngbe_dev_rx_queue_release,
> >  };
> >
> >  RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
> > diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> > index b67508a3de..6580d288c8 100644
> > --- a/drivers/net/ngbe/ngbe_ethdev.h
> > +++ b/drivers/net/ngbe/ngbe_ethdev.h
> > @@ -30,6 +30,7 @@ struct ngbe_interrupt {
> >  struct ngbe_adapter {
> >          struct ngbe_hw hw;
> >          struct ngbe_interrupt intr;
> > +        bool rx_bulk_alloc_allowed;
>
> Shouldn't it be aligned as well as the fields above?
>
> >  };
> >
> >  static inline struct ngbe_adapter *
> > @@ -58,6 +59,13 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
> >          return intr;
> >  }
> >
> > +void ngbe_dev_rx_queue_release(void *rxq);
> > +
> > +int ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
> > +                uint16_t nb_rx_desc, unsigned int socket_id,
> > +                const struct rte_eth_rxconf *rx_conf,
> > +                struct rte_mempool *mb_pool);
> > +
> >  int
> >  ngbe_dev_link_update_share(struct rte_eth_dev *dev,
> >                  int wait_to_complete);
> > @@ -66,4 +74,12 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
> >  #define NGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
> >  #define NGBE_VMDQ_NUM_UC_MAC       4096 /* Maximum nb. of UC MAC addr. */
> >
> > +/*
> > + * Default values for Rx/Tx configuration
> > + */
> > +#define NGBE_DEFAULT_RX_FREE_THRESH  32
> > +#define NGBE_DEFAULT_RX_PTHRESH      8
> > +#define NGBE_DEFAULT_RX_HTHRESH      8
> > +#define NGBE_DEFAULT_RX_WTHRESH      0
> > +
> >  #endif /* _NGBE_ETHDEV_H_ */
> > diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> > new file mode 100644
> > index 0000000000..df0b64dc01
> > --- /dev/null
> > +++ b/drivers/net/ngbe/ngbe_rxtx.c
> > @@ -0,0 +1,308 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2018-2020 Beijing WangXun Technology Co., Ltd.
> > + * Copyright(c) 2010-2017 Intel Corporation
> > + */
> > +
> > +#include
> > +
> > +#include
> > +#include
> > +#include
> > +#include
> > +
> > +#include "ngbe_logs.h"
> > +#include "base/ngbe.h"
> > +#include "ngbe_ethdev.h"
> > +#include "ngbe_rxtx.h"
> > +
> > +/**
> > + * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
> > + *
> > + * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
> > + * in the sw_sc_ring is not set to NULL but rather points to the next
> > + * mbuf of this RSC aggregation (that has not been completed yet and still
> > + * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
> > + * will just free first "nb_segs" segments of the cluster explicitly by calling
> > + * an rte_pktmbuf_free_seg().
> > + *
> > + * @m scattered cluster head
> > + */
> > +static void __rte_cold
> > +ngbe_free_sc_cluster(struct rte_mbuf *m)
> > +{
> > +        uint16_t i, nb_segs = m->nb_segs;
> > +        struct rte_mbuf *next_seg;
> > +
> > +        for (i = 0; i < nb_segs; i++) {
> > +                next_seg = m->next;
> > +                rte_pktmbuf_free_seg(m);
> > +                m = next_seg;
> > +        }
> > +}
> > +
> > +static void __rte_cold
> > +ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
> > +{
> > +        unsigned int i;
> > +
> > +        if (rxq->sw_ring != NULL) {
> > +                for (i = 0; i < rxq->nb_rx_desc; i++) {
> > +                        if (rxq->sw_ring[i].mbuf != NULL) {
> > +                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
> > +                                rxq->sw_ring[i].mbuf = NULL;
> > +                        }
> > +                }
> > +                if (rxq->rx_nb_avail) {
>
> Compare vs 0 explicitly. However, it looks like the check is not
> required at all. Body may be done unconditionally.
>
> > +                        for (i = 0; i < rxq->rx_nb_avail; ++i) {
> > +                                struct rte_mbuf *mb;
> > +
> > +                                mb = rxq->rx_stage[rxq->rx_next_avail + i];
> > +                                rte_pktmbuf_free_seg(mb);
> > +                        }
> > +                        rxq->rx_nb_avail = 0;
> > +                }
> > +        }
> > +
> > +        if (rxq->sw_sc_ring != NULL)
> > +                for (i = 0; i < rxq->nb_rx_desc; i++)
> > +                        if (rxq->sw_sc_ring[i].fbuf) {
>
> Compare vs NULL
>
> > +                                ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
> > +                                rxq->sw_sc_ring[i].fbuf = NULL;
> > +                        }
> > +}
> > +
> > +static void __rte_cold
> > +ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
> > +{
> > +        if (rxq != NULL) {
> > +                ngbe_rx_queue_release_mbufs(rxq);
> > +                rte_free(rxq->sw_ring);
> > +                rte_free(rxq->sw_sc_ring);
> > +                rte_free(rxq);
> > +        }
> > +}
> > +
> > +void __rte_cold
> > +ngbe_dev_rx_queue_release(void *rxq)
> > +{
> > +        ngbe_rx_queue_release(rxq);
> > +}
> > +
> > +/*
> > + * Check if Rx Burst Bulk Alloc function can be used.
> > + * Return
> > + *        0: the preconditions are satisfied and the bulk allocation function
> > + *           can be used.
> > + *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
> > + *           function must be used.
> > + */
> > +static inline int __rte_cold
> > +check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
> > +{
> > +        int ret = 0;
> > +
> > +        /*
> > +         * Make sure the following pre-conditions are satisfied:
> > +         *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
> > +         *   rxq->rx_free_thresh < rxq->nb_rx_desc
> > +         *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
> > +         * Scattered packets are not supported. This should be checked
> > +         * outside of this function.
> > +         */
> > +        if (!(rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST)) {
>
> Isn't it simpler to read and understand:
> rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST
>
> > +                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
> > +                             "rxq->rx_free_thresh=%d, "
> > +                             "RTE_PMD_NGBE_RX_MAX_BURST=%d",
>
> Do not split format string.
>
> > +                             rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
> > +                ret = -EINVAL;
> > +        } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
>
> rxq->rx_free_thresh >= rxq->nb_rx_desc
> is simpler to read
>
> > +                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
> > +                             "rxq->rx_free_thresh=%d, "
> > +                             "rxq->nb_rx_desc=%d",
>
> Do not split format string.
>
> > +                             rxq->rx_free_thresh, rxq->nb_rx_desc);
> > +                ret = -EINVAL;
> > +        } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
>
> (rxq->nb_rx_desc % rxq->rx_free_thresh) != 0 is easier to read
>
> > +                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
> > +                             "rxq->nb_rx_desc=%d, "
> > +                             "rxq->rx_free_thresh=%d",
> > +                             rxq->nb_rx_desc, rxq->rx_free_thresh);
>
> Do not split format string.
>
> > +                ret = -EINVAL;
> > +        }
> > +
> > +        return ret;
> > +}
> > +
> > +/* Reset dynamic ngbe_rx_queue fields back to defaults */
> > +static void __rte_cold
> > +ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
> > +{
> > +        static const struct ngbe_rx_desc zeroed_desc = {
> > +                                        {{0}, {0} }, {{0}, {0} } };
> > +        unsigned int i;
> > +        uint16_t len = rxq->nb_rx_desc;
> > +
> > +        /*
> > +         * By default, the Rx queue setup function allocates enough memory for
> > +         * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
> > +         * extra memory at the end of the descriptor ring to be zero'd out.
> > +         */
> > +        if (adapter->rx_bulk_alloc_allowed)
> > +                /* zero out extra memory */
> > +                len += RTE_PMD_NGBE_RX_MAX_BURST;
> > +
> > +        /*
> > +         * Zero out HW ring memory. Zero out extra memory at the end of
> > +         * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
> > +         * reads extra memory as zeros.
> > +         */
> > +        for (i = 0; i < len; i++)
> > +                rxq->rx_ring[i] = zeroed_desc;
> > +
> > +        /*
> > +         * initialize extra software ring entries. Space for these extra
> > +         * entries is always allocated
> > +         */
> > +        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
> > +        for (i = rxq->nb_rx_desc; i < len; ++i)
> > +                rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
> > +
> > +        rxq->rx_nb_avail = 0;
> > +        rxq->rx_next_avail = 0;
> > +        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
> > +        rxq->rx_tail = 0;
> > +        rxq->nb_rx_hold = 0;
> > +        rxq->pkt_first_seg = NULL;
> > +        rxq->pkt_last_seg = NULL;
> > +}
> > +
> > +int __rte_cold
> > +ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> > +                        uint16_t queue_idx,
> > +                        uint16_t nb_desc,
> > +                        unsigned int socket_id,
> > +                        const struct rte_eth_rxconf *rx_conf,
> > +                        struct rte_mempool *mp)
> > +{
> > +        const struct rte_memzone *rz;
> > +        struct ngbe_rx_queue *rxq;
> > +        struct ngbe_hw *hw;
> > +        uint16_t len;
> > +        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
> > +
> > +        PMD_INIT_FUNC_TRACE();
> > +        hw = ngbe_dev_hw(dev);
> > +
> > +        /*
> > +         * Validate number of receive descriptors.
> > +         * It must not exceed hardware maximum, and must be multiple
> > +         * of NGBE_ALIGN.
> > +         */
> > +        if (nb_desc % NGBE_RXD_ALIGN != 0 ||
> > +                        nb_desc > NGBE_RING_DESC_MAX ||
> > +                        nb_desc < NGBE_RING_DESC_MIN) {
> > +                return -EINVAL;
> > +        }
>
> rte_eth_rx_queue_setup cares about it
>

I don't quite understand.

> > +
> > +        /* Free memory prior to re-allocation if needed... */
> > +        if (dev->data->rx_queues[queue_idx] != NULL) {
> > +                ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
> > +                dev->data->rx_queues[queue_idx] = NULL;
> > +        }
> > +
> > +        /* First allocate the Rx queue data structure */
> > +        rxq = rte_zmalloc_socket("ethdev RX queue",
> > +                                 sizeof(struct ngbe_rx_queue),
> > +                                 RTE_CACHE_LINE_SIZE, socket_id);
> > +        if (rxq == NULL)
> > +                return -ENOMEM;
> > +        rxq->mb_pool = mp;
> > +        rxq->nb_rx_desc = nb_desc;
> > +        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
> > +        rxq->queue_id = queue_idx;
> > +        rxq->reg_idx = queue_idx;
> > +        rxq->port_id = dev->data->port_id;
> > +        rxq->drop_en = rx_conf->rx_drop_en;
> > +        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
> > +
> > +        /*
> > +         * Allocate Rx ring hardware descriptors. A memzone large enough to
> > +         * handle the maximum ring size is allocated in order to allow for
> > +         * resizing in later calls to the queue setup function.
> > +         */
> > +        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
> > +                                      RX_RING_SZ, NGBE_ALIGN, socket_id);
> > +        if (rz == NULL) {
> > +                ngbe_rx_queue_release(rxq);
> > +                return -ENOMEM;
> > +        }
> > +
> > +        /*
> > +         * Zero init all the descriptors in the ring.
> > +         */
> > +        memset(rz->addr, 0, RX_RING_SZ);
> > +
> > +        rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
> > +        rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
> > +
> > +        rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
> > +        rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
> > +
> > +        /*
> > +         * Certain constraints must be met in order to use the bulk buffer
> > +         * allocation Rx burst function. If any of Rx queues doesn't meet them
> > +         * the feature should be disabled for the whole port.
> > +         */
> > +        if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
> > +                PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
> > +                             "preconditions - canceling the feature for "
> > +                             "the whole port[%d]",
>
> Do not split format string.
>
> > +                             rxq->queue_id, rxq->port_id);
> > +                adapter->rx_bulk_alloc_allowed = false;
> > +        }
> > +
> > +        /*
> > +         * Allocate software ring. Allow for space at the end of the
> > +         * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
> > +         * function does not access an invalid memory region.
> > +         */
> > +        len = nb_desc;
> > +        if (adapter->rx_bulk_alloc_allowed)
> > +                len += RTE_PMD_NGBE_RX_MAX_BURST;
> > +
> > +        rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
> > +                                          sizeof(struct ngbe_rx_entry) * len,
> > +                                          RTE_CACHE_LINE_SIZE, socket_id);
> > +        if (rxq->sw_ring == NULL) {
> > +                ngbe_rx_queue_release(rxq);
> > +                return -ENOMEM;
> > +        }
> > +
> > +        /*
> > +         * Always allocate even if it's not going to be needed in order to
> > +         * simplify the code.
> > +         *
> > +         * This ring is used in Scattered Rx cases and Scattered Rx may
> > +         * be requested in ngbe_dev_rx_init(), which is called later from
> > +         * dev_start() flow.
> > +         */
> > +        rxq->sw_sc_ring =
> > +                rte_zmalloc_socket("rxq->sw_sc_ring",
> > +                                   sizeof(struct ngbe_scattered_rx_entry) * len,
> > +                                   RTE_CACHE_LINE_SIZE, socket_id);
> > +        if (rxq->sw_sc_ring == NULL) {
> > +                ngbe_rx_queue_release(rxq);
> > +                return -ENOMEM;
> > +        }
> > +
> > +        PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
> > +                     "dma_addr=0x%" PRIx64,
>
> Do not split format string.
>
> > +                     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
> > +                     rxq->rx_ring_phys_addr);
> > +
> > +        dev->data->rx_queues[queue_idx] = rxq;
> > +
> > +        ngbe_reset_rx_queue(adapter, rxq);
> > +
> > +        return 0;
> > +}
> > +
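
---

A few sketches follow to make the review points above concrete; none of this code is from the thread itself.

On "it looks like the check is not required at all; body may be done unconditionally": when rxq->rx_nb_avail is 0, the for loop already executes zero times, so the surrounding if changes nothing. A guard-free version might read as below; ngbe_drain_rx_stage is a hypothetical helper name, not one from the patch, and the rx_stage/rx_nb_avail/rx_next_avail fields are those declared in ngbe_rxtx.h by this series.

#include <rte_mbuf.h>
#include "ngbe_rxtx.h"

/* Free any mbufs still parked in the bulk-alloc staging area.
 * No emptiness check is needed: a zero count means zero iterations. */
static void
ngbe_drain_rx_stage(struct ngbe_rx_queue *rxq)
{
        unsigned int i;

        for (i = 0; i < rxq->rx_nb_avail; ++i)
                rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
        rxq->rx_nb_avail = 0;
}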
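On the repeated "Do not split format string" remarks: DPDK convention keeps the whole format string on one line, even past the usual line-length limit, so that a message seen in a log can be grepped for in the source. Taking the first precondition log as an example, the unsplit form would presumably look like this (illustrative only):

        PMD_INIT_LOG(DEBUG,
                     "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
                     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);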
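On "rte_eth_rx_queue_setup cares about it" (the point that drew "I don't quite understand"): the ethdev layer validates nb_rx_desc against the limits a PMD reports through dev_infos_get() before the driver's rx_queue_setup callback ever runs. Since this very patch fills dev_info->rx_desc_lim with NGBE_RING_DESC_MAX, NGBE_RING_DESC_MIN, and NGBE_RXD_ALIGN, the range check inside ngbe_dev_rx_queue_setup() repeats work already done. A rough paraphrase of the ethdev-layer check (simplified from lib/ethdev of that era; check_nb_rx_desc is an illustrative wrapper, not an upstream function, and error logging is omitted):

#include <rte_ethdev.h>

/* Approximation of the nb_rx_desc validation rte_eth_rx_queue_setup()
 * performs against the PMD-reported rx_desc_lim before calling into
 * the driver. */
static int
check_nb_rx_desc(uint16_t port_id, uint16_t nb_rx_desc)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Same bounds the ngbe patch checks by hand, but driven by the
         * limits the PMD advertised in dev_infos_get(). */
        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
            nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
            nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0)
                return -EINVAL;

        return 0;
}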