From mboxrd@z Thu Jan 1 00:00:00 1970
Date: Tue, 19 Mar 2019 16:39:35 +0800
From: Ye Xiaolong
To: Mattias Rönnblom
Cc: dev@dpdk.org, Qi Zhang, Karlsson Magnus, Topel Bjorn
Message-ID: <20190319083935.GA16135@intel.com>
References: <20190301080947.91086-1-xiaolong.ye@intel.com>
 <20190319071256.26302-1-xiaolong.ye@intel.com>
 <20190319071256.26302-6-xiaolong.ye@intel.com>
 <6b786b77-f9b6-cb0b-634b-701e92a1c3a2@lysator.liu.se>
In-Reply-To: <6b786b77-f9b6-cb0b-634b-701e92a1c3a2@lysator.liu.se>
User-Agent: Mutt/1.9.4 (2018-02-28)
Subject: Re: [dpdk-dev] [PATCH v2 5/6] net/af_xdp: enable zero copy

Hi, Mattias

Thanks for the review.

On 03/19, Mattias Rönnblom wrote:
>On 2019-03-19 08:12, Xiaolong Ye wrote:
>> Try to check if external mempool (from rx_queue_setup) is fit for
>> af_xdp, if it is, it will be registered to af_xdp socket directly and
>> there will be no packet data copy on Rx and Tx.
>>
>> Signed-off-by: Xiaolong Ye
>> ---
>>   drivers/net/af_xdp/rte_eth_af_xdp.c | 128 ++++++++++++++++++++--------
>>   1 file changed, 91 insertions(+), 37 deletions(-)
>>
>> diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
>> index fc60cb5c5..c22791e51 100644
>> --- a/drivers/net/af_xdp/rte_eth_af_xdp.c
>> +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
>> @@ -62,6 +62,7 @@ struct xsk_umem_info {
>>       struct xsk_umem *umem;
>>       struct rte_mempool *mb_pool;
>>       void *buffer;
>> +     uint8_t zc;
>>   };
>>   struct pkt_rx_queue {
>> @@ -76,6 +77,7 @@ struct pkt_rx_queue {
>>       struct pkt_tx_queue *pair;
>>       uint16_t queue_idx;
>> +     uint8_t zc;
>>   };
>>   struct pkt_tx_queue {
>> @@ -191,17 +193,24 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>>           uint32_t len = xsk_ring_cons__rx_desc(rx, idx_rx++)->len;
>>           char *pkt = xsk_umem__get_data(rxq->umem->buffer, addr);
>> -         mbuf = rte_pktmbuf_alloc(rxq->mb_pool);
>> -         if (mbuf) {
>> -             memcpy(rte_pktmbuf_mtod(mbuf, void*), pkt, len);
>> +         if (rxq->zc) {
>> +             mbuf = addr_to_mbuf(rxq->umem, addr);
>>               rte_pktmbuf_pkt_len(mbuf) =
>>                   rte_pktmbuf_data_len(mbuf) = len;
>> -             rx_bytes += len;
>>               bufs[count++] = mbuf;
>>           } else {
>> -             dropped++;
>> +             mbuf = rte_pktmbuf_alloc(rxq->mb_pool);
>> +             if (mbuf) {
>
>if (likely(mbuf != NULL))

Got it.

>
>> +                 memcpy(rte_pktmbuf_mtod(mbuf, void*), pkt, len);
>
>Use rte_memcpy.

Got it.
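Just to confirm I've read both comments right, the copy branch in the next
version would look roughly like the sketch below (illustrative only, reusing
the names from the hunk above; likely() comes from rte_branch_prediction.h
and rte_memcpy() from rte_memcpy.h):

        } else {
            mbuf = rte_pktmbuf_alloc(rxq->mb_pool);
            if (likely(mbuf != NULL)) {
                /* copy the frame into the caller's mempool */
                rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), pkt, len);
                rte_pktmbuf_pkt_len(mbuf) = len;
                rte_pktmbuf_data_len(mbuf) = len;
                rx_bytes += len;
                bufs[count++] = mbuf;
            } else {
                dropped++;
            }
            /* the umem frame is returned to the fill path either way */
            rte_pktmbuf_free(addr_to_mbuf(umem, addr));
        }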
>
>> +                 rte_pktmbuf_pkt_len(mbuf) =
>> +                     rte_pktmbuf_data_len(mbuf) = len;
>> +                 rx_bytes += len;
>> +                 bufs[count++] = mbuf;
>> +             } else {
>> +                 dropped++;
>> +             }
>> +             rte_pktmbuf_free(addr_to_mbuf(umem, addr));
>>           }
>> -         rte_pktmbuf_free(addr_to_mbuf(umem, addr));
>>       }
>>       xsk_ring_cons__release(rx, rcvd);
>> @@ -285,22 +294,29 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>>               - ETH_AF_XDP_DATA_HEADROOM;
>>           desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
>>           mbuf = bufs[i];
>> -         if (mbuf->pkt_len <= buf_len) {
>> -             mbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);
>> -             if (!mbuf_to_tx) {
>> -                 rte_pktmbuf_free(mbuf);
>> -                 continue;
>> -             }
>> -             desc->addr = mbuf_to_addr(umem, mbuf_to_tx);
>> +         if (txq->pair->zc && mbuf->pool == umem->mb_pool) {
>> +             desc->addr = mbuf_to_addr(umem, mbuf);
>>               desc->len = mbuf->pkt_len;
>> -             pkt = xsk_umem__get_data(umem->buffer,
>> -                     desc->addr);
>> -             memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
>> -                     desc->len);
>>               valid++;
>>               tx_bytes += mbuf->pkt_len;
>> +         } else {
>> +             if (mbuf->pkt_len <= buf_len) {
>> +                 mbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);
>> +                 if (!mbuf_to_tx) {
>
>if (unlikely(mbuf_to_tx == NULL))
>
>See DPDK coding conventions 1.8.1 for how to do pointer comparisons.

I'll check it and do it correctly in the next version.

>
>> +                     rte_pktmbuf_free(mbuf);
>> +                     continue;
>> +                 }
>> +                 desc->addr = mbuf_to_addr(umem, mbuf_to_tx);
>> +                 desc->len = mbuf->pkt_len;
>> +                 pkt = xsk_umem__get_data(umem->buffer,
>> +                         desc->addr);
>> +                 memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
>> +                         desc->len);
>
>rte_memcpy()

Got it.

>
>> +                 valid++;
>> +                 tx_bytes += mbuf->pkt_len;
>> +             }
>> +             rte_pktmbuf_free(mbuf);
>>           }
>> -         rte_pktmbuf_free(mbuf);
>>       }
>>       xsk_ring_prod__submit(&txq->tx, nb_pkts);
>> @@ -479,7 +495,7 @@ static inline uint64_t get_len(struct rte_mempool *mp)
>>       return (uint64_t)(memhdr->len);
>>   }
>> -static struct xsk_umem_info *xdp_umem_configure(void)
>> +static struct xsk_umem_info *xdp_umem_configure(struct rte_mempool *mb_pool)
>>   {
>>       struct xsk_umem_info *umem;
>>       struct xsk_umem_config usr_config = {
>> @@ -497,20 +513,25 @@ static struct xsk_umem_info *xdp_umem_configure(void)
>>           return NULL;
>>       }
>> -     snprintf(pool_name, 0x100, "af_xdp_ring");
>> -     umem->mb_pool = rte_pktmbuf_pool_create_with_flags(pool_name,
>> -             ETH_AF_XDP_NUM_BUFFERS,
>> -             250, 0,
>> -             ETH_AF_XDP_FRAME_SIZE -
>> -             ETH_AF_XDP_MBUF_OVERHEAD,
>> -             MEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,
>> -             SOCKET_ID_ANY);
>> -
>> -     if (!umem->mb_pool || umem->mb_pool->nb_mem_chunks != 1) {
>> -         RTE_LOG(ERR, AF_XDP,
>> -             "Failed to create rte_mempool\n");
>> -         goto err;
>> +     if (!mb_pool) {
>
>1.8.1

Got it.

>
>> +         snprintf(pool_name, 0x100, "af_xdp_ring");
>
>0x100??

Will use RTE_MEMPOOL_NAMESIZE instead.

>
>> +         umem->mb_pool = rte_pktmbuf_pool_create_with_flags(pool_name,
>> +                 ETH_AF_XDP_NUM_BUFFERS,
>> +                 250, 0,
>> +                 ETH_AF_XDP_FRAME_SIZE -
>> +                 ETH_AF_XDP_MBUF_OVERHEAD,
>> +                 MEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,
>> +                 SOCKET_ID_ANY);
>> +
>> +         if (!umem->mb_pool || umem->mb_pool->nb_mem_chunks != 1) {
>
>1.8.1

Got it.
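To spell out how I read the convention, the pointer checks and the name
buffer would become something like this (rough sketch only, not the final
diff; RTE_MEMPOOL_NAMESIZE is from rte_mempool.h, everything else keeps the
names from the hunks above):

    char pool_name[RTE_MEMPOOL_NAMESIZE];

    /* tx slow path: explicit NULL comparison plus branch hint */
    mbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);
    if (unlikely(mbuf_to_tx == NULL)) {
        rte_pktmbuf_free(mbuf);
        continue;
    }

    /* umem setup: same style for the mempool pointers */
    if (mb_pool == NULL) {
        snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "af_xdp_ring");
        umem->mb_pool = rte_pktmbuf_pool_create_with_flags(pool_name,
                ETH_AF_XDP_NUM_BUFFERS, 250, 0,
                ETH_AF_XDP_FRAME_SIZE -
                ETH_AF_XDP_MBUF_OVERHEAD,
                MEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,
                SOCKET_ID_ANY);
        if (umem->mb_pool == NULL ||
            umem->mb_pool->nb_mem_chunks != 1) {
            RTE_LOG(ERR, AF_XDP, "Failed to create rte_mempool\n");
            goto err;
        }
    } else {
        /* reuse the application's mempool as the umem: zero copy */
        umem->mb_pool = mb_pool;
        umem->zc = 1;
    }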
>
>> +             RTE_LOG(ERR, AF_XDP, "Failed to create rte_mempool\n");
>> +             goto err;
>> +         }
>> +     } else {
>> +         umem->mb_pool = mb_pool;
>> +         umem->zc = 1;
>>       }
>> +
>>       base_addr = (void *)get_base_addr(umem->mb_pool);
>>       ret = xsk_umem__create(&umem->umem, base_addr,
>> @@ -531,16 +552,43 @@ static struct xsk_umem_info *xdp_umem_configure(void)
>>       return NULL;
>>   }
>> +static uint8_t
>> +check_mempool_zc(struct rte_mempool *mp)
>> +{
>> +     RTE_ASSERT(mp);
>> +
>> +     /* must continues */
>> +     if (mp->nb_mem_chunks > 1)
>> +         return 0;
>> +
>> +     /* check header size */
>> +     if (mp->header_size != RTE_CACHE_LINE_SIZE)
>> +         return 0;
>> +
>> +     /* check base address */
>> +     if ((uint64_t)get_base_addr(mp) % getpagesize() != 0)
>
>This should be an uintptr_t cast.

Got it.

Thanks,
Xiaolong

>
>> +         return 0;
>> +
>> +     /* check chunk size */
>> +     if ((mp->elt_size + mp->header_size + mp->trailer_size) %
>> +             ETH_AF_XDP_FRAME_SIZE != 0)
>> +         return 0;
>> +
>> +     return 1;
>> +}
>> +
>>   static int
>>   xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
>> -           int ring_size)
>> +           int ring_size, struct rte_mempool *mb_pool)
>>   {
>>       struct xsk_socket_config cfg;
>>       struct pkt_tx_queue *txq = rxq->pair;
>> +     struct rte_mempool *mp;
>>       int ret = 0;
>>       int reserve_size;
>> -     rxq->umem = xdp_umem_configure();
>> +     mp = check_mempool_zc(mb_pool) ? mb_pool : NULL;
>> +     rxq->umem = xdp_umem_configure(mp);
>>       if (!rxq->umem) {
>>           ret = -ENOMEM;
>>           goto err;
>> @@ -627,15 +675,21 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
>>       rxq->mb_pool = mb_pool;
>> -     if (xsk_configure(internals, rxq, nb_rx_desc)) {
>> -         RTE_LOG(ERR, AF_XDP,
>> -             "Failed to configure xdp socket\n");
>> +     if (xsk_configure(internals, rxq, nb_rx_desc, mb_pool)) {
>> +         RTE_LOG(ERR, AF_XDP, "Failed to configure xdp socket\n");
>>           ret = -EINVAL;
>>           goto err;
>>       }
>>       internals->umem = rxq->umem;
>> +     if (mb_pool == internals->umem->mb_pool)
>> +         rxq->zc = internals->umem->zc;
>> +
>> +     if (rxq->zc)
>> +         RTE_LOG(INFO, AF_XDP,
>> +             "zero copy enabled on rx queue %d\n", rx_queue_id);
>> +
>>       dev->data->rx_queues[rx_queue_id] = rxq;
>>       return 0;
>>
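For completeness, this is roughly how I plan to rework check_mempool_zc()
for the uintptr_t comment and the comparison style (a sketch under the same
assumptions as the hunk above -- get_base_addr(), ETH_AF_XDP_FRAME_SIZE and
the checks themselves are unchanged, only the cast, the assertion and the
comments differ):

static uint8_t
check_mempool_zc(struct rte_mempool *mp)
{
    RTE_ASSERT(mp != NULL);

    /* the umem needs one contiguous memory chunk */
    if (mp->nb_mem_chunks > 1)
        return 0;

    /* mbuf header must be exactly one cache line */
    if (mp->header_size != RTE_CACHE_LINE_SIZE)
        return 0;

    /* base address must be page aligned */
    if ((uintptr_t)get_base_addr(mp) % getpagesize() != 0)
        return 0;

    /* each element must cover a whole number of XDP frames */
    if ((mp->elt_size + mp->header_size + mp->trailer_size) %
            ETH_AF_XDP_FRAME_SIZE != 0)
        return 0;

    return 1;
}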