From: Kevin Laatz
To: dev@dpdk.org
Cc: bruce.richardson@intel.com, fengchengwen@huawei.com, conor.walsh@intel.com,
 Konstantin Ananyev
Date: Fri, 10 Sep 2021 17:27:32 +0000
Message-Id: <20210910172737.2561156-2-kevin.laatz@intel.com>
In-Reply-To: <20210910172737.2561156-1-kevin.laatz@intel.com>
References: <20210910172737.2561156-1-kevin.laatz@intel.com>
Subject: [dpdk-dev] [PATCH 1/6] examples/ioat: always use same lcore for both
 DMA requests enqueue and dequeue

From: Konstantin Ananyev

A few changes in the ioat sample behaviour:
- Always do a SW copy for packet metadata (mbuf fields).
- Always use the same lcore for both DMA requests enqueue and dequeue.

Main reasons for that:
a) It is safer, as the idxd PMD doesn't support MT-safe enqueue/dequeue (yet).
b) It makes for a more apples-to-apples comparison with the SW copy.
c) From my testing, things are faster that way.

Signed-off-by: Konstantin Ananyev
---
 examples/ioat/ioatfwd.c | 185 ++++++++++++++++++++++------------------
 1 file changed, 101 insertions(+), 84 deletions(-)

diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index 0c413180f8..617c8c3dfb 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -331,43 +331,36 @@ update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)

 /* Perform packet copy there is a user-defined function. 8< */
 static inline void
-pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
+pktmbuf_metadata_copy(const struct rte_mbuf *src, struct rte_mbuf *dst)
 {
-	/* Copy packet metadata */
-	rte_memcpy(&dst->rearm_data,
-		&src->rearm_data,
-		offsetof(struct rte_mbuf, cacheline1) -
-		offsetof(struct rte_mbuf, rearm_data));
+	dst->data_off = src->data_off;
+	memcpy(&dst->rx_descriptor_fields1, &src->rx_descriptor_fields1,
+		offsetof(struct rte_mbuf, buf_len) -
+		offsetof(struct rte_mbuf, rx_descriptor_fields1));
+}

-	/* Copy packet data */
+/* Copy packet data */
+static inline void
+pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
+{
 	rte_memcpy(rte_pktmbuf_mtod(dst, char *),
 		rte_pktmbuf_mtod(src, char *), src->data_len);
 }
 /* >8 End of perform packet copy there is a user-defined function. */

 static uint32_t
-ioat_enqueue_packets(struct rte_mbuf **pkts,
+ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
 	uint32_t nb_rx, uint16_t dev_id)
 {
 	int ret;
 	uint32_t i;
-	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
-
-	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
-		&pkts[0]->rearm_data);
-
-	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
-		(void *)pkts_copy, nb_rx);
-
-	if (unlikely(ret < 0))
-		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");

 	for (i = 0; i < nb_rx; i++) {
 		/* Perform data copy */
 		ret = rte_ioat_enqueue_copy(dev_id,
-			pkts[i]->buf_iova - addr_offset,
-			pkts_copy[i]->buf_iova - addr_offset,
-			rte_pktmbuf_data_len(pkts[i]) + addr_offset,
+			rte_pktmbuf_iova(pkts[i]),
+			rte_pktmbuf_iova(pkts_copy[i]),
+			rte_pktmbuf_data_len(pkts[i]),
 			(uintptr_t)pkts[i],
 			(uintptr_t)pkts_copy[i]);
@@ -376,20 +369,50 @@ ioat_enqueue_packets(struct rte_mbuf **pkts,
 	}

 	ret = i;

-	/* Free any not enqueued packets. */
-	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
-	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
-		nb_rx - i);
-
 	return ret;
 }

+static inline uint32_t
+ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
+	uint32_t num, uint16_t dev_id)
+{
+	uint32_t n;
+
+	n = ioat_enqueue_packets(pkts, pkts_copy, num, dev_id);
+	if (n > 0)
+		rte_ioat_perform_ops(dev_id);
+
+	return n;
+}
+
+static inline uint32_t
+ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
+	uint16_t dev_id)
+{
+	int32_t rc;
+	/* Dequeue the mbufs from IOAT device. Since all memory
+	 * is DPDK pinned memory and therefore all addresses should
+	 * be valid, we don't check for copy errors
+	 */
+	rc = rte_ioat_completed_ops(dev_id, num, NULL, NULL,
+		(void *)src, (void *)dst);
+	if (rc < 0) {
+		RTE_LOG(CRIT, IOAT,
+			"rte_ioat_completed_ops(%hu) failed, error: %d\n",
+			dev_id, rte_errno);
+		rc = 0;
+	}
+	return rc;
+}
+
 /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8< */
 static void
 ioat_rx_port(struct rxtx_port_config *rx_config)
 {
+	int32_t ret;
 	uint32_t nb_rx, nb_enq, i, j;
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];

 	for (i = 0; i < rx_config->nb_queues; i++) {
@@ -401,40 +424,54 @@ ioat_rx_port(struct rxtx_port_config *rx_config)

 		port_statistics.rx[rx_config->rxtx_port] += nb_rx;

+		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+			(void *)pkts_burst_copy, nb_rx);
+
+		if (unlikely(ret < 0))
+			rte_exit(EXIT_FAILURE,
+				"Unable to allocate memory.\n");
+
+		for (j = 0; j < nb_rx; j++)
+			pktmbuf_metadata_copy(pkts_burst[j],
+				pkts_burst_copy[j]);
+
 		if (copy_mode == COPY_MODE_IOAT_NUM) {
-			/* Perform packet hardware copy */
-			nb_enq = ioat_enqueue_packets(pkts_burst,
+
+			/* enqueue packets for hardware copy */
+			nb_enq = ioat_enqueue(pkts_burst, pkts_burst_copy,
 				nb_rx, rx_config->ioat_ids[i]);
-			if (nb_enq > 0)
-				rte_ioat_perform_ops(rx_config->ioat_ids[i]);
-		} else {
-			/* Perform packet software copy, free source packets */
-			int ret;
-			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];

-			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
-				(void *)pkts_burst_copy, nb_rx);
+			/* free any not enqueued packets. */
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)&pkts_burst[nb_enq],
+				nb_rx - nb_enq);
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)&pkts_burst_copy[nb_enq],
+				nb_rx - nb_enq);

-			if (unlikely(ret < 0))
-				rte_exit(EXIT_FAILURE,
-					"Unable to allocate memory.\n");
+			port_statistics.copy_dropped[rx_config->rxtx_port] +=
+				(nb_rx - nb_enq);

+			/* get completed copies */
+			nb_rx = ioat_dequeue(pkts_burst, pkts_burst_copy,
+				MAX_PKT_BURST, rx_config->ioat_ids[i]);
+		} else {
+			/* Perform packet software copy, free source packets */
 			for (j = 0; j < nb_rx; j++)
 				pktmbuf_sw_copy(pkts_burst[j],
 					pkts_burst_copy[j]);
+		}

-			rte_mempool_put_bulk(ioat_pktmbuf_pool,
-				(void *)pkts_burst, nb_rx);
+		rte_mempool_put_bulk(ioat_pktmbuf_pool,
+			(void *)pkts_burst, nb_rx);

-			nb_enq = rte_ring_enqueue_burst(
-				rx_config->rx_to_tx_ring,
-				(void *)pkts_burst_copy, nb_rx, NULL);
+		nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring,
+			(void *)pkts_burst_copy, nb_rx, NULL);

-			/* Free any not enqueued packets. */
-			rte_mempool_put_bulk(ioat_pktmbuf_pool,
-				(void *)&pkts_burst_copy[nb_enq],
-				nb_rx - nb_enq);
-		}
+		/* Free any not enqueued packets. */
+		rte_mempool_put_bulk(ioat_pktmbuf_pool,
+			(void *)&pkts_burst_copy[nb_enq],
+			nb_rx - nb_enq);

 		port_statistics.copy_dropped[rx_config->rxtx_port] +=
 			(nb_rx - nb_enq);
@@ -446,51 +483,33 @@
 static void
 ioat_tx_port(struct rxtx_port_config *tx_config)
 {
-	uint32_t i, j, nb_dq = 0;
-	struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
-	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
+	uint32_t i, j, nb_dq, nb_tx;
+	struct rte_mbuf *mbufs[MAX_PKT_BURST];

 	for (i = 0; i < tx_config->nb_queues; i++) {
-		if (copy_mode == COPY_MODE_IOAT_NUM) {
-			/* Dequeue the mbufs from IOAT device. Since all memory
-			 * is DPDK pinned memory and therefore all addresses should
-			 * be valid, we don't check for copy errors
-			 */
-			nb_dq = rte_ioat_completed_ops(
-				tx_config->ioat_ids[i], MAX_PKT_BURST, NULL, NULL,
-				(void *)mbufs_src, (void *)mbufs_dst);
-		} else {
-			/* Dequeue the mbufs from rx_to_tx_ring. */
-			nb_dq = rte_ring_dequeue_burst(
-				tx_config->rx_to_tx_ring, (void *)mbufs_dst,
-				MAX_PKT_BURST, NULL);
-		}
-
-		if ((int32_t) nb_dq <= 0)
-			return;
-		if (copy_mode == COPY_MODE_IOAT_NUM)
-			rte_mempool_put_bulk(ioat_pktmbuf_pool,
-				(void *)mbufs_src, nb_dq);
+		/* Dequeue the mbufs from rx_to_tx_ring. */
+		nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
+			(void *)mbufs, MAX_PKT_BURST, NULL);
+		if (nb_dq == 0)
+			continue;

 		/* Update macs if enabled */
 		if (mac_updating) {
 			for (j = 0; j < nb_dq; j++)
-				update_mac_addrs(mbufs_dst[j],
+				update_mac_addrs(mbufs[j],
 					tx_config->rxtx_port);
 		}

-		const uint16_t nb_tx = rte_eth_tx_burst(
-			tx_config->rxtx_port, 0,
-			(void *)mbufs_dst, nb_dq);
+		nb_tx = rte_eth_tx_burst(tx_config->rxtx_port, 0,
+			(void *)mbufs, nb_dq);

 		port_statistics.tx[tx_config->rxtx_port] += nb_tx;

 		/* Free any unsent packets. */
 		if (unlikely(nb_tx < nb_dq))
 			rte_mempool_put_bulk(ioat_pktmbuf_pool,
-				(void *)&mbufs_dst[nb_tx],
-				nb_dq - nb_tx);
+				(void *)&mbufs[nb_tx], nb_dq - nb_tx);
 	}
 }
 /* >8 End of transmitting packets from IOAT. */
@@ -853,9 +872,6 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
 		dev_info.flow_type_rss_offloads;

-	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-		local_port_conf.txmode.offloads |=
-			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device:"
@@ -979,7 +995,8 @@ main(int argc, char **argv)

 	/* Allocates mempool to hold the mbufs. 8< */
 	nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
-		4 * MAX_PKT_BURST) + rte_lcore_count() * MEMPOOL_CACHE_SIZE),
+		4 * MAX_PKT_BURST + ring_size) + ring_size +
+		rte_lcore_count() * MEMPOOL_CACHE_SIZE),
 		MIN_POOL_SIZE);

 	/* Create the mbuf pool */
@@ -1011,8 +1028,8 @@ main(int argc, char **argv)

 	if (copy_mode == COPY_MODE_IOAT_NUM)
 		assign_rawdevs();
-	else /* copy_mode == COPY_MODE_SW_NUM */
-		assign_rings();
+
+	assign_rings();
 	/* >8 End of assigning each port resources. */

 	start_forwarding_cores();
-- 
2.30.2
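
For context, the sketch below condenses the receive-path flow this patch establishes: the RX lcore allocates the copy mbufs, duplicates the metadata in software, enqueues the payload copies to the IOAT device and dequeues the completions itself, then hands the completed copies to the TX core over the ring. It is an illustrative sketch only, not part of the patch: rx_lcore_iteration() is a hypothetical wrapper assumed to live in examples/ioat/ioatfwd.c next to the helpers introduced above (pktmbuf_metadata_copy(), ioat_enqueue(), ioat_dequeue(), MAX_PKT_BURST), and statistics updates are omitted.

/* Illustrative sketch only -- not part of the patch. Assumes it sits in
 * examples/ioat/ioatfwd.c, next to the helpers introduced above.
 */
static void
rx_lcore_iteration(uint16_t port, uint16_t queue, uint16_t ioat_dev,
	struct rte_mempool *copy_pool, struct rte_ring *rx_to_tx)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	struct rte_mbuf *copies[MAX_PKT_BURST];
	uint32_t nb_rx, nb_enq, nb_done, nb_ring, i;

	nb_rx = rte_eth_rx_burst(port, queue, pkts, MAX_PKT_BURST);
	if (nb_rx == 0)
		return;

	/* metadata (mbuf fields) is always copied in SW on this lcore */
	if (rte_mempool_get_bulk(copy_pool, (void *)copies, nb_rx) < 0)
		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
	for (i = 0; i < nb_rx; i++)
		pktmbuf_metadata_copy(pkts[i], copies[i]);

	/* enqueue the payload copies to the DMA engine ... */
	nb_enq = ioat_enqueue(pkts, copies, nb_rx, ioat_dev);

	/* ... drop whatever could not be enqueued ... */
	rte_mempool_put_bulk(copy_pool, (void *)&pkts[nb_enq], nb_rx - nb_enq);
	rte_mempool_put_bulk(copy_pool, (void *)&copies[nb_enq], nb_rx - nb_enq);

	/* ... and dequeue completions on the *same* lcore */
	nb_done = ioat_dequeue(pkts, copies, MAX_PKT_BURST, ioat_dev);

	/* completed sources go back to the pool, copies go to the TX core */
	rte_mempool_put_bulk(copy_pool, (void *)pkts, nb_done);
	nb_ring = rte_ring_enqueue_burst(rx_to_tx, (void *)copies, nb_done, NULL);
	rte_mempool_put_bulk(copy_pool, (void *)&copies[nb_ring],
		nb_done - nb_ring);
}

Keeping the dequeue on the RX lcore is what removes the split enqueue/dequeue across threads that the idxd PMD cannot currently support in an MT-safe way, per reason a) in the commit message.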