From: Maciej Gajdzica <maciejx.t.gajdzica@intel.com>
To: dev@dpdk.org
Date: Thu, 21 May 2015 14:28:44 +0200
Message-Id: <1432211324-5078-5-git-send-email-maciejx.t.gajdzica@intel.com>
In-Reply-To: <1432211324-5078-1-git-send-email-maciejx.t.gajdzica@intel.com>
References: <1432211324-5078-1-git-send-email-maciejx.t.gajdzica@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/4] port: added ring_writer_nodrop port

When the ring_writer_nodrop port fails to send data, it tries to
resend. The operation is aborted when the maximum number of retries
is reached.

Signed-off-by: Maciej Gajdzica <maciejx.t.gajdzica@intel.com>
---
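A note for reviewers on the fast-path test in
rte_port_ring_writer_nodrop_tx_bulk() below: expr == 0 holds only when
pkts_mask is a contiguous run of ones starting at bit 0 (checked by
pkts_mask & (pkts_mask + 1)) that covers at least tx_burst_sz packets
(checked by testing bit tx_burst_sz - 1 against bsz_mask). A minimal
standalone sketch of the same test; the helper name is_full_dense_burst()
is hypothetical and not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the expr test in the tx_bulk fast path: returns 1 when pkts_mask
 * selects packets 0..n-1 with no gaps and n >= tx_burst_sz, i.e. when the
 * whole burst can be enqueued to the ring directly, without buffering. */
static int
is_full_dense_burst(uint64_t pkts_mask, uint64_t bsz_mask)
{
	uint64_t dense = pkts_mask & (pkts_mask + 1);	    /* 0 iff mask is 0...01...1 */
	uint64_t full = (pkts_mask & bsz_mask) ^ bsz_mask;  /* 0 iff bit (tx_burst_sz - 1) set */

	return (dense | full) == 0;
}

int
main(void)
{
	uint64_t bsz_mask = 1LLU << (32 - 1);	/* tx_burst_sz = 32 */

	printf("%d\n", is_full_dense_burst(0xFFFFFFFFLLU, bsz_mask));	/* 1: dense, full burst */
	printf("%d\n", is_full_dense_burst(0xFFFFLLU, bsz_mask));	/* 0: dense but too short */
	printf("%d\n", is_full_dense_burst(0xFFFF0FFFLLU, bsz_mask));	/* 0: gap in the mask */
	return 0;
}

When the test passes, tx_bulk enqueues directly from pkts[]; otherwise it
falls back to buffering the packets one by one.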
 lib/librte_port/rte_port_ring.c | 179 +++++++++++++++++++++++++++++++++++++++
 lib/librte_port/rte_port_ring.h |  16 ++++
 2 files changed, 195 insertions(+)

diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index ba1fdcb..89b9641 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -31,6 +31,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <string.h>
+#include <stdint.h>
 
 #include <rte_mbuf.h>
 #include <rte_ring.h>
@@ -233,6 +234,176 @@ rte_port_ring_writer_free(void *port)
 }
 
 /*
+ * Port RING Writer Nodrop
+ */
+struct rte_port_ring_writer_nodrop {
+	struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
+	struct rte_ring *ring;
+	uint32_t tx_burst_sz;
+	uint32_t tx_buf_count;
+	uint64_t bsz_mask;
+	uint64_t n_retries;
+};
+
+static void *
+rte_port_ring_writer_nodrop_create(void *params, int socket_id)
+{
+	struct rte_port_ring_writer_nodrop_params *conf =
+		(struct rte_port_ring_writer_nodrop_params *) params;
+	struct rte_port_ring_writer_nodrop *port;
+
+	/* Check input parameters */
+	if ((conf == NULL) ||
+		(conf->ring == NULL) ||
+		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
+		RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
+		return NULL;
+	}
+
+	/* Memory allocation */
+	port = rte_zmalloc_socket("PORT", sizeof(*port),
+		RTE_CACHE_LINE_SIZE, socket_id);
+	if (port == NULL) {
+		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+		return NULL;
+	}
+
+	/* Initialization */
+	port->ring = conf->ring;
+	port->tx_burst_sz = conf->tx_burst_sz;
+	port->tx_buf_count = 0;
+	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+
+	/*
+	 * When n_retries is 0, we should wait for every packet to be sent,
+	 * no matter how many retries it takes. To limit the number of
+	 * branches in the fast path, we use UINT64_MAX instead of branching.
+	 */
+	port->n_retries = (conf->n_retries == 0) ?
+		UINT64_MAX : conf->n_retries;
+
+	return port;
+}
+
+static inline void
+send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
+{
+	uint32_t nb_tx = 0, i;
+
+	nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
+		p->tx_buf_count);
+
+	/* We sent all the packets on the first try */
+	if (nb_tx >= p->tx_buf_count)
+		return;
+
+	for (i = 0; i < p->n_retries; i++) {
+		nb_tx += rte_ring_sp_enqueue_burst(p->ring,
+			(void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+
+		/* We sent all the packets in more than one try */
+		if (nb_tx >= p->tx_buf_count)
+			return;
+	}
+
+	/* We didn't send all the packets within the maximum allowed attempts */
+	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+		rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+	p->tx_buf_count = 0;
+}
+
+static int
+rte_port_ring_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
+{
+	struct rte_port_ring_writer_nodrop *p =
+		(struct rte_port_ring_writer_nodrop *) port;
+
+	p->tx_buf[p->tx_buf_count++] = pkt;
+	if (p->tx_buf_count >= p->tx_burst_sz)
+		send_burst_nodrop(p);
+
+	return 0;
+}
+
+static int
+rte_port_ring_writer_nodrop_tx_bulk(void *port,
+	struct rte_mbuf **pkts,
+	uint64_t pkts_mask)
+{
+	struct rte_port_ring_writer_nodrop *p =
+		(struct rte_port_ring_writer_nodrop *) port;
+
+	uint64_t bsz_mask = p->bsz_mask;
+	uint32_t tx_buf_count = p->tx_buf_count;
+	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
+		((pkts_mask & bsz_mask) ^ bsz_mask);
+
+	if (expr == 0) {
+		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+		uint32_t n_pkts_ok;
+
+		if (tx_buf_count)
+			send_burst_nodrop(p);
+
+		n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
+			n_pkts);
+
+		if (n_pkts_ok >= n_pkts)
+			return 0;
+
+		/*
+		 * If we didn't manage to send all packets in a single burst,
+		 * move the remaining packets to the buffer and call send
+		 * burst.
+		 */
+		for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
+			struct rte_mbuf *pkt = pkts[n_pkts_ok];
+
+			p->tx_buf[p->tx_buf_count++] = pkt;
+		}
+		send_burst_nodrop(p);
+	} else {
+		for ( ; pkts_mask; ) {
+			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+			uint64_t pkt_mask = 1LLU << pkt_index;
+			struct rte_mbuf *pkt = pkts[pkt_index];
+
+			p->tx_buf[tx_buf_count++] = pkt;
+			pkts_mask &= ~pkt_mask;
+		}
+
+		p->tx_buf_count = tx_buf_count;
+		if (tx_buf_count >= p->tx_burst_sz)
+			send_burst_nodrop(p);
+	}
+
+	return 0;
+}
+
+static int
+rte_port_ring_writer_nodrop_flush(void *port)
+{
+	struct rte_port_ring_writer_nodrop *p =
+		(struct rte_port_ring_writer_nodrop *) port;
+
+	if (p->tx_buf_count > 0)
+		send_burst_nodrop(p);
+
+	return 0;
+}
+
+static int
+rte_port_ring_writer_nodrop_free(void *port)
+{
+	if (port == NULL) {
+		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	rte_port_ring_writer_nodrop_flush(port);
+	rte_free(port);
+
+	return 0;
+}
+
+/*
  * Summary of port operations
  */
 struct rte_port_in_ops rte_port_ring_reader_ops = {
@@ -248,3 +419,11 @@ struct rte_port_out_ops rte_port_ring_writer_ops = {
 	.f_tx_bulk = rte_port_ring_writer_tx_bulk,
 	.f_flush = rte_port_ring_writer_flush,
 };
+
+struct rte_port_out_ops rte_port_ring_writer_nodrop_ops = {
+	.f_create = rte_port_ring_writer_nodrop_create,
+	.f_free = rte_port_ring_writer_nodrop_free,
+	.f_tx = rte_port_ring_writer_nodrop_tx,
+	.f_tx_bulk = rte_port_ring_writer_nodrop_tx_bulk,
+	.f_flush = rte_port_ring_writer_nodrop_flush,
+};
diff --git a/lib/librte_port/rte_port_ring.h b/lib/librte_port/rte_port_ring.h
index 009dcf8..89a219b 100644
--- a/lib/librte_port/rte_port_ring.h
+++ b/lib/librte_port/rte_port_ring.h
@@ -75,6 +75,22 @@ struct rte_port_ring_writer_params {
 /** ring_writer port operations */
 extern struct rte_port_out_ops rte_port_ring_writer_ops;
 
+/** ring_writer_nodrop port parameters */
+struct rte_port_ring_writer_nodrop_params {
+	/** Underlying single producer ring that has to be pre-initialized */
+	struct rte_ring *ring;
+
+	/** Recommended burst size to the ring. The actual burst size can be
+	bigger or smaller than this value. */
+	uint32_t tx_burst_sz;
+
+	/** Maximum number of retries, 0 for no limit */
+	uint32_t n_retries;
+};
+
+/** ring_writer_nodrop port operations */
+extern struct rte_port_out_ops rte_port_ring_writer_nodrop_ops;
+
 #ifdef __cplusplus
 }
 #endif
-- 
1.7.9.5
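
For completeness, a minimal usage sketch (not part of the patch). It
assumes a pre-initialized single-producer ring r and an mbuf pkt obtained
elsewhere; the names create_nodrop_port, conf, and port, and the chosen
sizes are illustrative only, and error handling is omitted:

#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_port_ring.h>

/* Hypothetical example: create a nodrop writer over a single-producer
 * ring, allocated on NUMA socket 0. */
static void *
create_nodrop_port(struct rte_ring *r)
{
	struct rte_port_ring_writer_nodrop_params conf = {
		.ring = r,		/* pre-initialized single producer ring */
		.tx_burst_sz = 32,	/* recommended burst size */
		.n_retries = 8,		/* 0 would mean no limit on retries */
	};

	/* Second argument is the NUMA socket for the port's allocation */
	return rte_port_ring_writer_nodrop_ops.f_create(&conf, 0);
}

/* On the transmit path:
 *	rte_port_ring_writer_nodrop_ops.f_tx(port, pkt);
 * and when done:
 *	rte_port_ring_writer_nodrop_ops.f_flush(port);
 *	rte_port_ring_writer_nodrop_ops.f_free(port);
 */

In normal use the ops structure is plugged into a packet framework
pipeline as an output port rather than called directly; the direct calls
above are only meant to show the port's life cycle.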