Hi,

I have been working on the DPDK memif driver and encountered the following issues. Please consider these patches for review.

Firstly, the zero-copy code in drivers/net/memif/rte_eth_memif.c can readily overflow the ring buffer for mbufs. This patch clamps the n_slots passed to rte_pktmbuf_alloc_bulk() so that it does not run off the end of the buffers array (a small worked example of the wrap-around arithmetic is appended at the end of this mail).

@@ -524,14 +1196,23 @@ refill:
 	 */
 	head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
 	n_slots = ring_size - head + mq->last_tail;
-
-	if (n_slots < 32)
-		goto no_free_mbufs;
+	// CTAM - critical BUG FIX !
+#if 1
+	// only work within mask so alloc_bulk does not overrun the buffers array
+	if ((head&mask) + n_slots > ring_size)
+	{
+		n_slots = ring_size - (head&mask);
+	}
+	if (n_slots <= 0)
+		goto no_free_mbufs;
+#else
+	if (n_slots < 32)
+		goto no_free_mbufs;
+#endif

 	ret = rte_pktmbuf_alloc_bulk(mq->mempool, &mq->buffers[head & mask], n_slots);
 	if (unlikely(ret < 0))
 		goto no_free_mbufs;
-
 	while (n_slots--) {
 		s0 = head++ & mask;
 		if (n_slots > 0)

Secondly, a segfault can occur in the stats routine during initialisation, due to a null pointer dereference: the queues may not have been set up yet when the stats are read.

@@ -1404,10 +2167,14 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	/* RX stats */
 	for (i = 0; i < nq; i++) {
 		mq = dev->data->rx_queues[i];
-		stats->q_ipackets[i] = mq->n_pkts;
-		stats->q_ibytes[i] = mq->n_bytes;
-		stats->ipackets += mq->n_pkts;
-		stats->ibytes += mq->n_bytes;
+		// CTAM test this, as it may not yet initialised!
+		if (mq)
+		{
+			stats->q_ipackets[i] = mq->n_pkts;
+			stats->q_ibytes[i] = mq->n_bytes;
+			stats->ipackets += mq->n_pkts;
+			stats->ibytes += mq->n_bytes;
+		}
 	}

 	tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings :

This can occur in several places in the stats code, and the same null dereference guard is needed in each of them (a sketch of the analogous guard for the TX loop is appended below).

Hope this helps someone else.

John
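P.S. Here is a minimal, stand-alone sketch of the wrap-around arithmetic behind the first fix. It is not driver code; the numbers (ring_size = 256, head = 300, last_tail = 290) are hypothetical and chosen only to show how the unclamped n_slots makes rte_pktmbuf_alloc_bulk() write past the end of mq->buffers[]:

/* demonstrates the overrun and the proposed clamp - not driver code */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ring_size = 256;       /* power of two, as in memif */
	uint16_t mask = ring_size - 1;
	uint16_t head = 300;            /* free-running slot index, already wrapped */
	uint16_t last_tail = 290;       /* free-running as well */

	/* same formula as the driver: number of free slots in the ring */
	uint16_t n_slots = ring_size - head + last_tail;    /* = 246 */
	uint16_t start = head & mask;                        /* = 44  */

	printf("unclamped: %u pointers written from index %u into a %u-entry array (overrun by %u)\n",
	       (unsigned)n_slots, (unsigned)start, (unsigned)ring_size,
	       (unsigned)(start + n_slots - ring_size));

	/* the clamp: only refill up to the end of the array on this pass;
	 * the slots before 'start' are picked up on the next refill after head wraps */
	if (start + n_slots > ring_size)
		n_slots = ring_size - start;

	printf("clamped:   %u pointers written from index %u\n",
	       (unsigned)n_slots, (unsigned)start);
	return 0;
}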
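And for completeness, the analogous guard in the TX loop of memif_stats_get() would look roughly like this. This is an untested sketch and not part of the patch above; the surrounding loop is quoted from memory, so treat it as illustrative only:

	/* TX stats */
	for (i = 0; i < nq; i++) {
		mq = dev->data->tx_queues[i];
		/* queue may not be set up yet, same as on the RX side */
		if (mq) {
			stats->q_opackets[i] = mq->n_pkts;
			stats->q_obytes[i] = mq->n_bytes;
			stats->opackets += mq->n_pkts;
			stats->obytes += mq->n_bytes;
		}
	}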