*/
head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
n_slots = ring_size - head + mq->last_tail;
-
- if (n_slots < 32)
- goto no_free_mbufs;
+ // CTAM - critical BUG FIX !
+#if 1
+ // only work within mask so alloc_bulk does not overrun the buffers array
+ if ((head&mask) + n_slots > ring_size)
+ {
+ n_slots = ring_size - (head&mask);
+ }
+ if (n_slots <=0)
+ goto no_free_mbufs;
+#else
+ if (n_slots < 32)
+ goto no_free_mbufs;
+#endif
ret = rte_pktmbuf_alloc_bulk(mq->mempool, &mq->buffers[head & mask], n_slots);
if (unlikely(ret < 0))
goto no_free_mbufs;
-
while (n_slots--) {
s0 = head++ & mask;
if (n_slots > 0)
Secondly, a segfault can occur in the stats routine during initialisation, due to a null-pointer dereference (the per-queue pointers may not yet be populated when stats are first requested).
@@ -1404,10 +2167,14 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* RX stats */
for (i = 0; i < nq; i++) {
mq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = mq->n_pkts;
- stats->q_ibytes[i] = mq->n_bytes;
- stats->ipackets += mq->n_pkts;
- stats->ibytes += mq->n_bytes;
+ // CTAM test this, as it may not yet initialised!
+ if (mq)
+ {
+ stats->q_ipackets[i] = mq->n_pkts;
+ stats->q_ibytes[i] = mq->n_bytes;
+ stats->ipackets += mq->n_pkts;
+ stats->ibytes += mq->n_bytes;
+ }
}
tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings :
This null-pointer dereference can occur in several places in the stats code (the RX, TX, and ring-iteration loops all index queue arrays the same way), so null-check guards are needed in all of those locations.
Hope this helps someone else.
John