* resend same queue tx burst
@ 2024-08-05 14:01 Lokesh Chakka
From: Lokesh Chakka @ 2024-08-05 14:01 UTC (permalink / raw)
To: users
[-- Attachment #1.1: Type: text/plain, Size: 469 bytes --]
Hi,

I've written a small piece of code (PFA) for sending packets, and I'm facing two issues.

First, rte_pktmbuf_append() fails when I pass dev_info.max_mtu, but succeeds when I pass 1500. Is there a way to append dev_info.max_mtu (9702) bytes?
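My guess is that the limit is the mempool's data room rather than the MTU itself: the pool is created with RTE_MBUF_DEFAULT_BUF_SIZE, which leaves roughly 2048 bytes of tailroom per mbuf, so appending 9702 bytes fails. A rough, untested sketch of what I think might be needed (assuming max_mtu really is 9702, as dev_info reports here):

    /* size the data room for jumbo frames instead of the 2048-byte default */
    const uint16_t data_room = 9702 + RTE_PKTMBUF_HEADROOM;
    struct rte_mempool *jumbo_pool = rte_pktmbuf_pool_create( mem_pool_name,
            num_of_pkts_per_queue, RTE_MEMPOOL_CACHE_MAX_SIZE, 0, data_room, sock );
    /* presumably the port MTU must also be raised before rte_eth_dev_start() */
    if( rte_eth_dev_set_mtu( port_id, 9702 ) != 0 )
            rte_exit( EXIT_FAILURE, "rte_eth_dev_set_mtu failed\n" );

Is that the right approach?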
Second, I want to transmit the same burst of mbufs repeatedly with rte_eth_tx_burst(). The first call sends all 4096 packets, but the second call fails with an error (log attached). Can someone help me understand what is going wrong?
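From the API docs my understanding is that rte_eth_tx_burst() takes ownership of the mbufs and the PMD frees them once they are transmitted, so passing the same pointers again would be a use-after-free unless I hold an extra reference. Is bumping the reference count before every burst the intended pattern? An untested sketch of what I mean:

    /* take an extra reference so the PMD's free after transmit
       does not return the mbufs to the pool */
    for( uint32_t i = 0; i < num_of_pkts_per_queue; i++ )
            rte_pktmbuf_refcnt_update( mbuf[i], 1 );
    const uint16_t sent = rte_eth_tx_burst( port_id, 0, mbuf, num_of_pkts_per_queue );
    /* drop the extra reference on any mbufs the PMD did not accept */
    for( uint32_t i = sent; i < num_of_pkts_per_queue; i++ )
            rte_pktmbuf_refcnt_update( mbuf[i], -1 );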
Thanks & Regards
--
Lokesh Chakka.
[-- Attachment #2: log.txt --]
[-- Type: text/plain, Size: 1440 bytes --]
gcc pmd.c -g -o pmd -I/usr/local/lib64 -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_mempool && ./pmd 0 s a
EAL: Detected CPU lcores: 84
EAL: Detected NUMA nodes: 1
EAL: Detected shared linkage of DPDK
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'VA'
EAL: VFIO support initialized
EAL: Using IOMMU type 1 (Type 1)
EAL: Ignore mapping IO port bar(1)
EAL: Ignore mapping IO port bar(4)
EAL: Probe PCI driver: net_ice (8086:579d) device: 0000:1b:00.2 (socket 0)
ice_dev_init(): Failed to read device serial number
ice_load_pkg_type(): Active package is: 1.3.36.0, ICE OS Default Package (double VLAN mode)
EAL: Ignore mapping IO port bar(1)
EAL: Ignore mapping IO port bar(4)
EAL: Probe PCI driver: net_ice (8086:579d) device: 0000:1b:00.3 (socket 0)
ice_dev_init(): Failed to read device serial number
ice_load_pkg_type(): Active package is: 1.3.36.0, ICE OS Default Package (double VLAN mode)
TELEMETRY: No legacy callbacks, legacy socket not created
main 50 port id: 0 num_of_pkts_per_queue: 4096
ice_set_rx_function(): Using AVX2 Vector Rx (port 0).
ice_interrupt_handler(): OICR: MDD event
ice_interrupt_handler(): OICR: MDD event
ice_interrupt_handler(): OICR: MDD event
ice_interrupt_handler(): OICR: MDD event
main 105 port: 0 packet: a sent 4096 packets
ice_interrupt_handler(): OICR: MDD event
2 No such file or directory
EAL: Error - exiting with code: 1
Cause: main 103 rte_eth_tx_burst port id: 0
[-- Attachment #3: pmd.c --]
[-- Type: text/x-csrc, Size: 5668 bytes --]
// usage: ./pmd <port_id> <s|r> <fill_char>   e.g. ./pmd 0 s a
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <pthread.h>
#include <signal.h>

_Bool received_sigint = false;

struct rte_eth_conf port_conf =
{
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE, },
	.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE, },
};

static void SIGINT_signal_handler( const int signal )
{
	fprintf( stderr, "\b\bReceived Interrupt Signal SIGINT (%d). Exiting...\n", signal );
	received_sigint = true;
}

int main( int argc, char **argv )
{
	uint16_t pkt_count, num_of_pkts_rcvd;
	struct rte_eth_dev_info dev_info;

	if( signal( SIGINT, SIGINT_signal_handler ) == SIG_ERR )
	{
		fprintf( stderr, "%s %d SIGINT signal handling failed\n", __func__, __LINE__ );
		exit( 1 );
	}

	const int ret = rte_eal_init( argc, argv );
	if( ret < 0 )
		rte_exit( EXIT_FAILURE, "Error with EAL initialization\n" );
	argc -= ret;
	argv += ret;

	// argv[1]: port id, argv[2]: 's' = send / 'r' = receive, argv[3]: payload fill byte
	const int port_id = atoi( argv[1] );
	const char send_recv = argv[2][0];

	if( rte_eth_dev_info_get( port_id, &dev_info ) != 0 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_info_get\n", __func__, __LINE__ );
	const int sock = rte_eth_dev_socket_id( port_id );
	if( sock == -1 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_socket_id port id: %u", __func__, __LINE__, port_id );
	// if( rte_eth_promiscuous_enable( port_id ) != 0 )
	//	rte_exit( EXIT_FAILURE, "%s %d rte_eth_promiscuous_enable port id: %u", __func__, __LINE__, port_id );

	char mem_pool_name[20];
	sprintf( mem_pool_name, "pool_%u_%c", port_id, send_recv );

	// one mempool backing a single Rx or Tx queue of 4096 mbufs
	const uint32_t num_of_pkts_per_queue = 4096;
	struct rte_mbuf *mbuf[num_of_pkts_per_queue];
	char *packet_buffer[num_of_pkts_per_queue];
	fprintf( stderr, "%s %d port id: %d num_of_pkts_per_queue: %u\n", __func__, __LINE__, port_id, num_of_pkts_per_queue );
	struct rte_mempool *mem_pool = rte_pktmbuf_pool_create( mem_pool_name, num_of_pkts_per_queue, RTE_MEMPOOL_CACHE_MAX_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, sock );
	if( mem_pool == NULL )
	{
		fprintf( stderr, "%d %s\n", rte_errno, rte_strerror(rte_errno) );
		rte_exit( EXIT_FAILURE, "%s %d rte_pktmbuf_pool_create port id: %u\n", __func__, __LINE__, port_id );
	}

	// allocate all mbufs up front and reserve the payload area in each
	for( pkt_count=0; pkt_count<num_of_pkts_per_queue; pkt_count++ )
	{
		mbuf[pkt_count] = rte_pktmbuf_alloc( mem_pool );
		if( mbuf[pkt_count] == NULL )
			rte_exit( EXIT_FAILURE, "%s %d rte_pktmbuf_alloc port id: %u pkt_count: %u", __func__, __LINE__, port_id, pkt_count );
		const uint16_t pkt_size = 1500;	// appending dev_info.max_mtu (9702) fails here
		packet_buffer[pkt_count] = rte_pktmbuf_append( mbuf[pkt_count], pkt_size );
		if( packet_buffer[pkt_count] == NULL )
		{
			fprintf( stderr, "%d %s pkt_size: %u\n", rte_errno, rte_strerror(rte_errno), pkt_size );
			rte_exit( EXIT_FAILURE, "%s %d rte_pktmbuf_append port id: %u pkt_count: %u\n", __func__, __LINE__, port_id, pkt_count );
		}
	}

	if( send_recv == 's' )
	{
		// send mode: no Rx queues, one Tx queue
		if( rte_eth_dev_configure( port_id, 0, 1, &port_conf ) != 0 )
			rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_configure port id: %u", __func__, __LINE__, port_id );
		if( rte_eth_tx_queue_setup( port_id, 0, num_of_pkts_per_queue, (unsigned int)sock, NULL ) < 0 )
			rte_exit( EXIT_FAILURE, "%s %d rte_eth_tx_queue_setup port id: %u", __func__, __LINE__, port_id );
		// fill every payload with argv[3][0] up to dev_info.max_mtu bytes
		for( pkt_count=0; pkt_count<num_of_pkts_per_queue; pkt_count++ )
		{
			memset( packet_buffer[pkt_count], argv[3][0], dev_info.max_mtu );
			packet_buffer[pkt_count][dev_info.max_mtu-1] = '\0';
			mbuf[pkt_count]->pkt_len = mbuf[pkt_count]->data_len = dev_info.max_mtu;
		}
	}
	else if( send_recv == 'r' )
	{
		// receive mode: one Rx queue, no Tx queues
		if( rte_eth_dev_configure( port_id, 1, 0, &port_conf ) != 0 )
			rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_configure port id: %u", __func__, __LINE__, port_id );
		if( rte_eth_rx_queue_setup( port_id, 0, num_of_pkts_per_queue, (unsigned int)sock, NULL, mem_pool ) < 0 )
			rte_exit( EXIT_FAILURE, "%s %d rte_eth_rx_queue_setup port id: %u", __func__, __LINE__, port_id );
	}
	else
	{
		rte_exit( EXIT_FAILURE, "%s %d Invalid param %c port id: %u", __func__, __LINE__, send_recv, port_id );
	}

	if( rte_eth_dev_start( port_id ) < 0 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_start port id: %u", __func__, __LINE__, port_id );

	while( received_sigint == false )
	{
		if( send_recv == 's' )
		{
			// resend the same 4096 mbufs on every iteration
			if( rte_eth_tx_burst( port_id, 0, mbuf, num_of_pkts_per_queue ) != num_of_pkts_per_queue )
			{
				fprintf( stderr, "%d %s\n", rte_errno, rte_strerror(rte_errno) );
				rte_exit( EXIT_FAILURE, "%s %d rte_eth_tx_burst port id: %u\n", __func__, __LINE__, port_id );
			}
			fprintf( stderr, "%s %d port: %u packet: %c sent %u packets\n", __func__, __LINE__, port_id, argv[3][0], num_of_pkts_per_queue );
		}
		else if( send_recv == 'r' )
		{
			fprintf( stderr, "port: %u rte_eth_rx_burst\n", port_id );
			num_of_pkts_rcvd = rte_eth_rx_burst( port_id, 0, mbuf, num_of_pkts_per_queue );
			// dump at most the first 40 bytes of each received packet
			for( pkt_count=0; pkt_count<num_of_pkts_rcvd; pkt_count++ )
			{
				if( mbuf[pkt_count]->pkt_len != mbuf[pkt_count]->data_len )
					rte_exit( EXIT_FAILURE, "%s %d mbuf[pkt_count]->pkt_len(%u) != mbuf[pkt_count]->data_len(%u) port id: %u", __func__, __LINE__, mbuf[pkt_count]->pkt_len, mbuf[pkt_count]->data_len, port_id );
				if( mbuf[pkt_count]->pkt_len > 40 )
					mbuf[pkt_count]->pkt_len = 40;
				fprintf( stderr, "port: %u pkt count: %u\t", port_id, pkt_count );
				for( uint8_t i=0; i<mbuf[pkt_count]->pkt_len; i++ )
					fprintf( stderr, "%02X ", packet_buffer[pkt_count][i] );
				fprintf( stderr, "\n" );
			}
		}
	}

	// cleanup on SIGINT
	for( pkt_count=0; pkt_count<num_of_pkts_per_queue; pkt_count++ )
	{
		rte_pktmbuf_free( mbuf[pkt_count] );
	}
	rte_mempool_free( mem_pool );
	if( rte_eth_dev_stop( port_id ) < 0 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_stop port id: %u", __func__, __LINE__, port_id );
	return 0;
}