DPDK usage discussions
 help / color / mirror / Atom feed
From: Lokesh Chakka <lvenkatakumarchakka@gmail.com>
To: "Pathak, Pravin" <pravin.pathak@intel.com>
Cc: Bing Zhao <bingz@nvidia.com>, users <users@dpdk.org>
Subject: Re: unable to capture packets
Date: Tue, 8 Oct 2024 03:40:52 +0530	[thread overview]
Message-ID: <CACh--sg8OdZubVN+6vxq+AKKqkDvXd8+D+Tg+Z5sD5OE7y0Apw@mail.gmail.com> (raw)
In-Reply-To: <BL1PR11MB546184E13F04F6F94FFD793BF47D2@BL1PR11MB5461.namprd11.prod.outlook.com>


[-- Attachment #1.1: Type: text/plain, Size: 1882 bytes --]

Please find the full-fledged code attached.


Thanks & Regards
--
Lokesh Chakka.


On Mon, Oct 7, 2024 at 9:32 PM Pathak, Pravin <pravin.pathak@intel.com>
wrote:

> I hope accidentally num_of_pkts_per_queue  is not zero.
>
> Pravin
>
>
>
> *From:* Lokesh Chakka <lvenkatakumarchakka@gmail.com>
> *Sent:* Monday, October 7, 2024 11:36 AM
> *To:* Bing Zhao <bingz@nvidia.com>
> *Cc:* users <users@dpdk.org>
> *Subject:* Re: unable to capture packets
>
>
>
> I've tried TX. It's working fine.
>
> I'm sure problem is only with my code.
>
>
>
> On Mon, 7 Oct, 2024, 20:52 Bing Zhao, <bingz@nvidia.com> wrote:
>
> Which NIC are you using?
>
> Have you tried dpdk-testpmd or l2fwd on your setup to check if the packet
> can be sent and received correctly?
>
>
>
> BR. Bing
>
>
>
> *From:* Lokesh Chakka <lvenkatakumarchakka@gmail.com>
> *Sent:* Monday, October 7, 2024 9:52 PM
> *To:* users <users@dpdk.org>
> *Subject:* unable to capture packets
>
>
>
> *External email: Use caution opening links or attachments*
>
>
>
> hello,
>
>
>
> I'm trying to capture packets using the following piece of code :
>
>
>
> ==========================================================
>
> struct rte_eth_rxconf rxq_conf = dev_info.default_rxconf;
> rxq_conf.offloads = port_conf.rxmode.offloads;
> rte_eth_rx_queue_setup( 0, 0, num_of_pkts_per_queue, (unsigned int)sock,
> &rxq_conf, mem_pool );
> rte_eth_dev_start( 0 );
> while( 1 )
> {
> num_of_pkts_rcvd = rte_eth_rx_burst( 0, 0, mbuf, num_of_pkts_per_queue );
> fprintf( stderr, "num_of_pkts_rcvd: %u\n", num_of_pkts_rcvd );
> }
>
> ==========================================================
>
> It's always printing num_of_pkts_rcvd as 0.
>
>
>
> Can someone help me understand what the issue is ....
>
>
> Thanks & Regards
> --
> Lokesh Chakka.
>
>

[-- Attachment #1.2: Type: text/html, Size: 6438 bytes --]

[-- Attachment #2: pmd.c --]
[-- Type: text/x-csrc, Size: 4481 bytes --]

//pmd 0 s a
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

#include <pthread.h>
#include <signal.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>

/*
 * Set asynchronously by the SIGINT handler and polled by the main loop.
 * volatile sig_atomic_t is the only object type the C standard guarantees
 * can be written from a signal handler and read from normal code without
 * undefined behavior (C11 5.1.2.3; CERT SIG31-C) -- a plain _Bool is not.
 */
volatile sig_atomic_t received_sigint = 0;

/* Minimal port configuration: single RX/TX queue, no RSS, no offloads. */
struct rte_eth_conf port_conf =
{
	.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE, },
	.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE, },
};

/*
 * SIGINT handler: note the interrupt request so the main loop can exit.
 *
 * Only async-signal-safe functions may be called from a signal handler
 * (CERT SIG30-C); fprintf() is not one of them, so emit a fixed notice
 * with write(2) -- which is async-signal-safe -- and set the flag.
 */
static void SIGINT_signal_handler( const int signal )
{
	static const char msg[] = "\nReceived Interrupt Signal SIGINT. Exiting...\n";
	(void) signal;
	/* Best-effort notice: a short or failed write is deliberately ignored. */
	if( write( STDERR_FILENO, msg, sizeof msg - 1 ) < 0 )
	{
		/* nothing sensible to do inside a handler */
	}
	received_sigint = true;
}

int main( int argc, char **argv )
{
	uint16_t pkt_count, num_of_pkts_rcvd;
	struct rte_eth_dev_info dev_info;

	if( signal( SIGINT, SIGINT_signal_handler ) == SIG_ERR )
	{
		fprintf( stderr, "%s %d SIGINT signal handling failed\n", __func__, __LINE__ );
		exit( 1 );
	}
	const int ret = rte_eal_init( argc, argv );
	if( ret < 0 )
		rte_exit( EXIT_FAILURE, "Error with EAL initialization\n" );
	argc -= ret;
	argv += ret;
	const int port_id = atoi( argv[1] );
	if( rte_eth_dev_info_get( port_id, &dev_info ) != 0 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_info_get\n", __func__, __LINE__ );
	uint16_t fetched_mtu = 0;
	if( rte_eth_dev_get_mtu( port_id, &fetched_mtu ) != 0 )
	{
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_stop port id: %u errno: %u Error: %s\n", __func__, __LINE__, port_id, rte_errno, rte_strerror( rte_errno ) );
	}
	port_conf.rxmode.mtu = dev_info.max_mtu = fetched_mtu;
	const int sock = rte_eth_dev_socket_id( port_id );
	if( sock == -1 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_socket_id port id: %u\n", __func__, __LINE__, port_id );
	char mem_pool_name[20];
	sprintf( mem_pool_name, "pool_%u_r", port_id );
	const uint32_t num_of_pkts_per_queue = 4096;
	struct rte_mbuf *mbuf[num_of_pkts_per_queue];
	char *packet_buffer[num_of_pkts_per_queue];
	fprintf( stderr, "%s %d port id: %d num_of_pkts_per_queue: %u\n", __func__, __LINE__, port_id, num_of_pkts_per_queue );
	struct rte_mempool *mem_pool = rte_pktmbuf_pool_create( mem_pool_name, num_of_pkts_per_queue, RTE_MEMPOOL_CACHE_MAX_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, sock );
	if( mem_pool == NULL )
	{
		fprintf( stderr, "%d %s\n", rte_errno, rte_strerror(rte_errno) );
		rte_exit( EXIT_FAILURE, "%s %d rte_pktmbuf_pool_create port id: %u\n", __func__, __LINE__, port_id );
	}
	if( rte_eth_dev_configure( port_id, 1, 0, &port_conf ) != 0 )
	{
		fprintf( stderr, "%d %s\n", rte_errno, rte_strerror(rte_errno) );
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_configure port id: %u\n", __func__, __LINE__, port_id );
	}
	struct rte_eth_rxconf rxq_conf;
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;
	if( rte_eth_rx_queue_setup( port_id, 0, num_of_pkts_per_queue, (unsigned int)sock, &rxq_conf, mem_pool ) < 0 )
	{
		fprintf( stderr, "%d %s\n", rte_errno, rte_strerror(rte_errno) );
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_rx_queue_setup port id: %u\n", __func__, __LINE__, port_id );
	}
	if( rte_eth_dev_start( port_id ) < 0 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_start port id: %u\n", __func__, __LINE__, port_id );
	const time_t begin_time = time( NULL );
	uint64_t pkts_sent_till_now = 0;
	for( int count=0; count<2; count++ )
	{
		num_of_pkts_rcvd = rte_eth_rx_burst( port_id, 0, mbuf, num_of_pkts_per_queue );
		fprintf( stderr, "port: %u rte_eth_rx_burst %u num_of_pkts_rcvd\n", port_id, num_of_pkts_rcvd );
		pkts_sent_till_now += num_of_pkts_rcvd; 
		for( pkt_count=0; pkt_count<num_of_pkts_rcvd; pkt_count++ )
		{
			if( mbuf[pkt_count]->pkt_len != mbuf[pkt_count]->data_len )
				rte_exit( EXIT_FAILURE, "%s %d mbuf[pkt_count]->pkt_len(%u) != mbuf[pkt_count]->data_len(%u) port id: %u\n", __func__, __LINE__, mbuf[pkt_count]->pkt_len, mbuf[pkt_count]->data_len, port_id );
			if( mbuf[pkt_count]->pkt_len > 40 )
				mbuf[pkt_count]->pkt_len = 40;
			fprintf( stderr, "port: %u pkt count: %u\t", port_id, pkt_count );
			for( uint8_t i=0; i<mbuf[pkt_count]->pkt_len; i++ )
				fprintf( stderr, "%02X ", packet_buffer[pkt_count][i] );
			fprintf( stderr, "\n" );
		}
	}
	const time_t end_time =time( NULL );
	const time_t elapsed_time = end_time-begin_time;
	const uint64_t bw = ( 2048*8*pkts_sent_till_now )/ elapsed_time;
	fprintf( stderr, "%s %d time : %ld total pkts sent: %lu bandwidth: %lu\n", __func__, __LINE__, elapsed_time, pkts_sent_till_now, bw/1024/1024/1000 );
	rte_pktmbuf_free_bulk( mbuf, num_of_pkts_per_queue );
	rte_mempool_free( mem_pool );
	if( rte_eth_dev_stop( port_id ) < 0 )
		rte_exit( EXIT_FAILURE, "%s %d rte_eth_dev_stop port id: %u\n", __func__, __LINE__, port_id );
	return 0;
}

  reply	other threads:[~2024-10-07 22:11 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-07 13:52 Lokesh Chakka
2024-10-07 15:21 ` Bing Zhao
2024-10-07 15:34   ` Stephen Hemminger
2024-10-07 15:36   ` Lokesh Chakka
2024-10-07 16:02     ` Pathak, Pravin
2024-10-07 22:10       ` Lokesh Chakka [this message]
2024-10-08  0:23         ` Stephen Hemminger
2024-10-09 12:15           ` Lokesh Chakka
2024-10-09 13:27             ` Van Haaren, Harry

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CACh--sg8OdZubVN+6vxq+AKKqkDvXd8+D+Tg+Z5sD5OE7y0Apw@mail.gmail.com \
    --to=lvenkatakumarchakka@gmail.com \
    --cc=bingz@nvidia.com \
    --cc=pravin.pathak@intel.com \
    --cc=users@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).