[dpdk-users] Intel 10G X550T NIC takes a long time (25s+) after DPDK initialization to pass traffic correctly.
From: Jun Gan @ 2019-07-23 23:33 UTC
To: users
Hi,
We have an Intel 10G X550T NIC; after binding it to igb_uio, we launched our DPDK application.
Although I don’t see any error reported while calling “rte_eth_tx_burst”, I cannot see any packets on the peer side until at least 25 seconds later, after which everything works normally.
I don’t see this issue with my Mellanox ConnectX-5 NIC, using the same code.
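One difference from the ConnectX-5 setup that might matter: the X550T is a copper 10GBASE-T port, so I am not yet sure whether the delay is simply the link autonegotiating/training after rte_eth_dev_start(). A minimal sketch of how I could poll the link state to check that (eth_port_ is the same port variable used in the init code below; the 30-second cap is an arbitrary value I picked, and rte_eth_link_get_nowait() still returns void on the DPDK release we build against):

#include <rte_cycles.h>
#include <rte_ethdev.h>

// Poll the port until it reports link up, or give up after ~30 seconds.
static bool WaitForLinkUp(uint16_t port_id) {
  for (int i = 0; i < 300; ++i) {  // 300 * 100 ms = 30 s
    struct rte_eth_link link;
    rte_eth_link_get_nowait(port_id, &link);
    if (link.link_status == ETH_LINK_UP) {
      return true;
    }
    rte_delay_ms(100);
  }
  return false;
}

If the port really does need ~25 seconds before link.link_status flips to ETH_LINK_UP, that would at least explain why the first packets never reach the peer.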
My DPDK init code, nothing special I think:
Status InitDpdk() {
  Status status = SpdkHelper::Init();
  if (status != kStatusOk) {
    return GeneralError::EC_FAILURE;
  }

  // Route DPDK log output to stderr.
  int rc = rte_openlog_stream(stderr);
  assert(rc == 0);

  int dev_socket_id = rte_eth_dev_socket_id(eth_port_);
  rte_eth_dev_info_get(eth_port_, &dev_info_);
  if ((dev_info_.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS) == 0) {
    return GeneralError::EC_FAILURE;
  }

  int size_pktpool =
      2048  // dev_info_.rx_desc_lim.nb_max + dev_info_.tx_desc_lim.nb_max
      + 512 /* PKTPOOL_EXTRA_SIZE */;

  // Packet mem pool for tx
  tx_pkt_pool_ = rte_pktmbuf_pool_create("pkt_pool_tx",
                                         size_pktpool,
                                         32 /* per lcore cache size */,
                                         0,
                                         RTE_MBUF_DEFAULT_BUF_SIZE,
                                         dev_socket_id);
  assert(tx_pkt_pool_ != NULL);

  // Packet mem pool for rx
  rx_pkt_pool_ = rte_pktmbuf_pool_create("pkt_pool_rx",
                                         size_pktpool,
                                         32 /* per lcore cache size */,
                                         0,
                                         RTE_MBUF_DEFAULT_BUF_SIZE,
                                         dev_socket_id);
  assert(rx_pkt_pool_ != NULL);

  struct rte_eth_conf cfg_port;
  memset(&cfg_port, 0, sizeof(cfg_port));
  cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
  cfg_port.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;

  // Reference for how to offload:
  // https://doc.dpdk.org/guides/prog_guide/mbuf_lib.html
  // Section "Meta Information".
  // Also rte_mbuf.h and examples.
  // Offload Tx checksums if possible (per-mbuf metadata is still needed on
  // the Tx path; see the sketch after this function).
  std::string offloads;
  if (dev_info_.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
    offloads += " IPv4 Tx cksum";
    cfg_port.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
  }
  if (dev_info_.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
    offloads += " UDP Tx cksum";
    cfg_port.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
  }
  // Offload Rx checksums if possible
  if (dev_info_.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
    offloads += " IPv4 Rx cksum";
    cfg_port.rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
  }
  if (dev_info_.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
    offloads += " UDP Rx cksum";
    cfg_port.rxmode.offloads |= DEV_RX_OFFLOAD_UDP_CKSUM;
  }

  // One Rx queue and one Tx queue.
  rc = rte_eth_dev_configure(eth_port_, 1, 1, &cfg_port);
  assert(rc == 0);
  if (rc) {
    return GeneralError::EC_FAILURE;
  }

  uint16_t n_rx_desc = 1024;
  uint16_t n_tx_desc = 1024;
  rc = rte_eth_dev_adjust_nb_rx_tx_desc(eth_port_, &n_rx_desc, &n_tx_desc);
  assert(rc == 0);
  if (rc) {
    return GeneralError::EC_FAILURE;
  }

  struct rte_eth_rxconf rxconf;
  rxconf = dev_info_.default_rxconf;
  rc = rte_eth_rx_queue_setup(eth_port_, 0, n_rx_desc,
                              dev_socket_id, &rxconf, rx_pkt_pool_);
  assert(rc == 0);
  if (rc) {
    return GeneralError::EC_FAILURE;
  }

  struct rte_eth_txconf txconf;
  txconf = dev_info_.default_txconf;
  rc = rte_eth_tx_queue_setup(eth_port_, 0, n_tx_desc,
                              dev_socket_id, &txconf);
  assert(rc == 0);
  if (rc) {
    return GeneralError::EC_FAILURE;
  }

  rc = rte_eth_dev_start(eth_port_);
  assert(rc == 0);
  if (rc) {
    return GeneralError::EC_FAILURE;
  }

  // RTE Ring for our TX packets, with multiple producers but a single
  // consumer (usage sketch after this function).
  tx_ring_ = rte_ring_create("TX Ring", tx_ring_size_,
                             dev_socket_id, RING_F_SC_DEQ);
  assert(tx_ring_ != nullptr);
  if (tx_ring_ == nullptr) {
    return GeneralError::EC_FAILURE;
  }

  return kStatusOk;
}
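For completeness, since the port config above enables IPv4/UDP Tx checksum offloads, the Tx path also has to set the per-mbuf metadata described in the "Meta Information" section of the mbuf guide. A rough sketch of that per-packet setup (the helper name is made up, and older DPDK releases spell the headers struct ipv4_hdr / udp_hdr while newer ones prefix them with rte_; this is illustrative rather than our exact code):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_udp.h>

// Per-packet metadata needed when relying on hardware IPv4/UDP Tx checksums.
static void PrepareUdpTxChecksums(struct rte_mbuf *m,
                                  struct ipv4_hdr *ip,
                                  struct udp_hdr *udp) {
  m->l2_len = sizeof(struct ether_hdr);
  m->l3_len = sizeof(struct ipv4_hdr);
  m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;

  // Hardware fills both checksums; the IP checksum is zeroed and the UDP
  // checksum is seeded with the pseudo-header checksum.
  ip->hdr_checksum = 0;
  udp->dgram_cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}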
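And for reference, how the TX ring created at the end of InitDpdk() is meant to be used: multiple threads enqueue mbufs (default multi-producer behaviour), and a single I/O thread drains the ring and hands the packets to rte_eth_tx_burst(). A minimal sketch (the burst size of 32 and the drop-on-full handling are placeholders, not our exact code):

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ring.h>

// Producer side: any thread may enqueue, since the ring was created without
// RING_F_SP_ENQ.
static void QueuePacket(struct rte_ring *tx_ring, struct rte_mbuf *m) {
  if (rte_ring_enqueue(tx_ring, m) != 0) {
    rte_pktmbuf_free(m);  // ring full: drop the packet
  }
}

// Consumer side: the single I/O thread drains the ring and transmits.
static void DrainTxRing(struct rte_ring *tx_ring, uint16_t port_id) {
  struct rte_mbuf *burst[32];
  unsigned int n = rte_ring_dequeue_burst(tx_ring, (void **)burst, 32, NULL);
  uint16_t sent = rte_eth_tx_burst(port_id, 0, burst, (uint16_t)n);
  for (uint16_t i = sent; i < n; ++i) {
    rte_pktmbuf_free(burst[i]);  // could not transmit this time: drop
  }
}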
I would really appreciate it if someone could help!
--
Jun Gan