DPDK usage discussions
Packets Not Received After Redirection
From: Sid ali cherrati @ 2025-02-18 13:25 UTC
  To: users


Dear DPDK Team,

I am using DPDK 23.11 with an X540-AT (ixgbe) NIC.

I have run into an issue where this NIC does not accept the combination of
two flow rules I need (a QUEUE rule for matching traffic plus a lower-priority
drop-all rule). To work around this, I tried installing a single rule that
redirects packets matching specific criteria (destination IP address and UDP
port) to, let's say, queue 5. Initially this works for a few packets, but
after that no more packets are received.
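
For reference, the rule I am trying to install is equivalent to the following
testpmd command (assuming port 0, queue index 5, and the IP/UDP port values
used in the code below):

flow create 0 ingress pattern eth / ipv4 dst is 10.81.16.111 / udp dst is 1234 / end actions queue index 5 / end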

I am unsure whether the issue is related to RX interrupts, although I have
tested without interrupts and the problem persists. I also suspected that if
queue 0 fills up (since the drop rule is not functioning), the filter might
stop working, so I tried draining that queue to see if it would resolve the
issue. However, even after that, the problem remains.
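
In case it is useful, below is roughly how I would check the per-port and
per-queue RX counters to see where the packets end up (a minimal sketch, not
part of the code further down; it assumes per-queue stats are exposed for the
first RTE_ETHDEV_QUEUE_STAT_CNTRS queues):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Minimal sketch: print basic RX counters to see which queue is receiving
 * packets and whether the NIC reports drops (imissed). */
static void dump_rx_stats(uint16_t port_id)
{
    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) != 0)
        return;

    printf("port %u: ipackets=%"PRIu64" imissed=%"PRIu64" ierrors=%"PRIu64"\n",
           port_id, stats.ipackets, stats.imissed, stats.ierrors);
    for (int q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
        printf("  rxq %d: packets=%"PRIu64" errors=%"PRIu64"\n",
               q, stats.q_ipackets[q], stats.q_errors[q]);
}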

I am new to DPDK, so please let me know if I am doing something wrong, or if
you have any suggestions for addressing this issue.

Below is the code I have implemented (flow rule creation, port setup, and the
main receive loop):

#include "../include/flow.h"
#include "../include/port.h"
#include <stdio.h>
#include <string.h>

int flow_filtering(uint16_t port_id, uint32_t ip_addr, uint16_t udp_port) {
    struct rte_flow_error error;
    struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
    struct rte_flow_item pattern[4];
    struct rte_flow_action action[2];
    struct rte_flow *flow;

    // Ethernet item (match any Ethernet frame)
    memset(pattern, 0, sizeof(pattern));
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

    // IPv4 item: match on the destination address only
    struct rte_flow_item_ipv4 ipv4_spec = { .hdr.dst_addr = RTE_BE32(ip_addr) };
    struct rte_flow_item_ipv4 ipv4_mask = { .hdr.dst_addr = RTE_BE32(0xFFFFFFFF) };
    pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
    pattern[1].spec = &ipv4_spec;
    pattern[1].mask = &ipv4_mask;

    // UDP item: match on the destination port only
    struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(udp_port) };
    struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xFFFF) };
    pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
    pattern[2].spec = &udp_spec;
    pattern[2].mask = &udp_mask;

    // End of pattern
    pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

    // Action: send matching packets to queue RX_ID
    memset(action, 0, sizeof(action));
    action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
    struct rte_flow_action_queue queue_action = { .index = RX_ID };
    action[0].conf = &queue_action;
    action[1].type = RTE_FLOW_ACTION_TYPE_END;

    if (rte_flow_validate(port_id, &attr, pattern, action, &error) != 0) {
        printf("Validation failed: %s\n", error.message);
        return -1;
    }

    // Create the flow rule
    flow = rte_flow_create(port_id, &attr, pattern, action, &error);
    if (flow == NULL) {
        printf("Flow rule creation failed: %s\n", error.message);
        return -1;
    }

    printf("Flow rule created for IP %u.%u.%u.%u and UDP port %u\n",
           (ip_addr >> 24) & 0xFF, (ip_addr >> 16) & 0xFF,
           (ip_addr >> 8) & 0xFF, ip_addr & 0xFF, udp_port);

    return 0;
}

int create_drop_all_rule(uint16_t port_id) {
    struct rte_flow_error error;
    struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
    struct rte_flow_item pattern[4];
    struct rte_flow_action action[2];
    struct rte_flow *flow;

    // Ethernet item (match any Ethernet frame)
    memset(pattern, 0, sizeof(pattern));
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

    // IPv4 item: spec/mask left NULL to match any IPv4 packet
    pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;

    // UDP item: spec/mask left NULL to match any UDP packet
    pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;

    // End of pattern
    pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

    // Action: drop (DROP takes no configuration, so conf stays NULL)
    memset(action, 0, sizeof(action));
    action[0].type = RTE_FLOW_ACTION_TYPE_DROP;
    action[1].type = RTE_FLOW_ACTION_TYPE_END;

    // Validate and create
    if (rte_flow_validate(port_id, &attr, pattern, action, &error) != 0) {
        printf("Validation failed: %s\n", error.message);
        return -1;
    }

    flow = rte_flow_create(port_id, &attr, pattern, action, &error);
    if (flow == NULL) {
        printf("Creation failed: %s\n", error.message);
        return -1;
    }

    printf("DROP rule created.\n");
    return 0;
}


#include "../include/port.h"
#include <stdio.h>
#include <rte_arp.h>

// Configuration par défaut du port
static struct rte_eth_conf port_conf = {
.txmode = {
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
.rxmode = {
.offloads = RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_IPV4_CKSUM,
.mq_mode = RTE_ETH_MQ_RX_NONE,
.max_lro_pkt_size = 9000,
},
.intr_conf = {
.rxq = 1,
}
};

int port_init(uint16_t port_id, struct rte_mempool *mbuf_pool) {
int retval;
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf rxq_conf;
struct rte_eth_txconf txq_conf;

// Obtenir les informations sur le périphérique
retval = rte_eth_dev_info_get(port_id, &dev_info);

if (retval != 0) {
return retval;
}

if (NUM_RX > dev_info.max_rx_queues) {
printf("Erreur: NUM_RX dépasse le maximum supporté (%u)\n", dev_info.
max_rx_queues);
return -1;
}

// Configurer le périphérique Ethernet
retval = rte_eth_dev_configure(port_id, NUM_RX, NUM_TX, &port_conf);
if (retval != 0) {
return retval;
}

// Configurer les queues RX
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = port_conf.rxmode.offloads;
for(int i = 0; i < NUM_RX; i++){
retval = rte_eth_rx_queue_setup(port_id, i, 128, rte_eth_dev_socket_id(
port_id), &rxq_conf, mbuf_pool);
if (retval < 0) {
return retval;
}

}

// Configurer les queues TX
txq_conf = dev_info.default_txconf;
txq_conf.offloads = port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(port_id, TX_ID, 128, rte_eth_dev_socket_id(
port_id), &txq_conf);
if (retval < 0) {
return retval;
}

// Démarrer le périphérique Ethernet
retval = rte_eth_dev_start(port_id);
if (retval < 0) {
return retval;
}

// Activer le mode promiscuous (facultatif)
// rte_eth_promiscuous_enable(port_id);

printf("Port %u initialisé avec succès\n", port_id);
return 0;
}

void port_cleanup(uint16_t port_id) {
rte_eth_dev_stop(port_id);
rte_eth_dev_close(port_id);
printf("Port %u arrêté et nettoyé\n", port_id);
}


void send_gratuitous_arp(uint16_t port_id,
                         struct rte_mempool *mbuf_pool,
                         const struct rte_ether_addr *local_mac,
                         uint32_t local_ip)
{
    struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mbuf_pool);
    if (!mbuf) {
        fprintf(stderr, "Failed to allocate mbuf\n");
        return;
    }

    const uint16_t pkt_size = sizeof(struct rte_ether_hdr) +
                              sizeof(struct rte_arp_hdr);
    const uint16_t min_frame_size = 60; // minimum Ethernet frame size without FCS
    uint16_t padding = (pkt_size < min_frame_size) ? (min_frame_size - pkt_size) : 0;

    char *pkt_data = rte_pktmbuf_append(mbuf, pkt_size + padding);
    if (!pkt_data) {
        fprintf(stderr, "Failed to append data in mbuf\n");
        rte_pktmbuf_free(mbuf);
        return;
    }

    // Zero out the padding space
    if (padding > 0) {
        memset(pkt_data + pkt_size, 0, padding);
    }

    // Ethernet header
    struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)pkt_data;
    rte_ether_addr_copy(local_mac, &eth_hdr->src_addr);
    memset(&eth_hdr->dst_addr, 0xFF, RTE_ETHER_ADDR_LEN); // broadcast MAC
    eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);

    // ARP header (gratuitous ARP request)
    struct rte_arp_hdr *arp_hdr =
        (struct rte_arp_hdr *)(pkt_data + sizeof(struct rte_ether_hdr));
    arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
    arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
    arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
    arp_hdr->arp_plen = 4;
    arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REQUEST);

    // Sender MAC/IP
    rte_ether_addr_copy(local_mac, &arp_hdr->arp_data.arp_sha);
    arp_hdr->arp_data.arp_sip = rte_cpu_to_be_32(local_ip);

    // Target MAC/IP (gratuitous ARP: target IP is our own IP)
    memset(&arp_hdr->arp_data.arp_tha, 0, RTE_ETHER_ADDR_LEN); // unknown target MAC
    arp_hdr->arp_data.arp_tip = rte_cpu_to_be_32(local_ip);

    // Transmit
    struct rte_mbuf *tx_pkts[1];
    tx_pkts[0] = mbuf;
    uint16_t nb_tx = rte_eth_tx_burst(port_id, TX_ID, tx_pkts, 1);

    if (nb_tx < 1) {
        rte_pktmbuf_free(mbuf);
        printf("Failed to send gratuitous ARP\n");
    } else {
        printf("Gratuitous ARP sent successfully\n");
    }
}


#include "../include/flow.h"
#include "../include/port.h"
#include <unistd.h>
#include <rte_eal.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <arpa/inet.h>
#include <sys/epoll.h>

#define MAX_PKT_BURST 32
#define IP_ADDR RTE_IPV4(10, 81, 16, 111)
#define UDP_PORT 1234

void clear_rx_queue(uint16_t port_id, uint16_t rx_id) {
    struct rte_mbuf *bufs[MAX_PKT_BURST];
    uint16_t nb_rx;

    while ((nb_rx = rte_eth_rx_burst(port_id, rx_id, bufs, MAX_PKT_BURST)) > 0) {
        for (uint16_t i = 0; i < nb_rx; i++) {
            rte_pktmbuf_free(bufs[i]);
        }
    }
}

int main(int argc, char **argv) {
    struct rte_mempool *mbuf_pool;
    struct rte_ether_addr local_mac;
    struct rte_epoll_event event[1];
    struct rte_flow_error error;
    int ret, n;

    // Initialize the EAL
    ret = rte_eal_init(argc, argv);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "EAL initialization failed\n");
    }

    // Create the mbuf pool
    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS, MBUF_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (mbuf_pool == NULL) {
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
    }

    // Initialize the port
    if (port_init(PORT_ID, mbuf_pool) != 0) {
        rte_exit(EXIT_FAILURE, "Port initialization failed\n");
    }

    // Get the MAC address
    ret = rte_eth_macaddr_get(PORT_ID, &local_mac);
    if (ret != 0) {
        rte_exit(EXIT_FAILURE, "Failed to get MAC address\n");
    }

    send_gratuitous_arp(PORT_ID, mbuf_pool, &local_mac, IP_ADDR);

    if (flow_filtering(PORT_ID, IP_ADDR, UDP_PORT) != 0) {
        rte_exit(EXIT_FAILURE, "Flow rule creation failed\n");
    }

    // // Create the drop-all rule
    // if (create_drop_all_rule(PORT_ID) != 0) {
    //     rte_exit(EXIT_FAILURE, "Drop-all rule creation failed\n");
    // }

    ret = rte_eth_dev_rx_intr_enable(PORT_ID, RX_ID);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable RX interrupts\n");
    }

    int intr_fd = rte_eth_dev_rx_intr_ctl_q_get_fd(PORT_ID, RX_ID);
    if (intr_fd < 0) {
        rte_exit(EXIT_FAILURE, "Failed to get interrupt fd\n");
    }

    struct rte_epoll_event ev = {
        .epdata.event = EPOLLIN | EPOLLET,
        .fd = intr_fd
    };

    ret = rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, intr_fd, &ev);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to add fd to epoll\n");
    }

    printf("Processing packets...\n");
    struct rte_mbuf *bufs[MAX_PKT_BURST];

    while (1) {
        int rx_count = rte_eth_rx_queue_count(PORT_ID, 0);
        printf("Packets in RX queue 0: %d\n", rx_count);

        rx_count = rte_eth_rx_queue_count(PORT_ID, RX_ID);
        printf("Packets in RX queue %u: %d\n", (unsigned)RX_ID, rx_count);

        // Wait for an interrupt (5 second timeout)
        n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, 5000);
        if (n < 0) {
            if (errno == EINTR) {
                continue;
            }
            rte_exit(EXIT_FAILURE, "Epoll wait failed: %s\n", strerror(errno));
        }

        // Disable the interrupt while polling the queue
        rte_eth_dev_rx_intr_disable(PORT_ID, RX_ID);

        // Process packets
        do {
            const uint16_t nb_rx = rte_eth_rx_burst(PORT_ID, RX_ID, bufs,
                                                    MAX_PKT_BURST);
            if (nb_rx == 0)
                break;

            printf("Received %u packet(s)\n", nb_rx);
            for (uint16_t i = 0; i < nb_rx; i++) {
                struct rte_ether_hdr *eth_hdr =
                    rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                if (rte_be_to_cpu_16(eth_hdr->ether_type) == RTE_ETHER_TYPE_IPV4) {
                    struct rte_ipv4_hdr *ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
                    char src_str[INET_ADDRSTRLEN];
                    char dst_str[INET_ADDRSTRLEN];
                    inet_ntop(AF_INET, &ip_hdr->src_addr, src_str, INET_ADDRSTRLEN);
                    inet_ntop(AF_INET, &ip_hdr->dst_addr, dst_str, INET_ADDRSTRLEN);
                    printf("Received IPv4 packet: %s -> %s, Size: %u bytes\n",
                           src_str, dst_str, rte_pktmbuf_pkt_len(bufs[i]));
                }
                rte_pktmbuf_free(bufs[i]);
            }
        } while (1);

        // Drain queue 0 so it never fills up
        clear_rx_queue(PORT_ID, 0);

        ret = rte_eth_dev_rx_intr_enable(PORT_ID, RX_ID);
        if (ret < 0) {
            rte_exit(EXIT_FAILURE, "Failed to re-enable RX interrupts\n");
        }
    }

    rte_flow_flush(PORT_ID, &error);
    port_cleanup(PORT_ID);
    rte_eal_cleanup();
    return 0;
}

Best regards,
SidAli
