DPDK usage discussions
* [dpdk-users] Adding ESP packet filters to ACL table
@ 2020-02-17 14:55 James Hymes
From: James Hymes @ 2020-02-17 14:55 UTC (permalink / raw)
  To: users

Hello,

I am trying to build a flow_classify based implementation that can filter packets based on their ESP SPI values. I keep getting error -22 (Unsupported pattern) when the rule is validated on line 405. The code I am working from is a modified version of basicfwd with elements of flow_classify. Am I missing something, or going about this the wrong way? Any light shed on this would be greatly appreciated; after going through all the relevant documentation I am still stumped.
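
For reference, this is the shape of the ESP match I am ultimately trying to express. It is only a minimal sketch using rte_flow's ESP item, with a placeholder SPI value and illustrative variable names; whether flow_classify accepts such an item at all is essentially my question:

    struct rte_flow_item_esp esp_spec = { .hdr = { .spi = rte_cpu_to_be_32(1) } };
    struct rte_flow_item_esp esp_mask = { .hdr = { .spi = 0xFFFFFFFF } };
    struct rte_flow_item esp_item = {
        .type = RTE_FLOW_ITEM_TYPE_ESP,
        .spec = &esp_spec,
        .last = NULL,
        .mask = &esp_mask,
    };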

The hardware I'm using is an FRWY-LS1046A, and the following parameters are passed when running: "-l 0-1 -n 1 --proc-type auto --file-prefix c -b fm1-mac10 --"

Here is the output I receive from running:

EAL: Detected 4 lcore(s)
EAL: Detected 1 NUMA nodes
EAL: Auto-detected process type: PRIMARY
EAL: Multi-process socket /var/run/dpdk/c/mp_socket
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: DPAA Bus Detected
PMD: Using FMC script mode,Make sure to use DPDK supported FMC scripts only.
PMD: net: dpaa: fm1-mac5: 00:04:9f:06:2a:9b
PMD: net: dpaa: fm1-mac6: 00:04:9f:06:2a:9c
PMD: dpaa_sec-1 cryptodev init
PMD: dpaa_sec-2 cryptodev init
PMD: dpaa_sec-3 cryptodev init
PMD: dpaa_sec-4 cryptodev init
Port 0 MAC: 00 04 9f 06 2a 9b
Port 1 MAC: 00 04 9f 06 2a 9c

WARNING: Too many lcores enabled. Only 1 used.

Core 0 forwarding packets. [Ctrl+C to quit]

Classifier_app memory allocation successful
Flow classifier create successful
table create successful
rule validate failed error -22: Unsupported pattern
setup completed

And here is the code that I am currently using:

#include <getopt.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <endian.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_malloc.h>
#include <stdbool.h>
#include <signal.h>
#include <rte_udp.h>
#include <rte_esp.h>
#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include <rte_table_acl.h>
#include <rte_acl.h>
#include <rte_flow_driver.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 1

#define FLOW_CLASSIFY_MAX_RULE_NUM 91

char *convertToIP(uint32_t ip);
void printPacketData(struct rte_mbuf *bufs, uint16_t nb_rx);
struct flow_classifier *setupFlowClassify(void);

static struct rte_flow_attr attr;
static struct rte_flow_classify_rule *rules[10];

static const struct rte_eth_conf port_conf_default = {
    .rxmode = {
        .max_rx_pkt_len = ETHER_MAX_LEN,
    },
};

struct flow_classifier {
    struct rte_flow_classifier *classifier;
};

struct flow_classifier_acl {
    struct flow_classifier classifier;
}__rte_cache_aligned;

static struct rte_flow_classify_stats classify_stats;
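/*
 * Note: as far as I can tell from the flow_classify example, classify_stats.stats
 * is pointed at a struct rte_flow_classify_ipv4_5tuple_stats before
 * rte_flow_classifier_query() is called; I have not done that here yet.
 */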

/* ACL field definitions: the standard IPv4 5-tuple rule extended with an ESP SPI field */
enum {
    PROTO_FIELD_IPV4,
    SRC_FIELD_IPV4,
    DST_FIELD_IPV4,
    SRCP_FIELD_IPV4,
    DSTP_FIELD_IPV4,
    NUM_FIELDS_IPV4
};

enum {
    PROTO_INPUT_IPV4,
    SRC_INPUT_IPV4,
    DST_INPUT_IPV4,
    SRCP_DESTP_INPUT_IPV4
};

static struct rte_acl_field_def ipv4_defs[6] = {
    /* first input field - always one byte long. */
    {
        .type = RTE_ACL_FIELD_TYPE_BITMASK,
        .size = sizeof(uint8_t),
        .field_index = 0,
        .input_index = 0,
        .offset = sizeof(struct ether_hdr) +
            offsetof(struct ipv4_hdr, next_proto_id),
    },
    /* next input field (IPv4 source address) - 4 consecutive bytes. */
    {
        /* rte_flow uses a bit mask for IPv4 addresses */
        .type = RTE_ACL_FIELD_TYPE_BITMASK,
        .size = sizeof(uint32_t),
        .field_index = 1,
        .input_index = 1,
        .offset = sizeof(struct ether_hdr) +
            offsetof(struct ipv4_hdr, src_addr),
    },
    /* next input field (IPv4 destination address) - 4 consecutive bytes. */
    {
        // rte_flow uses a bit mask for IPv4 addresses
        .type = RTE_ACL_FIELD_TYPE_BITMASK,
        .size = sizeof(uint32_t),
        .field_index = 2,
        .input_index = 2,
        .offset = sizeof(struct ether_hdr) +
            offsetof(struct ipv4_hdr, dst_addr),
    },
    //UDP part
    {
        // rte_flow uses a bit mask for protocol ports
        .type = RTE_ACL_FIELD_TYPE_BITMASK,
        .size = sizeof(uint16_t),
        .field_index = 3,
        .input_index = 3,
        .offset = sizeof(struct ether_hdr) +
            sizeof(struct ipv4_hdr) +
            offsetof(struct udp_hdr, src_port),
    },
    {
        // rte_flow uses a bit mask for protocol ports
        .type = RTE_ACL_FIELD_TYPE_BITMASK,
        .size = sizeof(uint16_t),
        .field_index = 4,
        .input_index = 4,
        .offset = sizeof(struct ether_hdr) +
            sizeof(struct ipv4_hdr) +
            offsetof(struct udp_hdr, dst_port),
    },
    /*
     * ESP Part.
     */
    {
        .type = RTE_ACL_FIELD_TYPE_BITMASK,
        .size = sizeof(rte_be32_t),
        .field_index = 5,
        .input_index = 5,
        .offset = sizeof(struct ether_hdr) +
            sizeof(struct ipv4_hdr) +
            sizeof(struct udp_hdr) +
            offsetof(struct esp_hdr, spi),
    },
};
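/*
 * The sixth field above reads the ESP SPI at the offset immediately after the UDP
 * header, i.e. it assumes UDP-encapsulated ESP.
 */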

/* basicfwd.c: Basic DPDK skeleton forwarding example. */

struct rte_flow_query_count count = {
    .reset = 1,
    .hits_set = 1,
    .bytes_set = 1,
    .hits = 0,
    .bytes = 0,
};

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
    struct rte_eth_conf port_conf = port_conf_default;
    const uint16_t rx_rings = 1, tx_rings = 1;
    uint16_t nb_rxd = RX_RING_SIZE;
    uint16_t nb_txd = TX_RING_SIZE;
    int retval;
    uint16_t q;
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf txconf;

    if (!rte_eth_dev_is_valid_port(port))
        return -1;

    rte_eth_dev_info_get(port, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
        port_conf.txmode.offloads |=
            DEV_TX_OFFLOAD_MBUF_FAST_FREE;

    /* Configure the Ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
    if (retval != 0)
        return retval;

    /* Allocate and set up 1 RX queue per Ethernet port. */
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
                rte_eth_dev_socket_id(port), NULL, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    txconf = dev_info.default_txconf;
    txconf.offloads = port_conf.txmode.offloads;
    /* Allocate and set up 1 TX queue per Ethernet port. */
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, nb_txd,
                rte_eth_dev_socket_id(port), &txconf);
        if (retval < 0)
            return retval;
    }

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    /* Display the port MAC address. */
    struct ether_addr addr;
    rte_eth_macaddr_get(port, &addr);
    printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
               " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
            port,
            addr.addr_bytes[0], addr.addr_bytes[1],
            addr.addr_bytes[2], addr.addr_bytes[3],
            addr.addr_bytes[4], addr.addr_bytes[5]);

    /* Enable RX in promiscuous mode for the Ethernet device. */
    rte_eth_promiscuous_enable(port);

    return 0;
}

//Receives an IPv4 address as an unsigned integer and converts it to dotted-decimal format
char *convertToIP(uint32_t ip){
    char *address = malloc (sizeof (char) * 16);
    sprintf(address, "%d.%d.%d.%d", (ip>>24)& 0xFF, (ip>>16)& 0xFF,(ip>>8)& 0xFF, ip& 0xFF);
    return address;
}

struct flow_classifier *setupFlowClassify(void){

    //classifier structs
    struct flow_classifier *classifier_app;
    struct rte_flow_classifier_params classifier_params;

    //table structs
    struct rte_table_acl_params table_params;
    struct rte_flow_classify_table_params table_params_cls;

    //Flow Rule structs
    struct rte_flow_item packetTuple[5];
    struct rte_flow_action actions[2];
    struct rte_flow_classify_rule *rule;
    struct rte_flow_error error;

    //flow item structs
    struct rte_flow_item  eth_part = {RTE_FLOW_ITEM_TYPE_ETH, 0, 0, 0 };
    struct rte_flow_item  end_part = {RTE_FLOW_ITEM_TYPE_END, 0, 0, 0 };

    struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, &count};
    struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
    // ipv4
    struct rte_flow_item       ipv4_part;
    struct rte_flow_item_ipv4  ipv4_spec;
    struct rte_flow_item_ipv4  ipv4_mask;
/*  // UDP
    struct rte_flow_item       udp_part;
    struct rte_flow_item_udp   udp_spec;
    struct rte_flow_item_udp   udp_mask;

    // ESP
    struct rte_flow_item       esp_part;
    struct rte_flow_item_esp   esp_spec;
    struct rte_flow_item_esp   esp_mask;
*/
    int ret = -1;
    int key_found;

    //setup classifier
    int size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
    classifier_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
    if (classifier_app == NULL)
        printf("Classifier_app memory allocation failed\n");
    else
        printf("Classifier_app memory allocation successful\n");

    classifier_params.name = "flow_classifier";
    classifier_params.socket_id = rte_eth_dev_socket_id(0);
    classifier_app->classifier = rte_flow_classifier_create(&classifier_params);
    if (classifier_app->classifier == NULL)
        printf("Flow classifier create failed\n");
    else
        printf("Flow classifier create successful\n");

    //create table
    table_params.name = "table_acl";
    table_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
    table_params.n_rule_fields = RTE_DIM(ipv4_defs);
    memcpy(table_params.field_format, ipv4_defs, sizeof(ipv4_defs));

    table_params_cls.ops = &rte_table_acl_ops;
    table_params_cls.arg_create = &table_params;
    table_params_cls.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
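    /* Note: the table type is still the IPv4 5-tuple ACL type, even though ipv4_defs now has six fields. */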

    ret = rte_flow_classify_table_create(classifier_app->classifier, &table_params_cls);
    if (ret) {
        printf("table create failed\n"); /* non-zero return means failure */
        rte_flow_classifier_free(classifier_app->classifier);
    } else
        printf("table create successful\n");

    //create packet profile 1 rules
    // set ether part
    packetTuple[0] = eth_part;

    //set IP header part
    //memory setup
    memset(&ipv4_spec, 0, sizeof(ipv4_spec));
    memset(&ipv4_mask, 0, sizeof(ipv4_mask));
    //create rules
    ipv4_spec.hdr.src_addr = (uint32_t)12345;
    ipv4_spec.hdr.dst_addr = (uint32_t)12345;
    ipv4_spec.hdr.next_proto_id = (uint8_t)50;

    ipv4_mask.hdr.src_addr = 0xFFFFFFFF;
    ipv4_mask.hdr.dst_addr = 0xFFFFFFFF;
    ipv4_mask.hdr.next_proto_id = 0xFF;

    //encapsulate
    ipv4_part.type = RTE_FLOW_ITEM_TYPE_IPV4;
    ipv4_part.spec = &ipv4_spec;
    ipv4_part.mask = &ipv4_mask;
    ipv4_part.last = NULL;
    packetTuple[1] = ipv4_part;


    //set UDP header part
    struct rte_flow_item       udp_part;
    struct rte_flow_item_udp   udp_spec;
    struct rte_flow_item_udp   udp_mask;

    //memory setup
    memset(&udp_spec, 0, sizeof(udp_spec));
    memset(&udp_mask, 0, sizeof(udp_mask));

    //create rules
    udp_spec.hdr.src_port = (uint16_t)0;
    udp_spec.hdr.dst_port = (uint16_t)0;
    udp_spec.hdr.dgram_len = (uint16_t)0;
    udp_spec.hdr.dgram_cksum = (uint16_t)0;

    udp_mask.hdr.src_port = 0xFFFF;
    udp_mask.hdr.dst_port = 0xFFFF;
    udp_mask.hdr.dgram_len = (uint16_t)0;
    udp_mask.hdr.dgram_cksum = (uint16_t)0;

    //encapsulate
    udp_part.type = RTE_FLOW_ITEM_TYPE_UDP;
    udp_part.spec = &udp_spec;
    udp_part.mask = &udp_mask;
    udp_part.last = NULL;
    packetTuple[2] = udp_part;


    //set ESP header part (note: the item type below is left as RTE_FLOW_ITEM_TYPE_RAW even though the spec/mask are the rte_flow ESP structs)
    struct rte_flow_item       esp_part;
    struct rte_flow_item_esp   esp_spec;
    struct rte_flow_item_esp   esp_mask;

    //memory setup
    memset(&esp_spec, 0, sizeof(esp_spec));
    memset(&esp_mask, 0, sizeof(esp_mask));

    //create rules
    esp_spec.hdr.spi = 0x00000000;
    esp_spec.hdr.seq = 0x00000000;

    esp_mask.hdr.spi = 0xFFFFFFFF;
    esp_mask.hdr.seq = 0xFFFFFFFF;

    //encapsulate
    esp_part.type = RTE_FLOW_ITEM_TYPE_RAW;
    esp_part.spec = &esp_spec;
    esp_part.mask = &esp_mask;
    esp_part.last = NULL;
    packetTuple[3] = esp_part;

    //end profile
    packetTuple[4] = end_part;

    //final bits
    attr.priority = (uint32_t)0;
    attr.group = (uint32_t)0;
    attr.ingress = (uint32_t)1; //incoming packets

    actions[0] = count_action;
    actions[1] = end_action;

    //validate rule
    ret = rte_flow_classify_validate(classifier_app->classifier, &attr, packetTuple, actions, &error);
    if (ret) {
        printf("rule validate failed error %d: %s\n", ret, error.message); /* non-zero return means failure */
        rte_flow_classifier_free(classifier_app->classifier);
    } else
        printf("rule validate successful\n");

    //create and store rule
    rule = rte_flow_classify_table_entry_add(classifier_app->classifier, &attr, packetTuple, actions, &key_found, &error);
    rules[0] = rule;
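    /* (The flow_classify example checks that the returned rule is not NULL before storing it; I have not added that check yet.) */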

    printf("setup completed\n");

    return classifier_app;
}

/*
 * The lcore main. This is the main thread that does the work, reading from
 * an input port and writing to an output port.
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
    uint16_t port;
    int ret;

    /*
     * Check that the port is on the same NUMA node as the polling thread
     * for best performance.
     */
    RTE_ETH_FOREACH_DEV(port)
        if (rte_eth_dev_socket_id(port) > 0 &&
                rte_eth_dev_socket_id(port) !=
                        (int)rte_socket_id())
            printf("WARNING, port %u is on remote NUMA node to "
                    "polling thread.\n\tPerformance will "
                    "not be optimal.\n", port);

    printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n", rte_lcore_id());

    //printf("\nVersion: 0.20\n");

    //setup flow classify
    struct flow_classifier *classifier_app = setupFlowClassify();

    /* Run until the application is quit or killed. */
    for (;;) {
        /*
         * Receive packets on a port and forward them on the paired
         * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
         */
        port = 1;

        /* Get burst of RX packets, from first port of pair. */
        struct rte_mbuf *bufs[BURST_SIZE];
        const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                bufs, BURST_SIZE);

        if (unlikely(nb_rx == 0))
            continue;

        // -------- Packets Received ----------
        //printPacketData(*bufs, nb_rx);

        ret = rte_flow_classifier_query(classifier_app->classifier, bufs, nb_rx, rules[0], &classify_stats);
        if (ret)
            printf("rule [%d] query failed ret [%d]\n\n", 0, ret);
        else
            printf("packet is profile 1?\n");

        /* Send burst of TX packets, to second port of pair. */
        const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                bufs, nb_rx);

        /* Free any unsent packets. */
        if (unlikely(nb_tx < nb_rx)) {
            uint16_t buf;
            for (buf = nb_tx; buf < nb_rx; buf++)
                rte_pktmbuf_free(bufs[buf]);
        }
    }
    rte_flow_classifier_free(classifier_app->classifier);//emptying it just in case
}

/*
 * The main function, which does initialization and calls the per-lcore
 * functions.
 */
int
main(int argc, char *argv[])
{
    struct rte_mempool *mbuf_pool;
    unsigned nb_ports;
    uint16_t portid;

    /* Initialize the Environment Abstraction Layer (EAL). */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    argc -= ret;
    argv += ret;

    /* Check that there is an even number of ports to send/receive on. */
    //nb_ports = rte_eth_dev_count_avail();
    //if (nb_ports < 2 || (nb_ports & 1))
    //  rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
    nb_ports = 2;

    /* Creates a new mempool in memory to hold the mbufs. */
    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
        MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    /* Initialize all ports. */
    //for(portid = 0; portid < 2; portid++)
    RTE_ETH_FOREACH_DEV(portid)
        if (port_init(portid, mbuf_pool) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
                    portid);

    if (rte_lcore_count() > 1)
        printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
    /* Call lcore_main on the master core only. */
    lcore_main();

    return 0;
}




