Subject: [dpdk-dev] [DPDK v17.11 LTS] Crash (segmentation fault) in ACL table packet look up
From: Victor Huertas @ 2019-09-18  8:43 UTC
  To: dev; +Cc: Vikutoru

Hi all,

The DPDK lib always crashes when a packet enters an ACL table I created to
check IPv6 fragmented packets. If the table is empty nothing happens, as the
missed packets go to the next table in the pipeline, but as soon as I add
some entries it crashes when the first packet enters.

It seems to happen in the acl_run.h file (in librte_acl), at line 178, inside
static inline uint64_t acl_start_next_trie(struct acl_flow_data *flows,
struct parms *parms, int n, const struct rte_acl_ctx *ctx). Below is the
section of code where it crashes (the offending statement is the
flows->trans dereference):

/* set completion parameters and starting index for this slot */
parms[n].cmplt = flows->last_cmplt;
transition =
        flows->trans[parms[n].data[*parms[n].data_index++] +
        ctx->trie[flows->trie].root_index];

Running in debug mode, Eclipse tells me that the 'trans' member of 'flows'
is NULL, and I think that is the cause of the segmentation fault.
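
If it helps, as far as I can tell from the 17.11 sources, flows->trans is simply
ctx->trans_table handed to acl_set_flow() at the start of the classify run
(condensed below; the SSE/AVX2 runs follow the same pattern), so a NULL there
would mean the run-time tries of that rte_acl_ctx were never generated:

/* acl_run_scalar.c, rte_acl_classify_scalar(), condensed */
acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results, num,
        categories, ctx->trans_table);

/* acl_run.h, acl_set_flow() */
flows->trans = trans;

I may be misreading it, though.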

The thing is that the other ACL tables I use don't cause this segmentation
fault at all. I have reviewed the fields format configuration, etc., and
everything seems to be OK. The table creation returns 0 and all the table
entry insertions return 0, so the lib doesn't complain at all until the
crash happens.

I enclose below the sections of my code where the fields format is set, as
well as the table creation section.

Any help is really welcome to see what is happening here.

Thanks a lot for your attention.

Now the code samples:

================== ACL fields format (only the one that fails) =======================
struct rte_acl_field_def field_format_ipv6_1st_fragment[NUM_FIELDS_IPV6_1ST_FRAGMENT_ACL] = {
/* Protocol (1 byte) this value will always be the same (44)*/
[0] = {
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
.field_index = 0,
.input_index = 0,
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, proto),
},
/* ethertype (2 bytes) : this value will always be the same (0x86DD) */
[1] = {
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint16_t),
.field_index = 1,
.input_index = 1, // this value must be multiple of 4 bytes
.offset = offsetof(struct ether_hdr, ether_type),
},
/* tos field (2 bytes) */
[2] = {
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint16_t),
.field_index = 2,
.input_index = 1, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, vtc_flow),
},
/************* IPv6 source address **************/
/* Source IPv6 address [0-3] */
[3] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 3,
.input_index = 2, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, src_addr[0]),
},
/* Source IPv6 address [4-7] */
[4] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 4,
.input_index = 3, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, src_addr[4]),
},
/* Source IPv6 address [8-11] */
[5] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 5,
.input_index = 4, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, src_addr[8]),
},
/* Source IPv6 address [12-15] */
[6] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 6,
.input_index = 5, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, src_addr[12]),
},

/************* IPv6 destination address **************/
/* Destination IPv6 address [0-3] */
[7] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 7,
.input_index = 6, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, dst_addr[0]),
},
/* Destination IPv6 address [4-7] */
[8] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 8,
.input_index = 7, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, dst_addr[4]),
},
/* Destination IPv6 address [8-11] */
[9] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 9,
.input_index = 8, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, dst_addr[8]),
},
/* Destination IPv6 address [12-15] */
[10] = {
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = 10,
.input_index = 9, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv6_hdr, dst_addr[12]),
},
/* next_header+reserved+frag_data (4 byte) in ipv6 frag header */
[11] = {
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint32_t),
.field_index = 11,
.input_index = 10, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr) +
offsetof(struct ipv6_extension_fragment, next_header),
},
/* Source Port */
[12] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 12,
.input_index = 11, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr) +
sizeof(struct ipv6_extension_fragment) +
offsetof(struct tcp_hdr, src_port),
},

/* Destination Port */
[13] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 13,
.input_index = 11, // this value must be multiple of 4 bytes
.offset = sizeof(struct ether_hdr) +
sizeof(struct ipv6_hdr) +
sizeof(struct ipv6_extension_fragment) +
offsetof(struct tcp_hdr, dst_port),
},
};
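
I do not paste the whole entry-insertion code, but the entries for this table
are added the usual way, through rte_pipeline_table_entry_add() with a
struct rte_table_acl_rule_add_params key, roughly like the sketch below
(placeholder variable names, action data omitted, and a catch-all rule shown
just as an example; my real rules of course set the fields they actually
match on):

struct rte_table_acl_rule_add_params key;
struct rte_pipeline_table_entry entry;
struct rte_pipeline_table_entry *entry_ptr;
int key_found, status;

memset(&key, 0, sizeof(key));
key.priority = 1;

/* field 0: IPv6 next header == 44 (fragment extension header) */
key.field_value[0].value.u8 = 44;
key.field_value[0].mask_range.u8 = 0xff;

/* fields 1..11 (BITMASK/MASK) left as 0/0, i.e. wildcards */

/* fields 12..13 (RANGE) need the full range to act as wildcards */
key.field_value[12].value.u16 = 0;
key.field_value[12].mask_range.u16 = 0xffff;
key.field_value[13].value.u16 = 0;
key.field_value[13].mask_range.u16 = 0xffff;

memset(&entry, 0, sizeof(entry));
entry.action = RTE_PIPELINE_ACTION_TABLE;
entry.table_id = next_table_id; /* placeholder: id of the next table in the pipeline */

status = rte_pipeline_table_entry_add(p->p,
    p->table_id[s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL].table_index],
    (void *)&key, &entry, &key_found, &entry_ptr);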

===================== Table creation ==========================
struct rte_table_acl_params acl_flow_line_acl_params[SERVICE_QOS_NUM_FLOW_CLASSIF_TABLES];

/* SERVICE_QOS_IPV4_QOS_POLICY_ACL table params setting */
strcpy(s_fc->table_p[SERVICE_QOS_IPV4_QOS_POLICY_ACL].table_name,
    "table_acl_ipv4_qos_policy");
s_fc->table_p[SERVICE_QOS_IPV4_QOS_POLICY_ACL].field_format = field_format_ipv4;
s_fc->table_p[SERVICE_QOS_IPV4_QOS_POLICY_ACL].n_rule_fields = RTE_DIM(field_format_ipv4);
s_fc->table_p[SERVICE_QOS_IPV4_QOS_POLICY_ACL].field_format_size = sizeof(field_format_ipv4);
s_fc->table_p[SERVICE_QOS_IPV4_QOS_POLICY_ACL].n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;

/* SERVICE_QOS_IPV6_QOS_POLICY_ACL table params setting */
strcpy(s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_ACL].table_name,
    "table_acl_ipv6_qos_policy");
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_ACL].field_format = field_format_ipv6;
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_ACL].n_rule_fields = RTE_DIM(field_format_ipv6);
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_ACL].field_format_size = sizeof(field_format_ipv6);
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_ACL].n_rules = FLOW_CLASSIFY_MAX_RULE_NUM; /* 8*1024 */

/* SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL table params setting */
strcpy(s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL].table_name,
    "table_acl_ipv6_qos_policy_1st_fragment");
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL].field_format = field_format_ipv6_1st_fragment;
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL].n_rule_fields = RTE_DIM(field_format_ipv6_1st_fragment);
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL].field_format_size = sizeof(field_format_ipv6_1st_fragment);
s_fc->table_p[SERVICE_QOS_IPV6_QOS_POLICY_1ST_FRAGMENT_ACL].n_rules = FLOW_CLASSIFY_MAX_RULE_NUM; /* 8*1024 */

for (unsigned int i = 0; i < SERVICE_QOS_NUM_FLOW_CLASSIF_TABLES; i++) {
    if (table_present[i] == true &&
        is_acl_table((enum service_qos_fc_table_name)i) == true) {
        /* initialise ACL table params */
        acl_flow_line_acl_params[i].name =
            (const char *)s_fc->table_p[i].table_name;
        acl_flow_line_acl_params[i].n_rules = s_fc->table_p[i].n_rules;
        acl_flow_line_acl_params[i].n_rule_fields =
            s_fc->table_p[i].n_rule_fields;
        for (unsigned int j = 0; j < s_fc->table_p[i].n_rule_fields; j++) {
            memcpy(&acl_flow_line_acl_params[i].field_format[j],
                &s_fc->table_p[i].field_format[j],
                sizeof(struct rte_acl_field_def));
        }

        s_fc->table_p[i].pipeline_table_params.ops = &rte_table_acl_ops;
        s_fc->table_p[i].pipeline_table_params.arg_create =
            &acl_flow_line_acl_params[i];
        s_fc->table_p[i].pipeline_table_params.f_action_hit =
            get_service_qos_fc_table_ah_hit();
        s_fc->table_p[i].pipeline_table_params.f_action_miss = NULL;
        s_fc->table_p[i].pipeline_table_params.arg_ah = &s_qos->qoss.metadata;
        s_fc->table_p[i].pipeline_table_params.action_data_size =
            sizeof(struct service_qos_policy_flow_table_entry) -
            sizeof(struct rte_pipeline_table_entry);

        status = rte_pipeline_table_create(p->p,
            &s_fc->table_p[i].pipeline_table_params,
            &p->table_id[s_fc->table_p[i].table_index]);

        if (status) {
            return NULL; /* the rte_free must be done in the pipeline */
        } else {
            p->n_tables++;
            uint32_t pipeline_id;
            APP_PARAM_GET_ID(p, "PIPELINE", pipeline_id);
            S_LOG_BE(p, pipeline_id, PIPELINE_LOG_LEVEL_INFO,
                "Successfully added '%s' table with index {%u}",
                acl_flow_line_acl_params[i].name,
                s_fc->table_p[i].table_index);
        }
    }
}
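
In case it helps to narrow down whether the problem is inside librte_acl or in
my table plumbing, I am also planning to try a self-contained check that builds
the very same field format directly with librte_acl and classifies a single
captured first-fragment packet. This is only a sketch (untested, placeholder
names, assumes the EAL is already initialised, and reuses
NUM_FIELDS_IPV6_1ST_FRAGMENT_ACL / field_format_ipv6_1st_fragment from above):

#include <string.h>
#include <rte_acl.h>

RTE_ACL_RULE_DEF(frag_acl_rule, NUM_FIELDS_IPV6_1ST_FRAGMENT_ACL);

static int
standalone_acl_check(const uint8_t *pkt_data)
{
    struct rte_acl_param acl_param = {
        .name = "frag_acl_check",
        .socket_id = SOCKET_ID_ANY,
        .rule_size = RTE_ACL_RULE_SZ(NUM_FIELDS_IPV6_1ST_FRAGMENT_ACL),
        .max_rule_num = 8,
    };
    struct rte_acl_config cfg;
    struct frag_acl_rule rule;
    struct rte_acl_ctx *ctx;
    const uint8_t *data[1] = { pkt_data };
    uint32_t result = 0;
    int ret;

    ctx = rte_acl_create(&acl_param);
    if (ctx == NULL)
        return -1;

    /* one catch-all rule: next header == 44, everything else wildcarded */
    memset(&rule, 0, sizeof(rule));
    rule.data.category_mask = 1;
    rule.data.priority = 1;
    rule.data.userdata = 1;
    rule.field[0].value.u8 = 44;
    rule.field[0].mask_range.u8 = 0xff;
    rule.field[12].mask_range.u16 = 0xffff; /* full port ranges */
    rule.field[13].mask_range.u16 = 0xffff;

    ret = rte_acl_add_rules(ctx, (const struct rte_acl_rule *)&rule, 1);

    if (ret == 0) {
        memset(&cfg, 0, sizeof(cfg));
        cfg.num_categories = 1;
        cfg.num_fields = NUM_FIELDS_IPV6_1ST_FRAGMENT_ACL;
        memcpy(cfg.defs, field_format_ipv6_1st_fragment,
            sizeof(field_format_ipv6_1st_fragment));
        ret = rte_acl_build(ctx, &cfg);
    }

    if (ret == 0)
        ret = rte_acl_classify(ctx, data, &result, 1, 1);

    rte_acl_free(ctx);
    return ret;
}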
-- 
Victor
