From mboxrd@z Thu Jan  1 00:00:00 1970
From: Jerin Jacob
To: Jerin Jacob, Sunil Kumar Kori
CC: 
Date: Fri, 3 Apr 2020 21:06:50 +0530
Message-ID: <20200403153709.3703448-15-jerinj@marvell.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20200403153709.3703448-1-jerinj@marvell.com>
References: <20200329144342.1543749-1-jerinj@marvell.com> <20200403153709.3703448-1-jerinj@marvell.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain
Subject: [dpdk-dev] [PATCH v4 14/33] eal/trace: implement provider payload
List-Id: DPDK patches and discussions

From: Jerin Jacob

The trace payload functions such as rte_trace_ctf_* serve a dual
purpose. The first is to emit the payload for the registration
function; the second is to act as the trace memory emitter, aka the
provider payload. When used as the provider payload, these functions
copy the trace fields to trace memory based on the tracing mode.

Added the payload definitions under the ALLOW_EXPERIMENTAL_API define
so that out-of-tree applications can compile without the
ALLOW_EXPERIMENTAL_API flag.
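For example, an out-of-tree tracepoint emitter built on these helpers
could look like the sketch below. This is only an illustration of the
dual role described above; the handle name (__app_trace_foo), the
emitter name and its arguments are hypothetical and not part of this
patch:

	/* Per-tracepoint 64-bit handle maintained by the trace library;
	 * it carries the enable bit, the event id and the payload size. */
	extern uint64_t __app_trace_foo;

	static __rte_always_inline void
	app_trace_foo(uint32_t queue_id, const char *dev_name)
	{
		/* Checks the enable bit, reserves per-lcore trace memory
		 * and writes the event header (id + timestamp); returns
		 * early when the tracepoint is disabled. */
		__rte_trace_emit_header_generic(&__app_trace_foo);

		/* Provider payload: copy each field into the reserved
		 * trace memory. */
		rte_trace_ctf_u32(queue_id);
		rte_trace_ctf_string(dev_name);
	}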
Signed-off-by: Jerin Jacob
Signed-off-by: Sunil Kumar Kori
---
 lib/librte_eal/include/rte_trace_provider.h | 117 ++++++++++++++++++++
 1 file changed, 117 insertions(+)

diff --git a/lib/librte_eal/include/rte_trace_provider.h b/lib/librte_eal/include/rte_trace_provider.h
index 2257de85b..091853f45 100644
--- a/lib/librte_eal/include/rte_trace_provider.h
+++ b/lib/librte_eal/include/rte_trace_provider.h
@@ -9,6 +9,11 @@
 #ifndef _RTE_TRACE_PROVIDER_H_
 #define _RTE_TRACE_PROVIDER_H_
 
+#ifdef ALLOW_EXPERIMENTAL_API
+
+#include
+#include
+#include
 #include
 #include
 #include
@@ -40,4 +45,116 @@ struct __rte_trace_header {
 
 RTE_DECLARE_PER_LCORE(void *, trace_mem);
 
+static __rte_always_inline void*
+__rte_trace_mem_get(uint64_t in)
+{
+	struct __rte_trace_header *trace = RTE_PER_LCORE(trace_mem);
+	const uint16_t sz = in & __RTE_TRACE_FIELD_SIZE_MASK;
+
+	/* Trace memory is not initialized for this thread */
+	if (unlikely(trace == NULL)) {
+		__rte_trace_mem_per_thread_alloc();
+		trace = RTE_PER_LCORE(trace_mem);
+		if (unlikely(trace == NULL))
+			return NULL;
+	}
+	/* Check the wrap around case */
+	uint32_t offset = trace->offset;
+	if (unlikely((offset + sz) >= trace->len)) {
+		/* Disable the trace event if it is in DISCARD mode */
+		if (unlikely(in & __RTE_TRACE_FIELD_ENABLE_DISCARD))
+			return NULL;
+
+		offset = 0;
+	}
+	/* Align to event header size */
+	offset = RTE_ALIGN_CEIL(offset, __RTE_TRACE_EVENT_HEADER_SZ);
+	void *mem = RTE_PTR_ADD(&trace->mem[0], offset);
+	offset += sz;
+	trace->offset = offset;
+
+	return mem;
+}
+
+static __rte_always_inline void*
+__rte_trace_emit_ev_header(void *mem, uint64_t in)
+{
+	uint64_t val;
+
+	/* Event header [63:0] = id [63:48] | timestamp [47:0] */
+	val = rte_get_tsc_cycles() &
+		~(0xffffULL << __RTE_TRACE_EVENT_HEADER_ID_SHIFT);
+	val |= ((in & __RTE_TRACE_FIELD_ID_MASK) <<
+		(__RTE_TRACE_EVENT_HEADER_ID_SHIFT - __RTE_TRACE_FIELD_ID_SHIFT));
+
+	*(uint64_t *)mem = val;
+	return RTE_PTR_ADD(mem, __RTE_TRACE_EVENT_HEADER_SZ);
+}
+
+#define __rte_trace_emit_header_generic(t)\
+void *mem;\
+do {\
+	const uint64_t val = __atomic_load_n(t, __ATOMIC_ACQUIRE);\
+	if (likely(!(val & __RTE_TRACE_FIELD_ENABLE_MASK)))\
+		return;\
+	mem = __rte_trace_mem_get(val);\
+	if (unlikely(mem == NULL)) \
+		return;\
+	mem = __rte_trace_emit_ev_header(mem, val);\
+} while (0)
+
+#define __rte_trace_emit_header_dp(t)\
+	if (rte_trace_is_dp_enabled())\
+		return;\
+	__rte_trace_emit_header_generic(t);
+
+#define __rte_trace_emit_datatype(in)\
+do {\
+	memcpy(mem, &(in), sizeof(in));\
+	mem = RTE_PTR_ADD(mem, sizeof(in));\
+} while (0)
+
+#define rte_trace_ctf_u64(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_i64(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_u32(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_i32(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_u16(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_i16(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_u8(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_i8(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_int(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_long(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_float(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_ptr(in) __rte_trace_emit_datatype(in)
+#define rte_trace_ctf_double(in) __rte_trace_emit_datatype(in)
+
+#define rte_trace_ctf_string(in)\
+do {\
+	if (unlikely(in == NULL))\
+		return;\
+	rte_strscpy(mem, in, __RTE_TRACE_EMIT_STRING_LEN_MAX);\
+	mem = RTE_PTR_ADD(mem, __RTE_TRACE_EMIT_STRING_LEN_MAX);\
+} while (0)
+
+#else
+
+#define __rte_trace_emit_header_generic(t) RTE_SET_USED(t)
+#define __rte_trace_emit_header_dp(t) RTE_SET_USED(t)
+#define rte_trace_ctf_u64(in) RTE_SET_USED(in)
+#define rte_trace_ctf_i64(in) RTE_SET_USED(in)
+#define rte_trace_ctf_u32(in) RTE_SET_USED(in)
+#define rte_trace_ctf_i32(in) RTE_SET_USED(in)
+#define rte_trace_ctf_u16(in) RTE_SET_USED(in)
+#define rte_trace_ctf_i16(in) RTE_SET_USED(in)
+#define rte_trace_ctf_u8(in) RTE_SET_USED(in)
+#define rte_trace_ctf_i8(in) RTE_SET_USED(in)
+#define rte_trace_ctf_int(in) RTE_SET_USED(in)
+#define rte_trace_ctf_long(in) RTE_SET_USED(in)
+#define rte_trace_ctf_float(in) RTE_SET_USED(in)
+#define rte_trace_ctf_ptr(in) RTE_SET_USED(in)
+#define rte_trace_ctf_double(in) RTE_SET_USED(in)
+#define rte_trace_ctf_string(in) RTE_SET_USED(in)
+
+#endif
+
 #endif /* _RTE_TRACE_PROVIDER_H_ */
-- 
2.25.1