1 /* SPDX-License-Identifier: GPL-2.0 */
3 /* Stage 5 definitions for creating trace events */
/*
 * remember the offset of each array from the beginning of the event.
 */
/*
 * Fields should never declare an array: i.e. __field(int, arr[5])
 * If they do, it will cause issues in parsing and possibly corrupt the
 * events. To prevent that from happening, test the sizeof() a fictitious
 * type called "struct _test_no_array_##item" which will fail if "item"
 * contains array elements (like "arr[5]").
 *
 * If you hit this, use __array(int, arr, 5) instead.
 */
/*
 * Stage 5 records no offset for a plain field; only the no-array
 * check above is performed (type is otherwise ignored here).
 */
#undef __field
#define __field(type, item)					\
	{ (void)sizeof(struct _test_no_array_##item *); }
/*
 * Like __field() but with an explicit filter_type; both type and
 * filter_type are ignored in this stage — only the no-array check runs.
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)			\
	{ (void)sizeof(struct _test_no_array_##item *); }
/*
 * Struct-typed field: same no-array guard as __field(); the struct
 * type itself needs no offset bookkeeping in this stage.
 */
#undef __field_struct
#define __field_struct(type, item)				\
	{ (void)sizeof(struct _test_no_array_##item *); }
/*
 * Struct-typed field with an explicit filter_type; type and
 * filter_type are ignored here — only the no-array check runs.
 */
#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)		\
	{ (void)sizeof(struct _test_no_array_##item *); }
/*
 * Fixed-size arrays need no offset bookkeeping in this stage, so the
 * macro intentionally expands to nothing.
 */
#undef __array
#define __array(type, item, len)
/*
 * Reserve space for a dynamic array and record where it will live.
 * The per-item value in __data_offsets packs two 16-bit quantities:
 *   low 16 bits:  offset of the array data — the dynamic payload
 *                 accumulated so far (__data_size) plus the offset of
 *                 the __data section within the entry;
 *   high 16 bits: length of the array in bytes, (len) * sizeof(type).
 * __data_size is then advanced past this array.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__item_length = (len) * sizeof(type);			\
	__data_offsets->item = __data_size +			\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;		\
	__data_size += __item_length;
/*
 * NUL-terminated string: reserve strlen()+1 bytes of dynamic-array
 * space, substituting "(null)" when src is NULL (GNU "a ?: b"
 * extension), and stash the source pointer in item##_ptr_ —
 * presumably consumed by a later stage that copies the string.
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((const char *)(src) ? : "(null)") + 1)	\
	__data_offsets->item##_ptr_ = src;
/*
 * Length-bounded string: reserve (len) + 1 bytes (room for the NUL
 * terminator) and stash the source pointer in item##_ptr_.
 */
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)\
	__data_offsets->item##_ptr_ = src;
/*
 * vsnprintf-style string: space is sized by __trace_event_vstr_len()
 * from the format string and argument list.
 */
#undef __vstring
#define __vstring(item, fmt, ap) __dynamic_array(char, item,	\
		      __trace_event_vstr_len(fmt, ap))
/*
 * Relative dynamic array: like __dynamic_array(), but the stored
 * offset is made relative to the field following the 32-bit rel_loc
 * word, hence the subtraction of the rel_loc field's offset and of
 * sizeof(u32) itself.
 * NOTE(review): the "sizeof(u32);" term had been lost, leaving the
 * expression with a dangling "-"; restored per the upstream macro.
 */
#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item, len)			\
	__item_length = (len) * sizeof(type);			\
	__data_offsets->item = __data_size +			\
			       offsetof(typeof(*entry), __data) -	\
			       offsetof(typeof(*entry), __rel_loc_##item) -	\
			       sizeof(u32);			\
	__data_offsets->item |= __item_length << 16;		\
	__data_size += __item_length;
/*
 * Relative-offset counterpart of __string(): strlen()+1 bytes, with
 * "(null)" substituted for a NULL src (GNU "a ?: b" extension), and
 * the source pointer stashed in item##_ptr_.
 */
#undef __rel_string
#define __rel_string(item, src) __rel_dynamic_array(char, item,	\
		    strlen((const char *)(src) ? : "(null)") + 1)	\
	__data_offsets->item##_ptr_ = src;
/*
 * Relative-offset counterpart of __string_len(): reserves (len) + 1
 * bytes (room for the NUL terminator) and stashes the source pointer
 * in item##_ptr_.
 */
#undef __rel_string_len
#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)\
	__data_offsets->item##_ptr_ = src;
/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)
/*
 * Number of longs needed to hold nr_bits bits: the raw byte count
 * rounded up to a whole number of (BITS_PER_LONG / 8)-byte longs.
 */
#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)			\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
/*
 * Bitmask payload: stored as a dynamic array of unsigned long, sized
 * in whole longs for nr_bits bits.
 */
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))
/* A cpumask is a bitmask of nr_cpumask_bits bits. */
#undef __cpumask
#define __cpumask(item) __bitmask(item, nr_cpumask_bits)
/* Relative-offset counterpart of __bitmask(). */
#undef __rel_bitmask
#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))
/* Relative-offset counterpart of __cpumask(). */
#undef __rel_cpumask
#define __rel_cpumask(item) __rel_bitmask(item, nr_cpumask_bits)
/* Socket address: stored as a raw byte (u8) dynamic array of len bytes. */
#undef __sockaddr
#define __sockaddr(field, len) __dynamic_array(u8, field, len)
/* Relative-offset counterpart of __sockaddr(). */
#undef __rel_sockaddr
#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)