struct tracepoint {
const char *name; /* Tracepoint name */
- struct static_key key;
+ struct static_key_false key;
struct static_call_key *static_call_key;
void *static_call_tramp;
void *iterator;
#ifdef CONFIG_TRACEPOINTS
# define tracepoint_enabled(tp) \
- static_key_false(&(__tracepoint_##tp).key)
+ static_branch_unlikely(&(__tracepoint_##tp).key)
#else
# define tracepoint_enabled(tracepoint) false
#endif
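
Callers that can only include linux/tracepoint-defs.h keep using the same pattern after the conversion; a minimal sketch, assuming a tracepoint named foo_complete is defined elsewhere, with struct foo_req and do_trace_foo_complete() as hypothetical names:

#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(foo_complete);		/* hypothetical tracepoint */

struct foo_req;					/* hypothetical type */
void do_trace_foo_complete(struct foo_req *req);	/* hypothetical helper */

static inline void foo_finish(struct foo_req *req)
{
	/* With jump-label support this test is a patched branch, not a load. */
	if (tracepoint_enabled(foo_complete))
		do_trace_foo_complete(req);
}
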
#define __DECLARE_TRACE_RCU(name, proto, args, cond) \
static inline void trace_##name##_rcuidle(proto) \
{ \
- if (static_key_false(&__tracepoint_##name.key)) \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
__DO_TRACE(name, \
TP_ARGS(args), \
TP_CONDITION(cond), 1); \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
- if (static_key_false(&__tracepoint_##name.key)) \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
__DO_TRACE(name, \
TP_ARGS(args), \
TP_CONDITION(cond), 0); \
static inline bool \
trace_##name##_enabled(void) \
{ \
- return static_key_false(&__tracepoint_##name.key); \
+ return static_branch_unlikely(&__tracepoint_##name.key);\
}
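
On the reader side the generated inlines now go through the boolean-typed static branch API; a minimal standalone sketch with illustrative names (not taken from the patch), assuming CONFIG_JUMP_LABEL for the patched-branch behaviour:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(foo_key);	/* starts disabled */

static noinline void foo_slow_path(void)
{
	/* illustrative out-of-line work, only reached while the key is on */
}

static void foo_fast_path(void)
{
	/* Compiles to a straight-line no-op until foo_key is enabled. */
	if (static_branch_unlikely(&foo_key))
		foo_slow_path();
}
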
struct tracepoint __tracepoint_##_name __used \
__section("__tracepoints") = { \
.name = __tpstrtab_##_name, \
- .key = STATIC_KEY_INIT_FALSE, \
+ .key = STATIC_KEY_FALSE_INIT, \
.static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
.iterator = &__traceiter_##_name, \
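
STATIC_KEY_FALSE_INIT is simply the initializer that matches the new field type; outside of section-placed definitions like the one above it is normally spelled through the helper macro. An illustrative one-liner:

/* Expands to: struct static_key_false qux_key = STATIC_KEY_FALSE_INIT; */
static DEFINE_STATIC_KEY_FALSE(qux_key);
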
{
struct tracepoint *tp = event->tp;
- if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
+ if (unlikely(static_key_enabled(&tp->key))) {
struct tracepoint_func *probe_func_ptr;
synth_probe_func_t probe_func;
void *__data;
struct tracepoint *tp = &user->tracepoint;
char status = 0;
- if (atomic_read(&tp->key.enabled) > 0) {
+ if (static_key_enabled(&tp->key)) {
struct tracepoint_func *probe_func_ptr;
user_event_func_t probe_func;
* It's possible key.enabled disables after this check, however
* we don't mind if a few events are included in this condition.
*/
- if (likely(atomic_read(&tp->key.enabled) > 0)) {
+ if (likely(static_key_enabled(&tp->key))) {
struct tracepoint_func *probe_func_ptr;
user_event_func_t probe_func;
struct iov_iter copy;
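
These call sites only want the key's current on/off state, not the patched fast path, so the open-coded atomic_read() of key.enabled becomes static_key_enabled(); a minimal sketch with an illustrative key:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(bar_key);	/* illustrative */

static bool bar_is_active(void)
{
	/*
	 * static_key_enabled() accepts struct static_key, static_key_true
	 * and static_key_false alike; it just reports the key's count
	 * without relying on code patching.
	 */
	return static_key_enabled(&bar_key);
}
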
tracepoint_update_call(tp, tp_funcs);
/* Both iterator and static call handle NULL tp->funcs */
rcu_assign_pointer(tp->funcs, tp_funcs);
- static_key_enable(&tp->key);
+ static_branch_enable(&tp->key);
break;
case TP_FUNC_2: /* 1->2 */
/* Set iterator static call */
if (tp->unregfunc && static_key_enabled(&tp->key))
tp->unregfunc();
- static_key_disable(&tp->key);
+ static_branch_disable(&tp->key);
/* Set iterator static call */
tracepoint_update_call(tp, tp_funcs);
/* Both iterator and static call handle NULL tp->funcs */
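
The writer side flips the key with the matching static_branch helpers once the probe list has been published; a minimal sketch of that side alone, using an illustrative key:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(baz_key);	/* illustrative */

static void baz_set_tracing(bool on)
{
	/*
	 * static_branch_enable()/disable() update every
	 * static_branch_unlikely(&baz_key) site: a patched no-op/jump with
	 * jump-label support, or a plain test of the key otherwise.
	 */
	if (on)
		static_branch_enable(&baz_key);
	else
		static_branch_disable(&baz_key);
}
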