Merge branch 'akpm' (patches from Andrew)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 6c9b10d..2746fd8 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -23,22 +23,73 @@ struct ctl_table_header;
 struct task_struct;
 
 #ifdef CONFIG_CGROUP_BPF
+enum cgroup_bpf_attach_type {
+       CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+       CGROUP_INET_INGRESS = 0,
+       CGROUP_INET_EGRESS,
+       CGROUP_INET_SOCK_CREATE,
+       CGROUP_SOCK_OPS,
+       CGROUP_DEVICE,
+       CGROUP_INET4_BIND,
+       CGROUP_INET6_BIND,
+       CGROUP_INET4_CONNECT,
+       CGROUP_INET6_CONNECT,
+       CGROUP_INET4_POST_BIND,
+       CGROUP_INET6_POST_BIND,
+       CGROUP_UDP4_SENDMSG,
+       CGROUP_UDP6_SENDMSG,
+       CGROUP_SYSCTL,
+       CGROUP_UDP4_RECVMSG,
+       CGROUP_UDP6_RECVMSG,
+       CGROUP_GETSOCKOPT,
+       CGROUP_SETSOCKOPT,
+       CGROUP_INET4_GETPEERNAME,
+       CGROUP_INET6_GETPEERNAME,
+       CGROUP_INET4_GETSOCKNAME,
+       CGROUP_INET6_GETSOCKNAME,
+       CGROUP_INET_SOCK_RELEASE,
+       MAX_CGROUP_BPF_ATTACH_TYPE
+};
 
-extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
-#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
+#define CGROUP_ATYPE(type) \
+       case BPF_##type: return type
 
-#define BPF_CGROUP_STORAGE_NEST_MAX    8
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+       switch (attach_type) {
+       CGROUP_ATYPE(CGROUP_INET_INGRESS);
+       CGROUP_ATYPE(CGROUP_INET_EGRESS);
+       CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+       CGROUP_ATYPE(CGROUP_SOCK_OPS);
+       CGROUP_ATYPE(CGROUP_DEVICE);
+       CGROUP_ATYPE(CGROUP_INET4_BIND);
+       CGROUP_ATYPE(CGROUP_INET6_BIND);
+       CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+       CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+       CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+       CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+       CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+       CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+       CGROUP_ATYPE(CGROUP_SYSCTL);
+       CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+       CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+       CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+       CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+       CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+       CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+       CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+       CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+       CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+       default:
+               return CGROUP_BPF_ATTACH_TYPE_INVALID;
+       }
+}
 
-struct bpf_cgroup_storage_info {
-       struct task_struct *task;
-       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
-};
+#undef CGROUP_ATYPE
 
-/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
- * to use bpf cgroup storage simultaneously.
- */
-DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
-               bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
 
 #define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
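
Note on this hunk: the UAPI enum bpf_attach_type is left untouched; kernel call sites are expected to translate it with to_cgroup_bpf_attach_type() before indexing any MAX_CGROUP_BPF_ATTACH_TYPE-sized array. A minimal sketch of that pattern follows (illustrative only; the real attach path lives in kernel/bpf/cgroup.c, and the function name here is made up):

static int sketch_cgroup_bpf_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;

	/* e.g. BPF_CGROUP_INET_INGRESS maps to CGROUP_INET_INGRESS */
	atype = to_cgroup_bpf_attach_type(type);
	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;		/* no cgroup slot for this attach type */

	/* ... operate on cgrp->bpf.progs[atype] and cgrp->bpf.flags[atype] ... */
	return 0;
}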
@@ -80,15 +131,15 @@ struct bpf_prog_array;
 
 struct cgroup_bpf {
        /* array of effective progs in this cgroup */
-       struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
+       struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
 
        /* attached progs to this cgroup and attach flags
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
         * have either zero or one element
         * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
         */
-       struct list_head progs[MAX_BPF_ATTACH_TYPE];
-       u32 flags[MAX_BPF_ATTACH_TYPE];
+       struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+       u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
 
        /* list of cgroup shared storages */
        struct list_head storages;
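
The attach-flags semantics described in the comment above are driven from the attach syscall; userspace keeps using the UAPI enum. A hedged userspace sketch, assuming libbpf's bpf_prog_attach() and placeholder cgroup_fd/prog_fd descriptors:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* With BPF_F_ALLOW_MULTI each attach appends to the per-type list, up to
 * BPF_CGROUP_MAX_PROGS entries; with flags == 0 or BPF_F_ALLOW_OVERRIDE the
 * list holds at most one program.
 */
int attach_ingress_multi(int cgroup_fd, int prog_fd)
{
	return bpf_prog_attach(prog_fd, cgroup_fd,
			       BPF_CGROUP_INET_INGRESS, BPF_F_ALLOW_MULTI);
}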
@@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 
 int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
-                               enum bpf_attach_type type);
+                               enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_sk(struct sock *sk,
-                              enum bpf_attach_type type);
+                              enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
-                                     enum bpf_attach_type type,
+                                     enum cgroup_bpf_attach_type atype,
                                      void *t_ctx,
                                      u32 *flags);
 
 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
-                                    enum bpf_attach_type type);
+                                    enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
-                                     short access, enum bpf_attach_type type);
+                                     short access, enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
-                                  enum bpf_attach_type type);
+                                  enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
@@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        return BPF_CGROUP_STORAGE_SHARED;
 }
 
-static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
-                                        *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-       enum bpf_cgroup_storage_type stype;
-       int i, err = 0;
-
-       preempt_disable();
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
-                       continue;
-
-               this_cpu_write(bpf_cgroup_storage_info[i].task, current);
-               for_each_cgroup_storage_type(stype)
-                       this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
-                                      storage[stype]);
-               goto out;
-       }
-       err = -EBUSY;
-       WARN_ON_ONCE(1);
-
-out:
-       preempt_enable();
-       return err;
-}
-
-static inline void bpf_cgroup_storage_unset(void)
-{
-       int i;
-
-       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
-               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
-                       continue;
-
-               this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
-               return;
-       }
-}
-
 struct bpf_cgroup_storage *
 cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
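
The nesting-table helpers removed above are superseded elsewhere in this series by a run context carried on current, so the local-storage helper can locate the executing program without per-CPU nesting slots. A rough sketch of the idea; the struct and field names below are assumptions for illustration and are not defined in this header:

struct sketch_cg_run_ctx {
	struct bpf_run_ctx run_ctx;			/* linked via current->bpf_ctx */
	const struct bpf_prog_array_item *prog_item;	/* array item being executed */
};

/* The run-array loop publishes prog_item before each program runs and
 * restores the previous context afterwards; the cgroup local-storage helper
 * then derives its storage from prog_item instead of the removed per-CPU
 * bpf_cgroup_storage_info[] table, so no nesting limit is needed.
 */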
@@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                            \
 ({                                                                           \
        int __ret = 0;                                                        \
-       if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS))                      \
+       if (cgroup_bpf_enabled(CGROUP_INET_INGRESS))                  \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
-                                                   BPF_CGROUP_INET_INGRESS); \
+                                                   CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
 })
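
As a usage sketch (not part of this patch), a receive-path caller gates on the static key and treats any non-zero return as a drop; the hook name here is illustrative:

static inline int sketch_ingress_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;

	/* cgroup_bpf_enabled() expands to a static_branch_unlikely() test,
	 * so the common no-programs case costs a single patched jump */
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;	/* typically -EPERM when a program rejects the skb */

	return 0;
}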
@@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                              \
 ({                                                                            \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
+       if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
-                                                     BPF_CGROUP_INET_EGRESS); \
+                                                     CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
 })
 
-#define BPF_CGROUP_RUN_SK_PROG(sk, type)                                      \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype)                                     \
 ({                                                                            \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(type)) {                                        \
-               __ret = __cgroup_bpf_run_filter_sk(sk, type);                  \
+       if (cgroup_bpf_enabled(atype)) {                                               \
+               __ret = __cgroup_bpf_run_filter_sk(sk, atype);                 \
        }                                                                      \
        __ret;                                                                 \
 })
 
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                     \
-       BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+       BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
 
 #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                             \
-       BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+       BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
 
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                       \
-       BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+       BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
 
 #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                       \
-       BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+       BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
 
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)                                       \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)                                      \
 ({                                                                            \
        u32 __unused_flags;                                                    \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(type))                                          \
-               __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
+       if (cgroup_bpf_enabled(atype))                                         \
+               __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,     \
                                                          NULL,                \
                                                          &__unused_flags);    \
        __ret;                                                                 \
 })
 
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                   \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)                  \
 ({                                                                            \
        u32 __unused_flags;                                                    \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(type))   {                                      \
+       if (cgroup_bpf_enabled(atype))  {                                      \
                lock_sock(sk);                                                 \
-               __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
+               __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,     \
                                                          t_ctx,               \
                                                          &__unused_flags);    \
                release_sock(sk);                                              \
@@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
  * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
  * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
  */
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags)               \
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)              \
 ({                                                                            \
        u32 __flags = 0;                                                       \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(type))   {                                      \
+       if (cgroup_bpf_enabled(atype))  {                                      \
                lock_sock(sk);                                                 \
-               __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
+               __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,     \
                                                          NULL, &__flags);     \
                release_sock(sk);                                              \
                if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)            \
@@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 })
 
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)                                    \
-       ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) ||                      \
-         cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) &&                     \
+       ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||                  \
+         cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&                 \
         (sk)->sk_prot->pre_connect)
 
 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                          \
-       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
 
 #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                          \
-       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
 
 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                     \
-       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
 
 #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                     \
-       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
 
 #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)                       \
-       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
 
 #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                       \
-       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
 
 #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
-       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
 
 #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
-       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
 
 /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
  * fullsock and its parent fullsock cannot be traced by
@@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)                  \
 ({                                                                     \
        int __ret = 0;                                                  \
-       if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS))                    \
+       if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))                        \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk,            \
                                                         sock_ops,      \
-                                                        BPF_CGROUP_SOCK_OPS); \
+                                                        CGROUP_SOCK_OPS); \
        __ret;                                                          \
 })
 
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                \
 ({                                                                            \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) {       \
+       if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {       \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);               \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
-                                                        BPF_CGROUP_SOCK_OPS); \
+                                                        CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
 })
 
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)        \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)       \
 ({                                                                           \
        int __ret = 0;                                                        \
-       if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE))                            \
-               __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+       if (cgroup_bpf_enabled(CGROUP_DEVICE))                        \
+               __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
                                                          access,             \
-                                                         BPF_CGROUP_DEVICE); \
+                                                         CGROUP_DEVICE); \
                                                                              \
        __ret;                                                                \
 })
@@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)  \
 ({                                                                            \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL))                             \
+       if (cgroup_bpf_enabled(CGROUP_SYSCTL))                         \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos,        \
-                                                      BPF_CGROUP_SYSCTL);     \
+                                                      CGROUP_SYSCTL);     \
        __ret;                                                                 \
 })
 
@@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                       kernel_optval)                          \
 ({                                                                            \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT))                         \
+       if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT))                             \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,        \
                                                           optname, optval,    \
                                                           optlen,             \
@@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                              \
 ({                                                                            \
        int __ret = 0;                                                         \
-       if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))                         \
+       if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                get_user(__ret, optlen);                                       \
        __ret;                                                                 \
 })
@@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                       max_optlen, retval)                     \
 ({                                                                            \
        int __ret = retval;                                                    \
-       if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))                         \
+       if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt ||                 \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                        tcp_bpf_bypass_getsockopt,             \
@@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                            optlen, retval)                    \
 ({                                                                            \
        int __ret = retval;                                                    \
-       if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))                         \
+       if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                             \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern(               \
                        sock, level, optname, optval, optlen, retval);         \
        __ret;                                                                 \
@@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
        return -EINVAL;
 }
 
-static inline int bpf_cgroup_storage_set(
-       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
-static inline void bpf_cgroup_storage_unset(void) {}
 static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
 static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
        return 0;
 }
 
-#define cgroup_bpf_enabled(type) (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \