/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces.  Include this file
 * directly only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME		64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};
/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,

	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
};
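/*
 * These are bit numbers rather than masks and are used with the standard
 * bitops on cgroup->flags.  A minimal sketch (the callee is hypothetical):
 *
 *	if (test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags))
 *		notify_release_agent(cgrp);
 */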
/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries.  If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Enable the cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
};
/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2),	/* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */
	CFTYPE_DEBUG		= (1 << 5),	/* create when cgroup_debug */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
};
/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
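/*
 * For illustration, a controller can have the handle recorded by setting
 * cftype->file_offset and later generate file modified notifications with
 * cgroup_file_notify() from linux/cgroup.h.  A sketch with hypothetical
 * "demo" names:
 *
 *	static struct cftype demo_files[] = {
 *		{
 *			.name		= "demo.events",
 *			.file_offset	= offsetof(struct demo_css, events_file),
 *			.seq_show	= demo_events_show,
 *		},
 *		{ }
 *	};
 *
 *	cgroup_file_notify(&dcss->events_file);
 */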
/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* siblings list anchored at the parent's ->children */
	struct list_head sibling;
	struct list_head children;

	/* flush target list anchored at cgrp->rstat_css_list */
	struct list_head rstat_css_node;

	/*
	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr and
	 * used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children.  Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css.  Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;
};
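/*
 * Controllers typically embed this struct in their own per-cgroup state
 * and convert between the two with container_of().  A minimal sketch with
 * a hypothetical "demo" controller:
 *
 *	struct demo_css {
 *		struct cgroup_subsys_state css;
 *		u64 weight;
 *	};
 *
 *	static struct demo_css *css_to_demo(struct cgroup_subsys_state *css)
 *	{
 *		return container_of(css, struct demo_css, css);
 *	}
 */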
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem.  This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self.  If threaded,
	 * to the matching cset of the nearest domain ancestor.  The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this css_set.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in.  Protected by
	 * css_set_lock, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with.  The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all css_sets in the same hash
	 * slot.  Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination.  Protected by cgroup_mutex.
	 */
	struct list_head mg_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * two fields are set.  mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration.  mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to.  Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
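/*
 * css_sets are internal to cgroup core.  To walk the tasks attached to a
 * css, controllers use the iterator API declared in linux/cgroup.h, which
 * internally follows the ->tasks lists above.  A sketch (the per-task
 * work is hypothetical):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		demo_visit_task(task);
 *	css_task_iter_end(&it);
 */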
struct cgroup_base_stat {
	struct task_cputime cputime;
};
/*
 * rstat - cgroup scalable recursive statistics.  Accounting is done
 * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since the last read)
 * instead of O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next - and the fields which track basic
 * resource statistics on top of it - bsync, bstat and last_bstat.  A
 * usage sketch follows the struct definition.
 */
struct cgroup_rstat_cpu {
	/*
	 * ->bsync protects ->bstat.  These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading.  These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_rstat_cpu_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};
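/*
 * Usage sketch (the entry points are declared in linux/cgroup.h and
 * implemented in kernel/cgroup/rstat.c; the accounting helper below is
 * hypothetical):
 *
 *	hot path - account locally, then link into the updated tree:
 *		demo_add_event(css);
 *		cgroup_rstat_updated(css->cgroup, smp_processor_id());
 *
 *	read path - fold pending per-cpu deltas up the hierarchy:
 *		cgroup_rstat_flush(css->cgroup);
 */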
struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
	 * new cgroup is assigned the smallest available ID.
	 *
	 * Allocating/Removing ID must be protected by cgroup_mutex.
	 */
	int id;

	/*
	 * The depth this cgroup is at.  The root is at depth zero and each
	 * step down the hierarchy increments the level.  This along with
	 * ancestor_ids[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy; see the
	 * example following this struct.
	 */
	int level;

	/* Maximum allowed descent tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant
	 * cgroups.  Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is the maximum allowed number of descendant cgroups.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets.  The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type.  Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */
	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled.  Controller knobs
	 * are made available iff it's enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsystems disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled.  The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self.  If threaded, it points to the nearest
	 * domain ancestor.  Inside a threaded subtree, cgroups are exempt
	 * from process granularity and the no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;
	struct cgroup *old_dom_cgrp;		/* used while enabling threaded */

	/* per-cpu recursive resource statistics */
	struct cgroup_rstat_cpu __percpu *rstat_cpu;
	struct list_head rstat_css_list;

	/* cgroup basic resource statistics */
	struct cgroup_base_stat pending_bstat;	/* pending from children */
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime;	/* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to track pressure stalls */
	struct psi_group psi;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* If there is block congestion on this cgroup. */
	atomic_t congestion_count;

	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};
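/*
 * For illustration, ->level and ->ancestor_ids[] allow an O(1) descendant
 * test: @pos descends from @anc iff @pos is at least as deep and records
 * @anc's id at @anc's level.  A sketch (cf. cgroup_is_descendant() in
 * linux/cgroup.h, which additionally checks that both cgroups are on the
 * same hierarchy):
 *
 *	static bool demo_is_descendant(struct cgroup *pos, struct cgroup *anc)
 *	{
 *		return pos->level >= anc->level &&
 *		       pos->ancestor_ids[anc->level] == anc->id;
 *	}
 */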
/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The root cgroup.  Root is destroyed on its release. */
	struct cgroup cgrp;

	/* for cgrp->ancestor_ids[0] */
	int cgrp_ancestor_id_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};
/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  A zero length string indicates
	 * the end of a cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field.  cgroup will record the handle of
	 * the created file into it.  The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping.  Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer.  Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64().
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace.  Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64().
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.  Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lockdep_key;
#endif
};
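/*
 * For illustration, files are declared as an array of cftypes terminated
 * by a zero length name, with names carrying the subsystem prefix by
 * convention.  A sketch with hypothetical "demo" handlers:
 *
 *	static struct cftype demo_files[] = {
 *		{
 *			.name		= "demo.weight",
 *			.read_u64	= demo_weight_read,
 *			.write_u64	= demo_weight_write,
 *		},
 *		{ }
 *	};
 */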
/*
 * Control Group subsystem type.
 * See Documentation/cgroup-v1/cgroups.txt for details.
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task);
	void (*cancel_fork)(struct task_struct *task);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*free)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;
	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint.  This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy.  In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and threaded
	 * controllers should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded.  implicit && !threaded is not supported.
	 */
	bool threaded:1;
	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message in such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy:1;
	bool warned_broken_hierarchy:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes.  Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered.  The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems.  When such a subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available.  Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled.  The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;
};
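/*
 * A minimal controller sketch (a hypothetical "demo" subsystem; real
 * controllers are enumerated via linux/cgroup_subsys.h rather than
 * registered by hand):
 *
 *	struct cgroup_subsys demo_cgrp_subsys = {
 *		.css_alloc	= demo_css_alloc,
 *		.css_free	= demo_css_free,
 *		.dfl_cftypes	= demo_files,
 *		.legacy_cftypes	= demo_files,
 *	};
 */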
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
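/*
 * The pair above brackets operations which must not race with threadgroup
 * changes such as fork and exec.  Typical usage, a sketch:
 *
 *	cgroup_threadgroup_change_begin(tsk);
 *	... migrate tsk's threadgroup to another cgroup ...
 *	cgroup_threadgroup_change_end(tsk);
 */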
#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */
#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly set
 * attributes on each sock which can then be tested by the network layer.
 * On the default hierarchy, each sock is associated with the cgroup it was
 * created in and the networking layer can match the cgroup directly.
 *
 * To avoid carrying all three cgroup related fields separately in sock,
 * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
 * On boot, sock_cgroup_data records the cgroup that the sock was created
 * in so that cgroup2 matches can be made; however, once either net_prio or
 * net_cls starts being used, the area is overridden to carry prioidx and/or
 * classid.  The two modes are distinguished by whether the lowest bit is
 * set.  A clear bit indicates the cgroup pointer while a set bit indicates
 * prioidx and classid.
 *
 * While userland may start using net_prio or net_cls at any time, once
 * either is used, cgroup2 matching no longer works.  There is no reason to
 * mix the two and this is in line with how legacy and v2 compatibility is
 * handled.  On mode switch, cgroup references which are already being
 * pointed to by socks may be leaked.  While this can be remedied by adding
 * synchronization around sock_cgroup_data, given that the number of leaked
 * cgroups is bound and highly unlikely to be high, this seems to be the
 * better trade-off.
 */
struct sock_cgroup_data {
	union {
#ifdef __LITTLE_ENDIAN
		struct {
			u8	is_data;
			u8	padding;
			u16	prioidx;
			u32	classid;
		} __packed;
#else
		struct {
			u32	classid;
			u16	prioidx;
			u8	padding;
			u8	is_data;
		} __packed;
#endif
		u64		val;
	};
};
/*
 * There's a theoretical window where the following accessors race with
 * updaters and return part of the previous pointer as the prioidx or
 * classid.  Such races are short-lived and the result isn't critical.
 */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
	/* fallback to 1 which is always the ID of the root cgroup */
	return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	/* fallback to 0 which is the unconfigured default classid */
	return (skcd->is_data & 1) ? skcd->classid : 0;
}
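/*
 * For illustration, the network layer can read the effective values
 * without caring which mode the field is in, e.g.:
 *
 *	u16 prio = sock_cgroup_prioidx(&sk->sk_cgrp_data);
 *	u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 */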
/*
 * If invoked concurrently, the updaters may clobber each other.  The
 * caller is responsible for synchronization.
 */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.prioidx = prioidx;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_classid(&skcd_buf) == classid)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.classid = classid;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}
#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#endif	/* _LINUX_CGROUP_DEFS_H */