/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces.  Include this file
 * directly only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
#include <linux/psi_types.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME		64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
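
/*
 * Illustrative sketch of the expansion above (not part of the original
 * header): each SUBSYS(name) line in <linux/cgroup_subsys.h> turns into
 * one enumerator.  Assuming, for example, that the cpu and memory
 * controllers are built in, the enum expands roughly to:
 *
 *	enum cgroup_subsys_id {
 *		cpu_cgrp_id,
 *		memory_cgrp_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 */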

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,

	/* Control group has to be frozen. */
	CGRP_FREEZE,

	/* Cgroup is frozen. */
	CGRP_FROZEN,
};

/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries.  If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
};

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2),	/* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */
	CFTYPE_DEBUG		= (1 << 5),	/* create when cgroup_debug */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
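
/*
 * Illustrative sketch (hypothetical controller state, not part of the
 * original header): a controller embeds a cgroup_file next to its css,
 * points cftype->file_offset at it, and can later generate "file changed"
 * notifications for it with cgroup_file_notify():
 *
 *	struct foo_css {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	// in the cftype describing the "events" file:
 *	//	.file_offset = offsetof(struct foo_css, events_file),
 *
 *	// when an event occurs:
 *	//	cgroup_file_notify(&foo_cs->events_file);
 */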

/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* siblings list anchored at the parent's ->children */
	struct list_head sibling;
	struct list_head children;

	/* flush target list anchored at cgrp->rstat_css_list */
	struct list_head rstat_css_node;

	/*
	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr,
	 * which is used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children.  Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css.  Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem.  This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self.  If threaded,
	 * to the matching cset of the nearest domain ancestor.  The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;
211 * Lists running through all tasks using this cgroup group.
212 * mg_tasks lists tasks which belong to this cset but are in the
213 * process of being migrated out or in. Protected by
214 * css_set_rwsem, but, during migration, once tasks are moved to
215 * mg_tasks, it can be read safely while holding cgroup_mutex.
217 struct list_head tasks;
218 struct list_head mg_tasks;
220 /* all css_task_iters currently walking this cset */
221 struct list_head task_iters;
224 * On the default hierarhcy, ->subsys[ssid] may point to a css
225 * attached to an ancestor instead of the cgroup this css_set is
226 * associated with. The following node is anchored at
227 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
228 * iterate through all css's attached to a given cgroup.
230 struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
232 /* all threaded csets whose ->dom_cset points to this cset */
233 struct list_head threaded_csets;
234 struct list_head threaded_csets_node;
237 * List running through all cgroup groups in the same hash
238 * slot. Protected by css_set_lock
240 struct hlist_node hlist;
243 * List of cgrp_cset_links pointing at cgroups referenced from this
244 * css_set. Protected by css_set_lock.
246 struct list_head cgrp_links;
249 * List of csets participating in the on-going migration either as
250 * source or destination. Protected by cgroup_mutex.
252 struct list_head mg_preload_node;
253 struct list_head mg_node;
256 * If this cset is acting as the source of migration the following
257 * two fields are set. mg_src_cgrp and mg_dst_cgrp are
258 * respectively the source and destination cgroups of the on-going
259 * migration. mg_dst_cset is the destination cset the target tasks
260 * on this cset should be migrated to. Protected by cgroup_mutex.
262 struct cgroup *mg_src_cgrp;
263 struct cgroup *mg_dst_cgrp;
264 struct css_set *mg_dst_cset;
266 /* dead and being drained, ignore for migration */
269 /* For RCU-protected deletion */
270 struct rcu_head rcu_head;
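
/*
 * Illustrative sketch (not part of the original header): with a task's
 * css_set in hand, finding the css for a given controller is a single
 * array lookup, e.g. under rcu_read_lock() and assuming the memory
 * controller is built in:
 *
 *	struct cgroup_subsys_state *css =
 *		rcu_dereference(task->cgroups)->subsys[memory_cgrp_id];
 *
 * The task_css() and task_css_set() helpers in <linux/cgroup.h> wrap this
 * pattern with the appropriate RCU/lockdep checks.
 */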

struct cgroup_base_stat {
	struct task_cputime cputime;
};

/*
 * rstat - cgroup scalable recursive statistics.  Accounting is done
 * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next - and the fields which track basic
 * resource statistics on top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_cpu {
	/*
	 * ->bsync protects ->bstat.  These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading.  These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, a singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_rstat_cpu_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};
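
/*
 * Illustrative sketch (hypothetical controller, not part of the original
 * header): writers only mark the cgroup as updated on the local CPU;
 * readers flush first, which walks just the updated tree described above.
 *
 *	static void foo_charge(struct cgroup *cgrp, u64 delta)
 *	{
 *		this_cpu_add(foo_counter, delta);	// hypothetical per-cpu counter
 *		cgroup_rstat_updated(cgrp, smp_processor_id());
 *	}
 *
 *	static u64 foo_read(struct cgroup *cgrp)
 *	{
 *		cgroup_rstat_flush(cgrp);	// consumes the updated tree
 *		return atomic64_read(&foo_total);	// hypothetical aggregate
 *	}
 *
 * The per-cpu deltas are folded into the aggregate from the subsystem's
 * ->css_rstat_flush() callback.
 */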

struct cgroup_freezer_state {
	/* Should the cgroup and its descendants be frozen. */
	bool freeze;

	/* Should the cgroup actually be frozen? */
	int e_freeze;

	/* Fields below are protected by css_set_lock */

	/* Number of frozen descendant cgroups */
	int nr_frozen_descendants;

	/*
	 * Number of tasks which are counted as frozen:
	 * frozen, SIGSTOPped, and PTRACEd.
	 */
	int nr_frozen_tasks;
};

struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
	 * new cgroup is assigned the smallest available ID.
	 *
	 * Allocating/Removing ID must be protected by cgroup_mutex.
	 */
	int id;

	/*
	 * The depth this cgroup is at.  The root is at depth zero and each
	 * step down the hierarchy increments the level.  This along with
	 * ancestor_ids[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;

	/* Maximum allowed descent tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant
	 * cgroups.  Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is the maximum allowed number of descendant
	 * cgroups.
	 *
	 * nr_descendants and nr_dying_descendants are protected
	 * by cgroup_mutex and css_set_lock.  It's fine to read them holding
	 * either cgroup_mutex or css_set_lock; for writing, both locks
	 * should be held.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets.  The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type.  Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled.  Controller knobs
	 * are made available iff it's enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsys disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled.  The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self.  If threaded, it points to the nearest
	 * domain ancestor.  Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;
	struct cgroup *old_dom_cgrp;		/* used while enabling threaded */

	/* per-cpu recursive resource statistics */
	struct cgroup_rstat_cpu __percpu *rstat_cpu;
	struct list_head rstat_css_list;

	/* cgroup basic resource statistics */
	struct cgroup_base_stat pending_bstat;	/* pending from children */
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime;	/* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to track pressure stalls */
	struct psi_group psi;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* If there is block congestion on this cgroup. */
	atomic_t congestion_count;

	/* Used to store internal freezer state */
	struct cgroup_freezer_state freezer;

	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};
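
/*
 * Illustrative sketch (not part of the original header): ->level together
 * with ->ancestor_ids[] allows an O(1) ancestry test without walking the
 * hierarchy, roughly:
 *
 *	static bool foo_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
 *	{
 *		if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
 *			return false;
 *		return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
 *	}
 *
 * The real helper is cgroup_is_descendant() in <linux/cgroup.h>.
 */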

/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The root cgroup.  Root is destroyed on its release. */
	struct cgroup cgrp;

	/* for cgrp->ancestor_ids[0] */
	int cgrp_ancestor_id_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field.  cgroup will record the handle of
	 * the created file into it.  The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping.  Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer.  Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64().
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace.  Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64().
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.  Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

	__poll_t (*poll)(struct kernfs_open_file *of,
			 struct poll_table_struct *pt);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lockdep_key;
#endif
};
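
/*
 * Illustrative sketch (hypothetical "foo" controller, not part of the
 * original header): a minimal cftype array using the read_u64/write_u64
 * shortcuts.  The array is zero-terminated as described above and would
 * typically be hung off cgroup_subsys->dfl_cftypes and/or ->legacy_cftypes.
 *
 *	static u64 foo_limit_read(struct cgroup_subsys_state *css,
 *				  struct cftype *cft)
 *	{
 *		return 0;	// report the current limit for @css
 *	}
 *
 *	static int foo_limit_write(struct cgroup_subsys_state *css,
 *				   struct cftype *cft, u64 val)
 *	{
 *		return 0;	// apply @val, or return an error
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "limit",	// typically exposed as "foo.limit"
 *			.read_u64 = foo_limit_read,
 *			.write_u64 = foo_limit_write,
 *		},
 *		{ }	// zero length name terminates the array
 *	};
 */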

/*
 * Control Group subsystem type.
 * See Documentation/cgroup-v1/cgroups.txt for details
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task);
	void (*cancel_fork)(struct task_struct *task);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*release)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint.  This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy.  In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and threaded
	 * controllers should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded.  implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message on such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy:1;
	bool warned_broken_hierarchy:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes.  Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered.  The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems.  When such a subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available.  Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled.  The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;
};
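
/*
 * Illustrative sketch (continuing the hypothetical "foo" controller from
 * the cftype sketch above, not part of the original header): a minimal
 * controller fills in css_alloc/css_free and its cftypes.  The SUBSYS()
 * entry in <linux/cgroup_subsys.h> expects the instance to be named
 * <name>_cgrp_subsys.
 *
 *	static struct cgroup_subsys_state *
 *	foo_css_alloc(struct cgroup_subsys_state *parent_css)
 *	{
 *		struct foo_css *fc = kzalloc(sizeof(*fc), GFP_KERNEL);
 *
 *		return fc ? &fc->css : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void foo_css_free(struct cgroup_subsys_state *css)
 *	{
 *		kfree(container_of(css, struct foo_css, css));
 *	}
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_files,
 *	};
 */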

extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
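
/*
 * Illustrative sketch of the intended pairing (the real callers live in
 * the fork/exit and cgroup migration paths, not in this header):
 *
 *	cgroup_threadgroup_change_begin(current);
 *	// publish the change to current's threadgroup, e.g. link a new thread
 *	cgroup_threadgroup_change_end(current);
 *
 * Migration takes cgroup_threadgroup_rwsem for writing, so a threadgroup
 * cannot be moved between cgroups in the middle of such a change.
 */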

#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly set
 * attributes on each sock which can then be tested by the network layer.
 * On the default hierarchy, each sock is associated with the cgroup it was
 * created in and the networking layer can match the cgroup directly.
 *
 * To avoid carrying all three cgroup related fields separately in sock,
 * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
 * On boot, sock_cgroup_data records the cgroup that the sock was created
 * in so that cgroup2 matches can be made; however, once either net_prio or
 * net_cls starts being used, the area is overridden to carry prioidx and/or
 * classid.  The two modes are distinguished by whether the lowest bit is
 * set.  A clear bit indicates the cgroup pointer while a set bit indicates
 * prioidx and classid.
 *
 * While userland may start using net_prio or net_cls at any time, once
 * either is used, cgroup2 matching no longer works.  There is no reason to
 * mix the two and this is in line with how legacy and v2 compatibility is
 * handled.  On mode switch, cgroup references which are already being
 * pointed to by socks may be leaked.  While this can be remedied by adding
 * synchronization around sock_cgroup_data, given that the number of leaked
 * cgroups is bound and highly unlikely to be high, this seems to be the
 * right solution.
 */
struct sock_cgroup_data {
	union {
#ifdef __LITTLE_ENDIAN
		struct {
			u8	is_data;
			u8	padding;
			u16	prioidx;
			u32	classid;
		} __packed;
#else
		struct {
			u32	classid;
			u16	prioidx;
			u8	padding;
			u8	is_data;
		} __packed;
#endif
		u64		val;
	};
};

/*
 * There's a theoretical window where the following accessors race with
 * updaters and return part of the previous pointer as the prioidx or
 * classid.  Such races are short-lived and the result isn't critical.
 */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
	/* fallback to 1 which is always the ID of the root cgroup */
	return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	/* fallback to 0 which is the unconfigured default classid */
	return (skcd->is_data & 1) ? skcd->classid : 0;
}

/*
 * If invoked concurrently, the updaters may clobber each other.  The
 * caller is responsible for synchronization.
 */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.prioidx = prioidx;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_classid(&skcd_buf) == classid)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.classid = classid;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}
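
/*
 * Worked example of the overloading (sketch, not part of the original
 * header): a zero-initialized sock_cgroup_data is in cgroup-pointer mode,
 * so the fallbacks apply; the first net_prio/net_cls write sets the low
 * bit and switches it to data mode.
 *
 *	struct sock_cgroup_data skcd = { };	// hypothetical local instance
 *
 *	sock_cgroup_prioidx(&skcd);		// 1, root cgroup fallback
 *	sock_cgroup_classid(&skcd);		// 0, unconfigured default
 *
 *	sock_cgroup_set_prioidx(&skcd, 5);	// flips to data mode, prioidx = 5
 *	sock_cgroup_prioidx(&skcd);		// now 5
 */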

#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#endif	/* _LINUX_CGROUP_DEFS_H */