/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}
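
/*
 * Illustrative sketch (not part of this header; the my_* names are
 * hypothetical): a typical mmgrab()/mmdrop() pairing for a context that
 * must outlive the owning task but never touches user memory.
 *
 *	struct my_ctx {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void my_ctx_init(struct my_ctx *ctx, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);		// mm_count++: mm_struct stays allocated,
 *		ctx->mm = mm;		// but its mappings may still be torn down
 *	}
 *
 *	static void my_ctx_destroy(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);	// drop mm_count; last ref frees via __mmdrop()
 *	}
 */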

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);

	__mmdrop(mm);
}

/* Defer the final __mmdrop() to process context via a workqueue. */
static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
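
/*
 * Illustrative sketch (hypothetical my_use_mm()): taking a temporary
 * reference on an address space that might already be on its way down.
 * mmget_not_zero() fails once mm_users has dropped to zero.
 *
 *	static int my_use_mm(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return -ESRCH;	// address space already gone
 *		// ... short-lived access to the address space ...
 *		mmput(mm);
 *		return 0;
 *	}
 */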

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from the async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
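
/*
 * Illustrative sketch (hypothetical my_inspect_task()): the usual pattern
 * for reaching another task's address space. get_task_mm() returns NULL
 * for kernel threads and for tasks whose mm is already going away; the
 * reference it takes must be dropped with mmput().
 *
 *	static void my_inspect_task(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return;
 *		// ... e.g. walk the task's mappings ...
 *		mmput(mm);
 *	}
 */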

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check; it can be a false negative. But we do not care: if init or
	 * another oom-unkillable task does this, it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
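
/*
 * Illustrative sketch: the effect of the scoped flags on an allocation
 * mask. Inside a memalloc_noio_save() section (see below), a GFP_KERNEL
 * request is degraded so it can neither start I/O nor re-enter a fs:
 *
 *	gfp_t mask = current_gfp_context(GFP_KERNEL);
 *	// mask == GFP_KERNEL & ~(__GFP_IO | __GFP_FS) == GFP_NOIO here
 *
 * Most code never calls this directly; allocator internals consult it so
 * that the marked scope covers every nested allocation.
 */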

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
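
/*
 * Illustrative sketch (hypothetical my_resume()): marking a region where
 * allocations must not recurse into the I/O layer, e.g. on a block device
 * resume path that memory reclaim could otherwise deadlock against.
 *
 *	static int my_resume(struct my_dev *dev)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *		int ret = my_dev_reinit(dev);	// GFP_KERNEL allocs act as NOIO
 *
 *		memalloc_noio_restore(noio_flags);
 *		return ret;
 *	}
 *
 * The save/restore pair nests correctly: restore only clears the flag if
 * it was already clear at save time.
 */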

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
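
/*
 * Illustrative sketch (hypothetical my_fs_commit()): a filesystem holding
 * locks that its own reclaim/writeback path could take marks the whole
 * region NOFS instead of sprinkling GFP_NOFS at each allocation site.
 *
 *	static int my_fs_commit(struct my_fs_info *fsi)
 *	{
 *		unsigned int nofs_flags = memalloc_nofs_save();
 *		int ret = my_fs_do_commit(fsi);	// allocs cannot re-enter the fs
 *
 *		memalloc_nofs_restore(nofs_flags);
 *		return ret;
 *	}
 */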

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
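
/*
 * Illustrative sketch: PF_MEMALLOC grants access to memory reserves and
 * prevents recursion into direct reclaim, so the scope must be short and
 * is normally reserved for code that is itself making progress on behalf
 * of reclaim (my_reclaim_writeout() is a hypothetical stand-in):
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *	ret = my_reclaim_writeout(page);
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */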

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMBARRIER */

#endif /* _LINUX_SCHED_MM_H */