/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

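/*
 * Illustrative sketch, not part of this header: keeping an mm_struct pinned
 * across a deferred action.  mmgrab()/mmdrop() only pin the mm_struct itself
 * (mm_count); the underlying address space may still be torn down, so a user
 * that needs the mappings must also take an mm_users reference (see mmget()
 * below).  The tracker structure and helper names are hypothetical.
 *
 *	static void tracker_attach(struct my_tracker *t, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);		(pin may be held long-term)
 *		t->mm = mm;
 *	}
 *
 *	static void tracker_detach(struct my_tracker *t)
 *	{
 *		mmdrop(t->mm);		(release the pin taken above)
 *		t->mm = NULL;
 *	}
 */
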
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from async context. Can
 * be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
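/*
 * Illustrative sketch, not part of this header: inspecting another task's
 * address space.  get_task_mm() returns NULL for kernel threads and for
 * tasks whose mm is already going away; on success it takes an mm_users
 * reference that must be dropped with mmput().  The VMA walk is indicated
 * only schematically, and mmap_sem is the lock name used by this kernel
 * version.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		...inspect mm->mmap here...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */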
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_NOCMA implies no allocation from the CMA region.
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	if (unlikely(current->flags &
		     (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context,
		 * so always make sure it takes precedence.
		 */
		if (current->flags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (current->flags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
#ifdef CONFIG_CMA
		if (current->flags & PF_MEMALLOC_NOCMA)
			flags &= ~__GFP_MOVABLE;
#endif
	}
	return flags;
}

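/*
 * Illustrative sketch, not part of this header: the effect of a scoped flag
 * on an allocation mask.  Inside a memalloc_nofs_save()/restore() section
 * (declared further down in this header), a GFP_KERNEL request is narrowed
 * by current_gfp_context() to GFP_NOFS before it reaches reclaim:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	gfp_t effective = current_gfp_context(GFP_KERNEL);
 *		(effective == (GFP_KERNEL & ~__GFP_FS), i.e. GFP_NOFS)
 *
 *	memalloc_nofs_restore(nofs_flags);
 */
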
#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

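/*
 * Illustrative sketch, not part of this header: a device-teardown path that
 * must not recurse into the I/O layer through direct reclaim.  The driver
 * structure and the surrounding function are hypothetical; the point is the
 * save/restore pairing and passing the saved value back unchanged.
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *
 *		void *buf = kmalloc(len, GFP_KERNEL);
 *			(reclaim triggered here will not issue I/O)
 *		...
 *		memalloc_noio_restore(noio_flags);
 *	}
 */
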
/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

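/*
 * Illustrative sketch, not part of this header: a filesystem that holds a
 * transaction open marks the whole section NOFS once, instead of passing
 * GFP_NOFS at every allocation site inside it.  The transaction helpers and
 * the nofs_flags field are hypothetical.
 *
 *	static void my_fs_trans_begin(struct my_fs_trans *trans)
 *	{
 *		trans->nofs_flags = memalloc_nofs_save();
 *		...
 *	}
 *
 *	static void my_fs_trans_end(struct my_fs_trans *trans)
 *	{
 *		...
 *		memalloc_nofs_restore(trans->nofs_flags);
 *	}
 */
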
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
/**
 * memalloc_use_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function is not nesting safe.
 */
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
	WARN_ON_ONCE(current->active_memcg);
	current->active_memcg = memcg;
}

/**
 * memalloc_unuse_memcg - Ends the remote memcg charging scope.
 *
 * This function marks the end of the remote memcg charging scope started by
 * memalloc_use_memcg().
 */
static inline void memalloc_unuse_memcg(void)
{
	current->active_memcg = NULL;
}

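/*
 * Illustrative sketch, not part of this header: charging a kernel allocation
 * to a memcg other than the current task's, e.g. on behalf of the cgroup that
 * owns the object being populated.  "target_memcg" is hypothetical; only
 * __GFP_ACCOUNT allocations inside the scope are redirected.
 *
 *	memalloc_use_memcg(target_memcg);
 *	obj = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	memalloc_unuse_memcg();
 */
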
#else
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
}

static inline void memalloc_unuse_memcg(void)
{
}
#endif

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */