1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/ipc/shm.c
4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
5 * Many improvements/fixes by Bruno Haible.
6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8 *
9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Better ipc lock (kern_ipc_perm.lock) handling
25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
26 */
28 #include <linux/slab.h>
30 #include <linux/hugetlb.h>
31 #include <linux/shm.h>
32 #include <linux/init.h>
33 #include <linux/file.h>
34 #include <linux/mman.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/security.h>
37 #include <linux/syscalls.h>
38 #include <linux/audit.h>
39 #include <linux/capability.h>
40 #include <linux/ptrace.h>
41 #include <linux/seq_file.h>
42 #include <linux/rwsem.h>
43 #include <linux/nsproxy.h>
44 #include <linux/mount.h>
45 #include <linux/ipc_namespace.h>
46 #include <linux/rhashtable.h>
48 #include <linux/uaccess.h>
52 struct shmid_kernel /* private to the kernel */
54 struct kern_ipc_perm shm_perm;
55 struct file *shm_file;
56 unsigned long shm_nattch;
57 unsigned long shm_segsz;
61 struct pid *shm_cprid;
62 struct pid *shm_lprid;
63 struct user_struct *mlock_user;
65 /* The task that created the shm object; NULL if the task is dead. */
66 struct task_struct *shm_creator;
67 struct list_head shm_clist; /* list by creator */
70 /* shm_mode upper byte flags */
71 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
72 #define SHM_LOCKED 02000 /* segment will not be swapped */
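/*
 * These flags live in the upper byte of shm_perm.mode, above the usual
 * permission bits.  SHM_DEST is set by do_shm_rmid() when shmctl(IPC_RMID)
 * hits a segment that still has attaches; the segment then lingers until
 * shm_nattch drops to zero.  Illustrative user-space view (a sketch, not
 * kernel code):
 *
 *	id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	p  = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	-- marks SHM_DEST, hides the key
 *	shmdt(p);			-- last detach, segment is destroyed
 */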
74 struct shm_file_data {
76 struct ipc_namespace *ns;
78 const struct vm_operations_struct *vm_ops;
81 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
83 static const struct file_operations shm_file_operations;
84 static const struct vm_operations_struct shm_vm_ops;
86 #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
88 #define shm_unlock(shp) \
89 ipc_unlock(&(shp)->shm_perm)
91 static int newseg(struct ipc_namespace *, struct ipc_params *);
92 static void shm_open(struct vm_area_struct *vma);
93 static void shm_close(struct vm_area_struct *vma);
94 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
96 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
99 int shm_init_ns(struct ipc_namespace *ns)
101 ns->shm_ctlmax = SHMMAX;
102 ns->shm_ctlall = SHMALL;
103 ns->shm_ctlmni = SHMMNI;
104 ns->shm_rmid_forced = 0;
106 return ipc_init_ids(&shm_ids(ns));
110 * Called with shm_ids.rwsem (writer) and the shp structure locked.
111 * Only shm_ids.rwsem remains locked on exit.
113 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
115 struct shmid_kernel *shp;
117 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
119 if (shp->shm_nattch) {
120 shp->shm_perm.mode |= SHM_DEST;
121 /* Do not find it any more */
122 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
123 shm_unlock(shp);
124 } else
125 shm_destroy(ns, shp);
129 void shm_exit_ns(struct ipc_namespace *ns)
131 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
132 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
133 rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
137 static int __init ipc_ns_init(void)
139 const int err = shm_init_ns(&init_ipc_ns);
140 WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
141 return err;
144 pure_initcall(ipc_ns_init);
146 void __init shm_init(void)
148 ipc_init_proc_interface("sysvipc/shm",
149 #if BITS_PER_LONG <= 32
150 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
152 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
154 IPC_SHM_IDS, sysvipc_shm_proc_show);
157 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
159 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
161 if (IS_ERR(ipcp))
162 return ERR_CAST(ipcp);
164 return container_of(ipcp, struct shmid_kernel, shm_perm);
167 static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
169 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
171 if (IS_ERR(ipcp))
172 return ERR_CAST(ipcp);
174 return container_of(ipcp, struct shmid_kernel, shm_perm);
178 * shm_lock_(check_) routines are called in the paths where the rwsem
179 * is not necessarily held.
181 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
183 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
186 * Callers of shm_lock() must validate the status of the returned ipc
187 * object pointer (as returned by ipc_lock()), and error out as
188 * appropriate.
192 return container_of(ipcp, struct shmid_kernel, shm_perm);
195 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
198 ipc_lock_object(&ipcp->shm_perm);
201 static void shm_rcu_free(struct rcu_head *head)
203 struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
205 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
207 security_shm_free(&shp->shm_perm);
211 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
213 list_del(&s->shm_clist);
214 ipc_rmid(&shm_ids(ns), &s->shm_perm);
218 static int __shm_open(struct vm_area_struct *vma)
220 struct file *file = vma->vm_file;
221 struct shm_file_data *sfd = shm_file_data(file);
222 struct shmid_kernel *shp;
224 shp = shm_lock(sfd->ns, sfd->id);
226 if (IS_ERR(shp))
227 return PTR_ERR(shp);
229 if (shp->shm_file != sfd->file) {
235 shp->shm_atim = ktime_get_real_seconds();
236 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
242 /* This is called by fork, once for every shm attach. */
243 static void shm_open(struct vm_area_struct *vma)
245 int err = __shm_open(vma);
247 * We raced in the idr lookup or with shm_destroy().
248 * Either way, the ID is busted.
250 WARN_ON_ONCE(err);
254 * shm_destroy - free the struct shmid_kernel
257 * @shp: struct to free
259 * It has to be called with shp and shm_ids.rwsem (writer) locked,
260 * but returns with shp unlocked and freed.
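 *
 * Besides dropping the ipc id, this undoes any SHM_LOCK accounting
 * (shmem_lock()/user_shm_unlock()), subtracts the segment from ns->shm_tot
 * and releases the shmid_kernel itself via RCU (shm_rcu_free()).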
262 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
264 struct file *shm_file;
266 shm_file = shp->shm_file;
267 shp->shm_file = NULL;
268 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
271 if (!is_file_hugepages(shm_file))
272 shmem_lock(shm_file, 0, shp->mlock_user);
273 else if (shp->mlock_user)
274 user_shm_unlock(i_size_read(file_inode(shm_file)),
277 ipc_update_pid(&shp->shm_cprid, NULL);
278 ipc_update_pid(&shp->shm_lprid, NULL);
279 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
283 * shm_may_destroy - identifies whether shm segment should be destroyed now
285 * Returns true if and only if there are no active users of the segment and
286 * one of the following is true:
288 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
290 * 2) sysctl kernel.shm_rmid_forced is set to 1.
292 static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
294 return (shp->shm_nattch == 0) &&
295 (ns->shm_rmid_forced ||
296 (shp->shm_perm.mode & SHM_DEST));
300 * remove the attach descriptor vma.
301 * free memory for segment if it is marked destroyed.
302 * The descriptor has already been removed from the current->mm->mmap list
303 * and will later be kfree()d.
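 *
 * This is the vm_operations_struct ->close() hook for shm mappings: it runs
 * whenever an attach vma goes away (shmdt(), munmap() or process exit) and,
 * via shm_may_destroy(), completes the deferred destruction requested by
 * IPC_RMID or by the shm_rmid_forced sysctl.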
305 static void shm_close(struct vm_area_struct *vma)
307 struct file *file = vma->vm_file;
308 struct shm_file_data *sfd = shm_file_data(file);
309 struct shmid_kernel *shp;
310 struct ipc_namespace *ns = sfd->ns;
312 down_write(&shm_ids(ns).rwsem);
313 /* remove from the list of attaches of the shm segment */
314 shp = shm_lock(ns, sfd->id);
317 * We raced in the idr lookup or with shm_destroy().
318 * Either way, the ID is busted.
320 if (WARN_ON_ONCE(IS_ERR(shp)))
321 goto done; /* no-op */
323 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
324 shp->shm_dtim = ktime_get_real_seconds();
326 if (shm_may_destroy(ns, shp))
327 shm_destroy(ns, shp);
331 up_write(&shm_ids(ns).rwsem);
334 /* Called with ns->shm_ids(ns).rwsem locked */
335 static int shm_try_destroy_orphaned(int id, void *p, void *data)
337 struct ipc_namespace *ns = data;
338 struct kern_ipc_perm *ipcp = p;
339 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
342 * We want to destroy segments without users and with already
343 * exit'ed originating process.
345 * As shp->* are changed under rwsem, it's safe to skip shp locking.
347 if (shp->shm_creator != NULL)
348 return 0;
350 if (shm_may_destroy(ns, shp)) {
351 shm_lock_by_ptr(shp);
352 shm_destroy(ns, shp);
357 void shm_destroy_orphaned(struct ipc_namespace *ns)
359 down_write(&shm_ids(ns).rwsem);
360 if (shm_ids(ns).in_use)
361 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
362 up_write(&shm_ids(ns).rwsem);
365 /* Locking assumes this will only be called with task == current */
366 void exit_shm(struct task_struct *task)
368 struct ipc_namespace *ns = task->nsproxy->ipc_ns;
369 struct shmid_kernel *shp, *n;
371 if (list_empty(&task->sysvshm.shm_clist))
375 * If kernel.shm_rmid_forced is not set then only keep track of
376 * which shmids are orphaned, so that a later set of the sysctl
377 * can clean them up.
379 if (!ns->shm_rmid_forced) {
380 down_read(&shm_ids(ns).rwsem);
381 list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
382 shp->shm_creator = NULL;
384 * Only under read lock but we are only called on current
385 * so no entry on the list will be shared.
387 list_del(&task->sysvshm.shm_clist);
388 up_read(&shm_ids(ns).rwsem);
393 * Destroy all already created segments, that were not yet mapped,
394 * and mark any mapped as orphan to cover the sysctl toggling.
395 * Destroy is skipped if shm_may_destroy() returns false.
397 down_write(&shm_ids(ns).rwsem);
398 list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
399 shp->shm_creator = NULL;
401 if (shm_may_destroy(ns, shp)) {
402 shm_lock_by_ptr(shp);
403 shm_destroy(ns, shp);
407 /* Remove the list head from any segments still attached. */
408 list_del(&task->sysvshm.shm_clist);
409 up_write(&shm_ids(ns).rwsem);
412 static vm_fault_t shm_fault(struct vm_fault *vmf)
414 struct file *file = vmf->vma->vm_file;
415 struct shm_file_data *sfd = shm_file_data(file);
417 return sfd->vm_ops->fault(vmf);
420 static int shm_split(struct vm_area_struct *vma, unsigned long addr)
422 struct file *file = vma->vm_file;
423 struct shm_file_data *sfd = shm_file_data(file);
425 if (sfd->vm_ops->split)
426 return sfd->vm_ops->split(vma, addr);
431 static unsigned long shm_pagesize(struct vm_area_struct *vma)
433 struct file *file = vma->vm_file;
434 struct shm_file_data *sfd = shm_file_data(file);
436 if (sfd->vm_ops->pagesize)
437 return sfd->vm_ops->pagesize(vma);
443 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
445 struct file *file = vma->vm_file;
446 struct shm_file_data *sfd = shm_file_data(file);
449 if (sfd->vm_ops->set_policy)
450 err = sfd->vm_ops->set_policy(vma, new);
454 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
457 struct file *file = vma->vm_file;
458 struct shm_file_data *sfd = shm_file_data(file);
459 struct mempolicy *pol = NULL;
461 if (sfd->vm_ops->get_policy)
462 pol = sfd->vm_ops->get_policy(vma, addr);
463 else if (vma->vm_policy)
464 pol = vma->vm_policy;
470 static int shm_mmap(struct file *file, struct vm_area_struct *vma)
472 struct shm_file_data *sfd = shm_file_data(file);
476 * In case of remap_file_pages() emulation, the file can represent an
477 * IPC ID that was removed, and possibly even reused by another shm
478 * segment already. Propagate this case as an error to caller.
480 ret = __shm_open(vma);
484 ret = call_mmap(sfd->file, vma);
489 sfd->vm_ops = vma->vm_ops;
491 WARN_ON(!sfd->vm_ops->fault);
493 vma->vm_ops = &shm_vm_ops;
497 static int shm_release(struct inode *ino, struct file *file)
499 struct shm_file_data *sfd = shm_file_data(file);
503 shm_file_data(file) = NULL;
508 static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
510 struct shm_file_data *sfd = shm_file_data(file);
512 if (!sfd->file->f_op->fsync)
513 return -EINVAL;
514 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
517 static long shm_fallocate(struct file *file, int mode, loff_t offset,
520 struct shm_file_data *sfd = shm_file_data(file);
522 if (!sfd->file->f_op->fallocate)
523 return -EOPNOTSUPP;
524 return sfd->file->f_op->fallocate(file, mode, offset, len);
527 static unsigned long shm_get_unmapped_area(struct file *file,
528 unsigned long addr, unsigned long len, unsigned long pgoff,
531 struct shm_file_data *sfd = shm_file_data(file);
533 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
537 static const struct file_operations shm_file_operations = {
540 .release = shm_release,
541 .get_unmapped_area = shm_get_unmapped_area,
542 .llseek = noop_llseek,
543 .fallocate = shm_fallocate,
547 * shm_file_operations_huge is now identical to shm_file_operations,
548 * but we keep it distinct for the sake of is_file_shm_hugepages().
550 static const struct file_operations shm_file_operations_huge = {
553 .release = shm_release,
554 .get_unmapped_area = shm_get_unmapped_area,
555 .llseek = noop_llseek,
556 .fallocate = shm_fallocate,
559 bool is_file_shm_hugepages(struct file *file)
561 return file->f_op == &shm_file_operations_huge;
564 static const struct vm_operations_struct shm_vm_ops = {
565 .open = shm_open, /* callback for a new vm-area open */
566 .close = shm_close, /* callback for when the vm-area is released */
569 .pagesize = shm_pagesize,
570 #if defined(CONFIG_NUMA)
571 .set_policy = shm_set_policy,
572 .get_policy = shm_get_policy,
577 * newseg - Create a new shared memory segment
579 * @params: ptr to the structure that contains key, size and shmflg
581 * Called with shm_ids.rwsem held as a writer.
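 *
 * Returns the new segment's ipc id on success, or a negative errno
 * (-EINVAL for a size outside [SHMMIN, shmmax], -ENOSPC when shmall or
 * shmmni would be exceeded, -ENOMEM, or an error from file setup or the
 * security hook) on failure.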
583 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
585 key_t key = params->key;
586 int shmflg = params->flg;
587 size_t size = params->u.size;
589 struct shmid_kernel *shp;
590 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
593 vm_flags_t acctflag = 0;
595 if (size < SHMMIN || size > ns->shm_ctlmax)
598 if (numpages << PAGE_SHIFT < size)
601 if (ns->shm_tot + numpages < ns->shm_tot ||
602 ns->shm_tot + numpages > ns->shm_ctlall)
605 shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
609 shp->shm_perm.key = key;
610 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
611 shp->mlock_user = NULL;
613 shp->shm_perm.security = NULL;
614 error = security_shm_alloc(&shp->shm_perm);
620 sprintf(name, "SYSV%08x", key);
621 if (shmflg & SHM_HUGETLB) {
625 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
630 hugesize = ALIGN(size, huge_page_size(hs));
632 /* hugetlb_file_setup applies strict accounting */
633 if (shmflg & SHM_NORESERVE)
634 acctflag = VM_NORESERVE;
635 file = hugetlb_file_setup(name, hugesize, acctflag,
636 &shp->mlock_user, HUGETLB_SHMFS_INODE,
637 (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
640 * Do not allow no accounting for OVERCOMMIT_NEVER, even
641 * if it's asked for.
643 if ((shmflg & SHM_NORESERVE) &&
644 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
645 acctflag = VM_NORESERVE;
646 file = shmem_kernel_file_setup(name, size, acctflag);
648 error = PTR_ERR(file);
652 shp->shm_cprid = get_pid(task_tgid(current));
653 shp->shm_lprid = NULL;
654 shp->shm_atim = shp->shm_dtim = 0;
655 shp->shm_ctim = ktime_get_real_seconds();
656 shp->shm_segsz = size;
658 shp->shm_file = file;
659 shp->shm_creator = current;
661 /* ipc_addid() locks shp upon success. */
662 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
666 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
669 * shmid gets reported as "inode#" in /proc/pid/maps.
670 * proc-ps tools use this. Changing this will break them.
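 *
 * Illustrative maps entry for such an attach (a sketch; exact fields vary):
 *
 *	7f1c2e000000-7f1c2e400000 rw-s 00000000 00:05 98305  /SYSV0000002a (deleted)
 *
 * where 98305 is the shmid reported in the inode column and 0000002a is the
 * key from the SYSV%08x name; "(deleted)" appears because the backing shmem
 * file is never linked into a filesystem.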
672 file_inode(file)->i_ino = shp->shm_perm.id;
674 ns->shm_tot += numpages;
675 error = shp->shm_perm.id;
677 ipc_unlock_object(&shp->shm_perm);
682 ipc_update_pid(&shp->shm_cprid, NULL);
683 ipc_update_pid(&shp->shm_lprid, NULL);
684 if (is_file_hugepages(file) && shp->mlock_user)
685 user_shm_unlock(size, shp->mlock_user);
688 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
693 * Called with shm_ids.rwsem and ipcp locked.
695 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
696 struct ipc_params *params)
698 struct shmid_kernel *shp;
700 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
701 if (shp->shm_segsz < params->u.size)
707 long ksys_shmget(key_t key, size_t size, int shmflg)
709 struct ipc_namespace *ns;
710 static const struct ipc_ops shm_ops = {
712 .associate = security_shm_associate,
713 .more_checks = shm_more_checks,
715 struct ipc_params shm_params;
717 ns = current->nsproxy->ipc_ns;
719 shm_params.key = key;
720 shm_params.flg = shmflg;
721 shm_params.u.size = size;
723 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
726 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
728 return ksys_shmget(key, size, shmflg);
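/*
 * Illustrative user-space call exercising the hugetlb branch of newseg()
 * above (a sketch; it assumes 2MB hugetlb pages are configured):
 *
 *	int id = shmget(IPC_PRIVATE, 8 << 20,
 *			IPC_CREAT | SHM_HUGETLB | SHM_HUGE_2MB | 0600);
 */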
731 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
735 return copy_to_user(buf, in, sizeof(*in));
740 memset(&out, 0, sizeof(out));
741 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
742 out.shm_segsz = in->shm_segsz;
743 out.shm_atime = in->shm_atime;
744 out.shm_dtime = in->shm_dtime;
745 out.shm_ctime = in->shm_ctime;
746 out.shm_cpid = in->shm_cpid;
747 out.shm_lpid = in->shm_lpid;
748 out.shm_nattch = in->shm_nattch;
750 return copy_to_user(buf, &out, sizeof(out));
757 static inline unsigned long
758 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
762 if (copy_from_user(out, buf, sizeof(*out)))
767 struct shmid_ds tbuf_old;
769 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
772 out->shm_perm.uid = tbuf_old.shm_perm.uid;
773 out->shm_perm.gid = tbuf_old.shm_perm.gid;
774 out->shm_perm.mode = tbuf_old.shm_perm.mode;
783 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
787 return copy_to_user(buf, in, sizeof(*in));
792 if (in->shmmax > INT_MAX)
793 out.shmmax = INT_MAX;
795 out.shmmax = (int)in->shmmax;
797 out.shmmin = in->shmmin;
798 out.shmmni = in->shmmni;
799 out.shmseg = in->shmseg;
800 out.shmall = in->shmall;
802 return copy_to_user(buf, &out, sizeof(out));
810 * Calculate and add used RSS and swap pages of a shm.
811 * Called with shm_ids.rwsem held as a reader
813 static void shm_add_rss_swap(struct shmid_kernel *shp,
814 unsigned long *rss_add, unsigned long *swp_add)
818 inode = file_inode(shp->shm_file);
820 if (is_file_hugepages(shp->shm_file)) {
821 struct address_space *mapping = inode->i_mapping;
822 struct hstate *h = hstate_file(shp->shm_file);
823 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
826 struct shmem_inode_info *info = SHMEM_I(inode);
828 spin_lock_irq(&info->lock);
829 *rss_add += inode->i_mapping->nrpages;
830 *swp_add += info->swapped;
831 spin_unlock_irq(&info->lock);
833 *rss_add += inode->i_mapping->nrpages;
839 * Called with shm_ids.rwsem held as a reader
841 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
850 in_use = shm_ids(ns).in_use;
852 for (total = 0, next_id = 0; total < in_use; next_id++) {
853 struct kern_ipc_perm *ipc;
854 struct shmid_kernel *shp;
856 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
859 shp = container_of(ipc, struct shmid_kernel, shm_perm);
861 shm_add_rss_swap(shp, rss, swp);
868 * This function handles some shmctl commands which require the rwsem
869 * to be held in write mode.
870 * NOTE: no locks must be held, the rwsem is taken inside this function.
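 *
 * Only IPC_RMID and IPC_SET are routed here; both go through
 * ipcctl_pre_down_nolock() for permission and audit checks first.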
872 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
873 struct shmid64_ds *shmid64)
875 struct kern_ipc_perm *ipcp;
876 struct shmid_kernel *shp;
879 down_write(&shm_ids(ns).rwsem);
882 ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
883 &shmid64->shm_perm, 0);
889 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
891 err = security_shm_shmctl(&shp->shm_perm, cmd);
897 ipc_lock_object(&shp->shm_perm);
898 /* do_shm_rmid unlocks the ipc object and rcu */
899 do_shm_rmid(ns, ipcp);
902 ipc_lock_object(&shp->shm_perm);
903 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
906 shp->shm_ctim = ktime_get_real_seconds();
914 ipc_unlock_object(&shp->shm_perm);
918 up_write(&shm_ids(ns).rwsem);
922 static int shmctl_ipc_info(struct ipc_namespace *ns,
923 struct shminfo64 *shminfo)
925 int err = security_shm_shmctl(NULL, IPC_INFO);
927 memset(shminfo, 0, sizeof(*shminfo));
928 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
929 shminfo->shmmax = ns->shm_ctlmax;
930 shminfo->shmall = ns->shm_ctlall;
931 shminfo->shmmin = SHMMIN;
932 down_read(&shm_ids(ns).rwsem);
933 err = ipc_get_maxid(&shm_ids(ns));
934 up_read(&shm_ids(ns).rwsem);
941 static int shmctl_shm_info(struct ipc_namespace *ns,
942 struct shm_info *shm_info)
944 int err = security_shm_shmctl(NULL, SHM_INFO);
946 memset(shm_info, 0, sizeof(*shm_info));
947 down_read(&shm_ids(ns).rwsem);
948 shm_info->used_ids = shm_ids(ns).in_use;
949 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
950 shm_info->shm_tot = ns->shm_tot;
951 shm_info->swap_attempts = 0;
952 shm_info->swap_successes = 0;
953 err = ipc_get_maxid(&shm_ids(ns));
954 up_read(&shm_ids(ns).rwsem);
961 static int shmctl_stat(struct ipc_namespace *ns, int shmid,
962 int cmd, struct shmid64_ds *tbuf)
964 struct shmid_kernel *shp;
967 memset(tbuf, 0, sizeof(*tbuf));
970 if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
971 shp = shm_obtain_object(ns, shmid);
976 } else { /* IPC_STAT */
977 shp = shm_obtain_object_check(ns, shmid);
985 * Semantically SHM_STAT_ANY ought to be identical to
986 * that functionality provided by the /proc/sysvipc/
987 * interface. As such, only audit these calls and
988 * do not do traditional S_IRUGO permission checks on
989 * the ipc object.
991 if (cmd == SHM_STAT_ANY)
992 audit_ipc_obj(&shp->shm_perm);
995 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
999 err = security_shm_shmctl(&shp->shm_perm, cmd);
1003 ipc_lock_object(&shp->shm_perm);
1005 if (!ipc_valid_object(&shp->shm_perm)) {
1006 ipc_unlock_object(&shp->shm_perm);
1011 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1012 tbuf->shm_segsz = shp->shm_segsz;
1013 tbuf->shm_atime = shp->shm_atim;
1014 tbuf->shm_dtime = shp->shm_dtim;
1015 tbuf->shm_ctime = shp->shm_ctim;
1016 #ifndef CONFIG_64BIT
1017 tbuf->shm_atime_high = shp->shm_atim >> 32;
1018 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1019 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1021 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1022 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1023 tbuf->shm_nattch = shp->shm_nattch;
1025 if (cmd == IPC_STAT) {
1027 * As defined in SUS:
1028 * Return 0 on success
1033 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1034 * Return the full id, including the sequence number
1036 err = shp->shm_perm.id;
1039 ipc_unlock_object(&shp->shm_perm);
1045 static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1047 struct shmid_kernel *shp;
1048 struct file *shm_file;
1052 shp = shm_obtain_object_check(ns, shmid);
1058 audit_ipc_obj(&(shp->shm_perm));
1059 err = security_shm_shmctl(&shp->shm_perm, cmd);
1063 ipc_lock_object(&shp->shm_perm);
1065 /* check if shm_destroy() is tearing down shp */
1066 if (!ipc_valid_object(&shp->shm_perm)) {
1071 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1072 kuid_t euid = current_euid();
1074 if (!uid_eq(euid, shp->shm_perm.uid) &&
1075 !uid_eq(euid, shp->shm_perm.cuid)) {
1079 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1085 shm_file = shp->shm_file;
1086 if (is_file_hugepages(shm_file))
1089 if (cmd == SHM_LOCK) {
1090 struct user_struct *user = current_user();
1092 err = shmem_lock(shm_file, 1, user);
1093 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1094 shp->shm_perm.mode |= SHM_LOCKED;
1095 shp->mlock_user = user;
1101 if (!(shp->shm_perm.mode & SHM_LOCKED))
1103 shmem_lock(shm_file, 0, shp->mlock_user);
1104 shp->shm_perm.mode &= ~SHM_LOCKED;
1105 shp->mlock_user = NULL;
1107 ipc_unlock_object(&shp->shm_perm);
1109 shmem_unlock_mapping(shm_file->f_mapping);
1115 ipc_unlock_object(&shp->shm_perm);
1121 long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1124 struct ipc_namespace *ns;
1125 struct shmid64_ds sem64;
1127 if (cmd < 0 || shmid < 0)
1130 version = ipc_parse_version(&cmd);
1131 ns = current->nsproxy->ipc_ns;
1135 struct shminfo64 shminfo;
1136 err = shmctl_ipc_info(ns, &shminfo);
1139 if (copy_shminfo_to_user(buf, &shminfo, version))
1144 struct shm_info shm_info;
1145 err = shmctl_shm_info(ns, &shm_info);
1148 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1155 err = shmctl_stat(ns, shmid, cmd, &sem64);
1158 if (copy_shmid_to_user(buf, &sem64, version))
1163 if (copy_shmid_from_user(&sem64, buf, version))
1167 return shmctl_down(ns, shmid, cmd, &sem64);
1170 return shmctl_do_lock(ns, shmid, cmd);
1176 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1178 return ksys_shmctl(shmid, cmd, buf);
1181 #ifdef CONFIG_COMPAT
1183 struct compat_shmid_ds {
1184 struct compat_ipc_perm shm_perm;
1186 compat_time_t shm_atime;
1187 compat_time_t shm_dtime;
1188 compat_time_t shm_ctime;
1189 compat_ipc_pid_t shm_cpid;
1190 compat_ipc_pid_t shm_lpid;
1191 unsigned short shm_nattch;
1192 unsigned short shm_unused;
1193 compat_uptr_t shm_unused2;
1194 compat_uptr_t shm_unused3;
1197 struct compat_shminfo64 {
1198 compat_ulong_t shmmax;
1199 compat_ulong_t shmmin;
1200 compat_ulong_t shmmni;
1201 compat_ulong_t shmseg;
1202 compat_ulong_t shmall;
1203 compat_ulong_t __unused1;
1204 compat_ulong_t __unused2;
1205 compat_ulong_t __unused3;
1206 compat_ulong_t __unused4;
1209 struct compat_shm_info {
1210 compat_int_t used_ids;
1211 compat_ulong_t shm_tot, shm_rss, shm_swp;
1212 compat_ulong_t swap_attempts, swap_successes;
1215 static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1218 if (in->shmmax > INT_MAX)
1219 in->shmmax = INT_MAX;
1220 if (version == IPC_64) {
1221 struct compat_shminfo64 info;
1222 memset(&info, 0, sizeof(info));
1223 info.shmmax = in->shmmax;
1224 info.shmmin = in->shmmin;
1225 info.shmmni = in->shmmni;
1226 info.shmseg = in->shmseg;
1227 info.shmall = in->shmall;
1228 return copy_to_user(buf, &info, sizeof(info));
1230 struct shminfo info;
1231 memset(&info, 0, sizeof(info));
1232 info.shmmax = in->shmmax;
1233 info.shmmin = in->shmmin;
1234 info.shmmni = in->shmmni;
1235 info.shmseg = in->shmseg;
1236 info.shmall = in->shmall;
1237 return copy_to_user(buf, &info, sizeof(info));
1241 static int put_compat_shm_info(struct shm_info *ip,
1242 struct compat_shm_info __user *uip)
1244 struct compat_shm_info info;
1246 memset(&info, 0, sizeof(info));
1247 info.used_ids = ip->used_ids;
1248 info.shm_tot = ip->shm_tot;
1249 info.shm_rss = ip->shm_rss;
1250 info.shm_swp = ip->shm_swp;
1251 info.swap_attempts = ip->swap_attempts;
1252 info.swap_successes = ip->swap_successes;
1253 return copy_to_user(uip, &info, sizeof(info));
1256 static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1259 if (version == IPC_64) {
1260 struct compat_shmid64_ds v;
1261 memset(&v, 0, sizeof(v));
1262 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1263 v.shm_atime = lower_32_bits(in->shm_atime);
1264 v.shm_atime_high = upper_32_bits(in->shm_atime);
1265 v.shm_dtime = lower_32_bits(in->shm_dtime);
1266 v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1267 v.shm_ctime = lower_32_bits(in->shm_ctime);
1268 v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1269 v.shm_segsz = in->shm_segsz;
1270 v.shm_nattch = in->shm_nattch;
1271 v.shm_cpid = in->shm_cpid;
1272 v.shm_lpid = in->shm_lpid;
1273 return copy_to_user(buf, &v, sizeof(v));
1275 struct compat_shmid_ds v;
1276 memset(&v, 0, sizeof(v));
1277 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1278 v.shm_perm.key = in->shm_perm.key;
1279 v.shm_atime = in->shm_atime;
1280 v.shm_dtime = in->shm_dtime;
1281 v.shm_ctime = in->shm_ctime;
1282 v.shm_segsz = in->shm_segsz;
1283 v.shm_nattch = in->shm_nattch;
1284 v.shm_cpid = in->shm_cpid;
1285 v.shm_lpid = in->shm_lpid;
1286 return copy_to_user(buf, &v, sizeof(v));
1290 static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1293 memset(out, 0, sizeof(*out));
1294 if (version == IPC_64) {
1295 struct compat_shmid64_ds __user *p = buf;
1296 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1298 struct compat_shmid_ds __user *p = buf;
1299 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1303 long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
1305 struct ipc_namespace *ns;
1306 struct shmid64_ds sem64;
1307 int version = compat_ipc_parse_version(&cmd);
1310 ns = current->nsproxy->ipc_ns;
1312 if (cmd < 0 || shmid < 0)
1317 struct shminfo64 shminfo;
1318 err = shmctl_ipc_info(ns, &shminfo);
1321 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1326 struct shm_info shm_info;
1327 err = shmctl_shm_info(ns, &shm_info);
1330 if (put_compat_shm_info(&shm_info, uptr))
1337 err = shmctl_stat(ns, shmid, cmd, &sem64);
1340 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1345 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1349 return shmctl_down(ns, shmid, cmd, &sem64);
1352 return shmctl_do_lock(ns, shmid, cmd);
1360 COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1362 return compat_ksys_shmctl(shmid, cmd, uptr);
1367 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1369 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1370 * "raddr" thing points to kernel space, and there has to be a wrapper around
1373 long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1374 ulong *raddr, unsigned long shmlba)
1376 struct shmid_kernel *shp;
1377 unsigned long addr = (unsigned long)shmaddr;
1379 struct file *file, *base;
1381 unsigned long flags = MAP_SHARED;
1384 struct ipc_namespace *ns;
1385 struct shm_file_data *sfd;
1387 unsigned long populate = 0;
1394 if (addr & (shmlba - 1)) {
1395 if (shmflg & SHM_RND) {
1396 addr &= ~(shmlba - 1); /* round down */
1399 * Ensure that the round-down is non-nil
1400 * when remapping. This can happen for
1401 * cases when addr < shmlba.
1403 if (!addr && (shmflg & SHM_REMAP))
1406 #ifndef __ARCH_FORCE_SHMLBA
1407 if (addr & ~PAGE_MASK)
1413 } else if ((shmflg & SHM_REMAP))
1416 if (shmflg & SHM_RDONLY) {
1417 prot = PROT_READ;
1418 acc_mode = S_IRUGO;
1419 f_flags = O_RDONLY;
1420 } else {
1421 prot = PROT_READ | PROT_WRITE;
1422 acc_mode = S_IRUGO | S_IWUGO;
1425 if (shmflg & SHM_EXEC) {
1427 acc_mode |= S_IXUGO;
1431 * We cannot rely on the fs check since SYSV IPC does have an
1432 * additional creator id...
1434 ns = current->nsproxy->ipc_ns;
1436 shp = shm_obtain_object_check(ns, shmid);
1443 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1446 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1450 ipc_lock_object(&shp->shm_perm);
1452 /* check if shm_destroy() is tearing down shp */
1453 if (!ipc_valid_object(&shp->shm_perm)) {
1454 ipc_unlock_object(&shp->shm_perm);
1460 * We need to take a reference to the real shm file to prevent the
1461 * pointer from becoming stale in cases where the lifetime of the outer
1462 * file extends beyond that of the shm segment. It's not usually
1463 * possible, but it can happen during remap_file_pages() emulation as
1464 * that unmaps the memory, then does ->mmap() via file reference only.
1465 * We'll deny the ->mmap() if the shm segment was since removed, but to
1466 * detect shm ID reuse we need to compare the file pointers.
1468 base = get_file(shp->shm_file);
1470 size = i_size_read(file_inode(base));
1471 ipc_unlock_object(&shp->shm_perm);
1475 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1481 file = alloc_file_clone(base, f_flags,
1482 is_file_hugepages(base) ?
1483 &shm_file_operations_huge :
1484 &shm_file_operations);
1485 err = PTR_ERR(file);
1492 sfd->id = shp->shm_perm.id;
1493 sfd->ns = get_ipc_ns(ns);
1496 file->private_data = sfd;
1498 err = security_mmap_file(file, prot, flags);
1502 if (down_write_killable(&current->mm->mmap_sem)) {
1507 if (addr && !(shmflg & SHM_REMAP)) {
1509 if (addr + size < addr)
1512 if (find_vma_intersection(current->mm, addr, addr + size))
1516 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1519 if (IS_ERR_VALUE(addr))
1522 up_write(&current->mm->mmap_sem);
1524 mm_populate(addr, populate);
1530 down_write(&shm_ids(ns).rwsem);
1531 shp = shm_lock(ns, shmid);
1533 if (shm_may_destroy(ns, shp))
1534 shm_destroy(ns, shp);
1537 up_write(&shm_ids(ns).rwsem);
1546 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1551 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1554 force_successful_syscall_return();
1558 #ifdef CONFIG_COMPAT
1560 #ifndef COMPAT_SHMLBA
1561 #define COMPAT_SHMLBA SHMLBA
1564 COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1569 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1572 force_successful_syscall_return();
1578 * detach and kill segment if marked destroyed.
1579 * The work is done in shm_close.
1581 long ksys_shmdt(char __user *shmaddr)
1583 struct mm_struct *mm = current->mm;
1584 struct vm_area_struct *vma;
1585 unsigned long addr = (unsigned long)shmaddr;
1586 int retval = -EINVAL;
1590 struct vm_area_struct *next;
1593 if (addr & ~PAGE_MASK)
1596 if (down_write_killable(&mm->mmap_sem))
1600 * This function tries to be smart and unmap shm segments that
1601 * were modified by partial mlock or munmap calls:
1602 * - It first determines the size of the shm segment that should be
1603 * unmapped: It searches for a vma that is backed by shm and that
1604 * started at address shmaddr. It records its size and then unmaps
1605 * it.
1606 * - Then it unmaps all shm vmas that started at shmaddr and that
1607 * are within the initially determined size and that are from the
1608 * same shm segment from which we determined the size.
1609 * Errors from do_munmap are ignored: the function only fails if
1610 * it's called with invalid parameters or if it's called to unmap
1611 * a part of a vma. Both calls in this function are for full vmas,
1612 * the parameters are directly copied from the vma itself and always
1613 * valid - therefore do_munmap cannot fail. (famous last words?)
1616 * If it had been mremap()'d, the starting address would not
1617 * match the usual checks anyway. So assume all vma's are
1618 * above the starting address given.
1620 vma = find_vma(mm, addr);
1624 next = vma->vm_next;
1627 * Check if the starting address would match, i.e. it's
1628 * a fragment created by mprotect() and/or munmap(), or it
1629 * otherwise starts at this address with no hassles.
1631 if ((vma->vm_ops == &shm_vm_ops) &&
1632 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1635 * Record the file of the shm segment being
1636 * unmapped. With mremap(), someone could place
1637 * a page from another segment but with equal offsets
1638 * in the range we are unmapping.
1640 file = vma->vm_file;
1641 size = i_size_read(file_inode(vma->vm_file));
1642 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1644 * We discovered the size of the shm segment, so
1645 * break out of here and fall through to the next
1646 * loop that uses the size information to stop
1647 * searching for matching vma's.
1657 * We need look no further than the maximum address a fragment
1658 * could possibly have landed at. Also cast things to loff_t to
1659 * prevent overflows and make comparisons vs. equal-width types.
1661 size = PAGE_ALIGN(size);
1662 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1663 next = vma->vm_next;
1665 /* finding a matching vma now does not alter retval */
1666 if ((vma->vm_ops == &shm_vm_ops) &&
1667 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1668 (vma->vm_file == file))
1669 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1673 #else /* CONFIG_MMU */
1674 /* under NOMMU conditions, the exact address to be destroyed must be
1675 * given
1676 */
1677 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1678 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1684 up_write(&mm->mmap_sem);
1688 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1690 return ksys_shmdt(shmaddr);
1693 #ifdef CONFIG_PROC_FS
1694 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1696 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1697 struct user_namespace *user_ns = seq_user_ns(s);
1698 struct kern_ipc_perm *ipcp = it;
1699 struct shmid_kernel *shp;
1700 unsigned long rss = 0, swp = 0;
1702 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1703 shm_add_rss_swap(shp, &rss, &swp);
1705 #if BITS_PER_LONG <= 32
1706 #define SIZE_SPEC "%10lu"
1708 #define SIZE_SPEC "%21lu"
1712 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1713 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1714 SIZE_SPEC " " SIZE_SPEC "\n",
1719 pid_nr_ns(shp->shm_cprid, pid_ns),
1720 pid_nr_ns(shp->shm_lprid, pid_ns),
1722 from_kuid_munged(user_ns, shp->shm_perm.uid),
1723 from_kgid_munged(user_ns, shp->shm_perm.gid),
1724 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1725 from_kgid_munged(user_ns, shp->shm_perm.cgid),