3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
8 * SMP-threaded, sysctl's added
9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
10 * Enforced range limit on SEM_UNDO
11 * (c) 2001 Red Hat Inc
13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
15 * Further wakeup optimizations, documentation
16 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
18 * support for audit of ipc object properties and permission changes
19 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
23 * Pavel Emelianov <xemul@openvz.org>
25 * Implementation notes: (May 2010)
26 * This file implements System V semaphores.
28 * User space visible behavior:
29 * - FIFO ordering for semop() operations (just FIFO, not starvation
31 * - multiple semaphore operations that alter the same semaphore in
32 * one semop() are handled.
33 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
35 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
36 * - undo adjustments at process exit are limited to 0..SEMVMX.
37 * - namespaces are supported.
38 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
39 * to /proc/sys/kernel/sem.
40 * - statistics about the usage are reported in /proc/sysvipc/sem.
44 * - all global variables are read-mostly.
45 * - semop() calls and semctl(RMID) are synchronized by RCU.
46 * - most operations do write operations (actually: spin_lock calls) to
47 * the per-semaphore array structure.
48 * Thus: Perfect SMP scaling between independent semaphore arrays.
49 * If multiple semaphores in one array are used, then cache line
50 * thrashing on the semaphore array spinlock will limit the scaling.
51 * - semncnt and semzcnt are calculated on demand in count_semcnt()
52 * - the task that performs a successful semop() scans the list of all
53 * sleeping tasks and completes any pending operations that can be fulfilled.
54 * Semaphores are actively given to waiting tasks (necessary for FIFO).
55 * (see update_queue())
56 * - To improve the scalability, the actual wake-up calls are performed after
57 * dropping all locks. (see wake_up_sem_queue_prepare())
58 * - All work is done by the waker, the woken up task does not have to do
59 * anything - not even acquire a lock or drop a refcount.
60 * - A woken up task may not even touch the semaphore array anymore; it may
61 * have been destroyed already by a semctl(RMID).
62 * - UNDO values are stored in an array (one per process and per
63 * semaphore array, lazily allocated). For backwards compatibility, multiple
64 * modes for the UNDO variables are supported (per process, per thread)
65 * (see copy_semundo, CLONE_SYSVSEM)
66 * - There are two lists of the pending operations: a per-array list
67 * and per-semaphore list (stored in the array). This makes it possible to achieve FIFO
68 * ordering without always scanning all pending operations.
69 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
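 *
 * A minimal user-space sketch of the visible behavior documented above
 * (illustrative only, error handling omitted; semget()/semop() and the
 * flags used are the standard SysV IPC APIs, not part of this file):
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf sop = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
 *	semop(id, &sop, 1);	increments sem 0, undone automatically at exit
 *	sop.sem_op = -1;
 *	semop(id, &sop, 1);	decrements; blocks in FIFO order if semval is 0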
72 #include <linux/slab.h>
73 #include <linux/spinlock.h>
74 #include <linux/init.h>
75 #include <linux/proc_fs.h>
76 #include <linux/time.h>
77 #include <linux/security.h>
78 #include <linux/syscalls.h>
79 #include <linux/audit.h>
80 #include <linux/capability.h>
81 #include <linux/seq_file.h>
82 #include <linux/rwsem.h>
83 #include <linux/nsproxy.h>
84 #include <linux/ipc_namespace.h>
85 #include <linux/sched/wake_q.h>
87 #include <linux/uaccess.h>
91 /* One queue for each sleeping process in the system. */
93 struct list_head list; /* queue of pending operations */
94 struct task_struct *sleeper; /* this process */
95 struct sem_undo *undo; /* undo structure */
96 int pid; /* process id of requesting process */
97 int status; /* completion status of operation */
98 struct sembuf *sops; /* array of pending operations */
99 struct sembuf *blocking; /* the operation that blocked */
100 int nsops; /* number of operations */
101 bool alter; /* does *sops alter the array? */
102 bool dupsop; /* sops on more than one sem_num */
105 /* Each task has a list of undo requests. They are executed automatically
106 * when the process exits.
109 struct list_head list_proc; /* per-process list: *
110 * all undos from one process
112 struct rcu_head rcu; /* rcu struct for sem_undo */
113 struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
114 struct list_head list_id; /* per semaphore array list:
115 * all undos for one array */
116 int semid; /* semaphore set identifier */
117 short *semadj; /* array of adjustments */
118 /* one per semaphore */
121 /* sem_undo_list controls shared access to the list of sem_undo structures
122 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
124 struct sem_undo_list {
127 struct list_head list_proc;
131 #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
133 #define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
135 static int newary(struct ipc_namespace *, struct ipc_params *);
136 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
137 #ifdef CONFIG_PROC_FS
138 static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
141 #define SEMMSL_FAST 256 /* 512 bytes on stack */
142 #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
145 * Switching from the mode suitable for simple ops
146 * to the mode for complex ops is costly. Therefore:
147 * use some hysteresis
149 #define USE_GLOBAL_LOCK_HYSTERESIS 10
153 * a) global sem_lock() for read/write
155 * sem_array.complex_count,
156 * sem_array.pending{_alter,_const},
159 * b) global or semaphore sem_lock() for read/write:
160 * sem_array.sems[i].pending_{const,alter}:
163 * sem_undo_list.list_proc:
164 * * undo_list->lock for write
167 * * global sem_lock() for write
168 * * either local or global sem_lock() for read.
171 * Most ordering is enforced by using spin_lock() and spin_unlock().
172 * The special case is use_global_lock:
173 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
174 * using smp_store_release().
175 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
176 * smp_load_acquire().
177 * Setting it from 0 to non-zero must be ordered with regards to
178 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
179 * is inside a spin_lock() and after a write from 0 to non-zero a
180 * spin_lock()+spin_unlock() is done.
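 *
 * A rough sketch of the resulting pairing (assuming the sem_lock() fast
 * path below versus complexmode_tryleave(); not a complete proof):
 *
 *	simple op (sem_lock)                complex op (complexmode_tryleave)
 *	  spin_lock(&sem->lock)               ...updates done under global lock
 *	  smp_load_acquire(                   smp_store_release(
 *	      &sma->use_global_lock)              &sma->use_global_lock, 0)
 *	  reads 0 -> all updates made under the global lock are visible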
183 #define sc_semmsl sem_ctls[0]
184 #define sc_semmns sem_ctls[1]
185 #define sc_semopm sem_ctls[2]
186 #define sc_semmni sem_ctls[3]
188 void sem_init_ns(struct ipc_namespace *ns)
190 ns->sc_semmsl = SEMMSL;
191 ns->sc_semmns = SEMMNS;
192 ns->sc_semopm = SEMOPM;
193 ns->sc_semmni = SEMMNI;
195 ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
199 void sem_exit_ns(struct ipc_namespace *ns)
201 free_ipcs(ns, &sem_ids(ns), freeary);
202 idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
206 void __init sem_init(void)
208 sem_init_ns(&init_ipc_ns);
209 ipc_init_proc_interface("sysvipc/sem",
210 " key semid perms nsems uid gid cuid cgid otime ctime\n",
211 IPC_SEM_IDS, sysvipc_sem_proc_show);
215 * unmerge_queues - unmerge queues, if possible.
216 * @sma: semaphore array
218 * The function unmerges the wait queues if complex_count is 0.
219 * It must be called prior to dropping the global semaphore array lock.
221 static void unmerge_queues(struct sem_array *sma)
223 struct sem_queue *q, *tq;
225 /* complex operations still around? */
226 if (sma->complex_count)
229 * We will switch back to simple mode.
230 * Move all pending operations back into the per-semaphore
233 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
235 curr = &sma->sems[q->sops[0].sem_num];
237 list_add_tail(&q->list, &curr->pending_alter);
239 INIT_LIST_HEAD(&sma->pending_alter);
243 * merge_queues - merge single semop queues into global queue
244 * @sma: semaphore array
246 * This function merges all per-semaphore queues into the global queue.
247 * It is necessary to achieve FIFO ordering for the pending single-sop
248 * operations when a multi-semop operation must sleep.
249 * Only the alter operations must be moved, the const operations can stay.
251 static void merge_queues(struct sem_array *sma)
254 for (i = 0; i < sma->sem_nsems; i++) {
255 struct sem *sem = &sma->sems[i];
257 list_splice_init(&sem->pending_alter, &sma->pending_alter);
261 static void sem_rcu_free(struct rcu_head *head)
263 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
264 struct sem_array *sma = ipc_rcu_to_struct(p);
266 security_sem_free(sma);
271 * Enter the mode suitable for non-simple operations:
272 * Caller must own sem_perm.lock.
274 static void complexmode_enter(struct sem_array *sma)
279 if (sma->use_global_lock > 0) {
281 * We are already in global lock mode.
282 * Nothing to do, just reset the
283 * counter until we return to simple mode.
285 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
288 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
290 for (i = 0; i < sma->sem_nsems; i++) {
292 spin_lock(&sem->lock);
293 spin_unlock(&sem->lock);
298 * Try to leave the mode that disallows simple operations:
299 * Caller must own sem_perm.lock.
301 static void complexmode_tryleave(struct sem_array *sma)
303 if (sma->complex_count) {
304 /* Complex ops are sleeping.
305 * We must stay in complex mode
309 if (sma->use_global_lock == 1) {
311 * Immediately after setting use_global_lock to 0,
312 * a simple op can start. Thus: all memory writes
313 * performed by the current operation must be visible
314 * before we set use_global_lock to 0.
316 smp_store_release(&sma->use_global_lock, 0);
318 sma->use_global_lock--;
322 #define SEM_GLOBAL_LOCK (-1)
324 * If the request contains only one semaphore operation, and there are
325 * no complex transactions pending, lock only the semaphore involved.
326 * Otherwise, lock the entire semaphore array, since we either have
327 * multiple semaphores in our own semops, or we need to look at
328 * semaphores from other pending complex operations.
330 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
336 /* Complex operation - acquire a full lock */
337 ipc_lock_object(&sma->sem_perm);
339 /* Prevent parallel simple ops */
340 complexmode_enter(sma);
341 return SEM_GLOBAL_LOCK;
345 * Only one semaphore affected - try to optimize locking.
346 * Optimized locking is possible if no complex operation
347 * is either enqueued or processed right now.
349 * Both facts are tracked by use_global_lock.
351 sem = &sma->sems[sops->sem_num];
354 * Initial check for use_global_lock. Just an optimization,
355 * no locking, no memory barrier.
357 if (!sma->use_global_lock) {
359 * It appears that no complex operation is around.
360 * Acquire the per-semaphore lock.
362 spin_lock(&sem->lock);
364 /* pairs with smp_store_release() */
365 if (!smp_load_acquire(&sma->use_global_lock)) {
366 /* fast path successful! */
367 return sops->sem_num;
369 spin_unlock(&sem->lock);
372 /* slow path: acquire the full lock */
373 ipc_lock_object(&sma->sem_perm);
375 if (sma->use_global_lock == 0) {
377 * The use_global_lock mode ended while we waited for
378 * sma->sem_perm.lock. Thus we must switch to locking
380 * Unlike in the fast path, there is no need to recheck
381 * sma->use_global_lock after we have acquired sem->lock:
382 * We own sma->sem_perm.lock, thus use_global_lock cannot
385 spin_lock(&sem->lock);
387 ipc_unlock_object(&sma->sem_perm);
388 return sops->sem_num;
391 * Not a false alarm, thus continue to use the global lock
392 * mode. No need for complexmode_enter(), this was done by
393 * the caller that has set use_global_lock to non-zero.
395 return SEM_GLOBAL_LOCK;
399 static inline void sem_unlock(struct sem_array *sma, int locknum)
401 if (locknum == SEM_GLOBAL_LOCK) {
403 complexmode_tryleave(sma);
404 ipc_unlock_object(&sma->sem_perm);
406 struct sem *sem = &sma->sems[locknum];
407 spin_unlock(&sem->lock);
412 * sem_lock_(check_) routines are called in the paths where the rwsem
415 * The caller holds the RCU read lock.
417 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
419 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
422 return ERR_CAST(ipcp);
424 return container_of(ipcp, struct sem_array, sem_perm);
427 static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
430 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
433 return ERR_CAST(ipcp);
435 return container_of(ipcp, struct sem_array, sem_perm);
438 static inline void sem_lock_and_putref(struct sem_array *sma)
440 sem_lock(sma, NULL, -1);
441 ipc_rcu_putref(sma, sem_rcu_free);
444 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
446 ipc_rmid(&sem_ids(ns), &s->sem_perm);
450 * newary - Create a new semaphore set
452 * @params: ptr to the structure that contains key, semflg and nsems
454 * Called with sem_ids.rwsem held (as a writer)
456 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
460 struct sem_array *sma;
462 key_t key = params->key;
463 int nsems = params->u.nsems;
464 int semflg = params->flg;
469 if (ns->used_sems + nsems > ns->sc_semmns)
472 size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
473 sma = ipc_rcu_alloc(size);
477 memset(sma, 0, size);
479 sma->sem_perm.mode = (semflg & S_IRWXUGO);
480 sma->sem_perm.key = key;
482 sma->sem_perm.security = NULL;
483 retval = security_sem_alloc(sma);
485 ipc_rcu_putref(sma, ipc_rcu_free);
489 for (i = 0; i < nsems; i++) {
490 INIT_LIST_HEAD(&sma->sems[i].pending_alter);
491 INIT_LIST_HEAD(&sma->sems[i].pending_const);
492 spin_lock_init(&sma->sems[i].lock);
495 sma->complex_count = 0;
496 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
497 INIT_LIST_HEAD(&sma->pending_alter);
498 INIT_LIST_HEAD(&sma->pending_const);
499 INIT_LIST_HEAD(&sma->list_id);
500 sma->sem_nsems = nsems;
501 sma->sem_ctime = get_seconds();
503 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
505 ipc_rcu_putref(sma, sem_rcu_free);
508 ns->used_sems += nsems;
513 return sma->sem_perm.id;
518 * Called with sem_ids.rwsem and ipcp locked.
520 static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
522 struct sem_array *sma;
524 sma = container_of(ipcp, struct sem_array, sem_perm);
525 return security_sem_associate(sma, semflg);
529 * Called with sem_ids.rwsem and ipcp locked.
531 static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
532 struct ipc_params *params)
534 struct sem_array *sma;
536 sma = container_of(ipcp, struct sem_array, sem_perm);
537 if (params->u.nsems > sma->sem_nsems)
543 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
545 struct ipc_namespace *ns;
546 static const struct ipc_ops sem_ops = {
548 .associate = sem_security,
549 .more_checks = sem_more_checks,
551 struct ipc_params sem_params;
553 ns = current->nsproxy->ipc_ns;
555 if (nsems < 0 || nsems > ns->sc_semmsl)
558 sem_params.key = key;
559 sem_params.flg = semflg;
560 sem_params.u.nsems = nsems;
562 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
566 * perform_atomic_semop[_slow] - Attempt to perform semaphore
567 * operations on a given array.
568 * @sma: semaphore array
569 * @q: struct sem_queue that describes the operation
571 * Whether the caller blocks is determined as follows, based on the value
572 * indicated by the semaphore operation (sem_op):
574 * (1) >0 never blocks.
575 * (2) 0 (wait-for-zero operation): semval is non-zero.
576 * (3) <0 attempting to decrement semval to a value smaller than zero.
578 * Returns 0 if the operation was possible.
579 * Returns 1 if the operation is impossible, the caller must sleep.
580 * Returns <0 for error codes.
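 *
 * Illustrative (hypothetical) two-op request, not taken from this file:
 *	sops[0] = { .sem_num = 0, .sem_op =  0 };	wait-for-zero
 *	sops[1] = { .sem_num = 1, .sem_op = -1 };	decrement
 * The request succeeds (returns 0) only if sems[0].semval == 0 and
 * sems[1].semval >= 1; otherwise 1 is returned (or -EAGAIN if IPC_NOWAIT
 * was set on the blocking operation) and no semaphore value is changed.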
582 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
584 int result, sem_op, nsops, pid;
594 for (sop = sops; sop < sops + nsops; sop++) {
595 curr = &sma->sems[sop->sem_num];
596 sem_op = sop->sem_op;
597 result = curr->semval;
599 if (!sem_op && result)
608 if (sop->sem_flg & SEM_UNDO) {
609 int undo = un->semadj[sop->sem_num] - sem_op;
610 /* Exceeding the undo range is an error. */
611 if (undo < (-SEMAEM - 1) || undo > SEMAEM)
613 un->semadj[sop->sem_num] = undo;
616 curr->semval = result;
621 while (sop >= sops) {
622 sma->sems[sop->sem_num].sempid = pid;
635 if (sop->sem_flg & IPC_NOWAIT)
642 while (sop >= sops) {
643 sem_op = sop->sem_op;
644 sma->sems[sop->sem_num].semval -= sem_op;
645 if (sop->sem_flg & SEM_UNDO)
646 un->semadj[sop->sem_num] += sem_op;
653 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
655 int result, sem_op, nsops;
665 if (unlikely(q->dupsop))
666 return perform_atomic_semop_slow(sma, q);
669 * We scan the semaphore set twice, first to ensure that the entire
670 * operation can succeed, therefore avoiding any pointless writes
671 * to shared memory and having to undo such changes in order to block
672 * until the operations can go through.
674 for (sop = sops; sop < sops + nsops; sop++) {
675 curr = &sma->sems[sop->sem_num];
676 sem_op = sop->sem_op;
677 result = curr->semval;
679 if (!sem_op && result)
680 goto would_block; /* wait-for-zero */
689 if (sop->sem_flg & SEM_UNDO) {
690 int undo = un->semadj[sop->sem_num] - sem_op;
692 /* Exceeding the undo range is an error. */
693 if (undo < (-SEMAEM - 1) || undo > SEMAEM)
698 for (sop = sops; sop < sops + nsops; sop++) {
699 curr = &sma->sems[sop->sem_num];
700 sem_op = sop->sem_op;
701 result = curr->semval;
703 if (sop->sem_flg & SEM_UNDO) {
704 int undo = un->semadj[sop->sem_num] - sem_op;
706 un->semadj[sop->sem_num] = undo;
708 curr->semval += sem_op;
709 curr->sempid = q->pid;
716 return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
719 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
720 struct wake_q_head *wake_q)
722 wake_q_add(wake_q, q->sleeper);
724 * Rely on the above implicit barrier, such that we can
725 * ensure that we hold a reference to the task before setting
726 * q->status. Otherwise we could race with do_exit if the
727 * task is awoken by an external event before calling
730 WRITE_ONCE(q->status, error);
733 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
737 sma->complex_count--;
740 /** check_restart(sma, q)
741 * @sma: semaphore array
742 * @q: the operation that just completed
744 * update_queue is O(N^2) when it restarts scanning the whole queue of
745 * waiting operations. Therefore this function checks if the restart is
746 * really necessary. It is called after a previously waiting operation
747 * modified the array.
748 * Note that wait-for-zero operations are handled without restart.
750 static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
752 /* pending complex alter operations are too difficult to analyse */
753 if (!list_empty(&sma->pending_alter))
756 /* we were a sleeping complex operation. Too difficult */
760 /* It is impossible that someone waits for the new value:
761 * - complex operations always restart.
762 * - wait-for-zero operations are handled separately.
763 * - q is a previously sleeping simple operation that
764 * altered the array. It must be a decrement, because
765 * simple increments never sleep.
766 * - If there are older (higher priority) decrements
767 * in the queue, then they have observed the original
768 * semval value and couldn't proceed. The operation
769 * decremented the value - thus they won't proceed either.
775 * wake_const_ops - wake up non-alter tasks
776 * @sma: semaphore array.
777 * @semnum: semaphore that was modified.
778 * @wake_q: lockless wake-queue head.
780 * wake_const_ops must be called after a semaphore in a semaphore array
781 * was set to 0. If complex const operations are pending, wake_const_ops must
782 * be called with semnum = -1, as well as with the number of each modified
784 * The tasks that must be woken up are added to @wake_q. The return code
785 * is stored in q->status.
786 * The function returns 1 if at least one operation was completed successfully.
788 static int wake_const_ops(struct sem_array *sma, int semnum,
789 struct wake_q_head *wake_q)
791 struct sem_queue *q, *tmp;
792 struct list_head *pending_list;
793 int semop_completed = 0;
796 pending_list = &sma->pending_const;
798 pending_list = &sma->sems[semnum].pending_const;
800 list_for_each_entry_safe(q, tmp, pending_list, list) {
801 int error = perform_atomic_semop(sma, q);
805 /* operation completed, remove from queue & wakeup */
806 unlink_queue(sma, q);
808 wake_up_sem_queue_prepare(q, error, wake_q);
813 return semop_completed;
817 * do_smart_wakeup_zero - wakeup all wait for zero tasks
818 * @sma: semaphore array
819 * @sops: operations that were performed
820 * @nsops: number of operations
821 * @wake_q: lockless wake-queue head
823 * Checks all required queues for wait-for-zero operations, based
824 * on the actual changes that were performed on the semaphore array.
825 * The function returns 1 if at least one operation was completed successfully.
827 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
828 int nsops, struct wake_q_head *wake_q)
831 int semop_completed = 0;
834 /* first: the per-semaphore queues, if known */
836 for (i = 0; i < nsops; i++) {
837 int num = sops[i].sem_num;
839 if (sma->sems[num].semval == 0) {
841 semop_completed |= wake_const_ops(sma, num, wake_q);
846 * No sops means modified semaphores not known.
847 * Assume all were changed.
849 for (i = 0; i < sma->sem_nsems; i++) {
850 if (sma->sems[i].semval == 0) {
852 semop_completed |= wake_const_ops(sma, i, wake_q);
857 * If one of the modified semaphores became 0,
858 * then check the global queue, too.
861 semop_completed |= wake_const_ops(sma, -1, wake_q);
863 return semop_completed;
868 * update_queue - look for tasks that can be completed.
869 * @sma: semaphore array.
870 * @semnum: semaphore that was modified.
871 * @wake_q: lockless wake-queue head.
873 * update_queue must be called after a semaphore in a semaphore array
874 * was modified. If multiple semaphores were modified, update_queue must
875 * be called with semnum = -1, as well as with the number of each modified
877 * The tasks that must be woken up are added to @wake_q. The return code
878 * is stored in q->status.
879 * The function internally checks if const operations can now succeed.
881 * The function returns 1 if at least one semop was completed successfully.
883 static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
885 struct sem_queue *q, *tmp;
886 struct list_head *pending_list;
887 int semop_completed = 0;
890 pending_list = &sma->pending_alter;
892 pending_list = &sma->sems[semnum].pending_alter;
895 list_for_each_entry_safe(q, tmp, pending_list, list) {
898 /* If we are scanning the single sop, per-semaphore list of
899 * one semaphore and that semaphore is 0, then it is not
900 * necessary to scan further: simple increments
901 * that affect only one entry succeed immediately and cannot
902 * be in the per semaphore pending queue, and decrements
903 * cannot be successful if the value is already 0.
905 if (semnum != -1 && sma->sems[semnum].semval == 0)
908 error = perform_atomic_semop(sma, q);
910 /* Does q->sleeper still need to sleep? */
914 unlink_queue(sma, q);
920 do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
921 restart = check_restart(sma, q);
924 wake_up_sem_queue_prepare(q, error, wake_q);
928 return semop_completed;
932 * set_semotime - set sem_otime
933 * @sma: semaphore array
934 * @sops: operations that modified the array, may be NULL
936 * sem_otime is replicated to avoid cache line thrashing.
937 * This function sets one instance to the current time.
939 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
942 sma->sems[0].sem_otime = get_seconds();
944 sma->sems[sops[0].sem_num].sem_otime =
950 * do_smart_update - optimized update_queue
951 * @sma: semaphore array
952 * @sops: operations that were performed
953 * @nsops: number of operations
954 * @otime: force setting otime
955 * @wake_q: lockless wake-queue head
957 * do_smart_update() does the required calls to update_queue() and do_smart_wakeup_zero(),
958 * based on the actual changes that were performed on the semaphore array.
959 * Note that the function does not do the actual wake-up: the caller is
960 * responsible for calling wake_up_q().
961 * It is safe to perform this call after dropping all locks.
963 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
964 int otime, struct wake_q_head *wake_q)
968 otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
970 if (!list_empty(&sma->pending_alter)) {
971 /* semaphore array uses the global queue - just process it. */
972 otime |= update_queue(sma, -1, wake_q);
976 * No sops, thus the modified semaphores are not
979 for (i = 0; i < sma->sem_nsems; i++)
980 otime |= update_queue(sma, i, wake_q);
983 * Check the semaphores that were increased:
984 * - No complex ops, thus all sleeping ops are
986 * - if we decreased the value, then any sleeping
987 * semaphore ops won't be able to run: If the
988 * previous value was too small, then the new
989 * value will be too small, too.
991 for (i = 0; i < nsops; i++) {
992 if (sops[i].sem_op > 0) {
993 otime |= update_queue(sma,
994 sops[i].sem_num, wake_q);
1000 set_semotime(sma, sops);
1004 * check_qop: Test if a queued operation sleeps on the semaphore semnum
1006 static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1009 struct sembuf *sop = q->blocking;
1012 * Linux always (since 0.99.10) reported a task as sleeping on all
1013 * semaphores. This violates SUS, therefore it was changed to the
1014 * standard compliant behavior.
1015 * Give the administrators a chance to notice that an application
1016 * might misbehave because it relies on the Linux behavior.
1018 pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1019 "The task %s (%d) triggered the difference, watch for misbehavior.\n",
1020 current->comm, task_pid_nr(current));
1022 if (sop->sem_num != semnum)
1025 if (count_zero && sop->sem_op == 0)
1027 if (!count_zero && sop->sem_op < 0)
1033 /* The following counts are associated with each semaphore:
1034 * semncnt number of tasks waiting on semval being nonzero
1035 * semzcnt number of tasks waiting on semval being zero
1037 * By definition, a task waits only on the semaphore of the first semop
1038 * that cannot proceed, even if additional operations would block, too.
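 *
 * Worked example (hypothetical request, for illustration only): a task
 * blocked on
 *	{ sem_num = 0, sem_op = -1 }, { sem_num = 1, sem_op = 0 }
 * where the first operation is the one that blocked (q->blocking) is
 * counted only in semncnt of semaphore 0, not in semzcnt of semaphore 1.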
1040 static int count_semcnt(struct sem_array *sma, ushort semnum,
1043 struct list_head *l;
1044 struct sem_queue *q;
1048 /* First: check the simple operations. They are easy to evaluate */
1050 l = &sma->sems[semnum].pending_const;
1052 l = &sma->sems[semnum].pending_alter;
1054 list_for_each_entry(q, l, list) {
1055 /* all tasks on a per-semaphore list sleep on exactly
1061 /* Then: check the complex operations. */
1062 list_for_each_entry(q, &sma->pending_alter, list) {
1063 semcnt += check_qop(sma, semnum, q, count_zero);
1066 list_for_each_entry(q, &sma->pending_const, list) {
1067 semcnt += check_qop(sma, semnum, q, count_zero);
1073 /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1074 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1075 * remains locked on exit.
1077 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1079 struct sem_undo *un, *tu;
1080 struct sem_queue *q, *tq;
1081 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1083 DEFINE_WAKE_Q(wake_q);
1085 /* Free the existing undo structures for this semaphore set. */
1086 ipc_assert_locked_object(&sma->sem_perm);
1087 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1088 list_del(&un->list_id);
1089 spin_lock(&un->ulp->lock);
1091 list_del_rcu(&un->list_proc);
1092 spin_unlock(&un->ulp->lock);
1096 /* Wake up all pending processes and let them fail with EIDRM. */
1097 list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1098 unlink_queue(sma, q);
1099 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1102 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1103 unlink_queue(sma, q);
1104 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1106 for (i = 0; i < sma->sem_nsems; i++) {
1107 struct sem *sem = &sma->sems[i];
1108 list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1109 unlink_queue(sma, q);
1110 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1112 list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1113 unlink_queue(sma, q);
1114 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
1118 /* Remove the semaphore set from the IDR */
1120 sem_unlock(sma, -1);
1124 ns->used_sems -= sma->sem_nsems;
1125 ipc_rcu_putref(sma, sem_rcu_free);
1128 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1132 return copy_to_user(buf, in, sizeof(*in));
1135 struct semid_ds out;
1137 memset(&out, 0, sizeof(out));
1139 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1141 out.sem_otime = in->sem_otime;
1142 out.sem_ctime = in->sem_ctime;
1143 out.sem_nsems = in->sem_nsems;
1145 return copy_to_user(buf, &out, sizeof(out));
1152 static time_t get_semotime(struct sem_array *sma)
1157 res = sma->sems[0].sem_otime;
1158 for (i = 1; i < sma->sem_nsems; i++) {
1159 time_t to = sma->sems[i].sem_otime;
1167 static int semctl_nolock(struct ipc_namespace *ns, int semid,
1168 int cmd, int version, void __user *p)
1171 struct sem_array *sma;
1177 struct seminfo seminfo;
1180 err = security_sem_semctl(NULL, cmd);
1184 memset(&seminfo, 0, sizeof(seminfo));
1185 seminfo.semmni = ns->sc_semmni;
1186 seminfo.semmns = ns->sc_semmns;
1187 seminfo.semmsl = ns->sc_semmsl;
1188 seminfo.semopm = ns->sc_semopm;
1189 seminfo.semvmx = SEMVMX;
1190 seminfo.semmnu = SEMMNU;
1191 seminfo.semmap = SEMMAP;
1192 seminfo.semume = SEMUME;
1193 down_read(&sem_ids(ns).rwsem);
1194 if (cmd == SEM_INFO) {
1195 seminfo.semusz = sem_ids(ns).in_use;
1196 seminfo.semaem = ns->used_sems;
1198 seminfo.semusz = SEMUSZ;
1199 seminfo.semaem = SEMAEM;
1201 max_id = ipc_get_maxid(&sem_ids(ns));
1202 up_read(&sem_ids(ns).rwsem);
1203 if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1205 return (max_id < 0) ? 0 : max_id;
1210 struct semid64_ds tbuf;
1213 memset(&tbuf, 0, sizeof(tbuf));
1216 if (cmd == SEM_STAT) {
1217 sma = sem_obtain_object(ns, semid);
1222 id = sma->sem_perm.id;
1224 sma = sem_obtain_object_check(ns, semid);
1232 if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1235 err = security_sem_semctl(sma, cmd);
1239 kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1240 tbuf.sem_otime = get_semotime(sma);
1241 tbuf.sem_ctime = sma->sem_ctime;
1242 tbuf.sem_nsems = sma->sem_nsems;
1244 if (copy_semid_to_user(p, &tbuf, version))
1256 static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1259 struct sem_undo *un;
1260 struct sem_array *sma;
1263 DEFINE_WAKE_Q(wake_q);
1265 #if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1266 /* big-endian 64bit */
1269 /* 32bit or little-endian 64bit */
1273 if (val > SEMVMX || val < 0)
1277 sma = sem_obtain_object_check(ns, semid);
1280 return PTR_ERR(sma);
1283 if (semnum < 0 || semnum >= sma->sem_nsems) {
1289 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1294 err = security_sem_semctl(sma, SETVAL);
1300 sem_lock(sma, NULL, -1);
1302 if (!ipc_valid_object(&sma->sem_perm)) {
1303 sem_unlock(sma, -1);
1308 curr = &sma->sems[semnum];
1310 ipc_assert_locked_object(&sma->sem_perm);
1311 list_for_each_entry(un, &sma->list_id, list_id)
1312 un->semadj[semnum] = 0;
1315 curr->sempid = task_tgid_vnr(current);
1316 sma->sem_ctime = get_seconds();
1317 /* maybe some queued-up processes were waiting for this */
1318 do_smart_update(sma, NULL, 0, 0, &wake_q);
1319 sem_unlock(sma, -1);
1325 static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1326 int cmd, void __user *p)
1328 struct sem_array *sma;
1331 ushort fast_sem_io[SEMMSL_FAST];
1332 ushort *sem_io = fast_sem_io;
1333 DEFINE_WAKE_Q(wake_q);
1336 sma = sem_obtain_object_check(ns, semid);
1339 return PTR_ERR(sma);
1342 nsems = sma->sem_nsems;
1345 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1346 goto out_rcu_wakeup;
1348 err = security_sem_semctl(sma, cmd);
1350 goto out_rcu_wakeup;
1356 ushort __user *array = p;
1359 sem_lock(sma, NULL, -1);
1360 if (!ipc_valid_object(&sma->sem_perm)) {
1364 if (nsems > SEMMSL_FAST) {
1365 if (!ipc_rcu_getref(sma)) {
1369 sem_unlock(sma, -1);
1371 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1372 if (sem_io == NULL) {
1373 ipc_rcu_putref(sma, sem_rcu_free);
1378 sem_lock_and_putref(sma);
1379 if (!ipc_valid_object(&sma->sem_perm)) {
1384 for (i = 0; i < sma->sem_nsems; i++)
1385 sem_io[i] = sma->sems[i].semval;
1386 sem_unlock(sma, -1);
1389 if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1396 struct sem_undo *un;
1398 if (!ipc_rcu_getref(sma)) {
1400 goto out_rcu_wakeup;
1404 if (nsems > SEMMSL_FAST) {
1405 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1406 if (sem_io == NULL) {
1407 ipc_rcu_putref(sma, sem_rcu_free);
1412 if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1413 ipc_rcu_putref(sma, sem_rcu_free);
1418 for (i = 0; i < nsems; i++) {
1419 if (sem_io[i] > SEMVMX) {
1420 ipc_rcu_putref(sma, sem_rcu_free);
1426 sem_lock_and_putref(sma);
1427 if (!ipc_valid_object(&sma->sem_perm)) {
1432 for (i = 0; i < nsems; i++) {
1433 sma->sems[i].semval = sem_io[i];
1434 sma->sems[i].sempid = task_tgid_vnr(current);
1437 ipc_assert_locked_object(&sma->sem_perm);
1438 list_for_each_entry(un, &sma->list_id, list_id) {
1439 for (i = 0; i < nsems; i++)
1442 sma->sem_ctime = get_seconds();
1443 /* maybe some queued-up processes were waiting for this */
1444 do_smart_update(sma, NULL, 0, 0, &wake_q);
1448 /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1451 if (semnum < 0 || semnum >= nsems)
1452 goto out_rcu_wakeup;
1454 sem_lock(sma, NULL, -1);
1455 if (!ipc_valid_object(&sma->sem_perm)) {
1459 curr = &sma->sems[semnum];
1469 err = count_semcnt(sma, semnum, 0);
1472 err = count_semcnt(sma, semnum, 1);
1477 sem_unlock(sma, -1);
1482 if (sem_io != fast_sem_io)
1487 static inline unsigned long
1488 copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1492 if (copy_from_user(out, buf, sizeof(*out)))
1497 struct semid_ds tbuf_old;
1499 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1502 out->sem_perm.uid = tbuf_old.sem_perm.uid;
1503 out->sem_perm.gid = tbuf_old.sem_perm.gid;
1504 out->sem_perm.mode = tbuf_old.sem_perm.mode;
1514 * This function handles some semctl commands which require the rwsem
1515 * to be held in write mode.
1516 * NOTE: no locks must be held, the rwsem is taken inside this function.
1518 static int semctl_down(struct ipc_namespace *ns, int semid,
1519 int cmd, int version, void __user *p)
1521 struct sem_array *sma;
1523 struct semid64_ds semid64;
1524 struct kern_ipc_perm *ipcp;
1526 if (cmd == IPC_SET) {
1527 if (copy_semid_from_user(&semid64, p, version))
1531 down_write(&sem_ids(ns).rwsem);
1534 ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1535 &semid64.sem_perm, 0);
1537 err = PTR_ERR(ipcp);
1541 sma = container_of(ipcp, struct sem_array, sem_perm);
1543 err = security_sem_semctl(sma, cmd);
1549 sem_lock(sma, NULL, -1);
1550 /* freeary unlocks the ipc object and rcu */
1554 sem_lock(sma, NULL, -1);
1555 err = ipc_update_perm(&semid64.sem_perm, ipcp);
1558 sma->sem_ctime = get_seconds();
1566 sem_unlock(sma, -1);
1570 up_write(&sem_ids(ns).rwsem);
1574 SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1577 struct ipc_namespace *ns;
1578 void __user *p = (void __user *)arg;
1583 version = ipc_parse_version(&cmd);
1584 ns = current->nsproxy->ipc_ns;
1591 return semctl_nolock(ns, semid, cmd, version, p);
1598 return semctl_main(ns, semid, semnum, cmd, p);
1600 return semctl_setval(ns, semid, semnum, arg);
1603 return semctl_down(ns, semid, cmd, version, p);
1609 /* If the task doesn't already have an undo_list, then allocate one
1610 * here. We guarantee there is only one thread using this undo list,
1611 * and current is THE ONE
1613 * If this allocation and assignment succeeds, but later
1614 * portions of this code fail, there is no need to free the sem_undo_list.
1615 * Just let it stay associated with the task, and it'll be freed later
1618 * This can block, so callers must hold no locks.
1620 static inline int get_undo_list(struct sem_undo_list **undo_listp)
1622 struct sem_undo_list *undo_list;
1624 undo_list = current->sysvsem.undo_list;
1626 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1627 if (undo_list == NULL)
1629 spin_lock_init(&undo_list->lock);
1630 atomic_set(&undo_list->refcnt, 1);
1631 INIT_LIST_HEAD(&undo_list->list_proc);
1633 current->sysvsem.undo_list = undo_list;
1635 *undo_listp = undo_list;
1639 static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1641 struct sem_undo *un;
1643 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1644 if (un->semid == semid)
1650 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1652 struct sem_undo *un;
1654 assert_spin_locked(&ulp->lock);
1656 un = __lookup_undo(ulp, semid);
1658 list_del_rcu(&un->list_proc);
1659 list_add_rcu(&un->list_proc, &ulp->list_proc);
1665 * find_alloc_undo - lookup (and if not present create) undo array
1667 * @semid: semaphore array id
1669 * The function looks up (and if not present creates) the undo structure.
1670 * The size of the undo structure depends on the size of the semaphore
1671 * array, thus the alloc path is not that straightforward.
1672 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
1673 * performs a rcu_read_lock().
1675 static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1677 struct sem_array *sma;
1678 struct sem_undo_list *ulp;
1679 struct sem_undo *un, *new;
1682 error = get_undo_list(&ulp);
1684 return ERR_PTR(error);
1687 spin_lock(&ulp->lock);
1688 un = lookup_undo(ulp, semid);
1689 spin_unlock(&ulp->lock);
1690 if (likely(un != NULL))
1693 /* no undo structure around - allocate one. */
1694 /* step 1: figure out the size of the semaphore array */
1695 sma = sem_obtain_object_check(ns, semid);
1698 return ERR_CAST(sma);
1701 nsems = sma->sem_nsems;
1702 if (!ipc_rcu_getref(sma)) {
1704 un = ERR_PTR(-EIDRM);
1709 /* step 2: allocate new undo structure */
1710 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1712 ipc_rcu_putref(sma, sem_rcu_free);
1713 return ERR_PTR(-ENOMEM);
1716 /* step 3: Acquire the lock on semaphore array */
1718 sem_lock_and_putref(sma);
1719 if (!ipc_valid_object(&sma->sem_perm)) {
1720 sem_unlock(sma, -1);
1723 un = ERR_PTR(-EIDRM);
1726 spin_lock(&ulp->lock);
1729 * step 4: check for races: did someone else allocate the undo struct?
1731 un = lookup_undo(ulp, semid);
1736 /* step 5: initialize & link new undo structure */
1737 new->semadj = (short *) &new[1];
1740 assert_spin_locked(&ulp->lock);
1741 list_add_rcu(&new->list_proc, &ulp->list_proc);
1742 ipc_assert_locked_object(&sma->sem_perm);
1743 list_add(&new->list_id, &sma->list_id);
1747 spin_unlock(&ulp->lock);
1748 sem_unlock(sma, -1);
1753 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1754 unsigned, nsops, const struct timespec __user *, timeout)
1756 int error = -EINVAL;
1757 struct sem_array *sma;
1758 struct sembuf fast_sops[SEMOPM_FAST];
1759 struct sembuf *sops = fast_sops, *sop;
1760 struct sem_undo *un;
1762 bool undos = false, alter = false, dupsop = false;
1763 struct sem_queue queue;
1764 unsigned long dup = 0, jiffies_left = 0;
1765 struct ipc_namespace *ns;
1767 ns = current->nsproxy->ipc_ns;
1769 if (nsops < 1 || semid < 0)
1771 if (nsops > ns->sc_semopm)
1773 if (nsops > SEMOPM_FAST) {
1774 sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1779 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1785 struct timespec _timeout;
1786 if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1790 if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1791 _timeout.tv_nsec >= 1000000000L) {
1795 jiffies_left = timespec_to_jiffies(&_timeout);
1799 for (sop = sops; sop < sops + nsops; sop++) {
1800 unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
1802 if (sop->sem_num >= max)
1804 if (sop->sem_flg & SEM_UNDO)
1808 * There was a previous alter access that appears
1809 * to have accessed the same semaphore, thus use
1810 * the dupsop logic. "appears", because the detection
1811 * can only check % BITS_PER_LONG.
1815 if (sop->sem_op != 0) {
1822 /* On success, find_alloc_undo takes the rcu_read_lock */
1823 un = find_alloc_undo(ns, semid);
1825 error = PTR_ERR(un);
1833 sma = sem_obtain_object_check(ns, semid);
1836 error = PTR_ERR(sma);
1841 if (max >= sma->sem_nsems) {
1847 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
1852 error = security_sem_semop(sma, sops, nsops, alter);
1859 locknum = sem_lock(sma, sops, nsops);
1861 * We eventually might perform the following check in a lockless
1862 * fashion, considering ipc_valid_object() locking constraints.
1863 * If nsops == 1 and there is no contention for sem_perm.lock, then
1864 * only a per-semaphore lock is held and it's OK to proceed with the
1865 * check below. More details on the fine-grained locking scheme
1866 * entangled here, and why it's RMID race safe, are in the comments at sem_lock().
1868 if (!ipc_valid_object(&sma->sem_perm))
1869 goto out_unlock_free;
1871 * semid identifiers are not unique - find_alloc_undo may have
1872 * allocated an undo structure, it was invalidated by an RMID
1873 * and now a new array which received the same id. Check and fail.
1874 * This case can be detected by checking un->semid. The existence of
1875 * "un" itself is guaranteed by rcu.
1877 if (un && un->semid == -1)
1878 goto out_unlock_free;
1881 queue.nsops = nsops;
1883 queue.pid = task_tgid_vnr(current);
1884 queue.alter = alter;
1885 queue.dupsop = dupsop;
1887 error = perform_atomic_semop(sma, &queue);
1888 if (error == 0) { /* non-blocking successful path */
1889 DEFINE_WAKE_Q(wake_q);
1892 * If the operation was successful, then do
1893 * the required updates.
1896 do_smart_update(sma, sops, nsops, 1, &wake_q);
1898 set_semotime(sma, sops);
1900 sem_unlock(sma, locknum);
1906 if (error < 0) /* non-blocking error path */
1907 goto out_unlock_free;
1910 * We need to sleep on this operation, so we put the current
1911 * task into the pending queue and go to sleep.
1915 curr = &sma->sems[sops->sem_num];
1918 if (sma->complex_count) {
1919 list_add_tail(&queue.list,
1920 &sma->pending_alter);
1923 list_add_tail(&queue.list,
1924 &curr->pending_alter);
1927 list_add_tail(&queue.list, &curr->pending_const);
1930 if (!sma->complex_count)
1934 list_add_tail(&queue.list, &sma->pending_alter);
1936 list_add_tail(&queue.list, &sma->pending_const);
1938 sma->complex_count++;
1942 queue.status = -EINTR;
1943 queue.sleeper = current;
1945 __set_current_state(TASK_INTERRUPTIBLE);
1946 sem_unlock(sma, locknum);
1950 jiffies_left = schedule_timeout(jiffies_left);
1955 * fastpath: the semop has completed, either successfully or
1956 * not; from the syscall pov, which of the two happened is quite
1957 * irrelevant to us at this point; we're done.
1959 * We _do_ care, nonetheless, about being awoken by a signal or
1960 * spuriously. The queue.status is checked again in the
1961 * slowpath (aka after taking sem_lock), such that we can detect
1962 * scenarios where we were awakened externally, during the
1963 * window between wake_q_add() and wake_up_q().
1965 error = READ_ONCE(queue.status);
1966 if (error != -EINTR) {
1968 * User space could assume that semop() is a memory
1969 * barrier: Without the mb(), the cpu could
1970 * speculatively read in userspace stale data that was
1971 * overwritten by the previous owner of the semaphore.
1978 locknum = sem_lock(sma, sops, nsops);
1980 if (!ipc_valid_object(&sma->sem_perm))
1981 goto out_unlock_free;
1983 error = READ_ONCE(queue.status);
1986 * If queue.status != -EINTR we were woken up by another process.
1987 * Leave without unlink_queue(), but with sem_unlock().
1989 if (error != -EINTR)
1990 goto out_unlock_free;
1993 * If an interrupt occurred we have to clean up the queue.
1995 if (timeout && jiffies_left == 0)
1997 } while (error == -EINTR && !signal_pending(current)); /* spurious */
1999 unlink_queue(sma, &queue);
2002 sem_unlock(sma, locknum);
2005 if (sops != fast_sops)
2010 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2013 return sys_semtimedop(semid, tsops, nsops, NULL);
2016 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2017 * parent and child tasks.
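 *
 * Hedged user-space illustration (glibc clone() wrapper; the identifiers
 * child_fn and child_stack_top are examples only):
 *	clone(child_fn, child_stack_top, CLONE_SYSVSEM | SIGCHLD, NULL);
 * Parent and child then share one sem_undo_list, so SEM_UNDO adjustments
 * are applied once, when the last task using the list exits (see exit_sem()).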
2020 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2022 struct sem_undo_list *undo_list;
2025 if (clone_flags & CLONE_SYSVSEM) {
2026 error = get_undo_list(&undo_list);
2029 atomic_inc(&undo_list->refcnt);
2030 tsk->sysvsem.undo_list = undo_list;
2032 tsk->sysvsem.undo_list = NULL;
2038 * add semadj values to semaphores, free undo structures.
2039 * undo structures are not freed when semaphore arrays are destroyed
2040 * so some of them may be out of date.
2041 * IMPLEMENTATION NOTE: There is some confusion over whether the
2042 * set of adjustments that needs to be done should be done in an atomic
2043 * manner or not. That is, if we are attempting to decrement the semval
2044 * should we queue up and wait until we can do so legally?
2045 * The original implementation attempted to do this (queue and wait).
2046 * The current implementation does not do so. The POSIX standard
2047 * and SVID should be consulted to determine what behavior is mandated.
2049 void exit_sem(struct task_struct *tsk)
2051 struct sem_undo_list *ulp;
2053 ulp = tsk->sysvsem.undo_list;
2056 tsk->sysvsem.undo_list = NULL;
2058 if (!atomic_dec_and_test(&ulp->refcnt))
2062 struct sem_array *sma;
2063 struct sem_undo *un;
2065 DEFINE_WAKE_Q(wake_q);
2070 un = list_entry_rcu(ulp->list_proc.next,
2071 struct sem_undo, list_proc);
2072 if (&un->list_proc == &ulp->list_proc) {
2074 * We must wait for freeary() before freeing this ulp,
2075 * in case we raced with last sem_undo. There is a small
2076 * possibility where we exit while freeary() didn't
2077 * finish unlocking sem_undo_list.
2079 spin_unlock_wait(&ulp->lock);
2083 spin_lock(&ulp->lock);
2085 spin_unlock(&ulp->lock);
2087 /* exit_sem raced with IPC_RMID, nothing to do */
2093 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2094 /* exit_sem raced with IPC_RMID, nothing to do */
2100 sem_lock(sma, NULL, -1);
2101 /* exit_sem raced with IPC_RMID, nothing to do */
2102 if (!ipc_valid_object(&sma->sem_perm)) {
2103 sem_unlock(sma, -1);
2107 un = __lookup_undo(ulp, semid);
2109 /* exit_sem raced with IPC_RMID+semget() that created
2110 * exactly the same semid. Nothing to do.
2112 sem_unlock(sma, -1);
2117 /* remove un from the linked lists */
2118 ipc_assert_locked_object(&sma->sem_perm);
2119 list_del(&un->list_id);
2121 /* we are the last process using this ulp, acquiring ulp->lock
2122 * isn't required. Besides that, we are also protected against
2123 * IPC_RMID as we hold sma->sem_perm lock now
2125 list_del_rcu(&un->list_proc);
2127 /* perform adjustments registered in un */
2128 for (i = 0; i < sma->sem_nsems; i++) {
2129 struct sem *semaphore = &sma->sems[i];
2130 if (un->semadj[i]) {
2131 semaphore->semval += un->semadj[i];
2133 * Range checks of the new semaphore value,
2134 * not defined by sus:
2135 * - Some unices ignore the undo entirely
2136 * (e.g. HP UX 11i 11.22, Tru64 V5.1)
2137 * - some cap the value (e.g. FreeBSD caps
2138 * at 0, but doesn't enforce SEMVMX)
2140 * Linux caps the semaphore value, both at 0
2143 * Manfred <manfred@colorfullife.com>
2145 if (semaphore->semval < 0)
2146 semaphore->semval = 0;
2147 if (semaphore->semval > SEMVMX)
2148 semaphore->semval = SEMVMX;
2149 semaphore->sempid = task_tgid_vnr(current);
2152 /* maybe some queued-up processes were waiting for this */
2153 do_smart_update(sma, NULL, 0, 1, &wake_q);
2154 sem_unlock(sma, -1);
2163 #ifdef CONFIG_PROC_FS
2164 static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2166 struct user_namespace *user_ns = seq_user_ns(s);
2167 struct sem_array *sma = it;
2171 * The proc interface isn't aware of sem_lock(), it calls
2172 * ipc_lock_object() directly (in sysvipc_find_ipc).
2173 * In order to stay compatible with sem_lock(), we must
2174 * enter / leave complex_mode.
2176 complexmode_enter(sma);
2178 sem_otime = get_semotime(sma);
2181 "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2186 from_kuid_munged(user_ns, sma->sem_perm.uid),
2187 from_kgid_munged(user_ns, sma->sem_perm.gid),
2188 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2189 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2193 complexmode_tryleave(sma);