/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

#include <linux/types.h>

typedef struct qspinlock {
	union {
		atomic_t val;

		/*
		 * By using the whole 2nd least significant byte for the
		 * pending bit, we can allow better optimization of the lock
		 * acquisition for the pending bit holder.
		 */
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
} arch_spinlock_t;
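
/*
 * Illustrative sketch (not part of the upstream header): the union above
 * lets the lock word be viewed either as one atomic_t or one byte/halfword
 * at a time. With the 8-bit pending field, the pending-bit holder can take
 * the lock and clear the pending bit with a single plain halfword store
 * instead of an atomic read-modify-write, roughly (modelled on the slow
 * path in kernel/locking/qspinlock.c):
 *
 *	static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
 *	{
 *		WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
 *	}
 *
 * On little-endian, 'locked_pending' aliases the low two bytes of 'val'
 * (locked in byte 0, pending in byte 1), which is the optimization the
 * comment above refers to.
 */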

/*
 * Initializer
 */
#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }
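
/*
 * Typical use (illustrative only): statically initialize an unlocked
 * qspinlock. The nested braces initialize the anonymous union through
 * its first member, 'val'.
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 */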

/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
#define _Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
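
/*
 * Illustrative sketch (not part of the upstream header): the tail bits
 * pack an MCS node index and "CPU number + 1" using the offsets above.
 * A queueing CPU would build its tail value roughly like this (assumed
 * helper, modelled on the slow path in kernel/locking/qspinlock.c):
 *
 *	static inline u32 encode_tail(int cpu, int idx)
 *	{
 *		u32 tail;
 *
 *		tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
 *		tail |= idx << _Q_TAIL_IDX_OFFSET;	// idx < 4, fits in 2 bits
 *
 *		return tail;
 *	}
 *
 * Storing "cpu + 1" keeps a tail of 0 reserved for "no waiters queued".
 */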

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */