From: Linus Torvalds
Date: Wed, 1 Sep 2021 22:13:02 +0000 (-0700)
Subject: Merge tag 'asm-generic-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd...
X-Git-Tag: microblaze-v5.16~145
X-Git-Url: http://git.monstr.eu/?p=linux-2.6-microblaze.git;a=commitdiff_plain;h=4cdc4cc2ad35f92338497d53d3e8b7876cf2a51d

Merge tag 'asm-generic-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic updates from Arnd Bergmann:
 "The main content for 5.15 is a series that cleans up the handling of
  strncpy_from_user() and strnlen_user(), removing a lot of slightly
  incorrect versions of these in favor of the lib/strn*.c helpers that
  implement these correctly and more efficiently.

  The only architectures that retain a private version now are mips,
  ia64, um and parisc. I had offered to convert those as well, but
  Thomas Bogendoerfer wanted to keep the mips version for the moment
  until he had a chance to do regression testing.

  The branch also contains two patches for bitops and for ffs()"

* tag 'asm-generic-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  bitops/non-atomic: make @nr unsigned to avoid any DIV
  asm-generic: ffs: Drop bogus reference to ffz location
  asm-generic: reverse GENERIC_{STRNCPY_FROM,STRNLEN}_USER symbols
  asm-generic: remove extra strn{cpy_from,len}_user declarations
  asm-generic: uaccess: remove inline strncpy_from_user/strnlen_user
  s390: use generic strncpy/strnlen from_user
  microblaze: use generic strncpy/strnlen from_user
  csky: use generic strncpy/strnlen from_user
  arc: use generic strncpy/strnlen from_user
  hexagon: use generic strncpy/strnlen from_user
  h8300: remove stale strncpy_from_user
  asm-generic/uaccess.h: remove __strncpy_from_user/__strnlen_user
---

4cdc4cc2ad35f92338497d53d3e8b7876cf2a51d
diff --cc include/asm-generic/bitops/non-atomic.h
index 365377fb104b,c5a7d8eb9c2b..078cc68be2f1
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@@ -13,18 -13,15 +13,18 @@@
   * If it's called on the same region of memory simultaneously, the effect
   * may be that only one operation succeeds.
   */
 -static inline void __set_bit(unsigned int nr, volatile unsigned long *addr)
 +static __always_inline void
- arch___set_bit(int nr, volatile unsigned long *addr)
++arch___set_bit(unsigned int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

  	*p |= mask;
  }
 +#define __set_bit arch___set_bit

 -static inline void __clear_bit(unsigned int nr, volatile unsigned long *addr)
 +static __always_inline void
- arch___clear_bit(int nr, volatile unsigned long *addr)
++arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@@ -42,8 -38,7 +42,8 @@@
   * If it's called on the same region of memory simultaneously, the effect
   * may be that only one operation succeeds.
   */
 -static inline void __change_bit(unsigned int nr, volatile unsigned long *addr)
 +static __always_inline
- void arch___change_bit(int nr, volatile unsigned long *addr)
++void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@@ -61,8 -55,7 +61,8 @@@
   * If two examples of this operation race, one can appear to succeed
   * but actually fail. You must protect multiple accesses with a lock.
   */
 -static inline int __test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
 +static __always_inline int
- arch___test_and_set_bit(int nr, volatile unsigned long *addr)
++arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@@ -82,8 -74,7 +82,8 @@@
   * If two examples of this operation race, one can appear to succeed
   * but actually fail. You must protect multiple accesses with a lock.
   */
 -static inline int __test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
 +static __always_inline int
- arch___test_and_clear_bit(int nr, volatile unsigned long *addr)
++arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@@ -92,11 -83,10 +92,11 @@@
  	*p = old & ~mask;
  	return (old & mask) != 0;
  }
 +#define __test_and_clear_bit arch___test_and_clear_bit

  /* WARNING: non atomic and it can be reordered! */
 -static inline int __test_and_change_bit(unsigned int nr,
 -					volatile unsigned long *addr)
 +static __always_inline int
- arch___test_and_change_bit(int nr, volatile unsigned long *addr)
++arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
  {
  	unsigned long mask = BIT_MASK(nr);
  	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@@ -112,8 -101,7 +112,8 @@@
   * @nr: bit number to test
   * @addr: Address to start counting from
   */
 -static inline int test_bit(unsigned int nr, const volatile unsigned long *addr)
 +static __always_inline int
- arch_test_bit(int nr, const volatile unsigned long *addr)
++arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
  {
  	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
  }
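
A note on the "bitops/non-atomic: make @nr unsigned to avoid any DIV" entry
in the shortlog above: BIT_WORD() and BIT_MASK() (include/linux/bits.h)
compute nr / BITS_PER_LONG and nr % BITS_PER_LONG. With a signed @nr the
compiler has to preserve round-toward-zero semantics for negative values,
which can cost extra fixup instructions or, on some configurations, an
actual divide; with an unsigned @nr both operations reduce to a shift and
an AND. The sketch below is illustrative only -- it is not part of the
series, and word_index()/bit_mask() are stand-in names that merely mirror
what BIT_WORD()/BIT_MASK() expand to:

    /* Illustration only -- not kernel code. */
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Stand-in for BIT_WORD(): which longword holds bit nr. */
    static unsigned long word_index(unsigned int nr)
    {
            return nr / BITS_PER_LONG;          /* nr >> 6 on a 64-bit build */
    }

    /* Stand-in for BIT_MASK(): the bit's mask within that word. */
    static unsigned long bit_mask(unsigned int nr)
    {
            return 1UL << (nr % BITS_PER_LONG); /* 1UL << (nr & 63) */
    }

    int main(void)
    {
            unsigned int nr = 70;

            printf("bit %u: word %lu, mask %#lx\n",
                   nr, word_index(nr), bit_mask(nr));
            return 0;   /* prints: bit 70: word 1, mask 0x40 */
    }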