arm64: memory: Cosmetic cleanups
author Will Deacon <will@kernel.org>
Tue, 13 Aug 2019 16:22:51 +0000 (17:22 +0100)
committer Will Deacon <will@kernel.org>
Wed, 14 Aug 2019 12:09:45 +0000 (13:09 +0100)
Clean up memory.h so that the indentation is consistent, remove pointless
line-wrapping, and use consistent parameter names for different versions
of the same macro.

Reviewed-by: Steve Capper <steve.capper@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/memory.h

index 27f35ce..d69c286 100644
 
 #include <linux/compiler.h>
 #include <linux/const.h>
+#include <linux/sizes.h>
 #include <linux/types.h>
 #include <asm/bug.h>
 #include <asm/page-def.h>
-#include <linux/sizes.h>
 
 /*
  * Size of the PCI I/O space. This must remain a power of two so that
@@ -66,8 +66,8 @@
 
 #define _VA_START(va)          (-(UL(1) << ((va) - 1)))
 
-#define KERNEL_START      _text
-#define KERNEL_END        _end
+#define KERNEL_START           _text
+#define KERNEL_END             _end
 
 #ifdef CONFIG_ARM64_VA_BITS_52
 #define MAX_USER_VA_BITS       52
  * 16 KB granule: 128 level 3 entries, with contiguous bit
  * 64 KB granule:  32 level 3 entries, with contiguous bit
  */
-#define SEGMENT_ALIGN                  SZ_2M
+#define SEGMENT_ALIGN          SZ_2M
 #else
 /*
  *  4 KB granule:  16 level 3 entries, with contiguous bit
  * 16 KB granule:   4 level 3 entries, without contiguous bit
  * 64 KB granule:   1 level 3 entry
  */
-#define SEGMENT_ALIGN                  SZ_64K
+#define SEGMENT_ALIGN          SZ_64K
 #endif
 
 /*
@@ -253,8 +253,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 
 #define __virt_to_phys_nodebug(x) ({                                   \
        phys_addr_t __x = (phys_addr_t)(__tag_reset(x));                \
-       __is_lm_address(__x) ? __lm_to_phys(__x) :                      \
-                              __kimg_to_phys(__x);                     \
+       __is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x); \
 })
 
 #define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x))
@@ -301,17 +300,17 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa_nodebug(x)                __virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
-#define sym_to_pfn(x)      __phys_to_pfn(__pa_symbol(x))
+#define virt_to_pfn(x)         __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)          __phys_to_pfn(__pa_symbol(x))
 
 /*
- *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
- *  virt_addr_valid(k) indicates whether a virtual address is valid
+ *  virt_to_page(x)    convert a _valid_ virtual address to struct page *
+ *  virt_addr_valid(x) indicates whether a virtual address is valid
  */
 #define ARCH_PFN_OFFSET                ((unsigned long)PHYS_PFN_OFFSET)
 
 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
-#define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
+#define virt_to_page(x)                pfn_to_page(virt_to_pfn(x))
 #else
 #define page_to_virt(x)        ({                                              \
        __typeof__(x) __page = x;                                       \
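
The granule comments in the SEGMENT_ALIGN hunks above are simple arithmetic:
the segment alignment divided by the granule (page) size gives the number of
level 3 entries a segment spans. A minimal standalone C sketch that
double-checks those figures (the SZ_* constants are redefined here for
illustration, mirroring linux/sizes.h, and are not taken from memory.h):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the linux/sizes.h constants. */
    #define SZ_64K (64UL * 1024)
    #define SZ_2M  (2UL * 1024 * 1024)

    int main(void)
    {
            /* 2 MB segments: 128 x 16 KB entries, 32 x 64 KB entries */
            assert(SZ_2M / (16UL * 1024) == 128);
            assert(SZ_2M / (64UL * 1024) == 32);

            /* 64 KB segments: 16 x 4 KB, 4 x 16 KB, 1 x 64 KB entry */
            assert(SZ_64K / (4UL * 1024) == 16);
            assert(SZ_64K / (16UL * 1024) == 4);
            assert(SZ_64K / (64UL * 1024) == 1);

            printf("SEGMENT_ALIGN granule arithmetic checks out\n");
            return 0;
    }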
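The __virt_to_phys_nodebug hunk collapses a wrapped ternary onto one line; the
logic it carries is a two-way translation in which linear-map addresses and
kernel-image addresses are each converted by their own offset. A toy userspace
sketch of that shape follows; every fake_* name and constant is invented for
illustration and does not match the kernel's real layout or helpers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up layout constants, chosen only to make the toy example run. */
    #define FAKE_PAGE_OFFSET   0xffff800000000000ULL  /* start of linear map */
    #define FAKE_KIMAGE_VADDR  0xffff900010000000ULL  /* kernel image base   */
    #define FAKE_PHYS_OFFSET   0x40000000ULL          /* RAM base            */
    #define FAKE_KIMAGE_PHYS   0x40210000ULL          /* image load address  */

    static bool fake_is_lm_address(uint64_t va)
    {
            /* In this toy layout, linear-map addresses sit below the image. */
            return va >= FAKE_PAGE_OFFSET && va < FAKE_KIMAGE_VADDR;
    }

    static uint64_t fake_lm_to_phys(uint64_t va)
    {
            return va - FAKE_PAGE_OFFSET + FAKE_PHYS_OFFSET;
    }

    static uint64_t fake_kimg_to_phys(uint64_t va)
    {
            return va - FAKE_KIMAGE_VADDR + FAKE_KIMAGE_PHYS;
    }

    /* Same shape as __virt_to_phys_nodebug after the patch: one ternary. */
    static uint64_t fake_virt_to_phys(uint64_t va)
    {
            return fake_is_lm_address(va) ? fake_lm_to_phys(va)
                                          : fake_kimg_to_phys(va);
    }

    int main(void)
    {
            printf("lm:   %#llx\n", (unsigned long long)
                   fake_virt_to_phys(FAKE_PAGE_OFFSET + 0x1000));
            printf("kimg: %#llx\n", (unsigned long long)
                   fake_virt_to_phys(FAKE_KIMAGE_VADDR + 0x1000));
            return 0;
    }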