/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
#include <asm/shstk.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};
extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
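
/*
 * Illustrative sketch (not part of the upstream header): the boot-time
 * detected TLB sizes above are indexed with enum tlb_infos, e.g.:
 *
 *	unsigned int dtlb_4k_entries = tlb_lld_4k[ENTRIES];
 */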
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_topology {
	// Real APIC ID read from the local APIC
	u32			apicid;
	// The initial APIC ID provided by CPUID
	u32			initial_apicid;
	// Physical package ID
	u32			pkg_id;
	// Physical die ID on AMD, Relative on Intel
	u32			die_id;
	// Compute unit ID - AMD specific
	u32			cu_id;
	// Core ID relative to the package
	u32			core_id;
	// Logical ID mappings
	u32			logical_pkg_id;
	u32			logical_die_id;
};
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32			vmx_capability[NVMXINTS];
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops which require the alignment. Use unnamed
	 * union to enforce the array is aligned to size of unsigned long.
	 */
	union {
		__u32		x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	struct cpuinfo_topology	topo;
	/* in KB - valid for CPUS which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_cache_mbm_width_offset;
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* protected processor identification number */
	u64			ppin;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Index into per_cpu list: */
	u16			cpu_index;
	/* Is SMT active on this core? */
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];
#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif
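
/*
 * Illustrative sketch (not part of the upstream header): consumers go
 * through cpu_data()/boot_cpu_data rather than the per-CPU variable
 * itself, e.g.:
 *
 *	struct cpuinfo_x86 *c = &cpu_data(cpu);
 *
 *	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
 *		...;
 */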
extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
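
/*
 * Illustrative arithmetic (not part of the upstream header): with
 * x86_cache_bits == 46 and 4K pages (PAGE_SHIFT == 12), the limit is
 * BIT_ULL(46 - 1 - 12) == BIT_ULL(33), i.e. the first PFN that the
 * L1TF mitigation no longer considers safe to expose.
 */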
extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}
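
/*
 * Illustrative sketch (not part of the upstream header): read_cr3_pa()
 * masks off the PCID/flag bits, so the result can be turned into a
 * kernel virtual pointer directly:
 *
 *	pgd_t *pgd = __va(read_cr3_pa());
 */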
/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware. entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif
/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES / sizeof(long))

#define IO_BITMAP_OFFSET_VALID_MAP				\
	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL				\
	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
	 offsetof(struct tss_struct, x86_tss))
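
/*
 * Illustrative arithmetic (not part of the upstream header): 65536 bits
 * cover the full 16-bit I/O port range, i.e. 8192 bytes, which is 1024
 * longs on 64-bit, plus one extra terminating long in the structs below.
 */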
#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * iobitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT	\
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)
struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);
/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64			prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need to
	 * actually touch the io_bitmap at all.
	 */
	unsigned int		prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long		mapall[IO_BITMAP_LONGS + 1];
};
struct tss_struct {
	/*
	 * The fixed hardware portion. This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * faults.
	 */
	struct x86_hw_tss	x86_tss;

	struct x86_io_bitmap	io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
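
/*
 * Illustrative sketch (not part of the upstream header): entry and
 * context-switch code reaches the hardware portion through this per-CPU
 * instance, as native_load_sp0() below does:
 *
 *	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 */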
/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);
#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

extern asmlinkage void ignore_sysret(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
#endif	/* !X86_64 */
struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short. Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	struct io_bitmap	*io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long		iopl_emul;

	unsigned int		iopl_warn:1;
	unsigned int		sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace. Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32			pkru;

#ifdef CONFIG_X86_USER_SHADOW_STACK
	unsigned long		features;
	unsigned long		features_locked;

	struct thread_shstk	shstk;
#endif

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
};
extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}
static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
static __always_inline unsigned long current_top_of_stack(void)
{
	/*
	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 * and around vm86 mode and sp0 on x86_64 is special because of the
	 * entry trampoline.
	 */
	return this_cpu_read_stable(pcpu_hot.top_of_stack);
}

static __always_inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}
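
/*
 * Illustrative note (not part of the upstream header): the subtraction
 * above is evaluated as an unsigned value, so a stack pointer that lies
 * above the top of the stack wraps to a huge number and also fails the
 * '< THREAD_SIZE' test; a single comparison checks both bounds.
 */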
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */
unsigned long __get_wchan(struct task_struct *p);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);
static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
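
/*
 * Illustrative sketch (not part of the upstream header): callers do a
 * read-modify-write of the MSR value, e.g. to enable branch-trap
 * single-stepping:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	update_debugctlmsr(debugctl | DEBUGCTLMSR_BTF);
 */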
extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}
/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
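
/*
 * Illustrative layout note (not part of the upstream header): the
 * user-mode pt_regs live in the topmost bytes of the task's stack,
 * below the optional padding:
 *
 *	struct pt_regs *regs = task_pt_regs(task);
 *
 * and task_top_of_stack(task) == (unsigned long)(regs + 1).
 */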
#ifdef CONFIG_X86_32
#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
extern unsigned long __end_init_task[];

#define INIT_THREAD {							    \
	.sp	= (unsigned long)&__end_init_task - sizeof(struct pt_regs),    \
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
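
/*
 * Illustrative arithmetic (not part of the upstream header): the mmap
 * search base sits one third of the way into the usable address space,
 * page aligned; e.g. a 3 GB 32-bit task size yields a base of 1 GB.
 */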
#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

extern u16 get_llc_id(unsigned int cpu);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
static inline u32 amd_get_highest_perf(void)		{ return 0; }
static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
static inline void amd_clear_divider(void)		{ }
static inline void amd_check_microcode(void)		{ }
#endif
extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);
enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};
#ifdef CONFIG_X86_SGX
int arch_memory_failure(unsigned long pfn, int flags);
#define arch_memory_failure arch_memory_failure

bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

extern bool gds_ucode_mitigated(void);

#endif /* _ASM_X86_PROCESSOR_H */