GNU Linux-libre 4.19.207-gnu1
[releases.git] / arch/x86/kernel/cpu/common.c
1 /* cpu_feature_enabled() cannot be used this early */
2 #define USE_EARLY_PGTABLE_L5
3
4 #include <linux/bootmem.h>
5 #include <linux/linkage.h>
6 #include <linux/bitops.h>
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/percpu.h>
10 #include <linux/string.h>
11 #include <linux/ctype.h>
12 #include <linux/delay.h>
13 #include <linux/sched/mm.h>
14 #include <linux/sched/clock.h>
15 #include <linux/sched/task.h>
16 #include <linux/init.h>
17 #include <linux/kprobes.h>
18 #include <linux/kgdb.h>
19 #include <linux/smp.h>
20 #include <linux/io.h>
21 #include <linux/syscore_ops.h>
22
23 #include <asm/stackprotector.h>
24 #include <asm/perf_event.h>
25 #include <asm/mmu_context.h>
26 #include <asm/archrandom.h>
27 #include <asm/hypervisor.h>
28 #include <asm/processor.h>
29 #include <asm/tlbflush.h>
30 #include <asm/debugreg.h>
31 #include <asm/sections.h>
32 #include <asm/vsyscall.h>
33 #include <linux/topology.h>
34 #include <linux/cpumask.h>
35 #include <asm/pgtable.h>
36 #include <linux/atomic.h>
37 #include <asm/proto.h>
38 #include <asm/setup.h>
39 #include <asm/apic.h>
40 #include <asm/desc.h>
41 #include <asm/fpu/internal.h>
42 #include <asm/mtrr.h>
43 #include <asm/hwcap2.h>
44 #include <linux/numa.h>
45 #include <asm/asm.h>
46 #include <asm/bugs.h>
47 #include <asm/cpu.h>
48 #include <asm/mce.h>
49 #include <asm/msr.h>
50 #include <asm/pat.h>
51 #include <asm/microcode.h>
52 #include <asm/microcode_intel.h>
53 #include <asm/intel-family.h>
54 #include <asm/cpu_device_id.h>
55
56 #ifdef CONFIG_X86_LOCAL_APIC
57 #include <asm/uv/uv.h>
58 #endif
59
60 #include "cpu.h"
61
62 u32 elf_hwcap2 __read_mostly;
63
64 /* all of these masks are initialized in setup_cpu_local_masks() */
65 cpumask_var_t cpu_initialized_mask;
66 cpumask_var_t cpu_callout_mask;
67 cpumask_var_t cpu_callin_mask;
68
69 /* representing cpus for which sibling maps can be computed */
70 cpumask_var_t cpu_sibling_setup_mask;
71
72 /* Number of siblings per CPU package */
73 int smp_num_siblings = 1;
74 EXPORT_SYMBOL(smp_num_siblings);
75
76 /* Last level cache ID of each logical CPU */
77 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
78
79 /* correctly size the local cpu masks */
80 void __init setup_cpu_local_masks(void)
81 {
82         alloc_bootmem_cpumask_var(&cpu_initialized_mask);
83         alloc_bootmem_cpumask_var(&cpu_callin_mask);
84         alloc_bootmem_cpumask_var(&cpu_callout_mask);
85         alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
86 }
87
88 static void default_init(struct cpuinfo_x86 *c)
89 {
90 #ifdef CONFIG_X86_64
91         cpu_detect_cache_sizes(c);
92 #else
93         /* Not much we can do here... */
94         /* Check if at least it has cpuid */
95         if (c->cpuid_level == -1) {
96                 /* No cpuid. It must be an ancient CPU */
97                 if (c->x86 == 4)
98                         strcpy(c->x86_model_id, "486");
99                 else if (c->x86 == 3)
100                         strcpy(c->x86_model_id, "386");
101         }
102 #endif
103 }
104
105 static const struct cpu_dev default_cpu = {
106         .c_init         = default_init,
107         .c_vendor       = "Unknown",
108         .c_x86_vendor   = X86_VENDOR_UNKNOWN,
109 };
110
111 static const struct cpu_dev *this_cpu = &default_cpu;
112
113 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
114 #ifdef CONFIG_X86_64
115         /*
116          * We need valid kernel segments for data and code in long mode too;
117          * IRET will check the segment types (kkeil 2000/10/28).
118          * Also, SYSRET mandates a special GDT layout.
119          *
120          * TLS descriptors are currently at a different place compared to i386.
121          * Hopefully nobody expects them at a fixed place (Wine?)
122          */
123         [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
124         [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
125         [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
126         [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
127         [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
128         [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
129 #else
130         [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
131         [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
132         [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
133         [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
134         /*
135          * Segments used for calling PnP BIOS have byte granularity.
136          * The code segments and data segments have fixed 64k limits,
137          * the transfer segment sizes are set at run time.
138          */
139         /* 32-bit code */
140         [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
141         /* 16-bit code */
142         [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
143         /* 16-bit data */
144         [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
145         /* 16-bit data */
146         [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
147         /* 16-bit data */
148         [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
149         /*
150          * The APM segments have byte granularity and their bases
151          * are set at run time.  All have 64k limits.
152          */
153         /* 32-bit code */
154         [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
155         /* 16-bit code */
156         [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
157         /* data */
158         [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
159
160         [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
161         [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
162         GDT_STACK_CANARY_INIT
163 #endif
164 } };
165 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
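/*
 * For reference, GDT_ENTRY_INIT(flags, base, limit) (see
 * <asm/desc_defs.h>) packs a 16-bit flags value: the low byte is the
 * descriptor access byte, the high nibble the granularity/size flags.
 * A few of the entries above, decoded:
 *
 *   0xc09b: G=1 (4K granularity), D/B=1 (32-bit), access byte 0x9b
 *           (present, DPL 0, code, execute/read, accessed)
 *   0xa09b: G=1, L=1 (64-bit code), access byte 0x9b
 *   0xc0f3: G=1, D/B=1, access byte 0xf3 (present, DPL 3, data,
 *           read/write, accessed)
 */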
166
167 static int __init x86_mpx_setup(char *s)
168 {
169         /* require an exact match without trailing characters */
170         if (strlen(s))
171                 return 0;
172
173         /* do not emit a message if the feature is not present */
174         if (!boot_cpu_has(X86_FEATURE_MPX))
175                 return 1;
176
177         setup_clear_cpu_cap(X86_FEATURE_MPX);
178         pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
179         return 1;
180 }
181 __setup("nompx", x86_mpx_setup);
182
183 #ifdef CONFIG_X86_64
184 static int __init x86_nopcid_setup(char *s)
185 {
186         /* nopcid doesn't accept parameters */
187         if (s)
188                 return -EINVAL;
189
190         /* do not emit a message if the feature is not present */
191         if (!boot_cpu_has(X86_FEATURE_PCID))
192                 return 0;
193
194         setup_clear_cpu_cap(X86_FEATURE_PCID);
195         pr_info("nopcid: PCID feature disabled\n");
196         return 0;
197 }
198 early_param("nopcid", x86_nopcid_setup);
199 #endif
200
201 static int __init x86_noinvpcid_setup(char *s)
202 {
203         /* noinvpcid doesn't accept parameters */
204         if (s)
205                 return -EINVAL;
206
207         /* do not emit a message if the feature is not present */
208         if (!boot_cpu_has(X86_FEATURE_INVPCID))
209                 return 0;
210
211         setup_clear_cpu_cap(X86_FEATURE_INVPCID);
212         pr_info("noinvpcid: INVPCID feature disabled\n");
213         return 0;
214 }
215 early_param("noinvpcid", x86_noinvpcid_setup);
216
217 #ifdef CONFIG_X86_32
218 static int cachesize_override = -1;
219 static int disable_x86_serial_nr = 1;
220
221 static int __init cachesize_setup(char *str)
222 {
223         get_option(&str, &cachesize_override);
224         return 1;
225 }
226 __setup("cachesize=", cachesize_setup);
227
228 static int __init x86_sep_setup(char *s)
229 {
230         setup_clear_cpu_cap(X86_FEATURE_SEP);
231         return 1;
232 }
233 __setup("nosep", x86_sep_setup);
234
235 /* Standard macro to see if a specific flag is changeable */
236 static inline int flag_is_changeable_p(u32 flag)
237 {
238         u32 f1, f2;
239
240         /*
241          * Cyrix and IDT cpus allow disabling of CPUID
242          * so the code below may return different results
243          * when it is executed before and after enabling
244          * the CPUID. Add "volatile" so that gcc cannot reuse a
245          * cached result for subsequent calls to this function.
246          */
247         asm volatile ("pushfl           \n\t"
248                       "pushfl           \n\t"
249                       "popl %0          \n\t"
250                       "movl %0, %1      \n\t"
251                       "xorl %2, %0      \n\t"
252                       "pushl %0         \n\t"
253                       "popfl            \n\t"
254                       "pushfl           \n\t"
255                       "popl %0          \n\t"
256                       "popfl            \n\t"
257
258                       : "=&r" (f1), "=&r" (f2)
259                       : "ir" (flag));
260
261         return ((f1^f2) & flag) != 0;
262 }
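/*
 * In pseudo-C, the asm above does the following (illustrative sketch
 * only; the real thing must use pushf/popf, since EFLAGS isn't
 * directly addressable):
 *
 *   f2 = eflags;            // save the original value
 *   eflags = f2 ^ flag;     // try to toggle the bit
 *   f1 = eflags;            // re-read: did the toggle stick?
 *   return (f1 ^ f2) & flag;
 *
 * For X86_EFLAGS_ID (bit 21), a toggle that sticks means the CPU
 * implements the CPUID instruction.
 */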
263
264 /* Probe for the CPUID instruction */
265 int have_cpuid_p(void)
266 {
267         return flag_is_changeable_p(X86_EFLAGS_ID);
268 }
269
270 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
271 {
272         unsigned long lo, hi;
273
274         if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
275                 return;
276
277         /* Disable processor serial number: */
278
279         rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
280         lo |= 0x200000;
281         wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
282
283         pr_notice("CPU serial number disabled.\n");
284         clear_cpu_cap(c, X86_FEATURE_PN);
285
286         /* Disabling the serial number may affect the cpuid level */
287         c->cpuid_level = cpuid_eax(0);
288 }
289
290 static int __init x86_serial_nr_setup(char *s)
291 {
292         disable_x86_serial_nr = 0;
293         return 1;
294 }
295 __setup("serialnumber", x86_serial_nr_setup);
296 #else
297 static inline int flag_is_changeable_p(u32 flag)
298 {
299         return 1;
300 }
301 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
302 {
303 }
304 #endif
305
306 static __init int setup_disable_smep(char *arg)
307 {
308         setup_clear_cpu_cap(X86_FEATURE_SMEP);
309         /* Check for things that depend on SMEP being enabled: */
310         check_mpx_erratum(&boot_cpu_data);
311         return 1;
312 }
313 __setup("nosmep", setup_disable_smep);
314
315 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
316 {
317         if (cpu_has(c, X86_FEATURE_SMEP))
318                 cr4_set_bits(X86_CR4_SMEP);
319 }
320
321 static __init int setup_disable_smap(char *arg)
322 {
323         setup_clear_cpu_cap(X86_FEATURE_SMAP);
324         return 1;
325 }
326 __setup("nosmap", setup_disable_smap);
327
328 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
329 {
330         unsigned long eflags = native_save_fl();
331
332         /* This should have been cleared long ago */
333         BUG_ON(eflags & X86_EFLAGS_AC);
334
335         if (cpu_has(c, X86_FEATURE_SMAP)) {
336 #ifdef CONFIG_X86_SMAP
337                 cr4_set_bits(X86_CR4_SMAP);
338 #else
339                 cr4_clear_bits(X86_CR4_SMAP);
340 #endif
341         }
342 }
343
344 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
345 {
346         /* Check the boot processor, plus build option for UMIP. */
347         if (!cpu_feature_enabled(X86_FEATURE_UMIP))
348                 goto out;
349
350         /* Check the current processor's cpuid bits. */
351         if (!cpu_has(c, X86_FEATURE_UMIP))
352                 goto out;
353
354         cr4_set_bits(X86_CR4_UMIP);
355
356         pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
357
358         return;
359
360 out:
361         /*
362          * Make sure UMIP is disabled in case it was enabled in a
363          * previous boot (e.g., via kexec).
364          */
365         cr4_clear_bits(X86_CR4_UMIP);
366 }
367
368 /*
369  * Protection Keys are not available in 32-bit mode.
370  */
371 static bool pku_disabled;
372
373 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
374 {
375         /* check the boot processor, plus compile options for PKU: */
376         if (!cpu_feature_enabled(X86_FEATURE_PKU))
377                 return;
378         /* checks the actual processor's cpuid bits: */
379         if (!cpu_has(c, X86_FEATURE_PKU))
380                 return;
381         if (pku_disabled)
382                 return;
383
384         cr4_set_bits(X86_CR4_PKE);
385         /*
386          * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
387          * cpuid bit to be set.  We need to ensure that we
388          * update that bit in this CPU's "cpu_info".
389          */
390         set_cpu_cap(c, X86_FEATURE_OSPKE);
391 }
392
393 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
394 static __init int setup_disable_pku(char *arg)
395 {
396         /*
397          * Do not clear the X86_FEATURE_PKU bit.  All of the
398          * runtime checks are against OSPKE so clearing the
399          * bit does nothing.
400          *
401          * This way, we will see "pku" in cpuinfo, but not
402          * "ospke", which is exactly what we want.  It shows
403          * that the CPU has PKU, but the OS has not enabled it.
404          * This happens to be exactly how a system would look
405          * if we disabled the config option.
406          */
407         pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
408         pku_disabled = true;
409         return 1;
410 }
411 __setup("nopku", setup_disable_pku);
412 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
413
414 /*
415  * Some CPU features depend on higher CPUID levels, which may not always
416  * be available due to CPUID level capping or broken virtualization
417  * software.  Add those features to this table to auto-disable them.
418  */
419 struct cpuid_dependent_feature {
420         u32 feature;
421         u32 level;
422 };
423
424 static const struct cpuid_dependent_feature
425 cpuid_dependent_features[] = {
426         { X86_FEATURE_MWAIT,            0x00000005 },
427         { X86_FEATURE_DCA,              0x00000009 },
428         { X86_FEATURE_XSAVE,            0x0000000d },
429         { 0, 0 }
430 };
431
432 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
433 {
434         const struct cpuid_dependent_feature *df;
435
436         for (df = cpuid_dependent_features; df->feature; df++) {
437
438                 if (!cpu_has(c, df->feature))
439                         continue;
440                 /*
441                  * Note: cpuid_level is set to -1 if unavailable, but
442                  * extended_cpuid_level is set to 0 if unavailable
443                  * and the legitimate extended levels are all negative
444                  * when signed; hence the weird messing around with
445                  * signs here...
446                  */
447                 if (!((s32)df->level < 0 ?
448                      (u32)df->level > (u32)c->extended_cpuid_level :
449                      (s32)df->level > (s32)c->cpuid_level))
450                         continue;
451
452                 clear_cpu_cap(c, df->feature);
453                 if (!warn)
454                         continue;
455
456                 pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
457                         x86_cap_flag(df->feature), df->level);
458         }
459 }
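/*
 * Worked example of the sign trick above: X86_FEATURE_XSAVE's level
 * 0x0000000d is positive as s32, so it is compared against
 * c->cpuid_level as signed; with cpuid_level == -1 (no CPUID),
 * 0xd > -1 holds and the feature is cleared.  A hypothetical extended
 * level such as 0x80000008 would be negative as s32 and would be
 * compared against c->extended_cpuid_level as unsigned instead.
 */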
460
461 /*
462  * Naming convention should be: <Name> [(<Codename>)]
463  * This table is only used if init_<vendor>() below doesn't set the name;
464  * in particular, if CPUID levels 0x80000002..4 are supported, this
465  * isn't used.
466  */
467
468 /* Look up CPU names by table lookup. */
469 static const char *table_lookup_model(struct cpuinfo_x86 *c)
470 {
471 #ifdef CONFIG_X86_32
472         const struct legacy_cpu_model_info *info;
473
474         if (c->x86_model >= 16)
475                 return NULL;    /* Range check */
476
477         if (!this_cpu)
478                 return NULL;
479
480         info = this_cpu->legacy_models;
481
482         while (info->family) {
483                 if (info->family == c->x86)
484                         return info->model_names[c->x86_model];
485                 info++;
486         }
487 #endif
488         return NULL;            /* Not found */
489 }
490
491 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
492 __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
493
494 void load_percpu_segment(int cpu)
495 {
496 #ifdef CONFIG_X86_32
497         loadsegment(fs, __KERNEL_PERCPU);
498 #else
499         __loadsegment_simple(gs, 0);
500         wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
501 #endif
502         load_stack_canary_segment();
503 }
504
505 #ifdef CONFIG_X86_32
506 /* The 32-bit entry code needs to find cpu_entry_area. */
507 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
508 #endif
509
510 #ifdef CONFIG_X86_64
511 /*
512  * Special IST stacks which the CPU switches to when it calls
513  * an IST-marked descriptor entry. Up to 7 stacks (hardware
514  * limit), all of them are 4K, except the debug stack which
515  * is 8K.
516  */
517 static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
518           [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
519           [DEBUG_STACK - 1]                     = DEBUG_STKSZ
520 };
521 #endif
522
523 /* Load the original GDT from the per-cpu structure */
524 void load_direct_gdt(int cpu)
525 {
526         struct desc_ptr gdt_descr;
527
528         gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
529         gdt_descr.size = GDT_SIZE - 1;
530         load_gdt(&gdt_descr);
531 }
532 EXPORT_SYMBOL_GPL(load_direct_gdt);
533
534 /* Load a fixmap remapping of the per-cpu GDT */
535 void load_fixmap_gdt(int cpu)
536 {
537         struct desc_ptr gdt_descr;
538
539         gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
540         gdt_descr.size = GDT_SIZE - 1;
541         load_gdt(&gdt_descr);
542 }
543 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
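/*
 * In both GDT loaders above, .size is GDT_SIZE - 1 because the limit
 * loaded by LGDT is inclusive: it is the offset of the last valid
 * byte of the table, not the table's length.
 */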
544
545 /*
546  * Current gdt points %fs at the "master" per-cpu area: after this,
547  * it's on the real one.
548  */
549 void switch_to_new_gdt(int cpu)
550 {
551         /* Load the original GDT */
552         load_direct_gdt(cpu);
553         /* Reload the per-cpu base */
554         load_percpu_segment(cpu);
555 }
556
557 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
558
559 static void get_model_name(struct cpuinfo_x86 *c)
560 {
561         unsigned int *v;
562         char *p, *q, *s;
563
564         if (c->extended_cpuid_level < 0x80000004)
565                 return;
566
567         v = (unsigned int *)c->x86_model_id;
568         cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
569         cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
570         cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
571         c->x86_model_id[48] = 0;
572
573         /* Trim whitespace */
574         p = q = s = &c->x86_model_id[0];
575
576         while (*p == ' ')
577                 p++;
578
579         while (*p) {
580                 /* Note the last non-whitespace index */
581                 if (!isspace(*p))
582                         s = q;
583
584                 *q++ = *p++;
585         }
586
587         *(s + 1) = '\0';
588 }
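/*
 * The loop above strips leading and trailing whitespace only;
 * interior spaces are preserved.  E.g. "  Intel(R) Core(TM) i7  "
 * becomes "Intel(R) Core(TM) i7".
 */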
589
590 void detect_num_cpu_cores(struct cpuinfo_x86 *c)
591 {
592         unsigned int eax, ebx, ecx, edx;
593
594         c->x86_max_cores = 1;
595         if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
596                 return;
597
598         cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
599         if (eax & 0x1f)
600                 c->x86_max_cores = (eax >> 26) + 1;
601 }
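/*
 * CPUID leaf 4 (deterministic cache parameters), subleaf 0: EAX[4:0]
 * is the cache type (0 means the leaf is invalid, hence the 0x1f
 * check), and EAX[31:26] is the maximum core ID per package, minus
 * one.  E.g. on a quad-core part, EAX >> 26 == 3 yields
 * x86_max_cores == 4.
 */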
602
603 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
604 {
605         unsigned int n, dummy, ebx, ecx, edx, l2size;
606
607         n = c->extended_cpuid_level;
608
609         if (n >= 0x80000005) {
610                 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
611                 c->x86_cache_size = (ecx>>24) + (edx>>24);
612 #ifdef CONFIG_X86_64
613                 /* On K8 L1 TLB is inclusive, so don't count it */
614                 c->x86_tlbsize = 0;
615 #endif
616         }
617
618         if (n < 0x80000006)     /* Some chips just have a large L1. */
619                 return;
620
621         cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
622         l2size = ecx >> 16;
623
624 #ifdef CONFIG_X86_64
625         c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
626 #else
627         /* do processor-specific cache resizing */
628         if (this_cpu->legacy_cache_size)
629                 l2size = this_cpu->legacy_cache_size(c, l2size);
630
631         /* Allow user to override all this if necessary. */
632         if (cachesize_override != -1)
633                 l2size = cachesize_override;
634
635         if (l2size == 0)
636                 return;         /* Again, no L2 cache is possible */
637 #endif
638
639         c->x86_cache_size = l2size;
640 }
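/*
 * Layout of the extended leaves used above: 0x80000005 reports the L1
 * sizes (ECX[31:24] = L1D in KB, EDX[31:24] = L1I in KB, summed as a
 * provisional x86_cache_size); 0x80000006 reports the L2 size in KB
 * in ECX[31:16], and on 64-bit the 4K-page L2 TLB entry counts from
 * EBX are accumulated into x86_tlbsize.
 */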
641
642 u16 __read_mostly tlb_lli_4k[NR_INFO];
643 u16 __read_mostly tlb_lli_2m[NR_INFO];
644 u16 __read_mostly tlb_lli_4m[NR_INFO];
645 u16 __read_mostly tlb_lld_4k[NR_INFO];
646 u16 __read_mostly tlb_lld_2m[NR_INFO];
647 u16 __read_mostly tlb_lld_4m[NR_INFO];
648 u16 __read_mostly tlb_lld_1g[NR_INFO];
649
650 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
651 {
652         if (this_cpu->c_detect_tlb)
653                 this_cpu->c_detect_tlb(c);
654
655         pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
656                 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
657                 tlb_lli_4m[ENTRIES]);
658
659         pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
660                 tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
661                 tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
662 }
663
664 int detect_ht_early(struct cpuinfo_x86 *c)
665 {
666 #ifdef CONFIG_SMP
667         u32 eax, ebx, ecx, edx;
668
669         if (!cpu_has(c, X86_FEATURE_HT))
670                 return -1;
671
672         if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
673                 return -1;
674
675         if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
676                 return -1;
677
678         cpuid(1, &eax, &ebx, &ecx, &edx);
679
680         smp_num_siblings = (ebx & 0xff0000) >> 16;
681         if (smp_num_siblings == 1)
682                 pr_info_once("CPU0: Hyper-Threading is disabled\n");
683 #endif
684         return 0;
685 }
686
687 void detect_ht(struct cpuinfo_x86 *c)
688 {
689 #ifdef CONFIG_SMP
690         int index_msb, core_bits;
691
692         if (detect_ht_early(c) < 0)
693                 return;
694
695         index_msb = get_count_order(smp_num_siblings);
696         c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
697
698         smp_num_siblings = smp_num_siblings / c->x86_max_cores;
699
700         index_msb = get_count_order(smp_num_siblings);
701
702         core_bits = get_count_order(c->x86_max_cores);
703
704         c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
705                                        ((1 << core_bits) - 1);
706 #endif
707 }
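/*
 * Worked example: with CPUID.1 EBX[23:16] reporting 8 logical CPUs
 * per package and x86_max_cores == 4, there are 8 / 4 = 2 threads per
 * core; index_msb ends up 1 and core_bits 2, so the APIC id splits as
 * [package | core (2 bits) | thread (1 bit)].
 */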
708
709 static void get_cpu_vendor(struct cpuinfo_x86 *c)
710 {
711         char *v = c->x86_vendor_id;
712         int i;
713
714         for (i = 0; i < X86_VENDOR_NUM; i++) {
715                 if (!cpu_devs[i])
716                         break;
717
718                 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
719                     (cpu_devs[i]->c_ident[1] &&
720                      !strcmp(v, cpu_devs[i]->c_ident[1]))) {
721
722                         this_cpu = cpu_devs[i];
723                         c->x86_vendor = this_cpu->c_x86_vendor;
724                         return;
725                 }
726         }
727
728         pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
729                     "CPU: Your system may be unstable.\n", v);
730
731         c->x86_vendor = X86_VENDOR_UNKNOWN;
732         this_cpu = &default_cpu;
733 }
734
735 void cpu_detect(struct cpuinfo_x86 *c)
736 {
737         /* Get vendor name */
738         cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
739               (unsigned int *)&c->x86_vendor_id[0],
740               (unsigned int *)&c->x86_vendor_id[8],
741               (unsigned int *)&c->x86_vendor_id[4]);
742
743         c->x86 = 4;
744         /* Intel-defined flags: level 0x00000001 */
745         if (c->cpuid_level >= 0x00000001) {
746                 u32 junk, tfms, cap0, misc;
747
748                 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
749                 c->x86          = x86_family(tfms);
750                 c->x86_model    = x86_model(tfms);
751                 c->x86_stepping = x86_stepping(tfms);
752
753                 if (cap0 & (1<<19)) {
754                         c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
755                         c->x86_cache_alignment = c->x86_clflush_size;
756                 }
757         }
758 }
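/*
 * Example: tfms == 0x000306a9 decodes as family 0x6, model 0x3a
 * (extended model 0x3 merged with model 0xa) and stepping 0x9;
 * x86_family()/x86_model() handle the extended family/model fields.
 * When CLFLUSH is present (cap0 bit 19), EBX[15:8] gives the cache
 * line size in 8-byte units, e.g. 0x08 * 8 = 64 bytes.
 */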
759
760 static void apply_forced_caps(struct cpuinfo_x86 *c)
761 {
762         int i;
763
764         for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
765                 c->x86_capability[i] &= ~cpu_caps_cleared[i];
766                 c->x86_capability[i] |= cpu_caps_set[i];
767         }
768 }
769
770 static void init_speculation_control(struct cpuinfo_x86 *c)
771 {
772         /*
773          * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
774          * and they also have a different bit for STIBP support. Also,
775          * a hypervisor might have set the individual AMD bits even on
776          * Intel CPUs, for finer-grained selection of what's available.
777          */
778         if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
779                 set_cpu_cap(c, X86_FEATURE_IBRS);
780                 set_cpu_cap(c, X86_FEATURE_IBPB);
781                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
782         }
783
784         if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
785                 set_cpu_cap(c, X86_FEATURE_STIBP);
786
787         if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
788             cpu_has(c, X86_FEATURE_VIRT_SSBD))
789                 set_cpu_cap(c, X86_FEATURE_SSBD);
790
791         if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
792                 set_cpu_cap(c, X86_FEATURE_IBRS);
793                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
794         }
795
796         if (cpu_has(c, X86_FEATURE_AMD_IBPB))
797                 set_cpu_cap(c, X86_FEATURE_IBPB);
798
799         if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
800                 set_cpu_cap(c, X86_FEATURE_STIBP);
801                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
802         }
803
804         if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
805                 set_cpu_cap(c, X86_FEATURE_SSBD);
806                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
807                 clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
808         }
809 }
810
811 static void init_cqm(struct cpuinfo_x86 *c)
812 {
813         if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
814                 c->x86_cache_max_rmid  = -1;
815                 c->x86_cache_occ_scale = -1;
816                 return;
817         }
818
819         /* will be overridden if occupancy monitoring exists */
820         c->x86_cache_max_rmid = cpuid_ebx(0xf);
821
822         if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
823             cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
824             cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
825                 u32 eax, ebx, ecx, edx;
826
827                 /* QoS sub-leaf, EAX=0Fh, ECX=1 */
828                 cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
829
830                 c->x86_cache_max_rmid  = ecx;
831                 c->x86_cache_occ_scale = ebx;
832         }
833 }
834
835 void get_cpu_cap(struct cpuinfo_x86 *c)
836 {
837         u32 eax, ebx, ecx, edx;
838
839         /* Intel-defined flags: level 0x00000001 */
840         if (c->cpuid_level >= 0x00000001) {
841                 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
842
843                 c->x86_capability[CPUID_1_ECX] = ecx;
844                 c->x86_capability[CPUID_1_EDX] = edx;
845         }
846
847         /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
848         if (c->cpuid_level >= 0x00000006)
849                 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
850
851         /* Additional Intel-defined flags: level 0x00000007 */
852         if (c->cpuid_level >= 0x00000007) {
853                 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
854                 c->x86_capability[CPUID_7_0_EBX] = ebx;
855                 c->x86_capability[CPUID_7_ECX] = ecx;
856                 c->x86_capability[CPUID_7_EDX] = edx;
857         }
858
859         /* Extended state features: level 0x0000000d */
860         if (c->cpuid_level >= 0x0000000d) {
861                 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
862
863                 c->x86_capability[CPUID_D_1_EAX] = eax;
864         }
865
866         /* AMD-defined flags: level 0x80000001 */
867         eax = cpuid_eax(0x80000000);
868         c->extended_cpuid_level = eax;
869
870         if ((eax & 0xffff0000) == 0x80000000) {
871                 if (eax >= 0x80000001) {
872                         cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
873
874                         c->x86_capability[CPUID_8000_0001_ECX] = ecx;
875                         c->x86_capability[CPUID_8000_0001_EDX] = edx;
876                 }
877         }
878
879         if (c->extended_cpuid_level >= 0x80000007) {
880                 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
881
882                 c->x86_capability[CPUID_8000_0007_EBX] = ebx;
883                 c->x86_power = edx;
884         }
885
886         if (c->extended_cpuid_level >= 0x80000008) {
887                 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
888                 c->x86_capability[CPUID_8000_0008_EBX] = ebx;
889         }
890
891         if (c->extended_cpuid_level >= 0x8000000a)
892                 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
893
894         init_scattered_cpuid_features(c);
895         init_speculation_control(c);
896         init_cqm(c);
897
898         /*
899          * Clear/Set all flags overridden by options, after probe.
900          * This needs to happen each time we re-probe, which may happen
901          * several times during CPU initialization.
902          */
903         apply_forced_caps(c);
904 }
905
906 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
907 {
908         u32 eax, ebx, ecx, edx;
909
910         if (c->extended_cpuid_level >= 0x80000008) {
911                 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
912
913                 c->x86_virt_bits = (eax >> 8) & 0xff;
914                 c->x86_phys_bits = eax & 0xff;
915         }
916 #ifdef CONFIG_X86_32
917         else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
918                 c->x86_phys_bits = 36;
919 #endif
920         c->x86_cache_bits = c->x86_phys_bits;
921 }
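/*
 * Example: CPUID.0x80000008 EAX == 0x00003028 yields 40 physical and
 * 48 virtual address bits (EAX[7:0] and EAX[15:8] respectively).
 */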
922
923 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
924 {
925 #ifdef CONFIG_X86_32
926         int i;
927
928         /*
929          * First of all, decide if this is a 486 or higher
930          * It's a 486 if we can modify the AC flag
931          */
932         if (flag_is_changeable_p(X86_EFLAGS_AC))
933                 c->x86 = 4;
934         else
935                 c->x86 = 3;
936
937         for (i = 0; i < X86_VENDOR_NUM; i++)
938                 if (cpu_devs[i] && cpu_devs[i]->c_identify) {
939                         c->x86_vendor_id[0] = 0;
940                         cpu_devs[i]->c_identify(c);
941                         if (c->x86_vendor_id[0]) {
942                                 get_cpu_vendor(c);
943                                 break;
944                         }
945                 }
946 #endif
947 }
948
949 #define NO_SPECULATION          BIT(0)
950 #define NO_MELTDOWN             BIT(1)
951 #define NO_SSB                  BIT(2)
952 #define NO_L1TF                 BIT(3)
953 #define NO_MDS                  BIT(4)
954 #define MSBDS_ONLY              BIT(5)
955 #define NO_SWAPGS               BIT(6)
956 #define NO_ITLB_MULTIHIT        BIT(7)
957
958 #define VULNWL(_vendor, _family, _model, _whitelist)    \
959         { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
960
961 #define VULNWL_INTEL(model, whitelist)          \
962         VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
963
964 #define VULNWL_AMD(family, whitelist)           \
965         VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
966
967 static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
968         VULNWL(ANY,     4, X86_MODEL_ANY,       NO_SPECULATION),
969         VULNWL(CENTAUR, 5, X86_MODEL_ANY,       NO_SPECULATION),
970         VULNWL(INTEL,   5, X86_MODEL_ANY,       NO_SPECULATION),
971         VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
972
973         /* Intel Family 6 */
974         VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
975         VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
976         VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
977         VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION | NO_ITLB_MULTIHIT),
978         VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION | NO_ITLB_MULTIHIT),
979
980         VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
981         VULNWL_INTEL(ATOM_SILVERMONT_X,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
982         VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
983         VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
984         VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
985         VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
986
987         VULNWL_INTEL(CORE_YONAH,                NO_SSB),
988
989         VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
990
991         VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
992         VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
993         VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
994
995         /*
996          * Technically, swapgs isn't serializing on AMD (despite it previously
997          * being documented as such in the APM).  But according to AMD, %gs is
998          * updated non-speculatively, and the issuing of %gs-relative memory
999          * operands will be blocked until the %gs update completes, which is
1000          * good enough for our purposes.
1001          */
1002
1003         VULNWL_INTEL(ATOM_TREMONT_X,            NO_ITLB_MULTIHIT),
1004
1005         /* AMD Family 0xf - 0x12 */
1006         VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1007         VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1008         VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1009         VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1010
1011         /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1012         VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1013         {}
1014 };
1015
1016 #define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                   \
1017         X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
1018                                             INTEL_FAM6_##model, steppings, \
1019                                             X86_FEATURE_ANY, issues)
1020
1021 #define SRBDS           BIT(0)
1022
1023 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1024         VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
1025         VULNBL_INTEL_STEPPINGS(HASWELL_CORE,    X86_STEPPING_ANY,               SRBDS),
1026         VULNBL_INTEL_STEPPINGS(HASWELL_ULT,     X86_STEPPING_ANY,               SRBDS),
1027         VULNBL_INTEL_STEPPINGS(HASWELL_GT3E,    X86_STEPPING_ANY,               SRBDS),
1028         VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E,  X86_STEPPING_ANY,               SRBDS),
1029         VULNBL_INTEL_STEPPINGS(BROADWELL_CORE,  X86_STEPPING_ANY,               SRBDS),
1030         VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE,  X86_STEPPING_ANY,               SRBDS),
1031         VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY,               SRBDS),
1032         VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC),        SRBDS),
1033         VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0xD),        SRBDS),
1034         {}
1035 };
1036
1037 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1038 {
1039         const struct x86_cpu_id *m = x86_match_cpu(table);
1040
1041         return m && !!(m->driver_data & which);
1042 }
1043
1044 u64 x86_read_arch_cap_msr(void)
1045 {
1046         u64 ia32_cap = 0;
1047
1048         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1049                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1050
1051         return ia32_cap;
1052 }
1053
1054 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1055 {
1056         u64 ia32_cap = x86_read_arch_cap_msr();
1057
1058         /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1059         if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1060             !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1061                 setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1062
1063         if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1064                 return;
1065
1066         setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1067         setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1068
1069         if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1070             !(ia32_cap & ARCH_CAP_SSB_NO) &&
1071            !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1072                 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1073
1074         if (ia32_cap & ARCH_CAP_IBRS_ALL)
1075                 setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1076
1077         if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1078             !(ia32_cap & ARCH_CAP_MDS_NO)) {
1079                 setup_force_cpu_bug(X86_BUG_MDS);
1080                 if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1081                         setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1082         }
1083
1084         if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1085                 setup_force_cpu_bug(X86_BUG_SWAPGS);
1086
1087         /*
1088          * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when:
1089          *      - TSX is supported or
1090          *      - TSX_CTRL is present
1091          *
1092          * The TSX_CTRL check is needed for cases when TSX could be disabled
1093          * before the kernel boots, e.g. via kexec.
1094          * The TSX_CTRL check alone is not sufficient when the microcode update
1095          * is not present or when running as a guest that doesn't get TSX_CTRL.
1096          */
1097         if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
1098             (cpu_has(c, X86_FEATURE_RTM) ||
1099              (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
1100                 setup_force_cpu_bug(X86_BUG_TAA);
1101
1102         /*
1103          * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1104          * in the vulnerability blacklist.
1105          */
1106         if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1107              cpu_has(c, X86_FEATURE_RDSEED)) &&
1108             cpu_matches(cpu_vuln_blacklist, SRBDS))
1109                     setup_force_cpu_bug(X86_BUG_SRBDS);
1110
1111         if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1112                 return;
1113
1114         /* Rogue Data Cache Load? No! */
1115         if (ia32_cap & ARCH_CAP_RDCL_NO)
1116                 return;
1117
1118         setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1119
1120         if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1121                 return;
1122
1123         setup_force_cpu_bug(X86_BUG_L1TF);
1124 }
1125
1126 /*
1127  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1128  * unfortunately, that's not true in practice because of early VIA
1129  * chips and (more importantly) broken virtualizers that are not easy
1130  * to detect. In the latter case it doesn't even *fail* reliably, so
1131  * probing for it doesn't even work. Disable it completely on 32-bit
1132  * unless we can find a reliable way to detect all the broken cases.
1133  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1134  */
1135 static void detect_nopl(void)
1136 {
1137 #ifdef CONFIG_X86_32
1138         setup_clear_cpu_cap(X86_FEATURE_NOPL);
1139 #else
1140         setup_force_cpu_cap(X86_FEATURE_NOPL);
1141 #endif
1142 }
1143
1144 /*
1145  * Do minimum CPU detection early.
1146  * Fields really needed: vendor, cpuid_level, family, model, stepping,
1147  * cache alignment.
1148  * The others are not touched to avoid unwanted side effects.
1149  *
1150  * WARNING: this function is only called on the boot CPU.  Don't add code
1151  * here that is supposed to run on all CPUs.
1152  */
1153 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1154 {
1155 #ifdef CONFIG_X86_64
1156         c->x86_clflush_size = 64;
1157         c->x86_phys_bits = 36;
1158         c->x86_virt_bits = 48;
1159 #else
1160         c->x86_clflush_size = 32;
1161         c->x86_phys_bits = 32;
1162         c->x86_virt_bits = 32;
1163 #endif
1164         c->x86_cache_alignment = c->x86_clflush_size;
1165
1166         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1167         c->extended_cpuid_level = 0;
1168
1169         if (!have_cpuid_p())
1170                 identify_cpu_without_cpuid(c);
1171
1172         /* Cyrix could have CPUID enabled via c_identify() */
1173         if (have_cpuid_p()) {
1174                 cpu_detect(c);
1175                 get_cpu_vendor(c);
1176                 get_cpu_cap(c);
1177                 get_cpu_address_sizes(c);
1178                 setup_force_cpu_cap(X86_FEATURE_CPUID);
1179
1180                 if (this_cpu->c_early_init)
1181                         this_cpu->c_early_init(c);
1182
1183                 c->cpu_index = 0;
1184                 filter_cpuid_features(c, false);
1185
1186                 if (this_cpu->c_bsp_init)
1187                         this_cpu->c_bsp_init(c);
1188         } else {
1189                 setup_clear_cpu_cap(X86_FEATURE_CPUID);
1190         }
1191
1192         setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1193
1194         cpu_set_bug_bits(c);
1195
1196         fpu__init_system(c);
1197
1198 #ifdef CONFIG_X86_32
1199         /*
1200          * Regardless of whether PCID is enumerated, the SDM says
1201          * that it can't be enabled in 32-bit mode.
1202          */
1203         setup_clear_cpu_cap(X86_FEATURE_PCID);
1204 #endif
1205
1206         /*
1207          * Later in the boot process pgtable_l5_enabled() relies on
1208          * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1209          * enabled by this point we need to clear the feature bit to avoid
1210          * false-positives at the later stage.
1211          *
1212          * pgtable_l5_enabled() can be false here for several reasons:
1213          *  - 5-level paging is disabled compile-time;
1214          *  - it's 32-bit kernel;
1215          *  - machine doesn't support 5-level paging;
1216          *  - user specified 'no5lvl' in kernel command line.
1217          */
1218         if (!pgtable_l5_enabled())
1219                 setup_clear_cpu_cap(X86_FEATURE_LA57);
1220
1221         detect_nopl();
1222 }
1223
1224 void __init early_cpu_init(void)
1225 {
1226         const struct cpu_dev *const *cdev;
1227         int count = 0;
1228
1229 #ifdef CONFIG_PROCESSOR_SELECT
1230         pr_info("KERNEL supported cpus:\n");
1231 #endif
1232
1233         for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1234                 const struct cpu_dev *cpudev = *cdev;
1235
1236                 if (count >= X86_VENDOR_NUM)
1237                         break;
1238                 cpu_devs[count] = cpudev;
1239                 count++;
1240
1241 #ifdef CONFIG_PROCESSOR_SELECT
1242                 {
1243                         unsigned int j;
1244
1245                         for (j = 0; j < 2; j++) {
1246                                 if (!cpudev->c_ident[j])
1247                                         continue;
1248                                 pr_info("  %s %s\n", cpudev->c_vendor,
1249                                         cpudev->c_ident[j]);
1250                         }
1251                 }
1252 #endif
1253         }
1254         early_identify_cpu(&boot_cpu_data);
1255 }
1256
1257 static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
1258 {
1259 #ifdef CONFIG_X86_64
1260         /*
1261          * Empirically, writing zero to a segment selector on AMD does
1262          * not clear the base, whereas writing zero to a segment
1263          * selector on Intel does clear the base.  Intel's behavior
1264          * allows slightly faster context switches in the common case
1265          * where GS is unused by the prev and next threads.
1266          *
1267          * Since neither vendor documents this anywhere that I can see,
1268          * detect it directly instead of hardcoding the choice by
1269          * vendor.
1270          *
1271          * I've designated AMD's behavior as the "bug" because it's
1272          * counterintuitive and less friendly.
1273          */
1274
1275         unsigned long old_base, tmp;
1276         rdmsrl(MSR_FS_BASE, old_base);
1277         wrmsrl(MSR_FS_BASE, 1);
1278         loadsegment(fs, 0);
1279         rdmsrl(MSR_FS_BASE, tmp);
1280         if (tmp != 0)
1281                 set_cpu_bug(c, X86_BUG_NULL_SEG);
1282         wrmsrl(MSR_FS_BASE, old_base);
1283 #endif
1284 }
1285
1286 static void generic_identify(struct cpuinfo_x86 *c)
1287 {
1288         c->extended_cpuid_level = 0;
1289
1290         if (!have_cpuid_p())
1291                 identify_cpu_without_cpuid(c);
1292
1293         /* Cyrix could have CPUID enabled via c_identify() */
1294         if (!have_cpuid_p())
1295                 return;
1296
1297         cpu_detect(c);
1298
1299         get_cpu_vendor(c);
1300
1301         get_cpu_cap(c);
1302
1303         get_cpu_address_sizes(c);
1304
1305         if (c->cpuid_level >= 0x00000001) {
1306                 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1307 #ifdef CONFIG_X86_32
1308 # ifdef CONFIG_SMP
1309                 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1310 # else
1311                 c->apicid = c->initial_apicid;
1312 # endif
1313 #endif
1314                 c->phys_proc_id = c->initial_apicid;
1315         }
1316
1317         get_model_name(c); /* Default name */
1318
1319         detect_null_seg_behavior(c);
1320
1321         /*
1322          * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1323          * systems that run Linux at CPL > 0 may or may not have the
1324          * issue, but, even if they have the issue, there's absolutely
1325          * nothing we can do about it because we can't use the real IRET
1326          * instruction.
1327          *
1328          * NB: For the time being, only 32-bit kernels support
1329          * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1330          * whether to apply espfix using paravirt hooks.  If any
1331          * non-paravirt system ever shows up that does *not* have the
1332          * ESPFIX issue, we can change this.
1333          */
1334 #ifdef CONFIG_X86_32
1335 # ifdef CONFIG_PARAVIRT
1336         do {
1337                 extern void native_iret(void);
1338                 if (pv_cpu_ops.iret == native_iret)
1339                         set_cpu_bug(c, X86_BUG_ESPFIX);
1340         } while (0);
1341 # else
1342         set_cpu_bug(c, X86_BUG_ESPFIX);
1343 # endif
1344 #endif
1345 }
1346
1347 static void x86_init_cache_qos(struct cpuinfo_x86 *c)
1348 {
1349         /*
1350          * The heavy lifting of max_rmid and cache_occ_scale is handled
1351          * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
1352          * in case CQM bits really aren't there in this CPU.
1353          */
1354         if (c != &boot_cpu_data) {
1355                 boot_cpu_data.x86_cache_max_rmid =
1356                         min(boot_cpu_data.x86_cache_max_rmid,
1357                             c->x86_cache_max_rmid);
1358         }
1359 }
1360
1361 /*
1362  * Validate that ACPI/mptables have the same information about the
1363  * effective APIC id and update the package map.
1364  */
1365 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1366 {
1367 #ifdef CONFIG_SMP
1368         unsigned int apicid, cpu = smp_processor_id();
1369
1370         apicid = apic->cpu_present_to_apicid(cpu);
1371
1372         if (apicid != c->apicid) {
1373                 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1374                        cpu, apicid, c->initial_apicid);
1375         }
1376         BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1377 #else
1378         c->logical_proc_id = 0;
1379 #endif
1380 }
1381
1382 /*
1383  * This does the hard work of actually picking apart the CPU stuff...
1384  */
1385 static void identify_cpu(struct cpuinfo_x86 *c)
1386 {
1387         int i;
1388
1389         c->loops_per_jiffy = loops_per_jiffy;
1390         c->x86_cache_size = 0;
1391         c->x86_vendor = X86_VENDOR_UNKNOWN;
1392         c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
1393         c->x86_vendor_id[0] = '\0'; /* Unset */
1394         c->x86_model_id[0] = '\0';  /* Unset */
1395         c->x86_max_cores = 1;
1396         c->x86_coreid_bits = 0;
1397         c->cu_id = 0xff;
1398 #ifdef CONFIG_X86_64
1399         c->x86_clflush_size = 64;
1400         c->x86_phys_bits = 36;
1401         c->x86_virt_bits = 48;
1402 #else
1403         c->cpuid_level = -1;    /* CPUID not detected */
1404         c->x86_clflush_size = 32;
1405         c->x86_phys_bits = 32;
1406         c->x86_virt_bits = 32;
1407 #endif
1408         c->x86_cache_alignment = c->x86_clflush_size;
1409         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1410
1411         generic_identify(c);
1412
1413         if (this_cpu->c_identify)
1414                 this_cpu->c_identify(c);
1415
1416         /* Clear/Set all flags overridden by options, after probe */
1417         apply_forced_caps(c);
1418
1419 #ifdef CONFIG_X86_64
1420         c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1421 #endif
1422
1423         /*
1424          * Vendor-specific initialization.  In this section we
1425          * canonicalize the feature flags, meaning if there are
1426          * features a certain CPU supports which CPUID doesn't
1427          * tell us, CPUID claiming incorrect flags, or other bugs,
1428          * we handle them here.
1429          *
1430          * At the end of this section, c->x86_capability better
1431          * indicate the features this CPU genuinely supports!
1432          */
1433         if (this_cpu->c_init)
1434                 this_cpu->c_init(c);
1435
1436         /* Disable the PN if appropriate */
1437         squash_the_stupid_serial_number(c);
1438
1439         /* Set up SMEP/SMAP/UMIP */
1440         setup_smep(c);
1441         setup_smap(c);
1442         setup_umip(c);
1443
1444         /*
1445          * The vendor-specific functions might have changed features.
1446          * Now we do "generic changes."
1447          */
1448
1449         /* Filter out anything that depends on CPUID levels we don't have */
1450         filter_cpuid_features(c, true);
1451
1452         /* If the model name is still unset, do table lookup. */
1453         if (!c->x86_model_id[0]) {
1454                 const char *p;
1455                 p = table_lookup_model(c);
1456                 if (p)
1457                         strcpy(c->x86_model_id, p);
1458                 else
1459                         /* Last resort... */
1460                         sprintf(c->x86_model_id, "%02x/%02x",
1461                                 c->x86, c->x86_model);
1462         }
1463
1464 #ifdef CONFIG_X86_64
1465         detect_ht(c);
1466 #endif
1467
1468         x86_init_rdrand(c);
1469         x86_init_cache_qos(c);
1470         setup_pku(c);
1471
1472         /*
1473          * Clear/Set all flags overridden by options, need do it
1474          * before following smp all cpus cap AND.
1475          */
1476         apply_forced_caps(c);
1477
1478         /*
1479          * On SMP, boot_cpu_data holds the common feature set between
1480          * all CPUs; so make sure that we indicate which features are
1481          * common between the CPUs.  The first time this routine gets
1482          * executed, c == &boot_cpu_data.
1483          */
1484         if (c != &boot_cpu_data) {
1485                 /* AND the already accumulated flags with these */
1486                 for (i = 0; i < NCAPINTS; i++)
1487                         boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1488
1489                 /* OR, i.e. replicate the bug flags */
1490                 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1491                         c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1492         }
1493
1494         /* Init Machine Check Exception if available. */
1495         mcheck_cpu_init(c);
1496
1497         select_idle_routine(c);
1498
1499 #ifdef CONFIG_NUMA
1500         numa_add_cpu(smp_processor_id());
1501 #endif
1502 }
1503
1504 /*
1505  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1506  * on 32-bit kernels:
1507  */
1508 #ifdef CONFIG_X86_32
1509 void enable_sep_cpu(void)
1510 {
1511         struct tss_struct *tss;
1512         int cpu;
1513
1514         if (!boot_cpu_has(X86_FEATURE_SEP))
1515                 return;
1516
1517         cpu = get_cpu();
1518         tss = &per_cpu(cpu_tss_rw, cpu);
1519
1520         /*
1521          * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1522          * see the big comment in struct x86_hw_tss's definition.
1523          */
1524
1525         tss->x86_tss.ss1 = __KERNEL_CS;
1526         wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1527         wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1528         wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1529
1530         put_cpu();
1531 }
1532 #endif
1533
1534 void __init identify_boot_cpu(void)
1535 {
1536         identify_cpu(&boot_cpu_data);
1537 #ifdef CONFIG_X86_32
1538         sysenter_setup();
1539         enable_sep_cpu();
1540 #endif
1541         cpu_detect_tlb(&boot_cpu_data);
1542         tsx_init();
1543 }
1544
1545 void identify_secondary_cpu(struct cpuinfo_x86 *c)
1546 {
1547         BUG_ON(c == &boot_cpu_data);
1548         identify_cpu(c);
1549 #ifdef CONFIG_X86_32
1550         enable_sep_cpu();
1551 #endif
1552         mtrr_ap_init();
1553         validate_apic_and_package_id(c);
1554         x86_spec_ctrl_setup_ap();
1555         update_srbds_msr();
1556 }
1557
static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_stepping || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
	else
		pr_cont(")\n");
}

/*
 * clearcpuid= was already parsed in fpu__init_parse_early_param.
 * But we need to keep a dummy __setup around, otherwise it would
 * show up as an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{
	return 1;
}
__setup("clearcpuid=", setup_clearcpuid);

#ifdef CONFIG_X86_64
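/*
 * irq_stack_union must be the first per-CPU object: on x86-64 the stack
 * protector canary is addressed at a fixed offset from %gs, which points
 * at the start of the per-CPU area, i.e. into this union.
 */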
DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

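/*
 * irq_stack_ptr points at the top of the per-CPU IRQ stack; the stack
 * grows down from there.
 */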
DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;

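/*
 * -1 means "not running on the IRQ stack"; the interrupt entry code
 * increments this on entry and only switches stacks when the result
 * is zero, i.e. on the outermost, non-nested interrupt.
 */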
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	extern char _entry_trampoline[];
	extern char entry_SYSCALL_64_trampoline[];

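	/*
	 * Compute the address of entry_SYSCALL_64_trampoline within this
	 * CPU's cpu_entry_area mapping: the symbol's offset inside the
	 * trampoline page, added to the per-CPU alias of that page.
	 */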
	int cpu = smp_processor_id();
	unsigned long SYSCALL64_entry_trampoline =
		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
		(entry_SYSCALL_64_trampoline - _entry_trampoline);

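	/*
	 * MSR_STAR bits 47:32 hold the kernel CS base used by SYSCALL and
	 * bits 63:48 the user CS base used by SYSRET; the low 32 bits
	 * (the legacy 32-bit SYSCALL target EIP) are left zero.
	 */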
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	if (static_cpu_has(X86_FEATURE_PTI))
		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
	else
		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates
	 * MSR_IA32_SYSENTER_EIP. This does not cause SYSENTER to jump to
	 * the wrong location, because AMD doesn't allow SYSENTER in long
	 * mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/*
	 * Flags to clear on syscall: most importantly IF, so interrupts
	 * stay off until the kernel stack is set up, and TF, so the
	 * first kernel instruction can't be single-stepped.
	 */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging; no special alignment is required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

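/*
 * Return true if the debug stack is flagged as being in use or if @addr
 * falls within this CPU's debug stack; debug_stack_addr holds the top
 * of the stack, which grows down over DEBUG_STKSZ bytes.
 */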
int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

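/*
 * While debug_idt_ctr is non-zero, load_current_idt() installs the
 * debug IDT instead of the regular one. debug_stack_set_zero() bumps
 * the counter; debug_stack_reset() drops it again and restores the
 * regular IDT once the count reaches zero.
 */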
void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers (db0-db3, db6 and db7):
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Skip db4, db5: they are reserved (aliases of db6, db7) */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore the debug registers if kgdbwait was used and a kernel debugger
 * connection has been established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * Wait for an ACK from the master CPU before continuing with
	 * AP initialization. Note that cpumask_test_and_set_cpu() below
	 * is used for its side effect: it marks this CPU as initialized,
	 * and the WARN_ON() fires if it already was.
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier', and nothing should get across it.
 * On 64-bit, a lot of state is already set up during early per-CPU
 * area initialization.
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = raw_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

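	/*
	 * The boot CPU (cpu 0) had its microcode loaded earlier in boot;
	 * only the APs (re)load it here.
	 */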
	if (cpu)
		load_ucode_ap();

	t = &per_cpu(cpu_tss_rw, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;

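		/*
		 * Each IST entry must point at the top of its exception
		 * stack: stacks grow down, so advance past each stack
		 * before recording its address.
		 */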
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 * All bits are set, i.e. access to all ports is denied by default.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	mmgrab(&init_mm);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, me);

	/*
	 * Initialize the TSS.  sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();

	load_fixmap_gdt(cpu);
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	pr_info("Initializing CPU#%d\n", cpu);

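	/*
	 * Only touch CR4 when at least one feature controlled by these
	 * bits is present; ancient CPUs without any of them may not
	 * implement CR4 at all.
	 */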
	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	mmgrab(&init_mm);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, curr);

	/*
	 * Initialize the TSS.  sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	load_fixmap_gdt(cpu);
}
#endif

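/*
 * On resume from suspend, give the CPU vendor driver a chance to
 * restore boot CPU state via its c_bsp_resume() callback.
 */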
static void bsp_resume(void)
{
	if (this_cpu->c_bsp_resume)
		this_cpu->c_bsp_resume(&boot_cpu_data);
}

static struct syscore_ops cpu_syscore_ops = {
	.resume		= bsp_resume,
};

static int __init init_cpu_syscore(void)
{
	register_syscore_ops(&cpu_syscore_ops);
	return 0;
}
core_initcall(init_cpu_syscore);

/*
 * The microcode loader calls this upon a late microcode load to recheck
 * features, but only when the microcode has actually been updated. The
 * caller holds microcode_mutex and the CPU hotplug lock.
 */
void microcode_check(void)
{
	struct cpuinfo_x86 info;

	perf_check_microcode();

	/* Reload CPUID max function as it might've changed. */
	info.cpuid_level = cpuid_eax(0);

	/*
	 * Copy all capability leaves to pick up the synthetic ones so that
	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
	 * get overwritten in get_cpu_cap().
	 */
	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));

	get_cpu_cap(&info);

	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}