#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/spec-ctrl.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1485[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	err = rdmsr_safe_regs(gprs);
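	/* The low half of the MSR arrives in gprs[0] (EAX), the high half in gprs[2] (EDX). */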
	*p = gprs[0] | ((u64)gprs[2] << 32);

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	return wrmsr_safe_regs(gprs);
/*
 * B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */
extern __visible void vide(void);
__asm__(".globl vide\n"
	".type vide, @function\n"
static void init_amd_k5(struct cpuinfo_x86 *c)
	/*
	 * General Systems BIOSen alias the CPU frequency registers
	 * of the Elan at 0x000df000. Unfortunately, one of the Linux
	 * drivers subsequently pokes it, and changes the CPU speed.
	 * Workaround: Remove the unneeded alias.
	 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
static void init_amd_k6(struct cpuinfo_x86 *c)
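	/*
	 * get_num_physpages() counts page frames; shifting by
	 * (20 - PAGE_SHIFT) converts that page count to megabytes.
	 */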
	int mbytes = get_num_physpages() >> (20 - PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		void (*f_vide)(void);

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		OPTIMIZER_HIDE_VAR(f_vide);
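		/*
		 * OPTIMIZER_HIDE_VAR() keeps the compiler from proving what
		 * f_vide points to, so the indirect call in the (elided)
		 * timing harness cannot be folded into a direct call and the
		 * measurement stays meaningful. In outline the harness is
		 * (a sketch, not the verbatim original):
		 *
		 *	n = K6_BUG_LOOP;
		 *	f_vide = vide;
		 *	d = rdtsc();
		 *	while (n--)
		 *		f_vide();
		 *	d = rdtsc() - d;
		 */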
		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0x0000FFFF) == 0) {
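			/*
			 * Old-style WHCR encoding, judging from the write
			 * below: bit 0 enables write allocation, and the
			 * limit, in 4 MB units, sits in the bits directly
			 * above it.
			 */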
			l = (1 << 0) | ((mbytes/4) << 1);
			local_irq_save(flags);
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */
		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0xFFFF0000) == 0) {
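			/*
			 * New-style WHCR encoding, judging from the write
			 * below: bit 16 enables write allocation, and the
			 * limit in 4 MB units is placed at bit 22.
			 */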
			l = ((mbytes >> 2) << 22) | (1 << 16);
			local_irq_save(flags);
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}

	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
static void init_amd_k7(struct cpuinfo_x86 *c)
	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff) | 0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff) | 0x20000000, h);

	set_cpu_cap(c, X86_FEATURE_K7);
	/* Are we being called from identify_secondary_cpu()? */
	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon > model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */
	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		  " processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
/*
 * To work around broken NUMA configs. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
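	/*
	 * Scan downward, then upward, from this APIC ID for the nearest
	 * one that maps to an online node.
	 */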
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
	u8 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
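	/*
	 * Worked example (hypothetical values): a Fam15h socket with
	 * x86_max_cores == 8 and nodes_per_socket == 2 gives
	 * cus_per_node == 4, so core IDs 0..7 fold into 0..3 per node.
	 */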
static void amd_get_topology_early(struct cpuinfo_x86 *c)
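	/*
	 * CPUID Fn8000_001E EBX[15:8] holds the number of threads per core
	 * (per compute unit on Fam15h-16h) minus one.
	 */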
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * We may have multiple LLCs if L3 caches exist, so check if we
		 * have an L3 cache by looking at the L3 cache CPUID leaf.
		 */
		if (cpuid_edx(0x80000006)) {
			if (c->x86 == 0x17) {
				/*
				 * LLC is at the core complex level.
				 * Core complex id is ApicId[3].
				 */
				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
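				/*
				 * On Fam17h the low three APIC ID bits
				 * enumerate the threads of one core complex
				 * (up to four cores times two threads), so
				 * shifting by 3 yields the CCX ID.
				 */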
			} else {
				/* LLC is at the node level. */
				per_cpu(cpu_llc_id, cpu) = node_id;
			}
		}
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	}

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits) - 1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
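	/*
	 * Worked example (hypothetical): with bits == 3 and
	 * initial_apicid == 0x1a (26), cpu_core_id = 26 & 7 = 2 and
	 * phys_proc_id = 26 >> 3 = 3.
	 */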
u16 amd_get_nb_id(int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
static void srat_detect_node(struct cpuinfo_x86 *c)
	int node;
	int cpu = smp_processor_id();
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);
	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);
	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
static void early_init_amd_mc(struct cpuinfo_x86 *c)
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
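	/*
	 * Worked example (hypothetical): ecx == 0x3005 gives
	 * x86_max_cores = 5 + 1 = 6 and bits = (0x3005 >> 12) & 0xF = 3,
	 * i.e. eight APIC IDs are reserved for up to six cores.
	 */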
static void bsp_init_amd(struct cpuinfo_x86 *c)
	unsigned long long tseg;

	/*
	 * Split up direct mapping around the TSEG SMM area.
	 * Don't do it for gbpages because there seems very little
	 * benefit in doing so.
	 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		unsigned long pfn = tseg >> PAGE_SHIFT;

		pr_debug("tseg: %010llx\n", tseg);
		if (pfn_range_is_mapped(pfn, pfn + 1))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}
	if (c->x86 == 0x15) {
		unsigned long upperbit;
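		/*
		 * CPUID 0x80000005 EDX describes the L1D cache: bits
		 * [31:24] hold the size in KB and [23:16] the
		 * associativity, so upperbit below works out to the size
		 * of one cache way in bytes.
		 */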
		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}
	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
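	/*
	 * The mitigation code elsewhere (arch/x86/kernel/cpu/bugs.c, if
	 * this matches the upstream layout) is then expected to write
	 * x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask to
	 * MSR_AMD64_LS_CFG to engage SSBD.
	 */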
static void early_init_amd(struct cpuinfo_x86 *c)
	u32 dummy;

	early_init_amd_mc(c);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);

	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif
	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);
	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);
	/*
	 * BIOS support is required for SME. If BIOS has enabled SME then
	 * adjust x86_phys_bits by the SME physical address space reduction
	 * value. If BIOS has not enabled SME then don't advertise the
	 * feature (set in scattered.c). Also, since the SME support requires
	 * long mode, don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME)) {
		u64 msr;

		/* Check if SME is enabled */
		rdmsrl(MSR_K8_SYSCFG, msr);
		if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
			c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
			if (IS_ENABLED(CONFIG_X86_32))
				clear_cpu_cap(c, X86_FEATURE_SME);
		} else {
			clear_cpu_cap(c, X86_FEATURE_SME);
		}
	}
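	/*
	 * In the leaf read above, CPUID 0x8000001F EBX[11:6] reports how
	 * many physical address bits are lost once memory encryption is
	 * enabled, hence the ">> 6" and 0x3f mask.
	 */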
	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u64 value;

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	amd_get_topology_early(c);
static void init_amd_k8(struct cpuinfo_x86 *c)
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}
	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);

	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
static void init_amd_gh(struct cpuinfo_x86 *c)
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
static void init_amd_ln(struct cpuinfo_x86 *c)
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
	if (!strcmp(str, "force"))
		rdrand_force = true;

early_param("rdrand", rdrand_cmdline);
static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;
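	/*
	 * Bit 62 of MSR_AMD64_CPUID_FN_1 masks the RDRAND indicator that
	 * CPUID Fn0000_0001 reports in ECX[30] (62 = 32 + 30, on the
	 * assumption that the MSR mirrors the leaf's ECX in its high half).
	 */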
	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
static void init_amd_jg(struct cpuinfo_x86 *c)
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
static void init_amd_bd(struct cpuinfo_x86 *c)
	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
static void init_amd_zn(struct cpuinfo_x86 *c)
	set_cpu_cap(c, X86_FEATURE_ZEN);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);

		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
static void init_amd(struct cpuinfo_x86 *c)
	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_amd_zn(c); break;
	}
	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature.
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
	cpu_detect_cache_sizes(c);

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);
	if (cpu_has(c, X86_FEATURE_XMM2)) {
		unsigned long long val;
		int ret;

		/*
		 * A serializing LFENCE has less overhead than MFENCE, so
		 * use it for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
		/*
		 * Verify that the MSR write was successful (could be running
		 * under a hypervisor) and only then assume that LFENCE is
		 * serializing.
		 */
		ret = rdmsrl_safe(MSR_AMD64_DE_CFG, &val);
		if (!ret && (val & MSR_AMD64_DE_CFG_LFENCE_SERIALIZE)) {
			/* A serializing LFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
		} else {
			/* MFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
		}
	}
	/*
	 * Family 0x12 and above processors have an APIC timer
	 * that keeps running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);
	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
	    cpu_has_amd_erratum(c, amd_erratum_1485))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;
	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}
	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
static const struct cpu_dev amd_cpu_dev = {
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	{ .family = 4, .model_names =
		[7] = "486 DX/2-WB",
		[9] = "486 DX/4-WB",
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */
#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
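/*
 * Worked example of the packing: AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)
 * evaluates to 0x10021fff. AMD_MODEL_RANGE_FAMILY() then extracts 0x10,
 * AMD_MODEL_RANGE_START() 0x021 and AMD_MODEL_RANGE_END() 0xfff, which
 * compare directly against the (model << 4) | stepping value computed in
 * cpu_has_amd_erratum() below.
 */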
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static const int amd_erratum_1485[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
			   AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
	int osvw_id = *erratum++;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}
	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
void set_dr_addr_mask(unsigned long mask, int dr)
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	}
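/*
 * Note on the "- 1 + dr" arithmetic above: DR0's address mask lives in its
 * own MSR (MSR_F16H_DR0_ADDR_MASK), while the DR1-DR3 masks occupy
 * consecutive MSRs starting at MSR_F16H_DR1_ADDR_MASK, so dr == 1 selects
 * that MSR itself and dr == 2 and 3 select the two following it.
 */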