x86/kernel/amd_nb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT      0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT 0x14a4
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT 0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
#define PCI_DEVICE_ID_AMD_17H_DF_F4     0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4 0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4     0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
        {}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
        {}
};

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
};

static const struct pci_device_id hygon_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
        {}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        {}
};

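/*
 * Bus/device ranges scanned for northbridge devices. Each entry is a
 * { bus, dev_base, dev_limit } triple (see struct amd_nb_bus_dev_range
 * in <asm/amd_nb.h>): devices 0x18-0x1f on bus 0x00 and devices
 * 0x00-0x1f on buses 0xff and 0xfe.
 */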
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
        return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
        return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

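/*
 * Walk all PCI devices and return the next one matching @ids.
 * pci_get_device() drops the reference on the device passed in and
 * takes one on the device it returns, so this can be called in a loop
 * without leaking references.
 */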
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

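/*
 * SMN accesses go through an index/data register pair in the root
 * device's PCI config space: the target SMN address is written to the
 * index register at offset 0x60, then the data is read from or written
 * to the data register at offset 0x64. smn_mutex serializes the
 * two-step sequence.
 */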
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
        struct pci_dev *root;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        root = node_to_amd_nb(node)->root;
        if (!root)
                goto out;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(root, 0x60, address);
        if (err) {
                pr_warn("Error programming SMN address 0x%x.\n", address);
                goto out_unlock;
        }

        err = (write ? pci_write_config_dword(root, 0x64, *value)
                     : pci_read_config_dword(root, 0x64, value));
        if (err)
                pr_warn("Error %s SMN address 0x%x.\n",
                        (write ? "writing to" : "reading from"), address);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
        return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
        return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
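
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * would read and modify a register through the exported SMN helpers
 * roughly as follows, where MY_SMN_REG is a placeholder offset, not a
 * real AMD register:
 *
 *	u32 val;
 *	int err = amd_smn_read(node, MY_SMN_REG, &val);
 *
 *	if (!err)
 *		err = amd_smn_write(node, MY_SMN_REG, val | BIT(0));
 */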

static int amd_cache_northbridges(void)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *link_ids = amd_nb_link_ids;
        const struct pci_device_id *root_ids = amd_root_ids;
        struct pci_dev *root, *misc, *link;
        struct amd_northbridge *nb;
        u16 roots_per_misc = 0;
        u16 misc_count = 0;
        u16 root_count = 0;
        u16 i, j;

        if (amd_northbridges.num)
                return 0;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                root_ids = hygon_root_ids;
                misc_ids = hygon_nb_misc_ids;
                link_ids = hygon_nb_link_ids;
        }

        misc = NULL;
        while ((misc = next_northbridge(misc, misc_ids)))
                misc_count++;

        if (!misc_count)
                return -ENODEV;

        root = NULL;
        while ((root = next_northbridge(root, root_ids)))
                root_count++;

        if (root_count) {
                roots_per_misc = root_count / misc_count;

                /*
                 * There should be _exactly_ N roots for each DF/SMN
                 * interface.
                 */
                if (!roots_per_misc || (root_count % roots_per_misc)) {
                        pr_info("Unsupported AMD DF/PCI configuration found\n");
                        return -ENODEV;
                }
        }

        nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = misc_count;

        link = misc = root = NULL;
        for (i = 0; i < amd_northbridges.num; i++) {
                node_to_amd_nb(i)->root = root =
                        next_northbridge(root, root_ids);
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, link_ids);

                /*
                 * If there are more PCI root devices than data fabric/
                 * system management network interfaces, then the (N)
                 * PCI roots per DF/SMN interface are functionally the
                 * same (for DF/SMN access) and N-1 are redundant. Skip
                 * those N-1 redundant PCI roots per DF/SMN interface so
                 * that the following DF/SMN interfaces get mapped to
                 * the correct PCI roots.
                 */
                for (j = 1; j < roots_per_misc; j++)
                        root = next_northbridge(root, root_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Check for L3 cache presence.
         */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway. The low 16 bits of @device hold the
 * vendor ID, the high 16 bits the device ID.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return false;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                misc_ids = hygon_nb_misc_ids;

        device >>= 16;
        for (id = misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}
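
/*
 * Usage sketch (an assumption about typical callers, not part of this
 * file): early boot code passes in the first config-space dword, which
 * packs the device ID in the high 16 bits and the vendor ID in the low
 * 16 bits, e.g.:
 *
 *	if (early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 *		...
 */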

struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned int segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return NULL;

        /* Assume all CPUs from Fam10h onward have MMCONFIG. */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}
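
/*
 * Size arithmetic above: each bus decodes 1 MiB of MMCONFIG space
 * (32 devices x 8 functions x 4 KiB of config space per function),
 * hence the shift by 20. With segn_busn_bits == 8, for example, the
 * window spans 256 buses and 256 MiB.
 */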

int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}
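
/*
 * As used above and below, register 0x1d4 on the link device holds a
 * 4-bit subcache-enable mask per compute unit (indexed by cpu_core_id):
 * amd_get_subcaches() extracts the CPU's field, amd_set_subcaches()
 * rewrites it.
 */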

int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

static void amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");
                return;
        }

        for (i = 0; i != amd_northbridges.num; i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_northbridges.num; i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

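/* Set bits 3 and 14 of the IC_CFG MSR on the CPU this runs on. */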
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

        msr_set_bit(MSR_AMD64_IC_CFG, 3);
        msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
        struct pci_dev *F4;
        u32 val;

        if (boot_cpu_data.x86 != 0x14)
                return;

        if (!amd_northbridges.num)
                return;

        F4 = node_to_amd_nb(0)->link;
        if (!F4)
                return;

        if (pci_read_config_dword(F4, 0x164, &val))
                return;

        if (val & BIT(2))
                return;

        on_each_cpu(__fix_erratum_688, NULL, 0);

        pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
        amd_cache_northbridges();
        amd_cache_gart();

        fix_erratum_688();

        return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);