/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

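/*
 * SMN accesses go through an index/data register pair in the root
 * device's PCI config space: the SMN address is programmed at offset
 * 0x60, then the data is read from or written to offset 0x64, all
 * under smn_mutex.
 */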
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

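/*
 * Usage sketch (illustrative only; 0x12345678 is a made-up SMN address,
 * not taken from any register map):
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x12345678, &val))
 *		pr_debug("SMN register: 0x%x\n", val);
 */
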
/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
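/*
 * As constructed below: FICAA bit 0 selects instance-based access,
 * bits [9:2] carry the register offset, bits [13:11] the PCI function
 * and bits [23:16] the instance ID. For example, func 0, reg 0x44 and
 * instance_id 0 encode as FICAA = 1 | (0x44 & 0x3FC) = 0x45.
 */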
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

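/*
 * Illustrative call only (the register offset is arbitrary, not from a
 * documented register map): read function 0, offset 0x44 of fabric
 * instance 0 on node 0:
 *
 *	u32 lo;
 *
 *	if (!amd_df_indirect_read(0, 0, 0x44, 0, &lo))
 *		pr_debug("DF register: 0x%x\n", lo);
 */
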
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *root, *misc, *link;

	if (amd_northbridges.num)
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, amd_root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* Assume all CPUs from family 0x10 onward have mmconfig. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

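/*
 * L3 partitioning state lives in two registers: the link (function 4)
 * device's register 0x1d4 holds a 4-bit subcache-enable mask per
 * compute unit, and the misc (function 3) device's register 0x1b8
 * holds the BAN mode bits (mask 0x180000).
 */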
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

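/*
 * GART flush protocol: write each northbridge's cached flush word back
 * to register 0x9c with bit 0 set, then poll until the hardware clears
 * bit 0 again to confirm the flush has executed.
 */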
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;

		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
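
/*
 * Typical consumer pattern (an illustrative sketch, not taken from a
 * specific in-tree user):
 *
 *	int i;
 *
 *	if (amd_cache_northbridges() < 0)
 *		return -ENODEV;
 *
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(i);
 *
 *		// ... use nb->root, nb->misc and nb->link here ...
 *	}
 */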