2 * arch/sparc64/mm/init.c
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
15 #include <linux/hugetlb.h>
16 #include <linux/initrd.h>
17 #include <linux/swap.h>
18 #include <linux/pagemap.h>
19 #include <linux/poison.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24 #include <linux/sort.h>
25 #include <linux/ioport.h>
26 #include <linux/percpu.h>
27 #include <linux/memblock.h>
28 #include <linux/mmzone.h>
29 #include <linux/gfp.h>
33 #include <asm/pgalloc.h>
34 #include <asm/pgtable.h>
35 #include <asm/oplib.h>
36 #include <asm/iommu.h>
38 #include <asm/uaccess.h>
39 #include <asm/mmu_context.h>
40 #include <asm/tlbflush.h>
42 #include <asm/starfire.h>
44 #include <asm/spitfire.h>
45 #include <asm/sections.h>
47 #include <asm/hypervisor.h>
49 #include <asm/mdesc.h>
50 #include <asm/cpudata.h>
51 #include <asm/setup.h>
56 unsigned long kern_linear_pte_xor[4] __read_mostly;
57 static unsigned long page_cache4v_flag;
59 /* A bitmap, two bits for every 256MB of physical memory. These two
60 * bits determine what page size we use for kernel linear
61 * translations. They form an index into kern_linear_pte_xor[]. The
62 * value in the indexed slot is XOR'd with the TLB miss virtual
63 * address to form the resulting TTE. The mapping is:
70 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
71 * support 2GB pages, and hopefully future cpus will support the 16GB
72 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
73 * if these larger page sizes are not supported by the cpu.
75 * It would be nice to determine this from the machine description
76 * 'cpu' properties, but we need to have this table set up before the
77 * MDESC is initialized.
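/* As a concrete illustration of the scheme above: each
 * kern_linear_pte_xor[] entry is built as (_PAGE_VALID | page-size
 * bits) ^ PAGE_OFFSET, with the protection bits OR'd in afterwards
 * (see the pgprot init and *_linear_pte_xor_finalize() routines
 * below).  Since a linear-map virtual address is simply
 * PAGE_OFFSET | physical-address, XOR'ing it with the selected entry
 * cancels PAGE_OFFSET, leaving the physical address, and sets the
 * TTE valid/size/protection bits in one operation.  The two bitmap
 * bits for the 256MB chunk being faulted on choose which of the four
 * entries (4MB, 256MB, 2GB, 16GB) gets used.
 */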
80 #ifndef CONFIG_DEBUG_PAGEALLOC
81 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
82 * Space is allocated for this right after the trap table in
83 * arch/sparc64/kernel/head.S
85 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
87 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
89 static unsigned long cpu_pgsz_mask;
91 #define MAX_BANKS 1024
93 static struct linux_prom64_registers pavail[MAX_BANKS];
94 static int pavail_ents;
96 u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
98 static int cmp_p64(const void *a, const void *b)
100 const struct linux_prom64_registers *x = a, *y = b;
102 if (x->phys_addr > y->phys_addr)
104 if (x->phys_addr < y->phys_addr)
109 static void __init read_obp_memory(const char *property,
110 struct linux_prom64_registers *regs,
113 phandle node = prom_finddevice("/memory");
114 int prop_size = prom_getproplen(node, property);
117 ents = prop_size / sizeof(struct linux_prom64_registers);
118 if (ents > MAX_BANKS) {
119 prom_printf("The machine has more %s property entries than "
120 "this kernel can support (%d).\n",
121 property, MAX_BANKS);
125 ret = prom_getproperty(node, property, (char *) regs, prop_size);
127 prom_printf("Couldn't get %s property from /memory.\n",
132 /* Sanitize what we got from the firmware, by page aligning
135 for (i = 0; i < ents; i++) {
136 unsigned long base, size;
138 base = regs[i].phys_addr;
139 size = regs[i].reg_size;
142 if (base & ~PAGE_MASK) {
143 unsigned long new_base = PAGE_ALIGN(base);
145 size -= new_base - base;
146 if ((long) size < 0L)
151 /* If it is empty, simply get rid of it.
152 * This simplifies the logic of the other
153 * functions that process these arrays.
155 memmove(&regs[i], &regs[i + 1],
156 (ents - i - 1) * sizeof(regs[0]));
161 regs[i].phys_addr = base;
162 regs[i].reg_size = size;
167 sort(regs, ents, sizeof(struct linux_prom64_registers),
171 /* Kernel physical address base and size in bytes. */
172 unsigned long kern_base __read_mostly;
173 unsigned long kern_size __read_mostly;
175 /* Initial ramdisk setup */
176 extern unsigned long sparc_ramdisk_image64;
177 extern unsigned int sparc_ramdisk_image;
178 extern unsigned int sparc_ramdisk_size;
180 struct page *mem_map_zero __read_mostly;
181 EXPORT_SYMBOL(mem_map_zero);
183 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
185 unsigned long sparc64_kern_pri_context __read_mostly;
186 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
187 unsigned long sparc64_kern_sec_context __read_mostly;
189 int num_kernel_image_mappings;
191 #ifdef CONFIG_DEBUG_DCFLUSH
192 atomic_t dcpage_flushes = ATOMIC_INIT(0);
194 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
198 inline void flush_dcache_page_impl(struct page *page)
200 BUG_ON(tlb_type == hypervisor);
201 #ifdef CONFIG_DEBUG_DCFLUSH
202 atomic_inc(&dcpage_flushes);
205 #ifdef DCACHE_ALIASING_POSSIBLE
206 __flush_dcache_page(page_address(page),
207 ((tlb_type == spitfire) &&
208 page_mapping(page) != NULL));
210 if (page_mapping(page) != NULL &&
211 tlb_type == spitfire)
212 __flush_icache_page(__pa(page_address(page)));
216 #define PG_dcache_dirty PG_arch_1
217 #define PG_dcache_cpu_shift 32UL
218 #define PG_dcache_cpu_mask \
219 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
221 #define dcache_dirty_cpu(page) \
222 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
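/* For example, with NR_CPUS == 64 the mask above is 0x3f: marking a
 * page dirty on cpu 5 stores (5UL << 32) together with the
 * PG_dcache_dirty bit into page->flags, and dcache_dirty_cpu() later
 * recovers 5 by shifting right 32 and masking with 0x3f.  The owning
 * cpu number therefore lives entirely in the upper half of
 * page->flags.
 */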
224 static inline void set_dcache_dirty(struct page *page, int this_cpu)
226 unsigned long mask = this_cpu;
227 unsigned long non_cpu_bits;
229 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
230 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
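/* casx retry loop: re-read page->flags, clear the old owning-cpu
 * field, OR in the new cpu number plus the dirty bit, and retry if
 * another cpu modified the flags word in the meantime.
 */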
232 __asm__ __volatile__("1:\n\t"
234 "and %%g7, %1, %%g1\n\t"
235 "or %%g1, %0, %%g1\n\t"
236 "casx [%2], %%g7, %%g1\n\t"
238 "bne,pn %%xcc, 1b\n\t"
241 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
245 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
247 unsigned long mask = (1UL << PG_dcache_dirty);
249 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
252 "srlx %%g7, %4, %%g1\n\t"
253 "and %%g1, %3, %%g1\n\t"
255 "bne,pn %%icc, 2f\n\t"
256 " andn %%g7, %1, %%g1\n\t"
257 "casx [%2], %%g7, %%g1\n\t"
259 "bne,pn %%xcc, 1b\n\t"
263 : "r" (cpu), "r" (mask), "r" (&page->flags),
264 "i" (PG_dcache_cpu_mask),
265 "i" (PG_dcache_cpu_shift)
269 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
271 unsigned long tsb_addr = (unsigned long) ent;
273 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
274 tsb_addr = __pa(tsb_addr);
276 __tsb_insert(tsb_addr, tag, pte);
279 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
281 static void flush_dcache(unsigned long pfn)
285 page = pfn_to_page(pfn);
287 unsigned long pg_flags;
289 pg_flags = page->flags;
290 if (pg_flags & (1UL << PG_dcache_dirty)) {
291 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
293 int this_cpu = get_cpu();
295 /* This is just to optimize away some function calls
299 flush_dcache_page_impl(page);
301 smp_flush_dcache_page_impl(page, cpu);
303 clear_dcache_dirty_cpu(page, cpu);
310 /* mm->context.lock must be held */
311 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
312 unsigned long tsb_hash_shift, unsigned long address,
315 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
321 tsb += ((address >> tsb_hash_shift) &
322 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
323 tag = (address >> 22UL);
324 tsb_insert(tsb, tag, tte);
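/* For example, with a one-page, 512-entry base TSB
 * (tsb_hash_shift == PAGE_SHIFT == 13), a fault at user address
 * 0x10002000 selects entry (0x10002000 >> 13) & 511 == 1 and is
 * stored with tag 0x10002000 >> 22 == 0x40, which the TSB miss
 * handler compares against on the next access to that page.
 */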
327 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
329 struct mm_struct *mm;
333 if (tlb_type != hypervisor) {
334 unsigned long pfn = pte_pfn(pte);
342 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
343 if (!pte_accessible(mm, pte))
346 spin_lock_irqsave(&mm->context.lock, flags);
348 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
349 if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
351 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
352 address, pte_val(pte));
355 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
356 address, pte_val(pte));
358 spin_unlock_irqrestore(&mm->context.lock, flags);
361 void flush_dcache_page(struct page *page)
363 struct address_space *mapping;
366 if (tlb_type == hypervisor)
369 /* Do not bother with the expensive D-cache flush if it
370 * is merely the zero page. The 'bigcore' testcase in GDB
371 * causes this case to run millions of times.
373 if (page == ZERO_PAGE(0))
376 this_cpu = get_cpu();
378 mapping = page_mapping(page);
379 if (mapping && !mapping_mapped(mapping)) {
380 int dirty = test_bit(PG_dcache_dirty, &page->flags);
382 int dirty_cpu = dcache_dirty_cpu(page);
384 if (dirty_cpu == this_cpu)
386 smp_flush_dcache_page_impl(page, dirty_cpu);
388 set_dcache_dirty(page, this_cpu);
390 /* We could delay the flush for the !page_mapping
391 * case too. But that case is for exec env/arg
392 * pages and those are 99% certain to get
393 * faulted into the tlb (and thus flushed) anyway.
395 flush_dcache_page_impl(page);
401 EXPORT_SYMBOL(flush_dcache_page);
403 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
405 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
406 if (tlb_type == spitfire) {
409 /* This code only runs on Spitfire cpus so this is
410 * why we can assume _PAGE_PADDR_4U.
412 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
413 unsigned long paddr, mask = _PAGE_PADDR_4U;
415 if (kaddr >= PAGE_OFFSET)
416 paddr = kaddr & mask;
418 pgd_t *pgdp = pgd_offset_k(kaddr);
419 pud_t *pudp = pud_offset(pgdp, kaddr);
420 pmd_t *pmdp = pmd_offset(pudp, kaddr);
421 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
423 paddr = pte_val(*ptep) & mask;
425 __flush_icache_page(paddr);
429 EXPORT_SYMBOL(flush_icache_range);
431 void mmu_info(struct seq_file *m)
433 static const char *pgsz_strings[] = {
434 "8K", "64K", "512K", "4MB", "32MB",
435 "256MB", "2GB", "16GB",
439 if (tlb_type == cheetah)
440 seq_printf(m, "MMU Type\t: Cheetah\n");
441 else if (tlb_type == cheetah_plus)
442 seq_printf(m, "MMU Type\t: Cheetah+\n");
443 else if (tlb_type == spitfire)
444 seq_printf(m, "MMU Type\t: Spitfire\n");
445 else if (tlb_type == hypervisor)
446 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
448 seq_printf(m, "MMU Type\t: ???\n");
450 seq_printf(m, "MMU PGSZs\t: ");
452 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
453 if (cpu_pgsz_mask & (1UL << i)) {
454 seq_printf(m, "%s%s",
455 printed ? "," : "", pgsz_strings[i]);
461 #ifdef CONFIG_DEBUG_DCFLUSH
462 seq_printf(m, "DCPageFlushes\t: %d\n",
463 atomic_read(&dcpage_flushes));
465 seq_printf(m, "DCPageFlushesXC\t: %d\n",
466 atomic_read(&dcpage_flushes_xcall));
467 #endif /* CONFIG_SMP */
468 #endif /* CONFIG_DEBUG_DCFLUSH */
471 struct linux_prom_translation prom_trans[512] __read_mostly;
472 unsigned int prom_trans_ents __read_mostly;
474 unsigned long kern_locked_tte_data;
476 /* The obp translations are saved based on 8k pagesize, since obp can
477 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
478 * HI_OBP_ADDRESS range are handled in ktlb.S.
480 static inline int in_obp_range(unsigned long vaddr)
482 return (vaddr >= LOW_OBP_ADDRESS &&
483 vaddr < HI_OBP_ADDRESS);
486 static int cmp_ptrans(const void *a, const void *b)
488 const struct linux_prom_translation *x = a, *y = b;
490 if (x->virt > y->virt)
492 if (x->virt < y->virt)
497 /* Read OBP translations property into 'prom_trans[]'. */
498 static void __init read_obp_translations(void)
500 int n, node, ents, first, last, i;
502 node = prom_finddevice("/virtual-memory");
503 n = prom_getproplen(node, "translations");
504 if (unlikely(n == 0 || n == -1)) {
505 prom_printf("prom_mappings: Couldn't get size.\n");
508 if (unlikely(n > sizeof(prom_trans))) {
509 prom_printf("prom_mappings: Size %d is too big.\n", n);
513 if ((n = prom_getproperty(node, "translations",
514 (char *)&prom_trans[0],
515 sizeof(prom_trans))) == -1) {
516 prom_printf("prom_mappings: Couldn't get property.\n");
520 n = n / sizeof(struct linux_prom_translation);
524 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
527 /* Now kick out all the non-OBP entries. */
528 for (i = 0; i < ents; i++) {
529 if (in_obp_range(prom_trans[i].virt))
533 for (; i < ents; i++) {
534 if (!in_obp_range(prom_trans[i].virt))
539 for (i = 0; i < (last - first); i++) {
540 struct linux_prom_translation *src = &prom_trans[i + first];
541 struct linux_prom_translation *dest = &prom_trans[i];
545 for (; i < ents; i++) {
546 struct linux_prom_translation *dest = &prom_trans[i];
547 dest->virt = dest->size = dest->data = 0x0UL;
550 prom_trans_ents = last - first;
552 if (tlb_type == spitfire) {
553 /* Clear diag TTE bits. */
554 for (i = 0; i < prom_trans_ents; i++)
555 prom_trans[i].data &= ~0x0003fe0000000000UL;
558 /* Force execute bit on. */
559 for (i = 0; i < prom_trans_ents; i++)
560 prom_trans[i].data |= (tlb_type == hypervisor ?
561 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
564 static void __init hypervisor_tlb_lock(unsigned long vaddr,
568 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
571 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
572 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
577 static unsigned long kern_large_tte(unsigned long paddr);
579 static void __init remap_kernel(void)
581 unsigned long phys_page, tte_vaddr, tte_data;
582 int i, tlb_ent = sparc64_highest_locked_tlbent();
584 tte_vaddr = (unsigned long) KERNBASE;
585 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
586 tte_data = kern_large_tte(phys_page);
588 kern_locked_tte_data = tte_data;
590 /* Now lock us into the TLBs via Hypervisor or OBP. */
591 if (tlb_type == hypervisor) {
592 for (i = 0; i < num_kernel_image_mappings; i++) {
593 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
594 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
595 tte_vaddr += 0x400000;
596 tte_data += 0x400000;
599 for (i = 0; i < num_kernel_image_mappings; i++) {
600 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
601 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
602 tte_vaddr += 0x400000;
603 tte_data += 0x400000;
605 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
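/* Illustrative example: a kernel image just under 12MB long gives
 * num_kernel_image_mappings == 3, so the loops above install three
 * consecutive locked 4MB ITLB/DTLB mappings covering
 * KERNBASE .. KERNBASE + 12MB.  On the OBP path these land in entries
 * tlb_ent, tlb_ent - 1 and tlb_ent - 2, leaving
 * sparc64_highest_unlocked_tlb_ent == tlb_ent - 3.
 */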
607 if (tlb_type == cheetah_plus) {
608 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
609 CTX_CHEETAH_PLUS_NUC);
610 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
611 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
616 static void __init inherit_prom_mappings(void)
618 /* Now fixup OBP's idea about where we really are mapped. */
619 printk("Remapping the kernel... ");
624 void prom_world(int enter)
629 __asm__ __volatile__("flushw");
632 void __flush_dcache_range(unsigned long start, unsigned long end)
636 if (tlb_type == spitfire) {
639 for (va = start; va < end; va += 32) {
640 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
644 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
647 for (va = start; va < end; va += 32)
648 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
652 "i" (ASI_DCACHE_INVALIDATE));
655 EXPORT_SYMBOL(__flush_dcache_range);
657 /* get_new_mmu_context() uses "cache + 1". */
658 DEFINE_SPINLOCK(ctx_alloc_lock);
659 unsigned long tlb_context_cache = CTX_FIRST_VERSION;
660 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
661 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
662 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
663 DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
665 static void mmu_context_wrap(void)
667 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
668 unsigned long new_ver, new_ctx, old_ctx;
669 struct mm_struct *mm;
672 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
674 /* Reserve kernel context */
675 set_bit(0, mmu_context_bmap);
677 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
678 if (unlikely(new_ver == 0))
679 new_ver = CTX_FIRST_VERSION;
680 tlb_context_cache = new_ver;
683 * Make sure that any new mm added into per_cpu_secondary_mm
684 * goes through the get_new_mmu_context() path.
689 * Update versions to current on those CPUs that had valid secondary contexts.
692 for_each_online_cpu(cpu) {
694 * If a new mm is stored after we took this mm from the array,
695 * it will go into get_new_mmu_context() path, because we
696 * already bumped the version in tlb_context_cache.
698 mm = per_cpu(per_cpu_secondary_mm, cpu);
700 if (unlikely(!mm || mm == &init_mm))
703 old_ctx = mm->context.sparc64_ctx_val;
704 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
705 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
706 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
707 mm->context.sparc64_ctx_val = new_ctx;
712 /* Caller does TLB context flushing on local CPU if necessary.
713 * The caller also ensures that CTX_VALID(mm->context) is false.
715 * We must be careful about boundary cases so that we never
716 * let the user have CTX 0 (nucleus) and never use a CTX
717 * version of zero (and thus NO_CONTEXT would not be caught
718 * by version mis-match tests in mmu_context.h).
720 * Always invoked with interrupts disabled.
722 void get_new_mmu_context(struct mm_struct *mm)
724 unsigned long ctx, new_ctx;
725 unsigned long orig_pgsz_bits;
727 spin_lock(&ctx_alloc_lock);
729 /* wrap might have happened, test again if our context became valid */
730 if (unlikely(CTX_VALID(mm->context)))
732 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
733 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
734 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
735 if (new_ctx >= (1 << CTX_NR_BITS)) {
736 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
737 if (new_ctx >= ctx) {
742 if (mm->context.sparc64_ctx_val)
743 cpumask_clear(mm_cpumask(mm));
744 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
745 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
746 tlb_context_cache = new_ctx;
747 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
749 spin_unlock(&ctx_alloc_lock);
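/* A context value is split into a generation ("version") held in the
 * high CTX_VERSION_MASK bits and a context number held in the low
 * CTX_NR_MASK bits.  Once all context numbers of the current
 * generation are taken, mmu_context_wrap() above bumps the version,
 * so every previously handed-out context fails the CTX_VALID() test
 * and comes back through this allocator.
 */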
752 static int numa_enabled = 1;
753 static int numa_debug;
755 static int __init early_numa(char *p)
760 if (strstr(p, "off"))
763 if (strstr(p, "debug"))
768 early_param("numa", early_numa);
770 #define numadbg(f, a...) \
771 do { if (numa_debug) \
772 printk(KERN_INFO f, ## a); \
775 static void __init find_ramdisk(unsigned long phys_base)
777 #ifdef CONFIG_BLK_DEV_INITRD
778 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
779 unsigned long ramdisk_image;
781 /* Older versions of the bootloader only supported a
782 * 32-bit physical address for the ramdisk image
783 * location, stored at sparc_ramdisk_image. Newer
784 * SILO versions set sparc_ramdisk_image to zero and
785 * provide a full 64-bit physical address at
786 * sparc_ramdisk_image64.
788 ramdisk_image = sparc_ramdisk_image;
790 ramdisk_image = sparc_ramdisk_image64;
792 /* Another bootloader quirk. The bootloader normalizes
793 * the physical address to KERNBASE, so we have to
794 * factor that back out and add in the lowest valid
795 * physical page address to get the true physical address.
797 ramdisk_image -= KERNBASE;
798 ramdisk_image += phys_base;
800 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
801 ramdisk_image, sparc_ramdisk_size);
803 initrd_start = ramdisk_image;
804 initrd_end = ramdisk_image + sparc_ramdisk_size;
806 memblock_reserve(initrd_start, sparc_ramdisk_size);
808 initrd_start += PAGE_OFFSET;
809 initrd_end += PAGE_OFFSET;
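/* Worked example with illustrative numbers: if SILO loads the
 * ramdisk at physical 0x40c00000 on a machine whose lowest valid
 * page sits at phys_base 0x40000000, it reports KERNBASE + 0xc00000.
 * Subtracting KERNBASE and adding phys_base back recovers
 * 0x40c00000, and adding PAGE_OFFSET converts that to the linear-map
 * virtual addresses used for initrd_start/initrd_end.
 */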
814 struct node_mem_mask {
818 static struct node_mem_mask node_masks[MAX_NUMNODES];
819 static int num_node_masks;
821 #ifdef CONFIG_NEED_MULTIPLE_NODES
823 int numa_cpu_lookup_table[NR_CPUS];
824 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
826 struct mdesc_mblock {
829 u64 offset; /* RA-to-PA */
831 static struct mdesc_mblock *mblocks;
832 static int num_mblocks;
833 static int find_numa_node_for_addr(unsigned long pa,
834 struct node_mem_mask *pnode_mask);
836 static unsigned long __init ra_to_pa(unsigned long addr)
840 for (i = 0; i < num_mblocks; i++) {
841 struct mdesc_mblock *m = &mblocks[i];
843 if (addr >= m->base &&
844 addr < (m->base + m->size)) {
852 static int __init find_node(unsigned long addr)
854 static bool search_mdesc = true;
855 static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
856 static int last_index;
859 addr = ra_to_pa(addr);
860 for (i = 0; i < num_node_masks; i++) {
861 struct node_mem_mask *p = &node_masks[i];
863 if ((addr & p->mask) == p->val)
866 /* The following condition has been observed on LDOM guests because
867 * node_masks only contains the best latency mask and value.
868 * An LDOM guest's mdesc can contain a single latency group that
869 * covers multiple address ranges. Print a warning message only if the
870 * address cannot be found in node_masks nor mdesc.
872 if ((search_mdesc) &&
873 ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
874 /* find the available node in the mdesc */
875 last_index = find_numa_node_for_addr(addr, &last_mem_mask);
876 numadbg("find_node: latency group for address 0x%lx is %d\n",
878 if ((last_index < 0) || (last_index >= num_node_masks)) {
879 /* WARN_ONCE() and use default group 0 */
880 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
881 search_mdesc = false;
889 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
891 *nid = find_node(start);
893 while (start < end) {
894 int n = find_node(start);
908 /* This must be invoked after performing all of the necessary
909 * memblock_set_node() calls for 'nid'. We need to be able to get
910 * correct data from get_pfn_range_for_nid().
912 static void __init allocate_node_data(int nid)
914 struct pglist_data *p;
915 unsigned long start_pfn, end_pfn;
916 #ifdef CONFIG_NEED_MULTIPLE_NODES
919 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
921 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
924 NODE_DATA(nid) = __va(paddr);
925 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
927 NODE_DATA(nid)->node_id = nid;
932 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
933 p->node_start_pfn = start_pfn;
934 p->node_spanned_pages = end_pfn - start_pfn;
937 static void init_node_masks_nonnuma(void)
939 #ifdef CONFIG_NEED_MULTIPLE_NODES
943 numadbg("Initializing tables for non-numa.\n");
945 node_masks[0].mask = node_masks[0].val = 0;
948 #ifdef CONFIG_NEED_MULTIPLE_NODES
949 for (i = 0; i < NR_CPUS; i++)
950 numa_cpu_lookup_table[i] = 0;
952 cpumask_setall(&numa_cpumask_lookup_table[0]);
956 #ifdef CONFIG_NEED_MULTIPLE_NODES
957 struct pglist_data *node_data[MAX_NUMNODES];
959 EXPORT_SYMBOL(numa_cpu_lookup_table);
960 EXPORT_SYMBOL(numa_cpumask_lookup_table);
961 EXPORT_SYMBOL(node_data);
963 struct mdesc_mlgroup {
969 static struct mdesc_mlgroup *mlgroups;
970 static int num_mlgroups;
972 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
977 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
978 u64 target = mdesc_arc_target(md, arc);
981 val = mdesc_get_property(md, target,
983 if (val && *val == cfg_handle)
989 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
992 u64 arc, candidate, best_latency = ~(u64)0;
994 candidate = MDESC_NODE_NULL;
995 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
996 u64 target = mdesc_arc_target(md, arc);
997 const char *name = mdesc_node_name(md, target);
1000 if (strcmp(name, "pio-latency-group"))
1003 val = mdesc_get_property(md, target, "latency", NULL);
1007 if (*val < best_latency) {
1009 best_latency = *val;
1013 if (candidate == MDESC_NODE_NULL)
1016 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1019 int of_node_to_nid(struct device_node *dp)
1021 const struct linux_prom64_registers *regs;
1022 struct mdesc_handle *md;
1027 /* This is the right thing to do on currently supported
1028 * SUN4U NUMA platforms as well, as the PCI controller does
1029 * not sit behind any particular memory controller.
1034 regs = of_get_property(dp, "reg", NULL);
1038 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1044 mdesc_for_each_node_by_name(md, grp, "group") {
1045 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1057 static void __init add_node_ranges(void)
1059 struct memblock_region *reg;
1061 for_each_memblock(memory, reg) {
1062 unsigned long size = reg->size;
1063 unsigned long start, end;
1067 while (start < end) {
1068 unsigned long this_end;
1071 this_end = memblock_nid_range(start, end, &nid);
1073 numadbg("Setting memblock NUMA node nid[%d] "
1074 "start[%lx] end[%lx]\n",
1075 nid, start, this_end);
1077 memblock_set_node(start, this_end - start,
1078 &memblock.memory, nid);
1084 static int __init grab_mlgroups(struct mdesc_handle *md)
1086 unsigned long paddr;
1090 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1095 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1100 mlgroups = __va(paddr);
1101 num_mlgroups = count;
1104 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1105 struct mdesc_mlgroup *m = &mlgroups[count++];
1110 val = mdesc_get_property(md, node, "latency", NULL);
1112 val = mdesc_get_property(md, node, "address-match", NULL);
1114 val = mdesc_get_property(md, node, "address-mask", NULL);
1117 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1118 "match[%llx] mask[%llx]\n",
1119 count - 1, m->node, m->latency, m->match, m->mask);
1125 static int __init grab_mblocks(struct mdesc_handle *md)
1127 unsigned long paddr;
1131 mdesc_for_each_node_by_name(md, node, "mblock")
1136 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1141 mblocks = __va(paddr);
1142 num_mblocks = count;
1145 mdesc_for_each_node_by_name(md, node, "mblock") {
1146 struct mdesc_mblock *m = &mblocks[count++];
1149 val = mdesc_get_property(md, node, "base", NULL);
1151 val = mdesc_get_property(md, node, "size", NULL);
1153 val = mdesc_get_property(md, node,
1154 "address-congruence-offset", NULL);
1156 /* The address-congruence-offset property is optional.
1157 * Explicitly zero it to identify this.
1164 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1165 count - 1, m->base, m->size, m->offset);
1171 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1172 u64 grp, cpumask_t *mask)
1176 cpumask_clear(mask);
1178 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1179 u64 target = mdesc_arc_target(md, arc);
1180 const char *name = mdesc_node_name(md, target);
1183 if (strcmp(name, "cpu"))
1185 id = mdesc_get_property(md, target, "id", NULL);
1186 if (*id < nr_cpu_ids)
1187 cpumask_set_cpu(*id, mask);
1191 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1195 for (i = 0; i < num_mlgroups; i++) {
1196 struct mdesc_mlgroup *m = &mlgroups[i];
1197 if (m->node == node)
1203 int __node_distance(int from, int to)
1205 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1206 pr_warn("Returning default NUMA distance value for %d->%d\n",
1208 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1210 return numa_latency[from][to];
1213 static int find_numa_node_for_addr(unsigned long pa,
1214 struct node_mem_mask *pnode_mask)
1216 struct mdesc_handle *md = mdesc_grab();
1220 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1221 if (node == MDESC_NODE_NULL)
1224 mdesc_for_each_node_by_name(md, node, "group") {
1225 mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
1226 u64 target = mdesc_arc_target(md, arc);
1227 struct mdesc_mlgroup *m = find_mlgroup(target);
1231 if ((pa & m->mask) == m->match) {
1233 pnode_mask->mask = m->mask;
1234 pnode_mask->val = m->match;
1248 static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1252 for (i = 0; i < MAX_NUMNODES; i++) {
1253 struct node_mem_mask *n = &node_masks[i];
1255 if ((grp->mask == n->mask) && (grp->match == n->val))
1261 static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
1266 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1268 u64 target = mdesc_arc_target(md, arc);
1269 struct mdesc_mlgroup *m = find_mlgroup(target);
1273 tnode = find_best_numa_node_for_mlgroup(m);
1274 if (tnode == MAX_NUMNODES)
1276 numa_latency[index][tnode] = m->latency;
1280 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1283 struct mdesc_mlgroup *candidate = NULL;
1284 u64 arc, best_latency = ~(u64)0;
1285 struct node_mem_mask *n;
1287 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1288 u64 target = mdesc_arc_target(md, arc);
1289 struct mdesc_mlgroup *m = find_mlgroup(target);
1292 if (m->latency < best_latency) {
1294 best_latency = m->latency;
1300 if (num_node_masks != index) {
1301 printk(KERN_ERR "Inconsistent NUMA state, "
1302 "index[%d] != num_node_masks[%d]\n",
1303 index, num_node_masks);
1307 n = &node_masks[num_node_masks++];
1309 n->mask = candidate->mask;
1310 n->val = candidate->match;
1312 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
1313 index, n->mask, n->val, candidate->latency);
1318 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1324 numa_parse_mdesc_group_cpus(md, grp, &mask);
1326 for_each_cpu(cpu, &mask)
1327 numa_cpu_lookup_table[cpu] = index;
1328 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1331 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1332 for_each_cpu(cpu, &mask)
1337 return numa_attach_mlgroup(md, grp, index);
1340 static int __init numa_parse_mdesc(void)
1342 struct mdesc_handle *md = mdesc_grab();
1343 int i, j, err, count;
1346 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1347 if (node == MDESC_NODE_NULL) {
1352 err = grab_mblocks(md);
1356 err = grab_mlgroups(md);
1361 mdesc_for_each_node_by_name(md, node, "group") {
1362 err = numa_parse_mdesc_group(md, node, count);
1369 mdesc_for_each_node_by_name(md, node, "group") {
1370 find_numa_latencies_for_group(md, node, count);
1374 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1375 for (i = 0; i < MAX_NUMNODES; i++) {
1376 u64 self_latency = numa_latency[i][i];
1378 for (j = 0; j < MAX_NUMNODES; j++) {
1379 numa_latency[i][j] =
1380 (numa_latency[i][j] * LOCAL_DISTANCE) /
1387 for (i = 0; i < num_node_masks; i++) {
1388 allocate_node_data(i);
1398 static int __init numa_parse_jbus(void)
1400 unsigned long cpu, index;
1402 /* NUMA node id is encoded in bits 36 and higher, and there is
1403 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1406 for_each_present_cpu(cpu) {
1407 numa_cpu_lookup_table[cpu] = index;
1408 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1409 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1410 node_masks[index].val = cpu << 36UL;
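/* e.g. cpu 2 gets val == 0x2000000000 with mask ~0xfffffffffUL, so
 * physical addresses 0x2000000000 - 0x2fffffffff match node 2
 * (illustrative; actual ranges depend on how memory is attached to
 * each memory controller).
 */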
1414 num_node_masks = index;
1418 for (index = 0; index < num_node_masks; index++) {
1419 allocate_node_data(index);
1420 node_set_online(index);
1426 static int __init numa_parse_sun4u(void)
1428 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1431 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1432 if ((ver >> 32UL) == __JALAPENO_ID ||
1433 (ver >> 32UL) == __SERRANO_ID)
1434 return numa_parse_jbus();
1439 static int __init bootmem_init_numa(void)
1444 numadbg("bootmem_init_numa()\n");
1446 /* Some sane defaults for numa latency values */
1447 for (i = 0; i < MAX_NUMNODES; i++) {
1448 for (j = 0; j < MAX_NUMNODES; j++)
1449 numa_latency[i][j] = (i == j) ?
1450 LOCAL_DISTANCE : REMOTE_DISTANCE;
1454 if (tlb_type == hypervisor)
1455 err = numa_parse_mdesc();
1457 err = numa_parse_sun4u();
1464 static int bootmem_init_numa(void)
1471 static void __init bootmem_init_nonnuma(void)
1473 unsigned long top_of_ram = memblock_end_of_DRAM();
1474 unsigned long total_ram = memblock_phys_mem_size();
1476 numadbg("bootmem_init_nonnuma()\n");
1478 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1479 top_of_ram, total_ram);
1480 printk(KERN_INFO "Memory hole size: %ldMB\n",
1481 (top_of_ram - total_ram) >> 20);
1483 init_node_masks_nonnuma();
1484 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
1485 allocate_node_data(0);
1489 static unsigned long __init bootmem_init(unsigned long phys_base)
1491 unsigned long end_pfn;
1493 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1494 max_pfn = max_low_pfn = end_pfn;
1495 min_low_pfn = (phys_base >> PAGE_SHIFT);
1497 if (bootmem_init_numa() < 0)
1498 bootmem_init_nonnuma();
1500 /* Dump memblock with node info. */
1501 memblock_dump_all();
1503 /* XXX cpu notifier XXX */
1505 sparse_memory_present_with_active_regions(MAX_NUMNODES);
1511 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1512 static int pall_ents __initdata;
1514 static unsigned long max_phys_bits = 40;
1516 bool kern_addr_valid(unsigned long addr)
1523 if ((long)addr < 0L) {
1524 unsigned long pa = __pa(addr);
1526 if ((pa >> max_phys_bits) != 0UL)
1529 return pfn_valid(pa >> PAGE_SHIFT);
1532 if (addr >= (unsigned long) KERNBASE &&
1533 addr < (unsigned long)&_end)
1536 pgd = pgd_offset_k(addr);
1540 pud = pud_offset(pgd, addr);
1544 if (pud_large(*pud))
1545 return pfn_valid(pud_pfn(*pud));
1547 pmd = pmd_offset(pud, addr);
1551 if (pmd_large(*pmd))
1552 return pfn_valid(pmd_pfn(*pmd));
1554 pte = pte_offset_kernel(pmd, addr);
1558 return pfn_valid(pte_pfn(*pte));
1560 EXPORT_SYMBOL(kern_addr_valid);
1562 static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1566 const unsigned long mask16gb = (1UL << 34) - 1UL;
1567 u64 pte_val = vstart;
1569 /* Each PUD is 8GB */
1570 if ((vstart & mask16gb) ||
1571 (vend - vstart <= mask16gb)) {
1572 pte_val ^= kern_linear_pte_xor[2];
1573 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1575 return vstart + PUD_SIZE;
1578 pte_val ^= kern_linear_pte_xor[3];
1579 pte_val |= _PAGE_PUD_HUGE;
1581 vend = vstart + mask16gb + 1UL;
1582 while (vstart < vend) {
1583 pud_val(*pud) = pte_val;
1585 pte_val += PUD_SIZE;
1592 static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1595 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1601 static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1605 const unsigned long mask256mb = (1UL << 28) - 1UL;
1606 const unsigned long mask2gb = (1UL << 31) - 1UL;
1607 u64 pte_val = vstart;
1609 /* Each PMD is 8MB */
1610 if ((vstart & mask256mb) ||
1611 (vend - vstart <= mask256mb)) {
1612 pte_val ^= kern_linear_pte_xor[0];
1613 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1615 return vstart + PMD_SIZE;
1618 if ((vstart & mask2gb) ||
1619 (vend - vstart <= mask2gb)) {
1620 pte_val ^= kern_linear_pte_xor[1];
1621 pte_val |= _PAGE_PMD_HUGE;
1622 vend = vstart + mask256mb + 1UL;
1624 pte_val ^= kern_linear_pte_xor[2];
1625 pte_val |= _PAGE_PMD_HUGE;
1626 vend = vstart + mask2gb + 1UL;
1629 while (vstart < vend) {
1630 pmd_val(*pmd) = pte_val;
1632 pte_val += PMD_SIZE;
1640 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1643 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1649 static unsigned long __ref kernel_map_range(unsigned long pstart,
1650 unsigned long pend, pgprot_t prot,
1653 unsigned long vstart = PAGE_OFFSET + pstart;
1654 unsigned long vend = PAGE_OFFSET + pend;
1655 unsigned long alloc_bytes = 0UL;
1657 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1658 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1663 while (vstart < vend) {
1664 unsigned long this_end, paddr = __pa(vstart);
1665 pgd_t *pgd = pgd_offset_k(vstart);
1670 if (pgd_none(*pgd)) {
1673 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1674 alloc_bytes += PAGE_SIZE;
1675 pgd_populate(&init_mm, pgd, new);
1677 pud = pud_offset(pgd, vstart);
1678 if (pud_none(*pud)) {
1681 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1682 vstart = kernel_map_hugepud(vstart, vend, pud);
1685 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1686 alloc_bytes += PAGE_SIZE;
1687 pud_populate(&init_mm, pud, new);
1690 pmd = pmd_offset(pud, vstart);
1691 if (pmd_none(*pmd)) {
1694 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1695 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1698 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1699 alloc_bytes += PAGE_SIZE;
1700 pmd_populate_kernel(&init_mm, pmd, new);
1703 pte = pte_offset_kernel(pmd, vstart);
1704 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1705 if (this_end > vend)
1708 while (vstart < this_end) {
1709 pte_val(*pte) = (paddr | pgprot_val(prot));
1711 vstart += PAGE_SIZE;
1720 static void __init flush_all_kernel_tsbs(void)
1724 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1725 struct tsb *ent = &swapper_tsb[i];
1727 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1729 #ifndef CONFIG_DEBUG_PAGEALLOC
1730 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1731 struct tsb *ent = &swapper_4m_tsb[i];
1733 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1738 extern unsigned int kvmap_linear_patch[1];
1740 static void __init kernel_physical_mapping_init(void)
1742 unsigned long i, mem_alloced = 0UL;
1743 bool use_huge = true;
1745 #ifdef CONFIG_DEBUG_PAGEALLOC
1748 for (i = 0; i < pall_ents; i++) {
1749 unsigned long phys_start, phys_end;
1751 phys_start = pall[i].phys_addr;
1752 phys_end = phys_start + pall[i].reg_size;
1754 mem_alloced += kernel_map_range(phys_start, phys_end,
1755 PAGE_KERNEL, use_huge);
1758 printk("Allocated %ld bytes for kernel page tables.\n",
1761 kvmap_linear_patch[0] = 0x01000000; /* nop */
1762 flushi(&kvmap_linear_patch[0]);
1764 flush_all_kernel_tsbs();
1769 #ifdef CONFIG_DEBUG_PAGEALLOC
1770 void __kernel_map_pages(struct page *page, int numpages, int enable)
1772 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1773 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1775 kernel_map_range(phys_start, phys_end,
1776 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1778 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1779 PAGE_OFFSET + phys_end);
1781 /* we should perform an IPI and flush all tlbs,
1782 * but that can deadlock->flush only current cpu.
1784 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1785 PAGE_OFFSET + phys_end);
1789 unsigned long __init find_ecache_flush_span(unsigned long size)
1793 for (i = 0; i < pavail_ents; i++) {
1794 if (pavail[i].reg_size >= size)
1795 return pavail[i].phys_addr;
1801 unsigned long PAGE_OFFSET;
1802 EXPORT_SYMBOL(PAGE_OFFSET);
1804 unsigned long VMALLOC_END = 0x0000010000000000UL;
1805 EXPORT_SYMBOL(VMALLOC_END);
1807 unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1808 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1810 static void __init setup_page_offset(void)
1812 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1813 /* Cheetah/Panther support a full 64-bit virtual
1814 * address, so we can use all that our page tables
1817 sparc64_va_hole_top = 0xfff0000000000000UL;
1818 sparc64_va_hole_bottom = 0x0010000000000000UL;
1821 } else if (tlb_type == hypervisor) {
1822 switch (sun4v_chip_type) {
1823 case SUN4V_CHIP_NIAGARA1:
1824 case SUN4V_CHIP_NIAGARA2:
1825 /* T1 and T2 support 48-bit virtual addresses. */
1826 sparc64_va_hole_top = 0xffff800000000000UL;
1827 sparc64_va_hole_bottom = 0x0000800000000000UL;
1831 case SUN4V_CHIP_NIAGARA3:
1832 /* T3 supports 48-bit virtual addresses. */
1833 sparc64_va_hole_top = 0xffff800000000000UL;
1834 sparc64_va_hole_bottom = 0x0000800000000000UL;
1838 case SUN4V_CHIP_NIAGARA4:
1839 case SUN4V_CHIP_NIAGARA5:
1840 case SUN4V_CHIP_SPARC64X:
1841 case SUN4V_CHIP_SPARC_M6:
1842 /* T4 and later support 52-bit virtual addresses. */
1843 sparc64_va_hole_top = 0xfff8000000000000UL;
1844 sparc64_va_hole_bottom = 0x0008000000000000UL;
1847 case SUN4V_CHIP_SPARC_M7:
1849 /* M7 and later support 52-bit virtual addresses. */
1850 sparc64_va_hole_top = 0xfff8000000000000UL;
1851 sparc64_va_hole_bottom = 0x0008000000000000UL;
1857 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1858 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1863 PAGE_OFFSET = sparc64_va_hole_top;
1864 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1865 (sparc64_va_hole_bottom >> 2));
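/* For example, on a T4-class chip (52-bit virtual addresses) this
 * yields PAGE_OFFSET == 0xfff8000000000000 and
 * VMALLOC_END == 0x0008000000000000/2 + 0x0008000000000000/4
 *             == 0x0006000000000000.
 */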
1867 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1868 PAGE_OFFSET, max_phys_bits);
1869 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1870 VMALLOC_START, VMALLOC_END);
1871 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1872 VMEMMAP_BASE, VMEMMAP_BASE << 1);
1875 static void __init tsb_phys_patch(void)
1877 struct tsb_ldquad_phys_patch_entry *pquad;
1878 struct tsb_phys_patch_entry *p;
1880 pquad = &__tsb_ldquad_phys_patch;
1881 while (pquad < &__tsb_ldquad_phys_patch_end) {
1882 unsigned long addr = pquad->addr;
1884 if (tlb_type == hypervisor)
1885 *(unsigned int *) addr = pquad->sun4v_insn;
1887 *(unsigned int *) addr = pquad->sun4u_insn;
1889 __asm__ __volatile__("flush %0"
1896 p = &__tsb_phys_patch;
1897 while (p < &__tsb_phys_patch_end) {
1898 unsigned long addr = p->addr;
1900 *(unsigned int *) addr = p->insn;
1902 __asm__ __volatile__("flush %0"
1910 /* Don't mark as init, we give this to the Hypervisor. */
1911 #ifndef CONFIG_DEBUG_PAGEALLOC
1912 #define NUM_KTSB_DESCR 2
1914 #define NUM_KTSB_DESCR 1
1916 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1918 /* The swapper TSBs are loaded with a base sequence of:
1920 * sethi %uhi(SYMBOL), REG1
1921 * sethi %hi(SYMBOL), REG2
1922 * or REG1, %ulo(SYMBOL), REG1
1923 * or REG2, %lo(SYMBOL), REG2
1924 * sllx REG1, 32, REG1
1925 * or REG1, REG2, REG1
1927 * When we use physical addressing for the TSB accesses, we patch the
1928 * first four instructions in the above sequence.
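/* (sethi carries a 22-bit immediate holding bits 31:10 of a 32-bit
 * value and or/%lo supplies the low 10 bits, so patching
 * high >> 10, low >> 10, high & 0x3ff and low & 0x3ff into those
 * four instructions rebuilds the full 64-bit TSB physical address at
 * run time; see patch_one_ktsb_phys() below.)
 */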
1931 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1933 unsigned long high_bits, low_bits;
1935 high_bits = (pa >> 32) & 0xffffffff;
1936 low_bits = (pa >> 0) & 0xffffffff;
1938 while (start < end) {
1939 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1941 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
1942 __asm__ __volatile__("flush %0" : : "r" (ia));
1944 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
1945 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1947 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1948 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
1950 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
1951 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
1957 static void ktsb_phys_patch(void)
1959 extern unsigned int __swapper_tsb_phys_patch;
1960 extern unsigned int __swapper_tsb_phys_patch_end;
1961 unsigned long ktsb_pa;
1963 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1964 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1965 &__swapper_tsb_phys_patch_end, ktsb_pa);
1966 #ifndef CONFIG_DEBUG_PAGEALLOC
1968 extern unsigned int __swapper_4m_tsb_phys_patch;
1969 extern unsigned int __swapper_4m_tsb_phys_patch_end;
1970 ktsb_pa = (kern_base +
1971 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1972 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1973 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1978 static void __init sun4v_ktsb_init(void)
1980 unsigned long ktsb_pa;
1982 /* First KTSB for PAGE_SIZE mappings. */
1983 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1985 switch (PAGE_SIZE) {
1988 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1989 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1993 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1994 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1998 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1999 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2002 case 4 * 1024 * 1024:
2003 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2004 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2008 ktsb_descr[0].assoc = 1;
2009 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2010 ktsb_descr[0].ctx_idx = 0;
2011 ktsb_descr[0].tsb_base = ktsb_pa;
2012 ktsb_descr[0].resv = 0;
2014 #ifndef CONFIG_DEBUG_PAGEALLOC
2015 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
2016 ktsb_pa = (kern_base +
2017 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2019 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2020 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2021 HV_PGSZ_MASK_256MB |
2023 HV_PGSZ_MASK_16GB) &
2025 ktsb_descr[1].assoc = 1;
2026 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2027 ktsb_descr[1].ctx_idx = 0;
2028 ktsb_descr[1].tsb_base = ktsb_pa;
2029 ktsb_descr[1].resv = 0;
2033 void sun4v_ktsb_register(void)
2035 unsigned long pa, ret;
2037 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2039 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2041 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2042 "errors with %lx\n", pa, ret);
2047 static void __init sun4u_linear_pte_xor_finalize(void)
2049 #ifndef CONFIG_DEBUG_PAGEALLOC
2050 /* This is where we would add Panther support for
2051 * 32MB and 256MB pages.
2056 static void __init sun4v_linear_pte_xor_finalize(void)
2058 unsigned long pagecv_flag;
2060 /* Bit 9 of the TTE is no longer the CV bit on the M7 processor; it
2061 * instead enables MCD errors. Do not set bit 9 on the M7 processor.
2063 switch (sun4v_chip_type) {
2064 case SUN4V_CHIP_SPARC_M7:
2068 pagecv_flag = _PAGE_CV_4V;
2071 #ifndef CONFIG_DEBUG_PAGEALLOC
2072 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2073 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2075 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2076 _PAGE_P_4V | _PAGE_W_4V);
2078 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2081 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2082 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2084 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2085 _PAGE_P_4V | _PAGE_W_4V);
2087 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2090 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2091 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2093 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2094 _PAGE_P_4V | _PAGE_W_4V);
2096 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2101 /* paging_init() sets up the page tables */
2103 static unsigned long last_valid_pfn;
2105 static void sun4u_pgprot_init(void);
2106 static void sun4v_pgprot_init(void);
2108 static phys_addr_t __init available_memory(void)
2110 phys_addr_t available = 0ULL;
2111 phys_addr_t pa_start, pa_end;
2114 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2116 available = available + (pa_end - pa_start);
2121 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2122 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2123 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2124 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2125 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2126 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2128 /* We need to exclude reserved regions. This exclusion will include
2129 * vmlinux and initrd. To be more precise, the initrd size could be used to
2130 * compute a new, lower limit because the initrd is freed later during initialization.
2132 static void __init reduce_memory(phys_addr_t limit_ram)
2134 phys_addr_t avail_ram = available_memory();
2135 phys_addr_t pa_start, pa_end;
2138 if (limit_ram >= avail_ram)
2141 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2143 phys_addr_t region_size = pa_end - pa_start;
2144 phys_addr_t clip_start = pa_start;
2146 avail_ram = avail_ram - region_size;
2147 /* Are we consuming too much? */
2148 if (avail_ram < limit_ram) {
2149 phys_addr_t give_back = limit_ram - avail_ram;
2151 region_size = region_size - give_back;
2152 clip_start = clip_start + give_back;
2155 memblock_remove(clip_start, region_size);
2157 if (avail_ram <= limit_ram)
2163 void __init paging_init(void)
2165 unsigned long end_pfn, shift, phys_base;
2166 unsigned long real_end, i;
2169 setup_page_offset();
2171 /* These build time checks make sure that the dcache_dirty_cpu()
2172 * page->flags usage will work.
2174 * When a page gets marked as dcache-dirty, we store the
2175 * cpu number starting at bit 32 in the page->flags. Also,
2176 * functions like clear_dcache_dirty_cpu use the cpu mask
2177 * in 13-bit signed-immediate instruction fields.
2181 * Page flags must not reach into upper 32 bits that are used
2182 * for the cpu number
2184 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2187 * The bit fields placed in the high range must not reach below
2188 * the 32 bit boundary. Otherwise we cannot place the cpu field
2189 * at the 32 bit boundary.
2191 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2192 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2194 BUILD_BUG_ON(NR_CPUS > 4096);
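/* For example, at the maximum NR_CPUS of 4096 the cpu number needs
 * 12 bits starting at bit 32, so the section/node/zone fields laid
 * out downwards from bit 63 may use at most the remaining 20 bits,
 * which is what the BUILD_BUG_ON on the SECTIONS/NODES/ZONES widths
 * above enforces.
 */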
2196 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2197 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2199 /* Invalidate both kernel TSBs. */
2200 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2201 #ifndef CONFIG_DEBUG_PAGEALLOC
2202 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2205 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2206 * bit on M7 processor. This is a conflicting usage of the same
2207 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2208 * Detection error on all pages and this will lead to problems
2209 * later. The kernel does not run with MCD enabled and hence the rest
2210 * of the required steps to fully configure memory corruption
2211 * detection are not taken. We need to ensure TTE.mcde is not
2212 * set on the M7 processor. Compute the value of the cacheability
2213 * flag for later use, taking this into consideration.
2215 switch (sun4v_chip_type) {
2216 case SUN4V_CHIP_SPARC_M7:
2217 page_cache4v_flag = _PAGE_CP_4V;
2220 page_cache4v_flag = _PAGE_CACHE_4V;
2224 if (tlb_type == hypervisor)
2225 sun4v_pgprot_init();
2227 sun4u_pgprot_init();
2229 if (tlb_type == cheetah_plus ||
2230 tlb_type == hypervisor) {
2235 if (tlb_type == hypervisor)
2236 sun4v_patch_tlb_handlers();
2238 /* Find available physical memory...
2240 * Read it twice in order to work around a bug in openfirmware.
2241 * The call to grab this table itself can cause openfirmware to
2242 * allocate memory, which in turn can take away some space from
2243 * the list of available memory. Reading it twice makes sure
2244 * we really do get the final value.
2246 read_obp_translations();
2247 read_obp_memory("reg", &pall[0], &pall_ents);
2248 read_obp_memory("available", &pavail[0], &pavail_ents);
2249 read_obp_memory("available", &pavail[0], &pavail_ents);
2251 phys_base = 0xffffffffffffffffUL;
2252 for (i = 0; i < pavail_ents; i++) {
2253 phys_base = min(phys_base, pavail[i].phys_addr);
2254 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2257 memblock_reserve(kern_base, kern_size);
2259 find_ramdisk(phys_base);
2261 if (cmdline_memory_size)
2262 reduce_memory(cmdline_memory_size);
2264 memblock_allow_resize();
2265 memblock_dump_all();
2267 set_bit(0, mmu_context_bmap);
2269 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2271 real_end = (unsigned long)_end;
2272 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2273 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2274 num_kernel_image_mappings);
2276 /* Set kernel pgd to upper alias so physical page computations
2279 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2281 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2283 inherit_prom_mappings();
2285 /* Ok, we can use our TLB miss and window trap handlers safely. */
2290 prom_build_devicetree();
2291 of_populate_present_mask();
2293 of_fill_in_cpu_data();
2296 if (tlb_type == hypervisor) {
2298 mdesc_populate_present_mask(cpu_all_mask);
2300 mdesc_fill_in_cpu_data(cpu_all_mask);
2302 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2304 sun4v_linear_pte_xor_finalize();
2307 sun4v_ktsb_register();
2309 unsigned long impl, ver;
2311 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2312 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2314 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2315 impl = ((ver >> 32) & 0xffff);
2316 if (impl == PANTHER_IMPL)
2317 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2318 HV_PGSZ_MASK_256MB);
2320 sun4u_linear_pte_xor_finalize();
2323 /* Flush the TLBs and the 4M TSB so that the updated linear
2324 * pte XOR settings are realized for all mappings.
2327 #ifndef CONFIG_DEBUG_PAGEALLOC
2328 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2332 /* Setup bootmem... */
2333 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2335 /* Once the OF device tree and MDESC have been set up, we know
2336 * the list of possible cpus. Therefore we can allocate the
2339 for_each_possible_cpu(i) {
2340 node = cpu_to_node(i);
2342 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2345 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2350 kernel_physical_mapping_init();
2353 unsigned long max_zone_pfns[MAX_NR_ZONES];
2355 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2357 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2359 free_area_init_nodes(max_zone_pfns);
2362 printk("Booting Linux...\n");
2365 int page_in_phys_avail(unsigned long paddr)
2371 for (i = 0; i < pavail_ents; i++) {
2372 unsigned long start, end;
2374 start = pavail[i].phys_addr;
2375 end = start + pavail[i].reg_size;
2377 if (paddr >= start && paddr < end)
2380 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2382 #ifdef CONFIG_BLK_DEV_INITRD
2383 if (paddr >= __pa(initrd_start) &&
2384 paddr < __pa(PAGE_ALIGN(initrd_end)))
2391 static void __init register_page_bootmem_info(void)
2393 #ifdef CONFIG_NEED_MULTIPLE_NODES
2396 for_each_online_node(i)
2397 if (NODE_DATA(i)->node_spanned_pages)
2398 register_page_bootmem_info_node(NODE_DATA(i));
2401 void __init mem_init(void)
2403 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2408 * Must be done after boot memory is put on freelist, because here we
2409 * might set fields in deferred struct pages that have not yet been
2410 * initialized, and free_all_bootmem() initializes all the reserved
2411 * deferred pages for us.
2413 register_page_bootmem_info();
2416 * Set up the zero page, mark it reserved, so that page count
2417 * is not manipulated when freeing the page from user ptes.
2419 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2420 if (mem_map_zero == NULL) {
2421 prom_printf("paging_init: Cannot alloc zero page.\n");
2424 mark_page_reserved(mem_map_zero);
2426 mem_init_print_info(NULL);
2428 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2429 cheetah_ecache_flush_init();
2432 void free_initmem(void)
2434 unsigned long addr, initend;
2437 /* If the physical memory maps were trimmed by kernel command
2438 * line options, don't even try freeing this initmem stuff up.
2439 * The kernel image could have been in the trimmed out region
2440 * and if so the freeing below will free invalid page structs.
2442 if (cmdline_memory_size)
2446 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2448 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2449 initend = (unsigned long)(__init_end) & PAGE_MASK;
2450 for (; addr < initend; addr += PAGE_SIZE) {
2454 ((unsigned long) __va(kern_base)) -
2455 ((unsigned long) KERNBASE));
2456 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2459 free_reserved_page(virt_to_page(page));
2463 #ifdef CONFIG_BLK_DEV_INITRD
2464 void free_initrd_mem(unsigned long start, unsigned long end)
2466 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2471 pgprot_t PAGE_KERNEL __read_mostly;
2472 EXPORT_SYMBOL(PAGE_KERNEL);
2474 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2475 pgprot_t PAGE_COPY __read_mostly;
2477 pgprot_t PAGE_SHARED __read_mostly;
2478 EXPORT_SYMBOL(PAGE_SHARED);
2480 unsigned long pg_iobits __read_mostly;
2482 unsigned long _PAGE_IE __read_mostly;
2483 EXPORT_SYMBOL(_PAGE_IE);
2485 unsigned long _PAGE_E __read_mostly;
2486 EXPORT_SYMBOL(_PAGE_E);
2488 unsigned long _PAGE_CACHE __read_mostly;
2489 EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node)
{
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);

	pte_base |= _PAGE_PMD_HUGE;

	vstart = vstart & PMD_MASK;
	vend = ALIGN(vend, PMD_SIZE);
	for (; vstart < vend; vstart += PMD_SIZE) {
		pgd_t *pgd = pgd_offset_k(vstart);
		unsigned long pte;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd)) {
			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new)
				return -ENOMEM;
			pgd_populate(&init_mm, pgd, new);
		}

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new)
				return -ENOMEM;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		pte = pmd_val(*pmd);
		if (!(pte & _PAGE_VALID)) {
			void *block = vmemmap_alloc_block(PMD_SIZE, node);
			if (!block)
				return -ENOMEM;
			pmd_val(*pmd) = pte_base | __pa(block);
		}
	}

	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
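
/*
 * Illustrative note (not part of the original file): with SPARSEMEM_VMEMMAP
 * the struct page array is one virtually-contiguous region, so the
 * pfn <-> struct page conversions collapse to pointer arithmetic.  A minimal
 * sketch of what the generic helpers reduce to under this model:
 *
 *	struct page *page = vmemmap + pfn;	// pfn_to_page()
 *	unsigned long pfn = page - vmemmap;	// page_to_pfn()
 *
 * vmemmap_populate() above only has to guarantee that backing memory for the
 * covered part of that region exists, which it does in PMD_SIZE chunks, one
 * huge PMD mapping per chunk.
 */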
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
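
/*
 * Illustrative note (not part of the original file): the protection_map[]
 * index filled in above is the low four bits of vm_flags, i.e. VM_READ (0x1),
 * VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8), which is how the generic
 * vm_get_page_prot() consumes this table.  A rough sketch of that lookup:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * So, for example, a private read+write mapping (index 0x3) gets the
 * copy-on-write protections with the exec bit stripped.
 */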
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				page_cache4v_flag | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = page_cache4v_flag;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
				   _PAGE_W_4V);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);
	return pte;
}
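
/*
 * Illustrative, hypothetical usage (not part of the original file): build a
 * side-effect-safe, non-cached PTE for a 64KB device page at physical
 * address "paddr" in bus space "space":
 *
 *	pte_t pte = mk_pte_io(paddr, PAGE_KERNEL, space, 64 * 1024);
 *
 * pgprot_noncached() inside mk_pte_io() supplies the non-cacheable
 * attributes, and pte_sz_bits() picks the matching TTE size field.
 */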
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       page_cache4v_flag | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val ^ PAGE_OFFSET;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);
	return pte;
}
pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		free_hot_cold_page(page, 0);
		return NULL;
	}
	return (pte_t *) page_address(page);
}
void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages. */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;
	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
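
/*
 * Illustrative note (not part of the original file), assuming
 * REAL_HPAGE_SHIFT is 22 (4MB) and the huge page size is 8MB: each 8MB huge
 * page is backed by two real 4MB hardware TTEs.  Bit 22 of the faulting
 * virtual address selects which half is being touched, and OR-ing it into
 * the TTE above makes the TSB entry point at the matching 4MB half of the
 * (8MB-aligned) physical huge page:
 *
 *	addr bit 22 == 0  ->  TTE maps pa          (lower half)
 *	addr bit 22 == 1  ->  TTE maps pa + 4MB    (upper half)
 */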
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (faulthandler_disabled() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		bool need_context_reload = false;
		unsigned long ctx;

		spin_lock_irq(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * value.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			need_context_reload = true;
		}
		spin_unlock_irq(&ctx_alloc_lock);

		if (need_context_reload)
			on_each_cpu(context_reload, mm, 0);
	}
}
#endif
static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}

static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end   = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end   = compute_kern_paddr(_edata - 1);
	bss_resource.start  = compute_kern_paddr(__bss_start);
	bss_resource.end    = compute_kern_paddr(_end - 1);
}
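
/*
 * Illustrative, hypothetical worked example (not part of the original file):
 * the image is linked at KERNBASE but loaded at physical kern_base, so a
 * link-time virtual address translates to a physical one by a fixed offset.
 * Assuming, purely for illustration, KERNBASE = 0x400000, a firmware load
 * address kern_base = 0x20400000, and _etext = 0x4f0000:
 *
 *	compute_kern_paddr(_etext) = 0x4f0000 - 0x400000 + 0x20400000
 *	                           = 0x204f0000
 *
 * i.e. the resource entries above describe where the kernel text/data/bss
 * actually sit in physical memory, not their link-time addresses.
 */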
static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
arch_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}
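
/*
 * Illustrative note (not part of the original file): if a flush request
 * covers [start, end) with start below LOW_OBP_ADDRESS and end above
 * HI_OBP_ADDRESS, the code above issues two flushes,
 *
 *	[start, LOW_OBP_ADDRESS)  and  [HI_OBP_ADDRESS, end),
 *
 * so the OpenBoot PROM's locked translations in the
 * [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) window are never demapped.  Ranges that
 * do not intersect that window fall through to the single
 * flush_tsb_kernel_range()/do_flush_tlb_kernel_range() pair.
 */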