1 // SPDX-License-Identifier: GPL-2.0-or-later
5 #include <linux/fcntl.h>
6 #include <linux/memblock.h>
9 #include <asm/bootinfo.h>
12 #include <boot_param.h>
#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE

/* Low/high memory sizes in MiB; presumably filled in from firmware boot
 * parameters before prom_init_memory() runs — the writer is outside this
 * chunk (TODO confirm against the caller/parsing code). */
u32 memsize, highmemsize;

/*
 * prom_init_memory() - register RAM and reserved regions for the legacy
 * (non-LEFI) firmware interface.
 *
 * Sizes arrive in MiB, hence the "<< 20" conversions to bytes.
 *
 * NOTE(review): several lines of this function are elided in this view
 * (opening brace, the power-of-two adjustment after the fls/ffs check,
 * the CONFIG_64BIT guard opening, and the closing brace); the code below
 * is a fragment and must be read against the full file.
 */
void __init prom_init_memory(void)
	/* Low memory: [0, memsize MiB) is usable RAM. */
	add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);

	/* The gap from the top of low RAM up to the PCI MMIO window start
	 * is not RAM — mark it reserved. */
	add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize <<
				20), BOOT_MEM_RESERVED);

#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
	/* The CPU->DDR address window spans a power-of-two size (1 << bit).
	 * fls() != ffs() means more than one bit is set, i.e. the total is
	 * not a power of two; the rounding-up statement that follows this
	 * check is elided in this view. */
	bit = fls(memsize + highmemsize);
	if (bit != ffs(memsize + highmemsize))

	/* set cpu window3 to map CPU to DDR: 2G -> 2G */
	LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
				  0x80000000ul, (1 << bit));
#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */

	/* High memory region starting at LOONGSON_HIGHMEM_START; guarded by
	 * CONFIG_64BIT per the #endif below (the opening #ifdef is elided). */
	add_memory_region(LOONGSON_HIGHMEM_START,
			  highmemsize << 20, BOOT_MEM_RAM);

	/* Range between the end of PCI MMIO and the start of high memory
	 * holds no RAM — reserve it. */
	add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START -
			  LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED);
#endif /* !CONFIG_64BIT */
55 #else /* CONFIG_LEFI_FIRMWARE_INTERFACE */
/*
 * prom_init_memory() - LEFI firmware variant: walk the firmware-provided
 * memory map (loongson_memmap) and hand each entry to memblock.
 *
 * Entry sizes are in MiB; the (u64) cast before "<< 20" avoids 32-bit
 * overflow when converting to bytes.
 *
 * NOTE(review): this view is a fragment — the opening brace, the local
 * declarations (i, node_id, mem_type), the switch (mem_type) header, the
 * case labels preceding the first two memblock_add() calls, the break
 * statements, and the closing braces are all elided.
 */
void __init prom_init_memory(void)
	/* parse memory information */
	for (i = 0; i < loongson_memmap->nr_map; i++) {
		node_id = loongson_memmap->map[i].node_id;
		mem_type = loongson_memmap->map[i].mem_type;

		/* Usable RAM entry (case label elided): add to memblock. */
		memblock_add(loongson_memmap->map[i].mem_start,
			     (u64)loongson_memmap->map[i].mem_size << 20);

		/* Second usable-RAM entry type (case label elided). */
		memblock_add(loongson_memmap->map[i].mem_start,
			     (u64)loongson_memmap->map[i].mem_size << 20);

		case SYSTEM_RAM_RESERVED:
			/* Firmware-reserved RAM: still added to memblock
			 * elsewhere or not at all — here only reserved so the
			 * kernel never allocates from it. */
			memblock_reserve(loongson_memmap->map[i].mem_start,
					 (u64)loongson_memmap->map[i].mem_size << 20);
88 #endif /* CONFIG_LEFI_FIRMWARE_INTERFACE */
/* override of arch/mips/mm/cache.c: __uncached_access */
/*
 * __uncached_access() - decide whether a physical address mapped through
 * /dev/mem-style interfaces must be mapped uncached.
 *
 * Returns nonzero (uncached) when the caller asked for synchronous access
 * (O_DSYNC), when @addr lies beyond managed RAM (high_memory), or when it
 * falls inside the Loongson MMIO window.
 *
 * NOTE(review): fragment — the opening brace and the O_DSYNC branch body
 * (presumably "return 1;") are elided in this view.
 */
int __uncached_access(struct file *file, unsigned long addr)
	/* O_DSYNC requests synchronous semantics, which need uncached I/O. */
	if (file->f_flags & O_DSYNC)

	/* Above end of RAM, or inside [MMIO_MEM_START, MMIO_MEM_END):
	 * device space, must not be cached. */
	return addr >= __pa(high_memory) ||
		((addr >= LOONGSON_MMIO_MEM_START) &&
		 (addr < LOONGSON_MMIO_MEM_END));
101 #ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
103 #include <linux/pci.h>
104 #include <linux/sched.h>
105 #include <asm/current.h>
/* Physical range of the VGA framebuffer found by find_vga_mem_init();
 * both stay zero if no display device was found. */
static unsigned long uca_start, uca_end;

/*
 * phys_mem_access_prot() - choose page protection for a user mapping of
 * physical memory (pfn/size), upgrading the VGA framebuffer range to
 * uncached-accelerated (write-combining-like) instead of plain uncached.
 *
 * NOTE(review): fragment — the opening brace, the "(end <= uca_end)" half
 * of the range check, the "& ~_CACHE_MASK" masking line, and the closing
 * braces are elided in this view.
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
	unsigned long offset = pfn << PAGE_SHIFT;
	unsigned long end = offset + size;

	if (__uncached_access(file, offset)) {
		/* Inside the discovered framebuffer window: use the
		 * accelerated uncached cache attribute. */
		if (uca_start && (offset >= uca_start) &&
			return __pgprot((pgprot_val(vma_prot) &
					_CACHE_UNCACHED_ACCELERATED);

		/* Everything else that needs uncached access gets the
		 * plain noncached attribute. */
		return pgprot_noncached(vma_prot);
/*
 * find_vga_mem_init() - scan PCI devices for a display-class device and
 * record its memory BAR range in uca_start/uca_end, so that
 * phys_mem_access_prot() can map the framebuffer uncached-accelerated.
 *
 * Runs as a late_initcall so PCI enumeration has completed.
 *
 * NOTE(review): fragment — the opening brace, the declarations of idx and
 * r, the "continue" statements after the BAR filters, the uca_end
 * assignment, the loop break, the return, and the closing braces are
 * elided in this view.  Also, "= 0" for a pointer would more idiomatically
 * be "= NULL" — worth fixing when editing the full file.
 */
static int __init find_vga_mem_init(void)
	struct pci_dev *dev = 0;

	for_each_pci_dev(dev) {
		/* Display-class device (class code's base class byte). */
		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
			for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
				r = &dev->resource[idx];
				/* Skip unassigned BARs (elided continue). */
				if (!r->start && r->end)
				/* Skip I/O-port BARs (elided continue). */
				if (r->flags & IORESOURCE_IO)
				/* First memory BAR: take it as the
				 * framebuffer window start. */
				if (r->flags & IORESOURCE_MEM) {
					uca_start = r->start;

late_initcall(find_vga_mem_init);
157 #endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */