// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

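/*
 * Editor's note (descriptive, not from the original source): DMA-addressable
 * memory is taken to be the first 4GB of physical memory, expressed here as
 * an identity-mapped (region 7) kernel virtual address.
 */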
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

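/*
 * Lazily make the i-cache coherent with the d-cache for a page that is being
 * mapped executable.  PG_arch_1 records that the page has already been
 * flushed, so the expensive flush_icache_range() happens at most once per
 * page.
 */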
void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

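/*
 * The register backing store (RBS) grows upward from rbs_bot while the
 * memory stack grows downward from the top of the stack area, so both share
 * the stack rlimit.  Place rbs_bot a full (clamped) stack-rlimit's worth
 * below the start of the stack.
 */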
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and vm_area_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = vm_area_alloc(current->mm);
        if (vma) {
                vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        vm_area_free(vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = vm_area_alloc(current->mm);
                if (vma) {
                        vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                vm_area_free(vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *      - align up the beginning of initrd
         *      - align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once as an execute-only page to enable
         * privilege-promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

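/*
 * A static VMA describing the user-visible gate area at
 * FIXADDR_USER_START..FIXADDR_USER_END.  It is never linked into any mm;
 * get_gate_vma() hands it out so that /proc/<pid>/maps, core dumps and
 * get_user_pages() know about the mapping.
 */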
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

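        /*
         * A worked example, assuming the default 16KB page size (PAGE_SHIFT = 14)
         * and impl_va_bits = 51: mapped_space_bits = 3*(14-3) + 14 = 47, so the
         * page table can map 2^47 bytes per region, while
         * vmlpt_bits = 51 - 14 + 3 = 40, i.e. the VMLPT occupies the top 2^40
         * bytes of the region and neither panic below triggers.
         */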
        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
         * the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");


        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
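/*
 * Scan the kernel page tables that back the virtual mem_map, starting at
 * entry (node_start_pfn + i), and return the offset relative to
 * node_start_pfn of the next pfn whose struct page is actually mapped.
 * Used to skip over holes in the virtual memmap.
 */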
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);
        stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

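/*
 * Callback for efi_memmap_walk(): make sure the slice of the virtual
 * mem_map that describes the physical range [start, end) is backed by real
 * pages, building any missing page-table levels from node-local bootmem.
 */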
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

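/*
 * Callback for efi_memmap_walk(): initialize the vmem_map entries that
 * correspond to the physical range [start, end), clipped to the window
 * described by the memmap_init_callback_data argument.
 */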
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMINIT_EARLY, NULL);
        return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map) {
                memmap_init_zone(size, nid, zone, start_pfn,
                                 MEMINIT_EARLY, NULL);
        } else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

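/*
 * With a virtual mem_map, a pfn is valid only if its struct page is actually
 * mapped.  Probe the first byte of the entry (and, when the entry spans a
 * page boundary, its last byte too) with __get_user(), which fails cleanly
 * on an unmapped address instead of faulting.
 */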
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return     (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

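/*
 * Callback for efi_memmap_walk(): record the largest gap seen between
 * consecutive memory descriptors.  The result is used to decide whether a
 * virtual mem_map is worth setting up.
 */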
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

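/*
 * Called for each usable range of physical memory found in the EFI memory
 * map: add it to memblock for the given node, carving out anything reserved
 * for a crash kernel.
 */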
int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

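/*
 * Called for each EFI memory descriptor: widen the global
 * [min_low_pfn, max_low_pfn] range so that it covers this descriptor
 * (page-aligned with CONFIG_FLATMEM, granule-rounded otherwise).
 */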
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        int i;

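        /*
         * Each level of the ia64 page table is expected to occupy exactly one
         * page; the address-space arithmetic in ia64_mmu_init() relies on it.
         */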
        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        free_all_bootmem();
        mem_init_print_info(NULL);

        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
                bool want_memblock)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__,  ret);

        return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
}
#endif