1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _M68K_PAGE_MM_H
3 #define _M68K_PAGE_MM_H
7 #include <linux/compiler.h>
8 #include <asm/module.h>
11 * We don't need to check for alignment etc.
/*
 * copy_page() - copy one PAGE_SIZE page, '040/'060-only builds.
 * Uses the move16 instruction, which transfers 16 bytes per operation;
 * two move16s per loop iteration move 32 bytes, matching the
 * PAGE_SIZE / 32 - 1 loop count in the inputs below.
 * NOTE(review): this listing elides several original lines (no 'tmp'
 * declaration, no dbra/loop close, no asm clobbers or function close
 * are visible) — confirm against the complete source before editing.
 */
13 #ifdef CPU_M68040_OR_M68060_ONLY
14 static inline void copy_page(void *to, void *from)
18 __asm__ __volatile__("1:\t"
20 "move16 %1@+,%0@+\n\t"
21 "move16 %1@+,%0@+\n\t"
/* 'to'/'from' double as outputs ("=a") because the asm post-increments
   them; tmp ("=d") presumably holds the loop counter — TODO confirm */
24 : "=a" (to), "=a" (from), "=d" (tmp)
25 : "0" (to), "1" (from) , "2" (PAGE_SIZE / 32 - 1)
/*
 * clear_page() - zero one PAGE_SIZE page, '040/'060-only builds.
 * The move16 reads back from the start of the page (%2, input "a" (page))
 * and streams it forward through the rest of the page; together with the
 * (PAGE_SIZE - 16) / 16 - 1 loop count this suggests the first 16 bytes
 * are zeroed by ordinary stores on lines elided from this listing —
 * NOTE(review): confirm against the complete source; the asm open,
 * loop close and function close are not visible here.
 */
29 static inline void clear_page(void *page)
32 unsigned long *sp = page;
39 __asm__ __volatile__("1:\t"
41 "move16 %2@+,%0@+\n\t"
46 : "=a" (sp), "=d" (tmp)
47 : "a" (page), "0" (sp),
48 "1" ((PAGE_SIZE - 16) / 16 - 1));
/*
 * Generic fallbacks for CPUs without move16 — presumably the #else
 * branch of the CPU_M68040_OR_M68060_ONLY conditional above (the #else
 * line itself is elided from this listing; TODO confirm).
 */
52 #define clear_page(page) memset((page), 0, PAGE_SIZE)
53 #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
/*
 * User-page variants: clear/copy the page, then flush the data cache
 * for the corresponding struct page so userspace sees the new contents.
 * NOTE(review): the "} while (0)" terminators of both do-while macros
 * are on lines elided from this listing — confirm before editing.
 * (No comments inserted between the continued lines: each visible line
 * ends in a backslash splice.)
 */
56 #define clear_user_page(addr, vaddr, page) \
57 do { clear_page(addr); \
58 flush_dcache_page(page); \
60 #define copy_user_page(to, from, vaddr, page) \
61 do { copy_page(to, from); \
62 flush_dcache_page(page); \
/* Runtime virtual<->physical offset, patched in by the m68k fixup
   mechanism (see m68k_fixup_memoffset uses below). */
65 extern unsigned long m68k_memoffset;
69 #define WANT_PAGE_VIRTUAL
/*
 * ___pa()/__va() - virtual<->physical translation, non-Sun3 branch.
 * Each uses an asm sequence whose immediate operand is rewritten at
 * boot via the m68k_fixup_memoffset relocation (the asm bodies and
 * return statements are on lines elided from this listing —
 * NOTE(review): confirm against the complete source).
 */
71 static inline unsigned long ___pa(void *vaddr)
78 : "0" (vaddr), "i" (m68k_fixup_memoffset));
/* Cast via (long) so __pa() accepts both pointers and integers. */
81 #define __pa(vaddr) ___pa((void *)(long)(vaddr))
82 static inline void *__va(unsigned long paddr)
89 : "0" (paddr), "i" (m68k_fixup_memoffset));
93 #else /* !CONFIG_SUN3 */
94 /* This #define is a horrible hack to suppress lots of warnings. --m */
95 #define __pa(x) ___pa((unsigned long)(x))
/*
 * Sun3 variant: simple linear offset translation — addresses at or
 * above PAGE_OFFSET map by subtracting it, others by adding 0x2000000.
 * NOTE(review): the if/else conditions selecting between the two
 * return statements (and the braces) are on lines elided from this
 * listing — confirm the exact boundary tests in the complete source.
 */
96 static inline unsigned long ___pa(unsigned long x)
101 return (x-PAGE_OFFSET);
103 return (x+0x2000000);
/* Inverse of ___pa(): same two linear mappings, reversed. */
106 static inline void *__va(unsigned long x)
112 return (void *)(x+PAGE_OFFSET);
114 return (void *)(x-0x2000000);
116 #endif /* CONFIG_SUN3 */
119 * NOTE: virtual isn't really correct, actually it should be the offset into the
120 * memory node, but we have no highmem, so that works for now.
121 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
122 * of the shifts unnecessary.
/* Page-frame-number <-> kernel virtual address, built directly on the
   __pa()/__va() translations above. */
124 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
125 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
/* Shift used to index pg_data_table[] by virtual address; patched at
   boot (see m68k_fixup_vnode_shift below). */
127 extern int m68k_virt_to_node_shift;
/* Single memory chunk: only one node, so the lookup is trivial. */
129 #ifdef CONFIG_SINGLE_MEMORY_CHUNK
130 #define __virt_to_node(addr) (&pg_data_map[0])
/* Multi-chunk case — presumably the #else branch (the #else line is
   elided from this listing; TODO confirm). */
132 extern struct pglist_data *pg_data_table[];
/*
 * __virt_to_node_shift() - return the boot-patched shift; marked
 * __attribute_const__ so the compiler may CSE repeated calls.
 * NOTE(review): the asm body and return are on elided lines.
 */
134 static inline __attribute_const__ int __virt_to_node_shift(void)
142 : "i" (m68k_fixup_vnode_shift));
146 #define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
/*
 * struct page <-> pfn <-> virtual-address conversions for the
 * multi-node (no sparsemem) layout: each pfn is located relative to
 * its node's node_mem_map / node_start_pfn.
 * NOTE(review): the closing "})" of each statement-expression macro is
 * on a line elided from this listing — confirm against the complete
 * source. (No comments inserted mid-macro: every continued line ends
 * in a backslash splice.)
 */
149 #define virt_to_page(addr) ({ \
150 pfn_to_page(virt_to_pfn(addr)); \
152 #define page_to_virt(page) ({ \
153 pfn_to_virt(page_to_pfn(page)); \
156 #define pfn_to_page(pfn) ({ \
157 unsigned long __pfn = (pfn); \
158 struct pglist_data *pgdat; \
159 pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
160 pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
162 #define page_to_pfn(_page) ({ \
163 const struct page *__p = (_page); \
164 struct pglist_data *pgdat; \
165 pgdat = &pg_data_map[page_to_nid(__p)]; \
166 ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
169 #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
170 #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
172 #endif /* __ASSEMBLY__ */
174 #endif /* _M68K_PAGE_MM_H */