arch/loongarch/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-info.h>
#include <asm/cacheops.h>

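/* Query properties of a cache leaf from its descriptor flags */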
static inline bool cache_present(struct cache_desc *cdesc)
{
	return cdesc->flags & CACHE_PRESENT;
}

static inline bool cache_private(struct cache_desc *cdesc)
{
	return cdesc->flags & CACHE_PRIVATE;
}

static inline bool cache_inclusive(struct cache_desc *cdesc)
{
	return cdesc->flags & CACHE_INCLUSIVE;
}

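/* Line size of the last (outermost) cache leaf probed at boot */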
static inline unsigned int cpu_last_level_cache_line_size(void)
{
	int cache_present = boot_cpu_data.cache_leaves_present;

	return boot_cpu_data.cache_leaves[cache_present - 1].linesz;
}

asmlinkage void __flush_cache_all(void);
void local_flush_icache_range(unsigned long start, unsigned long end);

#define flush_icache_range	local_flush_icache_range
#define flush_icache_user_range	local_flush_icache_range

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

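/* LoongArch maintains cache coherency by hardware, so these can be no-ops */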
#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
#define flush_cache_vmap(start, end)			do { } while (0)
#define flush_cache_vunmap(start, end)			do { } while (0)
#define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
#define flush_dcache_page(page)				do { } while (0)
#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

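/* Issue a CACOP cache maintenance instruction of type "op" on the line containing "addr" */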
#define cache_op(op, addr)						\
	__asm__ __volatile__(						\
	"	cacop	%0, %1					\n"	\
	:								\
	: "i" (op), "ZC" (*(unsigned char *)(addr)))

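/* Index writeback-invalidate one cache line in the given cache leaf */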
static inline void flush_cache_line(int leaf, unsigned long addr)
{
	switch (leaf) {
	case Cache_LEAF0:
		cache_op(Index_Writeback_Inv_LEAF0, addr);
		break;
	case Cache_LEAF1:
		cache_op(Index_Writeback_Inv_LEAF1, addr);
		break;
	case Cache_LEAF2:
		cache_op(Index_Writeback_Inv_LEAF2, addr);
		break;
	case Cache_LEAF3:
		cache_op(Index_Writeback_Inv_LEAF3, addr);
		break;
	case Cache_LEAF4:
		cache_op(Index_Writeback_Inv_LEAF4, addr);
		break;
	case Cache_LEAF5:
		cache_op(Index_Writeback_Inv_LEAF5, addr);
		break;
	default:
		break;
	}
}

#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */