 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif

#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
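		/*
		 * e.g. (illustrative values, derived from the decode above):
		 * slc_cfg.sz == 2 gives a 512K SLC (128K << 2), and
		 * slc_cfg.lsz == 1 gives 64B lines.
		 */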
	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
void read_decode_cache_bcr(void)
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
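	/*
	 * Color math for the alias check below (illustrative numbers):
	 * a 32K, 2-way VIPT cache with 8K pages has a 16K way-size, i.e.
	 * 32/2/8 = 2 page colors, so it aliases; a 16K, 2-way cache
	 * (one color) does not.
	 */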
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
		read_decode_cache_bcr_arcv2(cpu);

/*
 * Line Operation on {I,D}-Cache
 */
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The original Cache Management Module "CDU" only required paddr to invalidate
 * a certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * MMU v1/v2 (Fixed Page Size 8k)
 *
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * original 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this "clumsy"
 * interface for additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable; actual bits needed were
 * fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
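 *
 * Worked example (illustrative numbers, not from the databook; 8K pages):
 * invalidating the line for vaddr 0x7000_6000 backed by paddr 0x8020_6000
 * in a 64K, 2-way cache (4 aliases): vaddr bits [14:13] = b'11 get
 * stuffed into paddr[4:0], so the CDU is handed 0x8020_6003; the low 5
 * bits would otherwise just be the ignored line-offset.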
 *
 * MMU v3
 *
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced:
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
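	/*
	 * e.g. (illustrative, assuming 32B lines): paddr = 0x8000_1234,
	 * sz = 0x40: the floor step gives sz = 0x54, paddr = 0x8000_1220;
	 * DIV_ROUND_UP(0x54, 32) = 3 lines, covering the whole request.
	 */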
	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
 * For ARC700 MMUv3 I-cache and D-cache flushes
 *  - ARC700 programming model requires paddr and vaddr be passed in separate
 *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
 *    caches actually alias or not.
 *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
 *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
 */
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3: cache ops require paddr in PTAG reg
	 * if V-P mapping is const for the loop (full page), PTAG can be
	 * written once, outside the loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);
	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
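	/*
	 * e.g. (illustrative): for a 40-bit paddr 0x1_2345_6780, PTAG_HI
	 * is handed 0x01 (paddr >> 32) here, while the per-line PTAG
	 * writes below carry the lower 32 bits, 0x2345_6780 onwards.
	 */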
	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		/*
		 * Non aliasing I-cache in HS38,
		 * aliasing I-cache handled in __cache_line_loop_v3()
		 */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
/*
 * optimized flush operation which takes a region as opposed to iterating per line
 */
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_DC_STARTR;

	/* for any leading gap between @paddr and start of cache line */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	/*
	 * account for any trailing gap to end of cache line
	 * this is equivalent to DIV_ROUND_UP() in line ops above
	 */
	sz += L1_CACHE_BYTES - 1;
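	/*
	 * e.g. (illustrative, assuming 64B lines): paddr = 0x8000_0010,
	 * sz = 0x30: the leading fixup gives sz = 0x40, paddr = 0x8000_0000;
	 * adding 63 gives sz = 0x7F, so the exclusive ENDR below still
	 * covers the one line the request touches, like DIV_ROUND_UP() would.
	 */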
	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
static inline void __before_dc_op(const int op)
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RGN_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}

	write_aux_reg(ctl, val);
static inline void __after_dc_op(const int op)
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int reg;

	/* flush / flush-n-inv both wait */
	while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
		;

	/* Switch back to default Invalidate mode */
	if (op == OP_FLUSH_N_INV)
		write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 */
static inline void __dc_entire_op(const int op)
	int aux;

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);
static inline void __dc_disable(void)
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);

static void __dc_enable(void)
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	local_irq_restore(flags);
#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */
#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */

static void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)
struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);

#endif /* CONFIG_SMP */
#else /* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */
noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
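	/*
	 * Net effect of the IM/RGN_OP setup above (summary, for reference):
	 * OP_INV -> IM=0, RGN_OP=Inv (plain discard); OP_FLUSH -> RGN_OP
	 * left at Flush; OP_FLUSH_N_INV -> IM=1, RGN_OP=Inv (wback+discard).
	 */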
	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
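	/* e.g. (illustrative, 64B SLC lines): paddr = 0x8000_0000,
	 * sz = 0x100: end = 0x8000_013F, so START and END differ even
	 * when the region is an exact multiple of the line size.
	 */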
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
static inline void arc_slc_disable(void)
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);

static inline void arc_slc_enable(void)
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {
		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);

EXPORT_SYMBOL(flush_dcache_page);
/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_INV);

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH);

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
	__dma_cache_wback_inv(start, sz);
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
	__dma_cache_inv(start, sz);
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
	__dma_cache_wback(start, sz);
EXPORT_SYMBOL(dma_cache_wback);
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}
	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
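	 *
	 *     e.g. (illustrative, 8K pages): kstart = 0x7000_1FF0,
	 *     tot_sz = 0x20: pass 1 translates 0x7000_1FF0 and syncs 0x10
	 *     bytes (up to page end), pass 2 translates 0x7000_2000
	 *     separately for the remaining 0x10.
	 */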
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);

EXPORT_SYMBOL(flush_icache_range);
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpoint in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
noinline void flush_cache_all(void)
	unsigned long flags;

	local_irq_save(flags);

	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}
	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kfrom);
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
	clear_bit(PG_dc_clean, &page->flags);
EXPORT_SYMBOL(clear_user_page);
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
	/* TBD: optimize this */
/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so only once by Master core
 *    Non-Masters need not be accessing caches at that time
 *    - They are either HALT_ON_RESET and kick-started much later, or
 *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
 *      doesn't perturb caches or coherency unit
 *
 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling
 *
 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
 *    Coherency transactions
 */
noinline void __init arc_ioc_setup(void)
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (due to bootloader) it technically needs to
	 * be reconfigured with aperture base/size corresponding to Linux memory
	 * map, which will certainly be different from U-Boot's. But disabling and
	 * reenabling IOC when DMA might be potentially active is tricky business.
	 * To avoid random memory issues later, just panic here and ask user to
	 * upgrade bootloader to one which doesn't enable IOC
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");
	/*
	 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
	 * simultaneously. This happens because as of today IOC aperture covers
	 * only ZONE_NORMAL (low mem) and any dma transactions outside this
	 * region won't be HW coherent.
	 * If we want to use both IOC and ZONE_HIGHMEM we can use
	 * bounce_buffer to handle dma transactions to HIGHMEM.
	 * Also it is possible to modify dma_direct cache ops or increase IOC
	 * aperture size if we are planning to use HIGHMEM without PAE.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		panic("IOC and HIGHMEM can't be used simultaneously");
	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * currently IOC Aperture covers entire DDR
	 * TBD: fix for PGU + 1GB of low mem
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
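	/*
	 * e.g. (illustrative): mem_sz = 512MB: mem_sz >> 10 = 2^19 KB,
	 * order_base_2() = 19, and 19 - 2 = 17 = 0x11, matching the
	 * decode described above.
	 */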
	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
/*
 * Cache related boot time checks/setups only needed on master CPU:
 *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *    Assume SMP only, so all cores will have same cache config. A check on
 *    one core suffices for all
 *  - IOC setup / dma callbacks only need to be done once
 */
void __init arc_cache_init_master(void)
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}
	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}
	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");

	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable is not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();
	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}

	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be set
	 * but end up not being relevant as the first function in chain is not
	 * called at all for @dma_direct_ops
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
void __ref arc_cache_init(void)
	unsigned int __maybe_unused cpu = smp_processor_id();

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses
	 * And even if PAE is not enabled in kernel, the upper 32 bits still need
	 * to be zeroed to keep the ops sane.
	 * As an optimization for the more common !PAE enabled case, zero them
	 * out once at init, rather than checking/setting to 0 for every runtime op
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}