/* GNU Linux-libre 5.15.137-gnu — releases.git: arch/mips/kernel/mips-cpc.c */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2013 Imagination Technologies
4  * Author: Paul Burton <paul.burton@mips.com>
5  */
6
7 #include <linux/errno.h>
8 #include <linux/percpu.h>
9 #include <linux/of.h>
10 #include <linux/of_address.h>
11 #include <linux/spinlock.h>
12
13 #include <asm/mips-cps.h>
14
/* Virtual base of the CPC register block; set by mips_cpc_probe(), NULL until then */
void __iomem *mips_cpc_base;

/* Per-core lock serialising accesses to the CPC core-other region (pre-CM3 only) */
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);

/* IRQ flags saved by mips_cpc_lock_other() for the matching unlock */
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
20
21 phys_addr_t __weak mips_cpc_default_phys_base(void)
22 {
23         struct device_node *cpc_node;
24         struct resource res;
25         int err;
26
27         cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
28         if (cpc_node) {
29                 err = of_address_to_resource(cpc_node, 0, &res);
30                 of_node_put(cpc_node);
31                 if (!err)
32                         return res.start;
33         }
34
35         return 0;
36 }
37
38 /**
39  * mips_cpc_phys_base - retrieve the physical base address of the CPC
40  *
41  * This function returns the physical base address of the Cluster Power
42  * Controller memory mapped registers, or 0 if no Cluster Power Controller
43  * is present.
44  */
45 static phys_addr_t mips_cpc_phys_base(void)
46 {
47         unsigned long cpc_base;
48
49         if (!mips_cm_present())
50                 return 0;
51
52         if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
53                 return 0;
54
55         /* If the CPC is already enabled, leave it so */
56         cpc_base = read_gcr_cpc_base();
57         if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
58                 return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
59
60         /* Otherwise, use the default address */
61         cpc_base = mips_cpc_default_phys_base();
62         if (!cpc_base)
63                 return cpc_base;
64
65         /* Enable the CPC, mapped at the default address */
66         write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
67         return cpc_base;
68 }
69
70 int mips_cpc_probe(void)
71 {
72         phys_addr_t addr;
73         unsigned int cpu;
74
75         for_each_possible_cpu(cpu)
76                 spin_lock_init(&per_cpu(cpc_core_lock, cpu));
77
78         addr = mips_cpc_phys_base();
79         if (!addr)
80                 return -ENODEV;
81
82         mips_cpc_base = ioremap(addr, 0x8000);
83         if (!mips_cpc_base)
84                 return -ENXIO;
85
86         return 0;
87 }
88
/**
 * mips_cpc_lock_other - lock this core's access to the CPC core-other region
 * @core: the core number the core-other region should map to
 *
 * Pairs with mips_cpc_unlock_other(). On pre-CM3 systems this takes the
 * calling core's per-CPU lock with IRQs disabled, stashing the IRQ flags
 * in a per-CPU variable for the unlock path; CM >= 3 systems serialise
 * the CPC via mips_cm_lock_other() instead, so this is a no-op there.
 */
void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	/* Disable preemption first so the current core cannot change under us */
	preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
109
/**
 * mips_cpc_unlock_other - unlock this core's access to the CPC core-other region
 *
 * Pairs with mips_cpc_lock_other(): releases the calling core's per-CPU
 * lock using the IRQ flags saved at lock time, then re-enables
 * preemption. A no-op on CM >= 3 systems, matching the lock path.
 */
void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	curr_core = cpu_core(&current_cpu_data);
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}