/* [releases.git] arch/s390/kernel/abs_lowcore.c (commit: "Mention branches and keyring.") */
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/pgtable.h>
4 #include <asm/abs_lowcore.h>
5
/*
 * State flags returned by get_abs_lowcore() and consumed by
 * put_abs_lowcore() to undo the temporary changes made for an
 * access while the absolute lowcore is not yet mapped.
 */
#define ABS_LOWCORE_UNMAPPED    1	/* access went through the unmapped (boot) path */
#define ABS_LOWCORE_LAP_ON      2	/* low-address protection was on and must be restored */
#define ABS_LOWCORE_IRQS_ON     4	/* interrupts were enabled and must be re-enabled */

/* Base virtual address of the per-CPU absolute lowcore area; set during boot. */
unsigned long __bootdata_preserved(__abs_lowcore);
/* True once the absolute lowcore region has been mapped for all CPUs. */
bool __ro_after_init abs_lowcore_mapped;
12
13 int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
14 {
15         unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
16         unsigned long phys = __pa(lc);
17         int rc, i;
18
19         for (i = 0; i < LC_PAGES; i++) {
20                 rc = __vmem_map_4k_page(addr, phys, PAGE_KERNEL, alloc);
21                 if (rc) {
22                         /*
23                          * Do not unmap allocated page tables in case the
24                          * allocation was not requested. In such a case the
25                          * request is expected coming from an atomic context,
26                          * while the unmap attempt might sleep.
27                          */
28                         if (alloc) {
29                                 for (--i; i >= 0; i--) {
30                                         addr -= PAGE_SIZE;
31                                         vmem_unmap_4k_page(addr);
32                                 }
33                         }
34                         return rc;
35                 }
36                 addr += PAGE_SIZE;
37                 phys += PAGE_SIZE;
38         }
39         return 0;
40 }
41
42 void abs_lowcore_unmap(int cpu)
43 {
44         unsigned long addr = __abs_lowcore + (cpu * sizeof(struct lowcore));
45         int i;
46
47         for (i = 0; i < LC_PAGES; i++) {
48                 vmem_unmap_4k_page(addr);
49                 addr += PAGE_SIZE;
50         }
51 }
52
53 struct lowcore *get_abs_lowcore(unsigned long *flags)
54 {
55         unsigned long irq_flags;
56         union ctlreg0 cr0;
57         int cpu;
58
59         *flags = 0;
60         cpu = get_cpu();
61         if (abs_lowcore_mapped) {
62                 return ((struct lowcore *)__abs_lowcore) + cpu;
63         } else {
64                 if (cpu != 0)
65                         panic("Invalid unmapped absolute lowcore access\n");
66                 local_irq_save(irq_flags);
67                 if (!irqs_disabled_flags(irq_flags))
68                         *flags |= ABS_LOWCORE_IRQS_ON;
69                 __ctl_store(cr0.val, 0, 0);
70                 if (cr0.lap) {
71                         *flags |= ABS_LOWCORE_LAP_ON;
72                         __ctl_clear_bit(0, 28);
73                 }
74                 *flags |= ABS_LOWCORE_UNMAPPED;
75                 return lowcore_ptr[0];
76         }
77 }
78
79 void put_abs_lowcore(struct lowcore *lc, unsigned long flags)
80 {
81         if (abs_lowcore_mapped) {
82                 if (flags)
83                         panic("Invalid mapped absolute lowcore release\n");
84         } else {
85                 if (smp_processor_id() != 0)
86                         panic("Invalid mapped absolute lowcore access\n");
87                 if (!(flags & ABS_LOWCORE_UNMAPPED))
88                         panic("Invalid unmapped absolute lowcore release\n");
89                 if (flags & ABS_LOWCORE_LAP_ON)
90                         __ctl_set_bit(0, 28);
91                 if (flags & ABS_LOWCORE_IRQS_ON)
92                         local_irq_enable();
93         }
94         put_cpu();
95 }