GNU Linux-libre 5.15.72-gnu: arch/powerpc/platforms/pseries/lpar.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pSeries_lpar.c
4  * Copyright (C) 2001 Todd Inglett, IBM Corporation
5  *
6  * pSeries LPAR support.
7  */
8
9 /* Enables debugging of low-level hash table routines - careful! */
10 #undef DEBUG
11 #define pr_fmt(fmt) "lpar: " fmt
12
13 #include <linux/kernel.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/console.h>
16 #include <linux/export.h>
17 #include <linux/jump_label.h>
18 #include <linux/delay.h>
19 #include <linux/stop_machine.h>
20 #include <linux/spinlock.h>
21 #include <linux/cpuhotplug.h>
22 #include <linux/workqueue.h>
23 #include <linux/proc_fs.h>
24 #include <linux/pgtable.h>
25 #include <linux/debugfs.h>
26
27 #include <asm/processor.h>
28 #include <asm/mmu.h>
29 #include <asm/page.h>
30 #include <asm/machdep.h>
31 #include <asm/mmu_context.h>
32 #include <asm/iommu.h>
33 #include <asm/tlb.h>
34 #include <asm/prom.h>
35 #include <asm/cputable.h>
36 #include <asm/udbg.h>
37 #include <asm/smp.h>
38 #include <asm/trace.h>
39 #include <asm/firmware.h>
40 #include <asm/plpar_wrappers.h>
41 #include <asm/kexec.h>
42 #include <asm/fadump.h>
43 #include <asm/asm-prototypes.h>
44 #include <asm/dtl.h>
45
46 #include "pseries.h"
47
48 /* Flag bits for H_BULK_REMOVE */
49 #define HBR_REQUEST     0x4000000000000000UL
50 #define HBR_RESPONSE    0x8000000000000000UL
51 #define HBR_END         0xc000000000000000UL
52 #define HBR_AVPN        0x0200000000000000UL
53 #define HBR_ANDCOND     0x0100000000000000UL
54
55
56 /* in hvCall.S */
57 EXPORT_SYMBOL(plpar_hcall);
58 EXPORT_SYMBOL(plpar_hcall9);
59 EXPORT_SYMBOL(plpar_hcall_norets);
60
61 /*
62  * H_BLOCK_REMOVE supported block size for this page size in a segment whose
63  * base page size is that page size.
64  *
65  * The first index is the segment base page size, the second one is the actual
66  * page size.
67  */
68 static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
69
70 /*
71  * Due to the complexity involved, and because the current hypervisor only
72  * returns this value or 0, we limit H_BLOCK_REMOVE support to a block
73  * size of 8.
74  */
75 #define HBLKRM_SUPPORTED_BLOCK_SIZE 8
76
77 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
78 static u8 dtl_mask = DTL_LOG_PREEMPT;
79 #else
80 static u8 dtl_mask;
81 #endif
82
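/*
 * Allocate a dispatch trace log buffer for every possible CPU that does not
 * already have one.  If @time_limit is given, periodically reschedule so the
 * loop does not monopolise the CPU on large systems.
 */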
83 void alloc_dtl_buffers(unsigned long *time_limit)
84 {
85         int cpu;
86         struct paca_struct *pp;
87         struct dtl_entry *dtl;
88
89         for_each_possible_cpu(cpu) {
90                 pp = paca_ptrs[cpu];
91                 if (pp->dispatch_log)
92                         continue;
93                 dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
94                 if (!dtl) {
95                         pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
96                                 cpu);
97 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
98                         pr_warn("Stolen time statistics will be unreliable\n");
99 #endif
100                         break;
101                 }
102
103                 pp->dtl_ridx = 0;
104                 pp->dispatch_log = dtl;
105                 pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
106                 pp->dtl_curr = dtl;
107
108                 if (time_limit && time_after(jiffies, *time_limit)) {
109                         cond_resched();
110                         *time_limit = jiffies + HZ;
111                 }
112         }
113 }
114
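/*
 * Register this CPU's dispatch trace log buffer with the hypervisor and
 * enable logging according to the current dtl_mask.  Does nothing if the
 * buffer was never allocated or logging is disabled.
 */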
115 void register_dtl_buffer(int cpu)
116 {
117         long ret;
118         struct paca_struct *pp;
119         struct dtl_entry *dtl;
120         int hwcpu = get_hard_smp_processor_id(cpu);
121
122         pp = paca_ptrs[cpu];
123         dtl = pp->dispatch_log;
124         if (dtl && dtl_mask) {
125                 pp->dtl_ridx = 0;
126                 pp->dtl_curr = dtl;
127                 lppaca_of(cpu).dtl_idx = 0;
128
129                 /* hypervisor reads buffer length from this field */
130                 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
131                 ret = register_dtl(hwcpu, __pa(dtl));
132                 if (ret)
133                         pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
134                                cpu, hwcpu, ret);
135
136                 lppaca_of(cpu).dtl_enable_mask = dtl_mask;
137         }
138 }
139
140 #ifdef CONFIG_PPC_SPLPAR
141 struct dtl_worker {
142         struct delayed_work work;
143         int cpu;
144 };
145
146 struct vcpu_dispatch_data {
147         int last_disp_cpu;
148
149         int total_disp;
150
151         int same_cpu_disp;
152         int same_chip_disp;
153         int diff_chip_disp;
154         int far_chip_disp;
155
156         int numa_home_disp;
157         int numa_remote_disp;
158         int numa_far_disp;
159 };
160
161 /*
162  * This represents the number of cpus in the hypervisor. Since there is no
163  * architected way to discover the number of processors in the host, we
164  * provision for dealing with NR_CPUS. This is currently 2048 by default, and
165  * is sufficient for our purposes. This will need to be tweaked if
166  * CONFIG_NR_CPUS is changed.
167  */
168 #define NR_CPUS_H       NR_CPUS
169
170 DEFINE_RWLOCK(dtl_access_lock);
171 static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
172 static DEFINE_PER_CPU(u64, dtl_entry_ridx);
173 static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
174 static enum cpuhp_state dtl_worker_state;
175 static DEFINE_MUTEX(dtl_enable_mutex);
176 static int vcpudispatch_stats_on __read_mostly;
177 static int vcpudispatch_stats_freq = 50;
178 static __be32 *vcpu_associativity, *pcpu_associativity;
179
180
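/*
 * Free the per-CPU dispatch trace log buffers.  With
 * CONFIG_VIRT_CPU_ACCOUNTING_NATIVE the buffers are left in place, since they
 * are still needed for stolen time accounting (see alloc_dtl_buffers()).
 */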
181 static void free_dtl_buffers(unsigned long *time_limit)
182 {
183 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
184         int cpu;
185         struct paca_struct *pp;
186
187         for_each_possible_cpu(cpu) {
188                 pp = paca_ptrs[cpu];
189                 if (!pp->dispatch_log)
190                         continue;
191                 kmem_cache_free(dtl_cache, pp->dispatch_log);
192                 pp->dtl_ridx = 0;
193                 pp->dispatch_log = 0;
194                 pp->dispatch_log_end = 0;
195                 pp->dtl_curr = 0;
196
197                 if (time_limit && time_after(jiffies, *time_limit)) {
198                         cond_resched();
199                         *time_limit = jiffies + HZ;
200                 }
201         }
202 #endif
203 }
204
205 static int init_cpu_associativity(void)
206 {
207         vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
208                         VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
209         pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
210                         VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
211
212         if (!vcpu_associativity || !pcpu_associativity) {
213                 pr_err("error allocating memory for associativity information\n");
214                 return -ENOMEM;
215         }
216
217         return 0;
218 }
219
220 static void destroy_cpu_associativity(void)
221 {
222         kfree(vcpu_associativity);
223         kfree(pcpu_associativity);
224         vcpu_associativity = pcpu_associativity = 0;
225 }
226
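/*
 * Return the cached associativity array for @cpu, querying the hypervisor
 * via hcall_vphn() the first time the entry is looked up.  Returns NULL if
 * the hcall fails.
 */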
227 static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
228 {
229         __be32 *assoc;
230         int rc = 0;
231
232         assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
233         if (!assoc[0]) {
234                 rc = hcall_vphn(cpu, flag, &assoc[0]);
235                 if (rc)
236                         return NULL;
237         }
238
239         return assoc;
240 }
241
242 static __be32 *get_pcpu_associativity(int cpu)
243 {
244         return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
245 }
246
247 static __be32 *get_vcpu_associativity(int cpu)
248 {
249         return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
250 }
251
252 static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
253 {
254         __be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;
255
256         if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
257                 return -EINVAL;
258
259         last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
260         cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
261
262         if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
263                 return -EIO;
264
265         return cpu_relative_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
266 }
267
268 static int cpu_home_node_dispatch_distance(int disp_cpu)
269 {
270         __be32 *disp_cpu_assoc, *vcpu_assoc;
271         int vcpu_id = smp_processor_id();
272
273         if (disp_cpu >= NR_CPUS_H) {
274                 pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
275                                                 disp_cpu, NR_CPUS_H);
276                 return -EINVAL;
277         }
278
279         disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
280         vcpu_assoc = get_vcpu_associativity(vcpu_id);
281
282         if (!disp_cpu_assoc || !vcpu_assoc)
283                 return -EIO;
284
285         return cpu_relative_distance(disp_cpu_assoc, vcpu_assoc);
286 }
287
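/*
 * Classify the latest dispatch of this vCPU: relative to the previous
 * physical CPU it ran on (same core, same chip, different chip, far chip)
 * and relative to the vCPU's home NUMA node, then bump the matching
 * per-CPU counters.
 */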
288 static void update_vcpu_disp_stat(int disp_cpu)
289 {
290         struct vcpu_dispatch_data *disp;
291         int distance;
292
293         disp = this_cpu_ptr(&vcpu_disp_data);
294         if (disp->last_disp_cpu == -1) {
295                 disp->last_disp_cpu = disp_cpu;
296                 return;
297         }
298
299         disp->total_disp++;
300
301         if (disp->last_disp_cpu == disp_cpu ||
302                 (cpu_first_thread_sibling(disp->last_disp_cpu) ==
303                                         cpu_first_thread_sibling(disp_cpu)))
304                 disp->same_cpu_disp++;
305         else {
306                 distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
307                                                                 disp_cpu);
308                 if (distance < 0)
309                         pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
310                                         smp_processor_id());
311                 else {
312                         switch (distance) {
313                         case 0:
314                                 disp->same_chip_disp++;
315                                 break;
316                         case 1:
317                                 disp->diff_chip_disp++;
318                                 break;
319                         case 2:
320                                 disp->far_chip_disp++;
321                                 break;
322                         default:
323                                 pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
324                                                  smp_processor_id(),
325                                                  disp->last_disp_cpu,
326                                                  disp_cpu,
327                                                  distance);
328                         }
329                 }
330         }
331
332         distance = cpu_home_node_dispatch_distance(disp_cpu);
333         if (distance < 0)
334                 pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
335                                 smp_processor_id());
336         else {
337                 switch (distance) {
338                 case 0:
339                         disp->numa_home_disp++;
340                         break;
341                 case 1:
342                         disp->numa_remote_disp++;
343                         break;
344                 case 2:
345                         disp->numa_far_disp++;
346                         break;
347                 default:
348                         pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
349                                                  smp_processor_id(),
350                                                  disp_cpu,
351                                                  distance);
352                 }
353         }
354
355         disp->last_disp_cpu = disp_cpu;
356 }
357
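/*
 * Delayed-work handler: consume any new dispatch trace log entries since the
 * last read index, account for buffer overflow by skipping lost samples,
 * update the dispatch statistics and re-arm itself.
 */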
358 static void process_dtl_buffer(struct work_struct *work)
359 {
360         struct dtl_entry dtle;
361         u64 i = __this_cpu_read(dtl_entry_ridx);
362         struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
363         struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
364         struct lppaca *vpa = local_paca->lppaca_ptr;
365         struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
366
367         if (!local_paca->dispatch_log)
368                 return;
369
370         /* if we have been migrated away, we cancel ourselves */
371         if (d->cpu != smp_processor_id()) {
372                 pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
373                                                 smp_processor_id());
374                 return;
375         }
376
377         if (i == be64_to_cpu(vpa->dtl_idx))
378                 goto out;
379
380         while (i < be64_to_cpu(vpa->dtl_idx)) {
381                 dtle = *dtl;
382                 barrier();
383                 if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
384                         /* buffer has overflowed */
385                         pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
386                                 d->cpu,
387                                 be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
388                         i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
389                         dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
390                         continue;
391                 }
392                 update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
393                 ++i;
394                 ++dtl;
395                 if (dtl == dtl_end)
396                         dtl = local_paca->dispatch_log;
397         }
398
399         __this_cpu_write(dtl_entry_ridx, i);
400
401 out:
402         schedule_delayed_work_on(d->cpu, to_delayed_work(work),
403                                         HZ / vcpudispatch_stats_freq);
404 }
405
406 static int dtl_worker_online(unsigned int cpu)
407 {
408         struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
409
410         memset(d, 0, sizeof(*d));
411         INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
412         d->cpu = cpu;
413
414 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
415         per_cpu(dtl_entry_ridx, cpu) = 0;
416         register_dtl_buffer(cpu);
417 #else
418         per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
419 #endif
420
421         schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
422         return 0;
423 }
424
425 static int dtl_worker_offline(unsigned int cpu)
426 {
427         struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
428
429         cancel_delayed_work_sync(&d->work);
430
431 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
432         unregister_dtl(get_hard_smp_processor_id(cpu));
433 #endif
434
435         return 0;
436 }
437
438 static void set_global_dtl_mask(u8 mask)
439 {
440         int cpu;
441
442         dtl_mask = mask;
443         for_each_present_cpu(cpu)
444                 lppaca_of(cpu).dtl_enable_mask = dtl_mask;
445 }
446
447 static void reset_global_dtl_mask(void)
448 {
449         int cpu;
450
451 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
452         dtl_mask = DTL_LOG_PREEMPT;
453 #else
454         dtl_mask = 0;
455 #endif
456         for_each_present_cpu(cpu)
457                 lppaca_of(cpu).dtl_enable_mask = dtl_mask;
458 }
459
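/*
 * Enable DTL processing: take dtl_access_lock for writing (failing with
 * -EBUSY if another DTL user holds it), enable full logging, allocate the
 * buffers and install the CPU hotplug callbacks that start a worker per CPU.
 */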
460 static int dtl_worker_enable(unsigned long *time_limit)
461 {
462         int rc = 0, state;
463
464         if (!write_trylock(&dtl_access_lock)) {
465                 rc = -EBUSY;
466                 goto out;
467         }
468
469         set_global_dtl_mask(DTL_LOG_ALL);
470
471         /* Set up DTL buffers and register them */
472         alloc_dtl_buffers(time_limit);
473
474         state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
475                                         dtl_worker_online, dtl_worker_offline);
476         if (state < 0) {
477                 pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
478                 free_dtl_buffers(time_limit);
479                 reset_global_dtl_mask();
480                 write_unlock(&dtl_access_lock);
481                 rc = -EINVAL;
482                 goto out;
483         }
484         dtl_worker_state = state;
485
486 out:
487         return rc;
488 }
489
490 static void dtl_worker_disable(unsigned long *time_limit)
491 {
492         cpuhp_remove_state(dtl_worker_state);
493         free_dtl_buffers(time_limit);
494         reset_global_dtl_mask();
495         write_unlock(&dtl_access_lock);
496 }
497
498 static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
499                 size_t count, loff_t *ppos)
500 {
501         unsigned long time_limit = jiffies + HZ;
502         struct vcpu_dispatch_data *disp;
503         int rc, cmd, cpu;
504         char buf[16];
505
506         if (count > 15)
507                 return -EINVAL;
508
509         if (copy_from_user(buf, p, count))
510                 return -EFAULT;
511
512         buf[count] = 0;
513         rc = kstrtoint(buf, 0, &cmd);
514         if (rc || cmd < 0 || cmd > 1) {
515                 pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
516                 return rc ? rc : -EINVAL;
517         }
518
519         mutex_lock(&dtl_enable_mutex);
520
521         if ((cmd == 0 && !vcpudispatch_stats_on) ||
522                         (cmd == 1 && vcpudispatch_stats_on))
523                 goto out;
524
525         if (cmd) {
526                 rc = init_cpu_associativity();
527                 if (rc)
528                         goto out;
529
530                 for_each_possible_cpu(cpu) {
531                         disp = per_cpu_ptr(&vcpu_disp_data, cpu);
532                         memset(disp, 0, sizeof(*disp));
533                         disp->last_disp_cpu = -1;
534                 }
535
536                 rc = dtl_worker_enable(&time_limit);
537                 if (rc) {
538                         destroy_cpu_associativity();
539                         goto out;
540                 }
541         } else {
542                 dtl_worker_disable(&time_limit);
543                 destroy_cpu_associativity();
544         }
545
546         vcpudispatch_stats_on = cmd;
547
548 out:
549         mutex_unlock(&dtl_enable_mutex);
550         if (rc)
551                 return rc;
552         return count;
553 }
554
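/*
 * /proc/powerpc/vcpudispatch_stats output: one line per online CPU of the
 * form "cpuN total same_cpu same_chip diff_chip far_chip numa_home
 * numa_remote numa_far", or just "off" when collection is disabled.
 */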
555 static int vcpudispatch_stats_display(struct seq_file *p, void *v)
556 {
557         int cpu;
558         struct vcpu_dispatch_data *disp;
559
560         if (!vcpudispatch_stats_on) {
561                 seq_puts(p, "off\n");
562                 return 0;
563         }
564
565         for_each_online_cpu(cpu) {
566                 disp = per_cpu_ptr(&vcpu_disp_data, cpu);
567                 seq_printf(p, "cpu%d", cpu);
568                 seq_put_decimal_ull(p, " ", disp->total_disp);
569                 seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
570                 seq_put_decimal_ull(p, " ", disp->same_chip_disp);
571                 seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
572                 seq_put_decimal_ull(p, " ", disp->far_chip_disp);
573                 seq_put_decimal_ull(p, " ", disp->numa_home_disp);
574                 seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
575                 seq_put_decimal_ull(p, " ", disp->numa_far_disp);
576                 seq_puts(p, "\n");
577         }
578
579         return 0;
580 }
581
582 static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
583 {
584         return single_open(file, vcpudispatch_stats_display, NULL);
585 }
586
587 static const struct proc_ops vcpudispatch_stats_proc_ops = {
588         .proc_open      = vcpudispatch_stats_open,
589         .proc_read      = seq_read,
590         .proc_write     = vcpudispatch_stats_write,
591         .proc_lseek     = seq_lseek,
592         .proc_release   = single_release,
593 };
594
595 static ssize_t vcpudispatch_stats_freq_write(struct file *file,
596                 const char __user *p, size_t count, loff_t *ppos)
597 {
598         int rc, freq;
599         char buf[16];
600
601         if (count > 15)
602                 return -EINVAL;
603
604         if (copy_from_user(buf, p, count))
605                 return -EFAULT;
606
607         buf[count] = 0;
608         rc = kstrtoint(buf, 0, &freq);
609         if (rc || freq < 1 || freq > HZ) {
610                 pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
611                                 HZ);
612                 return rc ? rc : -EINVAL;
613         }
614
615         vcpudispatch_stats_freq = freq;
616
617         return count;
618 }
619
620 static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
621 {
622         seq_printf(p, "%d\n", vcpudispatch_stats_freq);
623         return 0;
624 }
625
626 static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
627 {
628         return single_open(file, vcpudispatch_stats_freq_display, NULL);
629 }
630
631 static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
632         .proc_open      = vcpudispatch_stats_freq_open,
633         .proc_read      = seq_read,
634         .proc_write     = vcpudispatch_stats_freq_write,
635         .proc_lseek     = seq_lseek,
636         .proc_release   = single_release,
637 };
638
639 static int __init vcpudispatch_stats_procfs_init(void)
640 {
641         /*
642          * Avoid smp_processor_id while preemptible. All CPUs should have
643          * the same value for lppaca_shared_proc.
644          */
645         preempt_disable();
646         if (!lppaca_shared_proc(get_lppaca())) {
647                 preempt_enable();
648                 return 0;
649         }
650         preempt_enable();
651
652         if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
653                                         &vcpudispatch_stats_proc_ops))
654                 pr_err("vcpudispatch_stats: error creating procfs file\n");
655         else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
656                                         &vcpudispatch_stats_freq_proc_ops))
657                 pr_err("vcpudispatch_stats_freq: error creating procfs file\n");
658
659         return 0;
660 }
661
662 machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
663 #endif /* CONFIG_PPC_SPLPAR */
664
665 void vpa_init(int cpu)
666 {
667         int hwcpu = get_hard_smp_processor_id(cpu);
668         unsigned long addr;
669         long ret;
670
671         /*
672          * The spec says it "may be problematic" if CPU x registers the VPA of
673          * CPU y. We should never do that, but warn if we ever do.
674          */
675         WARN_ON(cpu != smp_processor_id());
676
677         if (cpu_has_feature(CPU_FTR_ALTIVEC))
678                 lppaca_of(cpu).vmxregs_in_use = 1;
679
680         if (cpu_has_feature(CPU_FTR_ARCH_207S))
681                 lppaca_of(cpu).ebb_regs_in_use = 1;
682
683         addr = __pa(&lppaca_of(cpu));
684         ret = register_vpa(hwcpu, addr);
685
686         if (ret) {
687                 pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
688                        "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
689                 return;
690         }
691
692 #ifdef CONFIG_PPC_BOOK3S_64
693         /*
694          * PAPR says this feature is SLB-Buffer but firmware never
695           * reports that.  All SPLPARs support the SLB shadow buffer.
696          */
697         if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
698                 addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
699                 ret = register_slb_shadow(hwcpu, addr);
700                 if (ret)
701                         pr_err("WARNING: SLB shadow buffer registration for "
702                                "cpu %d (hw %d) of area %lx failed with %ld\n",
703                                cpu, hwcpu, addr, ret);
704         }
705 #endif /* CONFIG_PPC_BOOK3S_64 */
706
707         /*
708          * Register dispatch trace log, if one has been allocated.
709          */
710         register_dtl_buffer(cpu);
711 }
712
713 #ifdef CONFIG_PPC_BOOK3S_64
714
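/*
 * Insert an HPTE through the hypervisor (H_ENTER via plpar_pte_enter()).
 * Returns the slot number within the group (with the secondary-hash bit in
 * bit 3), -1 if the group is full, or -2 on any other failure.
 */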
715 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
716                                      unsigned long vpn, unsigned long pa,
717                                      unsigned long rflags, unsigned long vflags,
718                                      int psize, int apsize, int ssize)
719 {
720         unsigned long lpar_rc;
721         unsigned long flags;
722         unsigned long slot;
723         unsigned long hpte_v, hpte_r;
724
725         if (!(vflags & HPTE_V_BOLTED))
726                 pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
727                          "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
728                          hpte_group, vpn,  pa, rflags, vflags, psize);
729
730         hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
731         hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
732
733         if (!(vflags & HPTE_V_BOLTED))
734                 pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
735
736         /* Now fill in the actual HPTE */
737         /* Set CEC cookie to 0         */
738         /* Zero page = 0               */
739         /* I-cache Invalidate = 0      */
740         /* I-cache synchronize = 0     */
741         /* Exact = 0                   */
742         flags = 0;
743
744         if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
745                 flags |= H_COALESCE_CAND;
746
747         lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
748         if (unlikely(lpar_rc == H_PTEG_FULL)) {
749                 pr_devel("Hash table group is full\n");
750                 return -1;
751         }
752
753         /*
754          * Since we try to ioremap PHBs we don't own, the pte insert
755          * will fail. However we must catch the failure in hash_page
756          * or we will loop forever, so return -2 in this case.
757          */
758         if (unlikely(lpar_rc != H_SUCCESS)) {
759                 pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
760                 return -2;
761         }
762         if (!(vflags & HPTE_V_BOLTED))
763                 pr_devel(" -> slot: %lu\n", slot & 7);
764
765         /* Because of iSeries, we have to pass down the secondary
766          * bucket bit here as well
767          */
768         return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
769 }
770
771 static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
772
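/*
 * Try to evict a non-bolted entry from the given HPTE group, starting at a
 * pseudo-random slot.  Returns a non-negative value on success, or -1 if
 * every slot in the group is bolted.
 */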
773 static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
774 {
775         unsigned long slot_offset;
776         unsigned long lpar_rc;
777         int i;
778         unsigned long dummy1, dummy2;
779
780         /* pick a random slot to start at */
781         slot_offset = mftb() & 0x7;
782
783         for (i = 0; i < HPTES_PER_GROUP; i++) {
784
785                 /* don't remove a bolted entry */
786                 lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
787                                            HPTE_V_BOLTED, &dummy1, &dummy2);
788                 if (lpar_rc == H_SUCCESS)
789                         return i;
790
791                 /*
792                  * The test for adjunct partition is performed before the
793                  * ANDCOND test.  H_RESOURCE may be returned, so we need to
794                  * check for that as well.
795                  */
796                 BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
797
798                 slot_offset++;
799                 slot_offset &= 0x7;
800         }
801
802         return -1;
803 }
804
805 /* Called during kexec sequence with MMU off */
806 static notrace void manual_hpte_clear_all(void)
807 {
808         unsigned long size_bytes = 1UL << ppc64_pft_size;
809         unsigned long hpte_count = size_bytes >> 4;
810         struct {
811                 unsigned long pteh;
812                 unsigned long ptel;
813         } ptes[4];
814         long lpar_rc;
815         unsigned long i, j;
816
817         /* Read in batches of 4.
818          * Invalidate only valid entries that are not in the VRMA.
819          * hpte_count will be a multiple of 4.
820          */
821         for (i = 0; i < hpte_count; i += 4) {
822                 lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
823                 if (lpar_rc != H_SUCCESS) {
824                         pr_info("Failed to read hash page table at %ld err %ld\n",
825                                 i, lpar_rc);
826                         continue;
827                 }
828                 for (j = 0; j < 4; j++){
829                         if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
830                                 HPTE_V_VRMA_MASK)
831                                 continue;
832                         if (ptes[j].pteh & HPTE_V_VALID)
833                                 plpar_pte_remove_raw(0, i + j, 0,
834                                         &(ptes[j].pteh), &(ptes[j].ptel));
835                 }
836         }
837 }
838
839 /* Called during kexec sequence with MMU off */
840 static notrace int hcall_hpte_clear_all(void)
841 {
842         int rc;
843
844         do {
845                 rc = plpar_hcall_norets(H_CLEAR_HPT);
846         } while (rc == H_CONTINUE);
847
848         return rc;
849 }
850
851 /* Called during kexec sequence with MMU off */
852 static notrace void pseries_hpte_clear_all(void)
853 {
854         int rc;
855
856         rc = hcall_hpte_clear_all();
857         if (rc != H_SUCCESS)
858                 manual_hpte_clear_all();
859
860 #ifdef __LITTLE_ENDIAN__
861         /*
862          * Reset exceptions to big endian.
863          *
864          * FIXME this is a hack for kexec, we need to reset the exception
865          * endian before starting the new kernel and this is a convenient place
866          * to do it.
867          *
868          * This is also called on boot when a fadump happens. In that case we
869          * must not change the exception endian mode.
870          */
871         if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
872                 pseries_big_endian_exceptions();
873 #endif
874 }
875
876 /*
877  * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
878  * the low 3 bits of flags happen to line up.  So no transform is needed.
879  * We can probably optimize here and assume the high bits of newpp are
880  * already zero.  For now I am paranoid.
881  */
882 static long pSeries_lpar_hpte_updatepp(unsigned long slot,
883                                        unsigned long newpp,
884                                        unsigned long vpn,
885                                        int psize, int apsize,
886                                        int ssize, unsigned long inv_flags)
887 {
888         unsigned long lpar_rc;
889         unsigned long flags;
890         unsigned long want_v;
891
892         want_v = hpte_encode_avpn(vpn, psize, ssize);
893
894         flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
895         flags |= (newpp & HPTE_R_KEY_HI) >> 48;
896         if (mmu_has_feature(MMU_FTR_KERNEL_RO))
897                 /* Move pp0 into bit 8 (IBM 55) */
898                 flags |= (newpp & HPTE_R_PP0) >> 55;
899
900         pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
901                  want_v, slot, flags, psize);
902
903         lpar_rc = plpar_pte_protect(flags, slot, want_v);
904
905         if (lpar_rc == H_NOT_FOUND) {
906                 pr_devel("not found !\n");
907                 return -1;
908         }
909
910         pr_devel("ok\n");
911
912         BUG_ON(lpar_rc != H_SUCCESS);
913
914         return 0;
915 }
916
917 static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
918 {
919         long lpar_rc;
920         unsigned long i, j;
921         struct {
922                 unsigned long pteh;
923                 unsigned long ptel;
924         } ptes[4];
925
926         for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
927
928                 lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
929                 if (lpar_rc != H_SUCCESS) {
930                         pr_info("Failed to read hash page table at %ld err %ld\n",
931                                 hpte_group, lpar_rc);
932                         continue;
933                 }
934
935                 for (j = 0; j < 4; j++) {
936                         if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
937                             (ptes[j].pteh & HPTE_V_VALID))
938                                 return i + j;
939                 }
940         }
941
942         return -1;
943 }
944
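/*
 * Locate the HPTE for @vpn: search the primary hash group first, then the
 * secondary.  Returns the global slot number, or -1 if no match was found.
 */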
945 static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
946 {
947         long slot;
948         unsigned long hash;
949         unsigned long want_v;
950         unsigned long hpte_group;
951
952         hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
953         want_v = hpte_encode_avpn(vpn, psize, ssize);
954
955         /*
956           * We try to keep bolted entries always in the primary hash,
957           * but in some cases we can find them in the secondary too.
958          */
959         hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
960         slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
961         if (slot < 0) {
962                 /* Try in secondary */
963                 hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
964                 slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
965                 if (slot < 0)
966                         return -1;
967         }
968         return hpte_group + slot;
969 }
970
971 static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
972                                              unsigned long ea,
973                                              int psize, int ssize)
974 {
975         unsigned long vpn;
976         unsigned long lpar_rc, slot, vsid, flags;
977
978         vsid = get_kernel_vsid(ea, ssize);
979         vpn = hpt_vpn(ea, vsid, ssize);
980
981         slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
982         BUG_ON(slot == -1);
983
984         flags = newpp & (HPTE_R_PP | HPTE_R_N);
985         if (mmu_has_feature(MMU_FTR_KERNEL_RO))
986                 /* Move pp0 into bit 8 (IBM 55) */
987                 flags |= (newpp & HPTE_R_PP0) >> 55;
988
989         flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
990
991         lpar_rc = plpar_pte_protect(flags, slot, 0);
992
993         BUG_ON(lpar_rc != H_SUCCESS);
994 }
995
996 static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
997                                          int psize, int apsize,
998                                          int ssize, int local)
999 {
1000         unsigned long want_v;
1001         unsigned long lpar_rc;
1002         unsigned long dummy1, dummy2;
1003
1004         pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
1005                  slot, vpn, psize, local);
1006
1007         want_v = hpte_encode_avpn(vpn, psize, ssize);
1008         lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
1009         if (lpar_rc == H_NOT_FOUND)
1010                 return;
1011
1012         BUG_ON(lpar_rc != H_SUCCESS);
1013 }
1014
1015
1016 /*
1017  * As defined in PAPR section 14.5.4.1.8.
1018  * The control mask doesn't include the returned reference and change bit from
1019  * the processed PTE.
1020  */
1021 #define HBLKR_AVPN              0x0100000000000000UL
1022 #define HBLKR_CTRL_MASK         0xf800000000000000UL
1023 #define HBLKR_CTRL_SUCCESS      0x8000000000000000UL
1024 #define HBLKR_CTRL_ERRNOTFOUND  0x8800000000000000UL
1025 #define HBLKR_CTRL_ERRBUSY      0xa000000000000000UL
1026
1027 /*
1028  * Return true if this block size is supported for the specified segment
1029  * base page size and actual page size.
1030  *
1031  * Currently, only a block size of 8 is supported.
1032  */
1033 static inline bool is_supported_hlbkrm(int bpsize, int psize)
1034 {
1035         return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
1036 }
1037
1038 /**
1039  * H_BLOCK_REMOVE caller.
1040  * @idx should point to the latest @param entry set with a PTEX.
1041  * If a PTE cannot be processed because another CPU has already locked that
1042  * group, those entries are put back in @param starting at index 1.
1043  * If entries have to be retried and @retry_busy is set to true, these entries
1044  * are retried until success. If @retry_busy is set to false, the return value
1045  * is the number of entries yet to be processed.
1046  */
1047 static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
1048                                        bool retry_busy)
1049 {
1050         unsigned long i, rc, new_idx;
1051         unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
1052
1053         if (idx < 2) {
1054                 pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
1055                 return 0;
1056         }
1057 again:
1058         new_idx = 0;
1059         if (idx > PLPAR_HCALL9_BUFSIZE) {
1060                 pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
1061                 idx = PLPAR_HCALL9_BUFSIZE;
1062         } else if (idx < PLPAR_HCALL9_BUFSIZE)
1063                 param[idx] = HBR_END;
1064
1065         rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
1066                           param[0], /* AVA */
1067                           param[1],  param[2],  param[3],  param[4], /* TS0-7 */
1068                           param[5],  param[6],  param[7],  param[8]);
1069         if (rc == H_SUCCESS)
1070                 return 0;
1071
1072         BUG_ON(rc != H_PARTIAL);
1073
1074         /* Check that the unprocessed entries were 'not found' or 'busy' */
1075         for (i = 0; i < idx-1; i++) {
1076                 unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;
1077
1078                 if (ctrl == HBLKR_CTRL_ERRBUSY) {
1079                         param[++new_idx] = param[i+1];
1080                         continue;
1081                 }
1082
1083                 BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
1084                        && ctrl != HBLKR_CTRL_ERRNOTFOUND);
1085         }
1086
1087         /*
1088           * If some entries were found busy, retry them if requested,
1089           * or if all the entries have to be retried.
1090          */
1091         if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
1092                 idx = new_idx + 1;
1093                 goto again;
1094         }
1095
1096         return new_idx;
1097 }
1098
1099 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1100 /*
1101  * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
1102  * to make sure that we avoid bouncing the hypervisor tlbie lock.
1103  */
1104 #define PPC64_HUGE_HPTE_BATCH 12
1105
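/*
 * Invalidate @count huge page HPTEs using H_BLOCK_REMOVE, grouping the slots
 * that fall within the same naturally aligned 8-page virtual address block
 * into a single hcall.
 */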
1106 static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
1107                                       int count, int psize, int ssize)
1108 {
1109         unsigned long param[PLPAR_HCALL9_BUFSIZE];
1110         unsigned long shift, current_vpgb, vpgb;
1111         int i, pix = 0;
1112
1113         shift = mmu_psize_defs[psize].shift;
1114
1115         for (i = 0; i < count; i++) {
1116                 /*
1117                  * Shift 3 more bits to the right to get an
1118                  * 8-page aligned virtual address.
1119                  */
1120                 vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
1121                 if (!pix || vpgb != current_vpgb) {
1122                         /*
1123                          * Need to start a new 8-page block; flush
1124                          * the current one if needed.
1125                          */
1126                         if (pix)
1127                                 (void)call_block_remove(pix, param, true);
1128                         current_vpgb = vpgb;
1129                         param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
1130                         pix = 1;
1131                 }
1132
1133                 param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
1134                 if (pix == PLPAR_HCALL9_BUFSIZE) {
1135                         pix = call_block_remove(pix, param, false);
1136                         /*
1137                          * pix = 0 means that all the entries were
1138                          * removed; we can start a new block.
1139                          * Otherwise, this means that there are entries
1140                          * to retry, and pix points to the latest one, so
1141                          * we should increment it and try to continue
1142                          * the same block.
1143                          */
1144                         if (pix)
1145                                 pix++;
1146                 }
1147         }
1148         if (pix)
1149                 (void)call_block_remove(pix, param, true);
1150 }
1151
1152 static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
1153                                      int count, int psize, int ssize)
1154 {
1155         unsigned long param[PLPAR_HCALL9_BUFSIZE];
1156         int i = 0, pix = 0, rc;
1157
1158         for (i = 0; i < count; i++) {
1159
1160                 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
1161                         pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
1162                                                      ssize, 0);
1163                 } else {
1164                         param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
1165                         param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
1166                         pix += 2;
1167                         if (pix == 8) {
1168                                 rc = plpar_hcall9(H_BULK_REMOVE, param,
1169                                                   param[0], param[1], param[2],
1170                                                   param[3], param[4], param[5],
1171                                                   param[6], param[7]);
1172                                 BUG_ON(rc != H_SUCCESS);
1173                                 pix = 0;
1174                         }
1175                 }
1176         }
1177         if (pix) {
1178                 param[pix] = HBR_END;
1179                 rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
1180                                   param[2], param[3], param[4], param[5],
1181                                   param[6], param[7]);
1182                 BUG_ON(rc != H_SUCCESS);
1183         }
1184 }
1185
1186 static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
1187                                                       unsigned long *vpn,
1188                                                       int count, int psize,
1189                                                       int ssize)
1190 {
1191         unsigned long flags = 0;
1192         int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
1193
1194         if (lock_tlbie)
1195                 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
1196
1197         /* Assuming THP size is 16M */
1198         if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
1199                 hugepage_block_invalidate(slot, vpn, count, psize, ssize);
1200         else
1201                 hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);
1202
1203         if (lock_tlbie)
1204                 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
1205 }
1206
1207 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
1208                                              unsigned long addr,
1209                                              unsigned char *hpte_slot_array,
1210                                              int psize, int ssize, int local)
1211 {
1212         int i, index = 0;
1213         unsigned long s_addr = addr;
1214         unsigned int max_hpte_count, valid;
1215         unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
1216         unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
1217         unsigned long shift, hidx, vpn = 0, hash, slot;
1218
1219         shift = mmu_psize_defs[psize].shift;
1220         max_hpte_count = 1U << (PMD_SHIFT - shift);
1221
1222         for (i = 0; i < max_hpte_count; i++) {
1223                 valid = hpte_valid(hpte_slot_array, i);
1224                 if (!valid)
1225                         continue;
1226                 hidx =  hpte_hash_index(hpte_slot_array, i);
1227
1228                 /* get the vpn */
1229                 addr = s_addr + (i * (1ul << shift));
1230                 vpn = hpt_vpn(addr, vsid, ssize);
1231                 hash = hpt_hash(vpn, shift, ssize);
1232                 if (hidx & _PTEIDX_SECONDARY)
1233                         hash = ~hash;
1234
1235                 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1236                 slot += hidx & _PTEIDX_GROUP_IX;
1237
1238                 slot_array[index] = slot;
1239                 vpn_array[index] = vpn;
1240                 if (index == PPC64_HUGE_HPTE_BATCH - 1) {
1241                         /*
1242                          * Now do a bulk invalidate
1243                          */
1244                         __pSeries_lpar_hugepage_invalidate(slot_array,
1245                                                            vpn_array,
1246                                                            PPC64_HUGE_HPTE_BATCH,
1247                                                            psize, ssize);
1248                         index = 0;
1249                 } else
1250                         index++;
1251         }
1252         if (index)
1253                 __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
1254                                                    index, psize, ssize);
1255 }
1256 #else
1257 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
1258                                              unsigned long addr,
1259                                              unsigned char *hpte_slot_array,
1260                                              int psize, int ssize, int local)
1261 {
1262         WARN(1, "%s called without THP support\n", __func__);
1263 }
1264 #endif
1265
1266 static int pSeries_lpar_hpte_removebolted(unsigned long ea,
1267                                           int psize, int ssize)
1268 {
1269         unsigned long vpn;
1270         unsigned long slot, vsid;
1271
1272         vsid = get_kernel_vsid(ea, ssize);
1273         vpn = hpt_vpn(ea, vsid, ssize);
1274
1275         slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
1276         if (slot == -1)
1277                 return -ENOENT;
1278
1279         /*
1280          * lpar doesn't use the passed actual page size
1281          */
1282         pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
1283         return 0;
1284 }
1285
1286
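/*
 * Compute the global hash table slot for the subpage @index of @pte at @vpn,
 * taking the secondary-hash bit from the PTE's hidx value into account.
 */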
1287 static inline unsigned long compute_slot(real_pte_t pte,
1288                                          unsigned long vpn,
1289                                          unsigned long index,
1290                                          unsigned long shift,
1291                                          int ssize)
1292 {
1293         unsigned long slot, hash, hidx;
1294
1295         hash = hpt_hash(vpn, shift, ssize);
1296         hidx = __rpte_to_hidx(pte, index);
1297         if (hidx & _PTEIDX_SECONDARY)
1298                 hash = ~hash;
1299         slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1300         slot += hidx & _PTEIDX_GROUP_IX;
1301         return slot;
1302 }
1303
1304 /**
1305  * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
1306  * "all within the same naturally aligned 8 page virtual address block".
1307  */
1308 static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
1309                             unsigned long *param)
1310 {
1311         unsigned long vpn;
1312         unsigned long i, pix = 0;
1313         unsigned long index, shift, slot, current_vpgb, vpgb;
1314         real_pte_t pte;
1315         int psize, ssize;
1316
1317         psize = batch->psize;
1318         ssize = batch->ssize;
1319
1320         for (i = 0; i < number; i++) {
1321                 vpn = batch->vpn[i];
1322                 pte = batch->pte[i];
1323                 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1324                         /*
1325                          * Shift 3 more bits to the right to get an
1326                          * 8-page aligned virtual address.
1327                          */
1328                         vpgb = (vpn >> (shift - VPN_SHIFT + 3));
1329                         if (!pix || vpgb != current_vpgb) {
1330                                 /*
1331                                  * Need to start a new 8-page block; flush
1332                                  * the current one if needed.
1333                                  */
1334                                 if (pix)
1335                                         (void)call_block_remove(pix, param,
1336                                                                 true);
1337                                 current_vpgb = vpgb;
1338                                 param[0] = hpte_encode_avpn(vpn, psize,
1339                                                             ssize);
1340                                 pix = 1;
1341                         }
1342
1343                         slot = compute_slot(pte, vpn, index, shift, ssize);
1344                         param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;
1345
1346                         if (pix == PLPAR_HCALL9_BUFSIZE) {
1347                                 pix = call_block_remove(pix, param, false);
1348                                 /*
1349                                  * pix = 0 means that all the entries were
1350                                  * removed; we can start a new block.
1351                                  * Otherwise, this means that there are entries
1352                                  * to retry, and pix points to the latest one, so
1353                                  * we should increment it and try to continue
1354                                  * the same block.
1355                                  */
1356                                 if (pix)
1357                                         pix++;
1358                         }
1359                 } pte_iterate_hashed_end();
1360         }
1361
1362         if (pix)
1363                 (void)call_block_remove(pix, param, true);
1364 }
1365
1366 /*
1367  * TLB Block Invalidate Characteristics
1368  *
1369  * These characteristics define the size of the block the hcall H_BLOCK_REMOVE
1370  * is able to process for each (segment base page size, actual page size) pair.
1371  *
1372  * The ibm,get-system-parameter RTAS call returns a buffer with the
1373  * following layout:
1374  *
1375  * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
1376  * -----------------
1377  * TLB Block Invalidate Specifiers:
1378  * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
1379  * [ 1 byte Number of page sizes (N) that are supported for the specified
1380  *          TLB invalidate block size ]
1381  * [ 1 byte Encoded segment base page size and actual page size
1382  *          MSB=0 means 4k segment base page size and actual page size
1383  *          MSB=1 the penc value in mmu_psize_def ]
1384  * ...
1385  * -----------------
1386  * Next TLB Block Invalidate Specifiers...
1387  * -----------------
1388  * [ 0 ]
1389  */
1390 static inline void set_hblkrm_bloc_size(int bpsize, int psize,
1391                                         unsigned int block_size)
1392 {
1393         if (block_size > hblkrm_size[bpsize][psize])
1394                 hblkrm_size[bpsize][psize] = block_size;
1395 }
1396
1397 /*
1398  * Decode the Encoded segment base page size and actual page size.
1399  * PAPR specifies:
1400  *   - bit 7 is the L bit
1401  *   - bits 0-5 are the penc value
1402  * If the L bit is 0, this means a 4K segment base page size and actual page size;
1403  * otherwise the penc value should be read.
1404  */
1405 #define HBLKRM_L_MASK           0x80
1406 #define HBLKRM_PENC_MASK        0x3f
1407 static inline void __init check_lp_set_hblkrm(unsigned int lp,
1408                                               unsigned int block_size)
1409 {
1410         unsigned int bpsize, psize;
1411
1412         /* First, check the L bit, if not set, this means 4K */
1413         if ((lp & HBLKRM_L_MASK) == 0) {
1414                 set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
1415                 return;
1416         }
1417
1418         lp &= HBLKRM_PENC_MASK;
1419         for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
1420                 struct mmu_psize_def *def = &mmu_psize_defs[bpsize];
1421
1422                 for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
1423                         if (def->penc[psize] == lp) {
1424                                 set_hblkrm_bloc_size(bpsize, psize, block_size);
1425                                 return;
1426                         }
1427                 }
1428         }
1429 }
1430
1431 #define SPLPAR_TLB_BIC_TOKEN            50
1432
1433 /*
1434  * The size of the TLB Block Invalidate Characteristics is variable. But at the
1435  * maximum it will be the number of possible page sizes *2 + 10 bytes.
1436  * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
1437  * (128 bytes) for the buffer to get plenty of space.
1438  */
1439 #define SPLPAR_TLB_BIC_MAXLENGTH        128
1440
1441 void __init pseries_lpar_read_hblkrm_characteristics(void)
1442 {
1443         unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
1444         int call_status, len, idx, bpsize;
1445
1446         if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
1447                 return;
1448
1449         spin_lock(&rtas_data_buf_lock);
1450         memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
1451         call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
1452                                 NULL,
1453                                 SPLPAR_TLB_BIC_TOKEN,
1454                                 __pa(rtas_data_buf),
1455                                 RTAS_DATA_BUF_SIZE);
1456         memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
1457         local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
1458         spin_unlock(&rtas_data_buf_lock);
1459
1460         if (call_status != 0) {
1461                 pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
1462                         __FILE__, __func__, call_status);
1463                 return;
1464         }
1465
1466         /*
1467          * The first two (2) bytes of the data in the buffer are the length of
1468          * the returned data, not counting these first two (2) bytes.
1469          */
1470         len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
1471         if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
1472                 pr_warn("%s too large returned buffer %d", __func__, len);
1473                 return;
1474         }
1475
1476         idx = 2;
1477         while (idx < len) {
1478                 u8 block_shift = local_buffer[idx++];
1479                 u32 block_size;
1480                 unsigned int npsize;
1481
1482                 if (!block_shift)
1483                         break;
1484
1485                 block_size = 1 << block_shift;
1486
1487                 for (npsize = local_buffer[idx++];
1488                      npsize > 0 && idx < len; npsize--)
1489                         check_lp_set_hblkrm((unsigned int) local_buffer[idx++],
1490                                             block_size);
1491         }
1492
1493         for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
1494                 for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
1495                         if (hblkrm_size[bpsize][idx])
1496                                 pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d\n",
1497                                         bpsize, idx, hblkrm_size[bpsize][idx]);
1498 }
1499
1500 /*
1501  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
1502  * lock.
1503  */
1504 static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
1505 {
1506         unsigned long vpn;
1507         unsigned long i, pix, rc;
1508         unsigned long flags = 0;
1509         struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
1510         int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
1511         unsigned long param[PLPAR_HCALL9_BUFSIZE];
1512         unsigned long index, shift, slot;
1513         real_pte_t pte;
1514         int psize, ssize;
1515
1516         if (lock_tlbie)
1517                 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
1518
1519         if (is_supported_hlbkrm(batch->psize, batch->psize)) {
1520                 do_block_remove(number, batch, param);
1521                 goto out;
1522         }
1523
1524         psize = batch->psize;
1525         ssize = batch->ssize;
1526         pix = 0;
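        /*
         * Without H_BLOCK_REMOVE support, either invalidate one HPTE at a
         * time, or batch (slot, AVPN) pairs into param[] and flush four pairs
         * per H_BULK_REMOVE call; any leftover entries are terminated with
         * HBR_END and flushed after the loop.
         */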
1527         for (i = 0; i < number; i++) {
1528                 vpn = batch->vpn[i];
1529                 pte = batch->pte[i];
1530                 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1531                         slot = compute_slot(pte, vpn, index, shift, ssize);
1532                         if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
1533                                 /*
1534                                  * lpar doesn't use the passed actual page size
1535                                  */
1536                                 pSeries_lpar_hpte_invalidate(slot, vpn, psize,
1537                                                              0, ssize, local);
1538                         } else {
1539                                 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
1540                                 param[pix+1] = hpte_encode_avpn(vpn, psize,
1541                                                                 ssize);
1542                                 pix += 2;
1543                                 if (pix == 8) {
1544                                         rc = plpar_hcall9(H_BULK_REMOVE, param,
1545                                                 param[0], param[1], param[2],
1546                                                 param[3], param[4], param[5],
1547                                                 param[6], param[7]);
1548                                         BUG_ON(rc != H_SUCCESS);
1549                                         pix = 0;
1550                                 }
1551                         }
1552                 } pte_iterate_hashed_end();
1553         }
1554         if (pix) {
1555                 param[pix] = HBR_END;
1556                 rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
1557                                   param[2], param[3], param[4], param[5],
1558                                   param[6], param[7]);
1559                 BUG_ON(rc != H_SUCCESS);
1560         }
1561
1562 out:
1563         if (lock_tlbie)
1564                 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
1565 }
1566
1567 static int __init disable_bulk_remove(char *str)
1568 {
1569         if (strcmp(str, "off") == 0 &&
1570             firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
1571                 pr_info("Disabling BULK_REMOVE firmware feature");
1572                 powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
1573         }
1574         return 1;
1575 }
1576
1577 __setup("bulk_remove=", disable_bulk_remove);
1578
1579 #define HPT_RESIZE_TIMEOUT      10000 /* ms */
1580
1581 struct hpt_resize_state {
1582         unsigned long shift;
1583         int commit_rc;
1584 };
1585
1586 static int pseries_lpar_resize_hpt_commit(void *data)
1587 {
1588         struct hpt_resize_state *state = data;
1589
1590         state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
1591         if (state->commit_rc != H_SUCCESS)
1592                 return -EIO;
1593
1594         /* Hypervisor has transitioned the HTAB, update our globals */
1595         ppc64_pft_size = state->shift;
1596         htab_size_bytes = 1UL << ppc64_pft_size;
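        /* Each HPTE group is 128 bytes, hence the shift by 7 to count groups */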
1597         htab_hash_mask = (htab_size_bytes >> 7) - 1;
1598
1599         return 0;
1600 }
1601
1602 /*
1603  * Must be called in process context. The caller must hold the
1604  * cpus_lock.
1605  */
1606 static int pseries_lpar_resize_hpt(unsigned long shift)
1607 {
1608         struct hpt_resize_state state = {
1609                 .shift = shift,
1610                 .commit_rc = H_FUNCTION,
1611         };
1612         unsigned int delay, total_delay = 0;
1613         int rc;
1614         ktime_t t0, t1, t2;
1615
1616         might_sleep();
1617
1618         if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
1619                 return -ENODEV;
1620
1621         pr_info("Attempting to resize HPT to shift %lu\n", shift);
1622
1623         t0 = ktime_get();
1624
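        /*
         * H_RESIZE_HPT_PREPARE may keep returning a "long busy" hint while
         * the hypervisor prepares the new HPT; sleep for the suggested
         * interval and retry until HPT_RESIZE_TIMEOUT expires.
         */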
1625         rc = plpar_resize_hpt_prepare(0, shift);
1626         while (H_IS_LONG_BUSY(rc)) {
1627                 delay = get_longbusy_msecs(rc);
1628                 total_delay += delay;
1629                 if (total_delay > HPT_RESIZE_TIMEOUT) {
1630                         /* prepare with shift==0 cancels an in-progress resize */
1631                         rc = plpar_resize_hpt_prepare(0, 0);
1632                         if (rc != H_SUCCESS)
1633                                 pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
1634                                        rc);
1635                         return -ETIMEDOUT;
1636                 }
1637                 msleep(delay);
1638                 rc = plpar_resize_hpt_prepare(0, shift);
1639         }
1640
1641         switch (rc) {
1642         case H_SUCCESS:
1643                 /* Continue on */
1644                 break;
1645
1646         case H_PARAMETER:
1647                 pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
1648                 return -EINVAL;
1649         case H_RESOURCE:
1650                 pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
1651                 return -EPERM;
1652         default:
1653                 pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
1654                 return -EIO;
1655         }
1656
1657         t1 = ktime_get();
1658
1659         rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
1660                                      &state, NULL);
1661
1662         t2 = ktime_get();
1663
1664         if (rc != 0) {
1665                 switch (state.commit_rc) {
1666                 case H_PTEG_FULL:
1667                         return -ENOSPC;
1668
1669                 default:
1670                         pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
1671                                 state.commit_rc);
1672                         return -EIO;
1673                 }
1674         }
1675
1676         pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
1677                 shift, (long long) ktime_ms_delta(t1, t0),
1678                 (long long) ktime_ms_delta(t2, t1));
1679
1680         return 0;
1681 }
1682
1683 static int pseries_lpar_register_process_table(unsigned long base,
1684                         unsigned long page_size, unsigned long table_size)
1685 {
1686         long rc;
1687         unsigned long flags = 0;
1688
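        /*
         * Build the registration flags: PROC_TABLE_NEW when an actual table
         * is being registered (table_size != 0), PROC_TABLE_RADIX or
         * PROC_TABLE_HPT_SLB to select the translation mode, and
         * PROC_TABLE_GTSE when guest translation shootdown (tlbie) is in use.
         */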
1689         if (table_size)
1690                 flags |= PROC_TABLE_NEW;
1691         if (radix_enabled()) {
1692                 flags |= PROC_TABLE_RADIX;
1693                 if (mmu_has_feature(MMU_FTR_GTSE))
1694                         flags |= PROC_TABLE_GTSE;
1695         } else {
1696                 flags |= PROC_TABLE_HPT_SLB;
1697         }
1697         for (;;) {
1698                 rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
1699                                         page_size, table_size);
1700                 if (!H_IS_LONG_BUSY(rc))
1701                         break;
1702                 mdelay(get_longbusy_msecs(rc));
1703         }
1704         if (rc != H_SUCCESS) {
1705                 pr_err("Failed to register process table (rc=%ld)\n", rc);
1706                 BUG();
1707         }
1708         return rc;
1709 }
1710
1711 void __init hpte_init_pseries(void)
1712 {
1713         mmu_hash_ops.hpte_invalidate     = pSeries_lpar_hpte_invalidate;
1714         mmu_hash_ops.hpte_updatepp       = pSeries_lpar_hpte_updatepp;
1715         mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
1716         mmu_hash_ops.hpte_insert         = pSeries_lpar_hpte_insert;
1717         mmu_hash_ops.hpte_remove         = pSeries_lpar_hpte_remove;
1718         mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
1719         mmu_hash_ops.flush_hash_range    = pSeries_lpar_flush_hash_range;
1720         mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
1721         mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
1722
1723         if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
1724                 mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
1725
1726         /*
1727          * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
1728          * to inform the hypervisor that we wish to use the HPT.
1729          */
1730         if (cpu_has_feature(CPU_FTR_ARCH_300))
1731                 pseries_lpar_register_process_table(0, 0, 0);
1732 }
1733
1734 #ifdef CONFIG_PPC_RADIX_MMU
1735 void radix_init_pseries(void)
1736 {
1737         pr_info("Using radix MMU under hypervisor\n");
1738
1739         pseries_lpar_register_process_table(__pa(process_tb),
1740                                                 0, PRTB_SIZE_SHIFT - 12);
1741 }
1742 #endif
1743
1744 #ifdef CONFIG_PPC_SMLPAR
1745 #define CMO_FREE_HINT_DEFAULT 1
1746 static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;
1747
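/*
 * Parse the "cmo_free_hint=" command line option: "no" or "off" disables
 * hinting freed pages to the hypervisor; any other value keeps hinting
 * enabled.
 */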
1748 static int __init cmo_free_hint(char *str)
1749 {
1750         char *parm;
1751         parm = strstrip(str);
1752
1753         if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
1754                 pr_info("%s: CMO free page hinting is not active.\n", __func__);
1755                 cmo_free_hint_flag = 0;
1756                 return 1;
1757         }
1758
1759         cmo_free_hint_flag = 1;
1760         pr_info("%s: CMO free page hinting is active.\n", __func__);
1761
1762         if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
1763                 return 1;
1764
1765         return 0;
1766 }
1767
1768 __setup("cmo_free_hint=", cmo_free_hint);
1769
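/*
 * Tell the hypervisor, one CMO page at a time, that the 2^order pages
 * starting at @page have changed state (e.g. H_PAGE_SET_UNUSED when the
 * kernel frees them).
 */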
1770 static void pSeries_set_page_state(struct page *page, int order,
1771                                    unsigned long state)
1772 {
1773         int i, j;
1774         unsigned long cmo_page_sz, addr;
1775
1776         cmo_page_sz = cmo_get_page_size();
1777         addr = __pa((unsigned long)page_address(page));
1778
1779         for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
1780                 for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
1781                         plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
1782         }
1783 }
1784
1785 void arch_free_page(struct page *page, int order)
1786 {
1787         if (radix_enabled())
1788                 return;
1789         if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
1790                 return;
1791
1792         pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
1793 }
1794 EXPORT_SYMBOL(arch_free_page);
1795
1796 #endif /* CONFIG_PPC_SMLPAR */
1797 #endif /* CONFIG_PPC_BOOK3S_64 */
1798
1799 #ifdef CONFIG_TRACEPOINTS
1800 #ifdef CONFIG_JUMP_LABEL
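/*
 * With jump labels, hcall_tracepoint_key lets the hcall tracing check be
 * patched in and out as tracepoints are registered and unregistered,
 * rather than testing a refcount on every hcall (see the #else branch).
 */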
1801 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
1802
1803 int hcall_tracepoint_regfunc(void)
1804 {
1805         static_key_slow_inc(&hcall_tracepoint_key);
1806         return 0;
1807 }
1808
1809 void hcall_tracepoint_unregfunc(void)
1810 {
1811         static_key_slow_dec(&hcall_tracepoint_key);
1812 }
1813 #else
1814 /*
1815  * We optimise our hcall path by placing hcall_tracepoint_refcount
1816  * directly in the TOC so we can check if the hcall tracepoints are
1817  * enabled via a single load.
1818  */
1819
1820 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
1821 extern long hcall_tracepoint_refcount;
1822
1823 int hcall_tracepoint_regfunc(void)
1824 {
1825         hcall_tracepoint_refcount++;
1826         return 0;
1827 }
1828
1829 void hcall_tracepoint_unregfunc(void)
1830 {
1831         hcall_tracepoint_refcount--;
1832 }
1833 #endif
1834
1835 /*
1836  * Keep track of hcall tracing depth and prevent recursion. Warn if any is
1837  * detected because it may indicate a problem. This will not catch all
1838  * problems with tracing code making hcalls, because the tracing might have
1839  * been invoked from a non-hcall, so the first hcall could recurse into it
1840  * without warning here, but this is better than nothing.
1841  *
1842  * Hcalls with specific problems being traced should use the _notrace
1843  * plpar_hcall variants.
1844  */
1845 static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
1846
1847
1848 notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
1849 {
1850         unsigned long flags;
1851         unsigned int *depth;
1852
1853         local_irq_save(flags);
1854
1855         depth = this_cpu_ptr(&hcall_trace_depth);
1856
1857         if (WARN_ON_ONCE(*depth))
1858                 goto out;
1859
1860         (*depth)++;
1861         preempt_disable();
1862         trace_hcall_entry(opcode, args);
1863         (*depth)--;
1864
1865 out:
1866         local_irq_restore(flags);
1867 }
1868
1869 notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
1870 {
1871         unsigned long flags;
1872         unsigned int *depth;
1873
1874         local_irq_save(flags);
1875
1876         depth = this_cpu_ptr(&hcall_trace_depth);
1877
1878         if (*depth) /* Don't warn again on the way out */
1879                 goto out;
1880
1881         (*depth)++;
1882         trace_hcall_exit(opcode, retval, retbuf);
1883         preempt_enable();
1884         (*depth)--;
1885
1886 out:
1887         local_irq_restore(flags);
1888 }
1889 #endif
1890
1891 /**
1892  * h_get_mpp() - H_GET_MPP hcall returns info in 7 parms.
1893  */
1895 int h_get_mpp(struct hvcall_mpp_data *mpp_data)
1896 {
1897         int rc;
1898         unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
1899
1900         rc = plpar_hcall9(H_GET_MPP, retbuf);
1901
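        /*
         * Unpack the seven returned words; the group/pool numbers and the
         * memory weights are packed into sub-fields of retbuf[2] and
         * retbuf[3].
         */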
1902         mpp_data->entitled_mem = retbuf[0];
1903         mpp_data->mapped_mem = retbuf[1];
1904
1905         mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
1906         mpp_data->pool_num = retbuf[2] & 0xffff;
1907
1908         mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
1909         mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
1910         mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
1911
1912         mpp_data->pool_size = retbuf[4];
1913         mpp_data->loan_request = retbuf[5];
1914         mpp_data->backing_mem = retbuf[6];
1915
1916         return rc;
1917 }
1918 EXPORT_SYMBOL(h_get_mpp);
1919
1920 int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
1921 {
1922         int rc;
1923         unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
1924
1925         rc = plpar_hcall9(H_GET_MPP_X, retbuf);
1926
1927         mpp_x_data->coalesced_bytes = retbuf[0];
1928         mpp_x_data->pool_coalesced_bytes = retbuf[1];
1929         mpp_x_data->pool_purr_cycles = retbuf[2];
1930         mpp_x_data->pool_spurr_cycles = retbuf[3];
1931
1932         return rc;
1933 }
1934
1935 static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
1936 {
1937         unsigned long protovsid;
1938         unsigned long va_bits = VA_BITS;
1939         unsigned long modinv, vsid_modulus;
1940         unsigned long max_mod_inv, tmp_modinv;
1941
1942         if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
1943                 va_bits = 65;
1944
1945         if (ssize == MMU_SEGSIZE_256M) {
1946                 modinv = VSID_MULINV_256M;
1947                 vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
1948         } else {
1949                 modinv = VSID_MULINV_1T;
1950                 vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
1951         }
1952
1953         /*
1954          * vsid outside our range.
1955          */
1956         if (vsid >= vsid_modulus)
1957                 return 0;
1958
1959         /*
1960          * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
1961          * and vsid = (protovsid * x) % vsid_modulus, then we say:
1962          *   protovsid = (vsid * modinv) % vsid_modulus
1963          */
1964
1965         /* Check if (vsid * modinv) would overflow (63 bits) */
1966         max_mod_inv = 0x7fffffffffffffffull / vsid;
1967         if (modinv < max_mod_inv)
1968                 return (vsid * modinv) % vsid_modulus;
1969
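        /*
         * vsid * modinv would overflow 63 bits, so split modinv into
         * (modinv / max_mod_inv) and (modinv % max_mod_inv), reduce each
         * partial product modulo vsid_modulus and combine the results.
         */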
1970         tmp_modinv = modinv / max_mod_inv;
1971         modinv %= max_mod_inv;
1972
1973         protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
1974         protovsid = (protovsid + vsid * modinv) % vsid_modulus;
1975
1976         return protovsid;
1977 }
1978
1979 static int __init reserve_vrma_context_id(void)
1980 {
1981         unsigned long protovsid;
1982
1983         /*
1984          * Reserve context ids which map to reserved virtual addresses. For now
1985          * we only reserve the context id which maps to the VRMA VSID. We ignore
1986          * the addresses in "ibm,adjunct-virtual-addresses" because we don't
1987          * enable adjunct support via the "ibm,client-architecture-support"
1988          * interface.
1989          */
1990         protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
1991         hash__reserve_context_id(protovsid >> ESID_BITS_1T);
1992         return 0;
1993 }
1994 machine_device_initcall(pseries, reserve_vrma_context_id);
1995
1996 #ifdef CONFIG_DEBUG_FS
1997 /* debugfs file interface for vpa data */
1998 static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
1999                               loff_t *pos)
2000 {
2001         int cpu = (long)filp->private_data;
2002         struct lppaca *lppaca = &lppaca_of(cpu);
2003
2004         return simple_read_from_buffer(buf, len, pos, lppaca,
2005                                 sizeof(struct lppaca));
2006 }
2007
2008 static const struct file_operations vpa_fops = {
2009         .open           = simple_open,
2010         .read           = vpa_file_read,
2011         .llseek         = default_llseek,
2012 };
2013
2014 static int __init vpa_debugfs_init(void)
2015 {
2016         char name[16];
2017         long i;
2018         struct dentry *vpa_dir;
2019
2020         if (!firmware_has_feature(FW_FEATURE_SPLPAR))
2021                 return 0;
2022
2023         vpa_dir = debugfs_create_dir("vpa", arch_debugfs_dir);
2024
2025         /* set up the per-cpu vpa file */
2026         for_each_possible_cpu(i) {
2027                 sprintf(name, "cpu-%ld", i);
2028                 debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
2029         }
2030
2031         return 0;
2032 }
2033 machine_arch_initcall(pseries, vpa_debugfs_init);
2034 #endif /* CONFIG_DEBUG_FS */