// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 */

/* Enables debugging of low-level hash table routines - careful! */

#define pr_fmt(fmt) "lpar: " fmt
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/pgtable.h>
#include <linux/debugfs.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
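/*
 * Note: each H_BULK_REMOVE translation specifier is passed as a pair of
 * longwords, the first carrying the HBR_* control bits plus the PTEX,
 * the second the AVPN to match. See hugepage_bulk_invalidate() and
 * pSeries_lpar_flush_hash_range() below for how the pairs are built.
 */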
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
/*
 * H_BLOCK_REMOVE supported block size for this page size in a segment whose
 * base page size is that page size.
 *
 * The first index is the segment base page size, the second one is the actual
 * page size.
 */
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;

/*
 * Due to the involved complexity, and because the current hypervisor only
 * returns this value or 0, we limit H_BLOCK_REMOVE support to a block size
 * of 8.
 */
#define HBLKRM_SUPPORTED_BLOCK_SIZE 8
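/*
 * For illustration (derived from the code below): a 16M hugepage mapped in
 * a 16M segment is eligible for H_BLOCK_REMOVE when
 *
 *	hblkrm_size[MMU_PAGE_16M][MMU_PAGE_16M] == HBLKRM_SUPPORTED_BLOCK_SIZE
 *
 * which is exactly what is_supported_hlbkrm() below checks.
 */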
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static u8 dtl_mask = DTL_LOG_PREEMPT;
#else
static u8 dtl_mask;
#endif

void alloc_dtl_buffers(unsigned long *time_limit)
{
	int cpu;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
			pr_warn("Stolen time statistics will be unreliable\n");
#endif
			break;
		}

		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;

		if (time_limit && time_after(jiffies, *time_limit)) {
			*time_limit = jiffies + HZ;
		}
	}
}

void register_dtl_buffer(int cpu)
{
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;
	int hwcpu = get_hard_smp_processor_id(cpu);

	pp = paca_ptrs[cpu];
	dtl = pp->dispatch_log;
	if (dtl && dtl_mask) {
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
			       cpu, hwcpu, ret);

		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
	}
}
#ifdef CONFIG_PPC_SPLPAR
struct dtl_worker {
	struct delayed_work work;
	int cpu;
};

struct vcpu_dispatch_data {
	int last_disp_cpu;

	int total_disp;

	int same_cpu_disp;
	int same_chip_disp;
	int diff_chip_disp;
	int far_chip_disp;

	int numa_home_disp;
	int numa_remote_disp;
	int numa_far_disp;
};

/*
 * This represents the number of cpus in the hypervisor. Since there is no
 * architected way to discover the number of processors in the host, we
 * provision for dealing with NR_CPUS. This is currently 2048 by default, and
 * is sufficient for our purposes. This will need to be tweaked if
 * CONFIG_NR_CPUS is changed.
 */
#define NR_CPUS_H	NR_CPUS

DEFINE_RWLOCK(dtl_access_lock);
static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
static DEFINE_PER_CPU(u64, dtl_entry_ridx);
static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
static enum cpuhp_state dtl_worker_state;
static DEFINE_MUTEX(dtl_enable_mutex);
static int vcpudispatch_stats_on __read_mostly;
static int vcpudispatch_stats_freq = 50;
static __be32 *vcpu_associativity, *pcpu_associativity;
static void free_dtl_buffers(unsigned long *time_limit)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	int cpu;
	struct paca_struct *pp;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		if (!pp->dispatch_log)
			continue;
		kmem_cache_free(dtl_cache, pp->dispatch_log);
		pp->dispatch_log = NULL;
		pp->dispatch_log_end = NULL;

		if (time_limit && time_after(jiffies, *time_limit)) {
			*time_limit = jiffies + HZ;
		}
	}
#endif
}
static int init_cpu_associativity(void)
{
	vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
	pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
			VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);

	if (!vcpu_associativity || !pcpu_associativity) {
		pr_err("error allocating memory for associativity information\n");
		return -ENOMEM;
	}

	return 0;
}

static void destroy_cpu_associativity(void)
{
	kfree(vcpu_associativity);
	kfree(pcpu_associativity);
	vcpu_associativity = pcpu_associativity = NULL;
}
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
{
	__be32 *assoc;
	int rc = 0;

	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
	if (!assoc[0]) {
		rc = hcall_vphn(cpu, flag, &assoc[0]);
		if (rc)
			return NULL;
	}

	return assoc;
}

static __be32 *get_pcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
}

static __be32 *get_vcpu_associativity(int cpu)
{
	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
}

static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
{
	__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;

	if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
		return -EINVAL;

	last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
	cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
	if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
		return -EIO;

	return cpu_relative_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}

static int cpu_home_node_dispatch_distance(int disp_cpu)
{
	__be32 *disp_cpu_assoc, *vcpu_assoc;
	int vcpu_id = smp_processor_id();

	if (disp_cpu >= NR_CPUS_H) {
		pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
				disp_cpu, NR_CPUS_H);
		return -EINVAL;
	}

	disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
	vcpu_assoc = get_vcpu_associativity(vcpu_id);
	if (!disp_cpu_assoc || !vcpu_assoc)
		return -EIO;

	return cpu_relative_distance(disp_cpu_assoc, vcpu_assoc);
}
static void update_vcpu_disp_stat(int disp_cpu)
{
	struct vcpu_dispatch_data *disp;
	int distance;

	disp = this_cpu_ptr(&vcpu_disp_data);
	if (disp->last_disp_cpu == -1) {
		disp->last_disp_cpu = disp_cpu;
		return;
	}

	disp->total_disp++;

	if (disp->last_disp_cpu == disp_cpu ||
	    (cpu_first_thread_sibling(disp->last_disp_cpu) ==
	     cpu_first_thread_sibling(disp_cpu)))
		disp->same_cpu_disp++;
	else {
		distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
							  disp_cpu);
		if (distance < 0)
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
					smp_processor_id());
		else {
			switch (distance) {
			case 0:
				disp->same_chip_disp++;
				break;
			case 1:
				disp->diff_chip_disp++;
				break;
			case 2:
				disp->far_chip_disp++;
				break;
			default:
				pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
						smp_processor_id(),
						disp->last_disp_cpu,
						disp_cpu, distance);
			}
		}
	}

	distance = cpu_home_node_dispatch_distance(disp_cpu);
	if (distance < 0)
		pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
				smp_processor_id());
	else {
		switch (distance) {
		case 0:
			disp->numa_home_disp++;
			break;
		case 1:
			disp->numa_remote_disp++;
			break;
		case 2:
			disp->numa_far_disp++;
			break;
		default:
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
					smp_processor_id(), disp_cpu,
					distance);
		}
	}

	disp->last_disp_cpu = disp_cpu;
}
static void process_dtl_buffer(struct work_struct *work)
{
	struct dtl_entry dtle;
	u64 i = __this_cpu_read(dtl_entry_ridx);
	struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);

	if (!local_paca->dispatch_log)
		return;

	/* If we have been migrated away, cancel ourselves. */
	if (d->cpu != smp_processor_id()) {
		pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
				smp_processor_id());
		return;
	}

	if (i == be64_to_cpu(vpa->dtl_idx))
		goto out;

	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtle = *dtl;
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
				d->cpu,
				be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}

	__this_cpu_write(dtl_entry_ridx, i);

out:
	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
				 HZ / vcpudispatch_stats_freq);
}
static int dtl_worker_online(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	memset(d, 0, sizeof(*d));
	INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
	d->cpu = cpu;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	per_cpu(dtl_entry_ridx, cpu) = 0;
	register_dtl_buffer(cpu);
#else
	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
#endif

	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
	return 0;
}

static int dtl_worker_offline(unsigned int cpu)
{
	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);

	cancel_delayed_work_sync(&d->work);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unregister_dtl(get_hard_smp_processor_id(cpu));
#endif

	return 0;
}
static void set_global_dtl_mask(u8 mask)
{
	int cpu;

	dtl_mask = mask;
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}

static void reset_global_dtl_mask(void)
{
	int cpu;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	dtl_mask = DTL_LOG_PREEMPT;
#else
	dtl_mask = 0;
#endif
	for_each_present_cpu(cpu)
		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
static int dtl_worker_enable(unsigned long *time_limit)
{
	int rc = 0, state;

	if (!write_trylock(&dtl_access_lock)) {
		rc = -EBUSY;
		goto out;
	}

	set_global_dtl_mask(DTL_LOG_ALL);

	/* Set up the DTL buffers and register them. */
	alloc_dtl_buffers(time_limit);

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
				  dtl_worker_online, dtl_worker_offline);
	if (state < 0) {
		pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
		free_dtl_buffers(time_limit);
		reset_global_dtl_mask();
		write_unlock(&dtl_access_lock);
		rc = -EINVAL;
		goto out;
	}
	dtl_worker_state = state;

out:
	return rc;
}

static void dtl_worker_disable(unsigned long *time_limit)
{
	cpuhp_remove_state(dtl_worker_state);
	free_dtl_buffers(time_limit);
	reset_global_dtl_mask();
	write_unlock(&dtl_access_lock);
}
static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
		size_t count, loff_t *ppos)
{
	unsigned long time_limit = jiffies + HZ;
	struct vcpu_dispatch_data *disp;
	int rc, cmd, cpu;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &cmd);
	if (rc || cmd < 0 || cmd > 1) {
		pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
		return rc ? rc : -EINVAL;
	}

	mutex_lock(&dtl_enable_mutex);

	if ((cmd == 0 && !vcpudispatch_stats_on) ||
	    (cmd == 1 && vcpudispatch_stats_on))
		goto out;

	if (cmd) {
		rc = init_cpu_associativity();
		if (rc)
			goto out;

		for_each_possible_cpu(cpu) {
			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
			memset(disp, 0, sizeof(*disp));
			disp->last_disp_cpu = -1;
		}

		rc = dtl_worker_enable(&time_limit);
		if (rc) {
			destroy_cpu_associativity();
			goto out;
		}
	} else {
		dtl_worker_disable(&time_limit);
		destroy_cpu_associativity();
	}

	vcpudispatch_stats_on = cmd;

out:
	mutex_unlock(&dtl_enable_mutex);
	if (rc)
		return rc;
	return count;
}
static int vcpudispatch_stats_display(struct seq_file *p, void *v)
{
	int cpu;
	struct vcpu_dispatch_data *disp;

	if (!vcpudispatch_stats_on) {
		seq_puts(p, "off\n");
		return 0;
	}

	for_each_online_cpu(cpu) {
		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
		seq_printf(p, "cpu%d", cpu);
		seq_put_decimal_ull(p, " ", disp->total_disp);
		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
		seq_puts(p, "\n");
	}

	return 0;
}
static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_display, NULL);
}

static const struct proc_ops vcpudispatch_stats_proc_ops = {
	.proc_open	= vcpudispatch_stats_open,
	.proc_read	= seq_read,
	.proc_write	= vcpudispatch_stats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
static ssize_t vcpudispatch_stats_freq_write(struct file *file,
		const char __user *p, size_t count, loff_t *ppos)
{
	int rc, freq;
	char buf[16];

	if (count > 15)
		return -EINVAL;

	if (copy_from_user(buf, p, count))
		return -EFAULT;

	buf[count] = 0;
	rc = kstrtoint(buf, 0, &freq);
	if (rc || freq < 1 || freq > HZ) {
		pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
		       HZ);
		return rc ? rc : -EINVAL;
	}

	vcpudispatch_stats_freq = freq;

	return count;
}

static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
{
	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
	return 0;
}

static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
{
	return single_open(file, vcpudispatch_stats_freq_display, NULL);
}

static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
	.proc_open	= vcpudispatch_stats_freq_open,
	.proc_read	= seq_read,
	.proc_write	= vcpudispatch_stats_freq_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
static int __init vcpudispatch_stats_procfs_init(void)
{
	/*
	 * Avoid smp_processor_id() while preemptible. All CPUs should have
	 * the same value for lppaca_shared_proc.
	 */
	preempt_disable();
	if (!lppaca_shared_proc(get_lppaca())) {
		preempt_enable();
		return 0;
	}
	preempt_enable();

	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
			 &vcpudispatch_stats_proc_ops))
		pr_err("vcpudispatch_stats: error creating procfs file\n");
	else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
			      &vcpudispatch_stats_freq_proc_ops))
		pr_err("vcpudispatch_stats_freq: error creating procfs file\n");

	return 0;
}

machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
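/*
 * Example usage from userspace (illustrative numbers only; each line
 * carries the counters emitted by vcpudispatch_stats_display() above:
 * total, same_cpu, same_chip, diff_chip, far_chip, numa_home,
 * numa_remote, numa_far):
 *
 *	# echo 1 > /proc/powerpc/vcpudispatch_stats
 *	# cat /proc/powerpc/vcpudispatch_stats
 *	cpu0 2535 2367 0 42 126 2493 42 0
 *	cpu1 2103 1713 0 390 0 1713 390 0
 *	# echo 0 > /proc/powerpc/vcpudispatch_stats
 */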
#endif /* CONFIG_PPC_SPLPAR */

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);
	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * Register the dispatch trace log, if one has been allocated.
	 */
	register_dtl_buffer(cpu);
}
#ifdef CONFIG_PPC_BOOK3S_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		pr_devel("Hash table group is full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/*
	 * Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well.
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   HPTE_V_BOLTED, &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}
/* Called during kexec sequence with MMU off */
static notrace void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4; invalidate only valid entries not in the
	 * VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				i, lpar_rc);
			continue;
		}
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
			    HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}
/* Called during kexec sequence with MMU off */
static notrace int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

/* Called during kexec sequence with MMU off */
static notrace void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient
	 * place to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS) {
			pr_info("Failed to read hash page table at %ld err %ld\n",
				hpte_group, lpar_rc);
			continue;
		}

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/*
	 * We try to keep bolted entries always in the primary hash, but in
	 * some cases we can find them in the secondary too.
	 */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0) {
		/* Try in secondary */
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
		if (slot < 0)
			return -1;
	}

	return hpte_group + slot;
}
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & (HPTE_R_PP | HPTE_R_N);
	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
		/* Move pp0 into bit 8 (IBM 55) */
		flags |= (newpp & HPTE_R_PP0) >> 55;

	flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);

	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}
/*
 * As defined in PAPR section 14.5.4.1.8: the control mask doesn't include
 * the returned reference and change bits from the processed PTE.
 */
#define HBLKR_AVPN		0x0100000000000000UL
#define HBLKR_CTRL_MASK		0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS	0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND	0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY	0xa000000000000000UL

/*
 * Returns true if this block size is supported for the specified segment
 * base page size and actual page size.
 *
 * Currently, we only support a block size of 8.
 */
static inline bool is_supported_hlbkrm(int bpsize, int psize)
{
	return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
}
/*
 * H_BLOCK_REMOVE caller.
 * @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked its
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until success. If @retry_busy is set to false, the returned
 * value is the number of entries yet to be processed.
 */
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
				       bool retry_busy)
{
	unsigned long i, rc, new_idx;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (idx < 2) {
		pr_warn("Unexpected empty call to H_BLOCK_REMOVE\n");
		return 0;
	}
again:
	new_idx = 0;
	if (idx > PLPAR_HCALL9_BUFSIZE) {
		pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE\n", idx);
		idx = PLPAR_HCALL9_BUFSIZE;
	} else if (idx < PLPAR_HCALL9_BUFSIZE)
		param[idx] = HBR_END;

	rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
			  param[0], /* AVA */
			  param[1], param[2], param[3], param[4], /* TS0-7 */
			  param[5], param[6], param[7], param[8]);
	if (rc == H_SUCCESS)
		return 0;

	BUG_ON(rc != H_PARTIAL);

	/* Check that the unprocessed entries were 'not found' or 'busy' */
	for (i = 0; i < idx - 1; i++) {
		unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;

		if (ctrl == HBLKR_CTRL_ERRBUSY) {
			param[++new_idx] = param[i + 1];
			continue;
		}

		BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
		       && ctrl != HBLKR_CTRL_ERRNOTFOUND);
	}

	/*
	 * If there were entries found busy, retry these entries if requested,
	 * or if all the entries have to be retried.
	 */
	if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE - 1))) {
		idx = new_idx + 1;
		goto again;
	}

	return new_idx;
}
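/*
 * For reference, a sketch of the @param layout that do_block_remove() and
 * hugepage_block_invalidate() below build:
 *
 *	param[0]   - hpte_encode_avpn(vpn, psize, ssize), the AVPN of the
 *		     8-page block
 *	param[1..] - HBR_REQUEST | HBLKR_AVPN | slot, one entry per PTE
 *	param[idx] - HBR_END, when fewer than PLPAR_HCALL9_BUFSIZE entries
 *		     are used
 */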
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
				      int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long shift, current_vpgb, vpgb;
	int i, pix = 0;

	shift = mmu_psize_defs[psize].shift;

	for (i = 0; i < count; i++) {
		/*
		 * Shift 3 more bits to the right to get an
		 * 8-page aligned virtual address.
		 */
		vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
		if (!pix || vpgb != current_vpgb) {
			/*
			 * Need to start a new 8-page block, flush
			 * the current one if needed.
			 */
			if (pix)
				(void)call_block_remove(pix, param, true);
			current_vpgb = vpgb;
			param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix = 1;
		}

		param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
		if (pix == PLPAR_HCALL9_BUFSIZE) {
			pix = call_block_remove(pix, param, false);
			/*
			 * pix = 0 means that all the entries were
			 * removed, we can start a new block.
			 * Otherwise, this means that there are entries
			 * to retry, and pix points to the latest one, so
			 * we should increment it and try to continue.
			 */
			if (pix)
				pix++;
		}
	}
	if (pix)
		(void)call_block_remove(pix, param, true);
}
static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
				     int count, int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}
}
static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
						      unsigned long *vpn,
						      int count, int psize,
						      int ssize)
{
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	/* Assuming THP size is 16M */
	if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
		hugepage_block_invalidate(slot, vpn, count, psize, ssize);
	else
		hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);

	return 0;
}
static inline unsigned long compute_slot(real_pte_t pte,
					 unsigned long vpn,
					 unsigned long index,
					 unsigned long shift,
					 int ssize)
{
	unsigned long slot, hash, hidx;

	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(pte, index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}
/*
 * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
 * "all within the same naturally aligned 8 page virtual address block".
 */
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
			    unsigned long *param)
{
	unsigned long vpn;
	unsigned long i, pix = 0;
	unsigned long index, shift, slot, current_vpgb, vpgb;
	real_pte_t pte;
	int psize, ssize;

	psize = batch->psize;
	ssize = batch->ssize;

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/*
			 * Shift 3 more bits to the right to get an
			 * 8-page aligned virtual address.
			 */
			vpgb = (vpn >> (shift - VPN_SHIFT + 3));
			if (!pix || vpgb != current_vpgb) {
				/*
				 * Need to start a new 8-page block, flush
				 * the current one if needed.
				 */
				if (pix)
					(void)call_block_remove(pix, param,
								true);
				current_vpgb = vpgb;
				param[0] = hpte_encode_avpn(vpn, psize,
							    ssize);
				pix = 1;
			}

			slot = compute_slot(pte, vpn, index, shift, ssize);
			param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;

			if (pix == PLPAR_HCALL9_BUFSIZE) {
				pix = call_block_remove(pix, param, false);
				/*
				 * pix = 0 means that all the entries were
				 * removed, we can start a new block.
				 * Otherwise, this means that there are
				 * entries to retry, and pix points to the
				 * latest one, so we should increment it and
				 * try to continue.
				 */
				if (pix)
					pix++;
			}
		} pte_iterate_hashed_end();
	}

	if (pix)
		(void)call_block_remove(pix, param, true);
}
/*
 * TLB Block Invalidate Characteristics
 *
 * These characteristics define the size of the block the hcall H_BLOCK_REMOVE
 * is able to process for each pair of segment base page size and actual page
 * size.
 *
 * The ibm,get-system-parameter property returns a buffer with the
 * following data:
 *
 * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
 *
 * TLB Block Invalidate Specifiers:
 * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
 * [ 1 byte Number of page sizes (N) that are supported for the specified
 *          TLB invalidate block size ]
 * [ 1 byte Encoded segment base page size and actual page size
 *          MSB=0 means 4k segment base page size and actual page size
 *          MSB=1 the penc value in mmu_psize_def ]
 *          ...
 *
 * Next TLB Block Invalidate Specifiers...
 * ...
 */
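/*
 * Worked example (a sketch, not actual firmware output): a specifier made
 * of the three bytes 0x03 0x01 0x90 would mean a block size of 2^3 = 8
 * pages with one supported page size entry, encoded as 0x90, i.e. the L
 * bit (0x80) set and a penc value of 0x10.
 */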
static inline void set_hblkrm_bloc_size(int bpsize, int psize,
					unsigned int block_size)
{
	if (block_size > hblkrm_size[bpsize][psize])
		hblkrm_size[bpsize][psize] = block_size;
}

/*
 * Decode the Encoded segment base page size and actual page size.
 *   - bit 7 is the L bit
 *   - bits 0-5 are the penc value
 * If the L bit is 0, this means 4K segment base page size and actual page
 * size, otherwise the penc value should be read.
 */
#define HBLKRM_L_MASK		0x80
#define HBLKRM_PENC_MASK	0x3f
static inline void __init check_lp_set_hblkrm(unsigned int lp,
					      unsigned int block_size)
{
	unsigned int bpsize, psize;

	/* First, check the L bit, if not set, this means 4K */
	if ((lp & HBLKRM_L_MASK) == 0) {
		set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
		return;
	}

	lp &= HBLKRM_PENC_MASK;
	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
		struct mmu_psize_def *def = &mmu_psize_defs[bpsize];

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			if (def->penc[psize] == lp) {
				set_hblkrm_bloc_size(bpsize, psize, block_size);
				return;
			}
		}
	}
}
#define SPLPAR_TLB_BIC_TOKEN		50

/*
 * The size of the TLB Block Invalidate Characteristics is variable. But at
 * the maximum it will be the number of possible page sizes * 2 + 10 bytes.
 * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
 * (128 bytes) for the buffer to get plenty of space.
 */
#define SPLPAR_TLB_BIC_MAXLENGTH	128
void __init pseries_lpar_read_hblkrm_characteristics(void)
{
	unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
	int call_status, len, idx, bpsize;

	if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
		return;

	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				SPLPAR_TLB_BIC_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);
	memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
	local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
	spin_unlock(&rtas_data_buf_lock);

	if (call_status != 0) {
		pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
			__FILE__, __func__, call_status);
		return;
	}

	/*
	 * The first two (2) bytes of the data in the buffer are the length of
	 * the returned data, not counting these first two (2) bytes.
	 */
	len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
	if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
		pr_warn("%s too large returned buffer %d\n", __func__, len);
		return;
	}

	idx = 2;
	while (idx < len) {
		u8 block_shift = local_buffer[idx++];
		u32 block_size;
		unsigned int npsize;

		if (!block_shift)
			break;

		block_size = 1 << block_shift;

		for (npsize = local_buffer[idx++];
		     npsize > 0 && idx < len; npsize--)
			check_lp_set_hblkrm((unsigned int)local_buffer[idx++],
					    block_size);
	}

	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
			if (hblkrm_size[bpsize][idx])
				pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d\n",
					bpsize, idx, hblkrm_size[bpsize][idx]);
}
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long index, shift, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	if (is_supported_hlbkrm(batch->psize, batch->psize)) {
		do_block_remove(number, batch, param);
		goto out;
	}

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			slot = compute_slot(pte, vpn, index, shift, ssize);
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

out:
	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		pr_info("Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);
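/*
 * Usage note: booting with "bulk_remove=off" on the kernel command line
 * forces the one-by-one pSeries_lpar_hpte_invalidate() path above even
 * when the firmware advertises H_BULK_REMOVE.
 */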
#define HPT_RESIZE_TIMEOUT	10000 /* ms */

struct hpt_resize_state {
	unsigned long shift;
	int commit_rc;
};

static int pseries_lpar_resize_hpt_commit(void *data)
{
	struct hpt_resize_state *state = data;

	state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
	if (state->commit_rc != H_SUCCESS)
		return -EIO;

	/* Hypervisor has transitioned the HTAB, update our globals */
	ppc64_pft_size = state->shift;
	htab_size_bytes = 1UL << ppc64_pft_size;
	htab_hash_mask = (htab_size_bytes >> 7) - 1;
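	/* The shift by 7 above: each HPTE group is 8 HPTEs * 16 bytes = 128 bytes. */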
	return 0;
}

/*
 * Must be called in process context. The caller must hold the
 * cpus_lock.
 */
static int pseries_lpar_resize_hpt(unsigned long shift)
{
	struct hpt_resize_state state = {
		.shift = shift,
		.commit_rc = H_FUNCTION,
	};
	unsigned int delay, total_delay = 0;
	int rc;
	ktime_t t0, t1, t2;

	if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		return -ENODEV;

	pr_info("Attempting to resize HPT to shift %lu\n", shift);

	t0 = ktime_get();

	rc = plpar_resize_hpt_prepare(0, shift);
	while (H_IS_LONG_BUSY(rc)) {
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > HPT_RESIZE_TIMEOUT) {
			/* prepare with shift==0 cancels an in-progress resize */
			rc = plpar_resize_hpt_prepare(0, 0);
			if (rc != H_SUCCESS)
				pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
					rc);
			return -ETIMEDOUT;
		}
		msleep(delay);
		rc = plpar_resize_hpt_prepare(0, shift);
	}

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_PARAMETER:
		pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
		return -EINVAL;
	case H_RESOURCE:
		pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
		return -EPERM;
	default:
		pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
		return -EIO;
	}

	t1 = ktime_get();

	rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
				     &state, NULL);

	t2 = ktime_get();

	if (rc != 0) {
		switch (state.commit_rc) {
		case H_PTEG_FULL:
			return -ENOSPC;
		default:
			pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
				state.commit_rc);
			return -EIO;
		}
	}

	pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
		shift, (long long) ktime_ms_delta(t1, t0),
		(long long) ktime_ms_delta(t2, t1));

	return 0;
}
static int pseries_lpar_register_process_table(unsigned long base,
			unsigned long page_size, unsigned long table_size)
{
	long rc;
	unsigned long flags = 0;

	if (table_size)
		flags |= PROC_TABLE_NEW;
	if (radix_enabled()) {
		flags |= PROC_TABLE_RADIX;
		if (mmu_has_feature(MMU_FTR_GTSE))
			flags |= PROC_TABLE_GTSE;
	} else
		flags |= PROC_TABLE_HPT_SLB;

	for (;;) {
		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
					page_size, table_size);
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}

	if (rc != H_SUCCESS) {
		pr_err("Failed to register process table (rc=%ld)\n", rc);
		BUG();
	}

	return rc;
}
void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted	 = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;

	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;

	/*
	 * On POWER9, we need to do an H_REGISTER_PROC_TBL hcall
	 * to inform the hypervisor that we wish to use the HPT.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pseries_lpar_register_process_table(0, 0, 0);
}
#ifdef CONFIG_PPC_RADIX_MMU
void radix_init_pseries(void)
{
	pr_info("Using radix MMU under hypervisor\n");

	pseries_lpar_register_process_table(__pa(process_tb),
					    0, PRTB_SIZE_SHIFT - 12);
}
#endif
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;

	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		pr_info("%s: CMO free page hinting is not active.\n", __func__);
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	pr_info("%s: CMO free page hinting is active.\n", __func__);

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);
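/*
 * Usage note: "cmo_free_hint=no" (or "off") on the kernel command line
 * disables the H_PAGE_INIT free-page hinting done in
 * pSeries_set_page_state() below; any other value leaves it enabled.
 */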
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Keep track of hcall tracing depth and prevent recursion. Warn if any is
 * detected because it may indicate a problem. This will not catch all
 * problems with tracing code making hcalls, because the tracing might have
 * been invoked from a non-hcall, so the first hcall could recurse into it
 * without warning here, but this is better than nothing.
 *
 * Hcalls with specific problems being traced should use the _notrace
 * plpar_hcall variants.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (WARN_ON_ONCE(*depth))
		goto out;

	(*depth)++;
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth) /* Don't warn again on the way out */
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif /* CONFIG_TRACEPOINTS */
/*
 * H_GET_MPP hcall returns info in 7 parms.
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}
static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
{
	unsigned long protovsid;
	unsigned long va_bits = VA_BITS;
	unsigned long modinv, vsid_modulus;
	unsigned long max_mod_inv, tmp_modinv;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		modinv = VSID_MULINV_256M;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
	} else {
		modinv = VSID_MULINV_1T;
		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
	}

	/*
	 * vsid outside our range.
	 */
	if (vsid >= vsid_modulus)
		return 0;

	/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
	 *	protovsid = (vsid * modinv) % vsid_modulus
	 */

	/* Check if (vsid * modinv) overflows (63 bits) */
	max_mod_inv = 0x7fffffffffffffffull / vsid;
	if (modinv < max_mod_inv)
		return (vsid * modinv) % vsid_modulus;

	tmp_modinv = modinv / max_mod_inv;
	modinv %= max_mod_inv;

	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
	protovsid = (protovsid + vsid * modinv) % vsid_modulus;
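	/*
	 * The two steps above split modinv as
	 *	modinv = tmp_modinv * max_mod_inv + (modinv % max_mod_inv)
	 * so that vsid * modinv can be reduced term by term modulo
	 * vsid_modulus without any intermediate product exceeding 63 bits.
	 */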
	return protovsid;
}

static int __init reserve_vrma_context_id(void)
{
	unsigned long protovsid;

	/*
	 * Reserve context ids which map to reserved virtual addresses. For now
	 * we only reserve the context id which maps to the VRMA VSID. We ignore
	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
	 * enable adjunct support via the "ibm,client-architecture-support"
	 * interface.
	 */
	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
	return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *pos)
{
	int cpu = (long)filp->private_data;
	struct lppaca *lppaca = &lppaca_of(cpu);

	return simple_read_from_buffer(buf, len, pos, lppaca,
				sizeof(struct lppaca));
}

static const struct file_operations vpa_fops = {
	.open		= simple_open,
	.read		= vpa_file_read,
	.llseek		= default_llseek,
};

static int __init vpa_debugfs_init(void)
{
	char name[16];
	long i;
	struct dentry *vpa_dir;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	vpa_dir = debugfs_create_dir("vpa", arch_debugfs_dir);

	/* set up the per-cpu vpa file */
	for_each_possible_cpu(i) {
		sprintf(name, "cpu-%ld", i);
		debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
	}

	return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
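/*
 * With debugfs mounted at /sys/kernel/debug, the raw per-cpu VPA can then
 * be inspected from userspace, e.g.:
 *
 *	# hexdump -C /sys/kernel/debug/powerpc/vpa/cpu-0
 */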
#endif /* CONFIG_DEBUG_FS */