/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
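/*
 * Serializes global tlbie on CPUs that lack MMU_FTR_LOCKLESS_TLBIE;
 * see tlbie() and native_flush_hash_range() below.
 */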
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
	unsigned long rb;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

	asm volatile("tlbiel %0" : : "r" (rb));
}
/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=0 and is=01 or is=10 or is=11
 */
static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;
	unsigned int r = 0; /* hash format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
}
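/*
 * Flush all sets of the local TLB using the pre-ISA v3.0 (POWER7/POWER8)
 * hash form of tlbiel.
 */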
static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_hash_set_isa206(set, is);

	asm volatile("ptesync": : :"memory");
}
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and any caching of partition table
	 * entries. Then flush the remaining sets of the TLB. Hash mode uses
	 * partition scoped TLB translations.
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);

	/*
	 * Now invalidate the process table cache.
	 *
	 * From ISA v3.0B p. 1078:
	 *     The following forms are invalid.
	 *      * PRS=1, R=0, and RIC!=2 (The only process-scoped
	 *        HPT caching is of the Process Table.)
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 1);

	asm volatile("ptesync": : :"memory");

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
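/*
 * Flush all entries from the processor-local TLB for the requested scope
 * (all translations, or all translations for this partition), using the
 * flush sequence that matches the CPU's ISA level.
 */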
void hash__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		tlbiel_all_isa206(POWER8_TLB_SETS, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
		tlbiel_all_isa206(POWER7_TLB_SETS, is);
	else
		WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
}
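/*
 * Issue a single bare tlbie for one virtual page number. Callers provide
 * the surrounding ptesync/tlbsync sequence and any errata workarounds;
 * the encoded VA is returned so __tlbie() can trace it.
 */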
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
				     int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}
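/*
 * Work around POWER9 tlbie errata: the ERAT variant issues a dummy
 * partition-scoped radix flush, the store-queue variant repeats the tlbie
 * for the same VA, each preceded by an extra ptesync.
 */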
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
				   int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */

		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52); /* IS = 2 */
		rs = 0;	 /* lpid = 0 */
		prs = 0; /* partition scoped */
		r = 1;   /* radix format */
		ric = 0; /* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			       : : "r"(rb), "i"(r), "i"(prs),
				 "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
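/*
 * Per-HPTE locking: HPTE_LOCK_BIT is a software lock bit in the HPTE's
 * first doubleword. native_lock_hpte() spins until it wins the bit;
 * rewriting the whole doubleword (making the entry valid, or zeroing it)
 * also releases the lock.
 */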
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
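/*
 * Insert a translation into the given hash bucket: scan the slots for an
 * invalid entry, write the second doubleword first, then set the valid
 * bit. Returns the slot index (with bit 3 set for the secondary hash),
 * or -1 if the bucket is full.
 */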
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
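/*
 * Evict an entry from a full hash bucket so the caller can insert a new
 * one. Starts at a pseudo-random slot, skips bolted entries and clears
 * the victim without flushing the TLB; returns the freed slot index, or
 * -1 if every entry is bolted.
 */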
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}
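/*
 * Find the slot of a bolted kernel HPTE by VPN. Bolted mappings are only
 * ever in the primary group, so a single group scan is enough; returns -1
 * if no matching valid entry exists.
 */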
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries base and
	 * actual page size will be same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}
/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
	return 0;
}
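/*
 * Invalidate one HPTE and flush the corresponding TLB entry. The TLB is
 * flushed even on an HPTE miss, because an entry previously evicted by
 * hpte_remove may still have a stale translation cached.
 */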
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		else
			native_unlock_hpte(hptep);
	}
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/*
				 * Invalidate the hpte. NOTE: this also unlocks it
				 */
				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need to do tlb invalidate for all the addresses, tlbie
		 * instruction compares entry_VA in tlb with the VA specified
		 * here
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
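/*
 * Decode a raw HPTE back into its base/actual page size, segment size and
 * virtual page number. Used by native_hpte_clear() below to work out which
 * TLB entry to flush for each valid HPTE.
 */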
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize = size;
	*apsize = a_size;
}
/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. POWER5 and newer not taking the lock is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}
/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				/* Invalidate the hpte. NOTE: this also unlocks it */
				hptep->v = 0;

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie_vpn(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
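/*
 * Wire up the hash MMU ops for bare-metal (native) operation.
 */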
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert = native_hpte_insert;
	mmu_hash_ops.hpte_remove = native_hpte_remove;
	mmu_hash_ops.hpte_clear_all = native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}