 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputable.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
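/*
 * HPTE_LOCK_BIT selects the software-use bit (bit 3 of the architected
 * first doubleword of an HPTE) that this file uses as a per-entry lock.
 * hptep->v is stored big-endian, so on little-endian kernels the same
 * physical bit is addressed as bit 56+3 of the native word, hence the
 * two definitions above.
 */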
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
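/*
 * native_tlbie_lock serialises global invalidations on CPUs that lack
 * MMU_FTR_LOCKLESS_TLBIE, where only one tlbie may be in flight in the
 * system at a time (see tlbie() and native_flush_hash_range() below).
 */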
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
                                     int apsize, int ssize)
        /*
         * We need 14 to 65 bits of va for a tlbie of a 4K page.
         * With vpn we ignore the lower VPN_SHIFT bits already.
         * And the top two bits are already ignored because we can
         * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
         */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64-bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                sllp = get_sllp_encoding(apsize);
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)

                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                /*
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 */
                va |= (vpn & 0xfe); /* AVAL */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
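/*
 * fixup_tlbie_vpn: POWER9 tlbie errata workarounds. With
 * CPU_FTR_P9_TLBIE_ERAT_BUG an extra radix-format, partition-scoped
 * flush is issued after the hash tlbie; with CPU_FTR_P9_TLBIE_STQ_BUG
 * the last tlbie is simply repeated. Both cases need an extra ptesync
 * so the fixup cannot be reordered ahead of the original invalidation.
 */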
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
                                   int apsize, int ssize)
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                /* Radix flush for a hash guest */
                unsigned long rb, rs, prs, r, ric;

                rb = PPC_BIT(52);       /* IS = 2 */
                rs = 0;                 /* lpid = 0 */
                prs = 0;                /* partition scoped */
                r = 1;                  /* radix format */
                ric = 0;                /* RIC_FLUSH_TLB */

                /*
                 * Need the extra ptesync to make sure we don't
                 * reorder the tlbie
                 */
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                             : : "r"(rb), "i"(r), "i"(prs),
                               "i"(ric), "r"(rs) : "memory");

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                /* Need the extra ptesync to ensure we don't reorder tlbie */
                asm volatile("ptesync": : :"memory");
                ___tlbie(vpn, psize, apsize, ssize);
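/* __tlbie: tracing wrapper; issue the tlbie and emit a trace_tlbie event. */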
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
        rb = ___tlbie(vpn, psize, apsize, ssize);
        trace_tlbie(0, 0, rb, 0, 0, 0, 0);

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64-bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                sllp = get_sllp_encoding(apsize);
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)

                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                /*
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 */
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)

        trace_tlbie(0, 1, va, 0, 0, 0, 0);
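/*
 * tlbie: flush one translation. Use tlbiel when the flush is CPU-local,
 * the CPU supports it for this page size and no cxl contexts are in use;
 * otherwise fall back to a global tlbie, taking native_tlbie_lock on
 * CPUs that cannot issue concurrent tlbies.
 */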
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
        unsigned int use_local;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
                __tlbiel(vpn, psize, apsize, ssize);
                asm volatile("ptesync": : :"memory");
                __tlbie(vpn, psize, apsize, ssize);
                fixup_tlbie_vpn(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
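/*
 * native_lock_hpte/native_unlock_hpte: per-HPTE locking by spinning on
 * the software lock bit in the valid word. test_and_set_bit_lock() and
 * clear_bit_unlock() provide the acquire/release ordering, so a locked
 * HPTE can be updated without further synchronisation.
 */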
static inline void native_lock_hpte(struct hash_pte *hptep)
        unsigned long *word = (unsigned long *)&hptep->v;

        if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
        while (test_bit(HPTE_LOCK_BIT, word))

static inline void native_unlock_hpte(struct hash_pte *hptep)
        unsigned long *word = (unsigned long *)&hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
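/*
 * native_hpte_insert: scan the given PTEG for an invalid slot and
 * install the new translation there. Returns -1 if the group is full,
 * otherwise the slot number within the group, with bit 3 set when
 * vflags has HPTE_V_SECONDARY (e.g. slot 5 in the secondary group is
 * returned as 5 | 0x8 = 13).
 */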
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                               unsigned long pa, unsigned long rflags,
                               unsigned long vflags, int psize, int apsize, int ssize)
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
                        native_unlock_hpte(hptep);

        if (i == HPTES_PER_GROUP)

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
                hpte_v = hpte_old_to_new_v(hpte_v);

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
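/*
 * native_hpte_remove: evict one entry from a full PTEG. A pseudo-random
 * starting slot (from the timebase) is chosen and the first valid,
 * non-bolted entry found is invalidated; returns its slot number, or -1
 * if every entry in the group is bolted.
 */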
static long native_hpte_remove(unsigned long hpte_group)
        struct hash_pte *hptep;
        unsigned long hpte_v;

        DBG_LOW(" remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                        native_unlock_hpte(hptep);

        if (i == HPTES_PER_GROUP)

        /* Invalidate the hpte. NOTE: this also unlocks it */
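/*
 * native_hpte_updatepp: update the protection bits of an existing HPTE
 * in place. Returns 0 on a hit and -1 if the slot no longer holds the
 * expected translation; in either case the TLB entry is flushed unless
 * the caller passed HPTE_NOHPTE_UPDATE.
 */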
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = be64_to_cpu(hptep->v);
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
         * less random entry from it. When we do that we don't invalidate the
         * TLB (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = be64_to_cpu(hptep->v);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PPP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PPP | HPTE_R_N |
                native_unlock_hpte(hptep);

        if (flags & HPTE_LOCAL_UPDATE)
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);
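/*
 * native_hpte_find: look up the slot of a bolted kernel translation by
 * searching the primary hash group for a matching AVPN. Used by the
 * bolted update/remove paths below; returns the global slot index, or
 * -1 if no entry matches.
 */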
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
        struct hash_pte *hptep;
        unsigned long want_v, hpte_v;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /* Bolted mappings are only ever in the primary group */
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
                hpte_v = be64_to_cpu(hptep->v);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PPP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. Bolted entries' base and
         * actual page sizes will be the same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);

        hptep = htab_address + slot;

        VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

        /* Invalidate the hpte */

        /* Invalidate the TLB */
        tlbie(vpn, psize, psize, ssize, 0);
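/*
 * native_hpte_invalidate: clear one HPTE and flush the corresponding
 * TLB entry with interrupts disabled. Note that the TLB is flushed even
 * when the slot no longer matches, for the hpte_remove() eviction
 * reason described in the comment below.
 */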
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;

        local_irq_save(flags);

        DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        native_lock_hpte(hptep);
        hpte_v = be64_to_cpu(hptep->v);
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
         * less random entry from it. When we do that we don't invalidate the
         * TLB (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                native_unlock_hpte(hptep);

        /* Invalidate the hpte. NOTE: this also unlocks it */

        /* Invalidate the TLB */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                hidx = hpte_hash_index(hpte_slot_array, i);

                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                native_lock_hpte(hptep);
                hpte_v = be64_to_cpu(hptep->v);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

                /* Even if we miss, we need to invalidate the TLB */
                if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                        native_unlock_hpte(hptep);

                /* Invalidate the hpte. NOTE: this also unlocks it */
                /*
                 * We need to do a tlb invalidate for all the addresses; the
                 * tlbie instruction compares entry_VA in the tlb with the VA
                 * specified
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        local_irq_restore(flags);
#else
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
        WARN(1, "%s called without THP support\n", __func__);
#endif
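/*
 * hpte_decode: given a raw HPTE, recover the base and actual page sizes,
 * the segment size and the virtual page number it maps. Used by
 * native_hpte_clear() below, which must work out what to flush from the
 * hardware table alone.
 */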
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
                hpte_r = hpte_new_to_old_r(hpte_r);

        if (!(hpte_v & HPTE_V_LARGE)) {
                a_size = MMU_PAGE_4K;

                size = hpte_page_sizes[lp] & 0xf;
                a_size = hpte_page_sizes[lp] >> 4;

        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)

        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                /* We can find more bits from the pteg value */
                vpi = (vsid ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;

                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right? and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call ___tlbie() here rather than tlbie() since we can't
                 * take the native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        ___tlbie(vpn, psize, apsize, ssize);

        asm volatile("eieio; tlbsync; ptesync":::"memory");
/*
 * Batched hash table flush: we batch the tlbie's to avoid taking and
 * releasing the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
        unsigned long vpn = 0;
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;

        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;

        unsigned int use_local;

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
                mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if (cpu_has_feature(CPU_FTR_ARCH_300))
                                hpte_v = hpte_new_to_old_v(hpte_v,
                                                be64_to_cpu(hptep->r));
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                } pte_iterate_hashed_end();

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        pte_iterate_hashed_subpages(pte, psize,
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                asm volatile("ptesync":::"memory");

                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        pte_iterate_hashed_subpages(pte, psize,
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                /*
                 * Just do one more with the last used values.
                 */
                fixup_tlbie_vpn(vpn, psize, psize, ssize);
                asm volatile("eieio; tlbsync; ptesync":::"memory");

                        raw_spin_unlock(&native_tlbie_lock);

        local_irq_restore(flags);
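/*
 * With CPU_FTR_ARCH_300 (POWER9) the process table is registered via the
 * partition table even in hash mode; patb1 packs the process table VSID,
 * the page size encoding (sllp) and the table size, matching the field
 * comments below.
 */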
static int native_register_proc_table(unsigned long base, unsigned long page_size,
                                      unsigned long table_size)
        unsigned long patb1 = base << 25; /* VSID */

        patb1 |= (page_size << 5); /* sllp */

        partition_tb->patb1 = cpu_to_be64(patb1);
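/*
 * hpte_init_native: wire up the bare-metal (native) hash MMU callbacks,
 * used when the kernel manages the hardware hash page table directly
 * rather than through a hypervisor.
 */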
void __init hpte_init_native(void)
        mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
        mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
        mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
        mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
        mmu_hash_ops.hpte_insert = native_hpte_insert;
        mmu_hash_ops.hpte_remove = native_hpte_remove;
        mmu_hash_ops.hpte_clear_all = native_hpte_clear;
        mmu_hash_ops.flush_hash_range = native_flush_hash_range;
        mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;

        if (cpu_has_feature(CPU_FTR_ARCH_300))
                register_process_table = native_register_proc_table;