#define pr_fmt(fmt)	"Hyper-V: " fmt

#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/fpu/api.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/hyperv.h>
/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_flush_pcpu {
	u64 address_space;
	u64 flags;
	u64 processor_mask;
	u64 gva_list[];
};

/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_flush_pcpu_ex {
	u64 address_space;
	u64 flags;
	struct {
		u64 format;
		u64 valid_bank_mask;
		u64 bank_contents[];
	} hv_vp_set;
	u64 gva_list[];
};
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
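
/*
 * Illustrative note on the encoding (derived from fill_gva_list() below):
 * each gva_list entry keeps the page-aligned address in its upper bits
 * and an "additional pages" count in the low 12 bits. With 4K pages,
 * flushing 3 pages starting at 0x7f0000000000 is encoded as the single
 * entry 0x7f0000000002 (base page plus 2 additional pages), and a full
 * HV_TLB_FLUSH_UNIT (16MB) range sets all 12 low bits.
 */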

static struct hv_flush_pcpu __percpu **pcpu_flush;

static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
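
/*
 * Note: these are per-cpu pointers to the hypercall input page for each
 * flavor of the flush hypercall. The backing page is allocated lazily
 * (GFP_ATOMIC, see the flush functions below) the first time a CPU
 * issues a remote flush.
 */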

/*
 * Fills in gva_list starting from offset. Returns the number of
 * entries added.
 */
static inline int fill_gva_list(u64 gva_list[], int offset,
				unsigned long start, unsigned long end)
{
	int gva_n = offset;
	unsigned long cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur & PAGE_MASK;
		/*
		 * Lower 12 bits encode the number of additional
		 * pages to flush (in addition to the 'cur' page).
		 */
		if (diff >= HV_TLB_FLUSH_UNIT) {
			gva_list[gva_n] |= ~PAGE_MASK;
			cur += HV_TLB_FLUSH_UNIT;
		} else if (diff) {
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
			cur = end;
		}

		gva_n++;

	} while (cur < end);

	return gva_n - offset;
}
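
/*
 * For example (illustrative): a 40MB range is 2.5 * HV_TLB_FLUSH_UNIT,
 * so the loop above emits three entries: two "full" entries with all
 * 12 low bits set and one final entry covering the 8MB remainder.
 */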

/* Return the number of banks in the resulting vp_set */
static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
				    const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
	 * structs are not cleared between calls; we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		flush->hv_vp_set.bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &flush->hv_vp_set.bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);

	return nr_bank;
}
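
/*
 * For example (illustrative): VPs 1 and 130 land in banks 0 and 2, so
 * bank_contents[0] gets bit 1, bank_contents[2] gets bit 2, nr_bank is
 * 3 and valid_bank_mask becomes 0b111. Bank 1 is marked valid but left
 * empty, which the comment above notes is acceptable.
 */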

static void hyperv_flush_tlb_others(const struct cpumask *cpus,
				    const struct flush_tlb_info *info)
{
	int cpu, vcpu, gva_n, max_gvas;
	struct hv_flush_pcpu **flush_pcpu;
	struct hv_flush_pcpu *flush;
	u64 status = U64_MAX;
	unsigned long flags;

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!pcpu_flush || !hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
		return;

	local_irq_save(flags);

	flush_pcpu = this_cpu_ptr(pcpu_flush);

	if (unlikely(!*flush_pcpu))
		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->processor_mask = 0;
	if (cpumask_equal(cpus, cpu_present_mask)) {
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	} else {
		for_each_cpu(cpu, cpus) {
			vcpu = hv_cpu_number_to_vp_number(cpu);
			/*
			 * processor_mask only covers VPs 0-63; restore
			 * interrupts before falling back to the native path.
			 */
			if (vcpu >= 64) {
				local_irq_restore(flags);
				goto do_native;
			}

			__set_bit(vcpu, (unsigned long *)
				  &flush->processor_mask);
		}
	}

	/*
	 * We cannot flush more than max_gvas with one hypercall. Flush the
	 * whole address space if we were asked to do more.
	 */
	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
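
	/*
	 * Illustrative arithmetic: with 4K pages and the 24-byte fixed
	 * header (address_space, flags, processor_mask), this works out
	 * to (4096 - 24) / 8 = 509 gva_list entries per hypercall page.
	 */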

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, 0,
				      info->start, info->end);
		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
					     gva_n, 0, flush, NULL);
	}

	local_irq_restore(flags);
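
	/*
	 * The low 16 bits of the hypercall status hold the result code;
	 * zero is HV_STATUS_SUCCESS. On any failure, fall back to the
	 * native IPI-based flush.
	 */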
	if (!(status & HV_HYPERCALL_RESULT_MASK))
		return;
do_native:
	native_flush_tlb_others(cpus, info);
}

static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				       const struct flush_tlb_info *info)
{
	int nr_bank = 0, max_gvas, gva_n;
	struct hv_flush_pcpu_ex **flush_pcpu;
	struct hv_flush_pcpu_ex *flush;
	u64 status = U64_MAX;
	unsigned long flags;

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!pcpu_flush_ex || !hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
		return;

	local_irq_save(flags);

	flush_pcpu = this_cpu_ptr(pcpu_flush_ex);

	if (unlikely(!*flush_pcpu))
		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->hv_vp_set.valid_bank_mask = 0;

	if (!cpumask_equal(cpus, cpu_present_mask)) {
		flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K;
		nr_bank = cpumask_to_vp_set(flush, cpus);
	}

	if (!nr_bank) {
		flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	}
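
	/*
	 * Note: cpumask_to_vp_set() returns 0 when a VP index would not
	 * fit in the 64-bank sparse set, so this also serves as a fallback
	 * from the sparse format to HV_GENERIC_SET_ALL, trading precision
	 * (every vCPU gets flushed) for correctness.
	 */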

	/*
	 * We cannot flush more than max_gvas with one hypercall. Flush the
	 * whole address space if we were asked to do more.
	 */
	max_gvas =
		(PAGE_SIZE - sizeof(*flush) - nr_bank *
		 sizeof(flush->hv_vp_set.bank_contents[0])) /
		sizeof(flush->gva_list[0]);
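
	/*
	 * Illustrative arithmetic: the fixed header here is 32 bytes plus
	 * 8 bytes per populated bank, so with a single bank this leaves
	 * (4096 - 32 - 8) / 8 = 507 gva_list entries.
	 */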

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_rep_hypercall(
				HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				0, nr_bank, flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_rep_hypercall(
				HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				0, nr_bank, flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, nr_bank,
				      info->start, info->end);
		status = hv_do_rep_hypercall(
				HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
				gva_n, nr_bank, flush, NULL);
	}

	local_irq_restore(flags);

	if (!(status & HV_HYPERCALL_RESULT_MASK))
		return;
do_native:
	native_flush_tlb_others(cpus, info);
}

void hyperv_setup_mmu_ops(void)
{
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
		pr_info("Using hypercall for remote TLB flush\n");
		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
	} else {
		pr_info("Using ext hypercall for remote TLB flush\n");
		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
	}
}
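
/*
 * Note: the pv op is installed before the per-cpu flush buffers exist;
 * presumably hyper_alloc_mmu() below runs later in boot, and until it
 * does, the !pcpu_flush/!pcpu_flush_ex checks above make the new ops
 * fall back to native_flush_tlb_others().
 */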

void hyper_alloc_mmu(void)
{
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
	else
		pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
}