 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 * Marcelo Tosatti <mtosatti@redhat.com>
 * Paolo Bonzini <pbonzini@redhat.com>
 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/nospec.h>

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
static bool msr_mtrr_valid(unsigned msr)
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:

static bool valid_pat_type(unsigned t)
	return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */

static bool valid_mtrr_type(unsigned t)
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
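/*
 * Memory type encodings, per the SDM: 0 = UC, 1 = WC, 4 = WT, 5 = WP,
 * 6 = WB.  Type 7 (UC-) is valid only in the PAT, which is why
 * valid_pat_type() accepts it while valid_mtrr_type() does not.
 */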
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	if (!msr_mtrr_valid(msr))

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
	} else if (msr == MSR_MTRRdefType) {
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))

	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);

		if (!valid_mtrr_type(data & 0xff))

		kvm_inject_gp(vcpu, 0);

EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so WB is used instead.
	 */
	if (guest_cpuid_has_mtrr(vcpu))
		return MTRR_TYPE_UNCACHABLE;

	return MTRR_TYPE_WRBACK;
/*
 * Three terms are used in the following code:
 * - segment: the address segment covered by a group of fixed MTRRs.
 * - unit: a single MSR entry within a segment.
 * - range: a piece of a unit that is covered by one memory cache type.
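 *
 * For example: in fixed_seg_table below, the 16K segment has two units
 * (MSR_MTRRfix16K_80000 and MSR_MTRRfix16K_A0000) of eight ranges each;
 * across all three segments there are 8 + 16 + 64 = 88 ranges, one byte
 * per range in kvm_mtrr.fixed_ranges[].
 */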
struct fixed_mtrr_segment {
	/* the start position in kvm_mtrr.fixed_ranges[]. */

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
		.range_shift = 16, /* 64K */

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 */
		.range_shift = 14, /* 16K */

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 */
		.range_shift = 12, /* 4K */
/*
 * One MSR covers one unit.  Each MSR entry contains 8 ranges, so the
 * unit size is always 8 * 2^range_shift bytes.
 */
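/*
 * For example, the 64K segment (range_shift == 16) has a unit size of
 * 8 << 16 == 512K: one MSR (MSR_MTRRfix64K_00000) covers eight 64K
 * ranges, i.e. 0x0 - 0x7ffff.
 */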
static u64 fixed_mtrr_seg_unit_size(int seg)
	return 8 << fixed_seg_table[seg].range_shift;
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
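/*
 * The index of the last range in a segment.  E.g. for the 4K segment,
 * (0x100000 - 0xc0000) >> 12 == 64 ranges and range_start == 24
 * (8 + 16 ranges in the two segments before it), so the last fixed
 * range index is 24 + 64 - 1 == 87.
 */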
static int fixed_mtrr_seg_end_range_index(int seg)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))

	fixed_mtrr_seg_unit_range(seg, unit, start, end);

static int fixed_msr_to_range_index(u32 msr)
	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))

	return fixed_mtrr_seg_unit_range_index(seg, unit);

static int fixed_mtrr_addr_to_seg(u64 addr)
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
	struct fixed_mtrr_segment *mtrr_seg;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;

static u64 fixed_mtrr_range_end_addr(int seg, int index)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);

static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
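	/*
	 * For illustration (example values, not from this code): a 1G WB
	 * range at 1G with a 36-bit maxphyaddr has base == 0x40000000 and
	 * an extended mask of 0xffffffffc0000000 after PAGE_MASK, so
	 * ~mask == 0x3fffffff and *end == (0x40000000 | 0x3fffffff) + 1
	 * == 0x80000000.
	 */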
	*end = (*start | ~mask) + 1;
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)

	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
	} else if (msr == MSR_MTRRdefType) {

		/* variable range MTRRs. */
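		/*
		 * Variable MTRRs live at MSRs 0x200 + 2n (MTRRphysBasen) and
		 * 0x200 + 2n + 1 (MTRRphysMaskn), so e.g. MSR 0x205
		 * (MTRRphysMask2) maps to index (0x205 - 0x200) / 2 == 2.
		 */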
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));

static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
	return (range->mask & (1 << 11)) != 0;

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);
	/* Extend the mask with all 1 bits to the left, since those
	 * bits must implicitly be 0.  The bits are then cleared
	 * when reading them back (see kvm_mtrr_get_msr()).
	 */
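	/*
	 * E.g. with a 36-bit guest maxphyaddr, a guest write of
	 * 0x0000000fc0000800 is stored as
	 * 0x0000000fc0000800 | (-1LL << 36) == 0xffffffffc0000800.
	 */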
	cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
		list_add_tail(&cur->node, &tmp->node);

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	if (!kvm_mtrr_valid(vcpu, msr, data))

	index = fixed_msr_to_range_index(msr);
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/* VCNT = KVM_NR_VAR_MTRR */
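		/*
		 * 0x500 below sets the WC (bit 10) and FIX (bit 8) capability
		 * bits of IA32_MTRRCAP; the low byte is VCNT, the number of
		 * variable ranges exposed to the guest.
		 */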
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
	if (!msr_mtrr_valid(msr))

	index = fixed_msr_to_range_index(msr);
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else { /* Variable MTRRs */
		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);

	struct kvm_mtrr *mtrr_state;

	/* mtrr is completely disabled? */

	/* [start, end) is not fully covered in MTRRs? */

	/* private fields. */

	/* used for fixed MTRRs. */

	/* used for var MTRRs. */
	struct kvm_mtrr_range *range;
	/* the max address that has been covered by variable MTRRs so far. */
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
	if (!fixed_mtrr_is_enabled(iter->mtrr_state))

	seg = fixed_mtrr_addr_to_seg(iter->start);

	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		/*
		 * This function is called while walking kvm_mtrr.head.
		 * The range has the minimum base address that overlaps
		 * [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address that has been covered. */
		iter->start_max = max(iter->start_max, end);
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))

	iter->partial_map |= iter->start_max < iter->end;

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->start_max = iter->start;

	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {

	/* have looked up for all fixed MTRRs. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
	__mtrr_lookup_var_next(iter);

static void mtrr_lookup_start(struct mtrr_iter *iter)
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
	iter->mtrr_state = mtrr_state;
	iter->mtrr_disabled = false;
	iter->partial_map = false;

	mtrr_lookup_start(iter);

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];

		iter->mem_type = iter->range->base & 0xff;

static void mtrr_lookup_next(struct mtrr_iter *iter)
		mtrr_lookup_fixed_next(iter);
		mtrr_lookup_var_next(iter);

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;

	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;
		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */
		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;
		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
		return mtrr_default_type(mtrr_state);
	/*
	 * We only check one page, so a page partially covered by MTRRs
	 * is impossible.
	 */
	WARN_ON(iter.partial_map);
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);

bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
			type = iter.mem_type;

		if (type != iter.mem_type)

	if (iter.mtrr_disabled)

	if (!iter.partial_map)

	return type == mtrr_default_type(mtrr_state);