/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"
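/*
 * Bit layout of IA32_MTRR_DEF_TYPE (Intel SDM Vol. 3, 11.11.2.1):
 * bit 11 (E) enables the MTRRs as a whole, bit 10 (FE) enables the
 * fixed-range MTRRs, and bits 7:0 hold the default memory type applied
 * to physical addresses not covered by any MTRR.
 */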
#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}
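/*
 * Valid MTRR memory-type encodings are 0 (UC), 1 (WC), 4 (WT), 5 (WP)
 * and 6 (WB); 2, 3 and 7, as well as anything >= 8, are reserved.  The
 * bitmask 0x73 below has exactly bits 0, 1, 4, 5 and 6 set.
 */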
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so we use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;

	return MTRR_TYPE_WRBACK;
}
/*
 * Three terms are used in the following code:
 * - segment, one of the address regions covered by fixed MTRRs;
 * - unit, a single MSR within a segment;
 * - range, the smallest region described by one memory type, i.e. one
 *   byte of a unit MSR.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
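/*
 * Example of the mapping above: the fixed MTRRs describe 88 ranges in
 * total (8 x 64K + 16 x 16K + 64 x 4K) covering physical addresses
 * 0 - 1M.  MSR_MTRRfix16K_A0000 is unit 1 of segment 1; its eight 16K
 * ranges occupy fixed_ranges[16..23] and cover 0xa0000 - 0xc0000.
 */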
/*
 * A unit is the region covered by one MSR.  Each MSR entry holds 8
 * ranges, so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}
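/*
 * For example: segment 0 has range_shift == 16, so its single 64K unit
 * covers 8 << 16 = 512K (0x0 - 0x80000); segment 1 units cover
 * 8 << 14 = 128K each and segment 2 units cover 8 << 12 = 32K each.
 */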
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = msr - MSR_MTRRfix16K_80000;
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = msr - MSR_MTRRfix4K_C0000;
		break;
	default:
		return false;
	}

	return true;
}
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}
static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}
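/*
 * For example: segment 2 spans 0xc0000 - 0x100000, i.e.
 * 0x40000 >> 12 = 64 ranges, so its last range index is
 * 24 + 64 - 1 = 87, the final entry of fixed_ranges[].
 */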
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}
static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}
static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}
static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}
static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
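/*
 * For example (assuming a 36-bit guest MAXPHYADDR): a variable MTRR with
 * base 0x80000000 describing a 1G region has an effective mask
 * (range->mask & PAGE_MASK) of 0xffffffffc0000000 once the bits above
 * MAXPHYADDR have been filled in, so ~mask == 0x3fffffff and
 * *end == (0x80000000 | 0x3fffffff) + 1 == 0xc0000000.
 */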
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	u64 start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
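/*
 * Bit 11 of a variable-range PhysMask MSR is the V (valid) flag; a range
 * only participates in memory-type lookups while that bit is set.
 */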
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/* Extend the mask with all 1 bits to the left, since those
	 * bits must implicitly be 0.  The bits are then cleared
	 * when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}
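/*
 * Note that the insertion above keeps mtrr_state->head ordered by base
 * address, so the lookup code below can walk the active variable ranges
 * in order and notice addresses that fall between them.
 */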
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
	}

	return 0;
}
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}
struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered in MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* max address covered so far by var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}
static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This is called while walking kvm_mtrr.head.  If the
		 * matched range starts above iter->start_max, the gap
		 * below it is not covered by any variable MTRR.
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address covered so far. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}
static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}
static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* have looked up for all fixed MTRRs. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}
static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}
static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}
static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}
static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}
static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
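/*
 * The iterator above visits the fixed ranges first (when fixed MTRRs are
 * enabled and the start address lies below 1M); once those are exhausted
 * without reaching the end of the interval, it walks the active variable
 * ranges on mtrr_state->head.  At each step iter.mem_type holds the memory
 * type of the range just matched.
 */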
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}
	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked one page, so partial coverage by MTRRs is
	 * impossible.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
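/*
 * The consistency check below is used by the MMU when it considers
 * installing a huge page: the whole [gfn, gfn + page_num) range must
 * resolve to a single guest memory type for the large mapping to be safe.
 */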
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}