// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}
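
/*
 * Note on the subtraction above: dirty_index and reset_index are
 * free-running u32 counters that are only masked when indexing into the
 * ring, so the wrapping u32 subtraction stays correct even after
 * dirty_index wraps around. For example, with dirty_index == 2 and
 * reset_index == UINT_MAX, (u32)(2 - UINT_MAX) == 3, i.e. three entries
 * are in use.
 */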

static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}
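
/*
 * For illustration only, not part of this file: "size" originates from
 * userspace enabling KVM_CAP_DIRTY_LOG_RING, and must keep ring->size a
 * power of two because the push/reset paths index with
 * "& (ring->size - 1)". A minimal sketch of a caller-side sanity check
 * (the helper name is hypothetical):
 *
 *	static bool dirty_ring_size_ok(u32 bytes)
 *	{
 *		u32 entries = bytes / sizeof(struct kvm_dirty_gfn);
 *
 *		return bytes >= PAGE_SIZE && is_power_of_2(bytes) &&
 *		       entries > kvm_dirty_ring_get_rsvd_entries();
 *	}
 */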

static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}
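
/*
 * For illustration, how the three helpers above pair with userspace: an
 * entry is published as DIRTY by the vCPU, harvested and flagged RESET
 * by userspace, then rewound to invalid by KVM_RESET_DIRTY_RINGS. A
 * hedged sketch of the userspace-side harvest step, assuming a mapped
 * ring "gfns", an entry count "nentries" and a free-running "fetch"
 * index (names are made up; a real VMM such as QEMU uses its own
 * acquire/release helpers rather than the kernel macros shown here):
 *
 *	struct kvm_dirty_gfn *cur = &gfns[fetch & (nentries - 1)];
 *
 *	if (smp_load_acquire(&cur->flags) & KVM_DIRTY_GFN_F_DIRTY) {
 *		collect_dirty_page(cur->slot, cur->offset);
 *		smp_store_release(&cur->flags, KVM_DIRTY_GFN_F_RESET);
 *		fetch++;
 *	}
 */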

int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot (a worked example follows
		 * this function).
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
	 * by the VCPU thread next time when it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return count;
}
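
/*
 * Worked example for the coalescing in kvm_dirty_ring_reset(): suppose
 * the harvested entries hit offsets 5, 6, 4 within one slot. The first
 * iteration primes cur_offset = 5, mask = 0b1. Offset 6 gives
 * delta = 1, so mask becomes 0b11 (offsets 5-6). Offset 4 gives
 * delta = -1; shifting the mask left by one loses no set bits
 * ((mask << 1 >> 1) == mask), so cur_offset becomes 4 and mask becomes
 * 0b111, covering offsets 4-6. Only when an entry cannot be merged, or
 * the loop ends, does kvm_reset_dirty_gfn() flush the batch, where bit
 * N of the mask stands for page cur_offset + N.
 */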

void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program. There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}
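
/*
 * Why a soft limit rather than exiting only when completely full: the
 * vCPU may still push entries after the soft-full request is raised
 * (e.g. when a hardware dirty buffer is flushed on vmexit), so
 * kvm_dirty_ring_get_rsvd_entries() slots are kept in reserve. As a
 * worked example, assuming x86-like values (KVM_DIRTY_RING_RSVD_ENTRIES
 * of 64 plus a 512-entry PML buffer), a 4096-entry ring would have
 * soft_limit = 3520, leaving 576 entries of headroom so that the
 * WARN_ON_ONCE(kvm_dirty_ring_full(ring)) above should never fire.
 */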

bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable when the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}
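
/*
 * Note on the re-request above: kvm_check_request() clears the pending
 * request, so it is posted again while the ring is still soft full.
 * Otherwise, if userspace re-entered the vCPU without resetting the
 * ring, the vCPU could run with no headroom left.
 */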

struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}
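
/*
 * The ring is vzalloc'ed, so it is not physically contiguous; the helper
 * above resolves one page at a time ("offset" is in pages from the start
 * of the ring). It backs the vcpu mmap fault path, which lets userspace
 * map the ring at KVM_DIRTY_LOG_PAGE_OFFSET pages into the vcpu fd.
 */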

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}