// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

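/*
 * Coalesced MMIO lets userspace mark guest physical ranges whose writes do
 * not need an immediate result (the KVM_REGISTER_COALESCED_MMIO and
 * KVM_UNREGISTER_COALESCED_MMIO vm ioctls).  Instead of exiting to
 * userspace for every such write, KVM appends the write to a one-page ring
 * (struct kvm_coalesced_mmio_ring) shared with userspace via the vcpu mmap
 * area (page KVM_COALESCED_MMIO_PAGE_OFFSET); userspace drains the
 * accumulated writes on a later exit.  The kernel produces entries by
 * advancing ring->last, userspace consumes them by advancing ring->first.
 */
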
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
        /* is it in a batchable area?
         * (addr, len) must be fully included in
         * (zone->addr, zone->size)
         */
        if (len < 0)
                return 0;
        if (addr + len < addr)
                return 0;
        if (addr < dev->zone.addr)
                return 0;
        if (addr + len > dev->zone.addr + dev->zone.size)
                return 0;
        return 1;
}

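/*
 * Ring-occupancy check.  One entry is always left unused so that "full"
 * and "empty" can be told apart: the ring is empty when first == last and
 * full when (first - last - 1) % KVM_COALESCED_MMIO_MAX == 0.  For
 * example, with first == 3 and last == 2, avail is 0 and the write has to
 * take the normal userspace-exit path even though one slot is still free.
 */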
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
        struct kvm_coalesced_mmio_ring *ring;
        unsigned avail;

        /* Are we able to batch it? */

        /* last is the first free entry;
         * check that we do not run into the first used entry -
         * there is always one unused entry in the buffer.
         */
        ring = dev->kvm->coalesced_mmio_ring;
        avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
        if (avail == 0) {
                /* full */
                return 0;
        }

        return 1;
}

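/*
 * Write callback for a registered zone.  If the access is inside the zone
 * and the ring has room, the write is queued and 0 is returned, so the
 * vcpu does not exit to userspace; otherwise -EOPNOTSUPP makes the caller
 * fall back to an ordinary MMIO exit.  Only writes can be coalesced - a
 * read needs its value immediately, which is why coalesced_mmio_ops below
 * has no read handler.
 *
 * Userspace is the consumer.  A sketch of the consuming loop (illustrative
 * only; handle_write() is a placeholder, not a KVM API):
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *e = &ring->coalesced_mmio[ring->first];
 *
 *		handle_write(e->phys_addr, e->data, e->len);
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */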
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this, gpa_t addr,
                                int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
        __u32 insert;

        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;

        spin_lock(&dev->kvm->ring_lock);

        insert = READ_ONCE(ring->last);
        if (!coalesced_mmio_has_room(dev, insert) ||
            insert >= KVM_COALESCED_MMIO_MAX) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }

        /* copy data into the first free entry of the ring */

        ring->coalesced_mmio[insert].phys_addr = addr;
        ring->coalesced_mmio[insert].len = len;
        memcpy(ring->coalesced_mmio[insert].data, val, len);
        smp_wmb();      /* make the entry visible before publishing it via ->last */
        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
}

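/* Unlink a zone from the VM's list and free it when its device is destroyed. */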
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

        list_del(&dev->list);

        kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
        .write      = coalesced_mmio_write,
        .destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        kvm->coalesced_mmio_ring = page_address(page);

        /*
         * We're using this spinlock to sync access to the coalesced ring.
         * The list doesn't need its own lock since device registration and
         * unregistration should only happen when kvm->slots_lock is held.
         */
        spin_lock_init(&kvm->ring_lock);
        INIT_LIST_HEAD(&kvm->coalesced_zones);

        return 0;
}

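/* Release the shared ring page; called when the VM is torn down. */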
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
        if (kvm->coalesced_mmio_ring)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
}

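/*
 * Back end of the KVM_REGISTER_COALESCED_MMIO vm ioctl: allocate a zone
 * device for the requested range and register it on the MMIO bus under
 * kvm->slots_lock.
 */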
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        int ret;
        struct kvm_coalesced_mmio_dev *dev;

        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
        dev->kvm = kvm;
        dev->zone = *zone;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
                                      zone->size, &dev->dev);
        if (ret < 0)
                goto out_free_dev;
        list_add_tail(&dev->list, &kvm->coalesced_zones);
        mutex_unlock(&kvm->slots_lock);

        return 0;

out_free_dev:
        mutex_unlock(&kvm->slots_lock);
        kfree(dev);

        return ret;
}

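/*
 * Back end of the KVM_UNREGISTER_COALESCED_MMIO vm ioctl: drop every
 * registered zone whose area covers the requested range.
 */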
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev, *tmp;

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
                if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);
                }

        mutex_unlock(&kvm->slots_lock);

        return 0;
}