drivers/kvm/kvm.h  +60 −0

@@ -265,6 +265,65 @@ struct kvm_stat {
	u32 efer_reload;
};

struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	dev->destructor(dev);
}

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

struct kvm_vcpu {
	struct kvm *kvm;
	union {

@@ -393,6 +452,7 @@ struct kvm {
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
};

struct descriptor_table {

drivers/kvm/kvm_main.c  +82 −12

@@ -366,6 +366,7 @@ static struct kvm *kvm_create_vm(void)
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu = &kvm->vcpus[i];

@@ -474,6 +475,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);

@@ -1097,12 +1099,25 @@ static int emulator_write_std(unsigned long addr,
	return X86EMUL_UNHANDLEABLE;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	/*
	 * Note that it's important to have this wrapper function because
	 * in the very near future we will be checking for MMIOs against
	 * the LAPIC as well as the general MMIO bus.
	 */
	return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
}

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);

@@ -1111,11 +1126,20 @@ static int emulator_read_emulated(unsigned long addr,
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	else {
-		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+		gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;

		/*
		 * Is this MMIO handled locally?
		 */
		mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
		if (mmio_dev) {
			kvm_iodevice_read(mmio_dev, gpa, bytes, val);
			return X86EMUL_CONTINUE;
		}

		vcpu->mmio_needed = 1;
		vcpu->mmio_phys_addr = gpa;
		vcpu->mmio_size = bytes;

@@ -1123,7 +1147,6 @@ static int emulator_read_emulated(unsigned long addr,
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)

@@ -1151,6 +1174,7 @@ static int emulator_write_emulated(unsigned long addr,
				   struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {

@@ -1161,6 +1185,15 @@ static int emulator_write_emulated(unsigned long addr,
	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;

@@ -3031,6 +3064,43 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
	return NOTIFY_OK;
}

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
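To make the new interface concrete, here is a minimal sketch of how an in-kernel device could hook into the per-VM mmio_bus introduced above. It is not part of the patch: the device name, guest-physical window, and register backing store (the dummy_* identifiers and DUMMY_MMIO_BASE/DUMMY_MMIO_SIZE) are hypothetical, and the code assumes it lives alongside kvm_main.c so the patch's types and helpers are in scope.

/*
 * Hypothetical example (not part of this patch): a trivial MMIO device
 * exposing a small register window, registered on the per-VM mmio_bus.
 * DUMMY_MMIO_BASE/SIZE and the dummy_* names are made up for illustration.
 */
#define DUMMY_MMIO_BASE 0xfee10000ULL	/* hypothetical guest-physical base */
#define DUMMY_MMIO_SIZE 0x100

struct dummy_state {
	u8 regs[DUMMY_MMIO_SIZE];
};

static int dummy_in_range(struct kvm_io_device *this, gpa_t addr)
{
	return addr >= DUMMY_MMIO_BASE &&
	       addr < DUMMY_MMIO_BASE + DUMMY_MMIO_SIZE;
}

static void dummy_read(struct kvm_io_device *this, gpa_t addr,
		       int len, void *val)
{
	struct dummy_state *s = this->private;

	/* in_range() already vetted addr; a real device would also bound len */
	memcpy(val, &s->regs[addr - DUMMY_MMIO_BASE], len);
}

static void dummy_write(struct kvm_io_device *this, gpa_t addr,
			int len, const void *val)
{
	struct dummy_state *s = this->private;

	memcpy(&s->regs[addr - DUMMY_MMIO_BASE], val, len);
}

static void dummy_destructor(struct kvm_io_device *this)
{
	/* invoked by kvm_io_bus_destroy() at VM teardown */
	kfree(this->private);
	kfree(this);
}

/* Wire the device up once per VM, e.g. after kvm_io_bus_init() in kvm_create_vm() */
static int dummy_register(struct kvm *kvm)
{
	struct kvm_io_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	struct dummy_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!dev || !s) {
		kfree(dev);
		kfree(s);
		return -ENOMEM;
	}

	dev->read       = dummy_read;
	dev->write      = dummy_write;
	dev->in_range   = dummy_in_range;
	dev->destructor = dummy_destructor;
	dev->private    = s;

	kvm_io_bus_register_dev(&kvm->mmio_bus, dev);
	return 0;
}

With a device registered this way, emulator_read_emulated() and emulator_write_emulated() above find it via vcpu_find_mmio_dev() and complete the access in the kernel instead of bouncing out to userspace (mmio_needed is never set). Note that the bus is a fixed array of NR_IOBUS_DEVS (6) slots searched linearly, and kvm_io_bus_register_dev() hits the BUG_ON() when the array is already full, so registration is intended for a handful of built-in devices rather than arbitrary hot-plug.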