Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d96eb2c6 authored by Alex Williamson and committed by Paolo Bonzini
Browse files

kvm/x86: Convert iommu_flags to iommu_noncoherent



Default to operating in coherent mode.  This simplifies the logic when
we switch to a model of registering and unregistering noncoherent I/O
with KVM.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ec53500f
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -476,7 +476,7 @@ struct kvm_arch {


	struct list_head assigned_dev_head;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	bool iommu_noncoherent;


	unsigned long irq_sources_bitmap;
	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
+1 −1
Original line number Original line Diff line number Diff line
@@ -564,7 +564,7 @@ struct kvm_arch {


	struct list_head assigned_dev_head;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	bool iommu_noncoherent;
	struct kvm_pic *vpic;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	struct kvm_pit *vpit;
+1 −1
Original line number Original line Diff line number Diff line
@@ -7446,7 +7446,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
	if (is_mmio)
	if (is_mmio)
		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
	else if (vcpu->kvm->arch.iommu_domain &&
	else if (vcpu->kvm->arch.iommu_domain &&
		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
		 vcpu->kvm->arch.iommu_noncoherent)
		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
		      VMX_EPT_MT_EPTE_SHIFT;
		      VMX_EPT_MT_EPTE_SHIFT;
	else
	else
+1 −1
Original line number Original line Diff line number Diff line
@@ -2719,7 +2719,7 @@ static void wbinvd_ipi(void *garbage)
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
{
	return vcpu->kvm->arch.iommu_domain &&
	return vcpu->kvm->arch.iommu_domain &&
		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
	       vcpu->kvm->arch.iommu_noncoherent;
}
}


void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+0 −3
Original line number Original line Diff line number Diff line
@@ -746,9 +746,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
int kvm_request_irq_source_id(struct kvm *kvm);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);


/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
Loading