Documentation/kernel-parameters.txt  +0 −5

@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
 	amd_iommu=	[HW,X86-64]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:
-			isolate - enable device isolation (each device, as far
-				  as possible, will get its own protection
-				  domain) [default]
-			share - put every device behind one IOMMU into the
-				same protection domain
 			fullflush - enable flushing of IO/TLB entries when
 				    they are unmapped. Otherwise they are
 				    flushed before they will be reused, which
arch/x86/include/asm/amd_iommu_types.h  +3 −0

@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>

@@ -140,6 +141,7 @@
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE    8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)

@@ -237,6 +239,7 @@ struct protection_domain {
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
+	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
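Note on CMD_BUFFER_UNINITIALIZED: the flag can share storage with the buffer size only because CMD_BUFFER_SIZE (8192) is a power of two, so bit 0 of cmd_buf_size is always clear and is free to carry state. The init code below sets the bit at allocation time and clears it once the hardware has been programmed; every consumer of the size must mask the bit off. A minimal standalone sketch of this low-bit-flag trick, assuming nothing beyond standard C (names are illustrative, not kernel API):

/* Sketch: stash a boolean state flag in the always-zero low bit of a
 * power-of-two size field, as the patch does with cmd_buf_size. */
#include <assert.h>
#include <stdio.h>

#define BUF_SIZE          8192UL /* power of two: bit 0 is free      */
#define BUF_UNINITIALIZED 1UL    /* hypothetical state flag in bit 0 */

int main(void)
{
	unsigned long buf_size = BUF_SIZE | BUF_UNINITIALIZED;

	/* Consumers must mask the flag off before using the size. */
	printf("usable size: %lu\n", buf_size & ~BUF_UNINITIALIZED);

	buf_size &= ~BUF_UNINITIALIZED;	/* hardware now programmed */
	assert(!(buf_size & BUF_UNINITIALIZED));
	return 0;
}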
arch/x86/kernel/amd_iommu.c  +14 −6

@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
 		return false;
 
 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;
 
 	devid = get_device_id(dev);

@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u32 tail, head;
 	u8 *target;
 
+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));

@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
 	struct dma_ops_domain *dma_dom;
 	u16 devid;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {
 
 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))

@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}

@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;

@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }

@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)

@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 		paddr += PAGE_SIZE;
 	}
 
+	mutex_unlock(&domain->api_lock);
+
 	return 0;
 }

@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}
 
 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
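The api_lock additions all follow one pattern: take the mutex before walking a multi-page range, release it after the last page-table update (and, on the unmap path, after the TLB flush), so two concurrent IOMMU-API callers cannot interleave partial updates of the same domain's page table. A rough userspace analogue of that pattern, using a hypothetical domain type and per-page helper in place of the kernel ones:

/* Sketch of the range-under-one-lock pattern used by
 * amd_iommu_map_range()/amd_iommu_unmap_range() above. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct domain {
	pthread_mutex_t api_lock;	/* analogue of protection_domain::api_lock */
};

/* Hypothetical per-page update; stands in for iommu_map_page(). */
static int map_one_page(struct domain *d, unsigned long iova,
			unsigned long paddr)
{
	(void)d; (void)iova; (void)paddr;
	return 0;
}

/* The whole range is mapped under a single mutex hold. */
static int map_range(struct domain *d, unsigned long iova,
		     unsigned long paddr, size_t npages)
{
	int ret = 0;
	size_t i;

	pthread_mutex_lock(&d->api_lock);
	for (i = 0; i < npages; ++i) {
		ret = map_one_page(d, iova, paddr);
		if (ret)
			break;
		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}
	pthread_mutex_unlock(&d->api_lock);

	return ret;
}

int main(void)
{
	struct domain dom = { .api_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("map_range: %d\n", map_range(&dom, 0x100000, 0x200000, 16));
	return 0;
}

A mutex rather than the existing domain spinlock fits this path if the per-page mapping may sleep (for example, to allocate page-table pages); the spinlock's own comment suggests it guards finer-grained page-table access.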
arch/x86/kernel/amd_iommu_init.c  +33 −15

@@ -138,9 +138,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume

@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0)
+	if (checksum != 0) {
 		/* ACPI table corrupt */
-		return -ENODEV;
+		amd_iommu_init_err = -ENODEV;
+		return 0;
+	}
 
 	p += IVRS_HEADER_LENGTH;

@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	if (cmd_buf == NULL)
 		return NULL;
 
-	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
 	return cmd_buf;
 }

@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 		    &entry, sizeof(entry));
 
 	amd_iommu_reset_cmd_buffer(iommu);
+	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
 	free_pages((unsigned long)iommu->cmd_buf,
-		   get_order(iommu->cmd_buf_size));
+		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */

@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 			    h->mmio_phys);
 
 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-			if (iommu == NULL)
-				return -ENOMEM;
+			if (iommu == NULL) {
+				amd_iommu_init_err = -ENOMEM;
+				return 0;
+			}
+
 			ret = init_iommu_one(iommu, h);
-			if (ret)
-				return ret;
+			if (ret) {
+				amd_iommu_init_err = ret;
+				return 0;
+			}
 			break;
 		default:
 			break;

@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
-	amd_iommu_initialized = true;
-
 	return 0;
 }

@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
 		return -ENODEV;
 
+	ret = amd_iommu_init_err;
+	if (ret)
+		goto out;
+
 	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
-	if (!amd_iommu_initialized)
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
 		goto free;
+	}
 
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
+		goto free;
+	}
+
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;

@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else

@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
 	amd_iommu_init_notifier();
 
-	enable_iommus();
-
 	if (iommu_pass_through)
 		goto out;

@@ -1315,6 +1332,7 @@ static int __init amd_iommu_init(void)
 	return ret;
 
 free:
+	disable_iommus();
 
 	amd_iommu_uninit_devices();
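The recurring amd_iommu_init_err dance exists because acpi_table_parse() handlers cannot usefully return error codes: a non-zero handler return is read as "table not found/not handled", not as a reason for failure. So the handlers stash the real errno in a file-scope __initdata variable, return 0, and amd_iommu_init() checks the variable after each parse pass. A compact sketch of the pattern, with a made-up walk_table() standing in for acpi_table_parse():

/* Sketch: propagate errors past a callback API that swallows them. */
#include <errno.h>
#include <stdio.h>

static int init_err;	/* analogue of amd_iommu_init_err */

/* Handler in the acpi_table_parse() style: returning non-zero would be
 * read as "table not found", so real errors go into init_err instead. */
static int parse_handler(const char *table)
{
	if (table[0] == '\0') {
		init_err = -ENODEV;	/* remember the real error */
		return 0;		/* ...but report "handled"  */
	}
	return 0;
}

/* Hypothetical stand-in for acpi_table_parse(). */
static int walk_table(const char *table, int (*handler)(const char *))
{
	return handler(table);
}

int main(void)
{
	if (walk_table("", parse_handler) != 0)
		return 1;		/* table missing entirely */

	if (init_err) {			/* check the side channel */
		fprintf(stderr, "init failed: %d\n", init_err);
		return 1;
	}
	return 0;
}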
arch/x86/kernel/aperture_64.c  +14 −1

@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
+		u32 ctl;
 
 		bus = bus_dev_ranges[i].bus;
 		dev_base = bus_dev_ranges[i].dev_base;

@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
 			gart_iommu_aperture = 1;
 			x86_init.iommu.iommu_init = gart_iommu_init;
 
-			aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+			ctl = read_pci_config(bus, slot, 3,
+					      AMD64_GARTAPERTURECTL);
+
+			/*
+			 * Before we do anything else disable the GART. It may
+			 * still be enabled if we boot into a crash-kernel here.
+			 * Reconfiguring the GART while it is enabled could have
+			 * unknown side-effects.
+			 */
+			ctl &= ~GARTEN;
+			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+			aper_order = (ctl >> 1) & 7;
 			aper_size = (32 * 1024 * 1024) << aper_order;
 			aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
 			aper_base <<= 25;
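The GART hunk is a quiesce-before-reconfigure fix for the kdump case: a crash kernel inherits whatever the crashed kernel left in the aperture control register, so the enable bit is cleared before the aperture order and base are derived or rewritten. A small sketch of the read-modify-write pattern, using a plain variable in place of the PCI config register (the GARTEN bit position below is an assumption for illustration):

/* Sketch: disable a unit before trusting or changing its config. */
#include <stdint.h>
#include <stdio.h>

#define GARTEN (1u << 0)	/* enable bit; position assumed here */

/* Stand-in for the config register, left enabled by a crashed kernel. */
static uint32_t ctl_reg = GARTEN | (3u << 1);

static uint32_t read_ctl(void)        { return ctl_reg; }
static void     write_ctl(uint32_t v) { ctl_reg = v; }

int main(void)
{
	uint32_t ctl = read_ctl();

	/* Quiesce the unit first; reconfiguring while enabled is unsafe. */
	ctl &= ~GARTEN;
	write_ctl(ctl);

	/* Only now derive settings from the register contents. */
	printf("aper_order = %u\n", (ctl >> 1) & 7);
	return 0;
}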