Documentation/kernel-parameters.txt +0 −5

@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
 	amd_iommu=	[HW,X86-84]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:
-			isolate - enable device isolation (each device, as far
-				  as possible, will get its own protection
-				  domain) [default]
-			share - put every device behind one IOMMU into the
-				same protection domain
 			fullflush - enable flushing of IO/TLB entries when
 				    they are unmapped. Otherwise they are
 				    flushed before they will be reused, which
arch/x86/include/asm/amd_iommu_types.h +3 −0

@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H

 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>

@@ -140,6 +141,7 @@
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE  8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)

@@ -237,6 +239,7 @@
 struct protection_domain {
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
+	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
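A note on the CMD_BUFFER_UNINITIALIZED flag added above: CMD_BUFFER_SIZE is 8192, a power of two whose low-order bits are always zero, so bit 0 of cmd_buf_size is free to double as an "uninitialized" marker alongside the size itself. Below is a minimal standalone sketch of the encoding (plain C, outside the kernel; only the two macros come from the patch, everything else is illustrative):

#include <assert.h>
#include <stdio.h>

#define CMD_BUFFER_SIZE          8192 /* power of two: low bits are zero */
#define CMD_BUFFER_UNINITIALIZED 1    /* stashed in the free low bit */

int main(void)
{
	unsigned long cmd_buf_size;

	/* as in alloc_command_buffer(): mark the buffer not yet live */
	cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
	assert(cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

	/* as in iommu_enable_command_buffer(): clear the flag once enabled */
	cmd_buf_size &= ~(unsigned long)CMD_BUFFER_UNINITIALIZED;
	assert(!(cmd_buf_size & CMD_BUFFER_UNINITIALIZED));

	/* the real size is recovered by masking the flag back off */
	printf("size = %lu\n", cmd_buf_size & ~(unsigned long)CMD_BUFFER_UNINITIALIZED);
	return 0;
}

This is why free_command_buffer() masks the flag off before handing the value to get_order(), and why __iommu_queue_command() can cheaply WARN if a command is queued before iommu_enable_command_buffer() has run.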
arch/x86/kernel/amd_iommu.c +14 −6

@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
 		return false;

 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;

 	devid = get_device_id(dev);

@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u32 tail, head;
 	u8 *target;

+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));

@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
 	struct dma_ops_domain *dma_dom;
 	u16 devid;

-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {

 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))

@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;

-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}

@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;

 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;

@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 	free_pagetable(domain);

-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);

 	dom->priv = NULL;
 }

@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;

+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)

@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 		paddr += PAGE_SIZE;
 	}

+	mutex_unlock(&domain->api_lock);
+
 	return 0;
 }

@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;

+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}

 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }

 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
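Why api_lock is a mutex rather than the existing domain spinlock is worth spelling out: the map path builds page-table levels on demand, and those allocations can sleep (an assumption about iommu_map_page() internals, but consistent with holding a sleeping lock across the loop), so a spinlock cannot be held there. The shape of the serialization is sketched below in standalone userspace C, with a pthread mutex standing in for the kernel mutex and map_range() as a hypothetical stand-in for amd_iommu_map_range():

#include <pthread.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct domain {
	pthread_mutex_t api_lock; /* plays the role of domain->api_lock */
	unsigned long mapped;     /* toy stand-in for the page table */
};

static int map_one_page(struct domain *d, unsigned long iova)
{
	d->mapped++;              /* a real driver edits page tables here */
	return 0;
}

static int map_range(struct domain *d, unsigned long iova, unsigned long npages)
{
	unsigned long i;
	int ret = 0;

	/* hold the lock across the whole range so two concurrent
	 * map/unmap calls cannot interleave their page-table edits */
	pthread_mutex_lock(&d->api_lock);
	for (i = 0; i < npages && !ret; ++i, iova += PAGE_SIZE)
		ret = map_one_page(d, iova);
	pthread_mutex_unlock(&d->api_lock);

	return ret;
}

int main(void)
{
	struct domain d = { PTHREAD_MUTEX_INITIALIZER, 0 };

	map_range(&d, 0x100000, 16);
	printf("pages mapped: %lu\n", d.mapped);
	return 0;
}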
arch/x86/kernel/amd_iommu_init.c +33 −15

@@ -138,9 +138,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;

 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;

 /*
  * List of protection domains - used during resume

@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0)
+	if (checksum != 0) {
 		/* ACPI table corrupt */
-		return -ENODEV;
+		amd_iommu_init_err = -ENODEV;
+		return 0;
+	}

 	p += IVRS_HEADER_LENGTH;

@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	if (cmd_buf == NULL)
 		return NULL;

-	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

 	return cmd_buf;
 }

@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 		    &entry, sizeof(entry));

 	amd_iommu_reset_cmd_buffer(iommu);
+	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }

 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
 	free_pages((unsigned long)iommu->cmd_buf,
-		   get_order(iommu->cmd_buf_size));
+		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }

 /* allocates the memory where the IOMMU will log its events to */

@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 			    h->mmio_phys);

 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-			if (iommu == NULL)
-				return -ENOMEM;
+			if (iommu == NULL) {
+				amd_iommu_init_err = -ENOMEM;
+				return 0;
+			}
+
 			ret = init_iommu_one(iommu, h);
-			if (ret)
-				return ret;
+			if (ret) {
+				amd_iommu_init_err = ret;
+				return 0;
+			}
 			break;
 		default:
 			break;

@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);

-	amd_iommu_initialized = true;
-
 	return 0;
 }

@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
 		return -ENODEV;

+	ret = amd_iommu_init_err;
+	if (ret)
+		goto out;
+
 	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;

-	if (!amd_iommu_initialized)
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
 		goto free;
+	}

 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;

+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
+		goto free;
+	}
+
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;

@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;

+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else

@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)

 	amd_iommu_init_notifier();

-	enable_iommus();
-
 	if (iommu_pass_through)
 		goto out;

@@ -1315,6 +1332,7 @@ static int __init amd_iommu_init(void)
 	return ret;

 free:
+	disable_iommus();

 	amd_iommu_uninit_devices();
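The recurring pattern in this file deserves a note: the handlers passed to acpi_table_parse() now return 0 unconditionally and record failures in amd_iommu_init_err instead, with amd_iommu_init() re-checking the variable after each parse pass. This matches the new comment ("The ACPI table parsing functions set this variable on an error") and reflects that the parse core does not usefully propagate the handler's return code. A standalone sketch of the shape, with parse_table() and handler_that_may_fail() as hypothetical stand-ins for acpi_table_parse() and its handlers:

#include <errno.h>
#include <stdio.h>

static int init_err; /* plays the role of amd_iommu_init_err */

/* the table-walking core swallows the callback's return code... */
static void parse_table(int (*handler)(void))
{
	(void)handler();
}

/* ...so the callback reports failure through the global instead */
static int handler_that_may_fail(void)
{
	init_err = -ENODEV; /* e.g. checksum mismatch: table corrupt */
	return 0;           /* return value is ignored anyway */
}

int main(void)
{
	parse_table(handler_that_may_fail);
	if (init_err) { /* caller re-checks after every parse pass */
		fprintf(stderr, "init failed: %d\n", init_err);
		return 1;
	}
	return 0;
}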
arch/x86/kernel/crash.c +0 −6

@@ -27,7 +27,6 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/x86_init.h>

 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
 	hpet_disable();
 #endif
-
-#ifdef CONFIG_X86_64
-	x86_platform.iommu_shutdown();
-#endif
-
 	crash_save_cpu(regs, safe_smp_processor_id());
 }