arch/ia64/hp/common/hwsw_iommu.c  +11 −2

@@ -17,7 +17,7 @@
 #include <asm/machvec.h>
 
 /* swiotlb declarations & definitions: */
-extern void swiotlb_init_with_default_size (size_t size);
+extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
 extern ia64_mv_dma_map_single swiotlb_map_single;
@@ -67,7 +67,16 @@ void
 hwsw_init (void)
 {
         /* default to a smallish 2MB sw I/O TLB */
-        swiotlb_init_with_default_size (2 * (1<<20));
+        if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
+#ifdef CONFIG_IA64_GENERIC
+                /* Better to have normal DMA than panic */
+                printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
+                       " reverting to hpzx1 platform vector\n", __FUNCTION__);
+                machvec_init("hpzx1");
+#else
+                panic("Unable to initialize software I/O TLB services");
+#endif
+        }
 }
 
 void *
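Note on the interface change above: the boot-time swiotlb_init_with_default_size() returned void and treated allocation failure as fatal, while the late-init variant returns 0 on success or -ENOMEM, so every caller must pick a fallback. A minimal user-space sketch of the resulting call shape; try_late_init() is a hypothetical stand-in for swiotlb_late_init_with_default_size():

    /*
     * Sketch only: the late-init routine reports failure instead of
     * assuming success, and the caller degrades rather than dying.
     */
    #include <errno.h>
    #include <stdio.h>

    static int try_late_init(size_t size)
    {
            return size ? 0 : -ENOMEM;      /* pretend the init succeeds */
    }

    int main(void)
    {
            if (try_late_init(2 * (1 << 20)) != 0) {
                    /* analogue of machvec_init("hpzx1"): degrade, don't die */
                    fprintf(stderr, "no bounce buffers; direct DMA only\n");
                    return 1;
            }
            puts("2MB software I/O TLB ready");
            return 0;
    }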
arch/ia64/hp/common/sba_iommu.c  +32 −13

@@ -2028,9 +2028,40 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 static int __init
 sba_init(void)
 {
+        if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
+                return 0;
+
         acpi_bus_register_driver(&acpi_sba_ioc_driver);
-        if (!ioc_list)
+        if (!ioc_list) {
+#ifdef CONFIG_IA64_GENERIC
+                extern int swiotlb_late_init_with_default_size (size_t size);
+
+                /*
+                 * If we didn't find something sba_iommu can claim, we
+                 * need to setup the swiotlb and switch to the dig machvec.
+                 */
+                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
+                        panic("Unable to find SBA IOMMU or initialize "
+                              "software I/O TLB: Try machvec=dig boot option");
+                machvec_init("dig");
+#else
+                panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
+#endif
                 return 0;
+        }
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
+        /*
+         * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
+         * buffer setup to support devices with smaller DMA masks than
+         * sba_iommu can handle.
+         */
+        if (ia64_platform_is("hpzx1_swiotlb")) {
+                extern void hwsw_init(void);
+
+                hwsw_init();
+        }
+#endif
+
 #ifdef CONFIG_PCI
         {
@@ -2048,18 +2079,6 @@ sba_init(void)
 
 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
 
-extern void dig_setup(char**);
-/*
- * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
- * so we use the platform_setup hook to fix it up.
- */
-void __init
-sba_setup(char **cmdline_p)
-{
-        MAX_DMA_ADDRESS = ~0UL;
-        dig_setup(cmdline_p);
-}
-
 static int __init
 nosbagart(char *str)
 {
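The effect of the sba_init() rework on a CONFIG_IA64_GENERIC kernel: the ACPI probe either finds an SBA IOMMU (ioc_list becomes non-empty), or the code builds a 64MB software I/O TLB and switches to the dig machine vector; only if both fail does it panic. Dropping sba_setup() also means hpzx1 now uses plain dig_setup() at platform-setup time, which is what the header changes below follow from. A compressed, hypothetical user-space sketch of that decision chain; probe_sba() and init_swiotlb() stand in for the ACPI probe and swiotlb_late_init_with_default_size():

    #include <stdio.h>
    #include <stdlib.h>

    static int probe_sba(void)           { return 0; /* pretend: no IOMMU */ }
    static int init_swiotlb(size_t size)
    {
            /* buffer intentionally kept for process lifetime */
            return malloc(size) ? 0 : -1;
    }

    int main(void)
    {
            if (probe_sba()) {
                    puts("SBA IOMMU claimed: hardware I/O TLB in use");
                    return 0;
            }
            if (init_swiotlb(64 * (1 << 20)) != 0)
                    abort();        /* kernel analogue: panic(...) */
            puts("no SBA IOMMU: 64MB swiotlb + dig machine vector");
            return 0;
    }

This ordering works because subsys_initcall runs after ACPI is up but before device drivers probe, so the bounce pool can come from the regular page allocator rather than bootmem.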
arch/ia64/lib/swiotlb.c  +102 −0

@@ -49,6 +49,15 @@
  */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -154,6 +163,99 @@ swiotlb_init (void)
         swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+        unsigned long i, req_nslabs = io_tlb_nslabs;
+        unsigned int order;
+
+        if (!io_tlb_nslabs) {
+                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+        }
+
+        /*
+         * Get IO TLB memory from the low pages
+         */
+        order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+        io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+                io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                        order);
+                if (io_tlb_start)
+                        break;
+                order--;
+        }
+
+        if (!io_tlb_start)
+                goto cleanup1;
+
+        if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+                io_tlb_nslabs = SLABS_PER_PAGE << order;
+        }
+        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+        memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+        /*
+         * Allocate and initialize the free list array.  This array is used
+         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+         * between io_tlb_start and io_tlb_end.
+         */
+        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                                      get_order(io_tlb_nslabs * sizeof(int)));
+        if (!io_tlb_list)
+                goto cleanup2;
+
+        for (i = 0; i < io_tlb_nslabs; i++)
+                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+        io_tlb_index = 0;
+
+        io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+                                   get_order(io_tlb_nslabs * sizeof(char *)));
+        if (!io_tlb_orig_addr)
+                goto cleanup3;
+
+        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+        /*
+         * Get the overflow emergency buffer
+         */
+        io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                  get_order(io_tlb_overflow));
+        if (!io_tlb_overflow_buffer)
+                goto cleanup4;
+
+        printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+               "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+               virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+        return 0;
+
+cleanup4:
+        free_pages((unsigned long)io_tlb_orig_addr,
+                   get_order(io_tlb_nslabs * sizeof(char *)));
+        io_tlb_orig_addr = NULL;
+cleanup3:
+        free_pages((unsigned long)io_tlb_list,
+                   get_order(io_tlb_nslabs * sizeof(int)));
+        io_tlb_list = NULL;
+        io_tlb_end = NULL;
+cleanup2:
+        free_pages((unsigned long)io_tlb_start, order);
+        io_tlb_start = NULL;
+cleanup1:
+        io_tlb_nslabs = req_nslabs;
+        return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
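The core of the late-init path above is the allocation back-off loop: ask the page allocator for the full power-of-two block, then halve the order until an allocation succeeds or the pool would shrink below IO_TLB_MIN_SLABS (the 1MB floor). The sketch below reproduces that loop in user space, with malloc() standing in for __get_free_pages(); the page size and starting order are illustrative, not taken from the patch:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT       14             /* 16KB pages, common on ia64 */
    #define IO_TLB_SHIFT     11             /* 2KB bounce slabs */
    #define SLABS_PER_PAGE   (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
    #define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)   /* 1MB floor */

    int main(void)
    {
            unsigned int order = 12;        /* 2^12 pages = 64MB first try */
            char *start = NULL;

            while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                    start = malloc((size_t)1 << (PAGE_SHIFT + order));
                    if (start)
                            break;          /* got a contiguous block */
                    order--;                /* halve the request and retry */
            }
            if (!start)
                    return 1;               /* analogue of the -ENOMEM path */

            printf("allocated order-%u block (%lu slabs)\n",
                   order, (unsigned long)SLABS_PER_PAGE << order);
            free(start);
            return 0;
    }

Note the kernel passes __GFP_NOWARN on each attempt: failures partway down the back-off are expected and handled, so there is no point logging them.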
include/asm-ia64/machvec_hpzx1.h  +10 −11

@@ -2,7 +2,6 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_setup_t sba_setup;
 extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent sba_free_coherent;
 extern ia64_mv_dma_map_single sba_map_single;
@@ -20,7 +19,7 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  * the macros are used directly.
  */
 #define platform_name                   "hpzx1"
-#define platform_setup                  sba_setup
+#define platform_setup                  dig_setup
 #define platform_dma_init               machvec_noop
 #define platform_dma_alloc_coherent     sba_alloc_coherent
 #define platform_dma_free_coherent      sba_free_coherent
include/asm-ia64/machvec_hpzx1_swiotlb.h  +1 −2

@@ -2,7 +2,6 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_init hwsw_init;
 extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
 extern ia64_mv_dma_free_coherent hwsw_free_coherent;
 extern ia64_mv_dma_map_single hwsw_map_single;
@@ -26,7 +25,7 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
 #define platform_name                   "hpzx1_swiotlb"
 #define platform_setup                  dig_setup
-#define platform_dma_init               hwsw_init
+#define platform_dma_init               machvec_noop
 #define platform_dma_alloc_coherent     hwsw_alloc_coherent
 #define platform_dma_free_coherent      hwsw_free_coherent
 #define platform_dma_map_single         hwsw_map_single
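For context on the two header changes: ia64 dispatches platform-specific operations through a machine-vector table of function pointers, and these diffs simply repoint two slots — platform_setup to the stock dig_setup, and platform_dma_init to machvec_noop — now that hwsw_init() is invoked later, from sba_init(). A simplified, hypothetical model of that indirection (the real struct ia64_machine_vector has many more slots; only the names mirror the macros above):

    #include <stdio.h>

    struct machvec {
            const char *name;
            void (*setup)(char **cmdline);
            void (*dma_init)(void);
    };

    static void dig_setup_stub(char **cmdline) { (void)cmdline; }
    static void machvec_noop_stub(void) { /* DMA init deferred to sba_init() */ }

    static const struct machvec hpzx1_swiotlb_mv = {
            .name     = "hpzx1_swiotlb",
            .setup    = dig_setup_stub,     /* was already dig_setup */
            .dma_init = machvec_noop_stub,  /* was hwsw_init before this patch */
    };

    int main(void)
    {
            char *cmdline = NULL;

            hpzx1_swiotlb_mv.setup(&cmdline);   /* generic code calls through */
            hpzx1_swiotlb_mv.dma_init();        /* ...the table, not directly */
            printf("platform %s: early DMA init is now a no-op\n",
                   hpzx1_swiotlb_mv.name);
            return 0;
    }

Moving hwsw_init() out of platform_dma_init matters because the old hook ran early enough that the bounce buffer had to come from boot-time allocation and a failure was fatal; calling it from sba_init() lets the 2MB pool come from __get_free_pages() and lets failure fall back gracefully, as shown in the hwsw_iommu.c hunk above.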