arch/arm64/mm/dma-mapping.c +8 −11

@@ -1153,16 +1153,6 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	set_bit(PG_dcache_clean, &page->flags);
 }
 
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 /* IOMMU */
 
 static void __dma_clear_buffer(struct page *page, size_t size,
@@ -1913,7 +1903,6 @@ const struct dma_map_ops iommu_ops = {
 	.map_resource		= arm_iommu_dma_map_resource,
 	.unmap_resource		= arm_iommu_dma_unmap_resource,
-	.set_dma_mask		= arm_dma_set_mask,
 	.mapping_error		= arm_iommu_mapping_error,
 };
@@ -2006,6 +1995,7 @@ int arm_iommu_attach_device(struct device *dev,
 	int err;
 	int s1_bypass = 0, is_fast = 0;
 	struct iommu_group *group;
+	dma_addr_t iova_end;
 
 	group = dev->iommu_group;
 	if (!group) {
@@ -2018,6 +2008,13 @@ int arm_iommu_attach_device(struct device *dev,
 		return -EINVAL;
 	}
 
+	iova_end = mapping->base + (mapping->bits << PAGE_SHIFT) - 1;
+	if (iova_end > dma_get_mask(dev)) {
+		dev_err(dev, "dma mask %llx too small for requested iova range %pad to %pad\n",
+			dma_get_mask(dev), &mapping->base, &iova_end);
+		return -EINVAL;
+	}
+
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
 	if (is_fast)
 		return fast_smmu_attach_device(dev, mapping);
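Note (not part of the diff): with .set_dma_mask dropped from iommu_ops, devices fall back to the generic dma_set_mask() path, and the new check in arm_iommu_attach_device() instead rejects any attach whose IOVA window extends past dma_get_mask(dev). A minimal caller sketch of the expected ordering follows; example_probe, the 4 GiB window, and the header paths are assumptions about a typical driver in this tree, while arm_iommu_create_mapping()/arm_iommu_attach_device() are the existing ARM IOMMU mapping API.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>	/* header path assumed from this tree */

static int example_probe(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* Hypothetical window: 4 GiB of IOVA space starting at 0. */
	mapping = arm_iommu_create_mapping(dev->bus, 0, SZ_4G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/*
	 * The mask must cover the window before attaching: here
	 * iova_end = 0 + SZ_4G - 1 = DMA_BIT_MASK(32), so the new
	 * iova_end > dma_get_mask(dev) check passes. The same window
	 * based above zero would now fail the attach with -EINVAL.
	 */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		goto release;

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		goto release;

	return 0;

release:
	arm_iommu_release_mapping(mapping);
	return ret;
}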
drivers/iommu/dma-mapping-fast.c +16 −7

@@ -611,6 +611,21 @@ static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t dma_addr,
+				 size_t size, unsigned long attrs)
+{
+	unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct vm_struct *area;
+
+	area = find_vm_area(cpu_addr);
+	if (!area || !area->pages)
+		return -EINVAL;
+
+	return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size,
+					 GFP_KERNEL);
+}
+
 static dma_addr_t fast_smmu_dma_map_resource(
 	struct device *dev, phys_addr_t phys_addr,
 	size_t size, enum dma_data_direction dir,
@@ -659,12 +674,6 @@ static void fast_smmu_dma_unmap_resource(
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static int fast_smmu_dma_supported(struct device *dev, u64 mask)
-{
-	return mask <= 0xffffffff;
-}
-
 static int fast_smmu_mapping_error(struct device *dev,
 				   dma_addr_t dma_addr)
 {
@@ -708,6 +717,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
 	.alloc			= fast_smmu_alloc,
 	.free			= fast_smmu_free,
 	.mmap			= fast_smmu_mmap_attrs,
+	.get_sgtable		= fast_smmu_get_sgtable,
 	.map_page		= fast_smmu_map_page,
 	.unmap_page		= fast_smmu_unmap_page,
 	.sync_single_for_cpu	= fast_smmu_sync_single_for_cpu,
@@ -718,7 +728,6 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
 	.sync_sg_for_device	= fast_smmu_sync_sg_for_device,
 	.map_resource		= fast_smmu_dma_map_resource,
 	.unmap_resource		= fast_smmu_dma_unmap_resource,
-	.dma_supported		= fast_smmu_dma_supported,
 	.mapping_error		= fast_smmu_mapping_error,
 };
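Note (not part of the diff): two behavioral changes here. Dropping fast_smmu_dma_supported() removes the hard 32-bit cap on the DMA mask (it returned mask <= 0xffffffff), which is consistent with the new attach-time range check above. Adding .get_sgtable lets dma_get_sgtable() work for fast-smmu devices: the op looks up the vmalloc area backing a coherent allocation and builds an sg_table from its pages array. A hypothetical usage sketch follows; example_export and the importer hand-off are assumptions, while dma_alloc_coherent()/dma_get_sgtable() are the standard DMA API.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_export(struct device *dev, size_t size)
{
	struct sg_table sgt;
	dma_addr_t iova;
	void *cpu_addr;
	int ret;

	/* Allocation goes through fast_smmu_alloc() for this device. */
	cpu_addr = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* Dispatches to the new fast_smmu_get_sgtable() op. */
	ret = dma_get_sgtable(dev, &sgt, cpu_addr, iova, size);
	if (ret)
		goto free;

	/* ... hand &sgt to an importer, e.g. a dma-buf exporter ... */

	sg_free_table(&sgt);
free:
	dma_free_coherent(dev, size, cpu_addr, iova);
	return ret;
}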