drivers/iommu/dma-iommu.c (+17 −0)

@@ -341,6 +341,23 @@ int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
 	return 0;
 }
 
+/*
+ * Should be called prior to using dma-apis.
+ */
+int iommu_dma_enable_best_fit_algo(struct device *dev)
+{
+	struct iommu_domain *domain;
+	struct iova_domain *iovad;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain || !domain->iova_cookie)
+		return -EINVAL;
+
+	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	iovad->best_fit = true;
+	return 0;
+}
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
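For orientation, here is a minimal sketch of how a driver might opt in; the probe function and device names are hypothetical and not part of the patch. The call only succeeds once the device is attached to a DMA-API IOMMU domain (so the iova_cookie exists), and it must happen before the first dma_map_*() call populates the IOVA space:

/* Hypothetical probe path -- illustration only, assuming a platform
 * device whose IOMMU domain has already been set up by the DMA layer.
 */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Flip the domain's allocator to best-fit before any mapping exists. */
	ret = iommu_dma_enable_best_fit_algo(dev);
	if (ret)
		dev_warn(dev, "best-fit IOVA allocation not enabled: %d\n", ret);

	/* dma_map_single()/dma_map_sg() from here on use the best-fit path. */
	return 0;
}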
drivers/iommu/iova.c (+71 −2)

@@ -61,6 +61,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+	iovad->best_fit = false;
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);

@@ -248,6 +249,69 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	return 0;
 }
 
+static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
+		unsigned long size, unsigned long limit_pfn,
+		struct iova *new, bool size_aligned)
+{
+	struct rb_node *curr, *prev;
+	struct iova *curr_iova, *prev_iova;
+	unsigned long flags;
+	unsigned long align_mask = ~0UL;
+	struct rb_node *candidate_rb_parent;
+	unsigned long new_pfn, candidate_pfn = ~0UL;
+	unsigned long gap, candidate_gap = ~0UL;
+
+	if (size_aligned)
+		align_mask <<= limit_align(iovad, fls_long(size - 1));
+
+	/* Walk the tree backwards */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	curr = &iovad->anchor.node;
+	prev = rb_prev(curr);
+	for (; prev; curr = prev, prev = rb_prev(curr)) {
+		curr_iova = rb_entry(curr, struct iova, node);
+		prev_iova = rb_entry(prev, struct iova, node);
+
+		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+		new_pfn = (limit_pfn - size) & align_mask;
+		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
+		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
+				&& (gap < candidate_gap)) {
+			candidate_gap = gap;
+			candidate_pfn = new_pfn;
+			candidate_rb_parent = curr;
+			if (gap == size)
+				goto insert;
+		}
+	}
+
+	curr_iova = rb_entry(curr, struct iova, node);
+	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+	new_pfn = (limit_pfn - size) & align_mask;
+	gap = curr_iova->pfn_lo - iovad->start_pfn;
+	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
+			gap < candidate_gap) {
+		candidate_gap = gap;
+		candidate_pfn = new_pfn;
+		candidate_rb_parent = curr;
+	}
+
+insert:
+	if (candidate_pfn == ~0UL) {
+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* pfn_lo will point to size aligned address if size_aligned is set */
+	new->pfn_lo = candidate_pfn;
+	new->pfn_hi = new->pfn_lo + size - 1;
+
+	/* If we have 'prev', it's a valid place to start the insertion. */
+	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return 0;
+}
+
 static struct kmem_cache *iova_cache;
 static unsigned int iova_cache_users;
 static DEFINE_MUTEX(iova_cache_mutex);

@@ -323,8 +387,13 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
-	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
-			new_iova, size_aligned);
+	if (iovad->best_fit) {
+		ret = __alloc_and_insert_iova_best_fit(iovad, size,
+				limit_pfn + 1, new_iova, size_aligned);
+	} else {
+		ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
+				new_iova, size_aligned);
+	}
 
 	if (ret) {
 		free_iova_mem(new_iova);
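To make the selection policy concrete, here is a small user-space model of the same idea; it is a sketch of the algorithm, not the kernel code: a sorted array stands in for the rb-tree, the size_aligned mask and locking are omitted, and all names are invented for illustration. Gaps are scanned top-down, the smallest gap that still fits wins, and an exact fit ends the scan early, just as the goto insert does above.

#include <stdio.h>

/* An allocated [lo, hi] pfn range; the array is sorted by address,
 * standing in for the rb-tree the kernel walks with rb_prev().
 */
struct range { unsigned long lo, hi; };

/*
 * Toy model of __alloc_and_insert_iova_best_fit(): examine every free
 * gap between neighbouring allocations (plus the gap down to start_pfn),
 * keep the smallest gap that can hold 'size', and place the allocation
 * at the top of that gap, as the kernel's top-down allocator does.
 * Returns the chosen pfn_lo, or ~0UL if nothing fits.
 */
static unsigned long best_fit_pfn(const struct range *r, int n,
				  unsigned long start_pfn,
				  unsigned long limit_pfn,
				  unsigned long size)
{
	unsigned long candidate = ~0UL, candidate_gap = ~0UL;

	/* Walk gaps from highest to lowest, like the rb_prev() loop. */
	for (int i = n; i >= 0; i--) {
		unsigned long gap_hi = (i == n) ? limit_pfn : r[i].lo - 1;
		unsigned long gap_lo = (i == 0) ? start_pfn : r[i - 1].hi + 1;
		unsigned long gap;

		if (gap_hi < gap_lo || gap_hi - gap_lo + 1 < size)
			continue;

		gap = gap_hi - gap_lo + 1;
		if (gap < candidate_gap) {
			candidate_gap = gap;
			candidate = gap_hi - size + 1;	/* top of the gap */
			if (gap == size)		/* exact fit: stop early */
				break;
		}
	}
	return candidate;
}

int main(void)
{
	/* Free gaps here are [0..9], [30..34] and [85..99]. */
	struct range used[] = { { 10, 29 }, { 35, 84 } };

	printf("pfn_lo = %lu\n", best_fit_pfn(used, 2, 0, 99, 5));
	return 0;
}

Running it prints pfn_lo = 30: a top-down first-fit would have carved 95..99 out of the large high gap, while best-fit consumes the exact-size gap at 30..34 and leaves the large gap intact for bigger requests, which is the fragmentation argument behind the patch.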
include/linux/dma-iommu.h (+7 −0)

@@ -85,6 +85,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
 		u64 size);
 
+int iommu_dma_enable_best_fit_algo(struct device *dev);
+
 #else
 
 struct iommu_domain;

@@ -124,6 +126,11 @@ static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
 	return -ENODEV;
 }
 
+static inline int iommu_dma_enable_best_fit_algo(struct device *dev)
+{
+	return -ENODEV;
+}
+
 #endif	/* CONFIG_IOMMU_DMA */
 #endif	/* __KERNEL__ */
 #endif	/* __DMA_IOMMU_H */
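One note on the header: because the !CONFIG_IOMMU_DMA branch supplies a static inline stub returning -ENODEV, callers need no #ifdef of their own. A hypothetical caller (illustration, not from the patch) can treat the feature as best-effort:

/* Builds unchanged whether CONFIG_IOMMU_DMA is set or not; with it
 * disabled, the stub above returns -ENODEV and the driver simply
 * keeps the default first-fit allocator.
 */
static void foo_set_iova_policy(struct device *dev)
{
	if (iommu_dma_enable_best_fit_algo(dev))
		dev_dbg(dev, "using default IOVA allocator\n");
}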
include/linux/iova.h (+1 −0)

@@ -97,6 +97,7 @@ struct iova_domain {
 						   flush-queues */
 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
 						   when not */
+	bool best_fit;
 };
 
 static inline unsigned long iova_size(struct iova *iova)