Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d24f9c69 authored by Milton Miller, committed by Benjamin Herrenschmidt
Browse files

powerpc: Use the newly added get_required_mask dma_map_ops hook



Now that the generic code has dma_map_ops set, instead of having a
messy ifdef & if block in the base dma_get_required_mask hook push
the computation into the dma ops.

If the ops fail to set the get_required_mask hook, default to the
width of dma_addr_t.

This also corrects ibmebus's ibmebus_dma_supported to require a
64-bit mask.  I doubt anything is checking or setting the dma mask
on that bus.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 3a8f7558
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -37,4 +37,6 @@ struct pdev_archdata {
	u64 dma_mask;
};

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#endif /* _ASM_POWERPC_DEVICE_H */
+0 −3
Original line number Diff line number Diff line
@@ -20,8 +20,6 @@

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
@@ -71,7 +69,6 @@ static inline unsigned long device_to_mask(struct device *dev)
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
extern u64 dma_iommu_get_required_mask(struct device *dev);
#endif
extern struct dma_map_ops dma_direct_ops;

+2 −1
Original line number Diff line number Diff line
@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
		return 1;
}

u64 dma_iommu_get_required_mask(struct device *dev)
static u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;
@@ -111,5 +111,6 @@ struct dma_map_ops dma_iommu_ops = {
	.dma_supported	= dma_iommu_dma_supported,
	.map_page	= dma_iommu_map_page,
	.unmap_page	= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
EXPORT_SYMBOL(dma_iommu_ops);
+16 −0
Original line number Diff line number Diff line
@@ -24,6 +24,21 @@

unsigned int ppc_swiotlb_enable;

/*
 * Report the smallest DMA mask that covers every address this device
 * may need to reach: the top of RAM, clipped to the device's direct
 * DMA window (when one is configured), shifted by its DMA offset and
 * rounded up to a power-of-two boundary minus one.
 */
static u64 swiotlb_powerpc_get_required(struct device *dev)
{
	u64 direct_limit = dev->archdata.max_direct_dma_addr;
	u64 top, required;

	top = memblock_end_of_DRAM();
	if (direct_limit && top > direct_limit)
		top = direct_limit;
	top += get_dma_offset(dev);

	/* All-ones mask wide enough to express "top". */
	required = 1ULL << (fls64(top) - 1);
	return required + (required - 1);
}

/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM.  Since
@@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = {
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
	.get_required_mask = swiotlb_powerpc_get_required,
};

void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
+16 −25
Original line number Diff line number Diff line
@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
#endif
}

/*
 * For direct (non-IOMMU) mapping the device must address all of RAM
 * plus its DMA offset; return the all-ones mask just wide enough to
 * reach that highest address.
 */
static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 top_addr = memblock_end_of_DRAM() + get_dma_offset(dev);
	u64 required = 1ULL << (fls64(top_addr) - 1);

	return required + (required - 1);
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
	.get_required_mask	= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu 		= dma_direct_sync_single,
	.sync_single_for_device 	= dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
u64 dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	u64 mask, end = 0;

	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
	if (unlikely(dma_ops == NULL))
		return 0;

#ifdef CONFIG_PPC64
	else if (dma_ops == &dma_iommu_ops)
		return dma_iommu_get_required_mask(dev);
#endif
#ifdef CONFIG_SWIOTLB
	else if (dma_ops == &swiotlb_dma_ops) {
		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;

		end = memblock_end_of_DRAM();
		if (max_direct_dma_addr && end > max_direct_dma_addr)
			end = max_direct_dma_addr;
		end += get_dma_offset(dev);
	}
#endif
	else if (dma_ops == &dma_direct_ops)
		end = memblock_end_of_DRAM() + get_dma_offset(dev);
	else {
		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
		end = memblock_end_of_DRAM();
	}
	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

Loading