Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 802c1f66 authored by Glauber Costa's avatar Glauber Costa Committed by Ingo Molnar
Browse files

x86: move dma_supported and dma_set_mask to pci-dma_32.c



This is the way x86_64 does it, so this makes them equal. They have
to be extern now in the header, and the extern declarations are moved to
the common dma-mapping.h header.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3cb6a917
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
@@ -156,6 +156,39 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
int forbid_dac;
EXPORT_SYMBOL(forbid_dac);

/*
 * dma_supported - can @dev DMA to every address covered by @mask?
 *
 * Returns non-zero when the mask is usable, 0 when it is not.
 * Moved out of line from dma-mapping_32.h so i386 matches x86_64.
 */
int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * Masks narrower than the 24-bit GFP_DMA window cannot be
	 * honoured (allocations only fall back as far as GFP_DMA),
	 * and masks wider than 32 bits are refused while forbid_dac
	 * is set, as a workaround for broken chipsets.
	 */
	if (mask < 0x00ffffff || (forbid_dac > 0 && mask > 0xffffffffULL))
		return 0;

	/* Defer to the active DMA implementation when it has an opinion. */
	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	return 1;
}

/*
 * dma_set_mask - install @mask as @dev's DMA addressing mask.
 *
 * Returns 0 on success, -EIO when the device has no mask field or the
 * mask is not supported. Moved out of line so i386 matches x86_64.
 */
int
dma_set_mask(struct device *dev, u64 mask)
{
	int rc = -EIO;

	if (dev->dma_mask && dma_supported(dev, mask)) {
		*dev->dma_mask = mask;
		rc = 0;
	}

	return rc;
}


static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+3 −0
Original line number Diff line number Diff line
@@ -62,6 +62,9 @@ void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle);


extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

#ifdef CONFIG_X86_32
# include "dma-mapping_32.h"
#else
+0 −29
Original line number Diff line number Diff line
@@ -16,35 +16,6 @@ dma_mapping_error(dma_addr_t dma_addr)

extern int forbid_dac;

/*
 * Legacy i386 inline version, deleted by this commit in favour of the
 * out-of-line copy added to pci-dma_32.c: reports whether the device
 * can DMA to every address covered by @mask.
 */
static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if(mask < 0x00ffffff)
                return 0;

	/* Work around chipset bugs */
	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}

/*
 * Legacy i386 inline version, deleted by this commit in favour of the
 * out-of-line copy added to pci-dma_32.c: installs @mask as the
 * device's DMA mask, returning 0 on success or -EIO when the device
 * has no mask field or the mask cannot be supported.
 */
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if(!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
+0 −4
Original line number Diff line number Diff line
@@ -12,8 +12,6 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
	return (dma_addr == bad_dma_address);
}

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
@@ -22,8 +20,6 @@ static inline int dma_get_cache_alignment(void)

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

extern struct device fallback_dev;
extern int panic_on_overflow;