
Commit 55f878ec authored by Dave Jiang, committed by Vinod Koul

dmaengine: ioatdma: fixup ioatdma_device namings



Change the variable names for ioatdma_device to be consistently named
ioat_dma instead of device/dma, in order to avoid confusion and to
distinguish it from struct device. This clearly indicates that the
variable is an ioatdma_device. It also makes the naming consistent:
the dma device is ioat_dma and the channels are ioat_chan.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 5a976888
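
The convention after this patch, as a minimal illustrative sketch (simplified, hypothetical stub types for the sketch only, not the driver source): the ioatdma_device pointer is always named ioat_dma, channel pointers are ioat_chan, and the embedded struct dma_device member is dma_dev rather than common.

struct dma_device { int chancnt; };			/* stub for the sketch */
struct dma_chan { struct dma_device *device; };		/* stub for the sketch */

struct ioatdma_device {
	struct dma_device dma_dev;	/* embedded; formerly named "common" */
	void *reg_base;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	struct ioatdma_device *ioat_dma;	/* back-pointer; formerly "device" */
};

/* the local for the dma device is always ioat_dma, never device/dma */
static struct dma_device *example(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;

	return &ioat_dma->dma_dev;
}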
+79 −77
@@ -93,30 +93,30 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 
 /* common channel initialization */
 void
-ioat_init_channel(struct ioatdma_device *device, struct ioatdma_chan *ioat_chan,
-		  int idx)
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	struct dma_chan *c = &ioat_chan->dma_chan;
 	unsigned long data = (unsigned long) c;
 
-	ioat_chan->device = device;
-	ioat_chan->reg_base = device->reg_base + (0x80 * (idx + 1));
+	ioat_chan->ioat_dma = ioat_dma;
+	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
 	spin_lock_init(&ioat_chan->cleanup_lock);
 	ioat_chan->dma_chan.device = dma;
 	dma_cookie_init(&ioat_chan->dma_chan);
 	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
-	device->idx[idx] = ioat_chan;
+	ioat_dma->idx[idx] = ioat_chan;
 	init_timer(&ioat_chan->timer);
-	ioat_chan->timer.function = device->timer_fn;
+	ioat_chan->timer.function = ioat_dma->timer_fn;
 	ioat_chan->timer.data = data;
-	tasklet_init(&ioat_chan->cleanup_task, device->cleanup_fn, data);
+	tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data);
 }
 
 void ioat_stop(struct ioatdma_chan *ioat_chan)
 {
-	struct ioatdma_device *device = ioat_chan->device;
-	struct pci_dev *pdev = device->pdev;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	int chan_id = chan_num(ioat_chan);
 	struct msix_entry *msix;
@@ -126,9 +126,9 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
 	clear_bit(IOAT_RUN, &ioat_chan->state);
 
 	/* flush inflight interrupts */
-	switch (device->irq_mode) {
+	switch (ioat_dma->irq_mode) {
 	case IOAT_MSIX:
-		msix = &device->msix_entries[chan_id];
+		msix = &ioat_dma->msix_entries[chan_id];
 		synchronize_irq(msix->vector);
 		break;
 	case IOAT_MSI:
@@ -146,7 +146,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
 	tasklet_kill(&ioat_chan->cleanup_task);
 
 	/* final cleanup now that everything is quiesced and can't re-arm */
-	device->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
+	ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
 }
 
 dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
@@ -189,14 +189,14 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 		   struct dma_tx_state *txstate)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	enum dma_status ret;
 
 	ret = dma_cookie_status(c, cookie, txstate);
 	if (ret == DMA_COMPLETE)
 		return ret;
 
-	device->cleanup_fn((unsigned long) c);
+	ioat_dma->cleanup_fn((unsigned long) c);
 
 	return dma_cookie_status(c, cookie, txstate);
 }
@@ -215,15 +215,15 @@ static void ioat_dma_test_callback(void *dma_async_param)
 
 /**
  * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @device: device to be tested
+ * @ioat_dma: dma device to be tested
  */
-int ioat_dma_self_test(struct ioatdma_device *device)
+int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 {
 	int i;
 	u8 *src;
 	u8 *dest;
-	struct dma_device *dma = &device->common;
-	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct device *dev = &ioat_dma->pdev->dev;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
@@ -266,8 +266,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 		goto unmap_src;
 	}
 	flags = DMA_PREP_INTERRUPT;
-	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, flags);
+	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
+						      dma_src, IOAT_TEST_SIZE,
+						      flags);
 	if (!tx) {
 		dev_err(dev, "Self-test prep failed, disabling\n");
 		err = -ENODEV;
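
For context, the self-test above follows the generic dmaengine client pattern: prep a descriptor, submit it, issue pending work, then wait. A hedged sketch of that pattern (not the driver's self-test verbatim; it busy-waits via dma_sync_wait() instead of using the test's completion callback):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int memcpy_once(struct dma_chan *chan, dma_addr_t dst,
		       dma_addr_t src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dev->device_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENODEV;

	cookie = dmaengine_submit(tx);		/* wraps tx->tx_submit(tx) */
	dma_async_issue_pending(chan);		/* kick the hardware */

	/* poll until the cookie completes or the wait times out */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;
	return 0;
}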
@@ -321,12 +322,12 @@ MODULE_PARM_DESC(ioat_interrupt_style,
 
 /**
  * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
+ * @ioat_dma: ioat dma device
  */
-int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	struct pci_dev *pdev = device->pdev;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	struct device *dev = &pdev->dev;
 	struct msix_entry *msix;
 	int i, j, msixcnt;
@@ -344,31 +345,31 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 
 msix:
 	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = device->common.chancnt;
+	msixcnt = ioat_dma->dma_dev.chancnt;
 	for (i = 0; i < msixcnt; i++)
-		device->msix_entries[i].entry = i;
+		ioat_dma->msix_entries[i].entry = i;
 
-	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
 	if (err)
 		goto msi;
 
 	for (i = 0; i < msixcnt; i++) {
-		msix = &device->msix_entries[i];
-		ioat_chan = ioat_chan_by_index(device, i);
+		msix = &ioat_dma->msix_entries[i];
+		ioat_chan = ioat_chan_by_index(ioat_dma, i);
 		err = devm_request_irq(dev, msix->vector,
 				       ioat_dma_do_interrupt_msix, 0,
 				       "ioat-msix", ioat_chan);
 		if (err) {
 			for (j = 0; j < i; j++) {
-				msix = &device->msix_entries[j];
-				ioat_chan = ioat_chan_by_index(device, j);
+				msix = &ioat_dma->msix_entries[j];
+				ioat_chan = ioat_chan_by_index(ioat_dma, j);
 				devm_free_irq(dev, msix->vector, ioat_chan);
 			}
 			goto msi;
 		}
 	}
 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = IOAT_MSIX;
+	ioat_dma->irq_mode = IOAT_MSIX;
 	goto done;
 
 msi:
@@ -377,69 +378,70 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 		goto intx;
 
 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
-			       "ioat-msi", device);
+			       "ioat-msi", ioat_dma);
 	if (err) {
 		pci_disable_msi(pdev);
 		goto intx;
 	}
-	device->irq_mode = IOAT_MSI;
+	ioat_dma->irq_mode = IOAT_MSI;
 	goto done;
 
 intx:
 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
-			       IRQF_SHARED, "ioat-intx", device);
+			       IRQF_SHARED, "ioat-intx", ioat_dma);
 	if (err)
 		goto err_no_irq;
 
-	device->irq_mode = IOAT_INTX;
+	ioat_dma->irq_mode = IOAT_INTX;
 done:
-	if (device->intr_quirk)
-		device->intr_quirk(device);
+	if (ioat_dma->intr_quirk)
+		ioat_dma->intr_quirk(ioat_dma);
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 	return 0;
 
 err_no_irq:
 	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	device->irq_mode = IOAT_NOIRQ;
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	ioat_dma->irq_mode = IOAT_NOIRQ;
 	dev_err(dev, "no usable interrupts\n");
 	return err;
 }
 EXPORT_SYMBOL(ioat_dma_setup_interrupts);
 
-static void ioat_disable_interrupts(struct ioatdma_device *device)
+static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
 	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
-int ioat_probe(struct ioatdma_device *device)
+int ioat_probe(struct ioatdma_device *ioat_dma)
 {
 	int err = -ENODEV;
-	struct dma_device *dma = &device->common;
-	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct pci_dev *pdev = ioat_dma->pdev;
 	struct device *dev = &pdev->dev;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
-	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
 					     sizeof(struct ioat_dma_descriptor),
 					     64, 0);
-	if (!device->dma_pool) {
+	if (!ioat_dma->dma_pool) {
 		err = -ENOMEM;
 		goto err_dma_pool;
 	}
 
-	device->completion_pool = pci_pool_create("completion_pool", pdev,
-						  sizeof(u64), SMP_CACHE_BYTES,
-						  SMP_CACHE_BYTES);
+	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+						    sizeof(u64),
+						    SMP_CACHE_BYTES,
+						    SMP_CACHE_BYTES);
 
-	if (!device->completion_pool) {
+	if (!ioat_dma->completion_pool) {
 		err = -ENOMEM;
 		goto err_completion_pool;
 	}
 
-	device->enumerate_channels(device);
+	ioat_dma->enumerate_channels(ioat_dma);
 
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->dev = &pdev->dev;
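
The two interrupt-setup hunks above preserve the driver's fallback order: try MSI-X with one vector per channel, fall back to a single MSI vector, then to the legacy shared INTx line, and finally disable interrupt generation. A self-contained sketch of that pattern (the try_* helpers are hypothetical stand-ins for pci_enable_msix_exact()/pci_enable_msi()/devm_request_irq()):

enum irq_mode { MODE_MSIX, MODE_MSI, MODE_INTX, MODE_NOIRQ };

/* hypothetical stand-ins; each returns 0 on success */
static int try_msix(void) { return -1; }
static int try_msi(void)  { return -1; }
static int try_intx(void) { return -1; }

static enum irq_mode setup_interrupts(void)
{
	if (try_msix() == 0)	/* one vector per channel */
		return MODE_MSIX;
	if (try_msi() == 0)	/* single message-signalled vector */
		return MODE_MSI;
	if (try_intx() == 0)	/* legacy line, requested with IRQF_SHARED */
		return MODE_INTX;
	return MODE_NOIRQ;	/* caller then masks interrupt generation */
}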
@@ -449,34 +451,34 @@ int ioat_probe(struct ioatdma_device *device)
 		goto err_setup_interrupts;
 	}
 
-	err = ioat_dma_setup_interrupts(device);
+	err = ioat_dma_setup_interrupts(ioat_dma);
 	if (err)
 		goto err_setup_interrupts;
 
-	err = device->self_test(device);
+	err = ioat_dma->self_test(ioat_dma);
 	if (err)
 		goto err_self_test;
 
 	return 0;
 
 err_self_test:
-	ioat_disable_interrupts(device);
+	ioat_disable_interrupts(ioat_dma);
 err_setup_interrupts:
-	pci_pool_destroy(device->completion_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
 err_completion_pool:
-	pci_pool_destroy(device->dma_pool);
+	pci_pool_destroy(ioat_dma->dma_pool);
 err_dma_pool:
 	return err;
 }
 
-int ioat_register(struct ioatdma_device *device)
+int ioat_register(struct ioatdma_device *ioat_dma)
 {
-	int err = dma_async_device_register(&device->common);
+	int err = dma_async_device_register(&ioat_dma->dma_dev);
 
 	if (err) {
-		ioat_disable_interrupts(device);
-		pci_pool_destroy(device->completion_pool);
-		pci_pool_destroy(device->dma_pool);
+		ioat_disable_interrupts(ioat_dma);
+		pci_pool_destroy(ioat_dma->completion_pool);
+		pci_pool_destroy(ioat_dma->dma_pool);
 	}
 
 	return err;
@@ -499,10 +501,10 @@ struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
 static ssize_t version_show(struct dma_chan *c, char *page)
 {
 	struct dma_device *dma = c->device;
-	struct ioatdma_device *device = to_ioatdma_device(dma);
+	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
 
 	return sprintf(page, "%d.%d\n",
-		       device->version >> 4, device->version & 0xf);
+		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
 }
 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
@@ -524,9 +526,9 @@ const struct sysfs_ops ioat_sysfs_ops = {
 	.show	= ioat_attr_show,
 };
 
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	struct dma_chan *c;
 
 	list_for_each_entry(c, &dma->channels, device_node) {
@@ -545,9 +547,9 @@ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
 	}
 }
 
-void ioat_kobject_del(struct ioatdma_device *device)
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	struct dma_chan *c;
 
 	list_for_each_entry(c, &dma->channels, device_node) {
@@ -560,18 +562,18 @@ void ioat_kobject_del(struct ioatdma_device *device)
 	}
 }
 
-void ioat_dma_remove(struct ioatdma_device *device)
+void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 {
-	struct dma_device *dma = &device->common;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 
-	ioat_disable_interrupts(device);
+	ioat_disable_interrupts(ioat_dma);
 
-	ioat_kobject_del(device);
+	ioat_kobject_del(ioat_dma);
 
 	dma_async_device_unregister(dma);
 
-	pci_pool_destroy(device->dma_pool);
-	pci_pool_destroy(device->completion_pool);
+	pci_pool_destroy(ioat_dma->dma_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
 
 	INIT_LIST_HEAD(&dma->channels);
 }
+25 −25
@@ -30,11 +30,11 @@
 
 #define IOAT_DMA_DCA_ANY_CPU		~0
 
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
-#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
+#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
 
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
 
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
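
to_ioatdma_device() relies on container_of() to step from the embedded struct dma_device back to its enclosing ioatdma_device, which is why the macro has to name the renamed member dma_dev. A user-space sketch of the same trick (simplified, hypothetical types for illustration):

#include <stddef.h>

/* recover the outer struct from a pointer to an embedded member by
 * subtracting the member's offset within the outer type */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_device { int id; };
struct ioatdma_device {
	int version;
	struct dma_device dma_dev;	/* embedded member */
};

static struct ioatdma_device *to_ioatdma_device(struct dma_device *dev)
{
	return container_of(dev, struct ioatdma_device, dma_dev);
}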
@@ -54,7 +54,7 @@ enum ioat_irq_mode {
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
  * @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
+ * @dma_dev: embedded struct dma_device
  * @version: version of ioatdma device
  * @msix_entries: irq handlers
  * @idx: per channel data
@@ -75,19 +75,19 @@ struct ioatdma_device {
 	struct pci_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
-	struct dma_device common;
+	struct dma_device dma_dev;
 	u8 version;
 	struct msix_entry msix_entries[4];
 	struct ioatdma_chan *idx[4];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
-	void (*intr_quirk)(struct ioatdma_device *device);
-	int (*enumerate_channels)(struct ioatdma_device *device);
+	void (*intr_quirk)(struct ioatdma_device *ioat_dma);
+	int (*enumerate_channels)(struct ioatdma_device *ioat_dma);
 	int (*reset_hw)(struct ioatdma_chan *ioat_chan);
 	void (*cleanup_fn)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
-	int (*self_test)(struct ioatdma_device *device);
+	int (*self_test)(struct ioatdma_device *ioat_dma);
 };
 
 struct ioatdma_chan {
@@ -107,7 +107,7 @@ struct ioatdma_chan {
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
 	#define RESET_DELAY msecs_to_jiffies(100)
-	struct ioatdma_device *device;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t completion_dma;
 	u64 *completion;
 	struct tasklet_struct cleanup_task;
@@ -188,14 +188,14 @@ __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
 	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
 
 static inline struct ioatdma_chan *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
+ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
 {
-	return device->idx[index];
+	return ioat_dma->idx[index];
 }
 
 static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 	u32 status_lo;
@@ -214,7 +214,7 @@ static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 
 static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 
 	 /* With IOAT v3.3 the status register is 64bit.  */
@@ -242,7 +242,7 @@ static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
 
 static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
 	writeb(IOAT_CHANCMD_SUSPEND,
 	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
@@ -250,7 +250,7 @@ static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 
 static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
 	writeb(IOAT_CHANCMD_RESET,
 	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
@@ -258,7 +258,7 @@ static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 
 static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = ioat_chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u8 cmd;
 
 	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
@@ -291,20 +291,20 @@ static inline bool is_ioat_bug(unsigned long err)
 	return !!err;
 }
 
-int ioat_probe(struct ioatdma_device *device);
-int ioat_register(struct ioatdma_device *device);
-int ioat_dma_self_test(struct ioatdma_device *device);
-void ioat_dma_remove(struct ioatdma_device *device);
+int ioat_probe(struct ioatdma_device *ioat_dma);
+int ioat_register(struct ioatdma_device *ioat_dma);
+int ioat_dma_self_test(struct ioatdma_device *ioat_dma);
+void ioat_dma_remove(struct ioatdma_device *ioat_dma);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-void ioat_init_channel(struct ioatdma_device *device,
+void ioat_init_channel(struct ioatdma_device *ioat_dma,
 		       struct ioatdma_chan *ioat_chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 				   struct dma_tx_state *txstate);
 bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
 			   dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *device);
-int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
 void ioat_stop(struct ioatdma_chan *ioat_chan);
 extern const struct sysfs_ops ioat_sysfs_ops;
 extern struct ioat_sysfs_entry ioat_version_attr;
+24 −24
@@ -187,25 +187,25 @@ int ioat2_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
 
 /**
  * ioat2_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
+ * @ioat_dma: the ioat dma device to be enumerated
  */
-int ioat2_enumerate_channels(struct ioatdma_device *device)
+int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
 	u8 xfercap_log;
 	int i;
 
 	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
 	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
 		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
+			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
 	}
-	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap_log &= 0x1f; /* bits [4:0] valid */
 	if (xfercap_log == 0)
 		return 0;
@@ -216,10 +216,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat_chan)
 			break;
 
-		ioat_init_channel(device, ioat_chan, i);
+		ioat_init_channel(ioat_dma, ioat_chan, i);
 		ioat_chan->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat_chan->prep_lock);
-		if (device->reset_hw(ioat_chan)) {
+		if (ioat_dma->reset_hw(ioat_chan)) {
 			i = 0;
 			break;
 		}
@@ -258,18 +258,18 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
 {
 	struct ioat_dma_descriptor *hw;
 	struct ioat_ring_ent *desc;
-	struct ioatdma_device *dma;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t phys;
 
-	dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
+	ioat_dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
 	if (!hw)
 		return NULL;
 	memset(hw, 0, sizeof(*hw));
 
 	desc = kmem_cache_zalloc(ioat2_cache, flags);
 	if (!desc) {
-		pci_pool_free(dma->dma_pool, hw, phys);
+		pci_pool_free(ioat_dma->dma_pool, hw, phys);
 		return NULL;
 	}
@@ -282,10 +282,10 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
 
 static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
 {
-	struct ioatdma_device *dma;
+	struct ioatdma_device *ioat_dma;
 
-	dma = to_ioatdma_device(chan->device);
-	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
+	ioat_dma = to_ioatdma_device(chan->device);
+	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
 	kmem_cache_free(ioat2_cache, desc);
 }
@@ -348,7 +348,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->device->completion_pool,
+		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
 			       GFP_KERNEL, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
@@ -554,10 +554,10 @@ int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
 	 */
 	if (time_is_before_jiffies(ioat_chan->timer.expires)
 	    && timer_pending(&ioat_chan->timer)) {
-		struct ioatdma_device *device = ioat_chan->device;
+		struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 
 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-		device->timer_fn((unsigned long)ioat_chan);
+		ioat_dma->timer_fn((unsigned long)ioat_chan);
 	}
 
 	return -ENOMEM;
@@ -617,7 +617,7 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 void ioat2_free_chan_resources(struct dma_chan *c)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
-	struct ioatdma_device *device = ioat_chan->device;
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent *desc;
 	const int total_descs = 1 << ioat_chan->alloc_order;
 	int descs;
@@ -630,7 +630,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 		return;
 
 	ioat_stop(ioat_chan);
-	device->reset_hw(ioat_chan);
+	ioat_dma->reset_hw(ioat_chan);
 
 	spin_lock_bh(&ioat_chan->cleanup_lock);
 	spin_lock_bh(&ioat_chan->prep_lock);
@@ -654,7 +654,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	kfree(ioat_chan->ring);
 	ioat_chan->ring = NULL;
 	ioat_chan->alloc_order = 0;
-	pci_pool_free(device->completion_pool, ioat_chan->completion,
+	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
 		      ioat_chan->completion_dma);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
+3 −3
@@ -121,11 +121,11 @@ ioat2_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
 	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 }
 
-int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat2_dma_probe(struct ioatdma_device *ioat_dma, int dca);
+int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *device);
+int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma);
 struct dma_async_tx_descriptor *
 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 			   dma_addr_t dma_src, size_t len, unsigned long flags);
+64 −62
One more file changed; preview size limit exceeded, changes collapsed.