
Commit 85596a19 authored by Dave Jiang, committed by Vinod Koul

dmaengine: ioatdma: remove ioat1 specific code



Clean up the ioat1-specific code, as ioat1 is no longer supported.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent d73f277b
+0 −151
@@ -71,14 +71,6 @@ static inline int dca2_tag_map_valid(u8 *tag_map)
#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };

/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
@@ -126,72 +118,6 @@ struct ioat_dca_priv {
	struct ioat_dca_slot 	 req_slots[0];
};

/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]	PCI-Express Bus Number
 * [7:3]	PCI-Express Device Number
 * [2:0]	PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]	Reserved (0)
 * [0]		Ignore Function Number
 */
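/* For context, a minimal sketch (not part of this patch): pci_dev::devfn
 * already packs the device and function numbers in the [7:3]/[2:0] split
 * shown above, so building the 16-bit requester ID only needs the bus
 * number ORed into the top byte:
 *
 *	u16 id = (pdev->bus->number << 8) | pdev->devfn;
 *
 * consistent with the dcaid_from_pcidev() helper declared earlier;
 * PCI_SLOT() and PCI_FUNC() from <linux/pci.h> recover the two
 * sub-fields when needed.
 */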

static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
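/* Each requester slot in the 5000-series table is 4 bytes wide: the u16
 * requester ID at offset 0 and the control byte (bit 0 = "Ignore
 * Function Number") at offset 2, hence the writew() at (i * 4) and the
 * writeb() at (i * 4) + 2 above.
 */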

static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
@@ -231,83 +157,6 @@ static int ioat_dca_dev_managed(struct dca_provider *dca,
	return 0;
}

static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};


struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
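/* Note on the setup above: the per-chipset tag maps translate a CPU's
 * APIC ID into the DCA tag that chipset generation expects, and the
 * channel version register is read because I/OAT v3 parts expose more
 * requester slots (IOAT3_DCA_MAX_REQ) than the earlier IOAT_DCA_MAX_REQ.
 */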


static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
+1 −673
/*
 * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
+ * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@ module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
@@ -116,248 +112,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}

/**
 * ioat1_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
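/* Submissions batch in ioat->pending; the APPEND doorbell is rung only
 * here on issue_pending, or once the count crosses ioat_pending_level
 * in ioat1_tx_submit() below, trading MMIO writes for a little latency.
 */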

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
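/* ioat1 chains descriptors in software: the new list is spliced onto
 * used_desc and the old tail's hardware 'next' field is patched to the
 * first new descriptor's bus address, with the wmb() ordering the
 * descriptor contents before the link becomes visible to hardware.
 */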

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
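/* Each descriptor is really a pair: the hardware descriptor comes from
 * the pci_pool (DMA-coherent memory the engine walks by physical
 * address) while the kzalloc'd ioat_desc_sw wrapper carries the
 * kernel-side bookkeeping and the dmaengine txd.
 */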

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	set_bit(IOAT_RUN, &chan->state);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
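/* The completion area allocated above is a per-channel u64 that the
 * hardware updates with the bus address of the last completed descriptor
 * plus status bits, letting the cleanup path poll memory rather than
 * MMIO; per the comment above, its base address has to be programmed as
 * two 32-bit writes.
 */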

void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
@@ -394,177 +148,6 @@ void ioat_stop(struct ioat_chan_common *chan)
	device->cleanup_fn((unsigned long) &chan->common);
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	ioat_stop(chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}
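/* Callers hold desc_lock (a spinlock taken with spin_lock_bh), so the
 * refill path above must allocate with GFP_ATOMIC rather than
 * GFP_KERNEL.
 */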

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
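/* For context, a hypothetical client sketch (not part of this patch) of
 * the generic dmaengine path that used to land here, with chan, dst,
 * src and len supplied by the client and error handling trimmed:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);	// -> ioat1_tx_submit()
 *	dma_async_issue_pending(chan);	// -> issue_pending hook
 */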

static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat1_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
@@ -599,150 +182,6 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
	return true;
}

static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		 __func__, (unsigned long long) phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}
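/* Only the final descriptor of a multi-descriptor transfer carries a
 * cookie (see ioat1_dma_prep_memcpy() above), so completion, unmap and
 * the client callback all key off tx->cookie here; earlier fragments
 * are simply recycled once the client has acked them.
 */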

/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
@@ -760,42 +199,6 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	return dma_cookie_status(c, cookie, txstate);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
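/* The "null" descriptor moves no data (ctl_f.null with a dummy non-zero
 * size, since the channel errors on size 0); it only gives the engine a
 * valid chain head that real work can be appended to, which is why
 * alloc_chan_resources primes each channel with one.
 */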

/*
 * Perform a IOAT transaction to verify the HW works.
 */
@@ -1077,36 +480,6 @@ int ioat_register(struct ioatdma_device *device)
	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
@@ -1131,14 +504,6 @@ static ssize_t version_show(struct dma_chan *c, char *page)
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -1157,11 +522,6 @@ const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
@@ -1197,38 +557,6 @@ void ioat_kobject_del(struct ioatdma_device *device)
	}
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}
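/* ioat1_dma_probe() was the version-1 entry point: it wired the ioat1
 * callbacks into the shared ioatdma_device and dma_device ops ahead of
 * the common ioat_probe()/ioat_register() path, presumably selected
 * from the PCI probe code when the channel version register reads 1.x.
 */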

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;