
Commit 43d6e369 authored by Shannon Nelson, committed by Linus Torvalds

I/OAT: code cleanup from checkpatch output



Take care of a bunch of little code nits in the ioatdma files: convert bare printk() calls to dev_err(), rename internal functions to a consistent ioat_dma_ prefix, hoist forward declarations and the INITIAL_IOAT_DESC_COUNT define to the top of the file, wrap over-long lines, fix multi-line comment style, and refresh the license boilerplate.

Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1fda5f4e
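
The most frequent change in the diff below is the conversion of bare printk() calls to dev_err(). One before/after pair, lifted from the channel-allocation hunk, illustrates the pattern: dev_err() implies KERN_ERR severity and takes a struct device pointer, so the log line is automatically tagged with the owning driver and PCI device, and the arguments are wrapped to satisfy checkpatch's 80-column warning.

	/* before: no severity prefix, no device context */
	printk("IOAT: CHANERR = %x, clearing\n", chanerr);

	/* after: severity implied, message tagged with the PCI device */
	dev_err(&ioat_chan->device->pdev->dev,
		"ioatdma: CHANERR = %x, clearing\n", chanerr);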
+110 −88
@@ -1,10 +1,10 @@
 /*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2007 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -12,11 +12,12 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
  */
 
 /*
@@ -35,17 +36,22 @@
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
 
+#define INITIAL_IOAT_DESC_COUNT 128
+
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
 /* internal functions */
-static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+static int __devinit ioat_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent);
 static void ioat_shutdown(struct pci_dev *pdev);
 static void __devexit ioat_remove(struct pci_dev *pdev);
 
-static int enumerate_dma_channels(struct ioat_device *device)
+static int ioat_dma_enumerate_channels(struct ioat_device *device)
 {
 	u8 xfercap_scale;
 	u32 xfercap;
@@ -78,8 +84,9 @@ static int enumerate_dma_channels(struct ioat_device *device)
 	return device->common.chancnt;
 }
 
-static void
-ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+static void ioat_set_src(dma_addr_t addr,
+			 struct dma_async_tx_descriptor *tx,
+			 int index)
 {
 	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -93,8 +100,9 @@ ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
 
 }
 
-static void
-ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+static void ioat_set_dest(dma_addr_t addr,
+			  struct dma_async_tx_descriptor *tx,
+			  int index)
 {
 	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -107,8 +115,7 @@ ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
 	}
 }
 
-static dma_cookie_t
-ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
 	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
@@ -177,10 +184,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 	return desc_sw;
 }
 
-#define INITIAL_IOAT_DESC_COUNT 128
-
-static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);
-
 /* returns the actual number of allocated descriptors */
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -203,7 +206,8 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	if (chanerr) {
-		printk("IOAT: CHANERR = %x, clearing\n", chanerr);
+		dev_err(&ioat_chan->device->pdev->dev,
+			"ioatdma: CHANERR = %x, clearing\n", chanerr);
 		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	}
 
@@ -211,7 +215,8 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
 		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
 		if (!desc) {
-			printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
+			dev_err(&ioat_chan->device->pdev->dev,
+				"ioatdma: Only %d initial descriptors\n", i);
 			break;
 		}
 		list_add_tail(&desc->node, &tmp_list);
@@ -233,12 +238,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	writel(((u64) ioat_chan->completion_addr) >> 32,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
-	ioat_start_null_desc(ioat_chan);
+	ioat_dma_start_null_desc(ioat_chan);
 	return i;
 }
 
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-
 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -273,14 +276,17 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 
 	/* one is ok since we left it on there on purpose */
 	if (in_use_descs > 1)
-		printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
+		dev_err(&ioat_chan->device->pdev->dev,
+			"ioatdma: Freeing %d in use descriptors!\n",
 			in_use_descs - 1);
 
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 }
 
-static struct dma_async_tx_descriptor *
-ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
+static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
+						struct dma_chan *chan,
+						size_t len,
+						int int_en)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *first, *prev, *new;
@@ -343,12 +349,11 @@ ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
 	return new ? &new->async_tx : NULL;
 }
 
-
 /**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ *                                 descriptors to hw
  * @chan: DMA channel handle
  */
-
 static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -360,15 +365,15 @@ static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
 	}
 }
 
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 {
 	unsigned long phys_complete;
 	struct ioat_desc_sw *desc, *_desc;
 	dma_cookie_t cookie = 0;
 
-	prefetch(chan->completion_virt);
+	prefetch(ioat_chan->completion_virt);
 
-	if (!spin_trylock(&chan->cleanup_lock))
+	if (!spin_trylock(&ioat_chan->cleanup_lock))
 		return;
 
 	/* The completion writeback can happen at any time,
@@ -378,26 +383,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
 
 #if (BITS_PER_LONG == 64)
 	phys_complete =
-	chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+	ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 #else
-	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
+	phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
-	if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
+	if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
 				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-		printk("IOAT: Channel halted, chanerr = %x\n",
-			readl(chan->reg_base + IOAT_CHANERR_OFFSET));
+		dev_err(&ioat_chan->device->pdev->dev,
+			"ioatdma: Channel halted, chanerr = %x\n",
+			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
 
 		/* TODO do something to salvage the situation */
 	}
 
-	if (phys_complete == chan->last_completion) {
-		spin_unlock(&chan->cleanup_lock);
+	if (phys_complete == ioat_chan->last_completion) {
+		spin_unlock(&ioat_chan->cleanup_lock);
 		return;
 	}
 
-	spin_lock_bh(&chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {
+	spin_lock_bh(&ioat_chan->desc_lock);
+	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
 
 		/*
 		 * Incoming DMA requests may use multiple descriptors, due to
@@ -407,31 +413,36 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
 		if (desc->async_tx.cookie) {
 			cookie = desc->async_tx.cookie;
 
-			/* yes we are unmapping both _page and _single alloc'd
-			   regions with unmap_page. Is this *really* that bad?
+			/*
+			 * yes we are unmapping both _page and _single alloc'd
+			 * regions with unmap_page. Is this *really* that bad?
 			 */
-			pci_unmap_page(chan->device->pdev,
+			pci_unmap_page(ioat_chan->device->pdev,
 					pci_unmap_addr(desc, dst),
 					pci_unmap_len(desc, len),
 					PCI_DMA_FROMDEVICE);
-			pci_unmap_page(chan->device->pdev,
+			pci_unmap_page(ioat_chan->device->pdev,
 					pci_unmap_addr(desc, src),
 					pci_unmap_len(desc, len),
 					PCI_DMA_TODEVICE);
 		}
 
 		if (desc->async_tx.phys != phys_complete) {
-			/* a completed entry, but not the last, so cleanup
+			/*
+			 * a completed entry, but not the last, so cleanup
 			 * if the client is done with the descriptor
 			 */
 			if (desc->async_tx.ack) {
 				list_del(&desc->node);
-				list_add_tail(&desc->node, &chan->free_desc);
+				list_add_tail(&desc->node,
+					      &ioat_chan->free_desc);
 			} else
 				desc->async_tx.cookie = 0;
 		} else {
-			/* last used desc. Do not remove, so we can append from
-			   it, but don't look at it next time, either */
+			/*
+			 * last used desc. Do not remove, so we can append from
+			 * it, but don't look at it next time, either
			 */
 			desc->async_tx.cookie = 0;
 
 			/* TODO check status bits? */
@@ -439,13 +450,13 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
 		}
 	}
 
-	spin_unlock_bh(&chan->desc_lock);
+	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	chan->last_completion = phys_complete;
+	ioat_chan->last_completion = phys_complete;
 	if (cookie != 0)
-		chan->completed_cookie = cookie;
+		ioat_chan->completed_cookie = cookie;
 
-	spin_unlock(&chan->cleanup_lock);
+	spin_unlock(&ioat_chan->cleanup_lock);
 }
 
 static void ioat_dma_dependency_added(struct dma_chan *chan)
@@ -466,7 +477,6 @@ static void ioat_dma_dependency_added(struct dma_chan *chan)
  * @done: if not %NULL, updated with last completed transaction
  * @used: if not %NULL, updated with last used transaction
  */
-
 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 					    dma_cookie_t cookie,
 					    dma_cookie_t *done,
@@ -538,13 +548,13 @@ static irqreturn_t ioat_do_interrupt(int irq, void *data)
 
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 
-	printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);
+	printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);
 
 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
 	return IRQ_HANDLED;
 }
 
-static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 {
 	struct ioat_desc_sw *desc;
 
@@ -611,6 +621,8 @@ static int ioat_self_test(struct ioat_device *device)
 				struct dma_chan,
 				device_node);
 	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+		dev_err(&device->pdev->dev,
+			"selftest cannot allocate chan resource\n");
 		err = -ENODEV;
 		goto out;
 	}
@@ -628,12 +640,14 @@ static int ioat_self_test(struct ioat_device *device)
 	msleep(1);
 
 	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
+		dev_err(&device->pdev->dev,
+			"ioatdma: Self-test copy timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-		printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
+		dev_err(&device->pdev->dev,
+			"ioatdma: Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
@@ -691,7 +705,9 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 		goto err_dma_pool;
 	}
 
-	device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);
+	device->completion_pool = pci_pool_create("completion_pool", pdev,
+						  sizeof(u64), SMP_CACHE_BYTES,
+						  SMP_CACHE_BYTES);
 	if (!device->completion_pool) {
 		err = -ENOMEM;
 		goto err_completion_pool;
@@ -713,21 +729,25 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 
 	device->reg_base = reg_base;
 
-	writeb(IOAT_INTRCTRL_MASTER_INT_EN, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
+	       device->reg_base + IOAT_INTRCTRL_OFFSET);
 	pci_set_master(pdev);
 
 	INIT_LIST_HEAD(&device->common.channels);
-	enumerate_dma_channels(device);
+	ioat_dma_enumerate_channels(device);
 
 	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
-	device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
-	device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
+	device->common.device_alloc_chan_resources =
+						ioat_dma_alloc_chan_resources;
+	device->common.device_free_chan_resources =
+						ioat_dma_free_chan_resources;
 	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
 	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
 	device->common.device_dependency_added = ioat_dma_dependency_added;
 	device->common.dev = &pdev->dev;
-	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
+	printk(KERN_INFO
+		"ioatdma: Intel(R) I/OAT DMA Engine found, %d channels\n",
 		device->common.chancnt);
 
 	err = ioat_self_test(device);
@@ -754,7 +774,8 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 	pci_disable_device(pdev);
 err_enable_device:
 
-	printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");
+	printk(KERN_INFO
+		"ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
 
 	return err;
 }
@@ -786,7 +807,8 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
 	iounmap(device->reg_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) {
+	list_for_each_entry_safe(chan, _chan,
+				 &device->common.channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
 		list_del(&chan->device_node);
 		kfree(ioat_chan);
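
For reference, the comment reflows scattered through the cleanup hunks all converge on the kernel's preferred block-comment style: the opening /* on its own line, every continuation line starting with an aligned *, and the closing */ alone on its line, as in this fragment from the cleanup path above:

	/*
	 * yes we are unmapping both _page and _single alloc'd
	 * regions with unmap_page. Is this *really* that bad?
	 */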