Commit e93252fa authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  [PATCH] libata: Remove dependence on host_set->dev for SAS
  [PATCH] libata: ata_scsi_ioctl cleanup
  [PATCH] libata: ata_scsi_queuecmd cleanup
  [libata] export ata_dev_pair; trim trailing whitespace
  [PATCH] libata: add ata_dev_pair helper
  [PATCH] Make libata not powerdown drivers on PM_EVENT_FREEZE.
  [PATCH] libata: make ata_set_mode() responsible for failure handling
  [PATCH] libata: use ata_dev_disable() in ata_bus_probe()
  [PATCH] libata: implement ata_dev_disable()
  [PATCH] libata: check if port is disabled after internal command
  [PATCH] libata: make per-dev transfer mode limits per-dev
  [PATCH] libata: add per-dev pio/mwdma/udma_mask
  [PATCH] libata: implement ata_unpack_xfermask()
  [libata] Move some bmdma-specific code to libata-bmdma.c
  [libata sata_uli] kill scr_addr abuse
  [libata sata_nv] eliminate duplicate codepaths with iomap
  [libata sata_nv] cleanups: convert #defines to enums; remove in-file history
  [libata sata_sil24] cleanups: use pci_iomap(), kzalloc()
parents f125b561 2f1f610b
+236 −2
@@ -418,6 +418,240 @@ u8 ata_altstatus(struct ata_port *ap)
 	return inb(ap->ioaddr.altstatus_addr);
 }
 
+/**
+ *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	writeb(dmactl, mmio + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue a readb() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * is expecting, so I think it is best to not add a readb()
+	 * without first checking all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+	 */
+}
+
+/**
+ *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	outb(dmactl | ATA_DMA_START,
+	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+
+/**
+ *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Writes the ATA_DMA_START flag to the DMA command register.
+ *
+ *	May be used as the bmdma_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_start_mmio(qc);
+	else
+		ata_bmdma_start_pio(qc);
+}
+
+
+/**
+ *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Writes address of PRD table to device's PRD Table Address
+ *	register, sets the DMA control register, and calls
+ *	ops->exec_command() to start the transfer.
+ *
+ *	May be used as the bmdma_setup() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_setup_mmio(qc);
+	else
+		ata_bmdma_setup_pio(qc);
+}
+
+
+/**
+ *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Clear interrupt and error flags in DMA status register.
+ *
+ *	May be used as the irq_clear() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	if (!ap->ioaddr.bmdma_addr)
+		return;
+
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio =
+		      ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
+		writeb(readb(mmio), mmio);
+	} else {
+		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+		outb(inb(addr), addr);
+	}
+}
+
+
+/**
+ *	ata_bmdma_status - Read PCI IDE BMDMA status
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Read and return BMDMA status register.
+ *
+ *	May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+	u8 host_stat;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+		host_stat = readb(mmio + ATA_DMA_STATUS);
+	} else
+		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	return host_stat;
+}
+
+
+/**
+ *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ *	@qc: Command we are ending DMA for
+ *
+ *	Clears the ATA_DMA_START flag in the dma control register
+ *
+ *	May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+		/* clear start/stop bit */
+		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+			mmio + ATA_DMA_CMD);
+	} else {
+		/* clear start/stop bit */
+		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
+			ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	}
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_altstatus(ap);        /* dummy read */
+}
+
 #ifdef CONFIG_PCI
 static struct ata_probe_ent *
 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
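
The helpers above are meant to be dropped straight into an LLDD's ata_port_operations, as each "May be used as..." comment says. A minimal sketch of the wiring, assuming a hypothetical PCI IDE-style driver (only the BMDMA-related hooks from this file are shown; all other mandatory hooks are omitted):

static struct ata_port_operations demo_bmdma_ops = {
	/* hypothetical driver, not part of this commit */
	.bmdma_setup	= ata_bmdma_setup,	/* PRD addr + direction, then exec_command() */
	.bmdma_start	= ata_bmdma_start,	/* sets ATA_DMA_START */
	.bmdma_stop	= ata_bmdma_stop,	/* clears ATA_DMA_START, dummy altstatus read */
	.bmdma_status	= ata_bmdma_status,	/* reads ATA_DMA_STATUS */
	.irq_clear	= ata_bmdma_irq_clear,	/* writes status back to ack INTR/ERR */
	/* taskfile, reset, qc_prep/qc_issue etc. omitted */
};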
+162 −307
@@ -64,9 +64,9 @@
 static unsigned int ata_dev_init_params(struct ata_port *ap,
 					struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
-static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
-static unsigned int ata_dev_xfermask(struct ata_port *ap,
-				     struct ata_device *dev);
+static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
+					 struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -252,6 +252,29 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 }
 
+/**
+ *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
+ *	@xfer_mask: xfer_mask to unpack
+ *	@pio_mask: resulting pio_mask
+ *	@mwdma_mask: resulting mwdma_mask
+ *	@udma_mask: resulting udma_mask
+ *
+ *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
+ *	Any NULL destination masks will be ignored.
+ */
+static void ata_unpack_xfermask(unsigned int xfer_mask,
+				unsigned int *pio_mask,
+				unsigned int *mwdma_mask,
+				unsigned int *udma_mask)
+{
+	if (pio_mask)
+		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
+	if (mwdma_mask)
+		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
+	if (udma_mask)
+		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
+}
+
 static const struct ata_xfer_ent {
 	unsigned int shift, bits;
 	u8 base;
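
ata_unpack_xfermask() is the exact inverse of ata_pack_xfermask() shown in the context above. A small illustration of the round trip, a sketch only (the values and the demo function are invented, not part of the patch):

static void demo_xfermask_roundtrip(void)
{
	/* arbitrary example: PIO0-4, MWDMA0-2, UDMA0-5 */
	unsigned int pio = 0x1f, mwdma = 0x07, udma = 0x3f;
	unsigned int xfer_mask = ata_pack_xfermask(pio, mwdma, udma);

	/* recover all three fields from the packed encoding */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);

	/* ... or just one; NULL destination masks are skipped */
	ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma);
}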
@@ -372,6 +395,15 @@ static const char *ata_mode_string(unsigned int xfer_mask)
 	return "<n/a>";
 }
 
+static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+{
+	if (ata_dev_present(dev)) {
+		printk(KERN_WARNING "ata%u: dev %u disabled\n",
+		       ap->id, dev->devno);
+		dev->class++;
+	}
+}
+
 /**
  *	ata_pio_devchk - PATA device presence detection
  *	@ap: ATA channel to examine
@@ -987,6 +1019,22 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 
 	ata_qc_free(qc);
 
+	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
+	 * Until those drivers are fixed, we detect the condition
+	 * here, fail the command with AC_ERR_SYSTEM and reenable the
+	 * port.
+	 *
+	 * Note that this doesn't change any behavior as internal
+	 * command failure results in disabling the device in the
+	 * higher layer for LLDDs without new reset/EH callbacks.
+	 *
+	 * Kill the following code as soon as those drivers are fixed.
+	 */
+	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
+		err_mask |= AC_ERR_SYSTEM;
+		ata_port_probe(ap);
+	}
+
 	return err_mask;
 }
 
@@ -1305,7 +1353,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 		if (print_info)
 			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
 			       ap->id, dev->devno);
-		ap->udma_mask &= ATA_UDMA5;
+		dev->udma_mask &= ATA_UDMA5;
 		dev->max_sectors = ATA_MAX_SECTORS;
 	}
 
@@ -1316,8 +1364,6 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 	return 0;
 
 err_out_nosup:
-	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
-	       ap->id, dev->devno);
 	DPRINTK("EXIT, err\n");
 	return rc;
 }
@@ -1384,7 +1430,7 @@ static int ata_bus_probe(struct ata_port *ap)
 		}
 
 		if (ata_dev_configure(ap, dev, 1)) {
-			dev->class++;	/* disable device */
+			ata_dev_disable(ap, dev);
 			continue;
 		}
 
@@ -1529,6 +1575,23 @@ void sata_phy_reset(struct ata_port *ap)
 	ata_bus_reset(ap);
 }
 
+/**
+ *	ata_dev_pair		-	return other device on cable
+ *	@ap: port
+ *	@adev: device
+ *
+ *	Obtain the other device on the same cable, or if none is
+ *	present NULL is returned
+ */
+
+struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_device *pair = &ap->device[1 - adev->devno];
+	if (!ata_dev_present(pair))
+		return NULL;
+	return pair;
+}
+
 /**
  *	ata_port_disable - Disable port.
  *	@ap: Port to be disabled.
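
ata_dev_pair() is exported (see the EXPORT_SYMBOL_GPL hunk further down) so PATA drivers that share one set of timing registers per cable can consult the other device when programming modes. A hedged sketch of that pattern, with a hypothetical driver and a purely illustrative clamping rule:

static void demo_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* hypothetical LLDD hook, not from this commit */
	struct ata_device *pair = ata_dev_pair(ap, adev);

	/* if master and slave share timings, honour the slower device */
	if (pair && pair->pio_mode < adev->pio_mode)
		adev->pio_mode = pair->pio_mode;

	/* ... program the shared timing registers for this cable ... */
}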
@@ -1697,20 +1760,28 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 	return 0;
 }
 
-static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
+static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 {
-	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
-		return;
+	unsigned int err_mask;
+	int rc;
 
 	if (dev->xfer_shift == ATA_SHIFT_PIO)
 		dev->flags |= ATA_DFLAG_PIO;
 
-	ata_dev_set_xfermode(ap, dev);
+	err_mask = ata_dev_set_xfermode(ap, dev);
+	if (err_mask) {
+		printk(KERN_ERR
+		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
+		       ap->id, err_mask);
+		return -EIO;
+	}
 
-	if (ata_dev_revalidate(ap, dev, 0)) {
-		printk(KERN_ERR "ata%u: failed to revalidate after set "
-		       "xfermode, disabled\n", ap->id);
-		ata_port_disable(ap);
-	}
+	rc = ata_dev_revalidate(ap, dev, 0);
+	if (rc) {
+		printk(KERN_ERR
+		       "ata%u: failed to revalidate after set xfermode\n",
+		       ap->id);
+		return rc;
+	}
 
 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
@@ -1719,6 +1790,7 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
 	       ap->id, dev->devno,
 	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
+	return 0;
 }
 
 static int ata_host_set_pio(struct ata_port *ap)
@@ -1778,16 +1850,19 @@ static void ata_set_mode(struct ata_port *ap)
 	/* step 1: calculate xfer_mask */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *dev = &ap->device[i];
-		unsigned int xfer_mask;
+		unsigned int pio_mask, dma_mask;
 
 		if (!ata_dev_present(dev))
 			continue;
 
-		xfer_mask = ata_dev_xfermask(ap, dev);
+		ata_dev_xfermask(ap, dev);
+
+		/* TODO: let LLDD filter dev->*_mask here */
 
-		dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
-		dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
-								ATA_MASK_UDMA));
+		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
+		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
+		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
 	}
 
 	/* step 2: always set host PIO timings */
@@ -1799,11 +1874,15 @@ static void ata_set_mode(struct ata_port *ap)
 	ata_host_set_dma(ap);
 
 	/* step 4: update devices' xfer mode */
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ata_dev_set_mode(ap, &ap->device[i]);
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		return;
+		if (!ata_dev_present(dev))
+			continue;
+
+		if (ata_dev_set_mode(ap, dev))
+			goto err_out;
+	}
 
 	if (ap->ops->post_set_mode)
 		ap->ops->post_set_mode(ap);
@@ -2630,18 +2709,15 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
  *	@ap: Port on which the device to compute xfermask for resides
  *	@dev: Device to compute xfermask for
  *
- *	Compute supported xfermask of @dev.  This function is
- *	responsible for applying all known limits including host
- *	controller limits, device blacklist, etc...
+ *	Compute supported xfermask of @dev and store it in
+ *	dev->*_mask.  This function is responsible for applying all
+ *	known limits including host controller limits, device
+ *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
- *
- *	RETURNS:
- *	Computed xfermask.
 */
-static unsigned int ata_dev_xfermask(struct ata_port *ap,
-				     struct ata_device *dev)
+static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 {
 	unsigned long xfer_mask;
 	int i;
@@ -2654,6 +2730,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
 		struct ata_device *d = &ap->device[i];
 		if (!ata_dev_present(d))
 			continue;
+		xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
+					       d->udma_mask);
 		xfer_mask &= ata_id_xfermask(d->id);
 		if (ata_dma_blacklisted(d))
 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
@@ -2663,7 +2741,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
 		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
 		       "disabling DMA\n", ap->id, dev->devno);
 
-	return xfer_mask;
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+			    &dev->udma_mask);
 }
 
 /**
@@ -2676,11 +2755,16 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
  *
  *	LOCKING:
  *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask otherwise.
  */
 
-static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
+static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
+					 struct ata_device *dev)
 {
 	struct ata_taskfile tf;
+	unsigned int err_mask;
 
 	/* set up set-features taskfile */
 	DPRINTK("set features - xfer mode\n");
@@ -2692,13 +2776,10 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = dev->xfer_mode;
 
-	if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
-		printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
-		       ap->id);
-		ata_port_disable(ap);
-	}
+	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
 
-	DPRINTK("EXIT\n");
+	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+	return err_mask;
 }
 
 /**
@@ -2775,7 +2856,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 
 	if (qc->flags & ATA_QCFLAG_SG) {
 		if (qc->n_elem)
-			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
 		/* restore last sg */
 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
 		if (pad_buf) {
@@ -2786,7 +2867,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 		}
 	} else {
 		if (qc->n_elem)
-			dma_unmap_single(ap->host_set->dev,
+			dma_unmap_single(ap->dev,
 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
 				dir);
 		/* restore sg */
@@ -2997,7 +3078,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 		goto skip_map;
 	}
 
-	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
+	dma_address = dma_map_single(ap->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address)) {
 		/* restore sg */
@@ -3085,7 +3166,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 	}
 
 	dir = qc->dma_dir;
-	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
+	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
 	if (n_elem < 1) {
 		/* restore last sg */
 		lsg->length += qc->pad_len;
@@ -4064,240 +4145,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 	return 0;
 }
 
-/**
- *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
-	u8 dmactl;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
-
-	/* load PRD table addr. */
-	mb();	/* make sure PRD table writes are visible to controller */
-	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
-
-	/* specify data direction, triple-check start bit is clear */
-	dmactl = readb(mmio + ATA_DMA_CMD);
-	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
-	if (!rw)
-		dmactl |= ATA_DMA_WR;
-	writeb(dmactl, mmio + ATA_DMA_CMD);
-
-	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
-}
-
-/**
- *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
-	u8 dmactl;
-
-	/* start host DMA transaction */
-	dmactl = readb(mmio + ATA_DMA_CMD);
-	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
-
-	/* Strictly, one may wish to issue a readb() here, to
-	 * flush the mmio write.  However, control also passes
-	 * to the hardware at this point, and it will interrupt
-	 * us when we are to resume control.  So, in effect,
-	 * we don't care when the mmio write flushes.
-	 * Further, a read of the DMA status register _immediately_
-	 * following the write may not be what certain flaky hardware
-	 * is expecting, so I think it is best to not add a readb()
-	 * without first checking all the MMIO ATA cards/mobos.
-	 * Or maybe I'm just being paranoid.
-	 */
-}
-
-/**
- *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
- *	@qc: Info associated with this ATA transaction.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
-	u8 dmactl;
-
-	/* load PRD table addr. */
-	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
-	/* specify data direction, triple-check start bit is clear */
-	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
-	if (!rw)
-		dmactl |= ATA_DMA_WR;
-	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
-}
-
-/**
- *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
- *	@qc: Info associated with this ATA transaction.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 dmactl;
-
-	/* start host DMA transaction */
-	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	outb(dmactl | ATA_DMA_START,
-	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-}
-
-
-/**
- *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	Writes the ATA_DMA_START flag to the DMA command register.
- *
- *	May be used as the bmdma_start() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-void ata_bmdma_start(struct ata_queued_cmd *qc)
-{
-	if (qc->ap->flags & ATA_FLAG_MMIO)
-		ata_bmdma_start_mmio(qc);
-	else
-		ata_bmdma_start_pio(qc);
-}
-
-
-/**
- *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	Writes address of PRD table to device's PRD Table Address
- *	register, sets the DMA control register, and calls
- *	ops->exec_command() to start the transfer.
- *
- *	May be used as the bmdma_setup() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-void ata_bmdma_setup(struct ata_queued_cmd *qc)
-{
-	if (qc->ap->flags & ATA_FLAG_MMIO)
-		ata_bmdma_setup_mmio(qc);
-	else
-		ata_bmdma_setup_pio(qc);
-}
-
-
-/**
- *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
- *	@ap: Port associated with this ATA transaction.
- *
- *	Clear interrupt and error flags in DMA status register.
- *
- *	May be used as the irq_clear() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-void ata_bmdma_irq_clear(struct ata_port *ap)
-{
-	if (!ap->ioaddr.bmdma_addr)
-		return;
-
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio =
-		      ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
-		writeb(readb(mmio), mmio);
-	} else {
-		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
-		outb(inb(addr), addr);
-	}
-}
-
-
-/**
- *	ata_bmdma_status - Read PCI IDE BMDMA status
- *	@ap: Port associated with this ATA transaction.
- *
- *	Read and return BMDMA status register.
- *
- *	May be used as the bmdma_status() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-u8 ata_bmdma_status(struct ata_port *ap)
-{
-	u8 host_stat;
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
-		host_stat = readb(mmio + ATA_DMA_STATUS);
-	} else
-		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-	return host_stat;
-}
-
-
-/**
- *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- *	@qc: Command we are ending DMA for
- *
- *	Clears the ATA_DMA_START flag in the dma control register
- *
- *	May be used as the bmdma_stop() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-void ata_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
-
-		/* clear start/stop bit */
-		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
-			mmio + ATA_DMA_CMD);
-	} else {
-		/* clear start/stop bit */
-		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
-			ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	}
-
-	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_altstatus(ap);        /* dummy read */
-}
-
 /**
  *	ata_host_intr - Handle host interrupt for given (port, task)
  *	@ap: Port on which interrupt arrived (possibly...)
@@ -4506,13 +4353,14 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
 *	Flush the cache on the drive, if appropriate, then issue a
 *	standbynow command.
 */
-int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
+int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
{
 	if (!ata_dev_present(dev))
 		return 0;
 	if (dev->class == ATA_DEV_ATA)
 		ata_flush_cache(ap, dev);
 
-	ata_standby_drive(ap, dev);
+	if (state.event != PM_EVENT_FREEZE)
+		ata_standby_drive(ap, dev);
 	ap->flags |= ATA_FLAG_SUSPENDED;
 	return 0;
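
The new pm_message_t argument comes from the "[PATCH] Make libata not powerdown drivers on PM_EVENT_FREEZE." change in this merge: suspend-to-disk's freeze phase must keep the drive spinning because the hibernation image is about to be written. The matching libata-scsi caller is collapsed above; as a hedged sketch only, it plausibly forwards the state it receives from the SCSI suspend hook like this (the hostdata cast was the usual 2.6.16-era idiom for reaching the ata_port; treat the exact body as an assumption):

int demo_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
{
	/* era idiom, assumed: ata_port lives in the Scsi_Host's hostdata */
	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
	struct ata_device *dev = &ap->device[sdev->id];

	/* PM_EVENT_FREEZE is passed through, so STANDBY IMMEDIATE is
	 * skipped for the freeze phase but still issued for suspend */
	return ata_device_suspend(ap, dev, state);
}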
@@ -4533,7 +4381,7 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
 
 int ata_port_start (struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->dev;
 	int rc;
 
 	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
@@ -4566,7 +4414,7 @@ int ata_port_start (struct ata_port *ap)
 
 void ata_port_stop (struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->dev;
 
 	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
 	ata_pad_free(ap, dev);
@@ -4632,6 +4480,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	ap->host = host;
 	ap->ctl = ATA_DEVCTL_OBS;
 	ap->host_set = host_set;
+	ap->dev = ent->dev;
 	ap->port_no = port_no;
 	ap->hard_port_no =
 		ent->legacy_mode ? ent->hard_port_no : port_no;
@@ -4647,8 +4496,13 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	INIT_WORK(&ap->port_task, NULL, NULL);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ap->device[i].devno = i;
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		dev->devno = i;
+		dev->pio_mask = UINT_MAX;
+		dev->mwdma_mask = UINT_MAX;
+		dev->udma_mask = UINT_MAX;
+	}
 
 #ifdef ATA_IRQ_TRAP
 	ap->stats.unhandled_irq = 1;
@@ -5114,6 +4968,8 @@ EXPORT_SYMBOL_GPL(ata_std_postreset);
 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_pair);
 EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_busy_sleep);
@@ -5124,7 +4980,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_error);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_release);
 EXPORT_SYMBOL_GPL(ata_host_intr);
-EXPORT_SYMBOL_GPL(ata_dev_classify);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+35 −44 (file changed; preview size limit exceeded, changes collapsed)

+62 −119 (file changed; preview size limit exceeded, changes collapsed)

+1 −1
@@ -371,7 +371,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
 	if (quirks & SIL_QUIRK_UDMA5MAX) {
 		printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
 		       ap->id, dev->devno, model_num);
-		ap->udma_mask &= ATA_UDMA5;
+		dev->udma_mask &= ATA_UDMA5;
 		return;
 	}
 }
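
This one-line sata_sil change is the payoff of the per-device mask work merged above: a quirk now clamps only the affected device instead of the whole port. A hedged sketch of the resulting idiom for any ->dev_config style hook; the driver and the quirk test are invented for illustration:

static void demo_dev_config(struct ata_port *ap, struct ata_device *dev)
{
	/* demo_device_is_quirky() is hypothetical; real drivers match
	 * IDENTIFY model strings, as sil_dev_config does above */
	if (demo_device_is_quirky(dev))
		dev->udma_mask &= ATA_UDMA5;	/* cap this device at UDMA/100 */

	/* the other device on the same port keeps its own dev->udma_mask
	 * and may still negotiate a faster mode */
}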