Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 66337dab authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  [PCI] Add JMicron PCI ID constants
  [PATCH] ahci: Ensure that we don't grab both functions
  [PATCH] libata-core.c: restore configuration boot messages in ata_dev_configure(), v2
  [PATCH] sata_sil24: add suspend/sleep support
  [PATCH] sata_sil24: separate out sil24_init_controller()
  [PATCH] sata_sil: add suspend/sleep support
  [PATCH] sata_sil: separate out sil_init_controller()
  [PATCH] libata: reimplement controller-wide PM
  [PATCH] libata: reimplement per-dev PM
  [PATCH] libata: implement PM EH actions
  [PATCH] libata: separate out __ata_ehi_hotplugged()
  [PATCH] libata: implement ATA_EHI_NO_AUTOPSY and QUIET
  [PATCH] libata: clean up debounce parameters and improve parameter selection
  [PATCH] libata: implement ATA_EHI_RESUME_LINK
  [PATCH] libata: replace ap_lock w/ ap->lock in ata_scsi_error()
  [PATCH] libata: fix ehc->i.action setting in ata_eh_autopsy()
  [PATCH] libata: add ap->pflags and move core dynamic flags to it
  [PATCH] libata: Conditionally set host->max_cmd_len
  [PATCH] sata_vsc: data_xfer should use mmio
parents a942b57b 309bade0
Loading
Loading
Loading
Loading
+12 −5
Original line number Diff line number Diff line
@@ -1052,7 +1052,7 @@ static void ahci_thaw(struct ata_port *ap)

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->flags & ATA_FLAG_FROZEN)) {
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
@@ -1323,6 +1323,17 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* JMicron-specific fixup: make sure we're in AHCI mode */
	/* This is protected from races with ata_jmicron by the pci probe
	   locking */
	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
		/* AHCI enable, AHCI on function 0 */
		pci_write_config_byte(pdev, 0x41, 0xa1);
		/* Function 1 is the PATA controller */
		if (PCI_FUNC(pdev->devfn))
			return -ENODEV;
	}

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
@@ -1378,10 +1389,6 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	if (have_msi)
		hpriv->flags |= AHCI_FLAG_MSI;

	/* JMicron-specific fixup: make sure we're in AHCI mode */
	if (pdev->vendor == 0x197b)
		pci_write_config_byte(pdev, 0x41, 0xa1);

	/* initialize adapter */
	rc = ahci_host_init(probe_ent);
	if (rc)
+185 −104
Original line number Diff line number Diff line
@@ -61,9 +61,9 @@
#include "libata.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_boot[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_eh[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_before_fsrst[]	= { 100, 2000, 5000 };
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
@@ -907,7 +907,7 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);
@@ -938,7 +938,7 @@ void ata_port_flush_task(struct ata_port *ap)
	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
@@ -957,7 +957,7 @@ void ata_port_flush_task(struct ata_port *ap)
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
@@ -1009,7 +1009,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->flags & ATA_FLAG_FROZEN) {
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}
@@ -1325,6 +1325,19 @@ static void ata_dev_config_ncq(struct ata_device *dev,
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/* Recompute the SCSI host's maximum CDB length for @ap as the maximum
 * cdb_len over all ATA_MAX_DEVICES devices on the port.  Guarded on
 * ap->host because the port may not have a SCSI host attached yet —
 * NOTE(review): presumably called from ata_dev_configure() after a
 * device's cdb_len changes; confirm against the caller.
 */
static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->host) {
		/* start from 0 so a detached device's stale cdb_len
		 * cannot keep the old maximum alive */
		ap->host->max_cmd_len = 0;
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ap->host->max_cmd_len = max_t(unsigned int,
						      ap->host->max_cmd_len,
						      ap->device[i].cdb_len);
	}
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
@@ -1344,7 +1357,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
	struct ata_port *ap = dev->ap;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
@@ -1404,7 +1417,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_info(ap))
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					"max %s, %Lu sectors: %s %s\n",
					ata_id_major_version(id),
@@ -1427,7 +1440,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
			}

			/* print device info to dmesg */
			if (ata_msg_info(ap))
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					"max %s, %Lu sectors: CHS %u/%u/%u\n",
					ata_id_major_version(id),
@@ -1439,7 +1452,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)

		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			if (ata_msg_info(ap))
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO,
					"ata%u: dev %u multi count %u\n",
					ap->id, dev->devno, dev->multi_count);
@@ -1468,21 +1481,17 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
		}

		/* print device info to dmesg */
		if (ata_msg_info(ap))
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);
	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_info(ap))
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
@@ -2137,7 +2146,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
		 * return error code and failing device on failure.
		 */
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			if (ata_dev_enabled(&ap->device[i])) {
			if (ata_dev_ready(&ap->device[i])) {
				ap->ops->set_mode(ap);
				break;
			}
@@ -2203,7 +2212,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
	/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
@@ -2579,7 +2589,7 @@ static void ata_wait_spinup(struct ata_port *ap)

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_eh);
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
@@ -2615,16 +2625,17 @@ static void ata_wait_spinup(struct ata_port *ap)
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* hotplug? */
	if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
		if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;
		if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
@@ -2632,11 +2643,6 @@ int ata_std_prereset(struct ata_port *ap)

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		if (ap->flags & ATA_FLAG_LOADING)
			timing = sata_deb_timing_boot;
		else
			timing = sata_deb_timing_eh;

		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
@@ -2724,6 +2730,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	u32 scontrol;
	int rc;

@@ -2761,7 +2769,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
	msleep(1);

	/* bring phy back */
	sata_phy_resume(ap, sata_deb_timing_eh);
	sata_phy_resume(ap, timing);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
@@ -4285,7 +4293,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->flags & ATA_FLAG_FROZEN))
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
@@ -4407,7 +4415,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->flags & ATA_FLAG_FROZEN);
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;
@@ -5001,86 +5009,120 @@ int ata_flush_cache(struct ata_device *dev)
	return 0;
}

static int ata_standby_drive(struct ata_device *dev)
static int ata_host_set_request_pm(struct ata_host_set *host_set,
				   pm_message_t mesg, unsigned int action,
				   unsigned int ehi_flags, int wait)
{
	unsigned int err_mask;
	unsigned long flags;
	int i, rc;

	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

	return 0;
		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

static int ata_start_drive(struct ata_device *dev)
{
	unsigned int err_mask;
		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

	err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to start drive "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_device_resume - wakeup a previously suspended devices
 *	@dev: the device to resume
 *	ata_host_set_suspend - suspend host_set
 *	@host_set: host_set to suspend
 *	@mesg: PM message
 *
 *	Kick the drive back into action, by sending it an idle immediate
 *	command and making sure its transfer mode matches between drive
 *	and host.
 *	Suspend @host_set.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_device_resume(struct ata_device *dev)
int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
{
	struct ata_port *ap = dev->ap;
	int i, j, rc;

	if (ap->flags & ATA_FLAG_SUSPENDED) {
		struct ata_device *failed_dev;
	rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
		ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

		ap->flags &= ~ATA_FLAG_SUSPENDED;
		while (ata_set_mode(ap, &failed_dev))
			ata_dev_disable(failed_dev);
			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}
	if (!ata_dev_enabled(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_start_drive(dev);

	host_set->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_set_resume(host_set);
	return rc;
}

/**
 *	ata_device_suspend - prepare a device for suspend
 *	@dev: the device to suspend
 *	@state: target power management state
 *	ata_host_set_resume - resume host_set
 *	@host_set: host_set to resume
 *
 *	Resume @host_set.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	Flush the cache on the drive, if appropriate, then issue a
 *	standbynow command.
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
int ata_device_suspend(struct ata_device *dev, pm_message_t state)
void ata_host_set_resume(struct ata_host_set *host_set)
{
	struct ata_port *ap = dev->ap;

	if (!ata_dev_enabled(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(dev);

	if (state.event != PM_EVENT_FREEZE)
		ata_standby_drive(dev);
	ap->flags |= ATA_FLAG_SUSPENDED;
	return 0;
	ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
				ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host_set->dev->power.power_state = PMSG_ON;
}

/**
@@ -5440,6 +5482,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);
@@ -5447,10 +5490,11 @@ int ata_device_add(const struct ata_probe_ent *ent)
			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ap->eh_info.action |= ATA_EH_SOFTRESET;
			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->flags |= ATA_FLAG_LOADING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);
@@ -5518,7 +5562,7 @@ void ata_port_detach(struct ata_port *ap)

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->flags |= ATA_FLAG_UNLOADING;
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
@@ -5723,20 +5767,55 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
	return (tmp == bits->val) ? 1 : 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);

	if (state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	return 0;
	}
}

int ata_pci_device_resume(struct pci_dev *pdev)
/* Low-level PCI resume helper: restore the device to D0 and full
 * operation.  Order matters — power state must be restored before the
 * saved config space is written back.  NOTE(review): the return value
 * of pci_enable_device() is ignored here; a failure would leave the
 * device unusable — confirm whether callers can tolerate that.
 */
void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}

/* Default PCI suspend entry point for libata drivers: suspend the
 * host_set (and its chained sibling, if any) via EH, then power the
 * PCI device down.
 *
 * On failure of the second host_set, the first one is resumed so the
 * controller is left fully operational rather than half-suspended.
 * Returns 0 on success, -errno on failure.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_set_suspend(host_set, state);
	if (rc)
		return rc;

	/* some controllers register two host_sets chained via ->next
	 * (e.g. combined SATA+PATA); both must be quiesced */
	if (host_set->next) {
		rc = ata_host_set_suspend(host_set->next, state);
		if (rc) {
			/* roll back: un-suspend the first host_set */
			ata_host_set_resume(host_set);
			return rc;
		}
	}

	/* only touch PCI power state after all ports are quiescent */
	ata_pci_device_do_suspend(pdev, state);

	return 0;
}

/* Default PCI resume entry point for libata drivers: power the PCI
 * device back up first, then request EH-driven resume of each
 * host_set.  ata_host_set_resume() only schedules EH work, so the
 * two host_sets resume in parallel.  Always returns 0.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	/* hardware must be in D0 before any port register access */
	ata_pci_device_do_resume(pdev);
	ata_host_set_resume(host_set);
	if (host_set->next)
		ata_host_set_resume(host_set->next);

	return 0;
}
#endif /* CONFIG_PCI */
@@ -5842,9 +5921,9 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
@@ -5916,6 +5995,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_set_suspend);
EXPORT_SYMBOL_GPL(ata_host_set_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5930,14 +6011,14 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

+357 −48

File changed.

Preview size limit exceeded, changes collapsed.

+117 −7
Original line number Diff line number Diff line
@@ -397,20 +397,129 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
	}
}

int ata_scsi_device_resume(struct scsi_device *sdev)
/**
 *	ata_scsi_device_suspend - suspend ATA device associated with sdev
 *	@sdev: the SCSI device to suspend
 *	@state: target power management state
 *
 *	Request suspend EH action on the ATA device associated with
 *	@sdev and wait for the operation to complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	unsigned long flags;
	unsigned int action;
	int rc = 0;

	if (!dev)
		goto out;

	return ata_device_resume(dev);
	spin_lock_irqsave(ap->lock, flags);

	/* wait for the previous resume to complete */
	while (dev->flags & ATA_DFLAG_SUSPENDED) {
		spin_unlock_irqrestore(ap->lock, flags);
		ata_port_wait_eh(ap);
		spin_lock_irqsave(ap->lock, flags);
	}

int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
	/* if @sdev is already detached, nothing to do */
	if (sdev->sdev_state == SDEV_OFFLINE ||
	    sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
		goto out_unlock;

	/* request suspend */
	action = ATA_EH_SUSPEND;
	if (state.event != PM_EVENT_SUSPEND)
		action |= ATA_EH_PM_FREEZE;
	ap->eh_info.dev_action[dev->devno] |= action;
	ap->eh_info.flags |= ATA_EHI_QUIET;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* wait for EH to do the job */
	ata_port_wait_eh(ap);

	spin_lock_irqsave(ap->lock, flags);

	/* If @sdev is still attached but the associated ATA device
	 * isn't suspended, the operation failed.
	 */
	if (sdev->sdev_state != SDEV_OFFLINE &&
	    sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
	    !(dev->flags & ATA_DFLAG_SUSPENDED))
		rc = -EIO;

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
 out:
	if (rc == 0)
		sdev->sdev_gendev.power.power_state = state;
	return rc;
}

/**
 *	ata_scsi_device_resume - resume ATA device associated with sdev
 *	@sdev: the SCSI device to resume
 *
 *	Request resume EH action on the ATA device associated with
 *	@sdev and return immediately.  This enables parallel
 *	wakeup/spinup of devices.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0.
 */
int ata_scsi_device_resume(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	struct ata_eh_info *ehi = &ap->eh_info;
	unsigned long flags;
	unsigned int action;

	if (!dev)
		goto out;

	spin_lock_irqsave(ap->lock, flags);

	/* if @sdev is already detached, nothing to do */
	if (sdev->sdev_state == SDEV_OFFLINE ||
	    sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
		goto out_unlock;

	/* request resume */
	action = ATA_EH_RESUME;
	if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
		__ata_ehi_hotplugged(ehi);
	else
		action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
	ehi->dev_action[dev->devno] |= action;

	/* We don't want autopsy and verbose EH messages.  Disable
	 * those if we're the only device on this link.
	 */
	if (ata_port_max_devices(ap) == 1)
		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	return ata_device_suspend(dev, state);
	ata_port_schedule_eh(ap);

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
 out:
	sdev->sdev_gendev.power.power_state = PMSG_ON;
	return 0;
}

/**
@@ -2930,7 +3039,7 @@ void ata_scsi_hotplug(void *data)
	struct ata_port *ap = data;
	int i;

	if (ap->flags & ATA_FLAG_UNLOADING) {
	if (ap->pflags & ATA_PFLAG_UNLOADING) {
		DPRINTK("ENTER/EXIT - unloading\n");
		return;
	}
@@ -3011,6 +3120,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
		if (dev) {
			ap->eh_info.probe_mask |= 1 << dev->devno;
			ap->eh_info.action |= ATA_EH_SOFTRESET;
			ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
		} else
			rc = -EINVAL;
	}
+66 −39
Original line number Diff line number Diff line
@@ -109,6 +109,7 @@ enum {
};

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int sil_pci_device_resume(struct pci_dev *pdev);
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
@@ -160,6 +161,8 @@ static struct pci_driver sil_pci_driver = {
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
};

static struct scsi_host_template sil_sht = {
@@ -178,6 +181,8 @@ static struct scsi_host_template sil_sht = {
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static const struct ata_port_operations sil_ops = {
@@ -370,7 +375,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
		 * during hardreset makes controllers with broken SIEN
		 * repeat probing needlessly.
		 */
		if (!(ap->flags & ATA_FLAG_FROZEN)) {
		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
			ata_ehi_hotplugged(&ap->eh_info);
			ap->eh_info.serror |= serror;
		}
@@ -561,6 +566,52 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
	}
}

/* One-shot hardware initialization for SiI311x/3114 controllers,
 * factored out of sil_init_one() so it can also be replayed on
 * resume from suspend (controller loses this state in low power).
 * Programs per-port FIFO arbitration, applies the R_ERR-on-DMA-
 * activate FIS errata workaround, and enables 4-port interrupt
 * steering on 3114 parts.
 */
static void sil_init_controller(struct pci_dev *pdev,
				int n_ports, unsigned long host_flags,
				void __iomem *mmio_base)
{
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < n_ports; i++) {
			/* only ports whose SFIS config low bits read
			 * 0x01 need the fix; print the notice once */
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
@@ -570,8 +621,6 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	int rc;
	unsigned int i;
	int pci_dev_busy = 0;
	u32 tmp;
	u8 cls;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -630,42 +679,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
		ata_std_ports(&probe_ent->port[i]);
	}

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < probe_ent->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (ent->driver_data == sil_3114) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
	sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
			    mmio_base);

	pci_set_master(pdev);

@@ -685,6 +700,18 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	return rc;
}

/* Driver-specific PCI resume: after generic PCI power-up, the SiI
 * controller must be re-initialized (FIFO arbitration, errata bits,
 * interrupt steering are lost across suspend) before libata EH is
 * asked to resume the ports.  Always returns 0.
 */
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	/* host_flags are identical across ports; port 0's copy is used */
	sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
			    host_set->mmio_base);
	ata_host_set_resume(host_set);

	return 0;
}

static int __init sil_init(void)
{
	return pci_module_init(&sil_pci_driver);
Loading