Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6636e7f4 authored by Don Brace, committed by James Bottomley
Browse files

hpsa: Use local workqueues instead of system workqueues

parent c8ae0ab1
Loading
Loading
Loading
Loading
+54 −14
Original line number Original line Diff line number Diff line
@@ -6787,14 +6787,14 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h)
	return 0;
	return 0;
}
}



static void hpsa_rescan_ctlr_worker(struct work_struct *work)
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
{
	unsigned long flags;
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
					struct ctlr_info, rescan_ctlr_work);
	detect_controller_lockup(h);

	if (lockup_detected(h))

	if (h->remove_in_progress)
		return;
		return;


	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6803,17 +6803,44 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
		hpsa_scan_start(h->scsi_host);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
		scsi_host_put(h->scsi_host);
	}
	}

	spin_lock_irqsave(&h->lock, flags);
	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
	spin_unlock_irqrestore(&h->lock, flags);
		return;
}
}

/*
 * Periodic controller-health worker: check the controller for lockup and,
 * if it is still alive and the device is not being removed, re-arm itself
 * to run again after heartbeat_sample_interval.
 *
 * NOTE(review): lines L76-L83 of the scraped diff were column-duplicated
 * (each statement appeared twice), which is not valid C; this body is the
 * deduplicated function matching the single-copy lines above it.
 */
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	/* Recover the per-controller state from the embedded delayed_work. */
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;	/* controller is dead; stop rescheduling ourselves */

	/*
	 * remove_in_progress is checked under h->lock so that
	 * hpsa_remove_one() setting the flag and cancelling the work
	 * cannot race with us re-queueing it.
	 */
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}


/*
 * Allocate a per-controller ordered workqueue named "<name>_<ctlr>_hpsa".
 * Logs an error via dev_err() and returns NULL on allocation failure;
 * otherwise returns the new workqueue (caller owns it and must
 * destroy_workqueue() it).
 */
static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	char qname[20];
	struct workqueue_struct *queue;

	/* Tag the queue with the controller number so it is unique per HBA. */
	snprintf(qname, sizeof(qname), "%s_%d_hpsa", name, h->ctlr);
	queue = alloc_ordered_workqueue(qname, 0);
	if (queue == NULL)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return queue;
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
{
	int dac, rc;
	int dac, rc;
@@ -6856,12 +6883,18 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);


	h->resubmit_wq = alloc_workqueue("hpsa", WQ_MEM_RECLAIM, 0);
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean1;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
	if (!h->resubmit_wq) {
		dev_err(&h->pdev->dev, "Failed to allocate work queue\n");
		rc = -ENOMEM;
		rc = -ENOMEM;
		goto clean1;
		goto clean1;
	}
	}

	/* Allocate and clear per-cpu variable lockup_detected */
	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
	if (!h->lockup_detected) {
@@ -6985,6 +7018,9 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;
	return 0;


clean4:
clean4:
@@ -6996,6 +7032,8 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
clean1:
clean1:
	if (h->resubmit_wq)
	if (h->resubmit_wq)
		destroy_workqueue(h->resubmit_wq);
		destroy_workqueue(h->resubmit_wq);
	if (h->rescan_ctlr_wq)
		destroy_workqueue(h->rescan_ctlr_wq);
	if (h->lockup_detected)
	if (h->lockup_detected)
		free_percpu(h->lockup_detected);
		free_percpu(h->lockup_detected);
	kfree(h);
	kfree(h);
@@ -7069,11 +7107,13 @@ static void hpsa_remove_one(struct pci_dev *pdev)
	/* Get rid of any controller monitoring work items */
	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	h->remove_in_progress = 1;
	cancel_delayed_work(&h->monitor_ctlr_work);
	spin_unlock_irqrestore(&h->lock, flags);
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	hpsa_shutdown(pdev);
	destroy_workqueue(h->resubmit_wq);
	iounmap(h->vaddr);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	iounmap(h->cfgtable);
+2 −0
Original line number Original line Diff line number Diff line
@@ -207,6 +207,7 @@ struct ctlr_info {
	atomic_t firmware_flash_in_progress;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u8 q[MAX_REPLY_QUEUES];
@@ -251,6 +252,7 @@ struct ctlr_info {
	int	acciopath_status;
	int	acciopath_status;
	int	raid_offload_debug;
	int	raid_offload_debug;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
};
};


struct offline_device_entry {
struct offline_device_entry {