
Commit d4b4ff8e authored by Keith Busch, committed by Matthew Wilcox

NVMe: Schedule reset for failed controllers



Schedule a controller reset when the controller indicates it has a failed
status. If the device does not become ready after a reset, the PCI device
will be scheduled for removal.

Signed-off-by: Keith Busch <keith.busch@intel.com>
[fixed checkpatch issue]
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
parent 9a6b9458
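
For context, the hunks below poll the controller status register from nvme_kthread() and hand a failed controller to the reset work queue. The following is a minimal, self-contained user-space sketch of that check, not the driver's actual code: the fake_dev struct, queue_reset(), and poll_device() are hypothetical stand-ins for the kernel's nvme_dev, queue_work()/reset_work, and the kthread loop, with CSTS.CFS as the fatal-status bit.

/*
 * Minimal user-space sketch of the check this patch adds to nvme_kthread():
 * if the controller status register reports a fatal condition (CSTS.CFS)
 * on an initialized device and no reset is already in flight, schedule a
 * controller reset.  The struct and helpers below are simplified stand-ins,
 * not the kernel's real API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CSTS_CFS (1u << 1)	/* Controller Fatal Status bit (NVMe spec) */

struct fake_dev {
	uint32_t csts;		/* last value read from the CSTS register */
	bool initialized;	/* set once probe/resume has completed */
	bool reset_pending;	/* stands in for work_busy(&dev->reset_work) */
};

/* Stand-in for queue_work(nvme_workq, &dev->reset_work). */
static void queue_reset(struct fake_dev *dev)
{
	dev->reset_pending = true;
	printf("Failed status, reset controller\n");
}

/* One polling pass over a single device, mirroring the new kthread check. */
static void poll_device(struct fake_dev *dev)
{
	if ((dev->csts & CSTS_CFS) && dev->initialized) {
		if (dev->reset_pending)
			return;		/* a reset is already scheduled */
		queue_reset(dev);
	}
}

int main(void)
{
	struct fake_dev dev = { .csts = CSTS_CFS, .initialized = true };

	poll_device(&dev);	/* schedules a reset exactly once */
	poll_device(&dev);	/* no-op: reset is still pending */
	return 0;
}

In the driver itself, the failed device is also removed from dev_list before the reset work is queued (hence the switch to list_for_each_entry_safe() below), so the polling thread does not reconsider it while the reset runs.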
+19 −2
@@ -60,6 +60,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
 /*
  * An NVM Express queue.  Each device has at least two (one for admin
  * commands and one for I/O commands).
@@ -1612,13 +1614,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 
 static int nvme_kthread(void *data)
 {
-	struct nvme_dev *dev;
+	struct nvme_dev *dev, *next;
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
-		list_for_each_entry(dev, &dev_list, node) {
+		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
+			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+							dev->initialized) {
+				if (work_busy(&dev->reset_work))
+					continue;
+				list_del_init(&dev->node);
+				dev_warn(&dev->pci_dev->dev,
+					"Failed status, reset controller\n");
+				INIT_WORK(&dev->reset_work,
+							nvme_reset_failed_dev);
+				queue_work(nvme_workq, &dev->reset_work);
+				continue;
+			}
 			for (i = 0; i < dev->queue_count; i++) {
 				struct nvme_queue *nvmeq = dev->queues[i];
 				if (!nvmeq)
@@ -2006,6 +2020,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
 
+	dev->initialized = 0;
 	for (i = dev->queue_count - 1; i >= 0; i--)
 		nvme_disable_queue(dev, i);
 
@@ -2196,6 +2211,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		queue_work(nvme_workq, &dev->reset_work);
 		spin_unlock(&dev_list_lock);
 	}
+	dev->initialized = 1;
 	return 0;
 }
 
@@ -2269,6 +2285,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto remove;
 
+	dev->initialized = 1;
 	kref_init(&dev->kref);
 	return 0;
 
+1 −0
@@ -95,6 +95,7 @@ struct nvme_dev {
 	u32 max_hw_sectors;
 	u32 stripe_size;
 	u16 oncs;
+	u8 initialized;
 };
 
 /*