Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1bcdb535 authored by Sebastian Ott's avatar Sebastian Ott Committed by Martin Schwidefsky
Browse files

s390/dasd: simplify locking in dasd_times_out



Provide __dasd_cancel_req that is called with the ccw device lock
held to simplify the locking in dasd_times_out. Also this removes
the following sparse warning:
context imbalance in 'dasd_times_out' - different lock contexts for basic block

Note: with this change dasd_schedule_device_bh is now called (via
dasd_cancel_req) with the ccw device lock held. But it is already
the case for other codepaths.

Signed-off-by: Sebastian Ott <sebott@linux.ibm.com>
Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent a166c368
Loading
Loading
Loading
Loading
+17 −14
Original line number Diff line number Diff line
@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;
	int rc = 0;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_cancel_req);

/*
 * Cancel a request asynchronously.
 *
 * Thin locking wrapper around __dasd_cancel_req(): take the ccw device
 * lock of the request's start device, delegate the actual cancellation,
 * then drop the lock. The caller must wait for the request to be
 * returned via its callback before reusing it.
 *
 * Returns the result of __dasd_cancel_req().
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *dev = cqr->startdev;
	unsigned long lock_flags;
	int ret;

	spin_lock_irqsave(get_ccwdev_lock(dev->cdev), lock_flags);
	ret = __dasd_cancel_req(cqr);
	spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), lock_flags);

	return ret;
}

/*
 * SECTION: Operations of the dasd_block layer.
@@ -3082,12 +3089,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
	cqr->retries = -1;
	cqr->intrc = -ETIMEDOUT;
	if (cqr->status >= DASD_CQR_QUEUED) {
		spin_unlock(get_ccwdev_lock(device->cdev));
		rc = dasd_cancel_req(cqr);
		rc = __dasd_cancel_req(cqr);
	} else if (cqr->status == DASD_CQR_FILLED ||
		   cqr->status == DASD_CQR_NEED_ERP) {
		cqr->status = DASD_CQR_TERMINATED;
		spin_unlock(get_ccwdev_lock(device->cdev));
	} else if (cqr->status == DASD_CQR_IN_ERP) {
		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;

@@ -3102,9 +3107,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
			searchcqr->retries = -1;
			searchcqr->intrc = -ETIMEDOUT;
			if (searchcqr->status >= DASD_CQR_QUEUED) {
				spin_unlock(get_ccwdev_lock(device->cdev));
				rc = dasd_cancel_req(searchcqr);
				spin_lock(get_ccwdev_lock(device->cdev));
				rc = __dasd_cancel_req(searchcqr);
			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
				searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,8 +3121,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
			}
			break;
		}
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irqrestore(&cqr->dq->lock, flags);