
Commit bbc615b1 authored by Bartlomiej Zolnierkiewicz

ide: use __ide_end_request() in ide_end_dequeued_request()



* Remove dead code for handling IDE TCQ from ide_end_dequeued_request().

* Add 'dequeue' parameter to __ide_end_request().

* Use __ide_end_request() in ide_end_dequeued_request().

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent bde07e5e
+10 −34
@@ -55,7 +55,7 @@
 #include <asm/io.h>
 
 static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, unsigned int nr_bytes)
+			     int uptodate, unsigned int nr_bytes, int dequeue)
 {
 	int ret = 1;
 
@@ -80,9 +80,11 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 
 	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
 		add_disk_randomness(rq->rq_disk);
-		if (!list_empty(&rq->queuelist))
-			blkdev_dequeue_request(rq);
-		HWGROUP(drive)->rq = NULL;
+		if (dequeue) {
+			if (!list_empty(&rq->queuelist))
+				blkdev_dequeue_request(rq);
+			HWGROUP(drive)->rq = NULL;
+		}
 		end_that_request_last(rq, uptodate);
 		ret = 0;
 	}
@@ -122,7 +124,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	ret = __ide_end_request(drive, rq, uptodate, nr_bytes);
+	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
 
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ret;
@@ -255,39 +257,13 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, int nr_sectors)
 {
 	unsigned long flags;
-	int ret = 1;
+	int ret;
 
 	spin_lock_irqsave(&ide_lock, flags);
 
 	BUG_ON(!blk_rq_started(rq));
 
-	/*
-	 * if failfast is set on a request, override number of sectors and
-	 * complete the whole request right now
-	 */
-	if (blk_noretry_request(rq) && end_io_error(uptodate))
-		nr_sectors = rq->hard_nr_sectors;
-
-	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
-		rq->errors = -EIO;
-
-	/*
-	 * decide whether to reenable DMA -- 3 is a random magic for now,
-	 * if we DMA timeout more than 3 times, just stay in PIO
-	 */
-	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
-		drive->state = 0;
-		HWGROUP(drive)->hwif->ide_dma_on(drive);
-	}
-
-	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-		add_disk_randomness(rq->rq_disk);
-		if (blk_rq_tagged(rq))
-			blk_queue_end_tag(drive->queue, rq);
-		end_that_request_last(rq, uptodate);
-		ret = 0;
-	}
+	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
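
As a side note, the shape of this refactoring can be illustrated with a small stand-alone C sketch: one shared completion helper takes a 'dequeue' flag, and the two entry points become thin wrappers passing 1 or 0, just as ide_end_request() and ide_end_dequeued_request() do after this patch. The types and names below (toy_request, toy_end_request, and so on) are invented for illustration and are not the kernel API.

/*
 * Stand-alone sketch (not kernel code): models the pattern of the patch
 * above.  A single completion helper takes a 'dequeue' flag, and the two
 * public entry points become thin wrappers that pass 1 or 0.
 */
#include <stdio.h>

struct toy_request {
	int on_queue;			/* stand-in for "still linked on the request queue" */
	unsigned int bytes_left;	/* bytes not yet completed */
};

/* Shared completion logic: touches the queue only when 'dequeue' is set. */
static int toy_end_request(struct toy_request *rq, unsigned int nr_bytes,
			   int dequeue)
{
	if (nr_bytes >= rq->bytes_left) {
		rq->bytes_left = 0;
		if (dequeue && rq->on_queue)
			rq->on_queue = 0;	/* take it off the queue */
		return 0;			/* request fully completed */
	}
	rq->bytes_left -= nr_bytes;
	return 1;				/* more data still pending */
}

/* Normal path: the request is the currently queued one, so dequeue it. */
static int toy_end_queued(struct toy_request *rq, unsigned int nr_bytes)
{
	return toy_end_request(rq, nr_bytes, 1);
}

/* Dequeued path: the request was already pulled off the queue, so leave the queue alone. */
static int toy_end_dequeued(struct toy_request *rq, unsigned int nr_bytes)
{
	return toy_end_request(rq, nr_bytes, 0);
}

int main(void)
{
	struct toy_request a = { .on_queue = 1, .bytes_left = 4096 };
	struct toy_request b = { .on_queue = 0, .bytes_left = 4096 };
	int ret;

	ret = toy_end_queued(&a, 4096);
	printf("queued path:   ret=%d on_queue=%d\n", ret, a.on_queue);

	ret = toy_end_dequeued(&b, 4096);
	printf("dequeued path: ret=%d on_queue=%d\n", ret, b.on_queue);

	return 0;
}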