
Commit a39d1139 authored by Jens Axboe

Merge branch 'barrier' into for-linus

parents 8b6800fb 7e3da6c4
block/elevator.c  +9 −8
@@ -712,6 +712,14 @@ struct request *elv_next_request(struct request_queue *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
+		/*
+		 * Kill the empty barrier place holder, the driver must
+		 * not ever see it.
+		 */
+		if (blk_empty_barrier(rq)) {
+			end_queued_request(rq, 1);
+			continue;
+		}
 		if (!(rq->cmd_flags & REQ_STARTED)) {
 			/*
 			 * This is the first time the device driver
@@ -751,15 +759,8 @@ struct request *elv_next_request(struct request_queue *q)
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
-			int nr_bytes = rq->hard_nr_sectors << 9;
-
-			if (!nr_bytes)
-				nr_bytes = rq->data_len;
-
-			blkdev_dequeue_request(rq);
-			rq->cmd_flags |= REQ_QUIET;
-			end_that_request_chunk(rq, 0, nr_bytes);
-			end_that_request_last(rq, 0);
+			end_queued_request(rq, 0);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
 								ret);
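Note on the hunk above: it relies on blk_empty_barrier(), which this diff never defines (the macro lives in include/linux/blkdev.h). A sketch of the predicate as this era defines it, with the exact form to be treated as an assumption since it is not part of this commit:

	/* An empty barrier is a fs request with the barrier flag set and
	 * no data attached; only such placeholders may be killed above. */
	#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && \
					 blk_fs_request(rq) && \
					 !(rq)->hard_nr_sectors)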
block/ll_rw_blk.c  +190 −80
@@ -304,23 +304,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 EXPORT_SYMBOL(blk_queue_ordered);
 
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:     the request queue
- * @iff:   the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
-	q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
 /*
  * Cache flushing for ordered writes handling
  */
@@ -377,10 +360,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	/*
 	 * Okay, sequence complete.
 	 */
-	rq = q->orig_bar_rq;
-	uptodate = q->orderr ? q->orderr : 1;
+	uptodate = 1;
+	if (q->orderr)
+		uptodate = q->orderr;
 
 	q->ordseq = 0;
+	rq = q->orig_bar_rq;
 
 	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
 	end_that_request_last(rq, uptodate);
@@ -445,7 +430,8 @@ static inline struct request *start_ordered(struct request_queue *q,
 	rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
-	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (q->ordered & QUEUE_ORDERED_FUA)
+		rq->cmd_flags |= REQ_FUA;
 	rq->elevator_private = NULL;
 	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -455,9 +441,12 @@ static inline struct request *start_ordered(struct request_queue *q,
 	 * Queue ordered sequence.  As we stack them at the head, we
 	 * need to queue in reverse order.  Note that we rely on that
 	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-	 * request gets inbetween ordered sequence.
+	 * request gets inbetween ordered sequence. If this request is
+	 * an empty barrier, we don't need to do a postflush ever since
+	 * there will be no data written between the pre and post flush.
+	 * Hence a single flush will suffice.
 	 */
-	if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
 		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -481,7 +470,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 int blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
-	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
@@ -2660,6 +2649,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 EXPORT_SYMBOL(blk_execute_rq);
 
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+	complete(bio->bi_private);
+}
+
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:	blockdev to issue flush for
@@ -2672,7 +2669,10 @@ EXPORT_SYMBOL(blk_execute_rq);
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
+	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q;
+	struct bio *bio;
+	int ret;
 
 	if (bdev->bd_disk == NULL)
 		return -ENXIO;
@@ -2680,10 +2680,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	q = bdev_get_queue(bdev);
 	if (!q)
 		return -ENXIO;
-	if (!q->issue_flush_fn)
-		return -EOPNOTSUPP;
 
-	return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+	bio = bio_alloc(GFP_KERNEL, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = bio_end_empty_barrier;
+	bio->bi_private = &wait;
+	bio->bi_bdev = bdev;
+	submit_bio(1 << BIO_RW_BARRIER, bio);
+
+	wait_for_completion(&wait);
+
+	/*
+	 * The driver must store the error location in ->bi_sector, if
+	 * it supports it. For non-stacked drivers, this should be copied
+	 * from rq->sector.
+	 */
+	if (error_sector)
+		*error_sector = bio->bi_sector;
+
+	ret = 0;
+	if (!bio_flagged(bio, BIO_UPTODATE))
+		ret = -EIO;
+
+	bio_put(bio);
+	return ret;
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
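The reworked blkdev_issue_flush() keeps its old signature, so callers are untouched; only the mechanism changed, from the per-queue issue_flush_fn hook to an empty barrier bio submitted and waited on inline. A minimal caller sketch, assuming an already-opened block device (the helper and its error reporting are illustrative, not part of this commit):

	#include <linux/blkdev.h>
	#include <linux/kernel.h>

	/* Hypothetical helper: flush the write cache behind "bdev", e.g. a
	 * filesystem's sb->s_bdev, and log where a failure was seen. */
	static int my_flush_cache(struct block_device *bdev)
	{
		sector_t bad_sector = 0;
		int err;

		err = blkdev_issue_flush(bdev, &bad_sector);
		if (err)
			printk(KERN_ERR "flush failed near sector %llu: %d\n",
			       (unsigned long long)bad_sector, err);
		return err;
	}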
@@ -3051,7 +3073,7 @@ static inline void blk_partition_remap(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 
-	if (bdev != bdev->bd_contains) {
+	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;
 		const int rw = bio_data_dir(bio);
 
@@ -3117,6 +3139,35 @@ static inline int should_fail_request(struct bio *bio)
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+	sector_t maxsector;
+
+	if (!nr_sectors)
+		return 0;
+
+	/* Test device or partition size, when known. */
+	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	if (maxsector) {
+		sector_t sector = bio->bi_sector;
+
+		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+			/*
+			 * This may well happen - the kernel calls bread()
+			 * without checking the size of the device, e.g., when
+			 * mounting a device.
+			 */
+			handle_bad_sector(bio);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3144,27 +3195,14 @@ static inline int should_fail_request(struct bio *bio)
 static inline void __generic_make_request(struct bio *bio)
 {
 	struct request_queue *q;
-	sector_t maxsector;
 	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
 
 	might_sleep();
-	/* Test device or partition size, when known. */
-	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-	if (maxsector) {
-		sector_t sector = bio->bi_sector;
-
-		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-			/*
-			 * This may well happen - the kernel calls bread()
-			 * without checking the size of the device, e.g., when
-			 * mounting a device.
-			 */
-			handle_bad_sector(bio);
-			goto end_io;
-		}
-	}
+
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
 
 	/*
 	 * Resolve the mapping until finished. (drivers are
@@ -3191,7 +3229,7 @@ static inline void __generic_make_request(struct bio *bio)
 			break;
 		}
 
-		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+		if (unlikely(nr_sectors > q->max_hw_sectors)) {
 			printk("bio too big device %s (%u > %u)\n", 
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
@@ -3220,21 +3258,8 @@ static inline void __generic_make_request(struct bio *bio)
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
-		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-		if (maxsector) {
-			sector_t sector = bio->bi_sector;
-
-			if (maxsector < nr_sectors ||
-					maxsector - nr_sectors < sector) {
-				/*
-				 * This may well happen - partitions are not
-				 * checked to make sure they are within the size
-				 * of the whole device.
-				 */
-				handle_bad_sector(bio);
-				goto end_io;
-			}
-		}
+		if (bio_check_eod(bio, nr_sectors))
+			goto end_io;
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
@@ -3307,9 +3332,17 @@ void submit_bio(int rw, struct bio *bio)
 {
 	int count = bio_sectors(bio);
 
+	bio->bi_rw |= rw;
+
+	/*
+	 * If it's a regular read/write or a barrier with data attached,
+	 * go through the normal accounting stuff before submission.
	 */
+	if (!bio_empty_barrier(bio)) {
+
 		BIO_BUG_ON(!bio->bi_size);
 		BIO_BUG_ON(!bio->bi_io_vec);
-	bio->bi_rw |= rw;
+
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -3325,6 +3358,7 @@ void submit_bio(int rw, struct bio *bio)
 				(unsigned long long)bio->bi_sector,
 				bdevname(bio->bi_bdev,b));
 		}
+	}
 
 	generic_make_request(bio);
 }
@@ -3399,6 +3433,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	while ((bio = req->bio) != NULL) {
 		int nbytes;
 
+		/*
+		 * For an empty barrier request, the low level driver must
+		 * store a potential error location in ->sector. We pass
+		 * that back up in ->bi_sector.
+		 */
+		if (blk_empty_barrier(req))
+			bio->bi_sector = req->sector;
+
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;
@@ -3564,7 +3606,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
  *     unless the driver actually implements this in its completion callback
- *     through requeueing. Theh actual completion happens out-of-order,
+ *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
  **/
@@ -3627,15 +3669,83 @@ void end_that_request_last(struct request *req, int uptodate)
 
 EXPORT_SYMBOL(end_that_request_last);
 
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+				 unsigned int nr_bytes, int dequeue)
 {
-	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-		add_disk_randomness(req->rq_disk);
-		blkdev_dequeue_request(req);
-		end_that_request_last(req, uptodate);
+	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+		if (dequeue)
+			blkdev_dequeue_request(rq);
+		add_disk_randomness(rq->rq_disk);
+		end_that_request_last(rq, uptodate);
 	}
 }
 
+static unsigned int rq_byte_size(struct request *rq)
+{
+	if (blk_fs_request(rq))
+		return rq->hard_nr_sectors << 9;
+
+	return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request, and removes it from the block layer queues.
+ *     Not suitable for normal IO completion, unless the driver still has
+ *     the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+	__end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request. The request must already have been
+ *     dequeued using blkdev_dequeue_request(), as is normally the case
+ *     for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+	__end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends I/O on the current segment of a request. If that is the only
+ *     remaining segment, the request is also completed and freed.
+ *
+ *     This is a remnant of how older block drivers handled IO completions.
+ *     Modern drivers typically end IO on the full request in one go, unless
+ *     they have a residual value to account for. For that case this function
+ *     isn't really useful, unless the residual just happens to be the
+ *     full current segment. In other words, don't use this function in new
+ *     code. Either use end_request_completely(), or the
+ *     end_that_request_chunk() (along with end_that_request_last()) for
+ *     partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
 EXPORT_SYMBOL(end_request);
 
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
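The three exported helpers above differ only in the byte count they pass to __end_request() and in whether it dequeues first. A hedged sketch of the driver-side split (the mydrv names are hypothetical):

	struct mydrv {
		struct request *rq;	/* request currently in flight */
	};

	/* Completion from the driver's interrupt handler. The request was
	 * already taken off the queue with blkdev_dequeue_request(), so
	 * the non-dequeueing variant applies. */
	static void mydrv_irq_done(struct mydrv *dev, int error)
	{
		/* uptodate keeps the old convention: 1 on success, or a
		 * negative errno on failure */
		int uptodate = error ? error : 1;

		end_dequeued_request(dev->rq, uptodate);
	}

A request the driver never accepted, one still sitting on the block layer queue as in the BLKPREP_KILL hunk in elevator.c above, would instead go through end_queued_request().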
drivers/block/pktcdvd.c  +7 −0
@@ -1133,16 +1133,21 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	 * Schedule reads for missing parts of the packet.
 	 */
 	for (f = 0; f < pkt->frames; f++) {
+		struct bio_vec *vec;
+
 		int p, offset;
 		if (written[f])
 			continue;
 		bio = pkt->r_bios[f];
+		vec = bio->bi_io_vec;
 		bio_init(bio);
 		bio->bi_max_vecs = 1;
 		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
+		bio->bi_io_vec = vec;
+		bio->bi_destructor = pkt_bio_destructor;
 
 		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
 		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1439,6 +1444,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
+	pkt->w_bio->bi_io_vec = bvec;
+	pkt->w_bio->bi_destructor = pkt_bio_destructor;
 	for (f = 0; f < pkt->frames; f++)
 		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
 			BUG();
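Both pktcdvd hunks address the same pitfall: bio_init() clears the entire bio, including fields the driver needs to keep for bios it owns and recycles, so bi_io_vec has to be saved before the call and restored after it, with a destructor installed for the driver-owned bio. The pattern, distilled as a sketch rather than the driver's literal code:

	struct bio_vec *vec = bio->bi_io_vec;	/* stash: bio_init() memsets the bio */

	bio_init(bio);
	bio->bi_io_vec = vec;			/* restore the vec table */
	bio->bi_destructor = pkt_bio_destructor;	/* custom free for a driver-owned bio */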
drivers/block/ps3disk.c  +0 −21
@@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
 	req->cmd_type = REQ_TYPE_FLUSH;
 }
 
-static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
-			       sector_t *sector)
-{
-	struct ps3_storage_device *dev = q->queuedata;
-	struct request *req;
-	int res;
-
-	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
-	req = blk_get_request(q, WRITE, __GFP_WAIT);
-	ps3disk_prepare_flush(q, req);
-	res = blk_execute_rq(q, gendisk, req, 0);
-	if (res)
-		dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
-			__func__, __LINE__, res);
-	blk_put_request(req);
-	return res;
-}
-
-
 static unsigned long ps3disk_mask;
 
 static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_hardsect_size(queue, dev->blk_size);
 
-	blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
 	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
 			  ps3disk_prepare_flush);
 
drivers/ide/ide-disk.c  +0 −29
@@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 	rq->buffer = rq->cmd;
 }
 
-static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
-			       sector_t *error_sector)
-{
-	ide_drive_t *drive = q->queuedata;
-	struct request *rq;
-	int ret;
-
-	if (!drive->wcache)
-		return 0;
-
-	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-
-	idedisk_prepare_flush(q, rq);
-
-	ret = blk_execute_rq(q, disk, rq, 0);
-
-	/*
-	 * if we failed and caller wants error offset, get it
-	 */
-	if (ret && error_sector)
-		*error_sector = ide_get_error_location(drive, rq->cmd);
-
-	blk_put_request(rq);
-	return ret;
-}
-
 /*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
@@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
 	struct hd_driveid *id = drive->id;
 	unsigned ordered = QUEUE_ORDERED_NONE;
 	prepare_flush_fn *prep_fn = NULL;
-	issue_flush_fn *issue_fn = NULL;
 
 	if (drive->wcache) {
 		unsigned long long capacity;
@@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
 		if (barrier) {
 			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
 			prep_fn = idedisk_prepare_flush;
-			issue_fn = idedisk_issue_flush;
 		}
 	} else
 		ordered = QUEUE_ORDERED_DRAIN;
 
 	blk_queue_ordered(drive->queue, ordered, prep_fn);
-	blk_queue_issue_flush_fn(drive->queue, issue_fn);
 }
 
 static int write_cache(ide_drive_t *drive, int arg)
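As with ps3disk above, the ide-disk conversion is purely subtractive: the issue-flush routine and both registrations disappear, and the queue's ordered-mode setup alone now advertises flush support. What a write-caching driver registers after this series is roughly the following, where mydrv_prepare_flush is a placeholder name:

	/* The prepare_flush hook is all that remains; flush requests are
	 * built by the block layer itself as empty barriers. */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);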