Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 461d4e90 authored by Tejun Heo's avatar Tejun Heo Committed by Jens Axboe
Browse files

[BLOCK] update SCSI to use new blk_ordered for barriers



All ordered request related stuff delegated to HLD.  Midlayer
now doesn't deal with ordered setting or prepare_flush
callback.  sd.c updated to deal with blk_queue_ordered
setting.  Currently, ordered tag isn't used as SCSI midlayer
cannot guarantee request ordering.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 797e7dbb
Loading
Loading
Loading
Loading
+0 −9
Original line number Diff line number Diff line
@@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
	shost->use_clustering = sht->use_clustering;
	shost->ordered_flush = sht->ordered_flush;
	shost->ordered_tag = sht->ordered_tag;

	/*
	 * hosts/devices that do queueing must support ordered tags
	 */
	if (shost->can_queue > 1 && shost->ordered_flush) {
		printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
		shost->ordered_flush = 0;
	}

	if (sht->max_host_blocked)
		shost->max_host_blocked = sht->max_host_blocked;
	else
+0 −46
Original line number Diff line number Diff line
@@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes. 
	 * For the case of a READ, we need to copy the data out of the
@@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
	return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
@@ -1703,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
+20 −38
Original line number Diff line number Diff line
@@ -121,8 +121,7 @@ static void sd_shutdown(struct device *dev);
static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *);
static int sd_issue_flush(struct device *, sector_t *);
static void sd_end_flush(request_queue_t *, struct request *);
static int sd_prepare_flush(request_queue_t *, struct request *);
static void sd_prepare_flush(request_queue_t *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
			     unsigned char *buffer);

@@ -137,8 +136,6 @@ static struct scsi_driver sd_template = {
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.issue_flush		= sd_issue_flush,
	.prepare_flush		= sd_prepare_flush,
	.end_flush		= sd_end_flush,
};

/*
@@ -729,42 +726,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
	return ret;
}

static void sd_end_flush(request_queue_t *q, struct request *flush_rq)
static void sd_prepare_flush(request_queue_t *q, struct request *rq)
{
	struct request *rq = flush_rq->end_io_data;
	struct scsi_cmnd *cmd = rq->special;
	unsigned int bytes = rq->hard_nr_sectors << 9;

	if (!flush_rq->errors) {
		spin_unlock(q->queue_lock);
		scsi_io_completion(cmd, bytes, 0);
		spin_lock(q->queue_lock);
	} else if (blk_barrier_postflush(rq)) {
		spin_unlock(q->queue_lock);
		scsi_io_completion(cmd, 0, bytes);
		spin_lock(q->queue_lock);
	} else {
		/*
		 * force journal abort of barriers
		 */
		end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
		end_that_request_last(rq, -EOPNOTSUPP);
	}
}

static int sd_prepare_flush(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev);

	if (!sdkp || !sdkp->WCE)
		return 0;

	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
	rq->flags |= REQ_BLOCK_PC;
	rq->timeout = SD_TIMEOUT;
	rq->cmd[0] = SYNCHRONIZE_CACHE;
	return 1;
	rq->cmd_len = 10;
}

static void sd_rescan(struct device *dev)
@@ -1462,6 +1430,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	unsigned char *buffer;
	unsigned ordered;

	SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));

@@ -1499,6 +1468,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
		sd_read_cache_type(sdkp, disk->disk_name, buffer);
	}

	/*
	 * We now have all cache related info, determine how we deal
	 * with ordered requests.  Note that as the current SCSI
	 * dispatch function can alter request order, we cannot use
	 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
	 */
	if (sdkp->WCE)
		ordered = QUEUE_ORDERED_DRAIN_FLUSH;
	else
		ordered = QUEUE_ORDERED_DRAIN;

	blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);

	set_capacity(disk, sdkp->capacity);
	kfree(buffer);

@@ -1598,6 +1580,7 @@ static int sd_probe(struct device *dev)
	strcpy(gd->devfs_name, sdp->devfs_name);

	gd->private_data = &sdkp->driver;
	gd->queue = sdkp->device->request_queue;

	sd_revalidate_disk(gd);

@@ -1605,7 +1588,6 @@ static int sd_probe(struct device *dev)
	gd->flags = GENHD_FL_DRIVERFS;
	if (sdp->removable)
		gd->flags |= GENHD_FL_REMOVABLE;
	gd->queue = sdkp->device->request_queue;

	dev_set_drvdata(dev, sdkp);
	add_disk(gd);
+0 −1
Original line number Diff line number Diff line
@@ -15,7 +15,6 @@ struct scsi_driver {
	void (*rescan)(struct device *);
	int (*issue_flush)(struct device *, sector_t *);
	int (*prepare_flush)(struct request_queue *, struct request *);
	void (*end_flush)(struct request_queue *, struct request *);
};
#define to_scsi_driver(drv) \
	container_of((drv), struct scsi_driver, gendrv)
+0 −1
Original line number Diff line number Diff line
@@ -392,7 +392,6 @@ struct scsi_host_template {
	/*
	 * ordered write support
	 */
	unsigned ordered_flush:1;
	unsigned ordered_tag:1;

	/*