
Commit 93d17d3d authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] drivers/block/ll_rw_blk.c: cleanups



This patch contains the following cleanups:
- make needlessly global code static
- remove the following unused global functions:
  - blkdev_scsi_issue_flush_fn
  - __blk_attempt_remerge
- remove the following unused EXPORT_SYMBOLs:
  - blk_phys_contig_segment
  - blk_hw_contig_segment
  - blkdev_scsi_issue_flush_fn
  - __blk_attempt_remerge

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e8e1c729
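The recurring pattern in this patch: a function referenced only within ll_rw_blk.c is given internal linkage with the static keyword, so it needs neither a declaration in the header nor an EXPORT_SYMBOL, and the compiler is free to inline or discard it. A minimal before/after sketch (the function name is illustrative, not from the patch):

/* before: external linkage, callable from the whole kernel
 * and exported to loadable modules */
int helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(helper);

/* after: internal linkage, visible only inside this file */
static int helper(int x)
{
	return x * 2;
}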
drivers/block/ll_rw_blk.c +8 −59
@@ -37,6 +37,7 @@

static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);

/*
 * For the allocated request tables
@@ -1137,7 +1138,7 @@ new_hw_segment:
}


-int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1158,9 +1159,7 @@ int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
	return 0;
}

-EXPORT_SYMBOL(blk_phys_contig_segment);
-
-int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
				 struct bio *nxt)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1176,8 +1175,6 @@ int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
	return 1;
}

-EXPORT_SYMBOL(blk_hw_contig_segment);
-
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
@@ -1825,7 +1822,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
-void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;
@@ -2254,45 +2251,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)

EXPORT_SYMBOL(blkdev_issue_flush);

-/**
- * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
- * @q:		device queue
- * @disk:	gendisk
- * @error_sector:	error offset
- *
- * Description:
- *    Devices understanding the SCSI command set, can use this function as
- *    a helper for issuing a cache flush. Note: driver is required to store
- *    the error offset (in case of error flushing) in ->sector of struct
- *    request.
- */
-int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
-			       sector_t *error_sector)
-{
-	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	int ret;
-
-	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
-	rq->sector = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->cmd[0] = 0x35;
-	rq->cmd_len = 12;
-	rq->data = NULL;
-	rq->data_len = 0;
-	rq->timeout = 60 * HZ;
-
-	ret = blk_execute_rq(q, disk, rq);
-
-	if (ret && error_sector)
-		*error_sector = rq->sector;
-
-	blk_put_request(rq);
-	return ret;
-}
-
-EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-
-void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
	int rw = rq_data_dir(rq);

@@ -2551,16 +2510,6 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)

EXPORT_SYMBOL(blk_attempt_remerge);

-/*
- * Non-locking blk_attempt_remerge variant.
- */
-void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-	attempt_back_merge(q, rq);
-}
-
-EXPORT_SYMBOL(__blk_attempt_remerge);
-
static int __make_request(request_queue_t *q, struct bio *bio)
{
	struct request *req, *freereq = NULL;
@@ -2971,7 +2920,7 @@ void submit_bio(int rw, struct bio *bio)

EXPORT_SYMBOL(submit_bio);

-void blk_recalc_rq_segments(struct request *rq)
+static void blk_recalc_rq_segments(struct request *rq)
{
	struct bio *bio, *prevbio = NULL;
	int nr_phys_segs, nr_hw_segs;
@@ -3013,7 +2962,7 @@ void blk_recalc_rq_segments(struct request *rq)
	rq->nr_hw_segments = nr_hw_segs;
}

-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq)) {
		rq->hard_sector += nsect;
@@ -3601,7 +3550,7 @@ static struct sysfs_ops queue_sysfs_ops = {
	.store	= queue_attr_store,
};

-struct kobj_type queue_ktype = {
+static struct kobj_type queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
};
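The largest removal above is blkdev_scsi_issue_flush_fn, a helper that built a REQ_BLOCK_PC request carrying the SCSI SYNCHRONIZE CACHE command (opcode 0x35) and executed it synchronously; the commit message identifies it as unused, meaning no driver ever registered it. A hypothetical sketch of how a SCSI-speaking block driver of this era could have hooked it up via blk_queue_issue_flush_fn() (the mydrv_setup_queue name is invented for illustration):

#include <linux/blkdev.h>

/* hypothetical driver setup: register the generic SCSI flush helper
 * so the block layer can flush the device's write cache when it
 * completes barrier requests */
static void mydrv_setup_queue(request_queue_t *q)
{
	blk_queue_issue_flush_fn(q, blkdev_scsi_issue_flush_fn);
}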
include/linux/blkdev.h +0 −6
@@ -539,15 +539,12 @@ extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern void __blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
@@ -631,7 +628,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern void blk_queue_ordered(request_queue_t *, int);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
@@ -675,8 +671,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

-extern void drive_stat_acct(struct request *, int, int);
-
static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;