
Commit 6728cb0e authored by Jens Axboe

block: make core bits checkpatch compliant

parent 22b13210
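
In checkpatch terms, the diff below applies a handful of recurring fixes: an explicit KERN_* severity on every printk(), no assignment inside an if () condition, long lines wrapped to fit 80 columns, no blank line between a function's closing brace and its EXPORT_SYMBOL(), and no explicit "= NULL" initializer on static pointers. A minimal sketch of the checkpatch-clean form these rules produce (example_warn() and example_cachep are illustrative names, not kernel API; scripts/checkpatch.pl flags each of these patterns):

#include <linux/blkdev.h>	/* struct request, struct bio */
#include <linux/kernel.h>	/* printk() and the KERN_* prefixes */
#include <linux/module.h>	/* EXPORT_SYMBOL() */
#include <linux/slab.h>		/* struct kmem_cache */

/* Illustrative sketch only: nothing here exists in the kernel tree. */
static struct kmem_cache *example_cachep;	/* statics are zeroed; no "= NULL" */

void example_warn(struct request *rq, unsigned int nbytes)
{
	struct bio *bio;

	bio = rq->bio;		/* assignment pulled out of the if () */
	if (bio && nbytes > bio->bi_size)
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __FUNCTION__, nbytes, bio->bi_size);
}
EXPORT_SYMBOL(example_warn);	/* right after the brace, no blank line */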
+2 −3
@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
{
	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
	    prepare_flush_fn == NULL) {
		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
		printk(KERN_ERR "%s: prepare_flush_fn required\n",
								__FUNCTION__);
		return -EINVAL;
	}

@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,

	return 0;
}
-
EXPORT_SYMBOL(blk_queue_ordered);

/*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
	bio_put(bio);
	return ret;
}
-
EXPORT_SYMBOL(blkdev_issue_flush);
+72 −81
@@ -3,7 +3,8 @@
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> -  July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
/*
 * For queue allocation
 */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk("%s: want %u bytes done, only %u left\n",
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __FUNCTION__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk("%s: dev %s: type=%x, flags=%x\n", msg,
	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
						(unsigned long long)rq->sector,
						rq->nr_sectors,
						rq->current_nr_sectors);
	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
						rq->bio, rq->biotail,
						rq->buffer, rq->data,
						rq->data_len);

	if (blk_pc_request(rq)) {
		printk("cdb: ");
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < sizeof(rq->cmd); bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
-
EXPORT_SYMBOL(blk_dump_rq_flags);

/*
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
	}
}
-
EXPORT_SYMBOL(blk_plug_device);

/*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
	del_timer(&q->unplug_timer);
	return 1;
}
-
EXPORT_SYMBOL(blk_remove_plug);

/*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
		kblockd_schedule_work(&q->unplug_work);
	}
}
-
EXPORT_SYMBOL(blk_start_queue);

/**
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)

	blk_put_queue(q);
}
-
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)

	return 1;
}
-
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)

	elv_requeue_request(q, rq);
}
-
EXPORT_SYMBOL(blk_requeue_request);

/**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
-
EXPORT_SYMBOL(blk_insert_request);

/*
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
	}
	disk->stamp = now;
}
-
EXPORT_SYMBOL_GPL(disk_round_stats);

/*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
		freed_request(q, rw, priv);
	}
}
-
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
-
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1350,7 +1344,7 @@ static inline void __generic_make_request(struct bio *bio)
		}

		if (unlikely(nr_sectors > q->max_hw_sectors)) {
			printk("bio too big device %s (%u > %u)\n", 
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
				bdevname(bio->bi_bdev, b),
				bio_sectors(bio),
				q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
	} while (bio);
	current->bio_tail = NULL; /* deactivate */
}
-
EXPORT_SYMBOL(generic_make_request);

/**
@@ -1486,7 +1479,6 @@ void submit_bio(int rw, struct bio *bio)

	generic_make_request(bio);
}
-
EXPORT_SYMBOL(submit_bio);

/**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
	if (!blk_pc_request(req))
		req->errors = 0;

-	if (error) {
-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-			printk("end_request: I/O error, dev %s, sector %llu\n",
+	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,

			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk("%s: bio idx %d >= vcnt %d\n",
						__FUNCTION__,
						bio->bi_idx, bio->bi_vcnt);
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
						__FUNCTION__, bio->bi_idx,
						bio->bi_vcnt);
				break;
			}

@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
		total_bytes += nbytes;
		nr_bytes -= nbytes;

-		if ((bio = req->bio)) {
+		bio = req->bio;
+		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
	local_irq_enable();

	while (!list_empty(&local_list)) {
-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+		struct request *rq;

+		rq = list_entry(local_list.next, struct request, donelist);
		list_del_init(&rq->donelist);
		rq->q->softirq_done_fn(rq);
	}
}

-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
@@ -1685,7 +1678,6 @@ void blk_complete_request(struct request *req)

	local_irq_restore(flags);
}
-
EXPORT_SYMBOL(blk_complete_request);

/*
@@ -2002,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
-
EXPORT_SYMBOL(kblockd_schedule_work);

void kblockd_flush_work(struct work_struct *work)
+0 −1
@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,

	return err;
}
-
EXPORT_SYMBOL(blk_execute_rq);
+4 −6
@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
	blk_rq_unmap_user(bio);
	return ret;
}
-
EXPORT_SYMBOL(blk_rq_map_user);

/**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
	/* we don't allow misaligned data like bio_map_user() does.  If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
	rq->buffer = rq->data = NULL;
	return 0;
}
-
EXPORT_SYMBOL(blk_rq_map_user_iov);

/**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)

	return ret;
}
-
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
	rq->buffer = rq->data = NULL;
	return 0;
}
-
EXPORT_SYMBOL(blk_rq_map_kern);
+6 −6
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk("blk: request botched\n");
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
@@ -235,7 +235,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,

	return nsegs;
}
-
EXPORT_SYMBOL(blk_rq_map_sg);

static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable =  ll_new_mergeable(q, req, bio);

		if (mergeable) {
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
		/*
		 * propagate the combined length to the end of the requests
		 */