Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit babb29b0 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  xen/blkfront: use blk_rq_map_sg to generate ring entries
  block: reduce stack footprint of blk_recount_segments()
  cciss: shorten 30s timeout on controller reset
  block: add documentation for register_blkdev()
  block: fix bogus gcc warning for uninitialized var usage
parents 6fc79d40 9e973e64
Loading
Loading
Loading
Loading
+53 −41
Original line number Diff line number Diff line
@@ -38,28 +38,29 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
	}
}

void blk_recalc_rq_segments(struct request *rq)
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     unsigned int *seg_size_ptr)
{
	int nr_phys_segs;
	unsigned int phys_size;
	struct bio_vec *bv, *bvprv = NULL;
	int seg_size;
	int cluster;
	struct req_iterator iter;
	int high, highprv = 1;
	struct request_queue *q = rq->q;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio;

	if (!rq->bio)
		return;
	if (!bio)
		return 0;

	fbio = bio;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	seg_size = 0;
	phys_size = nr_phys_segs = 0;
	rq_for_each_segment(bv, rq, iter) {
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
			if (high || highprv)
@@ -77,33 +78,44 @@ void blk_recalc_rq_segments(struct request *rq)
				continue;
			}
new_segment:
		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
			rq->bio->bi_seg_front_size = seg_size;
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
	}

	if (seg_size_ptr)
		*seg_size_ptr = seg_size;

	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	unsigned int seg_size = 0, phys_segs;

	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);

	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
		rq->bio->bi_seg_front_size = seg_size;
	if (seg_size > rq->biotail->bi_seg_back_size)
		rq->biotail->bi_seg_back_size = seg_size;

	rq->nr_phys_segments = nr_phys_segs;
	rq->nr_phys_segments = phys_segs;
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct request rq;
	struct bio *nxt = bio->bi_next;
	rq.q = q;
	rq.bio = rq.biotail = bio;

	bio->bi_next = NULL;
	blk_recalc_rq_segments(&rq);
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
	bio->bi_next = nxt;
	bio->bi_phys_segments = rq.nr_phys_segments;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
+16 −0
Original line number Diff line number Diff line
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
}
#endif /* CONFIG_PROC_FS */

/**
 * register_blkdev - register a new block device
 *
 * @major: the requested major device number [1..255]. If @major=0, try to
 *         allocate any unused major number.
 * @name: the name of the new block device as a zero terminated string
 *
 * The @name must be unique within the system.
 *
 * The return value depends on the @major input parameter.
 *  - if a major device number was requested in range [1..255] then the
 *    function returns zero on success, or a negative error code
 *  - if any unused major number was requested with @major=0 parameter
 *    then the return value is the allocated major number in range
 *    [1..255] or a negative error code otherwise
 */
int register_blkdev(unsigned int major, const char *name)
{
	struct blk_major_name **n, *p;
+7 −3
Original line number Diff line number Diff line
@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
		schedule_timeout_uninterruptible(30*HZ);

		/* Now try to get the controller to respond to a no-op */
		for (i=0; i<12; i++) {
		for (i=0; i<30; i++) {
			if (cciss_noop(pdev) == 0)
				break;
			else
				printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : ""));

			schedule_timeout_uninterruptible(HZ);
		}
		if (i == 30) {
			printk(KERN_ERR "cciss: controller seems dead\n");
			return -EBUSY;
		}
	}

+15 −15
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
@@ -82,6 +83,7 @@ struct blkfront_info
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;
@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
	if (blk_barrier_rq(req))
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = 0;
	rq_for_each_segment(bvec, req, iter) {
		BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
		buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
		fsect = bvec->bv_offset >> 9;
		lsect = fsect + (bvec->bv_len >> 9) - 1;
	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);
@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
				buffer_mfn,
				rq_data_dir(req) );

		info->shadow[id].frame[ring_req->nr_segments] =
				mfn_to_pfn(buffer_mfn);

		ring_req->seg[ring_req->nr_segments] =
		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
		ring_req->seg[i] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };

		ring_req->nr_segments++;
	}

	info->ring.req_prod_pvt++;
@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
+1 −1
Original line number Diff line number Diff line
@@ -302,7 +302,7 @@ void bio_init(struct bio *bio)
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	struct bio *bio = NULL;
	void *p;
	void *uninitialized_var(p);

	if (bs) {
		p = mempool_alloc(bs->bio_pool, gfp_mask);
Loading