Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit feaf3848 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: fix setting of max_segment_size and seg_boundary mask
  block: internal dequeue shouldn't start timer
  block: set disk->node_id before it's being used
  When block layer fails to map iov, it calls bio_unmap_user to undo
parents a7711327 0e435ac2
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
	/*
	 * Prep proxy barrier request.
	 */
	blkdev_dequeue_request(rq);
	elv_dequeue_request(q, rq);
	q->orig_bar_rq = rq;
	rq = &q->bar_rq;
	blk_rq_init(q, rq);
@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
			 * This can happen when the queue switches to
			 * ORDERED_NONE while this request is on it.
			 */
			blkdev_dequeue_request(rq);
			elv_dequeue_request(q, rq);
			if (__blk_end_request(rq, -EOPNOTSUPP,
					      blk_rq_bytes(rq)))
				BUG();
+24 −2
Original line number Diff line number Diff line
@@ -592,7 +592,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
				   1 << QUEUE_FLAG_STACKABLE);
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, 0xffffffff);
	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -1636,6 +1636,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blkdev_dequeue_request - dequeue request and start timeout timer
 * @req: request to dequeue (must currently be queued on @req->q)
 *
 * Dequeue @req and start timeout timer on it.  This hands off the
 * request to the driver.
 *
 * Block internal functions which don't want to start timer should
 * call elv_dequeue_request().
 *
 * NOTE(review): presumably must be called with the queue lock held,
 * matching elv_dequeue_request()'s own locking rules -- confirm
 * against callers.
 */
void blkdev_dequeue_request(struct request *req)
{
	/* Remove @req from the elevator/queue without arming the timer. */
	elv_dequeue_request(req->q, req);

	/*
	 * We are now handing the request to the hardware, add the
	 * timeout handler.
	 */
	blk_add_timer(req);
}
EXPORT_SYMBOL(blkdev_dequeue_request);

/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
@@ -1774,7 +1796,7 @@ static void end_that_request_last(struct request *req, int error)
		blk_queue_end_tag(req->q, req);

	if (blk_queued_rq(req))
		blkdev_dequeue_request(req);
		elv_dequeue_request(req->q, req);

	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();
+1 −1
Original line number Diff line number Diff line
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

+4 −0
Original line number Diff line number Diff line
@@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -314,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);

	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+0 −7
Original line number Diff line number Diff line
@@ -844,14 +844,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
	 */
	if (blk_account_rq(rq))
		q->in_flight++;

	/*
	 * We are now handing the request to the hardware, add the
	 * timeout handler.
	 */
	blk_add_timer(rq);
}
EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(struct request_queue *q)
{
Loading