
Commit 8d76d101 authored by Jens Axboe

Merge branch 'for-3.19/core' into for-3.19/drivers

parents e805b983 7c7f2f2b
Documentation/block/biodoc.txt  +5 −1
@@ -946,7 +946,11 @@ elevator_allow_merge_fn		called whenever the block layer determines
 				request safely. The io scheduler may still
 				want to stop a merge at this point if it
 				results in some sort of conflict internally,
-				this hook allows it to do that.
+				this hook allows it to do that. Note however
+				that two *requests* can still be merged at later
+				time. Currently the io scheduler has no way to
+				prevent that. It can only learn about the fact
+				from elevator_merge_req_fn callback.
 
 elevator_dispatch_fn*		fills the dispatch queue with ready requests.
 				I/O schedulers are free to postpone requests by
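To illustrate the hook documented above: elevator_allow_merge_fn has the 3.x-era signature int (*)(struct request_queue *, struct request *, struct bio *), and returning zero vetoes merging the bio into the request. The sketch below is loosely modeled on how CFQ keeps sync and async I/O apart; it is not part of this commit and assumes 3.x-era helpers such as rw_is_sync() and the bio->bi_rw field.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/elevator.h>

/* Illustrative elevator_allow_merge_fn: refuse to mix sync and async I/O. */
static int my_allow_bio_merge(struct request_queue *q, struct request *rq,
			      struct bio *bio)
{
	/* Return 0 to veto merging @bio into @rq, nonzero to allow it. */
	if (rw_is_sync(bio->bi_rw) != rq_is_sync(rq))
		return 0;

	return 1;
}

Even when such a hook says no, two already-queued requests can still be merged later; as the patch text above notes, the scheduler only hears about that afterwards via elevator_merge_req_fn.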
block/blk-mq.c  +25 −14
@@ -269,17 +269,25 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_queue_exit(q);
 }
 
-void blk_mq_free_request(struct request *rq)
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx;
-	struct request_queue *q = rq->q;
 
 	ctx->rq_completed[rq_is_sync(rq)]++;
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	__blk_mq_free_request(hctx, ctx, rq);
+
+}
+EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
+
+void blk_mq_free_request(struct request *rq)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q = rq->q;
+
+	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+	blk_mq_free_hctx_request(hctx, rq);
 }
 EXPORT_SYMBOL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, int error)
 {
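The refactoring above splits the old function in two: blk_mq_free_hctx_request() frees a request when the caller already knows the hardware context, and blk_mq_free_request() becomes a wrapper that performs the q->mq_ops->map_queue() lookup first. A minimal, hypothetical caller of the new export might look like the following sketch (the function name and the teardown comment are illustrative, not from this commit):

#include <linux/blk-mq.h>

/* Illustrative only: free a request when the hctx is already in hand. */
static void my_free_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* ...driver-specific teardown of @rq would go here... */
	blk_mq_free_hctx_request(hctx, rq);	/* no map_queue() round trip */
}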
@@ -801,9 +809,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
-	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-		__blk_mq_run_hw_queue(hctx);
-	else if (hctx->queue->nr_hw_queues == 1)
+	if (!async) {
+		int cpu = get_cpu();
+		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+			__blk_mq_run_hw_queue(hctx);
+			put_cpu();
+			return;
+		}
+
+		put_cpu();
+	}
+
+	if (hctx->queue->nr_hw_queues == 1)
 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
 	else {
 		unsigned int cpu;
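The rewritten branch above pins the task with get_cpu() just for the cpumask test and the possible direct run, then releases it with put_cpu(); that is why the preempt_disable()/preempt_enable() pairs around blk_mq_run_hw_queue() callers are dropped in the hunks that follow. A standalone sketch of the same idiom, not tied to blk-mq:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Sketch of the get_cpu()/put_cpu() idiom: disable preemption only for the
 * test-and-run window instead of making every caller wrap the whole call.
 */
static bool run_locally_if_allowed(const struct cpumask *mask, void (*fn)(void))
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU's id */

	if (cpumask_test_cpu(cpu, mask)) {
		fn();		/* preemption is off, so we are still on @cpu */
		put_cpu();
		return true;
	}

	put_cpu();		/* re-enable preemption */
	return false;
}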
@@ -824,9 +841,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +868,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	preempt_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -880,9 +893,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 			continue;
 
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
fs/fs-writeback.c  +22 −7
@@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	 * write_inode()
 	 */
 	spin_lock(&inode->i_lock);
-	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
-	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		inode->i_state &= ~I_DIRTY_PAGES;
+
 	dirty = inode->i_state & I_DIRTY;
-	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+	inode->i_state &= ~I_DIRTY;
+
+	/*
+	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
+	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
+	 * either they see the I_DIRTY bits cleared or we see the dirtied
+	 * inode.
+	 *
+	 * I_DIRTY_PAGES is always cleared together above even if @mapping
+	 * still has dirty pages.  The flag is reinstated after smp_mb() if
+	 * necessary.  This guarantees that either __mark_inode_dirty()
+	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+	 */
+	smp_mb();
+
+	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+		inode->i_state |= I_DIRTY_PAGES;
+
 	spin_unlock(&inode->i_lock);
+
 	/* Don't write the inode if only I_DIRTY_PAGES was set */
 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
 		int err = write_inode(inode, wbc);
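The comment added above, together with the next hunk in __mark_inode_dirty(), describes a store-buffering style pairing: writeback clears I_DIRTY under i_lock, issues smp_mb(), then re-reads the page-cache dirty tag, while the dirtying side sets the tag, issues its own smp_mb(), and only then tests i_state locklessly. Reduced to a purely schematic sketch, with plain variables standing in for i_state and PAGECACHE_TAG_DIRTY and all locking omitted, the guarantee is that at least one side observes the other's store:

#include <asm/barrier.h>

/* Schematic only: flag plays the role of I_DIRTY_PAGES in inode->i_state,
 * tag plays the role of PAGECACHE_TAG_DIRTY. */
static int flag = 1, tag;

static void writeback_side(void)	/* __writeback_single_inode() role */
{
	flag = 0;	/* clear the dirty flag                       */
	smp_mb();	/* pairs with the barrier in dirtying_side()  */
	if (tag)	/* a dirtier got in: reinstate the flag       */
		flag = 1;
}

static void dirtying_side(void)		/* __mark_inode_dirty() role */
{
	tag = 1;	/* page becomes dirty                         */
	smp_mb();	/* pairs with the barrier in writeback_side() */
	if (!flag) {
		/* slow path: would take i_lock and re-mark the inode dirty */
	}
}

With both barriers present it cannot happen that writeback_side() reads tag == 0 while dirtying_side() still reads flag == 1, which is the "either they see the I_DIRTY bits cleared or we see PAGECACHE_TAG_DIRTY" guarantee the new comment states.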
@@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 	}
 
 	/*
-	 * make sure that changes are seen by all cpus before we test i_state
-	 * -- mikulas
+	 * Paired with smp_mb() in __writeback_single_inode() for the
+	 * following lockless i_state test.  See there for details.
 	 */
 	smp_mb();
 
-	/* avoid the locking if we can */
 	if ((inode->i_state & flags) == flags)
 		return;
 
include/linux/blk-mq.h  +1 −0
@@ -169,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);