
Commit 5220cc93 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: add REQ_SECURE to REQ_COMMON_MASK
  block: use the passed in @bdev when claiming if partno is zero
  block: Add __attribute__((format(printf...) and fix fallout
  block: make disk_block_events() properly wait for work cancellation
  block: remove non-syncing __disk_block_events() and fold it into disk_block_events()
  block: don't use non-syncing event blocking in disk_check_events()
  cfq-iosched: fix locking around ioc->ioc_data assignment
Parents: 726ce065 155d109b
block/blk-throttle.c  +2 −2

@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
 
 	bio_list_init(&bio_list_on_stack);
 
-	throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
+	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
 			total_nr_queued(td), td->nr_queued[READ],
 			td->nr_queued[WRITE]);
 
@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	}
 
 queue_bio:
-	throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
+	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
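
Both hunks are fallout from the "Add __attribute__((format(printf...)" commit in this merge: once the logging wrappers carry the printf format attribute, gcc cross-checks every format string against its arguments, which is what flushed out the %lu vs. %d and %u vs. %llu mismatches above. A minimal sketch of the mechanism (the wrapper below is illustrative, not the kernel's actual throtl_log macro):

#include <linux/kernel.h>

/*
 * The attribute tells gcc that argument 2 is a printf-style format
 * string and that the variadic arguments begin at position 3, so
 * passing, say, an unsigned long where the format says %d becomes a
 * compile-time warning instead of silently bad output.
 */
static void __attribute__((format(printf, 2, 3)))
throtl_log_sketch(void *td, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
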
block/cfq-iosched.c  +10 −6

@@ -988,8 +988,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 					st->min_vdisktime);
-	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
-			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
-			iops_mode(cfqd), cfqq->nr_sectors);
+	cfq_log_cfqq(cfqq->cfqd, cfqq,
+		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+		     used_sl, cfqq->slice_dispatch, charge,
+		     iops_mode(cfqd), cfqq->nr_sectors);
 	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
 					  unaccounted_sl);
@@ -2023,7 +2024,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 */
 	if (sample_valid(cic->ttime_samples) &&
 	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
-		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
 			     cic->ttime_mean);
 		return;
 	}
@@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	smp_wmb();
 	cic->key = cfqd_dead_key(cfqd);
 
-	if (ioc->ioc_data == cic)
+	if (rcu_dereference(ioc->ioc_data) == cic) {
+		spin_lock(&ioc->lock);
 		rcu_assign_pointer(ioc->ioc_data, NULL);
+		spin_unlock(&ioc->lock);
+	}
 
 	if (cic->cfqq[BLK_RW_ASYNC]) {
 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
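
The last hunk is the "fix locking around ioc->ioc_data assignment" change: ioc_data is an RCU-published last-hit cache, and clearing it must be serialized on ioc->lock so the reset cannot interleave with another writer updating the hint. A generic kernel-style sketch of the pattern (the struct and function names here are made up for illustration):

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct hint;

struct hint_owner {
	spinlock_t lock;
	struct hint __rcu *hint;	/* last-hit cache, RCU-published */
};

/*
 * Writers serialize on ->lock; readers only need rcu_read_lock().
 * Without the lock, an invalidation could race with another writer
 * installing a fresh hint and clobber its update.
 */
static void hint_invalidate(struct hint_owner *o, struct hint *victim)
{
	if (rcu_dereference(o->hint) == victim) {
		spin_lock(&o->lock);
		rcu_assign_pointer(o->hint, NULL);
		spin_unlock(&o->lock);
	}
}
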
block/genhd.c  +45 −34

@@ -1371,6 +1371,7 @@ struct disk_events {
 	struct gendisk		*disk;		/* the associated disk */
 	spinlock_t		lock;
 
+	struct mutex		block_mutex;	/* protects blocking */
 	int			block;		/* event blocking depth */
 	unsigned int		pending;	/* events already sent out */
 	unsigned int		clearing;	/* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
 	return msecs_to_jiffies(intv_msecs);
 }
 
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
 {
 	struct disk_events *ev = disk->ev;
 	unsigned long flags;
 	bool cancel;
 
+	if (!ev)
+		return;
+
+	/*
+	 * Outer mutex ensures that the first blocker completes canceling
+	 * the event work before further blockers are allowed to finish.
+	 */
+	mutex_lock(&ev->block_mutex);
+
 	spin_lock_irqsave(&ev->lock, flags);
 	cancel = !ev->block++;
 	spin_unlock_irqrestore(&ev->lock, flags);
 
-	if (cancel) {
-		if (sync)
-			cancel_delayed_work_sync(&disk->ev->dwork);
-		else
-			cancel_delayed_work(&disk->ev->dwork);
-	}
+	if (cancel)
+		cancel_delayed_work_sync(&disk->ev->dwork);
+
+	mutex_unlock(&ev->block_mutex);
 }
 
 static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1460,27 +1483,6 @@ out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
 
-/**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events().  Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
-	if (disk->ev)
-		__disk_block_events(disk, true);
-}
-
 /**
  * disk_unblock_events - unblock disk event checking
  * @disk: disk to unblock events for
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
  */
 void disk_check_events(struct gendisk *disk)
 {
-	if (disk->ev) {
-		__disk_block_events(disk, false);
-		__disk_unblock_events(disk, true);
+	struct disk_events *ev = disk->ev;
+	unsigned long flags;
+
+	if (!ev)
+		return;
+
+	spin_lock_irqsave(&ev->lock, flags);
+	if (!ev->block) {
+		cancel_delayed_work(&ev->dwork);
+		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
 	}
+	spin_unlock_irqrestore(&ev->lock, flags);
 }
 EXPORT_SYMBOL_GPL(disk_check_events);
 
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 	spin_unlock_irq(&ev->lock);
 
 	/* uncondtionally schedule event check and wait for it to finish */
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
 	if (intv < 0 && intv != -1)
 		return -EINVAL;
 
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 	disk->ev->poll_msecs = intv;
 	__disk_unblock_events(disk, true);
 
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
 	INIT_LIST_HEAD(&ev->node);
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
+	mutex_init(&ev->block_mutex);
 	ev->block = 1;
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
 	if (!disk->ev)
 		return;
 
-	__disk_block_events(disk, true);
+	disk_block_events(disk);
 
 	mutex_lock(&disk_events_mutex);
 	list_del_init(&disk->ev->node);
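
Taken together, these hunks fold the sync and async flavors of __disk_block_events() into a single disk_block_events() that always cancels the event work synchronously, with block_mutex ensuring that later blockers wait until the first one has finished the cancellation. A hypothetical caller, showing how the counted contract in the docblock is meant to be used:

#include <linux/genhd.h>

/*
 * Hypothetical helper: quiesce event checking around a state change.
 * Blocking nests, so this stays correct even if a caller higher up the
 * stack has already blocked events; polling is only re-armed once the
 * matching number of disk_unblock_events() calls have been made.
 */
static void update_disk_state(struct gendisk *disk)
{
	disk_block_events(disk);	/* waits for an in-flight check */

	/* ... change state the event worker must not observe ... */

	disk_unblock_events(disk);	/* last unblock restarts polling */
}
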
fs/block_dev.c  +13 −1

@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
 	if (!disk)
 		return ERR_PTR(-ENXIO);
 
-	whole = bdget_disk(disk, 0);
+	/*
+	 * Normally, @bdev should equal what's returned from bdget_disk()
+	 * if partno is 0; however, some drivers (floppy) use multiple
+	 * bdev's for the same physical device and @bdev may be one of the
+	 * aliases.  Keep @bdev if partno is 0.  This means claimer
+	 * tracking is broken for those devices but it has always been that
+	 * way.
+	 */
+	if (partno)
+		whole = bdget_disk(disk, 0);
+	else
+		whole = bdgrab(bdev);
+
 	module_put(disk->fops->owner);
 	put_disk(disk);
 	if (!whole)
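
bd_start_claiming() sits behind exclusive block device opens, so the effect of this hunk is that a claim taken through a floppy alias is recorded against the bdev the caller actually holds, rather than against whichever alias bdget_disk() happens to resolve. A sketch of how a claimer reaches this code (the helper name is made up; FMODE_EXCL plus a holder token is what makes the open a claiming one):

#include <linux/fs.h>

/*
 * Hypothetical claimer: an exclusive open funnels into
 * bd_start_claiming(), which is where the hunk above decides which
 * "whole" device the claim is tracked on.  The holder can be any
 * unique pointer identifying the claimer.
 */
static struct block_device *claim_disk(const char *path, void *holder)
{
	return blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
}
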
include/linux/blk_types.h  +1 −1

@@ -167,7 +167,7 @@ enum rq_flag_bits {
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
-	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
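
REQ_COMMON_MASK is the set of bio flags that survive the bio-to-request transition, so leaving REQ_SECURE out of it meant a secure-discard bio reached the driver as an ordinary discard. A rough sketch of the copy this mask gates (modeled on what init_request_from_bio() did in this era; treat the function as illustrative):

#include <linux/blkdev.h>

/*
 * Only flags present in REQ_COMMON_MASK are carried from the bio into
 * the request.  With REQ_SECURE added to the mask, a secure-discard
 * request keeps its "secure" semantics all the way to the driver.
 */
static void copy_bio_flags(struct request *req, struct bio *bio)
{
	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
}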