Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b31c3ec authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A small batch of fixes that should be included for the 4.13 release.
  This contains:

   - Revert of the 4k loop blocksize support. Even with a recent batch
     of 4 fixes, we're still not really happy with it. Rather than be
     stuck with an API issue, let's revert it and get it right for 4.14.

   - Trivial patch from Bart, adding a few flags to the blk-mq debugfs
     exports that were added in this release, but not to the debugfs
     parts.

   - Regression fix for bsg, fixing a potential kernel panic. From
     Benjamin.

   - Tweak for the blk throttling, improving how we account discards.
     From Shaohua"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq-debugfs: Add names for recently added flags
  bsg-lib: fix kernel panic resulting from missing allocation of reply-buffer
  Revert "loop: support 4k physical blocksize"
  blk-throttle: cap discard request size
parents 1f5de42d 22d53821
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

+14 −4
Original line number Diff line number Diff line
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
	}								\
} while (0)

/*
 * Number of bytes a bio is charged for in the throttling accounting.
 * Discard bios are charged as a single 512-byte sector regardless of
 * their bi_size; all other bios are charged their actual payload size.
 */
static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
+44 −30
Original line number Diff line number Diff line
@@ -29,26 +29,25 @@
#include <scsi/scsi_cmnd.h>

/**
 * bsg_destroy_job - routine to teardown/delete a bsg job
 * bsg_teardown_job - routine to teardown a bsg job
 * @job: bsg_job that is to be torn down
 */
static void bsg_destroy_job(struct kref *kref)
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = job->req;

	blk_end_request_all(rq, BLK_STS_OK);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);
	kfree(job);

	blk_end_request_all(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_destroy_job);
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
}

/**
 * bsg_create_job - create the bsg_job structure for the bsg request
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_create_job(struct device *dev, struct request *req)
static int bsg_prepare_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct request_queue *q = req->q;
	struct scsi_request *rq = scsi_req(req);
	struct bsg_job *job;
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	BUG_ON(req->special);

	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	req->special = job;
	job->req = req;
	if (q->bsg_job_size)
		job->dd_data = (void *)&job[1];
	job->request = rq->cmd;
	job->request_len = rq->cmd_len;
	job->reply = rq->sense;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
						 * allocated */

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		ret = bsg_prepare_job(dev, req);
		if (ret) {
			scsi_req(req)->result = ret;
			blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
	spin_lock_irq(q->queue_lock);
}

/*
 * Per-request init hook: set up the bsg_job embedded in the request's
 * extra payload (see q->cmd_size) and allocate the sense buffer that
 * doubles as the reply area.
 *
 * Returns 0 on success, -ENOMEM if the sense buffer allocation fails.
 */
static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	struct scsi_request *sreq = &job->sreq;

	/* start from a fully zeroed job before wiring up its fields */
	memset(job, 0, sizeof(*job));

	scsi_req_init(sreq);
	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
	sreq->sense = kzalloc(sreq->sense_len, gfp);
	if (!sreq->sense)
		return -ENOMEM;

	job->req = req;
	/* replies are delivered through the sense buffer */
	job->reply = sreq->sense;
	job->reply_len = sreq->sense_len;
	/* driver-private data sits directly after the bsg_job allocation */
	job->dd_data = job + 1;

	return 0;
}

/*
 * Per-request teardown hook: release the sense buffer allocated by
 * bsg_init_rq().  The bsg_job itself lives in the request payload and
 * is freed along with the request.
 */
static void bsg_exit_rq(struct request_queue *q, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	struct scsi_request *sreq = &job->sreq;

	kfree(sreq->sense);
}

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->cmd_size = sizeof(struct scsi_request);
	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	q->init_rq_fn = bsg_init_rq;
	q->exit_rq_fn = bsg_exit_rq;
	q->request_fn = bsg_request_fn;

	ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
		goto out_cleanup_queue;

	q->queuedata = dev;
	q->bsg_job_size = dd_job_size;
	q->bsg_job_fn = job_fn;
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+6 −36
Original line number Diff line number Diff line
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}

static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
		 loff_t logical_blocksize)
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
		lo->lo_logical_blocksize = logical_blocksize;
		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
		blk_queue_logical_block_size(lo->lo_queue,
					     lo->lo_logical_blocksize);
	}
	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;
	int lo_bits = 9;

	/*
	 * We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)

	q->limits.discard_granularity = inode->i_sb->s_blocksize;
	q->limits.discard_alignment = 0;
	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
		lo_bits = blksize_bits(lo->lo_logical_blocksize);

	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,

	lo->use_dio = false;
	lo->lo_blocksize = lo_blocksize;
	lo->lo_logical_blocksize = 512;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();
	int lo_flags = lo->lo_flags;

	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
	if (err)
		goto exit;

	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
			lo->lo_logical_blocksize = 512;
		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
		if (LO_INFO_BLOCKSIZE(info) != 512 &&
		    LO_INFO_BLOCKSIZE(info) != 1024 &&
		    LO_INFO_BLOCKSIZE(info) != 2048 &&
		    LO_INFO_BLOCKSIZE(info) != 4096)
			return -EINVAL;
		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
			return -EINVAL;
	}

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit ||
	    lo->lo_flags != lo_flags ||
	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
				     LO_INFO_BLOCKSIZE(info))) {
	    lo->lo_sizelimit != info->lo_sizelimit) {
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
			err = -EFBIG;
			goto exit;
		}
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
				lo->lo_logical_blocksize);
	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
+0 −1
Original line number Diff line number Diff line
@@ -49,7 +49,6 @@ struct loop_device {
	struct file *	lo_backing_file;
	struct block_device *lo_device;
	unsigned	lo_blocksize;
	unsigned	lo_logical_blocksize;
	void		*key_data; 

	gfp_t		old_gfp_mask;
Loading