
Commit 13f05c8d authored by Martin K. Petersen, committed by Jens Axboe

block/scsi: Provide a limit on the number of integrity segments



Some controllers have a hardware limit on the number of protection
information scatter-gather list segments they can handle.

Introduce a max_integrity_segments limit in the block layer and a new
scsi_host_template setting that allows HBA drivers to advertise a value
suitable for the hardware.

Add support for honoring the integrity segment limit when merging both
bios and requests.
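
The SCSI-side pieces of this commit (the scsi_host_template field and the
midlayer plumbing) are not included in the excerpt below, which covers only
the block layer. As a rough sketch of how a driver would use the new
setting, assuming the sg_prot_tablesize field and the
blk_queue_max_integrity_segments() helper names from the full commit, with
a hypothetical driver:

#include <linux/blkdev.h>
#include <scsi/scsi_host.h>

/* Hypothetical HBA driver advertising its hardware cap on
 * protection-information scatter-gather segments. */
static struct scsi_host_template my_hba_template = {
	.name			= "my_hba",
	.sg_tablesize		= 128,	/* data segments */
	.sg_prot_tablesize	= 64,	/* integrity segments */
};

/* The midlayer then propagates the host's cap into the block layer
 * queue limit when the device's request queue is set up. */
static void my_hba_apply_limit(struct request_queue *q,
			       struct Scsi_Host *shost)
{
	blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}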

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@carl.home.kernel.dk>
parent c8bf1336
block/blk-integrity.c +72 −21
@@ -32,24 +32,37 @@ static struct kmem_cache *integrity_cachep;
 
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
- * @rq:		request with integrity metadata attached
+ * @q:		request queue
+ * @bio:	bio with integrity metadata attached
  *
  * Description: Returns the number of elements required in a
- * scatterlist corresponding to the integrity metadata in a request.
+ * scatterlist corresponding to the integrity metadata in a bio.
  */
-int blk_rq_count_integrity_sg(struct request *rq)
+int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 {
-	struct bio_vec *iv, *ivprv;
-	struct req_iterator iter;
-	unsigned int segments;
+	struct bio_vec *iv, *ivprv = NULL;
+	unsigned int segments = 0;
+	unsigned int seg_size = 0;
+	unsigned int i = 0;
 
-	ivprv = NULL;
-	segments = 0;
-
-	rq_for_each_integrity_segment(iv, rq, iter) {
+	bio_for_each_integrity_vec(iv, bio, i) {
 
-		if (!ivprv || !BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+		if (ivprv) {
+			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+				goto new_segment;
+
+			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+				goto new_segment;
+
+			if (seg_size + iv->bv_len > queue_max_segment_size(q))
+				goto new_segment;
+
+			seg_size += iv->bv_len;
+		} else {
+new_segment:
 			segments++;
+			seg_size = iv->bv_len;
+		}
 
 		ivprv = iv;
 	}
@@ -60,30 +73,34 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
 
 /**
  * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
- * @rq:		request with integrity metadata attached
+ * @q:		request queue
+ * @bio:	bio with integrity metadata attached
  * @sglist:	target scatterlist
  *
  * Description: Map the integrity vectors in request into a
  * scatterlist.  The scatterlist must be big enough to hold all
  * elements.  I.e. sized using blk_rq_count_integrity_sg().
  */
-int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
+			    struct scatterlist *sglist)
 {
-	struct bio_vec *iv, *ivprv;
-	struct req_iterator iter;
-	struct scatterlist *sg;
-	unsigned int segments;
+	struct bio_vec *iv, *ivprv = NULL;
+	struct scatterlist *sg = NULL;
+	unsigned int segments = 0;
+	unsigned int i = 0;
 
-	ivprv = NULL;
-	sg = NULL;
-	segments = 0;
-
-	rq_for_each_integrity_segment(iv, rq, iter) {
+	bio_for_each_integrity_vec(iv, bio, i) {
 
 		if (ivprv) {
 			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
 				goto new_segment;
 
+			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+				goto new_segment;
+
+			if (sg->length + iv->bv_len > queue_max_segment_size(q))
+				goto new_segment;
+
 			sg->length += iv->bv_len;
 		} else {
 new_segment:
@@ -162,6 +179,40 @@ int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
 }
 EXPORT_SYMBOL(blk_integrity_compare);
 
+int blk_integrity_merge_rq(struct request_queue *q, struct request *req,
+			   struct request *next)
+{
+	if (blk_integrity_rq(req) != blk_integrity_rq(next))
+		return -1;
+
+	if (req->nr_integrity_segments + next->nr_integrity_segments >
+	    q->limits.max_integrity_segments)
+		return -1;
+
+	return 0;
+}
+EXPORT_SYMBOL(blk_integrity_merge_rq);
+
+int blk_integrity_merge_bio(struct request_queue *q, struct request *req,
+			    struct bio *bio)
+{
+	int nr_integrity_segs;
+	struct bio *next = bio->bi_next;
+
+	bio->bi_next = NULL;
+	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
+	bio->bi_next = next;
+
+	if (req->nr_integrity_segments + nr_integrity_segs >
+	    q->limits.max_integrity_segments)
+		return -1;
+
+	req->nr_integrity_segments += nr_integrity_segs;
+
+	return 0;
+}
+EXPORT_SYMBOL(blk_integrity_merge_bio);
+
 struct integrity_sysfs_entry {
 	struct attribute attr;
 	ssize_t (*show)(struct blk_integrity *, char *);
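
Taken together, the reworked functions give drivers a two-pass, per-bio
interface: count first, then map. A minimal sketch of the calling sequence
(my_map_integrity() is hypothetical; queue_max_integrity_segments() is
assumed to be the accessor the full commit adds next to the new queue
limit):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch: build the protection-information scatterlist
 * for one bio while honoring the queue's integrity segment limit. */
static int my_map_integrity(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	int count;

	/* Pass 1: how many scatterlist elements does the metadata need? */
	count = blk_rq_count_integrity_sg(q, bio);
	if (count > queue_max_integrity_segments(q))
		return -EINVAL;	/* more segments than the HBA can handle */

	/* Pass 2: fill sglist, which must have room for 'count' entries,
	 * i.e. be sized using the result of pass 1. */
	return blk_rq_map_integrity_sg(q, bio, sglist);
}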
block/blk-merge.c +14 −9
@@ -205,12 +205,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
-		req->cmd_flags |= REQ_NOMERGE;
-		if (req == q->last_merge)
-			q->last_merge = NULL;
-		return 0;
-	}
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+		goto no_merge;
+
+	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
+		goto no_merge;
 
 	/*
 	 * This will form the start of a new hw segment.  Bump both
@@ -218,6 +217,12 @@
 	 */
 	req->nr_phys_segments += nr_phys_segs;
 	return 1;
+
+no_merge:
+	req->cmd_flags |= REQ_NOMERGE;
+	if (req == q->last_merge)
+		q->last_merge = NULL;
+	return 0;
 }
 
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
@@ -301,6 +306,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
+	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
+		return 0;
+
 	/* Merge is OK... */
 	req->nr_phys_segments = total_phys_segments;
 	return 1;
@@ -372,9 +380,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	    || next->special)
 		return 0;
 
-	if (blk_integrity_rq(req) != blk_integrity_rq(next))
-		return 0;
-
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
block/blk-settings.c +3 −0
@@ -111,6 +111,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 void blk_set_default_limits(struct queue_limits *lim)
 {
 	lim->max_segments = BLK_MAX_SEGMENTS;
+	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
@@ -509,6 +510,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					    b->seg_boundary_mask);
 
 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
+	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
+						 b->max_integrity_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
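
min_not_zero() is what makes the new limit stack correctly: a stacked
(DM/MD) queue inherits the strictest limit any underlying device
advertises, while 0 continues to mean "no limit configured", so an
integrity-unaware bottom device does not clamp the stack to zero. A
standalone illustration of that semantic, written in plain C to mirror the
kernel macro:

#include <stdio.h>

/* Same semantics as the kernel's min_not_zero(): the smaller of the
 * two values, except that a zero argument is ignored. */
#define min_not_zero(x, y) \
	((x) == 0 ? (y) : ((y) == 0 ? (x) : ((x) < (y) ? (x) : (y))))

int main(void)
{
	/* Top-level limit unset (0): inherit the bottom device's value. */
	printf("%u\n", min_not_zero(0u, 64u));   /* prints 64 */
	/* Both set: the stricter (smaller) limit wins. */
	printf("%u\n", min_not_zero(128u, 64u)); /* prints 64 */
	return 0;
}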
block/blk-sysfs.c +11 −0
@@ -112,6 +112,11 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
 	return queue_var_show(queue_max_segments(q), (page));
 }
 
+static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->limits.max_integrity_segments, (page));
+}
+
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
 	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
@@ -288,6 +293,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
 	.show = queue_max_segments_show,
 };
 
+static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
+	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
+	.show = queue_max_integrity_segments_show,
+};
+
 static struct queue_sysfs_entry queue_max_segment_size_entry = {
 	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
 	.show = queue_max_segment_size_show,
@@ -375,6 +385,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
+	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
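
The new attribute is read-only (S_IRUGO) and appears as
/sys/block/<dev>/queue/max_integrity_segments. A quick userspace check,
using "sda" as a stand-in device name:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/max_integrity_segments", "r");
	unsigned int limit;

	if (f && fscanf(f, "%u", &limit) == 1)
		printf("max integrity segments: %u\n", limit);
	if (f)
		fclose(f);
	return 0;
}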
block/blk.h +0 −8
@@ -132,14 +132,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
-	__rq_for_each_bio(_iter.bio, _rq)			\
-		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
-
-#endif /* BLK_DEV_INTEGRITY */
-
 static inline int blk_cpu_to_group(int cpu)
 {
 #ifdef CONFIG_SCHED_MC
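
With the request-level iterator gone from blk.h, integrity segments are
walked one bio at a time. The replacement used by the reworked functions
above, bio_for_each_integrity_vec(), lives in include/linux/bio.h in the
full commit and is not shown in this excerpt; its shape is roughly:

/* Per-bio replacement for rq_for_each_integrity_segment(); a sketch
 * of the bio.h definition, which is outside this excerpt. */
#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)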