Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24113d48 authored by Mikulas Patocka, committed by Mike Snitzer
Browse files

dm: avoid indirect call in __dm_make_request



Indirect calls are inefficient because of the retpolines used as a Spectre
mitigation. This patch replaces an indirect call with a conditional branch
(which the branch predictor can predict).

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent cd19181b
Loading
Loading
Loading
Loading
+6 −23
Original line number Diff line number Diff line
@@ -1696,10 +1696,7 @@ static blk_qc_t __process_bio(struct mapped_device *md,
	return ret;
}

typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);

static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
				  process_bio_fn process_bio)
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;
	blk_qc_t ret = BLK_QC_T_NONE;
@@ -1719,26 +1716,15 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
		return ret;
	}

	ret = process_bio(md, map, bio);
	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		ret = __process_bio(md, map, bio);
	else
		ret = __split_and_process_bio(md, map, bio);

	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * make_request entry point for ordinary bio-based mapped devices:
 * remap the bio to one target, splitting off any remainder first.
 */
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	blk_qc_t ret;

	/* Delegate to the common path with the splitting bio processor. */
	ret = __dm_make_request(q, bio, __split_and_process_bio);

	return ret;
}

/*
 * make_request entry point for NVMe bio-based mapped devices:
 * same common path as dm_make_request(), but using the simplified
 * NVMe bio processor instead of the splitting one.
 */
static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
{
	blk_qc_t ret;

	ret = __dm_make_request(q, bio, __process_bio);

	return ret;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
@@ -2229,12 +2215,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		break;
	case DM_TYPE_NVME_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request_nvme);
		blk_queue_make_request(md->queue, dm_make_request);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);