Commit 30e5e929 authored by Christoph Hellwig, committed by Jens Axboe

nvme: don't pass struct nvme_ns to nvme_config_discard

To allow reusing this function for the multipath node.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 39b7baa4
+17 −16
@@ -1118,29 +1118,26 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
 	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ctrl *ctrl,
+		unsigned stream_alignment, struct request_queue *queue)
 {
-	struct nvme_ctrl *ctrl = ns->ctrl;
-	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	u32 size = queue_logical_block_size(queue);
+
+	if (stream_alignment)
+		size *= stream_alignment;
 
 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
 			NVME_DSM_MAX_RANGES);
 
-	if (ctrl->nr_streams && ns->sws && ns->sgs) {
-		unsigned int sz = logical_block_size * ns->sws * ns->sgs;
+	queue->limits.discard_alignment = size;
+	queue->limits.discard_granularity = size;
 
-		ns->queue->limits.discard_alignment = sz;
-		ns->queue->limits.discard_granularity = sz;
-	} else {
-		ns->queue->limits.discard_alignment = logical_block_size;
-		ns->queue->limits.discard_granularity = logical_block_size;
-	}
-	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
-	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+	blk_queue_max_discard_sectors(queue, UINT_MAX);
+	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
 
 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
-		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
+		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1164,6 +1161,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 {
 	struct nvme_ns *ns = disk->private_data;
 	struct nvme_ctrl *ctrl = ns->ctrl;
+	unsigned stream_alignment = 0;
 	u16 bs;
 
 	/*
@@ -1183,6 +1181,9 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	else
 		ns->pi_type = 0;
 
+	if (ctrl->nr_streams && ns->sws && ns->sgs)
+		stream_alignment = ns->sws * ns->sgs;
+
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);
 
@@ -1198,7 +1199,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
 	if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
-		nvme_config_discard(ns);
+		nvme_config_discard(ctrl, stream_alignment, disk->queue);
 	blk_mq_unfreeze_queue(disk->queue);
 }
 
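
Note: after this refactor nvme_config_discard no longer touches struct nvme_ns at all; the only namespace-specific input, the streams write size (ns->sws) times the stream granularity (ns->sgs), is hoisted into the caller as stream_alignment. For example, with 512-byte logical blocks, sws = 4 and sgs = 16, the caller passes stream_alignment = 64 and the discard granularity becomes 512 * 64 = 32768 bytes. A minimal sketch of the kind of caller this enables follows; the multipath head and its head->disk->queue are assumptions here, since the actual multipath call site lands in a separate commit:

	/*
	 * Hypothetical caller for the multipath node: it owns a
	 * request_queue but has no struct nvme_ns, so the old
	 * signature (taking struct nvme_ns *) could not be reused.
	 * stream_alignment is 0 because streams are a per-namespace
	 * property, leaving plain logical-block granularity.
	 */
	if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ctrl, 0, head->disk->queue);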