Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ea263c7f authored by Mike Christie's avatar Mike Christie Committed by Nicholas Bellinger
Browse files

target: Fix max_unmap_lba_count calc overflow



max_discard_sectors is only 32 bits, and some non-SCSI backend
devices will set this to the max 0xffffffff, so we can end up
overflowing during the max_unmap_lba_count calculation.

This fixes a regression caused by my patch:

commit 8a9ebe71
Author: Mike Christie <mchristi@redhat.com>
Date:   Mon Jan 18 14:09:27 2016 -0600

    target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors

which can result in extra discards being sent due to the overflow
causing max_unmap_lba_count to be smaller than what the backing
device can actually support.

Signed-off-by: default avatarMike Christie <mchristi@redhat.com>
Reviewed-by: default avatarBart Van Assche <bart.vanassche@sandisk.com>
Cc: stable@vger.kernel.org
Signed-off-by: default avatarNicholas Bellinger <nab@linux-iscsi.org>
parent 064cdd2d
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -821,13 +821,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q, int block_size)
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
								block_size;
	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
+1 −2
Original line number Diff line number Diff line
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);

		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
						      fd_dev->fd_block_size))
		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
			pr_debug("IFILE: BLOCK Discard support available,"
				 " disabled by default\n");
		/*
+1 −2
Original line number Diff line number Diff line
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
					      dev->dev_attrib.hw_block_size))
	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

+1 −1
Original line number Diff line number Diff line
@@ -95,6 +95,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q, int block_size);
				       struct request_queue *q);

#endif /* TARGET_CORE_BACKEND_H */