Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 288dab8a authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: add a separate operation type for secure erase



Instead of overloading the discard support with the REQ_SECURE flag,
use the opportunity to rename the queue flag as well, and remove the
dead checks for this flag in the RAID 1 and RAID 10 drivers that don't
claim support for secure erase.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 56332f02
Loading
Loading
Loading
Loading
+17 −10
Original line number Diff line number Diff line
@@ -1977,16 +1977,21 @@ generic_make_request_checks(struct bio *bio)
		}
	}

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
		err = -EOPNOTSUPP;
		goto end_io;
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!bdev_write_same(bio->bi_bdev))
			goto not_supported;
		break;
	default:
		break;
	}

	/*
@@ -2003,6 +2008,8 @@ generic_make_request_checks(struct bio *bio)
	trace_block_bio_queue(q, bio);
	return true;

not_supported:
	err = -EOPNOTSUPP;
end_io:
	bio->bi_error = err;
	bio_endio(bio);
+14 −11
Original line number Diff line number Diff line
@@ -23,20 +23,27 @@ static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	enum req_op op;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
	if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
@@ -66,7 +73,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
@@ -100,16 +107,12 @@ EXPORT_SYMBOL(__blkdev_issue_discard);
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int op_flags = 0;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		op_flags |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
+2 −4
Original line number Diff line number Diff line
@@ -649,8 +649,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
				   req_op(next)))
	if (req_op(req) != req_op(next))
		return 0;

	/*
@@ -752,8 +751,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw,
				   bio_op(bio)))
	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
+1 −1
Original line number Diff line number Diff line
@@ -480,7 +480,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secdiscard(q))
	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
+9 −5
Original line number Diff line number Diff line
@@ -545,7 +545,7 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;
@@ -841,7 +841,7 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r
		return 1;

	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
		     req->cmd_flags & REQ_SECURE))
		     req_op(req) == REQ_OP_SECURE_ERASE))
		return blkif_queue_discard_req(req, rinfo);
	else
		return blkif_queue_rw_req(req, rinfo);
@@ -955,7 +955,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1595,7 +1595,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
			}
			blk_mq_complete_request(req, error);
			break;
@@ -2052,10 +2052,14 @@ static int blkif_recover(struct blkfront_info *info)
			 */
			if (req_op(copy[i].request) == REQ_OP_FLUSH ||
			    req_op(copy[i].request) == REQ_OP_DISCARD ||
			    copy[i].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
			    req_op(copy[i].request) == REQ_OP_SECURE_ERASE ||
			    copy[i].request->cmd_flags & REQ_FUA) {
				/*
				 * Flush operations don't contain bios, so
				 * we need to requeue the whole request
				 *
				 * XXX: but this doesn't make any sense for a
				 * write with the FUA flag set..
				 */
				list_add(&copy[i].request->queuelist, &requests);
				continue;
Loading