drivers/md/dm-table.c +15 −0

@@ -1495,6 +1495,16 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
 	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
+static int queue_supports_inline_encryption(struct dm_target *ti,
+					    struct dm_dev *dev,
+					    sector_t start, sector_t len,
+					    void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && blk_queue_inlinecrypt(q);
+}
+
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1615,6 +1625,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
+	if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption))
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q);
+
 	dm_table_verify_integrity(t);
 
 	/*
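For context, a minimal sketch of the queue-flag plumbing this patch relies on. QUEUE_FLAG_INLINECRYPT and blk_queue_inlinecrypt() are not upstream block-layer helpers, so the bit value and macro below are assumptions modeled on the existing QUEUE_FLAG_* / blk_queue_*() pattern in include/linux/blkdev.h for this tree:

	/* Sketch only: assumed definitions in include/linux/blkdev.h.
	 * The flag number is hypothetical and must not collide with the
	 * QUEUE_FLAG_* bits already defined in this tree.
	 */
	#define QUEUE_FLAG_INLINECRYPT	29	/* hypothetical: queue supports inline crypto */

	#define blk_queue_inlinecrypt(q) \
		test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)

With a helper along those lines, queue_supports_inline_encryption() reports whether one underlying device's queue advertises inline encryption, and the new dm_table_all_devices_attribute() check in dm_table_set_restrictions() only sets QUEUE_FLAG_INLINECRYPT on the dm queue when every device in the table supports it (clearing it otherwise), following the same callout pattern used for the QUEUE_FLAG_NO_SG_MERGE handling just above it.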