Documentation/block/queue-sysfs.txt  +9 −0

@@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.
 
+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
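As a quick illustration (not part of the patch): a minimal userspace sketch that
reads the new attribute for one device. The device name "sda" is an assumption
for the example.

#include <stdio.h>

int main(void)
{
	char mode[32];
	FILE *f = fopen("/sys/block/sda/queue/write_cache", "r");

	if (!f)
		return 1;			/* no such device or attribute */
	if (fgets(mode, sizeof(mode), f))
		printf("cache mode: %s", mode);	/* "write back" or "write through" */
	fclose(f);
	return 0;
}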
block/bio.c  +0 −11

@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
 	bio_endio(__bio_chain_endio(bio));
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio_set_flag(bio, BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
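With the blk-lib.c conversion below, completion accounting goes through
bio_chain() rather than a hand-rolled remaining count, and this local helper is
dropped. A minimal sketch of the chaining pattern, assuming the two-argument
submit_bio()/submit_bio_wait() of this tree; the bio setup is elided:

struct bio *first = bio_alloc(GFP_NOIO, 1);
struct bio *last = bio_alloc(GFP_NOIO, 1);

/* fill in first: bi_bdev, bi_iter.bi_sector, payload, ... */
bio_chain(first, last);		/* "last" won't complete until "first" has */
submit_bio(WRITE, first);	/* fire and forget */

/* fill in last, then wait once for the whole chain */
submit_bio_wait(WRITE, last);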
block/blk-core.c  +3 −2

@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len)
+		int offset, unsigned int len)
 {
 	struct bio *bio = rq->bio;
 
 	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_offset = offset;
 	bio->bi_io_vec->bv_len = len;
 
 	bio->bi_iter.bi_size = len;
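A hypothetical caller sketch for the new signature; the request, the page, and
the 16-byte offset are illustrative, not from this patch:

struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

if (!page)
	return -ENOMEM;

/* the payload may now start at a non-zero offset within the page */
blk_add_request_payload(rq, page, 16, 64);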
block/blk-lib.c  +64 −114

@@ -9,82 +9,46 @@
 
 #include "blk.h"
 
-struct bio_batch {
-	atomic_t		done;
-	int			error;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);
 
-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}
+
+	return new;
 }
 
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
-	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = *biop;
 	unsigned int granularity;
 	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
-	int ret = 0;
-	struct blk_plug plug;
 
 	if (!q)
 		return -ENXIO;
-
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
+	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+		return -EOPNOTSUPP;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
-		type |= REQ_SECURE;
-	}
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
-	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
 		sector_t end_sect, tmp;
 
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
 		/* Make sure bi_size doesn't overflow */
 		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 
-		/*
+		/**
 		 * If splitting a request, and the next starting sector would be
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
+		bio = next_bio(bio, type, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
 		/*
 		 * We can loop for a long time in here, if someone does
 		 * full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 */
 		cond_resched();
 	}
-	blk_finish_plug(&plug);
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
+	*biop = bio;
+	return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
 
-	if (bb.error)
-		return bb.error;
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:	blockdev to issue discard for
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to discard
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret;
+
+	if (flags & BLKDEV_DISCARD_SECURE)
+		type |= REQ_SECURE;
+
+	blk_start_plug(&plug);
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+			&bio);
+	if (!ret && bio) {
+		ret = submit_bio_wait(type, bio);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
+	}
+	blk_finish_plug(&plug);
+
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask,
 		struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 
 	if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
 
-	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev   = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			if (ret < (sz << 9))
 				break;
 		}
-		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }
 
 /**
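Splitting out __blkdev_issue_discard() lets a caller build up one chained
series of discard bios and pay for a single wait, exactly as the rewritten
blkdev_issue_discard() above does. A sketch of that calling pattern, with
bdev/sector/nr_sects assumed to be set up by the caller:

struct bio *bio = NULL;
struct blk_plug plug;
int type = REQ_WRITE | REQ_DISCARD;
int ret;

blk_start_plug(&plug);
ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, type, &bio);
if (!ret && bio)
	ret = submit_bio_wait(type, bio);	/* one wait for the whole chain */
blk_finish_plug(&plug);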
block/blk-mq-tag.c  +12 −0

@@ -474,6 +474,18 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++) {
+		if (tagset->tags && tagset->tags[i])
+			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+	}
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
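A hypothetical driver-side sketch of the new iterator: fail every started
request in a tag set, e.g. from a controller-teardown path. The callback and
the ctrl structure are made up, and the two-argument blk_mq_complete_request()
is an assumption about this era's API to verify against the tree:

static void my_cancel_request(struct request *req, void *data, bool reserved)
{
	if (!blk_mq_request_started(req))
		return;
	blk_mq_complete_request(req, -EIO);	/* error policy is up to the driver */
}

/* teardown path: */
blk_mq_tagset_busy_iter(&ctrl->tag_set, my_cancel_request, ctrl);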