block/bio.c  +9 −1

@@ -580,6 +580,14 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 }
 EXPORT_SYMBOL(bio_phys_segments);
 
+static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
+{
+#ifdef CONFIG_PFK
+        dst->bi_crypt_key = src->bi_crypt_key;
+        dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#endif
+}
+
 /**
  * __bio_clone_fast - clone a bio that shares the original bio's biovec
  * @bio: destination bio
@@ -609,7 +617,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
         bio->bi_write_hint = bio_src->bi_write_hint;
         bio->bi_iter = bio_src->bi_iter;
         bio->bi_io_vec = bio_src->bi_io_vec;
-
+        bio_clone_crypt_key(bio, bio_src);
         bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
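Neither bi_crypt_key nor bi_iter.bi_dun is introduced by this diff; both come from header changes elsewhere in the patch series. Inferred purely from their usage above, the CONFIG_PFK additions to the bio structures presumably look like the sketch below (the field names are real, their placement and the key's type are assumptions):

/* Sketch only: CONFIG_PFK fields implied by this diff, not shown in it. */
struct bvec_iter {
        sector_t        bi_sector;      /* device address, in 512 B sectors */
        unsigned int    bi_size;        /* residual I/O count */
        /* ... remaining iterator fields ... */
#ifdef CONFIG_PFK
        u64             bi_dun;         /* data unit number: base IV for inline crypto */
#endif
};

struct bio {
        /* ... existing fields ... */
#ifdef CONFIG_PFK
        /* key for inline encryption, NULL when the bio is not encrypted
         * (the struct name here is an assumption) */
        const struct blk_encryption_key *bi_crypt_key;
#endif
};

The clone helper must copy both fields: a fast clone shares the original's biovec, but its I/O still has to be encrypted with the same key and the same IV sequence.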
block/blk-core.c  +15 −1

@@ -1832,6 +1832,9 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
         bio->bi_next = req->bio;
         req->bio = bio;
 
+#ifdef CONFIG_PFK
+        WARN_ON(req->__dun || bio->bi_iter.bi_dun);
+#endif
         req->__sector = bio->bi_iter.bi_sector;
         req->__data_len += bio->bi_iter.bi_size;
         req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@@ -1981,6 +1984,9 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
         else
                 req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
         req->write_hint = bio->bi_write_hint;
+#ifdef CONFIG_PFK
+        req->__dun = bio->bi_iter.bi_dun;
+#endif
         blk_rq_bio_prep(req->q, req, bio);
 }
 EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@@ -3123,8 +3129,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
         req->__data_len -= total_bytes;
 
         /* update sector only for requests with clear definition of sector */
-        if (!blk_rq_is_passthrough(req))
+        if (!blk_rq_is_passthrough(req)) {
                 req->__sector += total_bytes >> 9;
+#ifdef CONFIG_PFK
+                if (req->__dun)
+                        req->__dun += total_bytes >> 12;
+#endif
+        }
 
         /* mixed attributes always follow the first bio */
         if (req->rq_flags & RQF_MIXED_MERGE) {
@@ -3488,6 +3499,9 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
         dst->cpu = src->cpu;
         dst->__sector = blk_rq_pos(src);
+#ifdef CONFIG_PFK
+        dst->__dun = blk_rq_dun(src);
+#endif
         dst->__data_len = blk_rq_bytes(src);
         if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
                 dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
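Two details in these hunks are easy to miss. First, blk_update_request() advances __sector in 512-byte sectors (total_bytes >> 9) but __dun once per 4 KiB data unit (total_bytes >> 12): an 8 KiB partial completion moves the sector by 16 and the DUN by 2, and a DUN of zero means "no DUN" and is never advanced. Second, the blk_rq_dun() accessor used by __blk_rq_prep_clone() is not defined in this diff; a plausible sketch of the accessor pair the block layer would need, assuming the fields sketched earlier:

/* Sketch: accessors presumably added to the headers by the full patch. */
#ifdef CONFIG_PFK
static inline u64 blk_rq_dun(const struct request *rq)
{
        return rq->__dun;       /* DUN carried by the request, 0 if none */
}

static inline u64 bio_dun(const struct bio *bio)
{
        return bio->bi_iter.bi_dun;
}
#else
static inline u64 blk_rq_dun(const struct request *rq) { return 0; }
static inline u64 bio_dun(const struct bio *bio) { return 0; }
#endif

The !CONFIG_PFK stubs would let callers test for a DUN without #ifdefs, though blk_try_merge() below guards its check with CONFIG_PFK regardless.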
block/blk-merge.c  +16 −1

@@ -9,7 +9,7 @@
 #include <linux/scatterlist.h>
 
 #include <trace/events/block.h>
-
+#include <linux/pfk.h>
 #include "blk.h"
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -670,6 +670,11 @@ static void blk_account_io_merge(struct request *req)
         }
 }
 
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+        return (!pfk_allow_merge_bio(bio, nxt));
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -708,6 +713,9 @@ static struct request *attempt_merge(struct request_queue *q,
         if (req->write_hint != next->write_hint)
                 return NULL;
 
+        if (crypto_not_mergeable(req->bio, next->bio))
+                return 0;
+
         /*
          * If we are allowed to merge, then append bio list
          * from next to rq and release next. merge_requests_fn
@@ -838,11 +846,18 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
         if (rq->write_hint != bio->bi_write_hint)
                 return false;
 
+        if (crypto_not_mergeable(rq->bio, bio))
+                return false;
+
         return true;
 }
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
+#ifdef CONFIG_PFK
+        if (blk_rq_dun(rq) || bio_dun(bio))
+                return ELEVATOR_NO_MERGE;
+#endif
         if (req_op(rq) == REQ_OP_DISCARD &&
             queue_max_discard_segments(rq->q) > 1)
                 return ELEVATOR_DISCARD_MERGE;
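pfk_allow_merge_bio() comes from the newly included <linux/pfk.h> and is outside this diff. Its callers imply the policy: two bios may merge only when their inline-crypto state is compatible. A minimal sketch under that assumption (the real PFK implementation may compare more than the key pointer):

/* Sketch only: hypothetical stand-in for pfk_allow_merge_bio(). */
static bool pfk_allow_merge_bio_sketch(const struct bio *bio1,
                                       const struct bio *bio2)
{
        if (!bio1 || !bio2)
                return true;    /* nothing to compare against */

        /* never mix different keys, or encrypted with unencrypted I/O */
        return bio1->bi_crypt_key == bio2->bi_crypt_key;
}

Two further notes: attempt_merge() returns a struct request pointer, so the patch's "return 0" is effectively "return NULL" (NULL would be the idiomatic spelling); and blk_try_merge() now refuses to classify a front or back merge whenever either side carries a DUN, presumably because position-based merging of DUN-tagged I/O would also require the DUNs to be contiguous, which these generic checks do not verify.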
block/blk.h  +0 −9

@@ -54,15 +54,6 @@ static inline void queue_lockdep_assert_held(struct request_queue *q)
         lockdep_assert_held(q->queue_lock);
 }
 
-static inline void queue_flag_set_unlocked(unsigned int flag,
-                                           struct request_queue *q)
-{
-        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-            kref_read(&q->kobj.kref))
-                lockdep_assert_held(q->queue_lock);
-        __set_bit(flag, &q->queue_flags);
-}
-
 static inline void queue_flag_clear_unlocked(unsigned int flag,
                                              struct request_queue *q)
 {
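The removed helper set a queue flag without requiring queue_lock, asserting the lock only once the queue was initialized and still referenced; presumably its last caller is gone from this tree. For context, this is the locked variant that remains in block/blk.h in this kernel generation (unchanged by the diff):

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);   /* caller must hold queue_lock */
        __set_bit(flag, &q->queue_flags);
}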
block/elevator.c  +5 −3

@@ -422,7 +422,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
 {
         struct elevator_queue *e = q->elevator;
         struct request *__rq;
-
+        enum elv_merge ret;
         /*
          * Levels of merges:
          *      nomerges:  No merges at all attempted
@@ -435,9 +435,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
         /*
          * First try one-hit cache.
          */
-        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
-                enum elv_merge ret = blk_try_merge(q->last_merge, bio);
+        if (q->last_merge) {
+                if (!elv_bio_merge_ok(q->last_merge, bio))
+                        return ELEVATOR_NO_MERGE;
 
+                ret = blk_try_merge(q->last_merge, bio);
                 if (ret != ELEVATOR_NO_MERGE) {
                         *req = q->last_merge;
                         return ret;
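This is a behavior change, not just a cleanup: previously, a q->last_merge that failed elv_bio_merge_ok() simply fell through to the hash-table and scheduler merge lookups; now it returns ELEVATOR_NO_MERGE outright, so a rejection on the cached request (for example crypto_not_mergeable() failing inside blk_rq_merge_ok()) ends all merge attempts for that bio. Hoisting ret to function scope supports the restructured flow. For reference, the values ret can carry, as defined in include/linux/elevator.h in this kernel generation:

enum elv_merge {
        ELEVATOR_NO_MERGE       = 0,    /* bio cannot be merged */
        ELEVATOR_FRONT_MERGE    = 1,    /* bio goes in front of *req */
        ELEVATOR_BACK_MERGE     = 2,    /* bio goes at the back of *req */
        ELEVATOR_DISCARD_MERGE  = 3,    /* multi-range discard merge */
};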