block/blk-core.c  +12 −1

@@ -1929,6 +1929,7 @@ EXPORT_SYMBOL(generic_make_request);
  */
 void submit_bio(int rw, struct bio *bio)
 {
+	struct task_struct *tsk = current;
 	bio->bi_rw |= rw;
 
 	/*
@@ -1952,8 +1953,18 @@ void submit_bio(int rw, struct bio *bio)
 
 		if (unlikely(block_dump)) {
 			char b[BDEVNAME_SIZE];
+			/*
+			 * Not all the pages in the bio are dirtied by the
+			 * same task but most likely it will be, since the
+			 * sectors accessed on the device must be adjacent.
+			 */
+			if (bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+			    bio->bi_io_vec->bv_page->tsk_dirty)
+				tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
-				current->comm, task_pid_nr(current),
+				tsk->comm, task_pid_nr(tsk),
 				(rw & WRITE) ? "WRITE" : "READ",
 				(unsigned long long)bio->bi_iter.bi_sector,
 				bdevname(bio->bi_bdev, b),

fs/buffer.c  +2 −0

@@ -635,6 +635,8 @@ static void __set_page_dirty(struct page *page,
 		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
+		/* Save the task that is dirtying this page */
+		page->tsk_dirty = current;
 	}
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

include/linux/mm_types.h  +1 −0

@@ -185,6 +185,7 @@ struct page {
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
 	unsigned long debug_flags;	/* Use atomic bitops on this */
 #endif
+	struct task_struct *tsk_dirty;	/* task that sets this page dirty */
 
 #ifdef CONFIG_KMEMCHECK
 	/*
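Every hunk above and below open-codes the same lookup: take the first page of the bio's first bio_vec and, if a dirtying task was recorded there, attribute the I/O to it instead of to current. As a sketch only, assuming the tsk_dirty field added by this patch, the check could be factored into a single helper; the name bio_dirty_task() is hypothetical and not part of the change:

#include <linux/bio.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

/*
 * Best-effort guess at the task that dirtied the data in @bio.
 * Only the first page is inspected: not every page in the bio is
 * necessarily dirtied by the same task, but since the sectors on the
 * device must be adjacent it usually is.  Falls back to current when
 * no dirtying task was recorded.
 */
static inline struct task_struct *bio_dirty_task(struct bio *bio)
{
	if (bio && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
	    bio->bi_io_vec->bv_page->tsk_dirty)
		return bio->bi_io_vec->bv_page->tsk_dirty;
	return current;
}

submit_bio() above and the blktrace hooks below could then use tsk = bio_dirty_task(bio) rather than repeating the NULL checks at each call site.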
kernel/trace/blktrace.c  +66 −14

@@ -199,9 +199,9 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+		     int rw, u32 what, int error, int pdu_len, void *pdu_data,
+		     struct task_struct *tsk)
 {
-	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
 	struct ring_buffer *buffer = NULL;
 	struct blk_io_trace *t;

@@ -713,18 +713,33 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 			     unsigned int nr_bytes, u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
 
+	/*
+	 * Use the bio context for all events except ISSUE and
+	 * COMPLETE events.
+	 *
+	 * Not all the pages in the bio are dirtied by the same task but
+	 * most likely it will be, since the sectors accessed on the device
+	 * must be adjacent.
+	 */
+	if (!((what == BLK_TA_ISSUE) || (what == BLK_TA_COMPLETE)) &&
+	    bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+	    rq->bio->bi_io_vec->bv_page &&
+	    rq->bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
 		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
-				what, rq->errors, rq->cmd_len, rq->cmd);
+				what, rq->errors, rq->cmd_len, rq->cmd, tsk);
 	} else  {
 		what |= BLK_TC_ACT(BLK_TC_FS);
 		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
-				rq->cmd_flags, what, rq->errors, 0, NULL);
+				rq->cmd_flags, what, rq->errors, 0, NULL, tsk);
 	}
 }

@@ -776,15 +791,25 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
 
+	/*
+	 * Not all the pages in the bio are dirtied by the same task but
+	 * most likely it will be, since the sectors accessed on the device
+	 * must be adjacent.
+	 */
+	if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+	    bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 	if (!error && !bio_flagged(bio, BIO_UPTODATE))
 		error = EIO;
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio->bi_rw, what, error, 0, NULL);
+			bio->bi_rw, what, error, 0, NULL, tsk);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,

@@ -832,7 +857,8 @@ static void blk_add_trace_getrq(void *ignore,
 		struct blk_trace *bt = q->blk_trace;
 
 		if (bt)
-			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL,
+					current);
 	}
 }

@@ -848,7 +874,7 @@ static void blk_add_trace_sleeprq(void *ignore,
 
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
-					0, 0, NULL);
+					0, 0, NULL, current);
 	}
 }

@@ -857,7 +883,8 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt)
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL,
+				current);
 }
 
 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,

@@ -874,7 +901,8 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 		else
 			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu,
+				current);
 	}
 }

@@ -883,14 +911,20 @@ static void blk_add_trace_split(void *ignore,
 				unsigned int pdu)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
+		if (bio_has_data(bio) && bio->bi_io_vec &&
+		    bio->bi_io_vec->bv_page &&
+		    bio->bi_io_vec->bv_page->tsk_dirty)
+			tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
 				!bio_flagged(bio, BIO_UPTODATE),
-				sizeof(rpdu), &rpdu);
+				sizeof(rpdu), &rpdu, tsk);
 	}
 }

@@ -913,6 +947,7 @@ static void blk_add_trace_bio_remap(void *ignore,
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;

@@ -921,9 +956,14 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
+	if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+	    bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio->bi_rw, BLK_TA_REMAP,
-			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r, tsk);
 }
 
 /**

@@ -946,6 +986,7 @@ static void blk_add_trace_rq_remap(void *ignore,
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;

@@ -954,9 +995,14 @@ static void blk_add_trace_rq_remap(void *ignore,
 	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
 	r.sector_from = cpu_to_be64(from);
 
+	if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+	    rq->bio->bi_io_vec->bv_page &&
+	    rq->bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
 			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
-			sizeof(r), &r);
+			sizeof(r), &r, tsk);
 }
 
 /**

@@ -975,16 +1021,22 @@ void blk_add_driver_data(struct request_queue *q,
 			 void *data, size_t len)
 {
 	struct blk_trace *bt = q->blk_trace;
+	struct task_struct *tsk = current;
 
 	if (likely(!bt))
 		return;
 
+	if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+	    rq->bio->bi_io_vec->bv_page &&
+	    rq->bio->bi_io_vec->bv_page->tsk_dirty)
+		tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
-				BLK_TA_DRV_DATA, rq->errors, len, data);
+				BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
 	else
 		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
-				BLK_TA_DRV_DATA, rq->errors, len, data);
+				BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
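The hunk for __blk_add_trace() shows only the new parameter, not how it is consumed. Before this change the function opened with a local struct task_struct *tsk = current and derived the pid recorded in the emitted blk_io_trace from that local, so passing tsk in presumably just redirects that attribution to the caller's choice of task. A schematic of the attribution step only, not the real function body; blk_io_trace and its pid field are the real structures, the helper name is illustrative:

#include <linux/blktrace_api.h>
#include <linux/sched.h>

/*
 * Schematic: the pid stored in the trace record now comes from the
 * caller-supplied task (the page dirtier where one was found) instead
 * of implicitly from current, so queue-side WRITE events can be
 * attributed to the task that generated the data rather than to the
 * flusher thread that submitted it.
 */
static void blk_trace_set_pid(struct blk_io_trace *t, struct task_struct *tsk)
{
	t->pid = tsk->pid;
}

The command name reported alongside the pid comes from a separate process-note record keyed on the task, so it would be expected to follow the same tsk; that part is not visible in this diff.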