drivers/block/xen-blkfront.c (+33 −22)

@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
 	struct blkif_request req;
-	unsigned long request;
+	struct request *request;
 	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
 	info->shadow[id].req.id  = info->shadow_free;
-	info->shadow[id].request = 0;
+	info->shadow[id].request = NULL;
 	info->shadow_free = id;
 }
 
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.  Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
 	id = get_id_from_freelist(info);
-	info->shadow[id].request = (unsigned long)req;
+	info->shadow[id].request = req;
 
 	ring_req->id = id;
 	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
+
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment.  However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way.  (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
+		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
+
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
-		req  = (struct request *)info->shadow[id].request;
+		req  = info->shadow[id].request;
 
 		blkif_completion(&info->shadow[id]);
 
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
 				error = -EOPNOTSUPP;
 			}
+			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+				     info->shadow[id].req.nr_segments == 0)) {
+				printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+				       info->gd->disk_name);
+				error = -EOPNOTSUPP;
+			}
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
+				info->feature_flush = 0;
+				xlvbd_flush(info);
+			}
 
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Stage 3: Find pending requests and requeue them. */
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/* Not in use? */
-		if (copy[i].request == 0)
+		if (!copy[i].request)
 			continue;
 
 		/* Grab a request slot and copy shadow state into it. */
 
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
 				req->seg[j].gref,
 				info->xbdev->otherend_id,
 				pfn_to_mfn(info->shadow[req->id].frame[j]),
-				rq_data_dir(
-					(struct request *)
-					info->shadow[req->id].request));
+				rq_data_dir(info->shadow[req->id].request));
 		info->shadow[req->id].req = *req;
 
 		info->ring.req_prod_pvt++;
 
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 */
 	info->feature_flush = 0;
 
-	/*
-	 * The driver doesn't properly handled empty flushes, so
-	 * lets disable barrier support for now.
-	 */
-#if 0
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
-#endif
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
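A side note on the blk_shadow.request change above (an illustration, not part of the patch): each shadow slot's req.id doubles as the next-free link while the slot is unused, which is why a recycled slot's request pointer is now cleared with NULL rather than 0. The standalone model below sketches that free-list recycling; the names shadow_free, get_id() and add_id() are hypothetical stand-ins for the driver's get_id_from_freelist()/add_id_to_freelist():

#include <stddef.h>

#define RING_SIZE 32

struct request;                          /* opaque stand-in for the blk layer's type */

struct shadow {
	unsigned long next_free;         /* plays the role of blkif_request.id while the slot is free */
	struct request *request;         /* NULL whenever the slot is on the free list */
};

static struct shadow shadow[RING_SIZE];
static unsigned long shadow_free;        /* index of the first free slot */

static void freelist_init(void)
{
	unsigned long i;

	for (i = 0; i < RING_SIZE; i++) {
		shadow[i].next_free = i + 1;     /* chain every slot to its successor */
		shadow[i].request = NULL;
	}
	shadow_free = 0;
}

static unsigned long get_id(struct request *req)
{
	unsigned long id = shadow_free;

	shadow_free = shadow[id].next_free;      /* pop the head of the list */
	shadow[id].request = req;                /* slot is now in use */
	return id;
}

static void add_id(unsigned long id)
{
	shadow[id].next_free = shadow_free;      /* push back onto the head */
	shadow[id].request = NULL;               /* the field is a pointer now, so NULL, not 0 */
	shadow_free = id;
}

Typing the field as struct request * removes the casts at the queueing and completion sites and lets the compiler reject any caller that still treats the slot as an integer.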
fs/ioprio.c (+6 −25)

@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 	}
 
 	ret = -ESRCH;
-	/*
-	 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
-	 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
-	 * in copy_process().
-	 */
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
-			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = set_task_ioprio(p, ioprio);
-			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
 
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 				break;
 
 			do_each_thread(g, p) {
-				int match;
-
-				rcu_read_lock();
-				match = __task_cred(p)->uid == who;
-				rcu_read_unlock();
-				if (!match)
+				if (__task_cred(p)->uid != who)
 					continue;
 				ret = set_task_ioprio(p, ioprio);
 				if (ret)
 
@@ -160,7 +148,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 			ret = -EINVAL;
 	}
 
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 	int ret = -ESRCH;
 	int tmpio;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
-			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = get_task_ioprio(p);
-			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
 
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 				break;
 
 			do_each_thread(g, p) {
-				int match;
-
-				rcu_read_lock();
-				match = __task_cred(p)->uid == user->uid;
-				rcu_read_unlock();
-				if (!match)
+				if (__task_cred(p)->uid != user->uid)
 					continue;
 				tmpio = get_task_ioprio(p);
 				if (tmpio < 0)
 
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 			ret = -EINVAL;
 	}
 
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return ret;
 }
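For context on the syscalls whose locking changes above, here is a minimal userspace sketch (an illustration, not part of the patch): ioprio_set() has no glibc wrapper, so it is invoked via syscall(2), and the macro values below mirror the kernel ABI in include/linux/ioprio.h.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Kernel ABI: an ioprio value packs (class << 13) | level. */
#define IOPRIO_CLASS_SHIFT		13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

#define IOPRIO_WHO_PROCESS	1	/* "who" is a pid; 0 means the calling process */
#define IOPRIO_CLASS_BE		2	/* best-effort scheduling class */

int main(void)
{
	/* Set the calling process to best-effort, level 4 (the default level). */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0) {
		perror("ioprio_set");
		return 1;
	}
	return 0;
}

This exercises the IOPRIO_WHO_PROCESS arm; the IOPRIO_WHO_PGRP and IOPRIO_WHO_USER arms iterate over many tasks, which is where replacing tasklist_lock with rcu_read_lock() pays off.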