Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4d6c84d9 authored by Tejun Heo, committed by Jens Axboe
Browse files

ubd: cleanup completion path



ubd had its own block request partial completion mechanism, which is
unnecessary as block layer already does it.  Kill ubd_end_request()
and ubd_finish() and replace them with direct call to
blk_end_request().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 04420850
Loading
Loading
Loading
Loading
+1 −22
Original line number Diff line number Diff line
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);

/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;

static void ubd_end_request(struct request *req, int bytes, int error)
{
	blk_end_request(req, error, bytes);
}

/* Callable only from interrupt context - otherwise you need to do
 * spin_lock_irq()/spin_lock_irqsave() */
static inline void ubd_finish(struct request *req, int bytes)
{
	if(bytes < 0){
		ubd_end_request(req, 0, -EIO);
		return;
	}
	ubd_end_request(req, bytes, 0);
}

static LIST_HEAD(restart);

/* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
static void ubd_handler(void)
{
	struct io_thread_req *req;
	struct request *rq;
	struct ubd *ubd;
	struct list_head *list, *next_ele;
	unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
			return;
		}

		rq = req->req;
		rq->nr_sectors -= req->length >> 9;
		if(rq->nr_sectors == 0)
			ubd_finish(rq, rq->hard_nr_sectors << 9);
		blk_end_request(req->req, 0, req->length);
		kfree(req);
	}
	reactivate_fd(thread_fd, UBD_IRQ);