Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e9193da0 authored by Jens Axboe
Browse files

Merge branch 'stable/for-jens-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into for-linus

Merge branch 'stable/for-jens-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into for-linus

Pull xen-blkfront fixes from Konrad for 4.13.
parents 7a362ea9 bd912ef3
Loading
Loading
Loading
Loading
+12 −9
Original line number Diff line number Diff line
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts = false;
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/*
	 * We have to reserve 'max_grefs' grants because persistent
	 * grants are shared by all rings.
	 */
	if (max_grefs > 0)
		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
	/* Check if we have enough persistent grants to allocate a requests */
	if (rinfo->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = true;

		if (gnttab_alloc_grant_references(
		    max_grefs - rinfo->persistent_gnts_c,
		    &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs);
				max_grefs - rinfo->persistent_gnts_c);
			return 1;
		}
	}

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
	if (unlikely(require_extra_req))
		rinfo->shadow[extra_id].req = *extra_ring_req;

	if (max_grefs > 0)
	if (new_persistent_gnts)
		gnttab_free_grant_references(setup.gref_head);

	return 0;
@@ -906,8 +909,8 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
	return BLK_STS_IOERR;

out_busy:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	blk_mq_stop_hw_queue(hctx);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_STS_RESOURCE;
}