Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1545dec4 authored by Linus Torvalds
Browse files

Merge tag 'ceph-for-4.15-rc8' of git://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "Two rbd fixes for 4.12 and 4.2 issues respectively, marked for
  stable"

* tag 'ceph-for-4.15-rc8' of git://github.com/ceph/ceph-client:
  rbd: set max_segments to USHRT_MAX
  rbd: reacquire lock should update lock owner client id
parents ab278159 21acdf45
Loading
Loading
Loading
Loading
+12 −6
Original line number Diff line number Diff line
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
	mutex_unlock(&rbd_dev->watch_mutex);
}

/*
 * Record that this client now owns the exclusive lock: stash the lock
 * cookie on the device, update the owner client id, and kick off the
 * acquired_lock_work item on the device's task workqueue.
 *
 * NOTE(review): per the comment on rbd_lock() below, lock_rwsem is
 * presumably held for write by callers here too — confirm at call sites.
 */
static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	/* remember which cookie the lock was taken with */
	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	/* notify interested parties asynchronously that the lock was acquired */
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	char cookie[32];
	int ret;

@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
		return ret;

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
	__rbd_lock(rbd_dev, cookie);
	return 0;
}

@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->lock_dwork, 0);
	} else {
		strcpy(rbd_dev->lock_cookie, cookie);
		__rbd_lock(rbd_dev, cookie);
	}
}

@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);