Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8dfb790b authored by Linus Torvalds
Browse files

Merge tag 'ceph-for-4.9-rc1' of git://github.com/ceph/ceph-client

Pull Ceph updates from Ilya Dryomov:
 "The big ticket item here is support for rbd exclusive-lock feature,
  with maintenance operations offloaded to userspace (Douglas Fuller,
  Mike Christie and myself). Another block device bullet is a series
  fixing up layering error paths (myself).

  On the filesystem side, we've got patches that improve our handling of
  buffered vs dio write races (Neil Brown) and a few assorted fixes from
  Zheng. Also included a couple of random cleanups and a minor CRUSH
  update"

* tag 'ceph-for-4.9-rc1' of git://github.com/ceph/ceph-client: (39 commits)
  crush: remove redundant local variable
  crush: don't normalize input of crush_ln iteratively
  libceph: ceph_build_auth() doesn't need ceph_auth_build_hello()
  libceph: use CEPH_AUTH_UNKNOWN in ceph_auth_build_hello()
  ceph: fix description for rsize and rasize mount options
  rbd: use kmalloc_array() in rbd_header_from_disk()
  ceph: use list_move instead of list_del/list_add
  ceph: handle CEPH_SESSION_REJECT message
  ceph: avoid accessing / when mounting a subpath
  ceph: fix mandatory flock check
  ceph: remove warning when ceph_releasepage() is called on dirty page
  ceph: ignore error from invalidate_inode_pages2_range() in direct write
  ceph: fix error handling of start_read()
  rbd: add rbd_obj_request_error() helper
  rbd: img_data requests don't own their page array
  rbd: don't call rbd_osd_req_format_read() for !img_data requests
  rbd: rework rbd_img_obj_exists_submit() error paths
  rbd: don't crash or leak on errors in rbd_img_obj_parent_read_full_callback()
  rbd: move bumping img_request refcount into rbd_obj_request_submit()
  rbd: mark the original request as done if stat request fails
  ...
parents fed41f7d 64f77566
Loading
Loading
Loading
Loading
+26 −3
Original line number Diff line number Diff line
@@ -6,7 +6,7 @@ Description:

Being used for adding and removing rbd block devices.

Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
Usage: <mon ip addr> <options> <pool name> <rbd image name> [<snap name>]

 $ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add

@@ -14,9 +14,13 @@ The snapshot name can be "-" or omitted to map the image read/write. A <dev-id>
will be assigned for any registered block device. If snapshot is used, it will
be mapped read-only.

Removal of a device:
Usage: <dev-id> [force]

  $ echo <dev-id> > /sys/bus/rbd/remove
 $ echo 2 > /sys/bus/rbd/remove

Optional "force" argument which when passed will wait for running requests and
then unmap the image. Requests sent to the driver after initiating the removal
will be failed.  (August 2016, since 4.9.)

What:		/sys/bus/rbd/add_single_major
Date:		December 2013
@@ -43,10 +47,25 @@ Description: Available only if rbd module is inserted with single_major
Entries under /sys/bus/rbd/devices/<dev-id>/
--------------------------------------------

client_addr

	The ceph unique client entity_addr_t (address + nonce).
	The format is <address>:<port>/<nonce>: '1.2.3.4:1234/5678' or
	'[1:2:3:4:5:6:7:8]:1234/5678'.  (August 2016, since 4.9.)

client_id

	The ceph unique client id that was assigned for this specific session.

cluster_fsid

	The ceph cluster UUID.  (August 2016, since 4.9.)

config_info

	The string written into /sys/bus/rbd/add{,_single_major}.  (August
	2016, since 4.9.)

features

	A hexadecimal encoding of the feature bits for this image.
@@ -92,6 +111,10 @@ current_snap

	The current snapshot for which the device is mapped.

snap_id

	The current snapshot's id.  (August 2016, since 4.9.)

parent

	Information identifying the chain of parent images in a layered rbd
+4 −0
Original line number Diff line number Diff line
@@ -98,6 +98,10 @@ Mount Options
	size.

  rsize=X
	Specify the maximum read size in bytes.  By default there is no
	maximum.

  rasize=X
	Specify the maximum readahead.

  mount_timeout=X
+1233 −329

File changed.

Preview size limit exceeded, changes collapsed.

+11 −0
Original line number Diff line number Diff line
@@ -28,6 +28,17 @@
#define RBD_DATA_PREFIX        "rbd_data."
#define RBD_ID_PREFIX          "rbd_id."

#define RBD_LOCK_NAME          "rbd_lock"
#define RBD_LOCK_TAG           "internal"
#define RBD_LOCK_COOKIE_PREFIX "auto"

enum rbd_notify_op {
	RBD_NOTIFY_OP_ACQUIRED_LOCK      = 0,
	RBD_NOTIFY_OP_RELEASED_LOCK      = 1,
	RBD_NOTIFY_OP_REQUEST_LOCK       = 2,
	RBD_NOTIFY_OP_HEADER_UPDATE      = 3,
};

/*
 * For format version 1, rbd image 'foo' consists of objects
 *   foo.rbd		- image metadata
+11 −13
Original line number Diff line number Diff line
@@ -175,9 +175,8 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,

static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu\n", page->mapping->host,
	     page, page->index);
	WARN_ON(PageDirty(page));
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
@@ -298,14 +297,6 @@ static void finish_read(struct ceph_osd_request *req)
	kfree(osd_data->pages);
}

static void ceph_unlock_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		unlock_page(pages[i]);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
@@ -370,6 +361,10 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
@@ -386,8 +381,11 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
	return nr_pages;

out_pages:
	ceph_unlock_page_vector(pages, nr_pages);
	ceph_release_page_vector(pages, nr_pages);
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out:
	ceph_osdc_put_request(req);
	return ret;
Loading