Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eff0d13f authored by Linus Torvalds
Browse files

Merge branch 'for-3.6/drivers' of git://git.kernel.dk/linux-block

Pull block driver changes from Jens Axboe:

 - Making the plugging support for drivers a bit more sane from Neil.
   This supersedes the plugging change from Shaohua as well.

 - The usual round of drbd updates.

 - Using a tail add instead of a head add in the request completion for
   nbd, making us find the most completed request more quickly.

 - A few floppy changes, getting rid of a duplicated flag and also
   running the floppy init async (since it takes forever in boot terms)
   from Andi.

* 'for-3.6/drivers' of git://git.kernel.dk/linux-block:
  floppy: remove duplicated flag FD_RAW_NEED_DISK
  blk: pass from_schedule to non-request unplug functions.
  block: stack unplug
  blk: centralize non-request unplug handling.
  md: remove plug_cnt feature of plugging.
  block/nbd: micro-optimization in nbd request completion
  drbd: announce FLUSH/FUA capability to upper layers
  drbd: fix max_bio_size to be unsigned
  drbd: flush drbd work queue before invalidate/invalidate remote
  drbd: fix potential access after free
  drbd: call local-io-error handler early
  drbd: do not reset rs_pending_cnt too early
  drbd: reset congestion information before reporting it in /proc/drbd
  drbd: report congestion if we are waiting for some userland callback
  drbd: differentiate between normal and forced detach
  drbd: cleanup, remove two unused global flags
  floppy: Run floppy initialization asynchronous
parents 8cf1a3fc 10af8138
Loading
Loading
Loading
Loading
+34 −10
Original line number Original line Diff line number Diff line
@@ -2909,13 +2909,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,


}
}


static void flush_plug_callbacks(struct blk_plug *plug)
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
{
	LIST_HEAD(callbacks);
	LIST_HEAD(callbacks);


	if (list_empty(&plug->cb_list))
	while (!list_empty(&plug->cb_list)) {
		return;

		list_splice_init(&plug->cb_list, &callbacks);
		list_splice_init(&plug->cb_list, &callbacks);


		while (!list_empty(&callbacks)) {
		while (!list_empty(&callbacks)) {
@@ -2923,9 +2921,35 @@ static void flush_plug_callbacks(struct blk_plug *plug)
							  struct blk_plug_cb,
							  struct blk_plug_cb,
							  list);
							  list);
			list_del(&cb->list);
			list_del(&cb->list);
		cb->callback(cb);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	}
	return cb;
}
}
EXPORT_SYMBOL(blk_check_plugged);


void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
{
@@ -2937,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)


	BUG_ON(plug->magic != PLUG_MAGIC);
	BUG_ON(plug->magic != PLUG_MAGIC);


	flush_plug_callbacks(plug);
	flush_plug_callbacks(plug, from_schedule);
	if (list_empty(&plug->list))
	if (list_empty(&plug->list))
		return;
		return;


+6 −2
Original line number Original line Diff line number Diff line
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
		+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
		+ mdev->ldev->md.al_offset + mdev->al_tr_pos;


	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
		drbd_chk_io_error(mdev, 1, true);
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);


	if (++mdev->al_tr_pos >
	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
	unsigned int enr, count = 0;
	unsigned int enr, count = 0;
	struct lc_element *e;
	struct lc_element *e;


	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
	/* this should be an empty REQ_FLUSH */
	if (size == 0)
		return 0;

	if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
			(unsigned long long)sector, size);
		return 0;
		return 0;
+2 −2
Original line number Original line Diff line number Diff line
@@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w


	if (ctx->error) {
	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, true);
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
		err = -EIO; /* ctx->error ? */
	}
	}


@@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
	wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
	wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);


	if (ctx->error)
	if (ctx->error)
		drbd_chk_io_error(mdev, 1, true);
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		/* that should force detach, so the in memory bitmap will be
		/* that should force detach, so the in memory bitmap will be
		 * gone in a moment as well. */
		 * gone in a moment as well. */


+31 −13
Original line number Original line Diff line number Diff line
@@ -813,7 +813,6 @@ enum {
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SEND_PING,		/* whether asender should send a ping asap */
	SEND_PING,		/* whether asender should send a ping asap */


	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
	CRASHED_PRIMARY,	/* This node was a crashed primary.
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
				 * goes into C_CONNECTED state. */
	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
	CONSIDER_RESYNC,
	CONSIDER_RESYNC,


	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
	WAS_IO_ERROR,		/* Local disk failed returned IO error */
	WAS_IO_ERROR,		/* Local disk failed returned IO error */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	NET_CONGESTED,		/* The data socket is congested */
	NET_CONGESTED,		/* The data socket is congested */


@@ -851,6 +850,13 @@ enum {
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */

	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
};
};


struct drbd_bitmap; /* opaque for drbd_conf */
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	int rs_planed;    /* resync sectors already planned */
	int rs_planed;    /* resync sectors already planned */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	int peer_max_bio_size;
	unsigned int peer_max_bio_size;
	int local_max_bio_size;
	unsigned int local_max_bio_size;
};
};


static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
 * hash table. */
 * hash table. */
#define HT_SHIFT 8
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12)       /* Works always = 4k */
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */


#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */


/* Number of elements in the app_reads_hash */
/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15
#define APP_R_HSIZE 15
@@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
	return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
	return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
}
}


enum drbd_force_detach_flags {
	DRBD_IO_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
		enum drbd_force_detach_flags forcedetach,
		const char *where)
{
{
	switch (mdev->ldev->dc.on_io_error) {
	switch (mdev->ldev->dc.on_io_error) {
	case EP_PASS_ON:
	case EP_PASS_ON:
		if (!forcedetach) {
		if (forcedetach == DRBD_IO_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s.\n", where);
				dev_err(DEV, "Local IO failed in %s.\n", where);
			if (mdev->state.disk > D_INCONSISTENT)
			if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
	case EP_DETACH:
	case EP_DETACH:
	case EP_CALL_HELPER:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &mdev->flags);
		set_bit(WAS_IO_ERROR, &mdev->flags);
		if (forcedetach == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &mdev->flags);
		if (mdev->state.disk > D_FAILED) {
		if (mdev->state.disk > D_FAILED) {
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
			dev_err(DEV,
@@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
 */
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
	int error, int forcedetach, const char *where)
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
{
	if (error) {
	if (error) {
		unsigned long flags;
		unsigned long flags;
@@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);


	D_ASSERT(ap_bio >= 0);
	D_ASSERT(ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}

	/* this currently does wake_up for every dec_ap_bio!
	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
	if (ap_bio < mxb)
		wake_up(&mdev->misc_wait);
		wake_up(&mdev->misc_wait);
	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}
}
}


static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+49 −16
Original line number Original line Diff line number Diff line
@@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,


	/* Do not change the order of the if above and the two below... */
	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
		/* we probably will start a resync soon.
		 * make sure those things are properly reset. */
		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);
		drbd_rs_cancel_all(mdev);

		drbd_send_uuids(mdev);
		drbd_send_uuids(mdev);
		drbd_send_state(mdev, ns);
		drbd_send_state(mdev, ns);
	}
	}
@@ -1630,8 +1637,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			eh = mdev->ldev->dc.on_io_error;
			eh = mdev->ldev->dc.on_io_error;
			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);


			/* Immediately allow completion of all application IO, that waits
			if (was_io_error && eh == EP_CALL_HELPER)
			   for completion from the local disk. */
				drbd_khelper(mdev, "local-io-error");

			/* Immediately allow completion of all application IO,
			 * that waits for completion from the local disk,
			 * if this was a force-detach due to disk_timeout
			 * or administrator request (drbdsetup detach --force).
			 * Do NOT abort otherwise.
			 * Aborting local requests may cause serious problems,
			 * if requests are completed to upper layers already,
			 * and then later the already submitted local bio completes.
			 * This can cause DMA into former bio pages that meanwhile
			 * have been re-used for other things.
			 * So aborting local requests may cause crashes,
			 * or even worse, silent data corruption.
			 */
			if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
				tl_abort_disk_io(mdev);
				tl_abort_disk_io(mdev);


			/* current state still has to be D_FAILED,
			/* current state still has to be D_FAILED,
@@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			drbd_md_sync(mdev);
			drbd_md_sync(mdev);
		}
		}
		put_ldev(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}
	}


        /* second half of local IO error, failure to attach,
        /* second half of local IO error, failure to attach,
@@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                                "ASSERT FAILED: disk is %s while going diskless\n",
                                "ASSERT FAILED: disk is %s while going diskless\n",
                                drbd_disk_str(mdev->state.disk));
                                drbd_disk_str(mdev->state.disk));


                mdev->rs_total = 0;
                mdev->rs_failed = 0;
                atomic_set(&mdev->rs_pending_cnt, 0);

		if (ns.conn >= C_CONNECTED)
		if (ns.conn >= C_CONNECTED)
			drbd_send_state(mdev, ns);
			drbd_send_state(mdev, ns);


@@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
{
{
	struct p_sizes p;
	struct p_sizes p;
	sector_t d_size, u_size;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;
	int q_order_type;
	unsigned int max_bio_size;
	int ok;
	int ok;


	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
		u_size = mdev->ldev->dc.disk_size;
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
		put_ldev(mdev);
	} else {
	} else {
		d_size = 0;
		d_size = 0;
@@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl


	/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
	/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
	if (mdev->agreed_pro_version <= 94)
	if (mdev->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);


	p.d_size = cpu_to_be64(d_size);
	p.d_size = cpu_to_be64(d_size);
	p.u_size = cpu_to_be64(u_size);
	p.u_size = cpu_to_be64(u_size);
@@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
		goto out;
		goto out;
	}
	}


	if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
			r |= (1 << BDI_sync_congested);
		else
			put_ldev(mdev);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(mdev)) {
	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
	q->backing_dev_info.congested_data = mdev;
	q->backing_dev_info.congested_data = mdev;


	blk_queue_make_request(q, drbd_make_request);
	blk_queue_make_request(q, drbd_make_request);
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	   This triggers a max_bio_size message upon first attach or connect */
	   This triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
	}
	}


	/* Update mdev->ldev->md.la_size_sect,
	/* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)


	spin_lock_irq(&mdev->req_lock);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
		mdev->peer_max_bio_size = peer;
	}
	}
	spin_unlock_irq(&mdev->req_lock);
	spin_unlock_irq(&mdev->req_lock);
Loading