
Commit 2c8d1967 authored by Philipp Reisner, committed by Jens Axboe

drbd: Revert "drbd: Create new current UUID as late as possible"



The late-UUID writing is delayed until the next release.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent b7c33571
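For context on what is being reverted: the "late" scheme armed an atomic counter (new_c_uuid) during the state change and let the first subsequent writer actually generate and sync the new current UUID, whereas the code restored below calls drbd_uuid_new_current() directly from the state-change handler. The following is a minimal userspace sketch of the two strategies, not DRBD's real API: node_state, generate_uuid(), and the printf "I/O" are illustrative stand-ins, and C11 atomics approximate the kernel's atomic_t helpers.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node_state {
	atomic_int new_c_uuid;   /* >0 means a new current UUID is still pending */
	uint64_t   current_uuid;
};

/* Stand-in for drbd_uuid_new_current(); not a real UUID generator. */
static uint64_t generate_uuid(void)
{
	return ((uint64_t)rand() << 32) | (uint64_t)rand();
}

/* Eager strategy (restored by this revert): create and publish the new
 * current UUID directly in the state-change handler. */
static void state_change_eager(struct node_state *s)
{
	s->current_uuid = generate_uuid();
	printf("eager: UUID %016llx written at state change\n",
	       (unsigned long long)s->current_uuid);
}

/* Deferred "late" strategy (removed by this revert): the state-change
 * handler only arms a counter; the first writer that follows claims it
 * and creates the UUID just before submitting its I/O. */
static void state_change_late(struct node_state *s)
{
	atomic_store(&s->new_c_uuid, 2);        /* mirrors atomic_set(..., 2) */
}

static void submit_write_late(struct node_state *s, int block)
{
	int expected = 2;

	/* Mirrors atomic_add_unless(&new_c_uuid, -1, 1): only the first
	 * writer moves the counter 2 -> 1 and performs the UUID work. */
	if (atomic_compare_exchange_strong(&s->new_c_uuid, &expected, 1)) {
		s->current_uuid = generate_uuid();
		printf("late: UUID %016llx written by first I/O\n",
		       (unsigned long long)s->current_uuid);
		atomic_store(&s->new_c_uuid, 0); /* kernel code: atomic_dec + wake_up */
	}
	/* The real code blocks here until new_c_uuid reaches 0. */
	printf("write block %d under UUID %016llx\n", block,
	       (unsigned long long)s->current_uuid);
}

int main(void)
{
	struct node_state s = { 0 };

	state_change_eager(&s);

	state_change_late(&s);
	submit_write_late(&s, 0);  /* first write creates the UUID */
	submit_write_late(&s, 1);  /* later writes just proceed */
	return 0;
}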
drivers/block/drbd/drbd_int.h (+1 −8)
@@ -943,8 +943,7 @@ struct drbd_conf {
	struct drbd_work  resync_work,
			  unplug_work,
			  md_sync_work,
-			  delay_probe_work,
-			  uuid_work;
+			  delay_probe_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list delay_probe_timer;
@@ -1069,7 +1068,6 @@ struct drbd_conf {
	struct timeval dps_time; /* delay-probes-start-time */
	unsigned int dp_volume_last;  /* send_cnt of last delay probe */
	int c_sync_rate; /* current resync rate after delay_probe magic */
-	atomic_t new_c_uuid;
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -2219,8 +2217,6 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
		return 0;
	if (test_bit(BITMAP_IO, &mdev->flags))
		return 0;
-	if (atomic_read(&mdev->new_c_uuid))
-		return 0;
	return 1;
}

@@ -2241,9 +2237,6 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

-	if (atomic_read(&mdev->new_c_uuid) && atomic_add_unless(&mdev->new_c_uuid, -1, 1))
-		drbd_queue_work_front(&mdev->data.work, &mdev->uuid_work);
-
	spin_lock_irq(&mdev->req_lock);
	while (!__inc_ap_bio_cond(mdev)) {
		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
drivers/block/drbd/drbd_main.c (+6 −28)
@@ -1215,18 +1215,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
-			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE &&
-			    !atomic_read(&mdev->new_c_uuid))
-				atomic_set(&mdev->new_c_uuid, 2);
+			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+				drbd_uuid_new_current(mdev);
+				drbd_send_uuids(mdev);
+			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
-		/* Diskless peer becomes primary or got connected do diskless, primary peer. */
-		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0 &&
-		    !atomic_read(&mdev->new_c_uuid))
-			atomic_set(&mdev->new_c_uuid, 2);
+		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
+			drbd_uuid_new_current(mdev);

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1350,24 +1349,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
	drbd_md_sync(mdev);
}

-static int w_new_current_uuid(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	if (get_ldev(mdev)) {
-		if (mdev->ldev->md.uuid[UI_BITMAP] == 0) {
-			drbd_uuid_new_current(mdev);
-			if (get_net_conf(mdev)) {
-				drbd_send_uuids(mdev);
-				put_net_conf(mdev);
-			}
-			drbd_md_sync(mdev);
-		}
-		put_ldev(mdev);
-	}
-	atomic_dec(&mdev->new_c_uuid);
-	wake_up(&mdev->misc_wait);
-
-	return 1;
-}

static int drbd_thread_setup(void *arg)
{
@@ -2708,7 +2689,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
	atomic_set(&mdev->net_cnt, 0);
	atomic_set(&mdev->packet_seq, 0);
	atomic_set(&mdev->pp_in_use, 0);
-	atomic_set(&mdev->new_c_uuid, 0);

	mutex_init(&mdev->md_io_mutex);
	mutex_init(&mdev->data.mutex);
@@ -2739,14 +2719,12 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
	INIT_LIST_HEAD(&mdev->delay_probes);
	INIT_LIST_HEAD(&mdev->delay_probe_work.list);
-	INIT_LIST_HEAD(&mdev->uuid_work.list);

	mdev->resync_work.cb  = w_resync_inactive;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	mdev->delay_probe_work.cb = w_delay_probes;
-	mdev->uuid_work.cb = w_new_current_uuid;
	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	init_timer(&mdev->delay_probe_timer);
drivers/block/drbd/drbd_receiver.c (+0 −11)
@@ -1154,17 +1154,6 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;

-	if (atomic_read(&mdev->new_c_uuid)) {
-		if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
-			drbd_uuid_new_current(mdev);
-			drbd_md_sync(mdev);
-
-			atomic_dec(&mdev->new_c_uuid);
-			wake_up(&mdev->misc_wait);
-		}
-		wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
-	}
-
	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the