Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 06f10adb authored by Lars Ellenberg's avatar Lars Ellenberg Committed by Jens Axboe
Browse files

drbd: prepare for more than 32 bit flags



 - struct drbd_conf { ... unsigned long flags; ... }
 + struct drbd_conf { ... unsigned long drbd_flags[N]; ... }

And introduce wrapper functions for test/set/clear bit operations
on this member.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 44edfb0d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -90,7 +90,7 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
		dt = MAX_SCHEDULE_TIMEOUT;

	dt = wait_event_timeout(mdev->misc_wait,
			*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
			*done || drbd_test_flag(mdev, FORCE_DETACH), dt);
	if (dt == 0) {
		dev_err(DEV, "meta-data IO operation timed out\n");
		drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
@@ -108,7 +108,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
	mdev->md_io.done = 0;
	mdev->md_io.error = -ENODEV;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
	if ((rw & WRITE) && !drbd_test_flag(mdev, MD_NO_FUA))
		rw |= REQ_FUA | REQ_FLUSH;
	rw |= REQ_SYNC;

+44 −16
Original line number Diff line number Diff line
@@ -808,7 +808,7 @@ enum {
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)

/* global flag bits */
enum {
enum drbd_flag {
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SEND_PING,		/* whether asender should send a ping asap */
@@ -858,6 +858,9 @@ enum {
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,	/* Currently the last bit in this 32bit word */

	/* keep last */
	DRBD_N_FLAGS,
};

struct drbd_bitmap; /* opaque for drbd_conf */
@@ -970,8 +973,7 @@ struct fifo_buffer {
};

struct drbd_conf {
	/* things that are stored as / read from meta data on disk */
	unsigned long flags;
	unsigned long drbd_flags[(DRBD_N_FLAGS + BITS_PER_LONG -1)/BITS_PER_LONG];

	/* configured by drbdsetup */
	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
@@ -1143,6 +1145,31 @@ struct drbd_conf {
	unsigned int local_max_bio_size;
};

/* Atomically set flag bit @f in mdev's flag bitmap (wraps set_bit).
 * Passing &drbd_flags[0] lets the bitop index past the first word once
 * DRBD_N_FLAGS exceeds BITS_PER_LONG. */
static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	set_bit(f, &mdev->drbd_flags[0]);
}

/* Atomically clear flag bit @f in mdev's flag bitmap (wraps clear_bit). */
static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	clear_bit(f, &mdev->drbd_flags[0]);
}

/* Return nonzero iff flag bit @f is set in mdev's flag bitmap
 * (wraps test_bit). */
static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	return test_bit(f, &mdev->drbd_flags[0]);
}

/* Atomically set flag bit @f and return its previous value
 * (wraps test_and_set_bit). */
static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	return test_and_set_bit(f, &mdev->drbd_flags[0]);
}

/* Atomically clear flag bit @f and return its previous value
 * (wraps test_and_clear_bit). */
static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	return test_and_clear_bit(f, &mdev->drbd_flags[0]);
}

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
	struct drbd_conf *mdev;
@@ -1812,12 +1839,12 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
	wait_event(mdev->misc_wait,
		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
		   !drbd_test_and_set_flag(mdev, CLUSTER_ST_CHANGE));
}

static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
	drbd_clear_flag(mdev, CLUSTER_ST_CHANGE);
	wake_up(&mdev->misc_wait);
}

@@ -1874,9 +1901,9 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
		/* NOTE fall through to detach case if forcedetach set */
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &mdev->flags);
		drbd_set_flag(mdev, WAS_IO_ERROR);
		if (forcedetach == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &mdev->flags);
			drbd_set_flag(mdev, FORCE_DETACH);
		if (mdev->state.disk > D_FAILED) {
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
@@ -2037,13 +2064,13 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)

static inline void wake_asender(struct drbd_conf *mdev)
{
	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
	if (drbd_test_flag(mdev, SIGNAL_ASENDER))
		force_sig(DRBD_SIG, mdev->asender.task);
}

static inline void request_ping(struct drbd_conf *mdev)
{
	set_bit(SEND_PING, &mdev->flags);
	drbd_set_flag(mdev, SEND_PING);
	wake_asender(mdev);
}

@@ -2374,7 +2401,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)

	if (is_susp(mdev->state))
		return false;
	if (test_bit(SUSPEND_IO, &mdev->flags))
	if (drbd_test_flag(mdev, SUSPEND_IO))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
@@ -2389,7 +2416,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
	 * and we are within the spinlock anyways, we have this workaround.  */
	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &mdev->flags))
	if (drbd_test_flag(mdev, BITMAP_IO))
		return false;
	return true;
}
@@ -2427,8 +2454,8 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)

	D_ASSERT(ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
	if (ap_bio == 0 && drbd_test_flag(mdev, BITMAP_IO)) {
		if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}

@@ -2477,7 +2504,7 @@ static inline void drbd_update_congested(struct drbd_conf *mdev)
{
	struct sock *sk = mdev->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &mdev->flags);
		drbd_set_flag(mdev, NET_CONGESTED);
}

static inline int drbd_queue_order_type(struct drbd_conf *mdev)
@@ -2494,14 +2521,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
	int r;

	if (test_bit(MD_NO_FUA, &mdev->flags))
	if (drbd_test_flag(mdev, MD_NO_FUA))
		return;

	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
	if (r) {
		set_bit(MD_NO_FUA, &mdev->flags);
		drbd_set_flag(mdev, MD_NO_FUA);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}


#endif
+41 −41
Original line number Diff line number Diff line
@@ -322,7 +322,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
	list_splice_init(&b->requests, &mdev->barrier_acked_requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
	if (drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
@@ -381,7 +381,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
					drbd_set_flag(mdev, CREATE_BARRIER);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
@@ -464,7 +464,7 @@ static void _tl_clear(struct drbd_conf *mdev)
	}

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);
	drbd_clear_flag(mdev, CREATE_BARRIER);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

@@ -582,10 +582,10 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
	if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_SUCCESS))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
	if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_FAIL))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
@@ -660,7 +660,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
		}

		if (mask.conn == C_MASK && val.conn == C_DISCONNECTING)
			set_bit(DISCONNECT_SENT, &mdev->flags);
			drbd_set_flag(mdev, DISCONNECT_SENT);

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));
@@ -850,7 +850,7 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,

	/* While establishing a connection only allow cstate to change.
	   Delay/refuse role changes, detach attach etc... */
	if (test_bit(STATE_SENT, &mdev->flags) &&
	if (drbd_test_flag(mdev, STATE_SENT) &&
	    !(os.conn == C_WF_REPORT_PARAMS ||
	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
		rv = SS_IN_TRANSIENT_STATE;
@@ -1109,7 +1109,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)

static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
	if (drbd_test_and_clear_flag(mdev, AL_SUSPENDED))
		dev_info(DEV, "Resumed AL updates\n");
}

@@ -1215,8 +1215,8 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);
	    !drbd_test_and_set_flag(mdev, CONFIG_PENDING))
		drbd_set_flag(mdev, DEVICE_DYING);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -1291,7 +1291,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
		if (drbd_test_flag(mdev, CRASHED_PRIMARY))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
@@ -1316,7 +1316,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);
		drbd_set_flag(mdev, CONSIDER_RESYNC);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
@@ -1400,7 +1400,7 @@ int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
	D_ASSERT(current == mdev->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);
	drbd_set_flag(mdev, SUSPEND_IO);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
@@ -1426,7 +1426,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
	union drbd_state nsm = (union drbd_state){ .i = -1 };

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		drbd_clear_flag(mdev, CRASHED_PRIMARY);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}
@@ -1466,9 +1466,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
			if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
				drbd_clear_flag(mdev, NEW_CUR_UUID);
			}
			spin_lock_irq(&mdev->req_lock);
			_tl_clear(mdev);
@@ -1477,7 +1477,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			drbd_clear_flag(mdev, NEW_CUR_UUID);
			what = resend;
			nsm.susp_fen = 0;
		}
@@ -1534,7 +1534,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
					drbd_set_flag(mdev, NEW_CUR_UUID);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
@@ -1625,7 +1625,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
		 * we might come from an failed Attach before ldev was set. */
		if (mdev->ldev) {
			eh = mdev->ldev->dc.on_io_error;
			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
			was_io_error = drbd_test_and_clear_flag(mdev, WAS_IO_ERROR);

			if (was_io_error && eh == EP_CALL_HELPER)
				drbd_khelper(mdev, "local-io-error");
@@ -1643,7 +1643,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			 * So aborting local requests may cause crashes,
			 * or even worse, silent data corruption.
			 */
			if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
			if (drbd_test_and_clear_flag(mdev, FORCE_DETACH))
				tl_abort_disk_io(mdev);

			/* current state still has to be D_FAILED,
@@ -1692,7 +1692,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
	    drbd_test_and_clear_flag(mdev, RESYNC_AFTER_NEG)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}
@@ -1717,7 +1717,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,

	/* Wake up role changes, that were delayed because of connection establishing */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &mdev->flags);
		drbd_clear_flag(mdev, STATE_SENT);
		wake_up(&mdev->state_wait);
	}

@@ -1750,7 +1750,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
		if (test_bit(DEVICE_DYING, &mdev->flags))
		if (drbd_test_flag(mdev, DEVICE_DYING))
			drbd_thread_stop_nowait(&mdev->worker);
	}

@@ -2145,7 +2145,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= drbd_test_flag(mdev, CRASHED_PRIMARY) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

@@ -2775,7 +2775,7 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->flags);
	drbd_clear_flag(mdev, NET_CONGESTED);

	ok = (len == 0);
	if (likely(ok))
@@ -2877,7 +2877,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
		dp_flags |= DP_MAY_SET_IN_SYNC;

	p.dp_flags = cpu_to_be32(dp_flags);
	set_bit(UNPLUG_REMOTE, &mdev->flags);
	drbd_set_flag(mdev, UNPLUG_REMOTE);
	ok = (sizeof(p) ==
		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
	if (ok && dgs) {
@@ -3056,7 +3056,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
	} while (sent < size);

	if (sock == mdev->data.socket)
		clear_bit(NET_CONGESTED, &mdev->flags);
		drbd_clear_flag(mdev, NET_CONGESTED);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
@@ -3263,7 +3263,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
	}

	drbd_free_resources(mdev);
	clear_bit(AL_SUSPENDED, &mdev->flags);
	drbd_clear_flag(mdev, AL_SUSPENDED);

	/*
	 * currently we drbd_init_ee only on module load, so
@@ -3556,7 +3556,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
	if (drbd_test_flag(mdev, CALLBACK_PENDING)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
@@ -3580,7 +3580,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
	if (bdi_bits & (1 << BDI_async_congested) && drbd_test_flag(mdev, NET_CONGESTED)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}
@@ -3867,7 +3867,7 @@ void drbd_md_sync(struct drbd_conf *mdev)

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
	if (!drbd_test_and_clear_flag(mdev, MD_DIRTY))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
@@ -4011,7 +4011,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
	if (!drbd_test_and_set_flag(mdev, MD_DIRTY)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
@@ -4020,7 +4020,7 @@ void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
	if (!drbd_test_and_set_flag(mdev, MD_DIRTY))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
@@ -4182,14 +4182,14 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
		put_ldev(mdev);
	}

	clear_bit(BITMAP_IO, &mdev->flags);
	drbd_clear_flag(mdev, BITMAP_IO);
	smp_mb__after_clear_bit();
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	drbd_clear_flag(mdev, BITMAP_IO_QUEUED);
	work->why = NULL;
	work->flags = 0;

@@ -4210,7 +4210,7 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
		__free_page(mdev->md_io_tmpp);
		mdev->md_io_tmpp = NULL;
	}
	clear_bit(GO_DISKLESS, &mdev->flags);
	drbd_clear_flag(mdev, GO_DISKLESS);
}

static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
@@ -4227,7 +4227,7 @@ static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused
void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
	if (!drbd_test_and_set_flag(mdev, GO_DISKLESS))
		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
}

@@ -4250,8 +4250,8 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
{
	D_ASSERT(current == mdev->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO_QUEUED));
	D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
@@ -4263,9 +4263,9 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	drbd_set_flag(mdev, BITMAP_IO);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
		if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->req_lock);
+28 −28
Original line number Diff line number Diff line
@@ -148,7 +148,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
	int ret;

	if (current == mdev->worker.task)
		set_bit(CALLBACK_PENDING, &mdev->flags);
		drbd_set_flag(mdev, CALLBACK_PENDING);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

@@ -193,7 +193,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
				(ret >> 8) & 0xff, ret);

	if (current == mdev->worker.task)
		clear_bit(CALLBACK_PENDING, &mdev->flags);
		drbd_clear_flag(mdev, CALLBACK_PENDING);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;
@@ -295,7 +295,7 @@ static int _try_outdate_peer_async(void *data)
	*/
	spin_lock_irq(&mdev->req_lock);
	ns = mdev->state;
	if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
	if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
		ns.pdsk = nps;
		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
@@ -583,7 +583,7 @@ char *ppsize(char *buf, unsigned long long size)
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	drbd_set_flag(mdev, SUSPEND_IO);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
@@ -591,7 +591,7 @@ void drbd_suspend_io(struct drbd_conf *mdev)

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	drbd_clear_flag(mdev, SUSPEND_IO);
	wake_up(&mdev->misc_wait);
}

@@ -881,8 +881,8 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
	wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}
@@ -896,10 +896,10 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_set_flag(mdev, DEVICE_DYING);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
		drbd_clear_flag(mdev, CONFIG_PENDING);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}
@@ -919,7 +919,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
		s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);

	spin_unlock_irq(&mdev->req_lock);

@@ -958,7 +958,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &mdev->flags);
	drbd_clear_flag(mdev, FORCE_DETACH);

	/* and no leftover from previously aborted resync or verify, either */
	mdev->rs_total = 0;
@@ -1168,9 +1168,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
		drbd_set_flag(mdev, MD_NO_FUA);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);
		drbd_clear_flag(mdev, MD_NO_FUA);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
@@ -1186,13 +1186,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		drbd_set_flag(mdev, CRASHED_PRIMARY);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		drbd_clear_flag(mdev, CRASHED_PRIMARY);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		drbd_set_flag(mdev, CRASHED_PRIMARY);
		cp_discovered = 1;
	}

@@ -1217,18 +1217,18 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);
		drbd_set_flag(mdev, USE_DEGR_WFC_T);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		drbd_set_flag(mdev, RESYNC_AFTER_NEG);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
@@ -1362,7 +1362,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
	}

	if (dt.detach_force) {
		set_bit(FORCE_DETACH, &mdev->flags);
		drbd_set_flag(mdev, FORCE_DETACH);
		drbd_force_state(mdev, NS(disk, D_FAILED));
		reply->ret_code = SS_SUCCESS;
		goto out;
@@ -1707,7 +1707,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1765,7 +1765,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);
			drbd_set_flag(mdev, RESIZE_PENDING);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
@@ -1983,7 +1983,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for it's after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2026,7 +2026,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for it's after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -2094,9 +2094,9 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
	if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_clear_flag(mdev, NEW_CUR_UUID);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
@@ -2199,7 +2199,7 @@ static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
	  test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
	  drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2224,7 +2224,7 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));

	/* w_make_ov_request expects start position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
+1 −1
Original line number Diff line number Diff line
@@ -270,7 +270,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
			   mdev->state.peer_isp ? 'p' : '-',
			   mdev->state.user_isp ? 'u' : '-',
			   mdev->congestion_reason ?: '-',
			   test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
			   drbd_test_flag(mdev, AL_SUSPENDED) ? 's' : '-',
			   mdev->send_cnt/2,
			   mdev->recv_cnt/2,
			   mdev->writ_cnt/2,
Loading