Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b0ba1ef authored by Andreas Gruenbacher, committed by Philipp Reisner
Browse files

drbd: Add explicit device parameter to D_ASSERT



The implicit dependency on a variable inside the macro is problematic.

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent 1ec861eb
Loading
Loading
Loading
Loading
+19 −19
Original line number Diff line number Diff line
@@ -198,7 +198,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
	int err;
	struct page *iop = device->md_io_page;

	D_ASSERT(atomic_read(&device->md_io_in_use) == 1);
	D_ASSERT(device, atomic_read(&device->md_io_in_use) == 1);

	BUG_ON(!bdev->md_bdev);

@@ -264,8 +264,8 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);

	D_ASSERT((unsigned)(last - first) <= 1);
	D_ASSERT(atomic_read(&device->local_cnt) > 0);
	D_ASSERT(device, (unsigned)(last - first) <= 1);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
	if (first != last)
@@ -284,8 +284,8 @@ bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *
	unsigned enr;
	bool need_transaction = false;

	D_ASSERT(first <= last);
	D_ASSERT(atomic_read(&device->local_cnt) > 0);
	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
@@ -371,7 +371,7 @@ int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *
	unsigned available_update_slots;
	unsigned enr;

	D_ASSERT(first <= last);
	D_ASSERT(device, first <= last);

	nr_al_extents = 1 + last - first; /* worst case: all touched extends are cold. */
	available_update_slots = min(al->nr_elements - al->used,
@@ -419,7 +419,7 @@ void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(first <= last);
	D_ASSERT(device, first <= last);
	spin_lock_irqsave(&device->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
@@ -648,7 +648,7 @@ void drbd_al_shrink(struct drbd_device *device)
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_LOCKED, &device->act_log->flags));
	D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));

	for (i = 0; i < device->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(device->act_log, i);
@@ -729,7 +729,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto

	unsigned int enr;

	D_ASSERT(atomic_read(&device->local_cnt));
	D_ASSERT(device, atomic_read(&device->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
@@ -1093,8 +1093,8 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
		e = lc_find(device->resync, device->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			device->resync_wenr = LC_FREE;
			if (lc_put(device->resync, &bm_ext->lce) == 0)
@@ -1118,7 +1118,7 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
			D_ASSERT(device, bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
@@ -1141,10 +1141,10 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wake_up(&device->al_wait);
			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
			D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(bm_ext->lce.refcnt == 1);
		D_ASSERT(device, bm_ext->lce.refcnt == 1);
		device->resync_locked++;
		goto check_al;
	}
@@ -1244,8 +1244,8 @@ int drbd_rs_del_all(struct drbd_device *device)
				drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     device->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				device->resync_wenr = LC_FREE;
				lc_put(device->resync, &bm_ext->lce);
@@ -1257,11 +1257,11 @@ int drbd_rs_del_all(struct drbd_device *device)
				spin_unlock_irq(&device->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(device->resync, &bm_ext->lce);
		}
		D_ASSERT(device->resync->used == 0);
		D_ASSERT(device, device->resync->used == 0);
		put_ldev(device);
	}
	spin_unlock_irq(&device->al_lock);
+1 −1
Original line number Diff line number Diff line
@@ -692,7 +692,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
+6 −4
Original line number Diff line number Diff line
@@ -147,8 +147,10 @@ void drbd_printk_with_wrong_object_type(void);
#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(exp)	if (!(exp)) \
	 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
@@ -1863,7 +1865,7 @@ static inline void put_ldev(struct drbd_device *device)
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(i >= 0);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (device->state.disk == D_DISKLESS)
			/* even internal references gone, safe to destroy */
@@ -2094,7 +2096,7 @@ static inline void dec_ap_bio(struct drbd_device *device)
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);
	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
+23 −23
Original line number Diff line number Diff line
@@ -891,7 +891,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device->state.disk == D_UP_TO_DATE);
	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
@@ -919,7 +919,7 @@ int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flag
	unsigned int max_bio_size;

	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		D_ASSERT(device->ldev->backing_bdev);
		D_ASSERT(device, device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
@@ -1974,7 +1974,7 @@ void drbd_device_cleanup(struct drbd_device *device)
		device->rs_mark_left[i] = 0;
		device->rs_mark_time[i] = 0;
	}
	D_ASSERT(first_peer_device(device)->connection->net_conf == NULL);
	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

	drbd_set_my_capacity(device, 0);
	if (device->bitmap) {
@@ -1988,16 +1988,16 @@ void drbd_device_cleanup(struct drbd_device *device)

	clear_bit(AL_SUSPENDED, &device->flags);

	D_ASSERT(list_empty(&device->active_ee));
	D_ASSERT(list_empty(&device->sync_ee));
	D_ASSERT(list_empty(&device->done_ee));
	D_ASSERT(list_empty(&device->read_ee));
	D_ASSERT(list_empty(&device->net_ee));
	D_ASSERT(list_empty(&device->resync_reads));
	D_ASSERT(list_empty(&first_peer_device(device)->connection->sender_work.q));
	D_ASSERT(list_empty(&device->resync_work.list));
	D_ASSERT(list_empty(&device->unplug_work.list));
	D_ASSERT(list_empty(&device->go_diskless.list));
	D_ASSERT(device, list_empty(&device->active_ee));
	D_ASSERT(device, list_empty(&device->sync_ee));
	D_ASSERT(device, list_empty(&device->done_ee));
	D_ASSERT(device, list_empty(&device->read_ee));
	D_ASSERT(device, list_empty(&device->net_ee));
	D_ASSERT(device, list_empty(&device->resync_reads));
	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
	D_ASSERT(device, list_empty(&device->resync_work.list));
	D_ASSERT(device, list_empty(&device->unplug_work.list));
	D_ASSERT(device, list_empty(&device->go_diskless.list));

	drbd_set_defaults(device);
}
@@ -2014,7 +2014,7 @@ static void drbd_destroy_mempools(void)
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
@@ -2169,7 +2169,7 @@ void drbd_destroy_device(struct kref *kref)
	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device->open_cnt == 0);
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
@@ -3006,7 +3006,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
@@ -3459,7 +3459,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
	struct drbd_device *device = w->device;
	int rv = -EIO;

	D_ASSERT(atomic_read(&device->ap_bio_cnt) == 0);
	D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
@@ -3498,7 +3498,7 @@ static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_device *device = w->device;

	D_ASSERT(device->state.disk == D_FAILED);
	D_ASSERT(device, device->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
@@ -3552,11 +3552,11 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == first_peer_device(device)->connection->worker.task);
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(list_empty(&device->bm_io_work.w.list));
	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);
@@ -3589,7 +3589,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
{
	int rv;

	D_ASSERT(current != first_peer_device(device)->connection->worker.task);
	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(device);
+4 −4
Original line number Diff line number Diff line
@@ -590,7 +590,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device->state.pdsk == D_UNKNOWN);
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
				val.disk = D_UP_TO_DATE;
@@ -1644,7 +1644,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
	 * Devices and memory are no longer released by error cleanup below.
	 * now device takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(device->ldev == NULL);
	D_ASSERT(device, device->ldev == NULL);
	device->ldev = nbc;
	device->resync = resync_lru;
	device->rs_plan_s = new_plan;
@@ -3011,8 +3011,8 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
		}

		device = peer_device->device;
		D_ASSERT(device->vnr == volume);
		D_ASSERT(first_peer_device(device)->connection == connection);
		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, first_peer_device(device)->connection == connection);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;
Loading