
Commit 6a0afdf5 authored by Jens Axboe

drbd: remove tracing bits

They should be reimplemented in the current scheme.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent ab8fafc2
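
For context: the "current scheme" the commit message refers to is the kernel's TRACE_EVENT() tracepoint infrastructure. Below is a minimal sketch of how one of the removed ad-hoc hooks (trace_drbd_actlog()) could later be reintroduced that way; the header location, event name and field layout are illustrative assumptions, not part of this commit.

/* Hypothetical sketch only -- not part of this commit.
 * A TRACE_EVENT()-based replacement for the removed trace_drbd_actlog()
 * hook, e.g. in a new include/trace/events/drbd.h.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM drbd

#if !defined(_TRACE_DRBD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DRBD_H

#include <linux/tracepoint.h>

TRACE_EVENT(drbd_actlog,

	TP_PROTO(unsigned int minor, sector_t sector, const char *msg),

	TP_ARGS(minor, sector, msg),

	TP_STRUCT__entry(
		__field(unsigned int,	minor)
		__field(sector_t,	sector)
		__string(msg,		msg)
	),

	TP_fast_assign(
		__entry->minor	= minor;
		__entry->sector	= sector;
		__assign_str(msg, msg);
	),

	TP_printk("drbd%u: sector=%llus %s",
		  __entry->minor,
		  (unsigned long long)__entry->sector,
		  __get_str(msg))
);

#endif /* _TRACE_DRBD_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>

One compilation unit (for example drbd_actlog.c) would then define CREATE_TRACE_POINTS before including the header, and call trace_drbd_actlog(minor, sector, "al_begin_io") at the points where the removed hooks used to sit.
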
drivers/block/drbd/Kconfig +0 −11
@@ -38,17 +38,6 @@ config BLK_DEV_DRBD

	  If unsure, say N.

config DRBD_TRACE
	tristate "DRBD tracing"
	depends on BLK_DEV_DRBD
	select TRACEPOINTS
	default n
	help

	  Say Y here if you want to be able to trace various events in DRBD.

	  If unsure, say N.

config DRBD_FAULT_INJECTION
	bool "DRBD fault injection"
	depends on BLK_DEV_DRBD
drivers/block/drbd/Makefile +0 −3
@@ -2,7 +2,4 @@ drbd-y := drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o

drbd_trace-y := drbd_tracing.o

obj-$(CONFIG_BLK_DEV_DRBD)     += drbd.o
obj-$(CONFIG_DRBD_TRACE)       += drbd_trace.o
drivers/block/drbd/drbd_actlog.c +1 −61
@@ -26,7 +26,6 @@
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_wrappers.h"

/* We maintain a trivial check sum in our on disk activity log.
@@ -66,17 +65,6 @@ struct drbd_atodb_wait {

int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);

/* The actual tracepoint needs to have constant number of known arguments...
 */
void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	trace__drbd_resync(mdev, level, fmt, ap);
	va_end(ap);
}

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
@@ -105,8 +93,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	trace_drbd_bio(mdev, "Md", bio, 0, NULL);

	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
@@ -236,8 +222,6 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)

	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	trace_drbd_actlog(mdev, sector, "al_begin_io");

	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));

	if (al_ext->lc_number != enr) {
@@ -270,8 +254,6 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
	struct lc_element *extent;
	unsigned long flags;

	trace_drbd_actlog(mdev, sector, "al_complete_io");

	spin_lock_irqsave(&mdev->al_lock, flags);

	extent = lc_find(mdev->act_log, enr);
@@ -967,10 +949,6 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	trace_drbd_resync(mdev, TRACE_LVL_METRICS,
			  "drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
			  (unsigned long long)sector, size, sbnr, ebnr);

	if (sbnr > ebnr)
		return;

@@ -1045,10 +1023,6 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	trace_drbd_resync(mdev, TRACE_LVL_METRICS,
			  "drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
			  (unsigned long long)sector, size, sbnr, ebnr);

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.  */
	spin_lock_irqsave(&mdev->al_lock, flags);
@@ -1143,10 +1117,6 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
	struct bm_extent *bm_ext;
	int i, sig;

	trace_drbd_resync(mdev, TRACE_LVL_ALL,
			  "drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
			  (unsigned long long)sector, enr);

	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
@@ -1192,9 +1162,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
	struct bm_extent *bm_ext;
	int i;

	trace_drbd_resync(mdev, TRACE_LVL_ALL, "drbd_try_rs_begin_io: sector=%llus\n",
			  (unsigned long long)sector);

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
@@ -1210,11 +1177,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
		 * the lc_put here...
		 * we also have to wake_up
		 */

		trace_drbd_resync(mdev, TRACE_LVL_ALL,
				  "dropping %u, apparently got 'synced' by application io\n",
				  mdev->resync_wenr);

		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
@@ -1242,21 +1204,14 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			trace_drbd_resync(mdev, TRACE_LVL_ALL,
					  "dropping extra reference on %u\n", enr);

			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (mdev->resync_locked > mdev->resync->nr_elements-3) {
			trace_drbd_resync(mdev, TRACE_LVL_ALL,
					  "resync_locked = %u!\n", mdev->resync_locked);

		if (mdev->resync_locked > mdev->resync->nr_elements-3)
			goto try_again;
		}
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(mdev->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
@@ -1281,8 +1236,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
		goto check_al;
	}
check_al:
	trace_drbd_resync(mdev, TRACE_LVL_ALL, "checking al for %u\n", enr);

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (unlikely(al_enr+i == mdev->act_log->new_number))
			goto try_again;
@@ -1296,7 +1249,6 @@ proceed:
	return 0;

try_again:
	trace_drbd_resync(mdev, TRACE_LVL_ALL, "need to try again for %u\n", enr);
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
@@ -1310,10 +1262,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
	struct bm_extent *bm_ext;
	unsigned long flags;

	trace_drbd_resync(mdev, TRACE_LVL_ALL,
			  "drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
			  (long long)sector, enr);

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
@@ -1348,8 +1296,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_cancel_all\n");

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
@@ -1375,8 +1321,6 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
	struct bm_extent *bm_ext;
	int i;

	trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_del_all\n");

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
@@ -1429,10 +1373,6 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
	sector_t esector, nr_sectors;
	int wake_up = 0;

	trace_drbd_resync(mdev, TRACE_LVL_SUMMARY,
			  "drbd_rs_failed_io: sector=%llus, size=%u\n",
			  (unsigned long long)sector, size);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
drivers/block/drbd/drbd_int.h +0 −7
@@ -135,8 +135,6 @@ enum {
	DRBD_FAULT_MAX,
};

extern void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
@@ -712,11 +710,6 @@ enum epoch_event {
	EV_GOT_BARRIER_NR,
	EV_BARRIER_DONE,
	EV_BECAME_LAST,
	EV_TRACE_FLUSH,       /* TRACE_ are not real events, only used for tracing */
	EV_TRACE_ADD_BARRIER, /* Doing the first write as a barrier write */
	EV_TRACE_SETTING_BI,  /* Barrier is expressed with the first write of the next epoch */
	EV_TRACE_ALLOC,
	EV_TRACE_FREE,
	EV_CLEANUP = 32, /* used as flag */
};

drivers/block/drbd/drbd_main.c +1 −35
@@ -53,7 +53,6 @@

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"
@@ -80,18 +79,6 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);

DEFINE_TRACE(drbd_unplug);
DEFINE_TRACE(drbd_uuid);
DEFINE_TRACE(drbd_ee);
DEFINE_TRACE(drbd_packet);
DEFINE_TRACE(drbd_md_io);
DEFINE_TRACE(drbd_epoch);
DEFINE_TRACE(drbd_netlink);
DEFINE_TRACE(drbd_actlog);
DEFINE_TRACE(drbd_bio);
DEFINE_TRACE(_drbd_resync);
DEFINE_TRACE(drbd_req);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
@@ -1576,7 +1563,6 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size-sizeof(struct p_header));

	trace_drbd_packet(mdev, sock, 0, (void *)h, __FILE__, __LINE__);
	sent = drbd_send(mdev, sock, h, size, msg_flags);

	ok = (sent == size);
@@ -1628,8 +1614,6 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
	if (!drbd_get_data_sock(mdev))
		return 0;

	trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&h, __FILE__, __LINE__);

	ok = (sizeof(h) ==
		drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
	ok = ok && (size ==
@@ -2359,7 +2343,6 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
		dp_flags |= DP_MAY_SET_IN_SYNC;

	p.dp_flags = cpu_to_be32(dp_flags);
	trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
	set_bit(UNPLUG_REMOTE, &mdev->flags);
	ok = (sizeof(p) ==
		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
@@ -2410,7 +2393,6 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
	if (!drbd_get_data_sock(mdev))
		return 0;

	trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
					sizeof(p), MSG_MORE);
	if (ok && dgs) {
@@ -2546,8 +2528,6 @@ static void drbd_unplug_fn(struct request_queue *q)
{
	struct drbd_conf *mdev = q->queuedata;

	trace_drbd_unplug(mdev, "got unplugged");

	/* unplug FIRST */
	spin_lock_irq(q->queue_lock);
	blk_remove_plug(q);
@@ -3252,8 +3232,6 @@ void drbd_md_sync(struct drbd_conf *mdev)
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	trace_drbd_md_io(mdev, WRITE, mdev->ldev);

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
	memset(buffer, 0, 512);
@@ -3308,8 +3286,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	trace_drbd_md_io(mdev, READ, bdev);

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

@@ -3388,11 +3364,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];

		trace_drbd_uuid(mdev, i+1);
	}
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
@@ -3407,7 +3380,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
	}

	mdev->ldev->md.uuid[idx] = val;
	trace_drbd_uuid(mdev, idx);
	drbd_md_mark_dirty(mdev);
}

@@ -3417,7 +3389,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
		trace_drbd_uuid(mdev, UI_HISTORY_START);
	}
	_drbd_uuid_set(mdev, idx, val);
}
@@ -3436,7 +3407,6 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
	dev_info(DEV, "Creating new current UUID\n");
	D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
	trace_drbd_uuid(mdev, UI_BITMAP);

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
@@ -3451,8 +3421,6 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
		trace_drbd_uuid(mdev, UI_HISTORY_START);
		trace_drbd_uuid(mdev, UI_BITMAP);
	} else {
		if (mdev->ldev->md.uuid[UI_BITMAP])
			dev_warn(DEV, "bm UUID already set");
@@ -3460,7 +3428,6 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
		mdev->ldev->md.uuid[UI_BITMAP] = val;
		mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);

		trace_drbd_uuid(mdev, UI_BITMAP);
	}
	drbd_md_mark_dirty(mdev);
}
@@ -3727,7 +3694,6 @@ const char *drbd_buildtag(void)
module_init(drbd_init)
module_exit(drbd_cleanup)

/* For drbd_tracing: */
EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);