Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1bdad606 authored by Steven Whitehouse
Browse files

[GFS2] Remove remote lock dropping code



There are several reasons why this is undesirable:

 1. It never happens during normal operation anyway
 2. If it does happen it causes performance to be very, very poor
 3. It isn't likely to solve the original problem (memory shortage
    on remote DLM node) it was supposed to solve
 4. It uses a bunch of arbitrary constants which are unlikely to be
    correct for any particular situation and for which the tuning seems
    to be a black art.
 5. In an N node cluster, only 1/N of the dropped locks will actually
    contribute to solving the problem on average.

So all in all we are better off without it. This also makes merging
the lock_dlm module into GFS2 a bit easier.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 9171f5a9
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -15,11 +15,6 @@ enum {
	CREATE = 1,
};

enum {
	NO_WAIT = 0,
	WAIT = 1,
};

enum {
	NO_FORCE = 0,
	FORCE = 1,
+3 −9
Original line number Diff line number Diff line
@@ -1316,11 +1316,6 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
@@ -1508,11 +1503,10 @@ static void clear_glock(struct gfs2_glock *gl)
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned long t;
	unsigned int x;
@@ -1527,7 +1521,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
				cont = 1;
		}

		if (!wait || !cont)
		if (!cont)
			break;

		if (time_after_eq(jiffies,
+1 −1
Original line number Diff line number Diff line
@@ -132,7 +132,7 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl);
void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);

int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);
+0 −3
Original line number Diff line number Diff line
@@ -79,9 +79,6 @@ struct gdlm_ls {
	wait_queue_head_t	wait_control;
	struct task_struct	*thread;
	wait_queue_head_t	thread_wait;
	unsigned long		drop_time;
	int			drop_locks_count;
	int			drop_locks_period;
};

enum {
+0 −3
Original line number Diff line number Diff line
@@ -22,8 +22,6 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
	if (!ls)
		return NULL;

	ls->drop_locks_count = GDLM_DROP_COUNT;
	ls->drop_locks_period = GDLM_DROP_PERIOD;
	ls->fscb = cb;
	ls->sdp = sdp;
	ls->fsflags = flags;
@@ -33,7 +31,6 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
	INIT_LIST_HEAD(&ls->all_locks);
	init_waitqueue_head(&ls->thread_wait);
	init_waitqueue_head(&ls->wait_control);
	ls->drop_time = jiffies;
	ls->jid = -1;

	strncpy(buf, table_name, 256);
Loading