Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 01cab554 authored by Linus Torvalds
Browse files
Pull GFS2 updates from Bob Peterson:
 "Here is a list of patches we've accumulated for GFS2 for the current
  upstream merge window.  This time we've only got six patches, many of
  which are very small:

   - three cleanups from Andreas Gruenbacher, including a nice cleanup
     of the sequence file code for the sbstats debugfs file.

   - a patch from Ben Hutchings that changes statistics variables from
     signed to unsigned.

   - two patches from me that increase GFS2's glock scalability by
     switching from a conventional hash table to rhashtable"

* tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: A minor "sbstats" cleanup
  gfs2: Fix a typo in a comment
  gfs2: Make statistics unsigned, suitable for use with do_div()
  GFS2: Use resizable hash table for glocks
  GFS2: Move glock superblock pointer to field gl_name
  gfs2: Simplify the seq file code for "sbstats"
parents 64d1def7 8f7e0a80
Loading
Loading
Loading
Loading
+136 −212
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
@@ -50,9 +51,8 @@
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index           */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct rhashtable_iter hti;	/* rhashtable iterator         */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};
@@ -70,44 +70,19 @@ static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
static struct dentry *gfs2_root;

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}
static struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = sizeof(struct lm_lockname),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}
static struct rhashtable gl_hash_table;

static void gfs2_glock_dealloc(struct rcu_head *rcu)
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -115,13 +90,6 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
@@ -192,7 +160,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (lockref_put_or_lock(&gl->gl_lockref))
@@ -202,42 +170,13 @@ void gfs2_glock_put(struct gfs2_glock *gl)

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	spin_lock_bucket(gl->gl_hash);
	hlist_bl_del_rcu(&gl->gl_list);
	spin_unlock_bucket(gl->gl_hash);
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&gl->gl_lockref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if its ok to grant a new lock
 * @gl: The glock
@@ -506,7 +445,7 @@ __releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

@@ -628,7 +567,7 @@ __acquires(&gl->gl_spin)
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;
@@ -704,15 +643,17 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp = NULL;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret, tries = 0;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();
	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
		gl = NULL;

	*glp = gl;
	if (gl)
@@ -739,14 +680,13 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
@@ -771,22 +711,34 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
again:
	ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
					    ht_parms);
	if (ret == 0) {
		*glp = gl;
		return 0;
	}

	if (ret == -EEXIST) {
		ret = 0;
		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
			if (++tries < 100) {
				cond_resched();
				goto again;
			}
			tmp = NULL;
			ret = -ENOMEM;
		}
	} else {
		WARN_ON_ONCE(ret);
	}
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}
	*glp = tmp;

	*glp = gl;

	return 0;
	return ret;
}

/**
@@ -928,7 +880,7 @@ __releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;
@@ -1006,7 +958,7 @@ __acquires(&gl->gl_spin)
int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
@@ -1313,7 +1265,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;
@@ -1462,31 +1414,26 @@ static struct shrinker glock_shrinker = {
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			  unsigned int hash)
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;
	struct rhash_head *pos, *next;
	const struct bucket_table *tbl;
	int i;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_safe(gl, pos, next, tbl, i, gl_node) {
			if ((gl->gl_name.ln_sbd == sdp) &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);
		}
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}


/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
@@ -1569,7 +1516,7 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1733,17 +1680,17 @@ static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

@@ -1776,11 +1723,10 @@ static const char *gfs2_stype[] = {

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	struct gfs2_sbd *sdp = gi->sdp;
	unsigned index = gi->hash >> 3;
	unsigned subindex = gi->hash & 0x07;
	s64 value;
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
@@ -1791,12 +1737,12 @@ static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)

	for_each_possible_cpu(i) {
                const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
@@ -1804,20 +1750,24 @@ static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}
	int ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

@@ -1829,72 +1779,41 @@ int __init gfs2_glock_init(void)
void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
			gi->nhash++;
		} else {
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		gi->gl = rhashtable_walk_next(&gi->hti);
		if (IS_ERR(gi->gl)) {
			if (PTR_ERR(gi->gl) == -EAGAIN)
				continue;
			gi->gl = NULL;
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd ||
		 __lockref_is_dead(&gi->gl->gl_lockref));

	return 0;
	} while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
			      __lockref_is_dead(&gi->gl->gl_lockref)));
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;
	int ret;

	if (gi->last_pos <= *pos)
		n = gi->nhash + (*pos - gi->last_pos);
	else
		gi->hash = 0;
		n = (*pos - gi->last_pos);

	gi->nhash = 0;
	rcu_read_lock();
	ret = rhashtable_walk_start(&gi->hti);
	if (ret)
		return NULL;

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);
		gfs2_glock_iter_next(gi);
	} while (gi->gl && n--);

	gi->last_pos = *pos;
	return gi->gl;
@@ -1907,9 +1826,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,

	(*pos)++;
	gi->last_pos = *pos;
	if (gfs2_glock_iter_next(gi))
		return NULL;

	gfs2_glock_iter_next(gi);
	return gi->gl;
}

@@ -1917,9 +1834,8 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -1930,26 +1846,19 @@ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->hash = *pos;
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	preempt_disable();
	return SEQ_START_TOKEN;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	(*pos)++;
	gi->hash++;
	if (gi->hash >= GFS2_NR_SBSTATS) {
		preempt_enable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	}
	return SEQ_START_TOKEN;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
@@ -1987,14 +1896,28 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		gi->last_pos = 0;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->gl = NULL;
		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
@@ -2003,21 +1926,22 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		gi->last_pos = 0;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->gl = NULL;
		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->private = inode->i_private;  /* sdp */
	}
	return ret;
}
@@ -2027,7 +1951,7 @@ static const struct file_operations gfs2_glocks_fops = {
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
@@ -2035,7 +1959,7 @@ static const struct file_operations gfs2_glstats_fops = {
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
@@ -2043,7 +1967,7 @@ static const struct file_operations gfs2_sbstats_fops = {
	.open	 = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
	.release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
+20 −18
Original line number Diff line number Diff line
@@ -32,13 +32,15 @@ struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
@@ -52,7 +54,7 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
@@ -80,7 +82,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
@@ -109,7 +111,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;
@@ -139,7 +141,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;
@@ -179,7 +181,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gl->gl_object;

@@ -218,7 +220,7 @@ static void inode_go_sync(struct gfs2_glock *gl)

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
	gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
@@ -252,7 +254,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
@@ -264,9 +266,9 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
@@ -281,7 +283,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
@@ -416,7 +418,7 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

@@ -477,7 +479,7 @@ static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
@@ -500,7 +502,7 @@ static void freeze_go_sync(struct gfs2_glock *gl)

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
@@ -545,7 +547,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;
+8 −7
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020
@@ -203,13 +204,15 @@ enum {
};

struct lm_lockname {
	struct gfs2_sbd *ln_sbd;
	u64 ln_number;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
        (((name1)->ln_number == (name2)->ln_number) &&	\
         ((name1)->ln_type == (name2)->ln_type))
	 ((name1)->ln_type == (name2)->ln_type) &&	\
	 ((name1)->ln_sbd == (name2)->ln_sbd))


struct gfs2_glock_operations {
@@ -241,7 +244,7 @@ enum {
};

struct gfs2_lkstats {
	s64 stats[GFS2_NR_LKSTATS];
	u64 stats[GFS2_NR_LKSTATS];
};

enum {
@@ -327,7 +330,6 @@ enum {

struct gfs2_glock {
	struct hlist_bl_node gl_list;
	struct gfs2_sbd *gl_sbd;
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;

@@ -341,7 +343,6 @@ struct gfs2_glock {
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned int gl_hash;
	unsigned long gl_demote_time; /* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;
@@ -367,7 +368,7 @@ struct gfs2_glock {
			loff_t end;
		} gl_vm;
	};
	struct rcu_head gl_rcu;
	struct rhash_head gl_node;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
@@ -835,7 +836,7 @@ static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
+6 −6
Original line number Diff line number Diff line
@@ -31,7 +31,7 @@ extern struct workqueue_struct *gfs2_control_wq;
 *
 * @delta is the difference between the current rtt sample and the
 * running average srtt. We add 1/8 of that to the srtt in order to
 * update the current srtt estimate. The varience estimate is a bit
 * update the current srtt estimate. The variance estimate is a bit
 * more complicated. We subtract the abs value of the @delta from
 * the current variance estimate and add 1/4 of that to the running
 * total.
@@ -80,7 +80,7 @@ static inline void gfs2_update_reply_times(struct gfs2_glock *gl)

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats);
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();
@@ -108,7 +108,7 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl)
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats);
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
@@ -253,7 +253,7 @@ static void gfs2_reverse_hex(char *c, u64 value)
static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";
@@ -281,7 +281,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,

static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int lvb_needs_unlock = 0;
	int error;
@@ -319,7 +319,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

+3 −3
Original line number Diff line number Diff line
@@ -70,7 +70,7 @@ static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;
@@ -578,7 +578,7 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
@@ -588,7 +588,7 @@ static void gfs2_meta_sync(struct gfs2_glock *gl)
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
		gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
Loading