Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 79af3133 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes:
  GFS2: remove dcache entries for remote deleted inodes
  GFS2: Fix incorrent statfs consistency check
  GFS2: Don't put unlikely reclaim candidates on the reclaim list.
  GFS2: Don't try and dealloc own inode
  GFS2: Fix panic in glock memory shrinker
  GFS2: keep statfs info in sync on grows
  GFS2: Shrink the shrinker
parents e1ca4aed b94a170e
Loading
Loading
Loading
Loading
+39 −0
Original line number Diff line number Diff line
@@ -624,6 +624,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	int alloc_required;
	int error = 0;
@@ -637,6 +638,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
@@ -667,6 +676,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -712,6 +723,10 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
		gfs2_alloc_put(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
@@ -725,14 +740,21 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
@@ -741,6 +763,13 @@ static void adjust_fs_space(struct inode *inode)
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
@@ -763,6 +792,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
@@ -794,6 +824,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
@@ -823,6 +857,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
@@ -865,6 +900,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
+91 −47
Original line number Diff line number Diff line
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
@@ -167,12 +168,32 @@ static void glock_free(struct gfs2_glock *gl)
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - test whether it is acceptable to demote (unlock) a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock may be demoted, 0 otherwise
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *ops = gl->gl_ops;

	/* Nothing to demote if already unlocked; never demote a held glock. */
	if (gl->gl_state == LM_ST_UNLOCKED || !list_empty(&gl->gl_holders))
		return 0;

	/* Give the glock type a chance to veto, otherwise allow it. */
	return ops->go_demote_ok ? ops->go_demote_ok(gl) : 1;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
@@ -181,14 +202,34 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping: unlike
 * gfs2_glock_put(), it never frees the glock, and dropping the final
 * reference here is treated as a bug. After the drop, the glock is
 * considered for addition to the LRU reclaim list.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	/* Caller guarantees it holds another ref, so reaching zero is a bug. */
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
@@ -214,9 +255,9 @@ int gfs2_glock_put(struct gfs2_glock *gl)
		rv = 1;
		goto out;
	}
	/* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
	if (atomic_read(&gl->gl_ref) == 2)
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
@@ -398,7 +439,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
			gfs2_glock_put_nolock(gl);
	}

	gl->gl_state = new_state;
@@ -633,12 +674,35 @@ __acquires(&gl->gl_spin)
out_sched:
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
		gfs2_glock_put_nolock(gl);
out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	goto out;
}

/*
 * delete_work_func - prune dcache entries for a remotely deleted inode
 * @work: the gl_delete work_struct embedded in a struct gfs2_glock
 *
 * Runs on gfs2_delete_workqueue. Snapshots the inode block number under
 * gl_spin (gl_object may be cleared concurrently), then looks the inode
 * up in the VFS inode cache and drops its dentry aliases so the final
 * iput/deallocation is not delayed by cached dentries. Finally drops
 * the glock reference taken when the work was queued (see
 * iopen_go_callback).
 */
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = NULL;
	struct inode *inode;
	u64 no_addr = 0;

	/* Read i_no_addr under gl_spin; only the address is used afterwards. */
	spin_lock(&gl->gl_spin);
	ip = (struct gfs2_inode *)gl->gl_object;
	if (ip)
		no_addr = ip->i_no_addr;
	spin_unlock(&gl->gl_spin);
	if (ip) {
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
		if (inode) {
			d_prune_aliases(inode);
			iput(inode); /* drop the ref gfs2_ilookup took */
		}
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
@@ -717,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
@@ -858,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

@@ -1274,33 +1341,12 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
		gfs2_glock_put(gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	int got_ref = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
@@ -1315,38 +1361,30 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Check if glock is about to be freed */
		if (atomic_read(&gl->gl_ref) == 0)
			continue;

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			got_ref = 1;
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			spin_unlock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
				if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
					gfs2_glock_put(gl);
				got_ref = 0;
			}
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
			spin_lock(&lru_lock);
			if (may_demote)
			continue;
		}
		if (list_empty(&gl->gl_lru) &&
		    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
		if (got_ref) {
			spin_unlock(&lru_lock);
			gfs2_glock_put(gl);
			spin_lock(&lru_lock);
			got_ref = 0;
		}
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
@@ -1727,6 +1765,11 @@ int __init gfs2_glock_init(void)
	glock_workqueue = create_workqueue("glock_workqueue");
	if (IS_ERR(glock_workqueue))
		return PTR_ERR(glock_workqueue);
	gfs2_delete_workqueue = create_workqueue("delete_workqueue");
	if (IS_ERR(gfs2_delete_workqueue)) {
		destroy_workqueue(glock_workqueue);
		return PTR_ERR(gfs2_delete_workqueue);
	}

	register_shrinker(&glock_shrinker);

@@ -1737,6 +1780,7 @@ void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
+3 −0
Original line number Diff line number Diff line
@@ -143,6 +143,7 @@ struct lm_lockops {

#define GLR_TRYFAILED		13

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
@@ -191,6 +192,8 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
int gfs2_glock_get(struct gfs2_sbd *sdp,
		   u64 number, const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
void gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put_nolock(struct gfs2_glock *gl);
int gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh);
+21 −0
Original line number Diff line number Diff line
@@ -323,6 +323,7 @@ static void trans_go_sync(struct gfs2_glock *gl)

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		flush_workqueue(gfs2_delete_workqueue);
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
@@ -372,6 +373,25 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this. Fires on a remote demote
 * request for an iopen glock: if another node is asking us to drop a
 * shared iopen lock for a live user inode, queue delete_work_func to
 * prune the inode's dentries so the inode can be released.
 */
static void iopen_go_callback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED &&
	    ip && test_bit(GIF_USER, &ip->i_flags)) {
		/* Take a ref for the queued work (delete_work_func puts it).
		   If the work was already queued, drop the extra ref with
		   the _nolock variant since gl_spin is held here. */
		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put_nolock(gl);
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};
@@ -406,6 +426,7 @@ const struct gfs2_glock_operations gfs2_trans_glops = {

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
+2 −0
Original line number Diff line number Diff line
@@ -159,6 +159,7 @@ struct gfs2_glock_operations {
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
	void (*go_callback) (struct gfs2_glock *gl);
	const int go_type;
	const unsigned long go_min_hold_time;
};
@@ -228,6 +229,7 @@ struct gfs2_glock {
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	struct delayed_work gl_work;
	struct work_struct gl_delete;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
Loading