
Commit 5fdf4939 authored by Linus Torvalds
Pull gfs2 updates from Bob Peterson:
 "We've only got six GFS2 patches for this merge window.  In patch
  order:

   - Fabian Frederick submitted a nice cleanup that uses the BIT macro
     rather than bit shifting.

   - Andreas Gruenbacher contributed a patch that fixes a long-standing
     annoyance whereby GFS2 warned about dirty pages.

   - Andreas also fixed a problem with the recent extended attribute
     readahead feature.

   - Chao Yu contributed a patch that checks the return code from
     function register_shrinker and reacts accordingly. Previously, it
     was not checked.

   - Andreas Gruenbacher also fixed a problem whereby incore file
     timestamps were forgotten if the file was invalidated. This merely
     moves the assignment inside the inode glock where it belongs.

   - Andreas also fixed a problem where incore timestamps were not
     initialized"

* tag 'gfs2-4.8.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Initialize atime of I_NEW inodes
  gfs2: Update file times after grabbing glock
  gfs2: fix to detect failure of register_shrinker
  gfs2: Fix extended attribute readahead optimization
  gfs2: Remove dirty buffer warning from gfs2_releasepage
  GFS2: use BIT() macro
parents c35bcfd8 332f51d7
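
Editor's note: most of the churn in this diff is Fabian Frederick's mechanical conversion from open-coded shifts to the BIT() macro, which the kernel defines as essentially (1UL << (nr)). The small userspace sketch below approximates that definition only to show the two spellings are equivalent; the _demo enum values are invented stand-ins, not GFS2's buffer-head bits.

#include <stdio.h>

/* Userspace approximation of the kernel's BIT() macro. */
#define BIT(nr) (1UL << (nr))

/* Invented stand-ins for buffer-head state bits such as BH_Dirty/BH_Uptodate. */
enum { BH_Uptodate_demo = 0, BH_Dirty_demo = 1 };

int main(void)
{
	/* Old style: open-coded shifts, as removed by this series. */
	unsigned long old_state = (1 << BH_Dirty_demo) | (1 << BH_Uptodate_demo);

	/* New style: same value, spelled with BIT() for readability. */
	unsigned long new_state = BIT(BH_Dirty_demo) | BIT(BH_Uptodate_demo);

	printf("old=0x%lx new=0x%lx equal=%d\n",
	       old_state, new_state, old_state == new_state);
	return 0;
}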
fs/gfs2/aops.c  +13 −6
@@ -187,7 +187,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
-					     (1 << BH_Dirty)|(1 << BH_Uptodate));
+					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
@@ -1147,6 +1147,16 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
	if (!page_has_buffers(page))
		return 0;

+	/*
+	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
+	 * clean pages might not have had the dirty bit cleared.  Thus, it can
+	 * send actual dirty pages to ->releasepage() via shrink_active_list().
+	 *
+	 * As a workaround, we skip pages that contain dirty buffers below.
+	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
+	 * on dirty buffers like we used to here again.
+	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
@@ -1156,8 +1166,8 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
-		if (buffer_pinned(bh) || buffer_dirty(bh))
-			goto not_possible;
+		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
+			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);
@@ -1180,9 +1190,6 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)

	return try_to_free_buffers(page);

-not_possible: /* Should never happen */
-	WARN_ON(buffer_dirty(bh));
-	WARN_ON(buffer_pinned(bh));
cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
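
A note on the gfs2_releasepage() change above: the old code treated a dirty or pinned buffer as "should never happen" and warned on both; the new code skips dirty buffers without complaint (per the comment about shrink_active_list()) and keeps a warning only for the pinned case. That works because WARN_ON() evaluates to the truth value of its argument, so it can sit directly inside the condition. Below is a minimal userspace mock of that idiom; the WARN_ON here is a stand-in, not the kernel implementation, and the flag helpers are invented.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's WARN_ON(): prints a warning and
 * evaluates to the condition so it can be used inline (GCC
 * statement-expression, as the kernel itself uses). */
#define WARN_ON(cond) \
	({ bool __c = (cond); \
	   if (__c) \
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	   __c; })

static bool buffer_dirty(int flags)  { return flags & 1; }
static bool buffer_pinned(int flags) { return flags & 2; }

static const char *classify(int flags)
{
	/* Mirrors the new gfs2_releasepage() logic: skip dirty buffers
	 * quietly, warn (and still bail out) if a buffer is pinned. */
	if (buffer_dirty(flags) || WARN_ON(buffer_pinned(flags)))
		return "cannot release";
	return "can release";
}

int main(void)
{
	printf("clean:  %s\n", classify(0));
	printf("dirty:  %s\n", classify(1));
	printf("pinned: %s\n", classify(2));
	return 0;
}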
fs/gfs2/bmap.c  +3 −3
@@ -82,8 +82,8 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
	}

	if (!page_has_buffers(page))
-		create_empty_buffers(page, 1 << inode->i_blkbits,
-				     (1 << BH_Uptodate));
+		create_empty_buffers(page, BIT(inode->i_blkbits),
+				     BIT(BH_Uptodate));

	bh = page_buffers(page);

@@ -690,7 +690,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
	BUG_ON(!dblock);
	BUG_ON(!new);

-	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
+	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
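
The gfs2_extent_map() hunk above is another mechanical conversion, but the expression deserves a gloss: when create is 0 the requested size is BIT(i_blkbits + 5), i.e. 32 blocks' worth of bytes, so gfs2_block_map() can report an extent of up to 32 blocks, and *extlen is recovered by shifting b_size back down. A quick illustrative calculation follows; the 4 KiB block size is just an example, and BIT() is the same userspace stand-in as before.

#include <stdio.h>

#define BIT(nr) (1UL << (nr))   /* userspace stand-in for the kernel macro */

int main(void)
{
	unsigned int i_blkbits = 12;   /* example: 4 KiB filesystem blocks */
	int create = 0;                /* read-side mapping, not allocating */

	unsigned long b_size = BIT(i_blkbits + (create ? 0 : 5));
	unsigned long extlen = b_size >> i_blkbits;

	/* Prints 131072 bytes and 32 blocks for the 4 KiB example. */
	printf("b_size=%lu bytes, extlen=%lu blocks\n", b_size, extlen);
	return 0;
}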
fs/gfs2/dir.c  +10 −10
@@ -351,7 +351,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
	if (hc)
		return hc;

-	hsize = 1 << ip->i_depth;
+	hsize = BIT(ip->i_depth);
	hsize *= sizeof(__be64);
	if (hsize != i_size_read(&ip->i_inode)) {
		gfs2_consist_inode(ip);
@@ -819,8 +819,8 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,

	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
-		unsigned hsize = 1 << ip->i_depth;
-		unsigned index;
+		unsigned int hsize = BIT(ip->i_depth);
+		unsigned int index;
		u64 ln;
		if (hsize * sizeof(u64) != i_size_read(inode)) {
			gfs2_consist_inode(ip);
@@ -932,7 +932,7 @@ static int dir_make_exhash(struct inode *inode)
		return -ENOSPC;
	bn = bh->b_blocknr;

-	gfs2_assert(sdp, dip->i_entries < (1 << 16));
+	gfs2_assert(sdp, dip->i_entries < BIT(16));
	leaf->lf_entries = cpu_to_be16(dip->i_entries);

	/*  Copy dirents  */
@@ -1041,7 +1041,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
	bn = nbh->b_blocknr;

	/*  Compute the start and len of leaf pointers in the hash table.  */
-	len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
+	len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
	if (!half_len) {
		pr_warn("i_depth %u lf_depth %u index %u\n",
@@ -1163,7 +1163,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
	int x;
	int error = 0;

-	hsize = 1 << dip->i_depth;
+	hsize = BIT(dip->i_depth);
	hsize_bytes = hsize * sizeof(__be64);

	hc = gfs2_dir_get_hash_table(dip);
@@ -1539,7 +1539,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
	int error = 0;
	unsigned depth = 0;

-	hsize = 1 << dip->i_depth;
+	hsize = BIT(dip->i_depth);
	hash = gfs2_dir_offset2hash(ctx->pos);
	index = hash >> (32 - dip->i_depth);

@@ -1558,7 +1558,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
		if (error)
			break;

-		len = 1 << (dip->i_depth - depth);
+		len = BIT(dip->i_depth - depth);
		index = (index & ~(len - 1)) + len;
	}

@@ -2113,7 +2113,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
	u64 leaf_no;
	int error = 0, last;

-	hsize = 1 << dip->i_depth;
+	hsize = BIT(dip->i_depth);

	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
@@ -2126,7 +2126,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
			if (error)
				goto out;
			leaf = (struct gfs2_leaf *)bh->b_data;
-			len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
+			len = BIT(dip->i_depth - be16_to_cpu(leaf->lf_depth));

			next_index = (index & ~(len - 1)) + len;
			last = ((next_index >= hsize) ? 1 : 0);
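
For context on the repeated hsize = BIT(ip->i_depth) pattern in dir.c: an exhash directory keeps 2^i_depth leaf pointers, one __be64 per hash-table slot, and the gfs2_consist_inode() checks in the hunks above verify that the table size matches the directory inode's i_size. A tiny illustrative calculation, with the depth value made up:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))   /* userspace stand-in for the kernel macro */

int main(void)
{
	unsigned int i_depth = 10;                       /* example depth */
	unsigned long hsize = BIT(i_depth);              /* 1024 hash-table slots */
	unsigned long bytes = hsize * sizeof(uint64_t);  /* one __be64 pointer per slot */

	/* The directory inode's i_size is expected to equal 'bytes' exactly. */
	printf("depth=%u slots=%lu table=%lu bytes\n", i_depth, hsize, bytes);
	return 0;
}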
fs/gfs2/file.c  +3 −3
@@ -395,9 +395,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)

	sb_start_pagefault(inode->i_sb);

-	/* Update file times before taking page lock */
-	file_update_time(vma->vm_file);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;
@@ -409,6 +406,9 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	if (ret)
		goto out_uninit;

+	/* Update file times before taking page lock */
+	file_update_time(vma->vm_file);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);
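
The two file.c hunks are the timestamp fix described in the pull message: file_update_time() moves from before the glock acquisition to after gfs2_glock_nq() succeeds, so the in-core time update is made while the inode glock is held and cannot be thrown away by a cross-node invalidation that sneaks in beforehand. A minimal sketch of the corrected ordering, with every helper mocked up purely for illustration (none of these are GFS2 functions):

#include <stdio.h>

/* Hypothetical stand-ins for the cluster lock and timestamp update. */
static void glock_acquire(void)    { puts("glock acquired (inode stable on this node)"); }
static void glock_release(void)    { puts("glock released"); }
static void update_file_time(void) { puts("in-core timestamps updated"); }
static void dirty_the_page(void)   { puts("page made writable"); }

static void page_mkwrite_fixed(void)
{
	/* Fixed ordering from the patch: take the glock first, then update
	 * the times, so the update cannot be lost to an invalidation that
	 * races in before the lock is held. */
	glock_acquire();
	update_file_time();
	dirty_the_page();
	glock_release();
}

int main(void)
{
	page_mkwrite_fixed();
	return 0;
}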

fs/gfs2/glock.c  +8 −2
@@ -69,7 +69,7 @@ static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
-#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
+#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
@@ -1781,7 +1781,13 @@ int __init gfs2_glock_init(void)
		return -ENOMEM;
	}

-	register_shrinker(&glock_shrinker);
+	ret = register_shrinker(&glock_shrinker);
+	if (ret) {
+		destroy_workqueue(gfs2_delete_workqueue);
+		destroy_workqueue(glock_workqueue);
+		rhashtable_destroy(&gl_hash_table);
+		return ret;
+	}

	return 0;
}
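
The glock.c change is Chao Yu's patch from the pull message: gfs2_glock_init() now checks the return value of register_shrinker() and, on failure, tears down the two workqueues and the glock rhashtable it had already set up before propagating the error. The same unwinding is often written with goto labels when an init path has several failure points; the userspace sketch below shows that idiom under invented names (the helpers and the simulated failure are not from GFS2), with cleanup running in reverse order of setup.

#include <errno.h>
#include <stdio.h>

/* Invented helpers standing in for hash-table, workqueue and shrinker
 * setup; the shrinker registration is made to fail on purpose. */
static int  table_init(void)        { puts("hash table initialized"); return 0; }
static void table_destroy(void)     { puts("hash table destroyed"); }
static int  workqueue_create(void)  { puts("workqueue created"); return 0; }
static void workqueue_destroy(void) { puts("workqueue destroyed"); }
static int  shrinker_register(void) { puts("shrinker registration fails"); return -ENOMEM; }

/* Goto-based unwinding: each failure jumps to the label that undoes
 * everything set up so far, newest resource first. */
static int demo_init(void)
{
	int ret;

	ret = table_init();
	if (ret)
		goto out;

	ret = workqueue_create();
	if (ret)
		goto out_table;

	ret = shrinker_register();
	if (ret)
		goto out_wq;

	return 0;

out_wq:
	workqueue_destroy();
out_table:
	table_destroy();
out:
	return ret;
}

int main(void)
{
	printf("demo_init() returned %d\n", demo_init());
	return 0;
}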