Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2c22b337 authored by Jan Kara, committed by Al Viro
Browse files

nilfs2: Convert to new freezing mechanism



We change nilfs_page_mkwrite() to provide proper freeze protection for
writeable page faults (we must wait for frozen filesystem even if the
page is fully mapped).

We remove all vfs_check_frozen() checks since they are now handled by
the generic code.

CC: linux-nilfs@vger.kernel.org
CC: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent fbf8fb76
Loading
Loading
Loading
Loading
+11 −7
Original line number Original line Diff line number Diff line
@@ -69,16 +69,18 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct page *page = vmf->page;
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct nilfs_transaction_info ti;
	struct nilfs_transaction_info ti;
	int ret;
	int ret = 0;


	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS; /* -ENOSPC */
		return VM_FAULT_SIGBUS; /* -ENOSPC */


	sb_start_pagefault(inode->i_sb);
	lock_page(page);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		unlock_page(page);
		return VM_FAULT_NOPAGE; /* make the VM retry the fault */
		ret = -EFAULT;	/* make the VM retry the fault */
		goto out;
	}
	}


	/*
	/*
@@ -112,19 +114,21 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* never returns -ENOMEM, but may return -ENOSPC */
	/* never returns -ENOMEM, but may return -ENOSPC */
	if (unlikely(ret))
	if (unlikely(ret))
		return VM_FAULT_SIGBUS;
		goto out;


	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	ret = __block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret != VM_FAULT_LOCKED) {
	if (ret) {
		nilfs_transaction_abort(inode->i_sb);
		nilfs_transaction_abort(inode->i_sb);
		return ret;
		goto out;
	}
	}
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);
	nilfs_transaction_commit(inode->i_sb);


 mapped:
 mapped:
	wait_on_page_writeback(page);
	wait_on_page_writeback(page);
	return VM_FAULT_LOCKED;
 out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
}


static const struct vm_operations_struct nilfs_file_vm_ops = {
static const struct vm_operations_struct nilfs_file_vm_ops = {
+0 −2
Original line number Original line Diff line number Diff line
@@ -660,8 +660,6 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
		goto out_free;
		goto out_free;
	}
	}


	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
	ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
	if (ret < 0)
	if (ret < 0)
		printk(KERN_ERR "NILFS: GC failed during preparation: "
		printk(KERN_ERR "NILFS: GC failed during preparation: "
+4 −1
Original line number Original line Diff line number Diff line
@@ -189,7 +189,7 @@ int nilfs_transaction_begin(struct super_block *sb,
	if (ret > 0)
	if (ret > 0)
		return 0;
		return 0;


	vfs_check_frozen(sb, SB_FREEZE_WRITE);
	sb_start_intwrite(sb);


	nilfs = sb->s_fs_info;
	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	down_read(&nilfs->ns_segctor_sem);
@@ -205,6 +205,7 @@ int nilfs_transaction_begin(struct super_block *sb,
	current->journal_info = ti->ti_save;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
	return ret;
}
}


@@ -246,6 +247,7 @@ int nilfs_transaction_commit(struct super_block *sb)
		err = nilfs_construct_segment(sb);
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
	return err;
}
}


@@ -264,6 +266,7 @@ void nilfs_transaction_abort(struct super_block *sb)
	current->journal_info = ti->ti_save;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}
}


void nilfs_relax_pressure_in_lock(struct super_block *sb)
void nilfs_relax_pressure_in_lock(struct super_block *sb)