
Commit 10e5dce0 authored by Evgeniy Dushistov, committed by Linus Torvalds

[PATCH] ufs: truncate should allocate block for last byte



This patch fixes buggy behaviour of UFS in the following kind of scenario:
open(, O_TRUNC...)
ftruncate(, 1024)
ftruncate(, 0)

Such a scenario causes ufs_panic and a read-only remount.  This happens
because, according to the specification, UFS should always allocate a
block for the last byte, and many parts of our implementation rely on
this, but `ufs_truncate' doesn't take care of it.
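
For illustration, a minimal userspace reproducer of this scenario might
look like the sketch below (the mount point and file name are
hypothetical; the file must live on a read-write mounted UFS volume):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path: any file on a rw-mounted UFS filesystem */
	int fd = open("/mnt/ufs/testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* grow the file, then shrink it back to zero */
	if (ftruncate(fd, 1024) < 0)
		perror("ftruncate(1024)");
	if (ftruncate(fd, 0) < 0)
		perror("ftruncate(0)");
	close(fd);
	return 0;
}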

To make it possible to return an error code and to know the old size,
this patch removes `truncate' from the ufs inode_operations and uses the
`setattr' method to call ufs_truncate.
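
For context, the sketch below shows roughly how a size change reaches the
new method in the 2.6-era VFS (a simplified sketch, not the actual
do_truncate() source): the requested size is packed into a struct iattr
and notify_change() dispatches it to inode->i_op->setattr, i.e. to
ufs_setattr(), while inode->i_size still holds the old value.

#include <linux/fs.h>

/*
 * simplified sketch only -- the real do_truncate() also takes i_mutex
 * and receives the time attributes from its caller
 */
static int sketch_truncate_path(struct dentry *dentry, loff_t length)
{
	struct iattr newattrs;

	if (length < 0)
		return -EINVAL;

	newattrs.ia_size  = length;
	newattrs.ia_valid = ATTR_SIZE | ATTR_MTIME | ATTR_CTIME;

	/* for UFS this ends up in ufs_setattr(), which can record the
	 * old i_size before vmtruncate() updates it */
	return notify_change(dentry, &newattrs);
}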

Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent eb28931e
+2 −46
@@ -217,48 +217,6 @@ void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
	return;
}

static struct page *ufs_get_locked_page(struct address_space *mapping,
				  unsigned long index)
{
	struct page *page;

try_again:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = read_cache_page(mapping, index,
				       (filler_t*)mapping->a_ops->readpage,
				       NULL);
		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_cache_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			page_cache_release(page);

			printk(KERN_ERR "ufs_change_blocknr: "
			       "can not read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);

			page = ERR_PTR(-EIO);
			goto out;
		}
	}

	if (unlikely(!page->mapping || !page_has_buffers(page))) {
		unlock_page(page);
		page_cache_release(page);
		goto try_again;/*we really need these buffers*/
	}
out:
	return page;
}

/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
@@ -311,10 +269,8 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,

		set_page_dirty(page);

		if (likely(cur_index != index)) {
			unlock_page(page);
			page_cache_release(page);
		}
		if (likely(cur_index != index))
			ufs_put_locked_page(page);
 	}
	UFSD("EXIT\n");
}
+0 −4
@@ -60,7 +60,3 @@ const struct file_operations ufs_file_operations = {
	.fsync		= ufs_sync_file,
	.sendfile	= generic_file_sendfile,
};

struct inode_operations ufs_file_inode_operations = {
	.truncate	= ufs_truncate,
};
+5 −2
@@ -843,14 +843,17 @@ int ufs_sync_inode (struct inode *inode)

void ufs_delete_inode (struct inode * inode)
{
	loff_t old_i_size;

	truncate_inode_pages(&inode->i_data, 0);
	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
	lock_kernel();
	mark_inode_dirty(inode);
	ufs_update_inode(inode, IS_SYNC(inode));
	old_i_size = inode->i_size;
	inode->i_size = 0;
	if (inode->i_blocks)
		ufs_truncate (inode);
	if (inode->i_blocks && ufs_truncate(inode, old_i_size))
		ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
	ufs_free_inode (inode);
	unlock_kernel();
}
+135 −13
@@ -370,21 +370,94 @@ static int ufs_trunc_tindirect (struct inode * inode)
	return retry;
}

void ufs_truncate (struct inode * inode)
static int ufs_alloc_lastblock(struct inode *inode)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	int retry;
	unsigned lastfrag, i, end;
	struct page *lastpage;
	struct buffer_head *bh;

	lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag) {
		ufsi->i_lastfrag = 0;
		goto out;
	}
	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh)) {
		err = ufs_getfrag_block(inode, lastfrag, bh, 1);

		if (unlikely(err))
			goto out_unlock;

		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			/*
			 * we do not zero the fragment here: if it was
			 * mapped to a hole, it already contains zeroes
			 */
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			set_page_dirty(lastpage);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

int ufs_truncate(struct inode *inode, loff_t old_i_size)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int retry, err = 0;
	
	UFSD("ENTER\n");
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
		return;
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
		return -EPERM;

	if (inode->i_size > old_i_size) {
		/*
		 * if we expand the file, we first of all have to take
		 * care of allocating the block for the last byte
		 */
		err = ufs_alloc_lastblock(inode);

		if (err) {
			i_size_write(inode, old_i_size);
			goto out;
		}
		/*
		 * bail out here: we are expanding the file, so there
		 * are no blocks to free and no partial page to zero
		 */
		lock_kernel();
		goto almost_end;
	}

	block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);

@@ -404,9 +477,58 @@ void ufs_truncate (struct inode * inode)
		yield();
	}

	if (inode->i_size < old_i_size) {
		/*
		 * now we should have enough space
		 * to allocate the block for the last byte
		 */
		err = ufs_alloc_lastblock(inode);
		if (err)
			/*
			 * apparently we still have no space, but the
			 * file has already been truncated
			 */
			inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize;
	}
almost_end:
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	unlock_kernel();
	mark_inode_dirty(inode);
	UFSD("EXIT\n");
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}


/*
 * We don't define our `inode->i_op->truncate' and instead call
 * ufs_truncate() from here, because:
 * - there is no way to know the old size
 * - there is no way to inform the user about an error, if it happens
 *   in `truncate'
 */
static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE &&
	    attr->ia_size != i_size_read(inode)) {
		loff_t old_i_size = inode->i_size;
		error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
		error = ufs_truncate(inode, old_i_size);
		if (error)
			return error;
	}
	return inode_setattr(inode, attr);
}

struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};
+54 −0
@@ -233,3 +233,57 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
	else
		ufsi->i_u1.i_data[0] = fs32;
}

/**
 * ufs_get_locked_page() - locate, pin and lock a pagecache page; if it
 * does not exist, read it from disk.
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Locates the desired pagecache page, reading it from disk if it is not
 * already present, locks it, increments its reference count and returns
 * its address.
 *
 */

struct page *ufs_get_locked_page(struct address_space *mapping,
				 pgoff_t index)
{
	struct page *page;

try_again:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = read_cache_page(mapping, index,
				       (filler_t*)mapping->a_ops->readpage,
				       NULL);
		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_cache_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			page_cache_release(page);

			printk(KERN_ERR "ufs_change_blocknr: "
			       "can not read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);

			page = ERR_PTR(-EIO);
			goto out;
		}
	}

	if (unlikely(!page->mapping || !page_has_buffers(page))) {
		unlock_page(page);
		page_cache_release(page);
		goto try_again;/*we really need these buffers*/
	}
out:
	return page;
}