
Commit 60c9b274 authored by Linus Torvalds
* git://oss.sgi.com:8090/xfs/xfs-2.6:
  [XFS] Add lockdep support for XFS
  [XFS] Fix race in xfs_write() b/w dmapi callout and direct I/O checks.
  [XFS] Get rid of redundant "required" in msg.
  [XFS] Export via a function xfs_buftarg_list for use by kdb/xfsidbg.
  [XFS] Remove unused ilen variable and references.
  [XFS] Fix to prevent the notorious 'NULL files' problem after a crash.
  [XFS] Fix race condition in xfs_write().
  [XFS] Fix uquota and oquota enforcement problems.
  [XFS] propagate return codes from flush routines
  [XFS] Fix quotaon syscall failures for group enforcement requests.
  [XFS] Invalidate quotacheck when mounting without a quota type.
  [XFS] reducing the number of random number functions.
  [XFS] remove more misc. unused args
  [XFS] the "aendp" arg to xfs_dir2_data_freescan is always NULL, remove it.
  [XFS] The last argument "lsn" of xfs_trans_commit() is always called with NULL.
parents 4750def5 f7c66ce3
fs/xfs/linux-2.6/mrlock.h +12 −0
@@ -43,6 +43,18 @@ static inline void mrupdate(mrlock_t *mrp)
	mrp->mr_writer = 1;
}

+static inline void mraccess_nested(mrlock_t *mrp, int subclass)
+{
+	down_read_nested(&mrp->mr_lock, subclass);
+}
+
+static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
+{
+	down_write_nested(&mrp->mr_lock, subclass);
+	mrp->mr_writer = 1;
+}


static inline int mrtryaccess(mrlock_t *mrp)
{
	return down_read_trylock(&mrp->mr_lock);
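
Why the _nested variants exist: lockdep collapses all rw_semaphores initialised at the same site into one lock class, so code that legitimately holds two mrlocks at once (say, a parent and a child inode) looks like a self-deadlock unless the second acquisition carries a distinct subclass. A minimal sketch of the pattern, with hypothetical subclass names (MRLOCK_PARENT/MRLOCK_CHILD are illustrative, not from this commit):

#include <linux/rwsem.h>

enum { MRLOCK_CHILD = 0, MRLOCK_PARENT = 1 };

static void lock_parent_and_child(struct rw_semaphore *parent,
				  struct rw_semaphore *child)
{
	/* distinct subclasses keep lockdep from reporting a false deadlock */
	down_write_nested(parent, MRLOCK_PARENT);
	down_write_nested(child, MRLOCK_CHILD);

	/* ... work on both inodes ... */

	up_write(child);
	up_write(parent);
}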
fs/xfs/linux-2.6/xfs_aops.c +77 −12
@@ -140,10 +140,47 @@ xfs_destroy_ioend(
	mempool_free(ioend, xfs_ioend_pool);
}

+/*
+ * Update on-disk file size now that data has been written to disk.
+ * The current in-memory file size is i_size.  If a write is beyond
+ * eof io_new_size will be the intended file size until i_size is
+ * updated.  If this write does not extend all the way to the valid
+ * file size then restrict this update to the end of the write.
+ */
+STATIC void
+xfs_setfilesize(
+	xfs_ioend_t		*ioend)
+{
+	xfs_inode_t		*ip;
+	xfs_fsize_t		isize;
+	xfs_fsize_t		bsize;
+
+	ip = xfs_vtoi(ioend->io_vnode);
+
+	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+	ASSERT(ioend->io_type != IOMAP_READ);
+
+	if (unlikely(ioend->io_error))
+		return;
+
+	bsize = ioend->io_offset + ioend->io_size;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
+	isize = MIN(isize, bsize);
+
+	if (ip->i_d.di_size < isize) {
+		ip->i_d.di_size = isize;
+		ip->i_update_core = 1;
+		ip->i_update_size = 1;
+	}
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
/*
 * Buffered IO write completion for delayed allocate extents.
- * TODO: Update ondisk isize now that we know the file data
- * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
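
To make the clamp in xfs_setfilesize() concrete, a self-contained userspace sketch with made-up numbers: the file currently ends at 4096, an in-flight write has promised to extend it to io_new_size = 16384, but this particular completion only covers bytes 8192..12288, so the on-disk size may advance only to 12288 (never past data that has actually reached the disk).

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	long long i_size = 4096;	/* current in-memory EOF */
	long long io_new_size = 16384;	/* intended size of the whole write */
	long long io_offset = 8192, io_size = 4096;

	long long bsize = io_offset + io_size;	/* end of this I/O: 12288 */
	long long isize = MAX(i_size, io_new_size);	/* candidate: 16384 */
	isize = MIN(isize, bsize);	/* clamp to written data: 12288 */

	printf("update on-disk size to %lld\n", isize);	/* prints 12288 */
	return 0;
}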
@@ -152,6 +189,7 @@ xfs_end_bio_delalloc(
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

+	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

@@ -165,6 +203,7 @@ xfs_end_bio_written(
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

+	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

@@ -184,8 +223,23 @@ xfs_end_bio_unwritten(
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

-	if (likely(!ioend->io_error))
+	if (likely(!ioend->io_error)) {
		bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
+		xfs_setfilesize(ioend);
+	}
	xfs_destroy_ioend(ioend);
}

+/*
+ * IO read completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_read(
+	struct work_struct	*work)
+{
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
+
+	xfs_destroy_ioend(ioend);
+}

@@ -224,6 +278,8 @@ xfs_alloc_ioend(
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
+	else if (type == IOMAP_READ)
+		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
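
The hunk above wires a fourth completion handler into the same deferred-work scheme; a standalone sketch of that pattern, with hypothetical names (my_ioend and my_end_read are not XFS identifiers): INIT_WORK() binds the handler at allocation time, and finishing the I/O merely queues the work to run later in process context.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ioend {
	struct work_struct	io_work;
};

static void my_end_read(struct work_struct *work)
{
	struct my_ioend *ioend = container_of(work, struct my_ioend, io_work);

	/* read completion has no file size to update; just tear down */
	kfree(ioend);
}

static struct my_ioend *my_alloc_ioend(void)
{
	struct my_ioend *ioend = kmalloc(sizeof(*ioend), GFP_NOFS);

	/* the handler is chosen once, when the ioend is allocated */
	if (ioend)
		INIT_WORK(&ioend->io_work, my_end_read);
	return ioend;
}

/* called from bio completion context */
static void my_finish_ioend(struct my_ioend *ioend)
{
	schedule_work(&ioend->io_work);	/* runs my_end_read() later */
}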

@@ -913,7 +969,7 @@ xfs_page_state_convert(
	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = -1;
-	type = 0;
+	type = IOMAP_READ;

	/* TODO: cleanup count and page_dirty */

@@ -999,7 +1055,7 @@ xfs_page_state_convert(
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
-			if (!iomap_valid || type != 0) {
+			if (!iomap_valid || type != IOMAP_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
@@ -1010,7 +1066,7 @@ xfs_page_state_convert(
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

-			type = 0;
+			type = IOMAP_READ;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
@@ -1356,12 +1412,21 @@ xfs_end_io_direct(
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
-	if (private && size > 0) {
+	ioend->io_offset = offset;
+	ioend->io_size = size;
+	if (ioend->io_type == IOMAP_READ) {
+		xfs_finish_ioend(ioend);
+	} else if (private && size > 0) {
		xfs_finish_ioend(ioend);
	} else {
-		xfs_destroy_ioend(ioend);
+		/*
+		 * A direct I/O write ioend starts its life in unwritten
+		 * state in case it maps an unwritten extent.  This write
+		 * didn't map an unwritten extent, so switch its completion
+		 * handler.
+		 */
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		xfs_finish_ioend(ioend);
	}

	/*
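
Read past the diff markers, the rewritten completion makes a three-way choice; this is a condensed restatement of the new code above, not additional logic (here `private` is assumed to be the cookie the block layer hands back when the write mapped an unwritten extent):

	if (ioend->io_type == IOMAP_READ) {
		/* direct read: nothing to convert or size-update */
		xfs_finish_ioend(ioend);
	} else if (private && size > 0) {
		/* write into an unwritten extent: keep the unwritten
		 * handler so the extent gets converted at completion */
		xfs_finish_ioend(ioend);
	} else {
		/* write over already-written blocks: swap in the plain
		 * written handler before queueing the completion */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend);
	}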
@@ -1392,15 +1457,15 @@ xfs_vm_direct_IO(
	if (error)
		return -error;

-	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
-
	if (rw == WRITE) {
+		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
+		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
fs/xfs/linux-2.6/xfs_buf.c +9 −1
@@ -1426,7 +1426,7 @@ xfs_free_bufhash(
/*
 *	buftarg list for delwrite queue processing
 */
-LIST_HEAD(xfs_buftarg_list);
+static LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
@@ -1867,3 +1867,11 @@ xfs_buf_terminate(void)
	ktrace_free(xfs_buf_trace_buf);
#endif
}

+#ifdef CONFIG_KDB_MODULES
+struct list_head *
+xfs_get_buftarg_list(void)
+{
+	return &xfs_buftarg_list;
+}
+#endif
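
With xfs_buftarg_list now static, a debugger module reaches the list only through the new accessor. A hypothetical consumer might look like this (kdb_walk_buftargs is illustrative and not part of this commit):

#ifdef CONFIG_KDB_MODULES
#include <linux/list.h>

extern struct list_head *xfs_get_buftarg_list(void);

static void kdb_walk_buftargs(void)
{
	struct list_head *head = xfs_get_buftarg_list();
	struct list_head *pos;

	/* iterate the delwri buftarg list without exporting the symbol */
	list_for_each(pos, head) {
		/* ... inspect the xfs_buftarg_t that embeds 'pos' ... */
	}
}
#endif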
fs/xfs/linux-2.6/xfs_buf.h +3 −0
@@ -411,6 +411,9 @@ extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
+#ifdef CONFIG_KDB_MODULES
+extern struct list_head *xfs_get_buftarg_list(void);
+#endif

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
fs/xfs/linux-2.6/xfs_fs_subr.c +14 −7
@@ -35,7 +35,7 @@ fs_tosspages(
		truncate_inode_pages(ip->i_mapping, first);
}

-void
+int
fs_flushinval_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
@@ -44,13 +44,16 @@ fs_flushinval_pages(
{
	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
	struct inode	*ip = vn_to_inode(vp);
+	int		ret = 0;

	if (VN_CACHED(vp)) {
		if (VN_TRUNC(vp))
			VUNTRUNCATE(vp);
-		filemap_write_and_wait(ip->i_mapping);
-		truncate_inode_pages(ip->i_mapping, first);
+		ret = filemap_write_and_wait(ip->i_mapping);
+		if (!ret)
+			truncate_inode_pages(ip->i_mapping, first);
	}
+	return ret;
}

int
@@ -63,14 +66,18 @@ fs_flush_pages(
{
	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
	struct inode	*ip = vn_to_inode(vp);
+	int		ret = 0;
+	int		ret2;

	if (VN_DIRTY(vp)) {
		if (VN_TRUNC(vp))
			VUNTRUNCATE(vp);
-		filemap_fdatawrite(ip->i_mapping);
+		ret = filemap_fdatawrite(ip->i_mapping);
		if (flags & XFS_B_ASYNC)
-			return 0;
-		filemap_fdatawait(ip->i_mapping);
+			return ret;
+		ret2 = filemap_fdatawait(ip->i_mapping);
+		if (!ret)
+			ret = ret2;
	}
-	return 0;
+	return ret;
}
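
Both flush routines now follow the same "first error wins" convention: the write-back error is kept even if the later wait step also fails. A tiny self-contained sketch of that pattern (write_step/wait_step stand in for the filemap calls; the -5 return mimics an -EIO-style failure):

#include <stdio.h>

static int write_step(void) { return 0; }	/* e.g. filemap_fdatawrite() */
static int wait_step(void)  { return -5; }	/* e.g. filemap_fdatawait() */

static int flush(void)
{
	int ret = write_step();
	int ret2 = wait_step();

	if (!ret)		/* keep the earliest failure, if any */
		ret = ret2;
	return ret;
}

int main(void)
{
	printf("flush() = %d\n", flush());	/* prints -5 here */
	return 0;
}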