
Commit 0b61a2ba authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (62 commits)
  [XFS] add __init/__exit mark to specific init/cleanup functions
  [XFS] Fix oops in xfs_file_readdir()
  [XFS] kill xfs_root
  [XFS] keep i_nlink updated and use proper accessors
  [XFS] stop updating inode->i_blocks
  [XFS] Make xfs_ail_check check less by default
  [XFS] Move AIL pushing into its own thread
  [XFS] use generic_permission
  [XFS] stop re-checking permissions in xfs_swapext
  [XFS] clean up xfs_swapext
  [XFS] remove permission check from xfs_change_file_space
  [XFS] prevent panic during log recovery due to bogus op_hdr length
  [XFS] Cleanup various fid related bits:
  [XFS] Fix xfs_lowbit64
  [XFS] Remove CFORK macros and use code directly in IFORK and DFORK macros.
  [XFS] kill superfluous buffer locking (2nd attempt)
  [XFS] Use kernel-supplied "roundup_pow_of_two" for simplicity
  [XFS] Remove the BPCSHIFT and NB* based macros from XFS.
  [XFS] Remove bogus assert
  [XFS] optimize XFS_IS_REALTIME_INODE w/o realtime config
  ...
parents a13ff0bb de2eeea6
+0 −1
@@ -70,7 +70,6 @@ xfs-y += xfs_alloc.o \
 				   xfs_iget.o \
 				   xfs_inode.o \
 				   xfs_inode_item.o \
-				   xfs_iocore.o \
 				   xfs_iomap.o \
 				   xfs_itable.o \
 				   xfs_dfrag.o \

fs/xfs/linux-2.6/spin.h

deleted 100644 → 0
+0 −45
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_SUPPORT_SPIN_H__
#define __XFS_SUPPORT_SPIN_H__

#include <linux/sched.h>	/* preempt needs this */
#include <linux/spinlock.h>

/*
 * Map lock_t from IRIX to Linux spinlocks.
 *
 * We do not make use of lock_t from interrupt context, so we do not
 * have to worry about disabling interrupts at all (unlike IRIX).
 */

typedef spinlock_t lock_t;

#define SPLDECL(s)			unsigned long s
#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(s)		spinlock_t s = SPIN_LOCK_UNLOCKED
#endif

#define spinlock_init(lock, name)	spin_lock_init(lock)
#define	spinlock_destroy(lock)
#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); (void)s; } while (0)
#define nested_spinlock(lock)		spin_lock(lock)
#define nested_spinunlock(lock)		spin_unlock(lock)

#endif /* __XFS_SUPPORT_SPIN_H__ */
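
Note: with spin.h removed, XFS call sites use the stock Linux spinlock API directly instead of these IRIX-compat wrappers. A minimal sketch of the mapping, assuming a plain spinlock_t user (the spinlock_init() → spin_lock_init() conversion can be seen in the xfs_alloc_delwrite_queue hunk further down):

	spinlock_t	lock;

	spin_lock_init(&lock);		/* was: spinlock_init(&lock, "name") */
	spin_lock(&lock);		/* was: s = mutex_spinlock(&lock) */
	/* ... critical section ... */
	spin_unlock(&lock);		/* was: mutex_spinunlock(&lock, s) */
	/* spinlock_destroy() was already a no-op and simply disappears */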
+24 −19
@@ -107,6 +107,18 @@ xfs_page_trace(
 #define xfs_page_trace(tag, inode, page, pgoff)
 #endif
 
+STATIC struct block_device *
+xfs_find_bdev_for_inode(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (XFS_IS_REALTIME_INODE(ip))
+		return mp->m_rtdev_targp->bt_bdev;
+	else
+		return mp->m_ddev_targp->bt_bdev;
+}
+
 /*
  * Schedule IO completion handling on a xfsdatad if this was
  * the final hold on this ioend. If we are asked to wait,
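
Note: xfs_find_bdev_for_inode() picks the backing block device directly (realtime device for realtime inodes, data device otherwise), so direct I/O no longer needs an xfs_bmap(..., BMAPI_DEVICE, ...) call just to learn the target. A condensed sketch of the intended use, mirroring the xfs_vm_direct_IO hunk later in this file (locals such as rw, iocb, inode, iov, offset and nr_segs come from that function):

	struct block_device	*bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev,
				iov, offset, nr_segs,
				xfs_get_blocks_direct,
				xfs_end_io_direct);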
@@ -151,7 +163,7 @@ xfs_destroy_ioend(
 /*
  * Update on-disk file size now that data has been written to disk.
  * The current in-memory file size is i_size.  If a write is beyond
- * eof io_new_size will be the intended file size until i_size is
+ * eof i_new_size will be the intended file size until i_size is
  * updated.  If this write does not extend all the way to the valid
  * file size then restrict this update to the end of the write.
  */
@@ -173,7 +185,7 @@ xfs_setfilesize(
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 
-	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
+	isize = MAX(ip->i_size, ip->i_new_size);
 	isize = MIN(isize, bsize);
 
 	if (ip->i_d.di_size < isize) {
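
Note: restating the comment above as a sketch with explanatory comments added (bsize is the end of the just-completed write, as in the surrounding xfs_setfilesize(); this is not a verbatim copy of the function):

	isize = MAX(ip->i_size, ip->i_new_size);	/* furthest EOF any in-flight write intends */
	isize = MIN(isize, bsize);			/* ...but no further than this write reaches */
	if (ip->i_d.di_size < isize)
		ip->i_d.di_size = isize;		/* on-disk size only ever moves forward */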
@@ -226,12 +238,13 @@ xfs_end_bio_unwritten(
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
 
 	if (likely(!ioend->io_error)) {
-		xfs_bmap(XFS_I(ioend->io_inode), offset, size,
-				BMAPI_UNWRITTEN, NULL, NULL);
+		if (!XFS_FORCED_SHUTDOWN(ip->i_mount))
+			xfs_iomap_write_unwritten(ip, offset, size);
 		xfs_setfilesize(ioend);
 	}
 	xfs_destroy_ioend(ioend);
@@ -304,7 +317,7 @@ xfs_map_blocks(
 	xfs_inode_t		*ip = XFS_I(inode);
 	int			error, nmaps = 1;
 
-	error = xfs_bmap(ip, offset, count,
+	error = xfs_iomap(ip, offset, count,
 				flags, mapp, &nmaps);
 	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
 		xfs_iflags_set(ip, XFS_IMODIFIED);
@@ -1323,7 +1336,7 @@ __xfs_get_blocks(
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
 	size = bh_result->b_size;
-	error = xfs_bmap(XFS_I(inode), offset, size,
+	error = xfs_iomap(XFS_I(inode), offset, size,
 			     create ? flags : BMAPI_READ, &iomap, &niomap);
 	if (error)
 		return -error;
@@ -1471,28 +1484,21 @@ xfs_vm_direct_IO(
 {
 	struct file	*file = iocb->ki_filp;
 	struct inode	*inode = file->f_mapping->host;
-	xfs_iomap_t	iomap;
-	int		maps = 1;
-	int		error;
+	struct block_device *bdev;
 	ssize_t		ret;
 
-	error = xfs_bmap(XFS_I(inode), offset, 0,
-				BMAPI_DEVICE, &iomap, &maps);
-	if (error)
-		return -error;
+	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
 	if (rw == WRITE) {
 		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			iomap.iomap_target->bt_bdev,
-			iov, offset, nr_segs,
+			bdev, iov, offset, nr_segs,
 			xfs_get_blocks_direct,
 			xfs_end_io_direct);
 	} else {
 		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
 		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			iomap.iomap_target->bt_bdev,
-			iov, offset, nr_segs,
+			bdev, iov, offset, nr_segs,
 			xfs_get_blocks_direct,
 			xfs_end_io_direct);
 	}
@@ -1525,8 +1531,7 @@ xfs_vm_bmap(
 	struct inode		*inode = (struct inode *)mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	vn_trace_entry(XFS_I(inode), __FUNCTION__,
-			(inst_t *)__return_address);
+	xfs_itrace_entry(XFS_I(inode));
 	xfs_rwlock(ip, VRWLOCK_READ);
 	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
 	xfs_rwunlock(ip, VRWLOCK_READ);
+6 −51
@@ -387,8 +387,6 @@ _xfs_buf_lookup_pages(
 		if (unlikely(page == NULL)) {
 			if (flags & XBF_READ_AHEAD) {
 				bp->b_page_count = i;
-				for (i = 0; i < bp->b_page_count; i++)
-					unlock_page(bp->b_pages[i]);
 				return -ENOMEM;
 			}
 
@@ -418,24 +416,17 @@ _xfs_buf_lookup_pages(
 		ASSERT(!PagePrivate(page));
 		if (!PageUptodate(page)) {
 			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_locked = 1;
-			} else if (!PagePrivate(page)) {
+			if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
 		}
 
+		unlock_page(page);
 		bp->b_pages[i] = page;
 		offset = 0;
 	}
 
-	if (!bp->b_locked) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
 	if (page_count == bp->b_page_count)
 		bp->b_flags |= XBF_DONE;
 
@@ -751,7 +742,6 @@ xfs_buf_associate_memory(
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 		pageaddr += PAGE_CACHE_SIZE;
 	}
-	bp->b_locked = 0;
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
@@ -1098,26 +1088,14 @@ xfs_buf_iostart(
 	return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-	xfs_buf_t		*bp)
-{
-	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-	if (bp->b_flags & XBF_READ)
-		return bp->b_locked;
-	return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_locked = 0;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
 xfs_buf_bio_end_io(
@@ -1147,10 +1125,6 @@ xfs_buf_bio_end_io(
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (_xfs_buf_iolocked(bp)) {
-			unlock_page(page);
-		}
 	} while (bvec >= bio->bi_io_vec);
 
 	_xfs_buf_ioend(bp, 1);
@@ -1161,13 +1135,12 @@ STATIC void
 _xfs_buf_ioapply(
 	xfs_buf_t		*bp)
 {
-	int			i, rw, map_i, total_nr_pages, nr_pages;
+	int			rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
 	unsigned int		blocksize = bp->b_target->bt_bsize;
-	int			locking = _xfs_buf_iolocked(bp);
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1190,7 +1163,7 @@ _xfs_buf_ioapply(
 	 * filesystem block size is not smaller than the page size.
 	 */
 	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    (bp->b_flags & XBF_READ) && locking &&
+	    (bp->b_flags & XBF_READ) &&
 	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
 
@@ -1207,24 +1180,6 @@ _xfs_buf_ioapply(
 		goto submit_io;
 	}
 
-	/* Lock down the pages which we need to for the request */
-	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-		for (i = 0; size; i++) {
-			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = bp->b_pages[i];
-
-			if (nbytes > size)
-				nbytes = size;
-
-			lock_page(page);
-
-			size -= nbytes;
-			offset = 0;
-		}
-		offset = bp->b_offset;
-		size = bp->b_count_desired;
-	}
-
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
@@ -1571,7 +1526,7 @@ xfs_alloc_delwrite_queue(
 
 	INIT_LIST_HEAD(&btp->bt_list);
 	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
-	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+	spin_lock_init(&btp->bt_delwrite_lock);
 	btp->bt_flags = 0;
 	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
 	if (IS_ERR(btp->bt_task)) {
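
Note: taken together, these hunks remove the scheme where buffer pages stayed locked until I/O completion: _xfs_buf_lookup_pages() now calls unlock_page() as soon as a page is wired into the buffer, so b_locked, _xfs_buf_iolocked() and the page unlocking in the bio completion path all become dead code (the b_locked field itself is dropped from struct xfs_buf in the header change below). Reassembled from the hunk above, the completion helper reduces to a plain reference-count drop:

	STATIC_INLINE void
	_xfs_buf_ioend(
		xfs_buf_t		*bp,
		int			schedule)
	{
		if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
			xfs_buf_ioend(bp, schedule);
	}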
+0 −1
@@ -143,7 +143,6 @@ typedef struct xfs_buf {
 	void			*b_fspriv2;
 	void			*b_fspriv3;
 	unsigned short		b_error;	/* error code on I/O */
-	unsigned short		b_locked;	/* page array is locked */
 	unsigned int		b_page_count;	/* size of page array */
 	unsigned int		b_offset;	/* page offset in first page */
 	struct page		**b_pages;	/* array of page pointers */