
Commit 5bf431fa authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  [XFS] Update maintainers
  [XFS] use scalable vmap API
  [XFS] remove old vmap cache
  [XFS] make xfs_ino_t an unsigned long long
  [XFS] truncate readdir offsets to signed 32 bit values
  [XFS] fix compile of xfs_btree_readahead_lblock on m68k
  [XFS] Remove macro-to-function indirections in the mask code
  [XFS] Remove macro-to-function indirections in attr code
  [XFS] Remove several unused typedefs.
  [XFS] pass XFS_IGET_BULKSTAT to xfs_iget for handle operations
parents c2919f2a cb7a97d0
MAINTAINERS  +2 −2
@@ -4842,11 +4842,11 @@ S: Supported
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-P:	Tim Shimmin
+P:	Bill O'Donnell
 M:	xfs-masters@oss.sgi.com
 L:	xfs@oss.sgi.com
 W:	http://oss.sgi.com/projects/xfs
-T:	git git://oss.sgi.com:8090/xfs/xfs-2.6.git
+T:	git://oss.sgi.com/xfs/xfs.git
 S:	Supported
 
 XILINX SYSTEMACE DRIVER
fs/xfs/linux-2.6/xfs_aops.h  +0 −2
@@ -21,8 +21,6 @@
 extern struct workqueue_struct *xfsdatad_workqueue;
 extern mempool_t *xfs_ioend_pool;
 
-typedef void (*xfs_ioend_func_t)(void *);
-
 /*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
fs/xfs/linux-2.6/xfs_buf.c  +3 −76
@@ -165,75 +165,6 @@ test_page_region(
 	return (mask && (page_private(page) & mask) == mask);
 }
 
-/*
- *	Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- *	Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable.  If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail.  This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
 /*
  *	Internal xfs_buf_t object manipulation
  */
@@ -333,7 +264,7 @@ xfs_buf_free(
 		uint		i;
 
 		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-			free_address(bp->b_addr - bp->b_offset);
+			vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
@@ -455,10 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1743,8 +1672,6 @@ xfsbufd(
 			count++;
 		}
 
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 
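The net effect of these xfs_buf.c hunks: XFS drops its private vunmap()-batching cache (free_address()/purge_addresses() above) and relies on the vmap allocator's own scalable API, which batches and lazily flushes unmaps inside mm/vmalloc.c. A minimal sketch of the replacement calls, assuming the 2.6.28-era declarations from <linux/vmalloc.h>; the two helper names are illustrative, not from this commit:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * vm_map_ram() maps an array of pages into a contiguous kernel
 * virtual range; vm_unmap_ram() undoes it.  Because unmap batching
 * now lives in the vmap allocator, callers no longer need a private
 * free list guarded by a spinlock.
 */
static void *example_map(struct page **pages, unsigned int count)
{
	/* -1: no preferred NUMA node; PAGE_KERNEL: normal cached mapping */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void example_unmap(void *addr, unsigned int count)
{
	/* count must match the count passed to vm_map_ram() */
	vm_unmap_ram(addr, count);
}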
fs/xfs/linux-2.6/xfs_export.c  +19 −4
@@ -126,11 +126,26 @@ xfs_nfs_get_inode(
 	if (ino == 0)
 		return ERR_PTR(-ESTALE);
 
-	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
-	if (error)
+	/*
+	 * The XFS_IGET_BULKSTAT means that an invalid inode number is just
+	 * fine and not an indication of a corrupted filesystem.  Because
+	 * clients can send any kind of invalid file handle, e.g. after
+	 * a restore on the server we have to deal with this case gracefully.
+	 */
+	error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
+			 XFS_ILOCK_SHARED, &ip, 0);
+	if (error) {
+		/*
+		 * EINVAL means the inode cluster doesn't exist anymore.
+		 * This implies the filehandle is stale, so we should
+		 * translate it here.
+		 * We don't use ESTALE directly down the chain to not
+		 * confuse applications using bulkstat that expect EINVAL.
+		 */
+		if (error == EINVAL)
+			error = ESTALE;
 		return ERR_PTR(-error);
-	if (!ip)
-		return ERR_PTR(-EIO);
+	}
 
 	if (ip->i_d.di_gen != generation) {
 		xfs_iput_new(ip, XFS_ILOCK_SHARED);
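For context (not part of this diff), the ERR_PTR-encoded inode that xfs_nfs_get_inode() returns can be handed straight to d_obtain_alias(), which passes an error inode through unchanged, so the ESTALE translated above reaches the NFS layer. A hedged sketch mirroring the caller in this file; the function name here is illustrative:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/exportfs.h>

/* Illustrative caller; the real one is xfs_fs_fh_to_dentry(). */
static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   u64 ino, u32 generation)
{
	/*
	 * d_obtain_alias() returns ERR_CAST(inode) for an ERR_PTR
	 * inode, so -ESTALE propagates to the NFS client as a stale
	 * file handle rather than a generic I/O error.
	 */
	return d_obtain_alias(xfs_nfs_get_inode(sb, ino, generation));
}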
fs/xfs/xfs_acl.h  +0 −1
@@ -22,7 +22,6 @@
  * Access Control Lists
  */
-typedef __uint16_t	xfs_acl_perm_t;
 typedef __int32_t	xfs_acl_type_t;
 typedef __int32_t	xfs_acl_tag_t;
 typedef __int32_t	xfs_acl_id_t;
 