Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 088d812f authored by Linus Torvalds
Browse files

Merge tag 'for-linus-v3.10-rc3' of git://oss.sgi.com/xfs/xfs

Pull xfs fixes from Ben Myers:
 "Here are fixes for corruption on 512 byte filesystems, a rounding
  error, a use-after-free, some flags to fix lockdep reports, and
  several fixes related to CRCs.  We have a somewhat larger post-rc1
  queue than usual due to fixes related to the CRC feature we merged for
  3.10:

   - Fix for corruption with FSX on 512 byte blocksize filesystems
   - Fix rounding error in xfs_free_file_space
   - Fix use-after-free with extent free intents
   - Add several missing KM_NOFS flags to fix lockdep reports
   - Several fixes for CRC related code"

* tag 'for-linus-v3.10-rc3' of git://oss.sgi.com/xfs/xfs:
  xfs: remote attribute lookups require the value length
  xfs: xfs_attr_shortform_allfit() does not handle attr3 format.
  xfs: xfs_da3_node_read_verify() doesn't handle XFS_ATTR3_LEAF_MAGIC
  xfs: fix missing KM_NOFS tags to keep lockdep happy
  xfs: Don't reference the EFI after it is freed
  xfs: fix rounding in xfs_free_file_space
  xfs: fix sub-page blocksize data integrity writes
parents 72de4c63 7ae07780
Loading
Loading
Loading
Loading
+19 −0
Original line number Diff line number Diff line
@@ -725,6 +725,25 @@ xfs_convert_page(
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
+15 −12
Original line number Diff line number Diff line
@@ -934,17 +934,19 @@ xfs_attr_shortform_allfit(
	struct xfs_buf		*bp,
	struct xfs_inode	*dp)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	int bytes, i;
	struct xfs_attr3_icleaf_hdr leafhdr;
	int			bytes;
	int			i;

	leaf = bp->b_addr;
	ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	entry = &leaf->entries[0];
	bytes = sizeof(struct xfs_attr_sf_hdr);
	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
	for (i = 0; i < leafhdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* don't copy partial entries */
		if (!(entry->flags & XFS_ATTR_LOCAL))
@@ -961,8 +963,8 @@ xfs_attr_shortform_allfit(
	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
		return(-1);
	return(xfs_attr_shortform_bytesfit(dp, bytes));
		return -1;
	return xfs_attr_shortform_bytesfit(dp, bytes);
}

/*
@@ -2330,9 +2332,10 @@ xfs_attr3_leaf_lookup_int(
			if (!xfs_attr_namesp_match(args->flags, entry->flags))
				continue;
			args->index = probe;
			args->valuelen = be32_to_cpu(name_rmt->valuelen);
			args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
			args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
						   be32_to_cpu(name_rmt->valuelen));
						       args->valuelen);
			return XFS_ERROR(EEXIST);
		}
	}
+1 −1
Original line number Diff line number Diff line
@@ -1649,7 +1649,7 @@ xfs_alloc_buftarg(
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev =  bdev->bd_dev;
+5 −2
Original line number Diff line number Diff line
@@ -270,6 +270,7 @@ xfs_da3_node_read_verify(
				break;
			return;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			bp->b_ops = &xfs_attr3_leaf_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
@@ -2464,7 +2465,8 @@ xfs_buf_map_from_irec(
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return ENOMEM;
		*mapp = map;
@@ -2520,7 +2522,8 @@ xfs_dabuf_map(
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
+1 −1
Original line number Diff line number Diff line
@@ -1336,7 +1336,7 @@ xfs_dir2_leaf_getdents(
				     mp->m_sb.sb_blocksize);
	map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
				(length * sizeof(struct xfs_bmbt_irec)),
			       KM_SLEEP);
			       KM_SLEEP | KM_NOFS);
	map_info->map_size = length;

	/*
Loading