Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aeea1b1f authored by Christoph Hellwig, committed by Alex Elder
Browse files

xfs: refactor xfs_vm_writepage



After the last patches the code for overwrites is the same as for
delayed and unwritten extents except that it doesn't need to call
xfs_map_at_offset.  Take care of that fact to simplify
xfs_vm_writepage.

The buffer loop now first checks the type of buffer and checks/sets
the ioend type, or continues to the next buffer if it's not
interesting to us.  Only after that we validate the iomap and
perform the block mapping if needed, all in common code for the
cases where we have to do work.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent 2fa24f92
Loading
Loading
Loading
Loading
+39 −58
Original line number Diff line number Diff line
@@ -999,10 +999,6 @@ xfs_vm_writepage(
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
@@ -1013,61 +1009,46 @@ xfs_vm_writepage(
				type = IO_DELALLOC;
				imap_valid = 0;
			}
			}

			if (!imap_valid) {
				/*
				 * If we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				err = xfs_map_blocks(inode, offset, &imap,
						     type, nonblocking);
				if (err)
					goto error;
				imap_valid = xfs_imap_valid(inode, &imap,
							    offset);
			}
			if (imap_valid) {
				xfs_map_at_offset(inode, bh, &imap, offset);
				xfs_add_to_ioend(inode, bh, offset, type,
						 &ioend, new_ioend);
				count++;
			}
		} else if (buffer_uptodate(bh)) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
				err = xfs_map_blocks(inode, offset,
						&imap, type, nonblocking);
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
				imap_valid = xfs_imap_valid(inode, &imap,
							    offset);
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}

		if (imap_valid) {
			if (type == IO_OVERWRITE)
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, new_ioend);
			else
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}
		} else if (PageUptodate(page)) {
			ASSERT(buffer_mapped(bh));
			imap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;