Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c40ea741 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Lachlan McIlroy
Browse files

[XFS] kill superfluous buffer locking



There is no need to lock any page in xfs_buf.c because we operate on our
own address_space and all locking is covered by the buffer semaphore. If
we ever switch back to the main blockdevice address_space as suggested e.g.
for fsblock with a similar scheme, the locking will have to be totally
revised anyway, because the current scheme is neither correct nor coherent
with itself.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29845a

Signed-off-by: default avatarChristoph Hellwig <hch@infradead.org>
Signed-off-by: default avatarLachlan McIlroy <lachlan@sgi.com>
Signed-off-by: default avatarTim Shimmin <tes@sgi.com>
parent 0771fb45
Loading
Loading
Loading
Loading
+5 −48
Original line number Diff line number Diff line
@@ -409,6 +409,7 @@ _xfs_buf_lookup_pages(
			congestion_wait(WRITE, HZ/50);
			goto retry;
		}
		unlock_page(page);

		XFS_STATS_INC(xb_page_found);

@@ -418,10 +419,7 @@ _xfs_buf_lookup_pages(
		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
			if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
@@ -431,11 +429,6 @@ _xfs_buf_lookup_pages(
		offset = 0;
	}

	if (!bp->b_locked) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

@@ -752,7 +745,6 @@ xfs_buf_associate_memory(
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
@@ -1099,26 +1091,14 @@ xfs_buf_iostart(
	return status;
}

STATIC_INLINE int
_xfs_buf_iolocked(
	xfs_buf_t		*bp)
{
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;
	return 0;
}

STATIC_INLINE void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_locked = 0;
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}
}

STATIC void
xfs_buf_bio_end_io(
@@ -1148,10 +1128,6 @@ xfs_buf_bio_end_io(

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
@@ -1162,13 +1138,12 @@ STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			i, rw, map_i, total_nr_pages, nr_pages;
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	int			locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;
	map_i = 0;
@@ -1191,7 +1166,7 @@ _xfs_buf_ioapply(
	 * filesystem block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    (bp->b_flags & XBF_READ) && locking &&
	    (bp->b_flags & XBF_READ) &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

@@ -1208,24 +1183,6 @@ _xfs_buf_ioapply(
		goto submit_io;
	}

	/* Lock down the pages which we need to for the request */
	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
		for (i = 0; size; i++) {
			int		nbytes = PAGE_CACHE_SIZE - offset;
			struct page	*page = bp->b_pages[i];

			if (nbytes > size)
				nbytes = size;

			lock_page(page);

			size -= nbytes;
			offset = 0;
		}
		offset = bp->b_offset;
		size = bp->b_count_desired;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
+0 −1
Original line number Diff line number Diff line
@@ -143,7 +143,6 @@ typedef struct xfs_buf {
	void			*b_fspriv2;
	void			*b_fspriv3;
	unsigned short		b_error;	/* error code on I/O */
	unsigned short		b_locked;	/* page array is locked */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	struct page		**b_pages;	/* array of page pointers */