Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e3768e47 authored by Daniel Rosenberg
Browse files

Revert "ANDROID: Squashfs: replace buffer_head with BIO"



This reverts commit afe18d1b.

Signed-off-by: Daniel Rosenberg <drosen@google.com>
Change-Id: I9d5833d5c9bf1f8086ca0bc4a72a1694a2598c63
parent b89fd787
Loading
Loading
Loading
Loading
+143 −379
Original line number Diff line number Diff line
@@ -32,8 +32,6 @@
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -41,411 +39,177 @@
#include "decompressor.h"
#include "page_actor.h"

static struct workqueue_struct *squashfs_read_wq;

struct squashfs_read_request {
	struct super_block *sb;
	u64 index;
	int length;
	int compressed;
	int offset;
	u64 read_end;
	struct squashfs_page_actor *output;
	enum {
		SQUASHFS_COPY,
		SQUASHFS_DECOMPRESS,
		SQUASHFS_METADATA,
	} data_processing;
	bool synchronous;

/*
	 * If the read is synchronous, it is possible to retrieve information
	 * about the request by setting these pointers.
 * Read the metadata block length, this is stored in the first two
 * bytes of the metadata block.
 */
	int *res;
	int *bytes_read;
	int *bytes_uncompressed;

	int nr_buffers;
	struct buffer_head **bh;
	struct work_struct offload;
};

struct squashfs_bio_request {
	struct buffer_head **bh;
	int nr_buffers;
};

static int squashfs_bio_submit(struct squashfs_read_request *req);

int squashfs_init_read_wq(void)
{
	squashfs_read_wq = create_workqueue("SquashFS read wq");
	return !!squashfs_read_wq;
}

void squashfs_destroy_read_wq(void)
{
	flush_workqueue(squashfs_read_wq);
	destroy_workqueue(squashfs_read_wq);
}

static void free_read_request(struct squashfs_read_request *req, int error)
{
	if (!req->synchronous)
		squashfs_page_actor_free(req->output, error);
	if (req->res)
		*(req->res) = error;
	kfree(req->bh);
	kfree(req);
}

static void squashfs_process_blocks(struct squashfs_read_request *req)
static struct buffer_head *get_block_length(struct super_block *sb,
			u64 *cur_index, int *offset, int *length)
{
	int error = 0;
	int bytes, i, length;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
	struct squashfs_page_actor *actor = req->output;
	struct buffer_head **bh = req->bh;
	int nr_buffers = req->nr_buffers;

	for (i = 0; i < nr_buffers; ++i) {
		if (!bh[i])
			continue;
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			error = -EIO;
	}
	if (error)
		goto cleanup;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head *bh;

	if (req->data_processing == SQUASHFS_METADATA) {
		/* Extract the length of the metadata block */
		if (req->offset != msblk->devblksize - 1) {
			length = le16_to_cpup((__le16 *)
					(bh[0]->b_data + req->offset));
	bh = sb_bread(sb, *cur_index);
	if (bh == NULL)
		return NULL;

	if (msblk->devblksize - *offset == 1) {
		*length = (unsigned char) bh->b_data[*offset];
		put_bh(bh);
		bh = sb_bread(sb, ++(*cur_index));
		if (bh == NULL)
			return NULL;
		*length |= (unsigned char) bh->b_data[0] << 8;
		*offset = 1;
	} else {
			length = (unsigned char)bh[0]->b_data[req->offset];
			length |= (unsigned char)bh[1]->b_data[0] << 8;
		}
		req->compressed = SQUASHFS_COMPRESSED(length);
		req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
						       : SQUASHFS_COPY;
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (req->index + length + 2 > req->read_end) {
			for (i = 0; i < nr_buffers; ++i)
				put_bh(bh[i]);
			kfree(bh);
			req->length = length;
			req->index += 2;
			squashfs_bio_submit(req);
			return;
		}
		req->length = length;
		req->offset = (req->offset + 2) % PAGE_SIZE;
		if (req->offset < 2) {
			put_bh(bh[0]);
			++bh;
			--nr_buffers;
		}
	}
	if (req->bytes_read)
		*(req->bytes_read) = req->length;
		*length = (unsigned char) bh->b_data[*offset] |
			(unsigned char) bh->b_data[*offset + 1] << 8;
		*offset += 2;

	if (req->data_processing == SQUASHFS_COPY) {
		squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
			req->length, msblk->devblksize);
	} else if (req->data_processing == SQUASHFS_DECOMPRESS) {
		req->length = squashfs_decompress(msblk, bh, nr_buffers,
			req->offset, req->length, actor);
		if (req->length < 0) {
			error = -EIO;
			goto cleanup;
		if (*offset == msblk->devblksize) {
			put_bh(bh);
			bh = sb_bread(sb, ++(*cur_index));
			if (bh == NULL)
				return NULL;
			*offset = 0;
		}
	}

	/* Last page may have trailing bytes not filled */
	bytes = req->length % PAGE_SIZE;
	if (bytes && actor->page[actor->pages - 1])
		zero_user_segment(actor->page[actor->pages - 1], bytes,
				  PAGE_SIZE);

cleanup:
	if (req->bytes_uncompressed)
		*(req->bytes_uncompressed) = req->length;
	if (error) {
		for (i = 0; i < nr_buffers; ++i)
			if (bh[i])
				put_bh(bh[i]);
	}
	free_read_request(req, error);
	return bh;
}

static void read_wq_handler(struct work_struct *work)
{
	squashfs_process_blocks(container_of(work,
		    struct squashfs_read_request, offload));
}

static void squashfs_bio_end_io(struct bio *bio)
{
	int i;
	int error = bio->bi_status;
	struct squashfs_bio_request *bio_req = bio->bi_private;

	bio_put(bio);

	for (i = 0; i < bio_req->nr_buffers; ++i) {
		if (!bio_req->bh[i])
			continue;
		if (!error)
			set_buffer_uptodate(bio_req->bh[i]);
		else
			clear_buffer_uptodate(bio_req->bh[i]);
		unlock_buffer(bio_req->bh[i]);
	}
	kfree(bio_req);
}

static int actor_getblks(struct squashfs_read_request *req, u64 block)
/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
int squashfs_read_data(struct super_block *sb, u64 index, int length,
		u64 *next_index, struct squashfs_page_actor *output)
{
	int i;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head **bh;
	int offset = index & ((1 << msblk->devblksize_log2) - 1);
	u64 cur_index = index >> msblk->devblksize_log2;
	int bytes, compressed, b = 0, k = 0, avail, i;

	req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
	if (!req->bh)
	bh = kcalloc(((output->length + msblk->devblksize - 1)
		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
	if (bh == NULL)
		return -ENOMEM;

	for (i = 0; i < req->nr_buffers; ++i) {
		req->bh[i] = sb_getblk(req->sb, block + i);
		if (!req->bh[i]) {
			while (--i) {
				if (req->bh[i])
					put_bh(req->bh[i]);
			}
			return -1;
		}
	}
	return 0;
}

static int squashfs_bio_submit(struct squashfs_read_request *req)
{
	struct bio *bio = NULL;
	struct buffer_head *bh;
	struct squashfs_bio_request *bio_req = NULL;
	int b = 0, prev_block = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	u64 read_start = round_down(req->index, msblk->devblksize);
	u64 read_end = round_up(req->index + req->length, msblk->devblksize);
	sector_t block = read_start >> msblk->devblksize_log2;
	sector_t block_end = read_end >> msblk->devblksize_log2;
	int offset = read_start - round_down(req->index, PAGE_SIZE);
	int nr_buffers = block_end - block;
	int blksz = msblk->devblksize;
	int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
						       : nr_buffers;

	/* Setup the request */
	req->read_end = read_end;
	req->offset = req->index - read_start;
	req->nr_buffers = nr_buffers;
	if (actor_getblks(req, block) < 0)
		goto getblk_failed;

	/* Create and submit the BIOs */
	for (b = 0; b < nr_buffers; ++b, offset += blksz) {
		bh = req->bh[b];
		if (!bh || !trylock_buffer(bh))
			continue;
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			continue;
		}
		offset %= PAGE_SIZE;

		/* Append the buffer to the current BIO if it is contiguous */
		if (bio && bio_req && prev_block + 1 == b) {
			if (bio_add_page(bio, bh->b_page, blksz, offset)) {
				bio_req->nr_buffers += 1;
				prev_block = b;
				continue;
			}
		}

		/* Otherwise, submit the current BIO and create a new one */
		if (bio)
			submit_bio(bio);
		bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
				  GFP_NOIO);
		if (!bio_req)
			goto req_alloc_failed;
		bio_req->bh = &req->bh[b];
		bio = bio_alloc(GFP_NOIO, bio_max_pages);
		if (!bio)
			goto bio_alloc_failed;
		bio_set_dev(bio, req->sb->s_bdev);
		bio->bi_iter.bi_sector = (block + b)
				       << (msblk->devblksize_log2 - 9);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = bio_req;
		bio->bi_end_io = squashfs_bio_end_io;
	if (length) {
		/*
		 * Datablock.
		 */
		bytes = -offset;
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		if (next_index)
			*next_index = index + length;

		bio_add_page(bio, bh->b_page, blksz, offset);
		bio_req->nr_buffers += 1;
		prev_block = b;
	}
	if (bio)
		submit_bio(bio);
		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, output->length);

	if (req->synchronous)
		squashfs_process_blocks(req);
	else {
		INIT_WORK(&req->offload, read_wq_handler);
		schedule_work(&req->offload);
	}
	return 0;
		if (length < 0 || length > output->length ||
				(index + length) > msblk->bytes_used)
			goto read_failure;

bio_alloc_failed:
	kfree(bio_req);
req_alloc_failed:
	unlock_buffer(bh);
	while (--nr_buffers >= b)
		if (req->bh[nr_buffers])
			put_bh(req->bh[nr_buffers]);
	while (--b >= 0)
		if (req->bh[b])
			wait_on_buffer(req->bh[b]);
getblk_failed:
	free_read_request(req, -ENOMEM);
	return -ENOMEM;
		for (b = 0; bytes < length; b++, cur_index++) {
			bh[b] = sb_getblk(sb, cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}

static int read_metadata_block(struct squashfs_read_request *req,
			       u64 *next_index)
{
	int ret, error, bytes_read = 0, bytes_uncompressed = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	if (req->index + 2 > msblk->bytes_used) {
		free_read_request(req, -EINVAL);
		return -EINVAL;
	}
	req->length = 2;

	/* Do not read beyond the end of the device */
	if (req->index + req->length > msblk->bytes_used)
		req->length = msblk->bytes_used - req->index;
	req->data_processing = SQUASHFS_METADATA;

		ll_rw_block(REQ_OP_READ, 0, b, bh);
	} else {
		/*
	 * Reading metadata is always synchronous because we don't know the
	 * length in advance and the function is expected to update
	 * 'next_index' and return the length.
		 * Metadata block.
		 */
	req->synchronous = true;
	req->res = &error;
	req->bytes_read = &bytes_read;
	req->bytes_uncompressed = &bytes_uncompressed;
		if ((index + 2) > msblk->bytes_used)
			goto read_failure;

	TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", bytes_read,
	      req->output->length);
		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
		if (bh[0] == NULL)
			goto read_failure;
		b = 1;

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (error)
		return error;
		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (next_index)
		*next_index += 2 + bytes_read;
	return bytes_uncompressed;
}
			*next_index = index + length + 2;

static int read_data_block(struct squashfs_read_request *req, int length,
			   u64 *next_index, bool synchronous)
{
	int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;
		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
				compressed ? "" : "un", length);

	req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
	req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
	req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
					       : SQUASHFS_COPY;
		if (length < 0 || length > output->length ||
					(index + length) > msblk->bytes_used)
			goto block_release;

	req->synchronous = synchronous;
	if (synchronous) {
		req->res = &error;
		req->bytes_read = &bytes_read;
		req->bytes_uncompressed = &bytes_uncompressed;
		for (; bytes < length; b++) {
			bh[b] = sb_getblk(sb, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
	}

	TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", req->length,
	      req->output->length);

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (synchronous)
		ret = error ? error : bytes_uncompressed;
	if (next_index)
		*next_index += length;
	return ret;
	for (i = 0; i < b; i++) {
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			goto block_release;
	}

	if (compressed) {
		length = squashfs_decompress(msblk, bh, b, offset, length,
			output);
		if (length < 0)
			goto read_failure;
	} else {
		/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
		 * Block is uncompressed.
		 */
static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output, bool sync)
{
	struct squashfs_read_request *req;
		int in, pg_offset = 0;
		void *data = squashfs_first_page(output);

	req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
	if (!req) {
		if (!sync)
			squashfs_page_actor_free(output, -ENOMEM);
		return -ENOMEM;
		for (bytes = length; k < b; k++) {
			in = min(bytes, msblk->devblksize - offset);
			bytes -= in;
			while (in) {
				if (pg_offset == PAGE_SIZE) {
					data = squashfs_next_page(output);
					pg_offset = 0;
				}
				avail = min_t(int, in, PAGE_SIZE -
						pg_offset);
				memcpy(data + pg_offset, bh[k]->b_data + offset,
						avail);
				in -= avail;
				pg_offset += avail;
				offset += avail;
			}
			offset = 0;
			put_bh(bh[k]);
		}
		squashfs_finish_page(output);
	}

	req->sb = sb;
	req->index = index;
	req->output = output;

	if (next_index)
		*next_index = index;
	kfree(bh);
	return length;

	if (length)
		length = read_data_block(req, length, next_index, sync);
	else
		length = read_metadata_block(req, next_index);
block_release:
	for (; k < b; k++)
		put_bh(bh[k]);

	if (length < 0) {
read_failure:
	ERROR("squashfs_read_data failed to read block 0x%llx\n",
					(unsigned long long) index);
	kfree(bh);
	return -EIO;
}

	return length;
}

int squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{
	return __squashfs_read_data(sb, index, length, next_index, output,
				    true);
}

int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{

	return __squashfs_read_data(sb, index, length, next_index, output,
				    false);
}
+129 −66
Original line number Diff line number Diff line
@@ -20,49 +20,49 @@
#include "squashfs.h"
#include "page_actor.h"

static void release_actor_pages(struct page **page, int pages, int error)
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page);

/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)

{
	int i;
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	for (i = 0; i < pages; i++) {
		if (!page[i])
			continue;
		flush_dcache_page(page[i]);
		if (!error)
			SetPageUptodate(page[i]);
		else {
			SetPageError(page[i]);
			zero_user_segment(page[i], 0, PAGE_SIZE);
		}
		unlock_page(page[i]);
		put_page(page[i]);
	}
	kfree(page);
}
	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
static struct squashfs_page_actor *actor_from_page_cache(
	struct page *target_page, int start_index, int nr_pages)
{
	int i, n;
	struct page **page;
	struct squashfs_page_actor *actor;
	actor = squashfs_page_actor_init(page, pages, 0, NULL);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

	page = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!page)
		return NULL;

	/* Try to grab all the pages covered by the SquashFS block */
	for (i = 0, n = start_index; i < nr_pages; i++, n++) {
		if (target_page->index == n) {
			page[i] = target_page;
		} else {
			page[i] = grab_cache_page_nowait(target_page->mapping,
							 n);
			if (page[i] == NULL)
		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

@@ -70,44 +70,107 @@ static struct squashfs_page_actor *actor_from_page_cache(
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
								page);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	actor = squashfs_page_actor_init(page, nr_pages, 0,
			release_actor_pages);
	if (!actor) {
		release_actor_pages(page, nr_pages, -ENOMEM);
	kfree(actor);
	kfree(page);
		return NULL;

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}
	return actor;

out:
	squashfs_page_actor_free(actor, 0);
	kfree(page);
	return res;
}

/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)

static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int pages, res = -ENOMEM;
	struct squashfs_page_actor *actor;
	struct inode *i = target_page->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
						 block, bsize);
	int bytes = buffer->length, res = buffer->error, n, offset = 0;
	void *pageaddr;

	if (res) {
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
		goto out;
	}

	if (end_index > file_end)
		end_index = file_end;
	pages = end_index - start_index + 1;
	for (n = 0; n < pages && bytes > 0; n++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		int avail = min_t(int, bytes, PAGE_SIZE);

	actor = actor_from_page_cache(target_page, start_index, pages);
		if (page[n] == NULL)
			continue;

	if (!actor)
		return -ENOMEM;
		pageaddr = kmap_atomic(page[n]);
		squashfs_copy_data(pageaddr, buffer, offset, avail);
		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
		kunmap_atomic(pageaddr);
		flush_dcache_page(page[n]);
		SetPageUptodate(page[n]);
		unlock_page(page[n]);
		if (page[n] != target_page)
			put_page(page[n]);
	}

	get_page(target_page);
	res = squashfs_read_data_async(inode->i_sb, block, bsize, NULL,
				       actor);
	return res < 0 ? res : 0;
out:
	squashfs_cache_put(buffer);
	return res;
}
+0 −4
Original line number Diff line number Diff line
@@ -28,12 +28,8 @@
#define WARNING(s, args...)	pr_warn("SQUASHFS: "s, ## args)

/* block.c */
extern int squashfs_init_read_wq(void);
extern void squashfs_destroy_read_wq(void);
extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
				struct squashfs_page_actor *);
extern int squashfs_read_data_async(struct super_block *, u64, int, u64 *,
				struct squashfs_page_actor *);

/* cache.c */
extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
+0 −7
Original line number Diff line number Diff line
@@ -444,15 +444,9 @@ static int __init init_squashfs_fs(void)
	if (err)
		return err;

	if (!squashfs_init_read_wq()) {
		destroy_inodecache();
		return -ENOMEM;
	}

	err = register_filesystem(&squashfs_fs_type);
	if (err) {
		destroy_inodecache();
		squashfs_destroy_read_wq();
		return err;
	}

@@ -466,7 +460,6 @@ static void __exit exit_squashfs_fs(void)
{
	unregister_filesystem(&squashfs_fs_type);
	destroy_inodecache();
	squashfs_destroy_read_wq();
}