Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 603dd20b authored by Alistair Strachan
Browse files

ANDROID: Revert fs/squashfs back to linux-4.9.y



Bug: 29521202
Test: local build test only
Change-Id: I117e6e733f0ece85fd4d90604efc4d59fa545464
Signed-off-by: Alistair Strachan <astrachan@google.com>
parent 2ca7ab11
Loading
Loading
Loading
Loading
+28 −0
Original line number Diff line number Diff line
@@ -25,6 +25,34 @@ config SQUASHFS

	  If unsure, say N.

choice
	prompt "File decompression options"
	depends on SQUASHFS
	help
	  Squashfs now supports two options for decompressing file
	  data.  Traditionally Squashfs has decompressed into an
	  intermediate buffer and then memcopied it into the page cache.
	  Squashfs now supports the ability to decompress directly into
	  the page cache.

	  If unsure, select "Decompress file data into an intermediate buffer"

config SQUASHFS_FILE_CACHE
	bool "Decompress file data into an intermediate buffer"
	help
	  Decompress file data into an intermediate buffer and then
	  memcopy it into the page cache.

config SQUASHFS_FILE_DIRECT
	bool "Decompress files directly into the page cache"
	help
	  Directly decompress file data into the page cache.
	  Doing so can significantly improve performance because
	  it eliminates a memcpy and it also removes the lock contention
	  on the single buffer.

endchoice

choice
	prompt "Decompressor parallelisation options"
	depends on SQUASHFS
+2 −1
Original line number Diff line number Diff line
@@ -5,7 +5,8 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
squashfs-y += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
+145 −405
Original line number Diff line number Diff line
@@ -28,12 +28,9 @@

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -41,436 +38,179 @@
#include "decompressor.h"
#include "page_actor.h"

static struct workqueue_struct *squashfs_read_wq;

struct squashfs_read_request {
	struct super_block *sb;
	u64 index;
	int length;
	int compressed;
	int offset;
	u64 read_end;
	struct squashfs_page_actor *output;
	enum {
		SQUASHFS_COPY,
		SQUASHFS_DECOMPRESS,
		SQUASHFS_METADATA,
	} data_processing;
	bool synchronous;

/*
	 * If the read is synchronous, it is possible to retrieve information
	 * about the request by setting these pointers.
 * Read the metadata block length, this is stored in the first two
 * bytes of the metadata block.
 */
	int *res;
	int *bytes_read;
	int *bytes_uncompressed;

	int nr_buffers;
	struct buffer_head **bh;
	struct work_struct offload;
};

struct squashfs_bio_request {
	struct buffer_head **bh;
	int nr_buffers;
};

static int squashfs_bio_submit(struct squashfs_read_request *req);

int squashfs_init_read_wq(void)
{
	squashfs_read_wq = create_workqueue("SquashFS read wq");
	return !!squashfs_read_wq;
}

void squashfs_destroy_read_wq(void)
{
	flush_workqueue(squashfs_read_wq);
	destroy_workqueue(squashfs_read_wq);
}

static void free_read_request(struct squashfs_read_request *req, int error)
{
	if (!req->synchronous)
		squashfs_page_actor_free(req->output, error);
	if (req->res)
		*(req->res) = error;
	kfree(req->bh);
	kfree(req);
}

static void squashfs_process_blocks(struct squashfs_read_request *req)
static struct buffer_head *get_block_length(struct super_block *sb,
			u64 *cur_index, int *offset, int *length)
{
	int error = 0;
	int bytes, i, length;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
	struct squashfs_page_actor *actor = req->output;
	struct buffer_head **bh = req->bh;
	int nr_buffers = req->nr_buffers;

	for (i = 0; i < nr_buffers; ++i) {
		if (!bh[i])
			continue;
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			error = -EIO;
	}
	if (error)
		goto cleanup;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head *bh;

	if (req->data_processing == SQUASHFS_METADATA) {
		/* Extract the length of the metadata block */
		if (req->offset != msblk->devblksize - 1) {
			length = le16_to_cpup((__le16 *)
					(bh[0]->b_data + req->offset));
	bh = sb_bread(sb, *cur_index);
	if (bh == NULL)
		return NULL;

	if (msblk->devblksize - *offset == 1) {
		*length = (unsigned char) bh->b_data[*offset];
		put_bh(bh);
		bh = sb_bread(sb, ++(*cur_index));
		if (bh == NULL)
			return NULL;
		*length |= (unsigned char) bh->b_data[0] << 8;
		*offset = 1;
	} else {
			length = (unsigned char)bh[0]->b_data[req->offset];
			length |= (unsigned char)bh[1]->b_data[0] << 8;
		}
		req->compressed = SQUASHFS_COMPRESSED(length);
		req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
						       : SQUASHFS_COPY;
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (req->index + length + 2 > req->read_end) {
			for (i = 0; i < nr_buffers; ++i)
				put_bh(bh[i]);
			kfree(bh);
			req->length = length;
			req->index += 2;
			squashfs_bio_submit(req);
			return;
		}
		req->length = length;
		req->offset = (req->offset + 2) % PAGE_SIZE;
		if (req->offset < 2) {
			put_bh(bh[0]);
			++bh;
			--nr_buffers;
		}
	}
	if (req->bytes_read)
		*(req->bytes_read) = req->length;

	if (req->data_processing == SQUASHFS_COPY) {
		squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
			req->length, msblk->devblksize);
	} else if (req->data_processing == SQUASHFS_DECOMPRESS) {
		req->length = squashfs_decompress(msblk, bh, nr_buffers,
			req->offset, req->length, actor);
		if (req->length < 0) {
			error = -EIO;
			goto cleanup;
		}
	}

	/* Last page may have trailing bytes not filled */
	bytes = req->length % PAGE_SIZE;
	if (bytes && actor->page[actor->pages - 1])
		zero_user_segment(actor->page[actor->pages - 1], bytes,
				  PAGE_SIZE);
		*length = (unsigned char) bh->b_data[*offset] |
			(unsigned char) bh->b_data[*offset + 1] << 8;
		*offset += 2;

cleanup:
	if (req->bytes_uncompressed)
		*(req->bytes_uncompressed) = req->length;
	if (error) {
		for (i = 0; i < nr_buffers; ++i)
			if (bh[i])
				put_bh(bh[i]);
		if (*offset == msblk->devblksize) {
			put_bh(bh);
			bh = sb_bread(sb, ++(*cur_index));
			if (bh == NULL)
				return NULL;
			*offset = 0;
		}
	free_read_request(req, error);
	}

static void read_wq_handler(struct work_struct *work)
{
	squashfs_process_blocks(container_of(work,
		    struct squashfs_read_request, offload));
	return bh;
}

static void squashfs_bio_end_io(struct bio *bio)
{
	int i;
	int error = bio->bi_error;
	struct squashfs_bio_request *bio_req = bio->bi_private;

	bio_put(bio);

	for (i = 0; i < bio_req->nr_buffers; ++i) {
		if (!bio_req->bh[i])
			continue;
		if (!error)
			set_buffer_uptodate(bio_req->bh[i]);
		else
			clear_buffer_uptodate(bio_req->bh[i]);
		unlock_buffer(bio_req->bh[i]);
	}
	kfree(bio_req);
}

static int bh_is_optional(struct squashfs_read_request *req, int idx)
{
	int start_idx, end_idx;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
	end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
	if (start_idx >= req->output->pages)
		return 1;
	if (start_idx < 0)
		start_idx = end_idx;
	if (end_idx >= req->output->pages)
		end_idx = start_idx;
	return !req->output->page[start_idx] && !req->output->page[end_idx];
}

static int actor_getblks(struct squashfs_read_request *req, u64 block)
/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
int squashfs_read_data(struct super_block *sb, u64 index, int length,
		u64 *next_index, struct squashfs_page_actor *output)
{
	int i;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct buffer_head **bh;
	int offset = index & ((1 << msblk->devblksize_log2) - 1);
	u64 cur_index = index >> msblk->devblksize_log2;
	int bytes, compressed, b = 0, k = 0, avail, i;

	req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
	if (!req->bh)
	bh = kcalloc(((output->length + msblk->devblksize - 1)
		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
	if (bh == NULL)
		return -ENOMEM;

	for (i = 0; i < req->nr_buffers; ++i) {
	if (length) {
		/*
		 * When dealing with an uncompressed block, the actor may
		 * contains NULL pages. There's no need to read the buffers
		 * associated with these pages.
		 * Datablock.
		 */
		if (!req->compressed && bh_is_optional(req, i)) {
			req->bh[i] = NULL;
			continue;
		}
		req->bh[i] = sb_getblk(req->sb, block + i);
		if (!req->bh[i]) {
			while (--i) {
				if (req->bh[i])
					put_bh(req->bh[i]);
			}
			return -1;
		}
	}
	return 0;
}

static int squashfs_bio_submit(struct squashfs_read_request *req)
{
	struct bio *bio = NULL;
	struct buffer_head *bh;
	struct squashfs_bio_request *bio_req = NULL;
	int b = 0, prev_block = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;

	u64 read_start = round_down(req->index, msblk->devblksize);
	u64 read_end = round_up(req->index + req->length, msblk->devblksize);
	sector_t block = read_start >> msblk->devblksize_log2;
	sector_t block_end = read_end >> msblk->devblksize_log2;
	int offset = read_start - round_down(req->index, PAGE_SIZE);
	int nr_buffers = block_end - block;
	int blksz = msblk->devblksize;
	int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
						       : nr_buffers;

	/* Setup the request */
	req->read_end = read_end;
	req->offset = req->index - read_start;
	req->nr_buffers = nr_buffers;
	if (actor_getblks(req, block) < 0)
		goto getblk_failed;

	/* Create and submit the BIOs */
	for (b = 0; b < nr_buffers; ++b, offset += blksz) {
		bh = req->bh[b];
		if (!bh || !trylock_buffer(bh))
			continue;
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			continue;
		}
		offset %= PAGE_SIZE;

		/* Append the buffer to the current BIO if it is contiguous */
		if (bio && bio_req && prev_block + 1 == b) {
			if (bio_add_page(bio, bh->b_page, blksz, offset)) {
				bio_req->nr_buffers += 1;
				prev_block = b;
				continue;
			}
		}

		/* Otherwise, submit the current BIO and create a new one */
		if (bio)
			submit_bio(bio);
		bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
				  GFP_NOIO);
		if (!bio_req)
			goto req_alloc_failed;
		bio_req->bh = &req->bh[b];
		bio = bio_alloc(GFP_NOIO, bio_max_pages);
		if (!bio)
			goto bio_alloc_failed;
		bio->bi_bdev = req->sb->s_bdev;
		bio->bi_iter.bi_sector = (block + b)
				       << (msblk->devblksize_log2 - 9);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = bio_req;
		bio->bi_end_io = squashfs_bio_end_io;

		bio_add_page(bio, bh->b_page, blksz, offset);
		bio_req->nr_buffers += 1;
		prev_block = b;
	}
	if (bio)
		submit_bio(bio);

	if (req->synchronous)
		squashfs_process_blocks(req);
	else {
		INIT_WORK(&req->offload, read_wq_handler);
		schedule_work(&req->offload);
	}
	return 0;
		bytes = -offset;
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		if (next_index)
			*next_index = index + length;

bio_alloc_failed:
	kfree(bio_req);
req_alloc_failed:
	unlock_buffer(bh);
	while (--nr_buffers >= b)
		if (req->bh[nr_buffers])
			put_bh(req->bh[nr_buffers]);
	while (--b >= 0)
		if (req->bh[b])
			wait_on_buffer(req->bh[b]);
getblk_failed:
	free_read_request(req, -ENOMEM);
	return -ENOMEM;
}
		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, output->length);

static int read_metadata_block(struct squashfs_read_request *req,
			       u64 *next_index)
{
	int ret, error, bytes_read = 0, bytes_uncompressed = 0;
	struct squashfs_sb_info *msblk = req->sb->s_fs_info;
		if (length < 0 || length > output->length ||
				(index + length) > msblk->bytes_used)
			goto read_failure;

	if (req->index + 2 > msblk->bytes_used) {
		free_read_request(req, -EINVAL);
		return -EINVAL;
		for (b = 0; bytes < length; b++, cur_index++) {
			bh[b] = sb_getblk(sb, cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
	req->length = 2;

	/* Do not read beyond the end of the device */
	if (req->index + req->length > msblk->bytes_used)
		req->length = msblk->bytes_used - req->index;
	req->data_processing = SQUASHFS_METADATA;

		ll_rw_block(REQ_OP_READ, 0, b, bh);
	} else {
		/*
	 * Reading metadata is always synchronous because we don't know the
	 * length in advance and the function is expected to update
	 * 'next_index' and return the length.
		 * Metadata block.
		 */
	req->synchronous = true;
	req->res = &error;
	req->bytes_read = &bytes_read;
	req->bytes_uncompressed = &bytes_uncompressed;
		if ((index + 2) > msblk->bytes_used)
			goto read_failure;

	TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", bytes_read,
	      req->output->length);
		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
		if (bh[0] == NULL)
			goto read_failure;
		b = 1;

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (error)
		return error;
		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		if (next_index)
		*next_index += 2 + bytes_read;
	return bytes_uncompressed;
}
			*next_index = index + length + 2;

static int read_data_block(struct squashfs_read_request *req, int length,
			   u64 *next_index, bool synchronous)
{
	int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;
		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
				compressed ? "" : "un", length);

	req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
	req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
	req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
					       : SQUASHFS_COPY;
		if (length < 0 || length > output->length ||
					(index + length) > msblk->bytes_used)
			goto block_release;

	req->synchronous = synchronous;
	if (synchronous) {
		req->res = &error;
		req->bytes_read = &bytes_read;
		req->bytes_uncompressed = &bytes_uncompressed;
		for (; bytes < length; b++) {
			bh[b] = sb_getblk(sb, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
	}

	TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
	      req->index, req->compressed ? "" : "un", req->length,
	      req->output->length);

	ret = squashfs_bio_submit(req);
	if (ret)
		return ret;
	if (synchronous)
		ret = error ? error : bytes_uncompressed;
	if (next_index)
		*next_index += length;
	return ret;
	for (i = 0; i < b; i++) {
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			goto block_release;
	}

	if (compressed) {
		if (!msblk->stream)
			goto read_failure;
		length = squashfs_decompress(msblk, bh, b, offset, length,
			output);
		if (length < 0)
			goto read_failure;
	} else {
		/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
		 * Block is uncompressed.
		 */
static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output, bool sync)
{
	struct squashfs_read_request *req;
		int in, pg_offset = 0;
		void *data = squashfs_first_page(output);

	req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
	if (!req) {
		if (!sync)
			squashfs_page_actor_free(output, -ENOMEM);
		return -ENOMEM;
		for (bytes = length; k < b; k++) {
			in = min(bytes, msblk->devblksize - offset);
			bytes -= in;
			while (in) {
				if (pg_offset == PAGE_SIZE) {
					data = squashfs_next_page(output);
					pg_offset = 0;
				}
				avail = min_t(int, in, PAGE_SIZE -
						pg_offset);
				memcpy(data + pg_offset, bh[k]->b_data + offset,
						avail);
				in -= avail;
				pg_offset += avail;
				offset += avail;
			}
			offset = 0;
			put_bh(bh[k]);
		}
		squashfs_finish_page(output);
	}

	req->sb = sb;
	req->index = index;
	req->output = output;

	if (next_index)
		*next_index = index;
	kfree(bh);
	return length;

	if (length)
		length = read_data_block(req, length, next_index, sync);
	else
		length = read_metadata_block(req, next_index);
block_release:
	for (; k < b; k++)
		put_bh(bh[k]);

	if (length < 0) {
read_failure:
	ERROR("squashfs_read_data failed to read block 0x%llx\n",
					(unsigned long long) index);
	kfree(bh);
	return -EIO;
}

	return length;
}

int squashfs_read_data(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{
	return __squashfs_read_data(sb, index, length, next_index, output,
				    true);
}

int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
	u64 *next_index, struct squashfs_page_actor *output)
{

	return __squashfs_read_data(sb, index, length, next_index, output,
				    false);
}
+44 −29
Original line number Diff line number Diff line
@@ -209,14 +209,17 @@ void squashfs_cache_put(struct squashfs_cache_entry *entry)
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
	int i;
	int i, j;

	if (cache == NULL)
		return;

	for (i = 0; i < cache->entries; i++) {
		if (cache->entry[i].page)
			free_page_array(cache->entry[i].page, cache->pages);
		if (cache->entry[i].data) {
			for (j = 0; j < cache->pages; j++)
				kfree(cache->entry[i].data[j]);
			kfree(cache->entry[i].data);
		}
		kfree(cache->entry[i].actor);
	}

@@ -233,7 +236,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
	int block_size)
{
	int i;
	int i, j;
	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

	if (cache == NULL) {
@@ -265,13 +268,22 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
		init_waitqueue_head(&cache->entry[i].wait_queue);
		entry->cache = cache;
		entry->block = SQUASHFS_INVALID_BLK;
		entry->page = alloc_page_array(cache->pages, GFP_KERNEL);
		if (!entry->page) {
		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
		if (entry->data == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}
		entry->actor = squashfs_page_actor_init(entry->page,
			cache->pages, 0, NULL);

		for (j = 0; j < cache->pages; j++) {
			entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (entry->data[j] == NULL) {
				ERROR("Failed to allocate %s buffer\n", name);
				goto cleanup;
			}
		}

		entry->actor = squashfs_page_actor_init(entry->data,
						cache->pages, 0);
		if (entry->actor == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
@@ -302,20 +314,18 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
		return min(length, entry->length - offset);

	while (offset < entry->length) {
		void *buff = kmap_atomic(entry->page[offset / PAGE_SIZE])
		void *buff = entry->data[offset / PAGE_SIZE]
				+ (offset % PAGE_SIZE);
		int bytes = min_t(int, entry->length - offset,
				PAGE_SIZE - (offset % PAGE_SIZE));

		if (bytes >= remaining) {
			memcpy(buffer, buff, remaining);
			kunmap_atomic(buff);
			remaining = 0;
			break;
		}

		memcpy(buffer, buff, bytes);
		kunmap_atomic(buff);
		buffer += bytes;
		remaining -= bytes;
		offset += bytes;
@@ -409,38 +419,43 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
	int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct page **page;
	void *buff;
	int res;
	int i, res;
	void *table, *buffer, **data;
	struct squashfs_page_actor *actor;

	page = alloc_page_array(pages, GFP_KERNEL);
	if (!page)
	table = buffer = kmalloc(length, GFP_KERNEL);
	if (table == NULL)
		return ERR_PTR(-ENOMEM);

	actor = squashfs_page_actor_init(page, pages, length, NULL);
	if (actor == NULL) {
	data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (data == NULL) {
		res = -ENOMEM;
		goto failed;
	}

	actor = squashfs_page_actor_init(data, pages, length);
	if (actor == NULL) {
		res = -ENOMEM;
		goto failed2;
	}

	for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
		data[i] = buffer;

	res = squashfs_read_data(sb, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);

	kfree(data);
	kfree(actor);

	if (res < 0)
		goto failed2;
		goto failed;

	buff = kmalloc(length, GFP_KERNEL);
	if (!buff)
		goto failed2;
	squashfs_actor_to_buf(actor, buff, length);
	squashfs_page_actor_free(actor, 0);
	free_page_array(page, pages);
	return buff;
	return table;

failed2:
	squashfs_page_actor_free(actor, 0);
	kfree(data);
failed:
	free_page_array(page, pages);
	kfree(table);
	return ERR_PTR(res);
}
+25 −30
Original line number Diff line number Diff line
@@ -24,8 +24,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -95,26 +94,24 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	void *comp_opts, *buffer = NULL;
	struct page *page;
	void *buffer = NULL, *comp_opts;
	struct squashfs_page_actor *actor = NULL;
	int length = 0;

	if (!SQUASHFS_COMP_OPTS(flags))
		return squashfs_comp_opts(msblk, buffer, length);

	/*
	 * Read decompressor specific options from file system if present
	 */
	if (SQUASHFS_COMP_OPTS(flags)) {
		buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (buffer == NULL) {
			comp_opts = ERR_PTR(-ENOMEM);
			goto out;
		}

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	actor = squashfs_page_actor_init(&page, 1, 0, NULL);
		actor = squashfs_page_actor_init(&buffer, 1, 0);
		if (actor == NULL) {
			comp_opts = ERR_PTR(-ENOMEM);
		goto actor_error;
			goto out;
		}

		length = squashfs_read_data(sb,
@@ -122,17 +119,15 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags)

		if (length < 0) {
			comp_opts = ERR_PTR(length);
		goto read_error;
			goto out;
		}
	}

	buffer = kmap_atomic(page);
	comp_opts = squashfs_comp_opts(msblk, buffer, length);
	kunmap_atomic(buffer);

read_error:
	squashfs_page_actor_free(actor, 0);
actor_error:
	__free_page(page);
out:
	kfree(actor);
	kfree(buffer);
	return comp_opts;
}

Loading