
Commit ba82fe2e authored by Cong Wang, committed by Cong Wang

zram: remove the second argument of k[un]map_atomic()

parent e3debd27
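
For context on the interface change this commit tracks: the old kmap_atomic()/kunmap_atomic() took an explicit km_type slot (KM_USER0, KM_USER1, ...) that the caller had to pick and match between map and unmap; the reworked kmap_atomic() core keeps a small per-CPU stack of slots internally, so the slot argument became dead weight. A minimal before/after sketch (the copy_to_buf helpers are hypothetical, not part of this commit):

#include <linux/highmem.h>
#include <linux/string.h>

/* Old interface: the caller names the per-CPU kmap slot explicitly and
 * must pass the same slot back to kunmap_atomic(). */
static void copy_to_buf_old(struct page *page, void *buf, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(buf, src, len);
	kunmap_atomic(src, KM_USER0);
}

/* New interface: kmap_atomic() picks the slot from a per-CPU stack, so
 * callers only need to release mappings in LIFO order. */
static void copy_to_buf_new(struct page *page, void *buf, size_t len)
{
	void *src = kmap_atomic(page);

	memcpy(buf, src, len);
	kunmap_atomic(src);
}

The diff below is purely mechanical: every kmap_atomic()/kunmap_atomic() call, and the get_ptr_atomic()/put_ptr_atomic() wrappers around them, loses its km_type argument with no change in behavior.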
drivers/staging/zram/xvmalloc.c: +19 −20
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
  * This is called from xv_malloc/xv_free path, so it
  * needs to be fast.
  */
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+static void *get_ptr_atomic(struct page *page, u16 offset)
 {
 	unsigned char *base;
 
-	base = kmap_atomic(page, type);
+	base = kmap_atomic(page);
 	return base + offset;
 }
 
-static void put_ptr_atomic(void *ptr, enum km_type type)
+static void put_ptr_atomic(void *ptr)
 {
-	kunmap_atomic(ptr, type);
+	kunmap_atomic(ptr);
 }
 
 static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
 
 	if (block->link.next_page) {
 		nextblock = get_ptr_atomic(block->link.next_page,
-					block->link.next_offset, KM_USER1);
+					block->link.next_offset);
 		nextblock->link.prev_page = page;
 		nextblock->link.prev_offset = offset;
-		put_ptr_atomic(nextblock, KM_USER1);
+		put_ptr_atomic(nextblock);
 		/* If there was a next page then the free bits are set. */
 		return;
 	}
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 
 	if (block->link.prev_page) {
 		tmpblock = get_ptr_atomic(block->link.prev_page,
-				block->link.prev_offset, KM_USER1);
+				block->link.prev_offset);
 		tmpblock->link.next_page = block->link.next_page;
 		tmpblock->link.next_offset = block->link.next_offset;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	}
 
 	if (block->link.next_page) {
 		tmpblock = get_ptr_atomic(block->link.next_page,
-				block->link.next_offset, KM_USER1);
+				block->link.next_offset);
 		tmpblock->link.prev_page = block->link.prev_page;
 		tmpblock->link.prev_offset = block->link.prev_offset;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	}
 
 	/* Is this block is at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 		if (pool->freelist[slindex].page) {
 			struct block_header *tmpblock;
 			tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-					pool->freelist[slindex].offset,
-					KM_USER1);
+					pool->freelist[slindex].offset);
 			tmpblock->link.prev_page = NULL;
 			tmpblock->link.prev_offset = 0;
-			put_ptr_atomic(tmpblock, KM_USER1);
+			put_ptr_atomic(tmpblock);
 		} else {
 			/* This freelist bucket is empty */
 			__clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 	stat_inc(&pool->total_pages);
 
 	spin_lock(&pool->lock);
-	block = get_ptr_atomic(page, 0, KM_USER0);
+	block = get_ptr_atomic(page, 0);
 
 	block->size = PAGE_SIZE - XV_ALIGN;
 	set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 
 	insert_block(pool, page, 0, block);
 
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 
 	return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 		return -ENOMEM;
 	}
 
-	block = get_ptr_atomic(*page, *offset, KM_USER0);
+	block = get_ptr_atomic(*page, *offset);
 
 	remove_block(pool, *page, *offset, block, index);
 
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 	block->size = origsize;
 	clear_flag(block, BLOCK_FREE);
 
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 
 	*offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
 	spin_lock(&pool->lock);
 
-	page_start = get_ptr_atomic(page, 0, KM_USER0);
+	page_start = get_ptr_atomic(page, 0);
 	block = (struct block_header *)((char *)page_start + offset);
 
 	/* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
 	/* No used objects in this page. Free it. */
 	if (block->size == PAGE_SIZE - XV_ALIGN) {
-		put_ptr_atomic(page_start, KM_USER0);
+		put_ptr_atomic(page_start);
 		spin_unlock(&pool->lock);
 
 		__free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 		set_blockprev(tmpblock, offset);
 	}
 
-	put_ptr_atomic(page_start, KM_USER0);
+	put_ptr_atomic(page_start);
 	spin_unlock(&pool->lock);
 }
 EXPORT_SYMBOL_GPL(xv_free);
drivers/staging/zram/zram_drv.c: +22 −22
@@ -161,9 +161,9 @@ static void zram_free_page(struct zram *zram, size_t index)
 		goto out;
 	}
 
-	obj = kmap_atomic(page, KM_USER0) + offset;
+	obj = kmap_atomic(page) + offset;
 	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
-	kunmap_atomic(obj, KM_USER0);
+	kunmap_atomic(obj);
 
 	xv_free(zram->mem_pool, page, offset);
 	if (clen <= PAGE_SIZE / 2)
@@ -182,9 +182,9 @@ static void handle_zero_page(struct bio_vec *bvec)
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -195,12 +195,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	struct page *page = bvec->bv_page;
 	unsigned char *user_mem, *cmem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+	user_mem = kmap_atomic(page);
+	cmem = kmap_atomic(zram->table[index].page);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -249,12 +249,12 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		}
 	}
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (!is_partial_io(bvec))
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
@@ -267,8 +267,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		kfree(uncmem);
 	}
 
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -295,20 +295,20 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
 				    xv_get_object_size(cmem) - sizeof(*zheader),
 				    mem, &clen);
-	kunmap_atomic(cmem, KM_USER0);
+	kunmap_atomic(cmem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -359,7 +359,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -368,7 +368,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -380,7 +380,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 			kfree(uncmem);
 
@@ -408,7 +408,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		zram->table[index].page = page_store;
-		src = kmap_atomic(page, KM_USER0);
+		src = kmap_atomic(page);
 		goto memstore;
 	}
 
@@ -424,7 +424,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 memstore:
 	zram->table[index].offset = store_offset;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 #if 0
@@ -438,9 +438,9 @@ memstore:
 
 	memcpy(cmem, src, clen);
 
-	kunmap_atomic(cmem, KM_USER1);
+	kunmap_atomic(cmem);
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);