
Commit f614a9f4 authored by Minchan Kim, committed by Linus Torvalds

zram: remove workqueue for freeing removed pending slot



Commit a0c516cb ("zram: don't grab mutex in zram_slot_free_noity")
introduced a pending-free request list to avoid sleeping on a mutex while
holding a spinlock, but the result was messy: it made the code lengthy and
added overhead.

Now that zram->lock is no longer needed to free a slot, this patch reverts
that change; the free path is protected by tb_lock instead.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
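
For reference, after this patch the notification path frees the page directly
under the table lock instead of queueing work. A minimal sketch of the
resulting function, with field and lock names as they appear in the diff
below:

/*
 * Sketch of the post-patch notify path: free the slot under meta->tb_lock
 * rather than deferring it to a workqueue.
 */
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram = bdev->bd_disk->private_data;
	struct zram_meta *meta = zram->meta;

	write_lock(&meta->tb_lock);	/* tb_lock now serializes the free */
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

Because this callback runs in atomic context, a spinning rwlock like tb_lock
is safe to take here, whereas sleeping on zram->lock was not; that constraint
is why the pending-free list existed in the first place.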
parent 92967471
+6 −48
@@ -522,20 +522,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	return ret;
 }
 
-static void handle_pending_slot_free(struct zram *zram)
-{
-	struct zram_slot_free *free_rq;
-
-	spin_lock(&zram->slot_free_lock);
-	while (zram->slot_free_rq) {
-		free_rq = zram->slot_free_rq;
-		zram->slot_free_rq = free_rq->next;
-		zram_free_page(zram, free_rq->index);
-		kfree(free_rq);
-	}
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset, struct bio *bio, int rw)
 {
@@ -547,7 +533,6 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 		up_read(&zram->lock);
 	} else {
 		down_write(&zram->lock);
-		handle_pending_slot_free(zram);
 		ret = zram_bvec_write(zram, bvec, index, offset);
 		up_write(&zram->lock);
 	}
@@ -566,8 +551,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 		return;
 	}
 
-	flush_work(&zram->free_work);
-
 	meta = zram->meta;
 	zram->init_done = 0;
 
@@ -769,40 +752,19 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 	bio_io_error(bio);
 }
 
-static void zram_slot_free(struct work_struct *work)
-{
-	struct zram *zram;
-
-	zram = container_of(work, struct zram, free_work);
-	down_write(&zram->lock);
-	handle_pending_slot_free(zram);
-	up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
-	spin_lock(&zram->slot_free_lock);
-	free_rq->next = zram->slot_free_rq;
-	zram->slot_free_rq = free_rq;
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static void zram_slot_free_notify(struct block_device *bdev,
 				unsigned long index)
 {
 	struct zram *zram;
-	struct zram_slot_free *free_rq;
+	struct zram_meta *meta;
 
 	zram = bdev->bd_disk->private_data;
-	atomic64_inc(&zram->stats.notify_free);
-
-	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
-	if (!free_rq)
-		return;
+	meta = zram->meta;
 
-	free_rq->index = index;
-	add_slot_free(zram, free_rq);
-	schedule_work(&zram->free_work);
+	write_lock(&meta->tb_lock);
+	zram_free_page(zram, index);
+	write_unlock(&meta->tb_lock);
+	atomic64_inc(&zram->stats.notify_free);
 }
 
 static const struct block_device_operations zram_devops = {
@@ -849,10 +811,6 @@ static int create_device(struct zram *zram, int device_id)
 	init_rwsem(&zram->lock);
 	init_rwsem(&zram->init_lock);
 
-	INIT_WORK(&zram->free_work, zram_slot_free);
-	spin_lock_init(&zram->slot_free_lock);
-	zram->slot_free_rq = NULL;
-
 	zram->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!zram->queue) {
 		pr_err("Error allocating disk queue for device %d\n",
+0 −10
@@ -90,20 +90,11 @@ struct zram_meta {
 	struct zs_pool *mem_pool;
 };
 
-struct zram_slot_free {
-	unsigned long index;
-	struct zram_slot_free *next;
-};
-
 struct zram {
 	struct zram_meta *meta;
 	struct rw_semaphore lock; /* protect compression buffers,
 				   * reads and writes
 				   */
-
-	struct work_struct free_work;  /* handle pending free request */
-	struct zram_slot_free *slot_free_rq; /* list head of free request */
-
 	struct request_queue *queue;
 	struct gendisk *disk;
 	int init_done;
@@ -114,7 +105,6 @@ struct zram {
 	 * we can store in a disk.
 	 */
 	u64 disksize;	/* bytes */
-	spinlock_t slot_free_lock;
 
 	struct zram_stats stats;
 };