Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6adb03de authored by Javier González's avatar Javier González Committed by Jens Axboe
Browse files

lightnvm: update closed list outside of intr context



When an I/O finishes, full blocks are moved from the open to the closed
list - a lock is taken to protect the list. This currently happens in
interrupt context, which is not correct.

This patch moves this logic to the block workqueue instead, avoiding
taking a spinlock without disabling interrupts while in interrupt context.

Signed-off-by: Javier González <javier@cnexlabs.com>
Fixes: ff0e498b ("lightnvm: manage open and closed blocks separately")
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent fa3184b8
Loading
Loading
Loading
Loading
+10 −13
Original line number Diff line number Diff line
@@ -497,12 +497,21 @@ static void rrpc_gc_queue(struct work_struct *work)
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
@@ -666,22 +675,10 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
			struct nvm_block *blk = rblk->parent;
			struct rrpc_lun *rlun = rblk->rlun;

			spin_lock(&lun->lock);
			lun->nr_open_blocks--;
			lun->nr_closed_blocks++;
			blk->state &= ~NVM_BLK_ST_OPEN;
			blk->state |= NVM_BLK_ST_CLOSED;
			list_move_tail(&rblk->list, &rlun->closed_list);
			spin_unlock(&lun->lock);

		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{