Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e4274cfa authored by Anup Patel, committed by Vinod Koul
Browse files

dmaengine: bcm-sba-raid: Reduce locking context in sba_alloc_request()



We don't need to hold "sba->reqs_lock" for a long time
in sba_alloc_request(), because lock protection is not
required when initializing members of "struct sba_request".

Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent e897091a
Loading
Loading
Loading
Loading
+11 −11
Original line number Diff line number Diff line
@@ -207,11 +207,16 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	req = list_first_entry_or_null(&sba->reqs_free_list,
				       struct sba_request, node);
	if (req) {
		list_move_tail(&req->node, &sba->reqs_alloc_list);
		sba->reqs_free_count--;
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
	if (!req)
		return NULL;

	req->state = SBA_REQUEST_STATE_ALLOCED;
	req->fence = false;
	req->first = req;
@@ -219,12 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
	req->next_count = 1;
	atomic_set(&req->next_pending_count, 1);

		sba->reqs_free_count--;

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	}

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return req;
}