Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1b21f5b2 authored by Philip J Kelleher's avatar Philip J Kelleher Committed by Jens Axboe
Browse files

rsxx: Moving pci_map_page to prevent overflow.



The pci_map_page function has been moved into our
issued workqueue to prevent us from running out of
mappable addresses on non-HWWD PCIe x8 slots. The
maximum amount that can possibly be mapped at one
time now is: 255 dmas X 4 dma channels X 4096 Bytes.

Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e5feab22
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -749,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)

	card->eeh_state = 0;

	st = rsxx_eeh_remap_dmas(card);
	if (st)
		goto failed_remap_dmas;

	spin_lock_irqsave(&card->irq_lock, flags);
	if (card->n_targets & RSXX_MAX_TARGETS)
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -779,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
	return PCI_ERS_RESULT_RECOVERED;

failed_hw_buffers_init:
failed_remap_dmas:
	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev,
+28 −42
Original line number Diff line number Diff line
@@ -397,6 +397,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
	int tag;
	int cmds_pending = 0;
	struct hw_cmd *hw_cmd_buf;
	int dir;

	hw_cmd_buf = ctrl->cmd.buf;

@@ -433,6 +434,28 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
			continue;
		}

		if (dma->cmd == HW_CMD_BLK_WRITE)
			dir = PCI_DMA_TODEVICE;
		else
			dir = PCI_DMA_FROMDEVICE;

		/*
		 * The function pci_map_page is placed here because we can
		 * only, by design, issue up to 255 commands to the hardware
		 * at one time per DMA channel. So the maximum amount of mapped
		 * memory would be 255 * 4 channels * 4096 Bytes which is less
		 * than 2GB, the limit of a x8 Non-HWWD PCIe slot. This way the
		 * pci_map_page function should never fail because of a
		 * lack of mappable memory.
		 */
		dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
				     dma->pg_off, dma->sub_page.cnt << 9, dir);
		if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
			push_tracker(ctrl->trackers, tag);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			continue;
		}

		set_tracker_dma(ctrl->trackers, tag, dma);
		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
@@ -629,14 +652,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
	if (!dma)
		return -ENOMEM;

	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
				     dir ? PCI_DMA_TODEVICE :
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
		kmem_cache_free(rsxx_dma_pool, dma);
		return -ENOMEM;
	}

	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr        = laddr;
	dma->sub_page.off = (dma_off >> 9);
@@ -1039,6 +1054,11 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
			else
				card->ctrl[i].stats.reads_issued--;

			pci_unmap_page(card->dev, dma->dma_addr,
				       get_dma_size(dma),
				       dma->cmd == HW_CMD_BLK_WRITE ?
				       PCI_DMA_TODEVICE :
				       PCI_DMA_FROMDEVICE);
			list_add_tail(&dma->list, &issued_dmas[i]);
			push_tracker(card->ctrl[i].trackers, j);
			cnt++;
@@ -1050,15 +1070,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
		card->ctrl[i].stats.sw_q_depth += cnt;
		card->ctrl[i].e_cnt = 0;

		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
			if (!pci_dma_mapping_error(card->dev, dma->dma_addr))
				pci_unmap_page(card->dev, dma->dma_addr,
					       get_dma_size(dma),
					       dma->cmd == HW_CMD_BLK_WRITE ?
					       PCI_DMA_TODEVICE :
					       PCI_DMA_FROMDEVICE);
		}
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

@@ -1067,31 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
	return 0;
}

/*
 * Re-create the PCI DMA mappings for every rsxx_dma still sitting on each
 * channel's software queue.  Called from the slot-reset path (see
 * rsxx_slot_reset above) so that previously queued DMAs can be reissued
 * after the device comes back.
 *
 * For each of the card's DMA channels, walk the pending queue under
 * queue_lock and map each dma's page, picking the mapping direction from
 * the saved command type (writes map to-device, everything else
 * from-device).
 *
 * Returns 0 on success, or -ENOMEM if any single mapping fails.
 *
 * NOTE(review): on a mapping failure the current dma is returned to
 * rsxx_dma_pool while it is still linked into ctrl[i].queue, and the
 * mappings created earlier in this pass are not unwound — this looks like
 * a use-after-free / leak on the error path; confirm against the caller's
 * failed_remap_dmas handling.  (This commit removes the function entirely,
 * which sidesteps the issue.)
 */
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
{
	struct rsxx_dma *dma;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_bh(&card->ctrl[i].queue_lock);
		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
			/* Direction mirrors the original submission. */
			dma->dma_addr = pci_map_page(card->dev, dma->page,
					dma->pg_off, get_dma_size(dma),
					dma->cmd == HW_CMD_BLK_WRITE ?
					PCI_DMA_TODEVICE :
					PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
				spin_unlock_bh(&card->ctrl[i].queue_lock);
				kmem_cache_free(rsxx_dma_pool, dma);
				return -ENOMEM;
			}
		}
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

	return 0;
}

int rsxx_dma_init(void)
{
	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);