
Commit 391e4dea authored by Michael S. Tsirkin, committed by Roland Dreier

IB/mthca: Fix access to MTT and MPT tables on non-cache-coherent CPUs



We allocate the MTT table with alloc_pages() and then do pci_map_sg(),
so we must call pci_dma_sync_sg() after the CPU writes to the MTT
table.  This works since the device will never write MTTs on mem-free
HCAs, once we get rid of the use of the WRITE_MTT firmware command.
This change is needed to make that work, and is an improvement for
now, since it gives FMRs a chance at working.
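
As a rough illustration of the streaming-DMA pattern described above (a sketch only, not the driver's code; the helper name, device pointer and page size are illustrative), the CPU-side writes have to be flushed toward the device once the mapped table has been filled in:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: a table that the CPU writes and the device only reads.
 * Error handling (dma_mapping_error() etc.) is omitted for brevity.
 */
static void *map_cpu_written_table(struct device *dev, dma_addr_t *handle)
{
	void *table = (void *) __get_free_page(GFP_KERNEL);

	if (!table)
		return NULL;

	*handle = dma_map_single(dev, table, PAGE_SIZE, DMA_TO_DEVICE);

	/* ... CPU fills in MTT-style entries in 'table' ... */

	/* On a non-cache-coherent CPU the device cannot see the writes
	 * until they are explicitly synced toward it.
	 */
	dma_sync_single_for_device(dev, *handle, PAGE_SIZE, DMA_TO_DEVICE);

	return table;
}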

For MPTs, both the device and CPU might write there, so we must
allocate DMA coherent memory for these.
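
Since the MPT entries are written from both sides, the usual alternative to streaming mappings plus explicit syncs is a coherent allocation; a minimal sketch under the same caveats (illustrative names, not the driver's code):

#include <linux/dma-mapping.h>

/* Sketch: memory that both the CPU and the HCA may write.
 * Coherent memory needs no dma_sync_*() calls around accesses.
 */
static void *alloc_shared_table(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}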

Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 1d1f19cf
+21 −15 (likely drivers/infiniband/hw/mthca/mthca_main.c, judging from the functions in the hunks below)
@@ -379,7 +379,7 @@ static int mthca_load_fw(struct mthca_dev *mdev)

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN);
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
@@ -412,7 +412,7 @@ static int mthca_load_fw(struct mthca_dev *mdev)
	mthca_UNMAP_FA(mdev, &status);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}

@@ -441,7 +441,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN);
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
@@ -471,7 +471,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 MTHCA_MTT_SEG_SIZE,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts, 1);
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
@@ -481,7 +482,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws, 1);
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
@@ -491,7 +493,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps, 0);
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
@@ -501,7 +504,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps, 0);
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
@@ -511,7 +515,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift,
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
@@ -522,7 +526,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
       mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						    dev_lim->cqc_entry_sz,
						    mdev->limits.num_cqs,
						    mdev->limits.reserved_cqs, 0);
						    mdev->limits.reserved_cqs,
						    0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
@@ -534,7 +539,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs, 0);
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
@@ -554,7 +560,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0);
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
@@ -592,7 +598,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
	mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}
@@ -613,7 +619,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}

static int mthca_init_arbel(struct mthca_dev *mdev)
@@ -697,7 +703,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)

err_stop_fw:
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
@@ -716,7 +722,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
		mthca_free_icms(mdev);

		mthca_UNMAP_FA(mdev, &status);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev, &status);
+98 −29 (likely drivers/infiniband/hw/mthca/mthca_memfree.c, judging from the functions in the hunks below)
@@ -35,6 +35,9 @@
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
@@ -58,15 +61,10 @@ struct mthca_user_db_table {
	}                page[0];
};

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	struct mthca_icm_chunk *chunk, *tmp;
	int i;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);
@@ -74,6 +72,31 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
	for (i = 0; i < chunk->npages; ++i)
		__free_pages(chunk->mem[i].page,
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(chunk->mem[i].page),
				  sg_dma_address(&chunk->mem[i]));
	}
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}
@@ -81,12 +104,41 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
	kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	mem->page = alloc_pages(gfp_mask, order);
	if (!mem->page)
		return -ENOMEM;

	mem->length = PAGE_SIZE << order;
	mem->offset = 0;
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask)
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
@@ -112,21 +164,28 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
		while (1 << cur_order > npages)
			--cur_order;

		chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
		if (chunk->mem[chunk->npages].page) {
			chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
			chunk->mem[chunk->npages].offset = 0;
		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
			if (!coherent && chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;
			}

			npages -= 1 << cur_order;
		} else {
@@ -136,7 +195,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
		}
	}

	if (chunk) {
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);
@@ -148,7 +207,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
	return icm;

fail:
	mthca_free_icm(dev, icm);
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

@@ -167,7 +226,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN);
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
@@ -175,7 +234,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i]);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
@@ -204,16 +263,16 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i]);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

void *mthca_table_find(struct mthca_icm_table *table, int obj)
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, i;
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;
@@ -225,13 +284,22 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	offset = idx % MTHCA_TABLE_CHUNK_SIZE;
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/* DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to. */
			if (chunk->mem[i].length > offset) {
				page = chunk->mem[i].page;
				goto out;
@@ -283,7 +351,7 @@ void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem)
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int num_icm;
@@ -302,6 +370,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
@@ -314,12 +383,12 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN);
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i]);
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}
@@ -339,7 +408,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				        &status);
			mthca_free_icm(dev, table->icm[i]);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
@@ -357,7 +426,7 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i]);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
+5 −4 (likely drivers/infiniband/hw/mthca/mthca_memfree.h, judging from the declarations in the hunks below)
@@ -69,6 +69,7 @@ struct mthca_icm_table {
	int               num_obj;
	int               obj_size;
	int               lowmem;
	int               coherent;
	struct mutex      mutex;
	struct mthca_icm *icm[0];
};
@@ -82,17 +83,17 @@ struct mthca_icm_iter {
struct mthca_dev;

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask);
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm);
				  gfp_t gfp_mask, int coherent);
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent);

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem);
					      int use_lowmem, int use_coherent);
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table);
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj);
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj);
void *mthca_table_find(struct mthca_icm_table *table, int obj);
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle);
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end);
void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
+6 −2 (likely drivers/infiniband/hw/mthca/mthca_mr.c, judging from the functions in the hunks below)
@@ -524,7 +524,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
		if (err)
			goto err_out_mpt_free;

		mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
		mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
		BUG_ON(!mr->mem.arbel.mpt);
	} else
		mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
@@ -538,7 +538,8 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,

	if (mthca_is_memfree(dev)) {
		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
						      mr->mtt->first_seg);
						      mr->mtt->first_seg,
						      &mr->mem.arbel.dma_handle);
		BUG_ON(!mr->mem.arbel.mtts);
	} else
		mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
@@ -712,6 +713,9 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
						     MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
			list_len * sizeof(u64), DMA_TO_DEVICE);

	fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
	fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
	fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
+1 −0 (likely drivers/infiniband/hw/mthca/mthca_provider.h, judging from the struct in the hunk below)
@@ -89,6 +89,7 @@ struct mthca_fmr {
		struct {
			struct mthca_mpt_entry *mpt;
			__be64 *mtts;
			dma_addr_t dma_handle;
		} arbel;
	} mem;
};