
Commit 0d00c488 authored by Thomas Hellstrom

drm/vmwgfx: Fix the driver for large dma addresses



With DMA compliance / IOMMU support added to the driver in kernel 3.13,
DMA addresses can exceed 44 bits, which is the maximum the driver can
handle in 32-bit mode and with GMR1.
So restrict DMA addresses to 44 bits in 32-bit mode, and optionally in
64-bit mode, and strip the old GMR1 code.
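
For context, the 44-bit figure follows from the GMR1 descriptor format:
a descriptor stores the page frame number in a 32-bit field, so with
4 KiB pages at most 32 + 12 = 44 address bits can be expressed. A
minimal userspace sketch of that arithmetic (illustrative only, not
part of the patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	/* A GMR1 descriptor holds a 32-bit page frame number (ppn). */
	uint64_t max_ppn  = UINT32_MAX;
	uint64_t max_addr = (max_ppn << PAGE_SHIFT) | ((1ULL << PAGE_SHIFT) - 1);

	/* Prints 0xfffffffffff, i.e. (1ULL << 44) - 1. */
	printf("highest GMR1-addressable byte: 0x%llx\n",
	       (unsigned long long)max_addr);
	return 0;
}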

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Cc: stable@vger.kernel.org
parent c5416d66
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  +36 −11
@@ -189,6 +189,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -203,6 +204,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);


static void vmw_print_capabilities(uint32_t capabilities)
@@ -510,6 +513,33 @@ out_fixup:
	return 0;
}

+/**
+ * vmw_dma_masks - set required page- and dma masks
+ *
+ * @dev: Pointer to struct drm-device
+ *
+ * With 32-bit we can only handle 32 bit PFNs. Optionally set that
+ * restriction also for 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (intel_iommu_enabled &&
+	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+	}
+	return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+	return 0;
+}
+#endif
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
@@ -578,14 +608,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)

	vmw_get_initial_size(dev_priv);

-	if (dev_priv->capabilities & SVGA_CAP_GMR) {
-		dev_priv->max_gmr_descriptors =
-			vmw_read(dev_priv,
-				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
-	}
-	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
@@ -599,17 +624,17 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
		dev_priv->memory_size = 512*1024*1024;
	}

+	ret = vmw_dma_masks(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

-	if (dev_priv->capabilities & SVGA_CAP_GMR) {
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  +0 −1
@@ -290,7 +290,6 @@ struct vmw_private {
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
-	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t memory_size;
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c  +3 −157
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
}


-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
-				     struct list_head *desc_pages)
-{
-	struct page *page, *next;
-	struct svga_guest_mem_descriptor *page_virtual;
-	unsigned int desc_per_page = PAGE_SIZE /
-		sizeof(struct svga_guest_mem_descriptor) - 1;
-
-	if (list_empty(desc_pages))
-		return;
-
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-
-		if (likely(desc_dma != DMA_ADDR_INVALID)) {
-			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
-				       DMA_TO_DEVICE);
-		}
-
-		page_virtual = kmap_atomic(page);
-		desc_dma = (dma_addr_t)
-			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
-			PAGE_SHIFT;
-		kunmap_atomic(page_virtual);
-
-		__free_page(page);
-	}
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
-				     struct list_head *desc_pages,
-				     struct vmw_piter *iter,
-				     unsigned long num_pages,
-				     dma_addr_t *first_dma)
-{
-	struct page *page;
-	struct svga_guest_mem_descriptor *page_virtual = NULL;
-	struct svga_guest_mem_descriptor *desc_virtual = NULL;
-	unsigned int desc_per_page;
-	unsigned long prev_pfn;
-	unsigned long pfn;
-	int ret;
-	dma_addr_t desc_dma;
-
-	desc_per_page = PAGE_SIZE /
-	    sizeof(struct svga_guest_mem_descriptor) - 1;
-
-	while (likely(num_pages != 0)) {
-		page = alloc_page(__GFP_HIGHMEM);
-		if (unlikely(page == NULL)) {
-			ret = -ENOMEM;
-			goto out_err;
-		}
-
-		list_add_tail(&page->lru, desc_pages);
-		page_virtual = kmap_atomic(page);
-		desc_virtual = page_virtual - 1;
-		prev_pfn = ~(0UL);
-
-		while (likely(num_pages != 0)) {
-			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
-			if (pfn != prev_pfn + 1) {
-
-				if (desc_virtual - page_virtual ==
-				    desc_per_page - 1)
-					break;
-
-				(++desc_virtual)->ppn = cpu_to_le32(pfn);
-				desc_virtual->num_pages = cpu_to_le32(1);
-			} else {
-				uint32_t tmp =
-				    le32_to_cpu(desc_virtual->num_pages);
-				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
-			}
-			prev_pfn = pfn;
-			--num_pages;
-			vmw_piter_next(iter);
-		}
-
-		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
-		desc_virtual->num_pages = cpu_to_le32(0);
-		kunmap_atomic(page_virtual);
-	}
-
-	desc_dma = 0;
-	list_for_each_entry_reverse(page, desc_pages, lru) {
-		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = cpu_to_le32
-			(desc_dma >> PAGE_SHIFT);
-		kunmap_atomic(page_virtual);
-		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
-					DMA_TO_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, desc_dma)))
-			goto out_err;
-	}
-	*first_dma = desc_dma;
-
-	return 0;
-out_err:
-	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
-	return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-				     int gmr_id, dma_addr_t desc_dma)
-{
-	mutex_lock(&dev_priv->hw_mutex);
-
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
-	mb();
-
-	mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_pages,
		 int gmr_id)
{
-	struct list_head desc_pages;
-	dma_addr_t desc_dma = 0;
-	struct device *dev = dev_priv->dev->dev;
	struct vmw_piter data_iter;
-	int ret;

	vmw_piter_start(&data_iter, vsgt, 0);

	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
-	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
		return -EINVAL;

-	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
-		return -EINVAL;
-
-	INIT_LIST_HEAD(&desc_pages);
-
-	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
-					num_pages, &desc_dma);
-	if (unlikely(ret != 0))
-		return ret;
-
-	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
-	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
-	return 0;
+	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}


void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		vmw_gmr2_unbind(dev_priv, gmr_id);
-		return;
-	}
-
-	mutex_lock(&dev_priv->hw_mutex);
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
-	mb();
-	mutex_unlock(&dev_priv->hw_mutex);
}
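
With this change, 32-bit kernels running with the Intel IOMMU enabled
always get the 44-bit DMA mask, while 64-bit kernels can opt in through
the new module parameter, e.g. vmwgfx.restrict_dma_mask=1 on the kernel
command line or restrict_dma_mask=1 when loading the module.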