Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e890a867, authored by Dan Williams, committed by Greg Kroah-Hartman
Browse files

mm, hmm: use devm semantics for hmm_devmem_{add, remove}

commit 58ef15b765af0d2cbe6799ec564f1dc485010ab8 upstream.

devm semantics arrange for resources to be torn down when
device-driver-probe fails or when device-driver-release completes.
Similar to devm_memremap_pages() there is no need to support an explicit
remove operation when the users properly adhere to devm semantics.

Note that devm_kzalloc() automatically handles allocating node-local
memory.

Link: http://lkml.kernel.org/r/154275559545.76910.9186690723515469051.stgit@dwillia2-desk3.amr.corp.intel.com


Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c215c66c
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -499,8 +499,7 @@ struct hmm_devmem {
 * enough and allocate struct page for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct. The device driver must call hmm_devmem_remove() before the
 * device goes away and before freeing the hmm_devmem struct memory.
 * driver struct.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
@@ -508,7 +507,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);
void hmm_devmem_remove(struct hmm_devmem *devmem);

/*
 * hmm_devmem_page_set_drvdata - set per-page driver data field
+24 −103
Original line number Diff line number Diff line
@@ -945,7 +945,6 @@ static void hmm_devmem_ref_exit(void *data)

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
@@ -956,7 +955,6 @@ static void hmm_devmem_ref_kill(void *data)
	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
@@ -994,7 +992,7 @@ static void hmm_devmem_radix_release(struct resource *resource)
	mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(struct device *dev, void *data)
static void hmm_devmem_release(void *data)
{
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
@@ -1002,11 +1000,6 @@ static void hmm_devmem_release(struct device *dev, void *data)
	struct zone *zone;
	struct page *page;

	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(&devmem->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
@@ -1130,19 +1123,6 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
	return ret;
}

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
	struct hmm_devmem *devmem = data;

	return devmem->resource == match_data;
}

/*
 * Find and release the devres entry backing @devmem's ZONE_DEVICE pages;
 * devres_release() runs hmm_devmem_release() on the matched entry.
 */
static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
	struct device *dev = devmem->device;

	devres_release(dev, &hmm_devmem_release, &hmm_devmem_match,
		       devmem->resource);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
@@ -1170,8 +1150,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

@@ -1185,11 +1164,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;
		return ERR_PTR(ret);

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
@@ -1209,16 +1188,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource) {
			ret = -ENOMEM;
			goto error_no_resource;
		}
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource) {
		ret = -ERANGE;
		goto error_no_resource;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
@@ -1227,28 +1202,13 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_pages;

	devres_add(device, devmem);
		return ERR_PTR(ret);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
	if (ret)
		return ERR_PTR(ret);
	}

	return devmem;

error_pages:
	devm_release_mem_region(device, devmem->resource->start,
				resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);

@@ -1264,8 +1224,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

@@ -1279,12 +1238,12 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;
		return ERR_PTR(ret);

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
			&devmem->ref);
	if (ret)
		goto error_devm_add_action;

		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
@@ -1292,59 +1251,21 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_devm_add_action;
		return ERR_PTR(ret);

	devres_add(device, devmem);
	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
	ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
			&devmem->ref);
	if (ret)
		return ERR_PTR(ret);
	}

	return devmem;

error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add_resource);

/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
 * of the device driver. It will free struct page and remove the resource that
 * reserved the physical address range for this device memory.
 *
 * NULL is tolerated (no-op). Teardown order matters: the percpu ref is
 * killed and drained before the pages are unmapped, and the mem region is
 * released last, only for private (non-CDM) memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
	resource_size_t start, size;
	struct device *device;
	bool cdm = false;

	if (!devmem)
		return;

	/* Capture device/range now; devmem is torn down below. */
	device = devmem->device;
	start = devmem->resource->start;
	size = resource_size(devmem->resource);

	/*
	 * Public (CDM) memory uses a pre-existing resource owned by the
	 * caller, so skip the devm_release_mem_region() at the end.
	 */
	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
	hmm_devmem_ref_kill(&devmem->ref);   /* stop new refs, wait for users */
	hmm_devmem_ref_exit(&devmem->ref);   /* free percpu ref resources */
	hmm_devmem_pages_remove(devmem);     /* unmap and free struct pages */

	if (!cdm)
		devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);

/*
 * A device driver that wants to handle multiple devices memory through a
 * single fake device can use hmm_device to do so. This is purely a helper