
Commit 1d5cfdb0 authored by Tetsuo Handa, committed by Linus Torvalds

tree wide: use kvfree() rather than conditional kfree()/vfree()



There are many locations that do

  if (memory_was_allocated_by_vmalloc)
    vfree(ptr);
  else
    kfree(ptr);

but kvfree() can handle both kmalloc()ed memory and vmalloc()ed memory
using is_vmalloc_addr().  Unless callers have special reasons, we can
replace this branch with kvfree().  Please check and reply if you find
any problems.
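
For reference, kvfree() itself centralizes exactly this branch.  A
minimal sketch of its logic (the real implementation lives in
mm/util.c and may differ slightly between kernel versions):

  /* Sketch of kvfree(): free memory that may have come from
   * either kmalloc() or vmalloc(). */
  void kvfree(const void *addr)
  {
    if (is_vmalloc_addr(addr))
      vfree(addr);
    else
      kfree(addr);
  }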

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Jan Kara <jack@suse.com>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Acked-by: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Acked-by: David Rientjes <rientjes@google.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent eab95db6
arch/arm/mm/dma-mapping.c +2 −9
@@ -1200,10 +1200,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
	return NULL;
}

@@ -1211,7 +1208,6 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
@@ -1222,10 +1218,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
				__free_pages(pages[i], 0);
	}

-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
	return 0;
}

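The conditional frees removed above exist because the allocation side
picks its allocator by size.  A minimal sketch of that pairing, using
a hypothetical helper name (the real logic sits in
__iommu_alloc_buffer()):

  /* Hypothetical helper: small page arrays come from kzalloc(),
   * large ones from vzalloc(); kvfree() can free either kind. */
  static struct page **alloc_page_array(int count, gfp_t gfp)
  {
    int array_size = count * sizeof(struct page *);

    if (array_size <= PAGE_SIZE)
      return kzalloc(array_size, gfp);
    return vzalloc(array_size);
  }
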
drivers/acpi/apei/erst.c +2 −4
@@ -32,6 +32,7 @@
#include <linux/hardirq.h>
#include <linux/pstore.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h> /* kvfree() */
#include <acpi/apei.h>

#include "apei-internal.h"
@@ -532,10 +533,7 @@ static int __erst_record_id_cache_add_one(void)
			return -ENOMEM;
		memcpy(new_entries, entries,
		       erst_record_id_cache.len * sizeof(entries[0]));
-		if (erst_record_id_cache.size < PAGE_SIZE)
-			kfree(entries);
-		else
-			vfree(entries);
+		kvfree(entries);
		erst_record_id_cache.entries = entries = new_entries;
		erst_record_id_cache.size = new_size;
	}
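
For context, the hunk above sits in a grow-and-swap path: a larger
buffer is allocated, the old contents are copied over, and the old
buffer is released.  A condensed, partly hypothetical sketch that
mirrors the old free condition (treating new_size as a byte count):

  /* Sketch: the allocation chooses kmalloc() or vmalloc() by size,
   * so the old buffer may be of either kind; kvfree() frees both. */
  u64 *new_entries;

  if (new_size < PAGE_SIZE)
    new_entries = kmalloc(new_size, GFP_KERNEL);
  else
    new_entries = vmalloc(new_size);
  if (!new_entries)
    return -ENOMEM;
  memcpy(new_entries, entries, erst_record_id_cache.len * sizeof(entries[0]));
  kvfree(entries);
  erst_record_id_cache.entries = entries = new_entries;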
drivers/block/drbd/drbd_bitmap.c +7 −19
@@ -364,12 +364,9 @@ static void bm_free_pages(struct page **pages, unsigned long number)
	}
}

-static void bm_vk_free(void *ptr, int v)
+static inline void bm_vk_free(void *ptr)
{
-	if (v)
-		vfree(ptr);
-	else
-		kfree(ptr);
+	kvfree(ptr);
}

/*
@@ -379,7 +376,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
-	unsigned int i, bytes, vmalloced = 0;
+	unsigned int i, bytes;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
@@ -401,7 +398,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
				PAGE_KERNEL);
		if (!new_pages)
			return NULL;
-		vmalloced = 1;
	}

	if (want >= have) {
@@ -411,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
-				bm_vk_free(new_pages, vmalloced);
+				bm_vk_free(new_pages);
				return NULL;
			}
			/* we want to know which page it is
@@ -427,11 +423,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
		*/
	}

-	if (vmalloced)
-		b->bm_flags |= BM_P_VMALLOCED;
-	else
-		b->bm_flags &= ~BM_P_VMALLOCED;
-
	return new_pages;
}

@@ -469,7 +460,7 @@ void drbd_bm_cleanup(struct drbd_device *device)
	if (!expect(device->bitmap))
		return;
	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
-	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
+	bm_vk_free(device->bitmap->bm_pages);
	kfree(device->bitmap);
	device->bitmap = NULL;
}
@@ -643,7 +634,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
-	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;
@@ -656,8 +646,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
	if (capacity == b->bm_dev_capacity)
		goto out;

-	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
-
	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
@@ -671,7 +659,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
-		bm_vk_free(opages, opages_vmalloced);
+		bm_vk_free(opages);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
@@ -744,7 +732,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
-		bm_vk_free(opages, opages_vmalloced);
+		bm_vk_free(opages);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
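
One caveat these drbd hunks illustrate: kvfree() may end up in
vfree(), which can sleep, so it must not be called with a spinlock
held.  Note how the resize path above detaches the old array under
bm_lock and frees it only after spin_unlock_irq(); a minimal sketch
of that pattern, with details elided:

  /* Detach under the lock, free outside it: vfree() (and therefore
   * kvfree()) may sleep, so never call it in atomic context. */
  spin_lock_irq(&b->bm_lock);
  opages = b->bm_pages;      /* take ownership of the old array */
  b->bm_pages = NULL;
  spin_unlock_irq(&b->bm_lock);
  kvfree(opages);            /* safe: no locks held here */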
drivers/block/drbd/drbd_int.h +0 −3
@@ -536,9 +536,6 @@ struct drbd_bitmap; /* opaque for drbd_device */
/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
-	/* do we need to kfree, or vfree bm_pages? */
-	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
-
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

drivers/char/mspec.c +3 −12
@@ -93,14 +93,11 @@ struct vma_data {
	spinlock_t lock;	/* Serialize access to this structure. */
	int count;		/* Number of pages allocated. */
	enum mspec_page_type type; /* Type of pages allocated. */
-	int flags;		/* See VMD_xxx below. */
	unsigned long vm_start;	/* Original (unsplit) base. */
	unsigned long vm_end;	/* Original (unsplit) end. */
	unsigned long maddr[0];	/* Array of MSPEC addresses. */
};

-#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */
-
/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
#define SH2_AMO_CACHE_ENTRIES	4
@@ -185,10 +182,7 @@ mspec_close(struct vm_area_struct *vma)
			       "failed to zero page %ld\n", my_page);
	}

-	if (vdata->flags & VMD_VMALLOCED)
-		vfree(vdata);
-	else
-		kfree(vdata);
+	kvfree(vdata);
}

/*
@@ -256,7 +250,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
					enum mspec_page_type type)
{
	struct vma_data *vdata;
-	int pages, vdata_size, flags = 0;
+	int pages, vdata_size;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
@@ -271,16 +265,13 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	if (vdata_size <= PAGE_SIZE)
		vdata = kzalloc(vdata_size, GFP_KERNEL);
-	else {
+	else
		vdata = vzalloc(vdata_size);
-		flags = VMD_VMALLOCED;
-	}
	if (!vdata)
		return -ENOMEM;

	vdata->vm_start = vma->vm_start;
	vdata->vm_end = vma->vm_end;
-	vdata->flags = flags;
	vdata->type = type;
	spin_lock_init(&vdata->lock);
	atomic_set(&vdata->refcnt, 1);
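
A side note on the allocation branch kept above: later kernels
(v4.12+) added kvzalloc(), which collapses the allocation side the
same way kvfree() collapses the free side.  On such kernels this spot
could plausibly become:

  /* kvzalloc() (v4.12+) tries kzalloc() first and falls back to
   * vzalloc() for allocations the slab side cannot satisfy. */
  vdata = kvzalloc(vdata_size, GFP_KERNEL);
  if (!vdata)
    return -ENOMEM;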