
Commit b221385b authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] mm/: make functions static



This patch makes the following needlessly global functions static:
 - slab.c: kmem_find_general_cachep()
 - swap.c: __page_cache_release()
 - vmalloc.c: __vmalloc_node()

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
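For context, the pattern this commit applies, in miniature (a sketch with hypothetical file and function names, not code from this commit): a function that is only called from within its own file loses its extern declaration in the header, loses its EXPORT_SYMBOL, and gains the static qualifier, confining the symbol to a single translation unit.

/* Before: foo.h declares a helper that only foo.c ever calls. */
extern int foo_helper(int x);

/* Before: foo.c defines it with global linkage and exports it to
 * modules (EXPORT_SYMBOL comes from <linux/module.h>).
 */
int foo_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(foo_helper);

/* After: the header declaration and the export are deleted, and the
 * definition becomes static, so the symbol is invisible outside foo.c
 * and the compiler is free to inline or discard it.
 */
static int foo_helper(int x)
{
	return x * 2;
}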
parent 204ec841
include/linux/mm.h  +0 −2
@@ -318,8 +318,6 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
include/linux/slab.h  +0 −2
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -223,7 +222,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 /* SLOB allocator routines */
 
 void kmem_cache_init(void);
-struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
 struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
 	unsigned long,
 	void (*)(void *, struct kmem_cache *, unsigned long),
include/linux/vmalloc.h  +0 −2
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
-extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
-				pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
mm/slab.c  +1 −2
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
mm/swap.c  +19 −20
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs.  But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -222,26 +241,6 @@ int lru_add_drain_all(void)
 }
 #endif
 
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs.  But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
 /*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and