Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 49c02a75 authored by Peng Tao, committed by Greg Kroah-Hartman
Browse files

staging/lustre: clean up and remove libcfs/linux/linux-mem.c



These are simple wrappers for the NUMA allocator. We don't need them.

Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c3dbb781
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -161,8 +161,8 @@ do { \
do {									    \
	LIBCFS_ALLOC_PRE((size), (mask));				    \
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				    \
		cfs_cpt_malloc((cptab), (cpt), (size), (mask)) :	    \
		cfs_cpt_vmalloc((cptab), (cpt), (size));		    \
		kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
		vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));	    \
	LIBCFS_ALLOC_POST((ptr), (size));				    \
} while (0)

+0 −19
Original line number Diff line number Diff line
@@ -79,25 +79,6 @@
	do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
#define MMSPACE_CLOSE	       set_fs(__oldfs)


/*
 * NUMA allocators
 *
 * NB: we will rename these functions in a separate patch:
 * - rename kmalloc to cfs_malloc
 * - rename kmalloc/free_page to cfs_page_alloc/free
 * - rename kmalloc/free_large to cfs_vmalloc/vfree
 */
extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
			    size_t nr_bytes, unsigned int flags);
extern void *cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt,
			     size_t nr_bytes);
extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
				      int cpt, unsigned int flags);
extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
				     struct cfs_cpt_table *cptab,
				     int cpt, unsigned int flags);

/*
 * Shrinker
 */
+3 −2
Original line number Diff line number Diff line
@@ -1203,8 +1203,9 @@ kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
	p->ibp_npages = npages;

	for (i = 0; i < npages; i++) {
		p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
						     __GFP_IO);
		p->ibp_pages[i] = alloc_pages_node(
				    cfs_cpt_spread_node(lnet_cpt_table(), cpt),
				    __GFP_IO, 0);
		if (p->ibp_pages[i] == NULL) {
			CERROR("Can't allocate page %d of %d\n", i, npages);
			kiblnd_free_pages(p);
+3 −2
Original line number Diff line number Diff line
@@ -1247,8 +1247,9 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
	rb->rb_pool = rbp;

	for (i = 0; i < npages; i++) {
		page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
					  __GFP_ZERO | GFP_IOFS);
		page = alloc_pages_node(
				cfs_cpt_spread_node(lnet_cpt_table(), cpt),
				__GFP_ZERO | GFP_IOFS, 0);
		if (page == NULL) {
			while (--i >= 0)
				__free_page(rb->rb_kiov[i].kiov_page);
+2 −1
Original line number Diff line number Diff line
@@ -146,7 +146,8 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
		struct page *pg;
		int	    nob;

		pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS);
		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
				      GFP_IOFS, 0);
		if (pg == NULL) {
			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
			srpc_free_bulk(bk);
Loading