Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7682486b authored by Randy Dunlap, committed by Linus Torvalds
Browse files

mm: fix various kernel-doc comments



Fix various kernel-doc notation in mm/:

filemap.c: add function short description; convert 2 to kernel-doc
fremap.c: change parameter 'prot' to @prot
pagewalk.c: change "-" in function parameters to ":"
slab.c: fix short description of kmem_ptr_validate()
swap.c: fix description & parameters of put_pages_list()
swap_state.c: fix function parameters
vmalloc.c: change "@returns" to "Returns:" since that is not a parameter

Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6cb2a210
Loading
Loading
Loading
Loading
+17 −3
Original line number Diff line number Diff line
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range);

/**
 * sync_page_range_nolock
 * sync_page_range_nolock - write & wait on all pages in the passed range without locking
 * @inode:	target inode
 * @mapping:	target address_space
 * @pos:	beginning offset in pages to write
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
					sync_page_killable, TASK_KILLABLE);
}

/*
/**
 * __lock_page_nosync - get a lock on the page, without calling sync_page()
 * @page: the page to lock
 *
 * Variant of lock_page that does not require the caller to hold a reference
 * on the page's mapping.
 */
@@ -1538,9 +1541,20 @@ static struct page *__read_cache_page(struct address_space *mapping,
	return page;
}

/*
/**
 * read_cache_page_async - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	destination for read data
 *
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page but don't wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
+1 −1
Original line number Diff line number Diff line
@@ -113,7 +113,7 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored (but must be zero),
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
+5 −5
Original line number Diff line number Diff line
@@ -77,11 +77,11 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @mm - memory map to walk
 * @addr - starting address
 * @end - ending address
 * @walk - set of callbacks to invoke for each level of the tree
 * @private - private data passed to the callback function
 * @mm: memory map to walk
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
+2 −3
Original line number Diff line number Diff line
@@ -3624,12 +3624,11 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc);

/**
 * kmem_ptr_validate - check if an untrusted pointer might
 *	be a slab entry.
 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
 * @cachep: the cache we're checking against
 * @ptr: pointer to validate
 *
 * This verifies that the untrusted pointer looks sane:
 * This verifies that the untrusted pointer looks sane;
 * it is _not_ a guarantee that the pointer is actually
 * part of the slab cache in question, but it at least
 * validates that the pointer can be dereferenced and
+2 −3
Original line number Diff line number Diff line
@@ -78,12 +78,11 @@ void put_page(struct page *page)
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list(): release a list of pages
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 *
 * @pages: list of pages threaded on page->lru
 */
void put_pages_list(struct list_head *pages)
{
Loading