Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 920c7a5d authored by Harvey Harrison, committed by Linus Torvalds
Browse files

mm: remove fastcall from mm/



fastcall is always defined to be empty, remove it

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1e548deb
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
void unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
 * Variant of lock_page that does not require the caller to hold a reference
 * on the page's mapping.
 */
void fastcall __lock_page_nosync(struct page *page)
void __lock_page_nosync(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int fastcall page_cache_read(struct file * file, pgoff_t offset)
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page; 
+2 −2
Original line number Diff line number Diff line
@@ -163,7 +163,7 @@ static inline unsigned long map_new_virtual(struct page *page)
	return vaddr;
}

void fastcall *kmap_high(struct page *page)
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

@@ -185,7 +185,7 @@ void fastcall *kmap_high(struct page *page)

EXPORT_SYMBOL(kmap_high);

void fastcall kunmap_high(struct page *page)
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
+1 −1
Original line number Diff line number Diff line
@@ -34,7 +34,7 @@ static inline void __put_page(struct page *page)
	atomic_dec(&page->_count);
}

extern void fastcall __init __free_pages_bootmem(struct page *page,
extern void __init __free_pages_bootmem(struct page *page,
						unsigned int order);

/*
+2 −1
Original line number Diff line number Diff line
@@ -1109,7 +1109,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);

pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
+1 −1
Original line number Diff line number Diff line
@@ -1073,7 +1073,7 @@ static int __set_page_dirty(struct page *page)
	return 0;
}

int fastcall set_page_dirty(struct page *page)
int set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty(page);
	if (ret)
Loading