Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e60c86b authored by Andi Kleen, committed by Linus Torvalds
Browse files

gcc-4.6: mm: fix unused but set warnings



No real bugs, just some dead code and some fixups.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 627295e4
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -126,8 +126,8 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */
#define pte_unmap(pte) ((void)(pte))/* NOP */
#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */

#define update_mmu_cache(vma, address, ptep) do { } while (0)

+5 −1
Original line number Diff line number Diff line
@@ -73,7 +73,11 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic_notypecheck(addr, idx)	do { pagefault_enable(); } while (0)
/*
 * Release an "atomic kmap": re-enables pagefaults (undoing the disable
 * done on the map side).  Converted from a #define to a static inline so
 * that both arguments are type-checked and count as used, which silences
 * gcc-4.6's -Wunused-but-set-variable warnings at the call sites (per the
 * commit message: no real bug, the old macro simply discarded its args).
 * NOTE(review): presumably the !CONFIG_HIGHMEM variant, where no actual
 * unmapping is needed — confirm against the surrounding #ifdef block.
 */
static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

+1 −1
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) do { } while (0)
#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
#endif

#ifdef CONFIG_DEBUG_VIRTUAL
+0 −2
Original line number Diff line number Diff line
@@ -2238,14 +2238,12 @@ static ssize_t generic_perform_write(struct file *file,

	do {
		struct page *page;
		pgoff_t index;		/* Pagecache index for current page */
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

+0 −2
Original line number Diff line number Diff line
@@ -307,7 +307,6 @@ void free_pgd_range(struct mmu_gather *tlb,
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
@@ -351,7 +350,6 @@ void free_pgd_range(struct mmu_gather *tlb,
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
Loading