/*
 * CONFIG_PROCESS_RECLAIM: reclaim a task's pages on demand by writing
 * "file", "anon" or "all" to /proc/PID/reclaim.
 *
 * Companion hunks of this patch (reconstructed from the diff):
 *
 *   fs/proc/base.c -- entry in tgid_base_stuff[]:
 *       #ifdef CONFIG_PROCESS_RECLAIM
 *           REG("reclaim", 0200, proc_reclaim_operations),
 *       #endif
 *
 *   fs/proc/internal.h:
 *       extern const struct file_operations proc_reclaim_operations;
 *
 *   include/linux/rmap.h:
 *       extern int isolate_lru_page(struct page *page);
 *       extern void putback_lru_page(struct page *page);
 *       extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
 *
 *   mm/Kconfig:
 *       config PROCESS_RECLAIM
 *           bool "Enable process reclaim"
 *           depends on PROC_FS
 *           depends on QGKI
 *           default y
 *
 * fs/proc/task_mmu.c additions follow; they need <linux/mm_inline.h>.
 */
#ifdef CONFIG_PROCESS_RECLAIM
/*
 * reclaim_pte_range - pagewalk pmd_entry callback.
 *
 * Walks the PTEs of one PMD, isolating present, LRU-resident pages in
 * batches of at most SWAP_CLUSTER_MAX, then hands each batch to
 * reclaim_pages_from_list() with the page-table lock dropped, looping
 * via "cont:" until the whole [addr, end) range has been covered.
 * walk->private is the VMA being walked. Always returns 0.
 */
static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	LIST_HEAD(page_list);
	int isolated;

	/* Split any THP so the range can be handled at PTE granularity. */
	split_huge_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;
cont:
	isolated = 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Skip pages already off the LRU (racing reclaim etc.). */
		if (isolate_lru_page(page))
			continue;

		list_add(&page->lru, &page_list);
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		isolated++;
		if (isolated >= SWAP_CLUSTER_MAX) {
			/*
			 * Advance past the page just isolated so the next
			 * "cont" pass does not rescan it, and so that
			 * "pte - 1" below refers to the last examined
			 * entry exactly as on the normal loop exit.
			 */
			pte++;
			addr += PAGE_SIZE;
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);

	/* Reclaim the batch with the PTL released. */
	reclaim_pages_from_list(&page_list);

	if (addr != end)
		goto cont;

	cond_resched();
	return 0;
}

/* Which class of a task's pages a write to /proc/PID/reclaim targets. */
enum reclaim_type {
	RECLAIM_FILE,
	RECLAIM_ANON,
	RECLAIM_ALL,
};

/*
 * reclaim_write - handler for writes to /proc/PID/reclaim.
 *
 * Parses the keyword ("file", "anon" or "all"), then walks every
 * matching, non-hugetlb VMA of the target task under mmap_sem and
 * reclaims its mapped pages via reclaim_pte_range().
 *
 * Returns @count on success, -EFAULT on a bad user buffer, -EINVAL for
 * an unknown keyword, -ESRCH when the target task is gone. A task with
 * no mm (kernel thread, exited) is treated as trivially successful.
 */
static ssize_t reclaim_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	static const struct mm_walk_ops reclaim_walk_ops = {
		.pmd_entry = reclaim_pte_range,
	};
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum reclaim_type type;
	char *type_buf;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;

	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	/* strstrip() tolerates the trailing newline echo appends. */
	type_buf = strstrip(buffer);
	if (!strcmp(type_buf, "file"))
		type = RECLAIM_FILE;
	else if (!strcmp(type_buf, "anon"))
		type = RECLAIM_ANON;
	else if (!strcmp(type_buf, "all"))
		type = RECLAIM_ALL;
	else
		return -EINVAL;

	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;

	/*
	 * NOTE(review): access is gated only by the 0200 file mode, so any
	 * same-uid process may force reclaim on this task; an explicit
	 * mm_access(..., PTRACE_MODE_ATTACH) check may be warranted --
	 * confirm against the intended security policy.
	 */
	mm = get_task_mm(task);
	if (mm) {
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (is_vm_hugetlb_page(vma))
				continue;

			if (type == RECLAIM_ANON && vma->vm_file)
				continue;
			if (type == RECLAIM_FILE && !vma->vm_file)
				continue;

			walk_page_range(mm, vma->vm_start, vma->vm_end,
					&reclaim_walk_ops, vma);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_reclaim_operations = {
	.write		= reclaim_write,
	.llseek		= noop_llseek,
};
#endif /* CONFIG_PROCESS_RECLAIM */
fs/proc/base.c +3 −0 Original line number Diff line number Diff line Loading @@ -3237,6 +3237,9 @@ static const struct pid_entry tgid_base_stuff[] = { REG("mounts", S_IRUGO, proc_mounts_operations), REG("mountinfo", S_IRUGO, proc_mountinfo_operations), REG("mountstats", S_IRUSR, proc_mountstats_operations), #ifdef CONFIG_PROCESS_RECLAIM REG("reclaim", 0200, proc_reclaim_operations), #endif #ifdef CONFIG_PROC_PAGE_MONITOR REG("clear_refs", S_IWUSR, proc_clear_refs_operations), REG("smaps", S_IRUGO, proc_pid_smaps_operations), Loading
fs/proc/internal.h +1 −0 Original line number Diff line number Diff line Loading @@ -205,6 +205,7 @@ struct pde_opener { extern const struct inode_operations proc_link_inode_operations; extern const struct inode_operations proc_pid_link_inode_operations; extern const struct super_operations proc_sops; extern const struct file_operations proc_reclaim_operations; void proc_init_kmemcache(void); void set_proc_pid_nlink(void); Loading
/* fs/proc/task_mmu.c additions (needs <linux/mm_inline.h>). */
#ifdef CONFIG_PROCESS_RECLAIM
/*
 * reclaim_pte_range - pagewalk pmd_entry callback.
 *
 * Isolates present, LRU-resident pages mapped by this PMD in batches of
 * at most SWAP_CLUSTER_MAX, passing each batch to
 * reclaim_pages_from_list() with the page-table lock released, and
 * loops via "cont:" until [addr, end) is fully covered. walk->private
 * is the VMA being walked. Always returns 0.
 */
static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	LIST_HEAD(page_list);
	int isolated;

	/* Split any THP so the range can be handled per-PTE. */
	split_huge_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;
cont:
	isolated = 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Already off the LRU (e.g. racing reclaim): skip. */
		if (isolate_lru_page(page))
			continue;

		list_add(&page->lru, &page_list);
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		isolated++;
		if (isolated >= SWAP_CLUSTER_MAX) {
			/*
			 * Step past the page just isolated so the next
			 * "cont" pass does not rescan it and "pte - 1"
			 * matches the normal loop-exit state.
			 */
			pte++;
			addr += PAGE_SIZE;
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);

	/* Reclaim the batch with the PTL dropped. */
	reclaim_pages_from_list(&page_list);

	if (addr != end)
		goto cont;

	cond_resched();
	return 0;
}

/* Page class targeted by a write to /proc/PID/reclaim. */
enum reclaim_type {
	RECLAIM_FILE,
	RECLAIM_ANON,
	RECLAIM_ALL,
};

/*
 * reclaim_write - handler for writes to /proc/PID/reclaim.
 *
 * Accepts "file", "anon" or "all" (trailing whitespace stripped) and
 * walks every matching, non-hugetlb VMA of the target task under
 * mmap_sem, reclaiming mapped pages via reclaim_pte_range().
 *
 * Returns @count on success, -EFAULT on a bad user buffer, -EINVAL for
 * an unknown keyword, -ESRCH when the task is gone; a task without an
 * mm is treated as trivially successful.
 */
static ssize_t reclaim_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	static const struct mm_walk_ops reclaim_walk_ops = {
		.pmd_entry = reclaim_pte_range,
	};
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum reclaim_type type;
	char *type_buf;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;

	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	/* strstrip() tolerates the newline appended by echo. */
	type_buf = strstrip(buffer);
	if (!strcmp(type_buf, "file"))
		type = RECLAIM_FILE;
	else if (!strcmp(type_buf, "anon"))
		type = RECLAIM_ANON;
	else if (!strcmp(type_buf, "all"))
		type = RECLAIM_ALL;
	else
		return -EINVAL;

	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;

	/*
	 * NOTE(review): only the 0200 file mode gates access here, so any
	 * same-uid process may force reclaim on this task; consider an
	 * explicit mm_access(..., PTRACE_MODE_ATTACH) check -- confirm
	 * against the intended security policy.
	 */
	mm = get_task_mm(task);
	if (mm) {
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (is_vm_hugetlb_page(vma))
				continue;

			if (type == RECLAIM_ANON && vma->vm_file)
				continue;
			if (type == RECLAIM_FILE && !vma->vm_file)
				continue;

			walk_page_range(mm, vma->vm_start, vma->vm_end,
					&reclaim_walk_ops, vma);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_reclaim_operations = {
	.write		= reclaim_write,
	.llseek		= noop_llseek,
};
#endif /* CONFIG_PROCESS_RECLAIM */
include/linux/rmap.h +4 −0 Original line number Diff line number Diff line Loading @@ -12,6 +12,10 @@ #include <linux/memcontrol.h> #include <linux/highmem.h> extern int isolate_lru_page(struct page *page); extern void putback_lru_page(struct page *page); extern unsigned long reclaim_pages_from_list(struct list_head *page_list); /* * The anon_vma heads a list of private "related" vmas, to scan if * an anonymous page pointing to this anon_vma needs to be unmapped: Loading
mm/Kconfig (+14 −0), appended after config OOM_TASK_PRIORITY_ADJ_LIMIT
("... before considering tasks with a lower oom_score_adj value.") and its
closing "endmenu":

config PROCESS_RECLAIM
	bool "Enable process reclaim"
	depends on PROC_FS
	depends on QGKI
	default y
	help
	  Allows reclaiming the pages of a process through /proc/PID/reclaim:

	  (echo file > /proc/PID/reclaim) reclaims file-backed pages only.
	  (echo anon > /proc/PID/reclaim) reclaims anonymous pages only.
	  (echo all > /proc/PID/reclaim) reclaims all pages.

	  Writing any other value is rejected with -EINVAL.