-rw-r--r--  include/linux/ksm.h   |  7
-rw-r--r--  include/linux/rmap.h  |  9
-rw-r--r--  mm/ksm.c              |  6
-rw-r--r--  mm/migrate.c          |  7
-rw-r--r--  mm/rmap.c             | 19
5 files changed, 27 insertions, 21 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 45c9b6a17bcb..0eef8cb0baf7 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,8 +76,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 int page_referenced_ksm(struct page *page,
                         struct mem_cgroup *memcg, unsigned long *vm_flags);
 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-                struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
 #else  /* !CONFIG_KSM */
@@ -120,8 +119,8 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
         return 0;
 }
 
-static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
-                struct vm_area_struct *, unsigned long, void *), void *arg)
+static inline int rmap_walk_ksm(struct page *page,
+                        struct rmap_walk_control *rwc)
 {
         return 0;
 }
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6dacb93a6d94..6a456ce6de20 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -235,11 +235,16 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+struct rmap_walk_control {
+        void *arg;
+        int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+                                        unsigned long addr, void *arg);
+};
+
 /*
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-                struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 
 #else   /* !CONFIG_MMU */
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1997,8 +1997,7 @@ out:
 }
 
 #ifdef CONFIG_MIGRATION
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-                struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
         struct stable_node *stable_node;
         struct rmap_item *rmap_item;
@@ -2033,7 +2032,8 @@ again:
                         if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
                                 continue;
 
-                        ret = rmap_one(page, vma, rmap_item->address, arg);
+                        ret = rwc->rmap_one(page, vma,
+                                        rmap_item->address, rwc->arg);
                         if (ret != SWAP_AGAIN) {
                                 anon_vma_unlock_read(anon_vma);
                                 goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index 9194375b2307..11d89dc0574c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -199,7 +199,12 @@ out:
  */
 static void remove_migration_ptes(struct page *old, struct page *new)
 {
-        rmap_walk(new, remove_migration_pte, old);
+        struct rmap_walk_control rwc = {
+                .rmap_one = remove_migration_pte,
+                .arg = old,
+        };
+
+        rmap_walk(new, &rwc);
 }
 
 /*
diff --git a/mm/rmap.c b/mm/rmap.c
index 5a79bf585e27..f8f10ad5d359 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1706,8 +1706,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page)
  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
-                struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
         struct anon_vma *anon_vma;
         pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1721,7 +1720,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                 struct vm_area_struct *vma = avc->vma;
                 unsigned long address = vma_address(page, vma);
-                ret = rmap_one(page, vma, address, arg);
+                ret = rwc->rmap_one(page, vma, address, rwc->arg);
                 if (ret != SWAP_AGAIN)
                         break;
         }
@@ -1729,8 +1728,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
         return ret;
 }
 
-static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
-                struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
         struct address_space *mapping = page->mapping;
         pgoff_t pgoff = page->index << compound_order(page);
@@ -1742,7 +1740,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
         mutex_lock(&mapping->i_mmap_mutex);
         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                 unsigned long address = vma_address(page, vma);
-                ret = rmap_one(page, vma, address, arg);
+                ret = rwc->rmap_one(page, vma, address, rwc->arg);
                 if (ret != SWAP_AGAIN)
                         break;
         }
@@ -1755,17 +1753,16 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
         return ret;
 }
 
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-                struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
         VM_BUG_ON(!PageLocked(page));
 
         if (unlikely(PageKsm(page)))
-                return rmap_walk_ksm(page, rmap_one, arg);
+                return rmap_walk_ksm(page, rwc);
         else if (PageAnon(page))
-                return rmap_walk_anon(page, rmap_one, arg);
+                return rmap_walk_anon(page, rwc);
         else
-                return rmap_walk_file(page, rmap_one, arg);
+                return rmap_walk_file(page, rwc);
 }
 #endif /* CONFIG_MIGRATION */
 
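For readers following the interface change, here is a minimal usage sketch, not part of the patch, showing how a caller packs its callback and private data into the new struct rmap_walk_control and hands it to rmap_walk(), in the same way remove_migration_ptes() is converted above. The names count_mappings_one(), count_page_mappings() and the counter argument are hypothetical and exist only for illustration; only struct rmap_walk_control, rmap_walk() and the SWAP_AGAIN return convention come from the code touched by this patch.

/*
 * Hypothetical example, not from the patch: count how many VMAs map a
 * page by walking its reverse mappings through the new control struct.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static int count_mappings_one(struct page *page, struct vm_area_struct *vma,
                              unsigned long addr, void *arg)
{
        int *count = arg;

        (*count)++;             /* one more VMA currently mapping this page */
        return SWAP_AGAIN;      /* keep walking the remaining VMAs */
}

static int count_page_mappings(struct page *page)
{
        int count = 0;
        struct rmap_walk_control rwc = {
                .rmap_one = count_mappings_one,
                .arg = &count,
        };

        /* rmap_walk() requires the page to be locked (VM_BUG_ON above) */
        rmap_walk(page, &rwc);
        return count;
}

Callers no longer pass the function pointer and opaque argument separately; they describe the whole walk in one structure, which later patches in this series extend with additional hooks without touching every rmap_walk() caller again.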