author | Ingo Molnar <mingo@kernel.org> | 2017-10-23 13:30:47 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2017-10-23 13:30:47 +0200
commit | f95b23a112f1a31ea042483540cd907b58d23a5f
tree | 3d41d2f8cf1d66ed593deb12f25a30d2905eb077 /mm/z3fold.c
parent | da20ab35180780e4a6eadc804544f1fa967f3567
parent | 58c3862b521ead4f69a24ef009a679cb3c519620
Merge branch 'x86/urgent' into x86/asm, to pick up dependent fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/z3fold.c')
-rw-r--r-- | mm/z3fold.c | 10
1 file changed, 5 insertions, 5 deletions
```diff
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 486550df32be..b2ba2ba585f3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 
 	WARN_ON(!list_empty(&zhdr->buddy));
 	set_bit(PAGE_STALE, &page->private);
+	clear_bit(NEEDS_COMPACTING, &page->private);
 	spin_lock(&pool->lock);
 	if (!list_empty(&page->lru))
 		list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
 		list_del(&zhdr->buddy);
 		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
 			continue;
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		cancel_work_sync(&zhdr->work);
 		free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
 	 * stale pages list. cancel_work_sync() can sleep so we must make
 	 * sure it won't be called in case we're in atomic context.
	 */
-	if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
-	    !unlikely(work_busy(&zhdr->work)))) {
+	if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
 		list_del(&zhdr->buddy);
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		if (can_sleep)
 			cancel_work_sync(&zhdr->work);
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				goto next;
 		}
 next:
+		spin_lock(&pool->lock);
 		if (test_bit(PAGE_HEADLESS, &page->private)) {
 			if (ret == 0) {
+				spin_unlock(&pool->lock);
 				free_z3fold_page(page);
 				return 0;
 			}
 		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
 			atomic64_dec(&pool->pages_nr);
+			spin_unlock(&pool->lock);
 			return 0;
 		}
 
-		spin_lock(&pool->lock);
 		/*
 		 * Add to the beginning of LRU.
```
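For context, the last hunk changes where z3fold_reclaim_page() takes pool->lock: it is now acquired before deciding how to dispose of the page and released only once the page's fate is settled, instead of being taken afterwards. The sketch below is a minimal user-space analogue of that ordering, not the kernel code: a pthread mutex stands in for the spinlock, a plain counter for the kref, and all names (demo_pool, demo_page, demo_dispose) are hypothetical.

```c
/*
 * Hedged sketch only: a user-space analogue of the lock ordering the
 * z3fold_reclaim_page() hunk establishes. All names here are invented
 * for illustration and do not exist in mm/z3fold.c.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct demo_page {
	bool headless;   /* stands in for PAGE_HEADLESS */
	int refcount;    /* stands in for zhdr->refcount */
};

struct demo_pool {
	pthread_mutex_t lock;  /* stands in for pool->lock */
	long pages_nr;         /* stands in for pool->pages_nr */
};

/* Returns true when the page was freed and reclaim can return. */
static bool demo_dispose(struct demo_pool *pool, struct demo_page *page,
			 int evict_ret)
{
	/* Lock first, as the patch now does right at the "next:" label. */
	pthread_mutex_lock(&pool->lock);

	if (page->headless) {
		if (evict_ret == 0) {
			/* Headless path: drop the lock before freeing. */
			pthread_mutex_unlock(&pool->lock);
			free(page);
			return true;
		}
	} else if (--page->refcount == 0) {
		/*
		 * Last reference: release while the lock is held, then
		 * unlock, mirroring the kref_put() path in the hunk.
		 */
		free(page);
		pool->pages_nr--;
		pthread_mutex_unlock(&pool->lock);
		return true;
	}

	/*
	 * Page survives: the kernel code re-adds it to the LRU here while
	 * still holding pool->lock; this sketch simply releases the lock.
	 */
	pthread_mutex_unlock(&pool->lock);
	return false;
}

int main(void)
{
	struct demo_pool pool = { .pages_nr = 1 };
	struct demo_page *page = calloc(1, sizeof(*page));

	pthread_mutex_init(&pool.lock, NULL);
	page->headless = false;
	page->refcount = 1;

	/* Single reference held: dispose frees the page and drops pages_nr. */
	demo_dispose(&pool, page, 0);

	pthread_mutex_destroy(&pool.lock);
	return 0;
}
```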