author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-30 12:46:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-30 12:46:17 -0700
commit     3b03117c5cfbb04175b688c79ea4155b8ef812d3
tree       5d2e3d0d7db98e2b47c7bb51be39bc4ced6a4d97
parent     fa7eadab4b4aec0139d2429e6f8d13375ff8a658
parent     0f1f694260e0d35b5ce7d471f6e679c3dd4d7d94
Merge branch 'slub/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slub/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
SLUB: Allow full duplication of kmalloc array for 390
slub: move kmem_cache_node into it's own cacheline
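The second commit in the pull stops embedding a kmem_cache_node (the old local_node) inside struct kmem_cache and instead always reaches it through the node[] pointer array on NUMA, keeping the embedded copy only for UP. The sketch below is a hypothetical, stand-alone user-space illustration, not the kernel's definitions; the struct and field names are made up. It only shows the layout effect: an embedded per-node structure inflates the cache descriptor and pushes the fields after it far from the first cache line, while a pointer array keeps them close.

/*
 * Hypothetical user-space illustration (not kernel code) of the layout
 * difference between embedding a per-node structure and keeping only
 * pointers to separately allocated ones.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_node {                      /* stand-in for struct kmem_cache_node */
        long partial[16];
};

struct cache_embedded {                 /* roughly the old layout */
        int offset;
        struct fake_node local_node;    /* embedded, as before the patch */
        long max;
        long min;
};

struct cache_pointers {                 /* roughly the new NUMA layout */
        int offset;
        struct fake_node *node[4];      /* pointers only, as after the patch */
        long max;
        long min;
};

int main(void)
{
        printf("embedded: sizeof=%zu, offsetof(max)=%zu\n",
               sizeof(struct cache_embedded),
               offsetof(struct cache_embedded, max));
        printf("pointers: sizeof=%zu, offsetof(max)=%zu\n",
               sizeof(struct cache_pointers),
               offsetof(struct cache_pointers, max));
        return 0;
}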
-rw-r--r--   include/linux/slub_def.h | 11
-rw-r--r--   mm/slub.c                | 33
2 files changed, 15 insertions(+), 29 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 55695c8d2f8a..4ba59cfc1f75 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
         int offset;             /* Free pointer offset. */
         struct kmem_cache_order_objects oo;
 
-        /*
-         * Avoid an extra cache line for UP, SMP and for the node local to
-         * struct kmem_cache.
-         */
-        struct kmem_cache_node local_node;
-
         /* Allocation and freeing of slabs */
         struct kmem_cache_order_objects max;
         struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
          */
         int remote_node_defrag_ratio;
         struct kmem_cache_node *node[MAX_NUMNODES];
+#else
+        /* Avoid an extra cache line for UP */
+        struct kmem_cache_node local_node;
 #endif
 };
 
@@ -140,7 +137,7 @@ struct kmem_cache {
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
 /* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
diff --git a/mm/slub.c b/mm/slub.c
index 26f0cb9cc584..578f68f3c51f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2137,7 +2137,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n = s->node[node];
 
-                if (n && n != &s->local_node)
+                if (n)
                         kmem_cache_free(kmalloc_caches, n);
                 s->node[node] = NULL;
         }
@@ -2146,33 +2146,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
         int node;
-        int local_node;
-
-        if (slab_state >= UP && (s < kmalloc_caches ||
-                        s >= kmalloc_caches + KMALLOC_CACHES))
-                local_node = page_to_nid(virt_to_page(s));
-        else
-                local_node = 0;
 
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n;
 
-                if (local_node == node)
-                        n = &s->local_node;
-                else {
-                        if (slab_state == DOWN) {
-                                early_kmem_cache_node_alloc(gfpflags, node);
-                                continue;
-                        }
-                        n = kmem_cache_alloc_node(kmalloc_caches,
-                                                        gfpflags, node);
-
-                        if (!n) {
-                                free_kmem_cache_nodes(s);
-                                return 0;
-                        }
+                if (slab_state == DOWN) {
+                        early_kmem_cache_node_alloc(gfpflags, node);
+                        continue;
+                }
+                n = kmem_cache_alloc_node(kmalloc_caches,
+                                                gfpflags, node);
+                if (!n) {
+                        free_kmem_cache_nodes(s);
+                        return 0;
                 }
+                s->node[node] = n;
                 init_kmem_cache_node(n, s);
         }
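The first commit in the pull enlarges the reserved kmalloc_caches[] array (the same array the removed bounds check above refers to) so the DMA kmalloc caches can be duplicated for every size rather than only the larger ones, which s390 needs. A rough, hypothetical illustration of the arithmetic, assuming SLUB_PAGE_SHIFT is (PAGE_SHIFT + 2) and a 4 KiB page; the concrete numbers depend on the configuration and are illustrative only.

/* Hypothetical arithmetic only; assumes SLUB_PAGE_SHIFT = PAGE_SHIFT + 2
 * and PAGE_SHIFT = 12, which may not match a given configuration. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

int main(void)
{
        int before = 2 * SLUB_PAGE_SHIFT - 6;   /* old KMALLOC_CACHES: 22 */
        int after  = 2 * SLUB_PAGE_SHIFT;       /* new KMALLOC_CACHES: 28 */

        printf("kmalloc_caches[] slots: %d -> %d\n", before, after);
        return 0;
}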