[llvmlinux] [PATCH 2/2] Replace WIP slab allocator patch with an upstreamable version.

Daniel Sanders daniel.sanders at imgtec.com
Fri Jan 9 14:20:22 UTC 2015


This patch should work for all arches but I've only tested Mips so far.
---

I haven't submitted this one upstream yet since I'd like to do one patch
successfully before attempting to do multiple at once.

 ...e_index-table-before-replacing-the-bootst.patch | 140 +++++++++++++++++++++
 arch/all/patches/series                            |   1 +
 .../mips-WIP-Fix-slab-allocator-bootstrap.patch    | 102 ---------------
 3 files changed, 141 insertions(+), 102 deletions(-)
 create mode 100644 arch/all/patches/correct-size_index-table-before-replacing-the-bootst.patch
 delete mode 100644 arch/mips/patches/mips-WIP-Fix-slab-allocator-bootstrap.patch

diff --git a/arch/all/patches/correct-size_index-table-before-replacing-the-bootst.patch b/arch/all/patches/correct-size_index-table-before-replacing-the-bootst.patch
new file mode 100644
index 0000000..fe2e904
--- /dev/null
+++ b/arch/all/patches/correct-size_index-table-before-replacing-the-bootst.patch
@@ -0,0 +1,140 @@
+From ef285703935fd863463f64eae490e8f1d880e50c Mon Sep 17 00:00:00 2001
+From: Daniel Sanders <daniel.sanders at imgtec.com>
+Date: Thu, 8 Jan 2015 14:38:02 +0000
+Subject: [PATCH] Correct size_index table before replacing the bootstrap
+ kmem_cache_node
+
+There are currently two ways to generate indices into kmalloc_caches
+(via kmalloc_index() and via the size_index table in slab_common.c) and on some
+arches (possibly only MIPS) they disagree with each other until
+create_kmalloc_caches() has been called. This patch moves the initialization
+of the size_index array slightly earlier so that the first few kmem_cache_node's
+can be safely allocated.
+
+The failing sequence was:
+* kmalloc_caches contains NULL elements
+* kmem_cache_init initialises the element that 'struct kmem_cache_node' will be
+  allocated to. For 32-bit Mips, this is a 56-byte struct and kmalloc_index
+  returns KMALLOC_SHIFT_LOW (7).
+* init_list is called which calls kmalloc_node to allocate a 'struct
+  kmem_cache_node'.
+* kmalloc_slab selects the kmalloc_caches element using
+  size_index[size_index_elem(size)]. For MIPS, size is 56, and the expression
+  returns 6.
+* This element of kmalloc_caches is NULL and allocation fails.
+* If it had not already failed, it would have called create_kmalloc_caches()
+  at this point which would have changed size_index[size_index_elem(size)] to 7.
+
+Signed-off-by: Daniel Sanders <daniel.sanders at imgtec.com>
+---
+
+This is needed to fix the boot when compiled with clang.  Interestingly, GCC
+does not normally encounter this bug. I believe this is because it manages to
+optimise the problematic allocation away. This theory is supported by GCC
+encountering this bug when I disable inlining by changing the definitions of
+inline, __inline, __inline__, and __always_inline in
+include/linux/compiler-gcc.h.
+
+ mm/slab.c        |  1 +
+ mm/slab.h        |  1 +
+ mm/slab_common.c | 37 +++++++++++++++++++++----------------
+ mm/slub.c        |  1 +
+ 4 files changed, 24 insertions(+), 16 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index 65b5dcb..6c93f28 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1440,6 +1440,7 @@ void __init kmem_cache_init(void)
+ 	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
+ 				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+ 	slab_state = PARTIAL_NODE;
++	correct_kmalloc_cache_index_table();
+ 
+ 	slab_early_init = 0;
+ 
+diff --git a/mm/slab.h b/mm/slab.h
+index 1cf40054..036c08d 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -71,6 +71,7 @@ unsigned long calculate_alignment(unsigned long flags,
+ 
+ #ifndef CONFIG_SLOB
+ /* Kmalloc array related functions */
++void correct_kmalloc_cache_index_table(void);
+ void create_kmalloc_caches(unsigned long);
+ 
+ /* Find the kmalloc slab corresponding for a certain size */
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index e03dd6f..a4ac0d7 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -675,25 +675,19 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+ }
+ 
+ /*
+- * Create the kmalloc array. Some of the regular kmalloc arrays
+- * may already have been created because they were needed to
+- * enable allocations for slab creation.
++ * Patch up the size_index table if we have strange large alignment
++ * requirements for the kmalloc array. This is only the case for
++ * MIPS it seems. The standard arches will not generate any code here.
++ *
++ * Largest permitted alignment is 256 bytes due to the way we
++ * handle the index determination for the smaller caches.
++ *
++ * Make sure that nothing crazy happens if someone starts tinkering
++ * around with ARCH_KMALLOC_MINALIGN
+  */
+-void __init create_kmalloc_caches(unsigned long flags)
+-{
++void __init correct_kmalloc_cache_index_table(void) {
+ 	int i;
+ 
+-	/*
+-	 * Patch up the size_index table if we have strange large alignment
+-	 * requirements for the kmalloc array. This is only the case for
+-	 * MIPS it seems. The standard arches will not generate any code here.
+-	 *
+-	 * Largest permitted alignment is 256 bytes due to the way we
+-	 * handle the index determination for the smaller caches.
+-	 *
+-	 * Make sure that nothing crazy happens if someone starts tinkering
+-	 * around with ARCH_KMALLOC_MINALIGN
+-	 */
+ 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+ 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+ 
+@@ -724,6 +718,17 @@ void __init create_kmalloc_caches(unsigned long flags)
+ 		for (i = 128 + 8; i <= 192; i += 8)
+ 			size_index[size_index_elem(i)] = 8;
+ 	}
++}
++
++/*
++ * Create the kmalloc array. Some of the regular kmalloc arrays
++ * may already have been created because they were needed to
++ * enable allocations for slab creation.
++ */
++void __init create_kmalloc_caches(unsigned long flags)
++{
++	int i;
++
+ 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+ 		if (!kmalloc_caches[i]) {
+ 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
+diff --git a/mm/slub.c b/mm/slub.c
+index fe376fe..2217761 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3604,6 +3604,7 @@ void __init kmem_cache_init(void)
+ 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
+ 
+ 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
++	correct_kmalloc_cache_index_table();
+ 	create_kmalloc_caches(0);
+ 
+ #ifdef CONFIG_SMP
+-- 
+2.1.3
+
diff --git a/arch/all/patches/series b/arch/all/patches/series
index b37fbe0..7f0e3b9 100644
--- a/arch/all/patches/series
+++ b/arch/all/patches/series
@@ -25,3 +25,4 @@ vlais-wimax-i2400m.patch
 compiler-gcc.patch
 smaller.patch
 lib-mpi-extern-inline.patch
+correct-size_index-table-before-replacing-the-bootst.patch
diff --git a/arch/mips/patches/mips-WIP-Fix-slab-allocator-bootstrap.patch b/arch/mips/patches/mips-WIP-Fix-slab-allocator-bootstrap.patch
deleted file mode 100644
index 8805e98..0000000
--- a/arch/mips/patches/mips-WIP-Fix-slab-allocator-bootstrap.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From 875ec57dd7258115829918ed10980e1ad6a975c4 Mon Sep 17 00:00:00 2001
-From: Daniel Sanders <daniel.sanders at imgtec.com>
-Date: Fri, 19 Dec 2014 13:48:06 +0000
-Subject: [PATCH 1/2] [WIP] Fix slab allocator bootstrap.
-
-There are currently two functions that generate indices into kmalloc_caches and
-they disagree with each other.  This patch fixes the bug to the point that the
-LLVM-compiled kernel successfully boots for Mips but it will need further work
-before upstreaming.
-
-The failing sequence is:
-* kmalloc_caches contains NULL elements
-* kmem_cache_init initialises the element that 'struct kmem_cache_node' will be
-  allocated to. For 32-bit Mips, this is a 56-byte struct and kmalloc_index
-  selects element 7.
-* init_list is called which calls kmalloc_node to allocate a 'struct
-  kmem_cache_node'.
-* kmalloc_slab selects the kmem_caches element using
-  size_index[size_index_elem(size)]. For Mips, size is 56, and the expression
-  returns 6.
-* This element of kmalloc_caches is NULL and allocation fails.
-
-GCC does not normally encounter this bug. I believe this is because it manages
-to optimise the problematic allocation away. This theory is supported by GCC
-encountering this bug when I disable inlining by changing the definitions of
-inline, __inline, __inline__, and __always_inline in
-include/linux/compiler-gcc.h.
-
-Signed-off-by: Daniel Sanders <daniel.sanders at imgtec.com>
----
- .../all/patches/fix-slab-allocator-bootstrap.patch | 49 ++++++++++++++++++++++
- arch/all/patches/series                            |  1 +
- 2 files changed, 50 insertions(+)
- create mode 100644 arch/all/patches/fix-slab-allocator-bootstrap.patch
-
-diff --git a/arch/all/patches/fix-slab-allocator-bootstrap.patch b/arch/all/patches/fix-slab-allocator-bootstrap.patch
-new file mode 100644
-index 0000000..00d2abe
---- /dev/null
-+++ b/arch/all/patches/fix-slab-allocator-bootstrap.patch
-@@ -0,0 +1,49 @@
-+From e22638bbb262796ecb717e7cbc795aaad954a4ce Mon Sep 17 00:00:00 2001
-+From: Daniel Sanders <daniel.sanders at imgtec.com>
-+Date: Fri, 19 Dec 2014 13:16:19 +0000
-+Subject: [PATCH 1/2] [WIP] Fix slab allocator bootstrap.
-+
-+There are currently two functions that generate indices into kmalloc_caches and
-+they disagree with each other.  This patch fixes the bug to the point that the
-+LLVM-compiled kernel successfully boots for Mips but it will need further work
-+before upstreaming.
-+
-+The failing sequence is:
-+* kmalloc_caches contains NULL elements
-+* kmem_cache_init initialises the element that 'struct kmem_cache_node' will be
-+  allocated to. For 32-bit Mips, this is a 56-byte struct and kmalloc_index
-+  selects element 7.
-+* init_list is called which calls kmalloc_node to allocate a 'struct
-+  kmem_cache_node'.
-+* kmalloc_slab selects the kmem_caches element using
-+  size_index[size_index_elem(size)]. For Mips, size is 56, and the expression
-+  returns 6.
-+* This element of kmalloc_caches is NULL and allocation fails.
-+
-+GCC does not normally encounter this bug. I believe this is because it manages
-+to optimise the problematic allocation away. This theory is supported by GCC
-+encountering this bug when I disable inlining by changing the definitions of
-+inline, __inline, __inline__, and __always_inline in
-+include/linux/compiler-gcc.h.
-+
-+Signed-off-by: Daniel Sanders <daniel.sanders at imgtec.com>
-+---
-+ mm/slab_common.c | 2 +-
-+ 1 file changed, 1 insertion(+), 1 deletion(-)
-+
-+diff --git a/mm/slab_common.c b/mm/slab_common.c
-+index e03dd6f..459433f 100644
-+--- a/mm/slab_common.c
-++++ b/mm/slab_common.c
-+@@ -620,7 +620,7 @@ static s8 size_index[24] = {
-+ 	5,	/* 32 */
-+ 	6,	/* 40 */
-+ 	6,	/* 48 */
-+-	6,	/* 56 */
-++	7,	/* 56 */
-+ 	6,	/* 64 */
-+ 	1,	/* 72 */
-+ 	1,	/* 80 */
-+-- 
-+2.1.3
-+
-diff --git a/arch/all/patches/series b/arch/all/patches/series
-index b37fbe0..523c83d 100644
---- a/arch/all/patches/series
-+++ b/arch/all/patches/series
-@@ -25,3 +25,4 @@ vlais-wimax-i2400m.patch
- compiler-gcc.patch
- smaller.patch
- lib-mpi-extern-inline.patch
-+fix-slab-allocator-bootstrap.patch
--- 
-2.1.3
-
-- 
2.1.4



More information about the LLVMLinux mailing list