[ping] Use single shared memory block pool for all pool allocators

Message ID 55F8BF80.8010308@gmail.com
State New

Commit Message

Mikhail Maltsev Sept. 16, 2015, 1:01 a.m. UTC
On 08/31/2015 02:44 PM, Richard Biener wrote:
> Apart from Richards comments:
> 
> +/* Return UNCAST_BLOCK to pool.  */
> +inline void
> +memory_block_pool::remove (void *uncast_block)
> +{
> +  block_list *block = reinterpret_cast<block_list *> (uncast_block);
> +  block->m_next = instance.m_blocks;
> +  instance.m_blocks = block;
> +}
> 
> you need to use placement new
> 
>     new (uncast_block) block_list;
> 
> instead of the reinterpret_cast to avoid type-based alias issues (as you
> are also inlining this function.
Fixed.
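
The committed version of the function (see the memory-block.h hunk below) uses
placement new, so the raw storage is given the effective type block_list before
its member is written:

  /* Return UNCAST_BLOCK to the pool.  */
  inline void
  memory_block_pool::release (void *uncast_block)
  {
    block_list *block = new (uncast_block) block_list;
    block->m_next = instance.m_blocks;
    instance.m_blocks = block;
  }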

> 
> Now some bikeshedding...
> 
> +  static inline void *allocate () ATTRIBUTE_MALLOC;
> +  static inline void remove (void *);
> 
> why is it called 'remove' and not 'release' or 'free'? (ah, release is
> already taken)
OK, let's name it 'release' and rename the old 'release' to 'clear_free_list'.
Originally I used these names for consistency with the corresponding methods of
object_pool and pool_allocator.
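
To summarize how the names end up being used (a sketch of the interface from
the memory-block.h hunk below; the per-pool methods keep their old names):

  class memory_block_pool
  {
  public:
    /* All pools share fixed-size blocks.  */
    static const size_t block_size = 64 * 1024;
    /* Take one block from the shared free list (or allocate a fresh one).  */
    static inline void *allocate () ATTRIBUTE_MALLOC;
    /* Put one block back on the shared free list.  */
    static inline void release (void *);
    /* Hand all free-listed blocks back to the system (XDELETEVEC).  */
    void clear_free_list ();
  };

base_pool_allocator::release () still frees an entire pool, now by passing each
of its blocks to TBlockAllocator::release ().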

> 
> Also why virtual-memory.{h,cc} and not memory-block.{h,cc}?
I planned to add code to these files for allocating memory directly from the OS
(i.e. wrappers around mmap/VirtualAlloc). Of course, right now memory-block
makes much more sense.

> 
> I think the patch is ok with the above correctness fix and whatever
> choice you take
> for the bikeshedding.  Also fixing Richards review comments, of course.
Fixed.

Bootstrapped and regtested on x86_64-linux and built config-list.mk. Applied to
trunk (r227817).
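
For out-of-tree callers, the visible API change is that pool constructors no
longer take a per-block element count; it is now derived from the fixed 64 KB
shared block size.  A typical adjustment (hypothetical pool, mirroring the
mechanical changes in the hunks below):

  /* Before: each pool chose its own block size via an element count.  */
  static object_allocator<my_node> my_pool ("my_node pool", 100);

  /* After: only the name is passed; untyped pools still pass the element
     size.  */
  static object_allocator<my_node> my_pool ("my_node pool");
  static pool_allocator raw_pool ("raw pool", sizeof (my_node));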

Patch

diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index b495bd2..254837e 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1513,7 +1513,7 @@  OBJS = \
 # Objects in libcommon.a, potentially used by all host binaries and with
 # no target dependencies.
 OBJS-libcommon = diagnostic.o diagnostic-color.o pretty-print.o intl.o \
-	vec.o input.o version.o hash-table.o ggc-none.o
+	vec.o input.o version.o hash-table.o ggc-none.o memory-block.o
 
 # Objects in libcommon-target.a, used by drivers and by the core
 # compiler and containing target-dependent code.
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 03bde63..70105ba 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -20,6 +20,7 @@  along with GCC; see the file COPYING3.  If not see
 #ifndef ALLOC_POOL_H
 #define ALLOC_POOL_H
 
+#include "memory-block.h"
 
 extern void dump_alloc_pool_statistics (void);
 
@@ -95,18 +96,53 @@  struct pool_usage: public mem_usage
 
 extern mem_alloc_description<pool_usage> pool_allocator_usage;
 
+#if 0
+/* If a pool with custom block size is needed, one might use the following
+   template.  An instance of this template can be used as a parameter for
+   instantiating base_pool_allocator template:
+
+	typedef custom_block_allocator <128*1024> huge_block_allocator;
+	...
+	static base_pool_allocator <huge_block_allocator>
+						value_pool ("value", 16384);
+
+   Right now it's not used anywhere in the code, and is given here as an
+   example).  */
+
+template <size_t BlockSize>
+class custom_block_allocator
+{
+public:
+  static const size_t block_size = BlockSize;
+
+  static inline void *
+  allocate () ATTRIBUTE_MALLOC
+  {
+    return XNEWVEC (char, BlockSize);
+  }
+
+  static inline void
+  release (void *block)
+  {
+    XDELETEVEC (block);
+  }
+};
+#endif
+
 /* Generic pool allocator.  */
-class pool_allocator
+
+template <typename TBlockAllocator>
+class base_pool_allocator
 {
 public:
-  /* Default constructor for pool allocator called NAME.  Each block
-     has NUM elements.  */
-  pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO);
-  ~pool_allocator ();
+  /* Default constructor for pool allocator called NAME.  */
+  base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
+  ~base_pool_allocator ();
   void release ();
   void release_if_empty ();
   void *allocate () ATTRIBUTE_MALLOC;
   void remove (void *object);
+  size_t num_elts_current ();
 
 private:
   struct allocation_pool_list
@@ -151,7 +187,7 @@  private:
   };
 
   /* Align X to 8.  */
-  size_t
+  static inline size_t
   align_eight (size_t x)
   {
     return (((x+7) >> 3) << 3);
@@ -180,8 +216,6 @@  private:
   size_t m_blocks_allocated;
   /* List of blocks that are used to allocate new objects.  */
   allocation_pool_list *m_block_list;
-  /* The number of elements in a block.  */
-  size_t m_block_size;
   /* Size of a pool elements in bytes.  */
   size_t m_elt_size;
   /* Size in bytes that should be allocated for each element.  */
@@ -192,24 +226,24 @@  private:
   mem_location m_location;
 };
 
+template <typename TBlockAllocator>
 inline
-pool_allocator::pool_allocator (const char *name, size_t num,
-				size_t size MEM_STAT_DECL):
-  m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
+base_pool_allocator <TBlockAllocator>::base_pool_allocator (
+				const char *name, size_t size MEM_STAT_DECL):
+  m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
   m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
-  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
-  m_block_size (0), m_size (size), m_initialized (false),
-  m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
+  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_size (size),
+  m_initialized (false), m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
 
 /* Initialize a pool allocator.  */
 
+template <typename TBlockAllocator>
 inline void
-pool_allocator::initialize ()
+base_pool_allocator <TBlockAllocator>::initialize ()
 {
   gcc_checking_assert (!m_initialized);
   m_initialized = true;
 
-  size_t header_size;
   size_t size = m_size;
 
   gcc_checking_assert (m_name);
@@ -218,15 +252,12 @@  pool_allocator::initialize ()
   if (size < sizeof (allocation_pool_list*))
     size = sizeof (allocation_pool_list*);
 
-  /* Now align the size to a multiple of 4.  */
+  /* Now align the size to a multiple of 8.  */
   size = align_eight (size);
 
   /* Add the aligned size of ID.  */
   size += offsetof (allocation_object, u.data);
 
-  /* Um, we can't really allocate 0 elements per block.  */
-  gcc_checking_assert (m_elts_per_block);
-
   m_elt_size = size;
 
   if (GATHER_STATISTICS)
@@ -239,9 +270,10 @@  pool_allocator::initialize ()
     }
 
   /* List header size should be a multiple of 8.  */
-  header_size = align_eight (sizeof (allocation_pool_list));
+  size_t header_size = align_eight (sizeof (allocation_pool_list));
 
-  m_block_size = (size * m_elts_per_block) + header_size;
+  m_elts_per_block = (TBlockAllocator::block_size - header_size) / size;
+  gcc_checking_assert (m_elts_per_block != 0);
 
 #ifdef ENABLE_CHECKING
   /* Increase the last used ID and use it for this pool.
@@ -255,8 +287,9 @@  pool_allocator::initialize ()
 }
 
 /* Free all memory allocated for the given memory pool.  */
+template <typename TBlockAllocator>
 inline void
-pool_allocator::release ()
+base_pool_allocator <TBlockAllocator>::release ()
 {
   if (!m_initialized)
     return;
@@ -267,7 +300,7 @@  pool_allocator::release ()
   for (block = m_block_list; block != NULL; block = next_block)
     {
       next_block = block->next;
-      free (block);
+      TBlockAllocator::release (block);
     }
 
   if (GATHER_STATISTICS)
@@ -285,21 +318,24 @@  pool_allocator::release ()
   m_block_list = NULL;
 }
 
-void
-inline pool_allocator::release_if_empty ()
+template <typename TBlockAllocator>
+inline void
+base_pool_allocator <TBlockAllocator>::release_if_empty ()
 {
   if (m_elts_free == m_elts_allocated)
     release ();
 }
 
-inline pool_allocator::~pool_allocator ()
+template <typename TBlockAllocator>
+inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator ()
 {
   release ();
 }
 
 /* Allocates one element from the pool specified.  */
+template <typename TBlockAllocator>
 inline void*
-pool_allocator::allocate ()
+base_pool_allocator <TBlockAllocator>::allocate ()
 {
   if (!m_initialized)
     initialize ();
@@ -327,7 +363,7 @@  pool_allocator::allocate ()
 	  allocation_pool_list *block_header;
 
 	  /* Make the block.  */
-	  block = XNEWVEC (char, m_block_size);
+	  block = reinterpret_cast<char *> (TBlockAllocator::allocate ());
 	  block_header = (allocation_pool_list*) block;
 	  block += align_eight (sizeof (allocation_pool_list));
 
@@ -378,8 +414,9 @@  pool_allocator::allocate ()
 }
 
 /* Puts PTR back on POOL's free list.  */
+template <typename TBlockAllocator>
 inline void
-pool_allocator::remove (void *object)
+base_pool_allocator <TBlockAllocator>::remove (void *object)
 {
   gcc_checking_assert (m_initialized);
 
@@ -412,15 +449,28 @@  pool_allocator::remove (void *object)
     }
 }
 
+/* Number of elements currently active (not returned to pool).  Used for cheap
+   consistency checks.  */
+template <typename TBlockAllocator>
+inline size_t
+base_pool_allocator <TBlockAllocator>::num_elts_current ()
+{
+  return m_elts_allocated - m_elts_free;
+}
+
+/* Specialization of base_pool_allocator which should be used in most cases.
+   Another specialization may be needed, if object size is greater than
+   memory_block_pool::block_size (64 KB).  */
+typedef base_pool_allocator <memory_block_pool> pool_allocator;
+
 /* Type based memory pool allocator.  */
 template <typename T>
 class object_allocator
 {
 public:
-  /* Default constructor for pool allocator called NAME.  Each block
-     has NUM elements.  */
-  object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO):
-    m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {}
+  /* Default constructor for pool allocator called NAME.  */
+  object_allocator (const char *name CXX_MEM_STAT_INFO):
+    m_allocator (name, sizeof (T) PASS_MEM_STAT) {}
 
   inline void
   release ()
@@ -448,6 +498,12 @@  public:
     m_allocator.remove (object);
   }
 
+  inline size_t
+  num_elts_current ()
+  {
+    return m_allocator.num_elts_current ();
+  }
+
 private:
   pool_allocator m_allocator;
 };
diff --git a/gcc/asan.c b/gcc/asan.c
index 4f5adaa..7c243cd 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -350,7 +350,7 @@  struct asan_mem_ref
   HOST_WIDE_INT access_size;
 };
 
-object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10);
+object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref");
 
 /* Initializes an instance of asan_mem_ref.  */
 
diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index ab58076..d358098 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -1687,8 +1687,7 @@  check_format_arg (void *ctx, tree format_tree,
      will decrement it if it finds there are extra arguments, but this way
      need not adjust it for every return.  */
   res->number_other++;
-  object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool",
-						  10);
+  object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool");
   check_format_info_main (res, info, format_chars, format_length,
 			  params, arg_num, fwt_pool);
 }
diff --git a/gcc/cfg.c b/gcc/cfg.c
index c998492..2bc7857 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -1052,7 +1052,7 @@  void
 initialize_original_copy_tables (void)
 {
   original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
-    ("original_copy", 10);
+    ("original_copy");
   bb_original = new hash_table<bb_copy_hasher> (10);
   bb_copy = new hash_table<bb_copy_hasher> (10);
   loop_copy = new hash_table<bb_copy_hasher> (10);
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index 17e2b40..41bb58e 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -225,9 +225,16 @@  struct basic_block_def;
 typedef struct basic_block_def *basic_block;
 typedef const struct basic_block_def *const_basic_block;
 
-#define obstack_chunk_alloc	xmalloc
-#define obstack_chunk_free	free
-#define OBSTACK_CHUNK_SIZE	0
+#if !defined (GENERATOR_FILE)
+# define OBSTACK_CHUNK_SIZE     memory_block_pool::block_size
+# define obstack_chunk_alloc    mempool_obstack_chunk_alloc
+# define obstack_chunk_free     mempool_obstack_chunk_free
+#else
+# define OBSTACK_CHUNK_SIZE     0
+# define obstack_chunk_alloc    xmalloc
+# define obstack_chunk_free     free
+#endif
+
 #define gcc_obstack_init(OBSTACK)				\
   obstack_specify_allocation ((OBSTACK), OBSTACK_CHUNK_SIZE, 0,	\
 			      obstack_chunk_alloc,		\
@@ -328,6 +335,7 @@  typedef unsigned char uchar;
 #include "hash-set.h"
 #include "input.h"
 #include "is-a.h"
+#include "memory-block.h"
 #endif /* GENERATOR_FILE && !USED_FOR_TARGET */
 
 #endif /* coretypes.h */
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 2149959..4264394 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -246,11 +246,11 @@  static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
    each time memory is invalidated.  */
 static cselib_val *first_containing_mem = &dummy_val;
 
-static object_allocator<elt_list> elt_list_pool ("elt_list", 10);
-static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list", 10);
-static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list", 10);
+static object_allocator<elt_list> elt_list_pool ("elt_list");
+static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list");
+static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list");
 
-static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));
+static pool_allocator value_pool ("value", RTX_CODE_SIZE (VALUE));
 
 /* If nonnull, cselib will call this function before freeing useless
    VALUEs.  A VALUE is deemed useless if its "locs" field is null.  */
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index d4b5d76..0ab533f 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1997,8 +1997,7 @@  static void
 df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   df_chain_remove_problem ();
-  df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool",
-						      50);
+  df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool");
   df_chain->optional_p = true;
 }
 
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index 259c959..eea93df 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -133,8 +133,6 @@  static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
    it gets run.  It also has no need for the iterative solver.
 ----------------------------------------------------------------------------*/
 
-#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
-
 /* Problem data for the scanning dataflow function.  */
 struct df_scan_problem_data
 {
@@ -253,17 +251,17 @@  df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
   df_scan->computed = true;
 
   problem_data->ref_base_pool = new object_allocator<df_base_ref>
-    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan ref base");
   problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
-    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan ref artificial");
   problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
-    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan ref regular");
   problem_data->insn_pool = new object_allocator<df_insn_info>
-    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan insn");
   problem_data->reg_pool = new object_allocator<df_reg_info>
-    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan reg");
   problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
-    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
+    ("df_scan mw_reg");
 
   bitmap_obstack_initialize (&problem_data->reg_bitmaps);
   bitmap_obstack_initialize (&problem_data->insn_bitmaps);
diff --git a/gcc/dse.c b/gcc/dse.c
index 0634d9d..86d0589 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -307,11 +307,9 @@  lowpart_bitmask (int n)
   return mask >> (HOST_BITS_PER_WIDE_INT - n);
 }
 
-static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
-						       100);
+static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool");
 
-static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
-						       100);
+static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool");
 
 /* This structure holds information about a load.  These are only
    built for rtx bases.  */
@@ -336,8 +334,7 @@  struct read_info_type
 };
 typedef struct read_info_type *read_info_t;
 
-static object_allocator<read_info_type> read_info_type_pool
-  ("read_info_pool", 100);
+static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
 
 /* One of these records is created for each insn.  */
 
@@ -426,8 +423,7 @@  struct insn_info_type
 };
 typedef struct insn_info_type *insn_info_t;
 
-static object_allocator<insn_info_type> insn_info_type_pool
-  ("insn_info_pool", 100);
+static object_allocator<insn_info_type> insn_info_type_pool ("insn_info_pool");
 
 /* The linked list of stores that are under consideration in this
    basic block.  */
@@ -494,7 +490,7 @@  struct dse_bb_info_type
 typedef struct dse_bb_info_type *bb_info_t;
 
 static object_allocator<dse_bb_info_type> dse_bb_info_type_pool
-  ("bb_info_pool", 100);
+  ("bb_info_pool");
 
 /* Table to hold all bb_infos.  */
 static bb_info_t *bb_table;
@@ -564,8 +560,7 @@  struct group_info
   int offset_map_size_n, offset_map_size_p;
 };
 
-static object_allocator<group_info> group_info_pool
-  ("rtx_group_info_pool", 100);
+static object_allocator<group_info> group_info_pool ("rtx_group_info_pool");
 
 /* Index into the rtx_group_vec.  */
 static int rtx_group_next_id;
@@ -589,7 +584,7 @@  struct deferred_change
 };
 
 static object_allocator<deferred_change> deferred_change_pool
-  ("deferred_change_pool", 10);
+  ("deferred_change_pool");
 
 static deferred_change *deferred_change_list = NULL;
 
diff --git a/gcc/et-forest.c b/gcc/et-forest.c
index 1931285..4f919d4 100644
--- a/gcc/et-forest.c
+++ b/gcc/et-forest.c
@@ -54,8 +54,8 @@  struct et_occ
 				   depth.  */
 };
 
-static object_allocator<et_node> et_nodes ("et_nodes pool", 300);
-static object_allocator<et_occ> et_occurrences ("et_occ pool", 300);
+static object_allocator<et_node> et_nodes ("et_nodes pool");
+static object_allocator<et_occ> et_occurrences ("et_occ pool");
 
 /* Changes depth of OCC to D.  */
 
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 8de7e56..69a181d 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -276,16 +276,16 @@  public:
 /* Allocation pools for values and their sources in ipa-cp.  */
 
 object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
-  ("IPA-CP constant values", 32);
+  ("IPA-CP constant values");
 
 object_allocator<ipcp_value<ipa_polymorphic_call_context> >
-  ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
+  ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts");
 
 object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
-  ("IPA-CP value sources", 64);
+  ("IPA-CP value sources");
 
 object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
-  ("IPA_CP aggregate lattices", 32);
+  ("IPA_CP aggregate lattices");
 
 /* Maximal count found in program.  */
 
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 3a8f0ec..4822329 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -143,7 +143,7 @@  vec<inline_edge_summary_t> inline_edge_summary_vec;
 vec<edge_growth_cache_entry> edge_growth_cache;
 
 /* Edge predicates goes here.  */
-static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
+static object_allocator<predicate> edge_predicate_pool ("edge predicates");
 
 /* Return true predicate (tautology).
    We represent it by empty list of clauses.  */
diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index 1b929c6..382897c 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -87,8 +87,7 @@  struct histogram_entry
    duplicate entries.  */
 
 vec<histogram_entry *> histogram;
-static object_allocator<histogram_entry> histogram_pool
-  ("IPA histogram", 10);
+static object_allocator<histogram_entry> histogram_pool ("IPA histogram");
 
 /* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR.  */
 
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index c862cff..8e0f182 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -95,7 +95,7 @@  struct ipa_cst_ref_desc
 /* Allocation pool for reference descriptions.  */
 
 static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
-  ("IPA-PROP ref descriptions", 32);
+  ("IPA-PROP ref descriptions");
 
 /* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
    with NODE should prevent us from analyzing it for the purposes of IPA-CP.  */
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 9f0d7db..f49591c 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -420,9 +420,9 @@  rebuild_regno_allocno_maps (void)
 
 
 /* Pools for allocnos, allocno live ranges and objects.  */
-static object_allocator<live_range> live_range_pool ("live ranges", 100);
-static object_allocator<ira_allocno> allocno_pool ("allocnos", 100);
-static object_allocator<ira_object> object_pool ("objects", 100);
+static object_allocator<live_range> live_range_pool ("live ranges");
+static object_allocator<ira_allocno> allocno_pool ("allocnos");
+static object_allocator<ira_object> object_pool ("objects");
 
 /* Vec containing references to all created allocnos.  It is a
    container of array allocnos.  */
@@ -1170,7 +1170,7 @@  finish_allocnos (void)
 
 
 /* Pools for allocno preferences.  */
-static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
+static object_allocator <ira_allocno_pref> pref_pool ("prefs");
 
 /* Vec containing references to all created preferences.  It is a
    container of array ira_prefs.  */
@@ -1357,7 +1357,7 @@  finish_prefs (void)
 
 
 /* Pools for copies.  */
-static object_allocator<ira_allocno_copy> copy_pool ("copies", 100);
+static object_allocator<ira_allocno_copy> copy_pool ("copies");
 
 /* Vec containing references to all created copies.  It is a
    container of array ira_copies.  */
@@ -1630,8 +1630,7 @@  initiate_cost_vectors (void)
     {
       aclass = ira_allocno_classes[i];
       cost_vector_pool[aclass] = new pool_allocator
-	("cost vectors", 100,
-	 sizeof (int) * (ira_class_hard_regs_num[aclass]));
+	("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
     }
 }
 
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 74d2c2e..e6533c6 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -1157,7 +1157,7 @@  setup_profitable_hard_regs (void)
 
 /* Pool for update cost records.  */
 static object_allocator<update_cost_record> update_cost_record_pool
-  ("update cost records", 100);
+  ("update cost records");
 
 /* Return new update cost record with given params.  */
 static struct update_cost_record *
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 1da5204..253bc18 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -107,8 +107,7 @@  static sparseset unused_set, dead_set;
 static bitmap_head temp_bitmap;
 
 /* Pool for pseudo live ranges.	 */
-static object_allocator<lra_live_range> lra_live_range_pool
-  ("live ranges", 100);
+static object_allocator<lra_live_range> lra_live_range_pool ("live ranges");
 
 /* Free live range list LR.  */
 static void
diff --git a/gcc/lra.c b/gcc/lra.c
index a836cab..bdbfe51 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -533,7 +533,7 @@  lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
    insns.  */
 
 /* Pools for insn reg info.  */
-object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs", 100);
+object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");
 
 /* Create LRA insn related info about a reference to REGNO in INSN with
    TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -744,7 +744,7 @@  free_insn_recog_data (lra_insn_recog_data_t data)
 }
 
 /* Pools for copies.  */
-static object_allocator<lra_copy> lra_copy_pool ("lra copies", 100);
+static object_allocator<lra_copy> lra_copy_pool ("lra copies");
 
 /* Finish LRA data about all insns.  */
 static void
diff --git a/gcc/memory-block.cc b/gcc/memory-block.cc
new file mode 100644
index 0000000..8470c7a
--- /dev/null
+++ b/gcc/memory-block.cc
@@ -0,0 +1,64 @@ 
+/* Shared pool of memory blocks for pool allocators.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "memory-block.h"
+#include "obstack.h"
+
+/* Global singleton-like instance.  */
+memory_block_pool memory_block_pool::instance;
+
+memory_block_pool::memory_block_pool () : m_blocks (NULL) {}
+
+/* Return all blocks from free list to the OS.  */
+void
+memory_block_pool::clear_free_list ()
+{
+  while (m_blocks)
+    {
+      block_list *next = m_blocks->m_next;
+      XDELETEVEC (m_blocks);
+      m_blocks = next;
+    }
+}
+
+/* Allocate a chunk for obstack.  Use the pool if requested chunk size matches
+   the size of blocks in the pool.  */
+void *
+mempool_obstack_chunk_alloc (size_t size)
+{
+  if (size == memory_block_pool::block_size)
+    return memory_block_pool::allocate ();
+  else
+    return XNEWVEC (char, size);
+}
+
+/* Free previously allocated obstack chunk.  */
+void
+mempool_obstack_chunk_free (void *chunk)
+{
+  size_t size = (reinterpret_cast<_obstack_chunk *> (chunk)->limit
+		 - reinterpret_cast<char *> (chunk));
+  if (size == memory_block_pool::block_size)
+    memory_block_pool::release (chunk);
+  else
+    XDELETEVEC (chunk);
+}
diff --git a/gcc/memory-block.h b/gcc/memory-block.h
new file mode 100644
index 0000000..1a495ea
--- /dev/null
+++ b/gcc/memory-block.h
@@ -0,0 +1,75 @@ 
+/* Shared pool of memory blocks for pool allocators.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+
+#ifndef MEMORY_BLOCK_H
+#define MEMORY_BLOCK_H
+
+/* Shared pool which allows other memory pools to reuse each others' allocated
+   memory blocks instead of calling free/malloc again.  */
+class memory_block_pool
+{
+public:
+  /* Blocks have fixed size.  This is necessary for sharing.  */
+  static const size_t block_size = 64 * 1024;
+
+  memory_block_pool ();
+
+  static inline void *allocate () ATTRIBUTE_MALLOC;
+  static inline void release (void *);
+  void clear_free_list ();
+
+private:
+  /* memory_block_pool singleton instance, defined in memory-block.cc.  */
+  static memory_block_pool instance;
+
+  struct block_list
+  {
+    block_list *m_next;
+  };
+
+  /* Free list.  */
+  block_list *m_blocks;
+};
+
+/* Allocate a single block.  Reuse a previously returned block, if possible.  */
+inline void *
+memory_block_pool::allocate ()
+{
+  if (instance.m_blocks == NULL)
+    return XNEWVEC (char, block_size);
+
+  void *result = instance.m_blocks;
+  instance.m_blocks = instance.m_blocks->m_next;
+  return result;
+}
+
+/* Return UNCAST_BLOCK to the pool.  */
+inline void
+memory_block_pool::release (void *uncast_block)
+{
+  block_list *block = new (uncast_block) block_list;
+  block->m_next = instance.m_blocks;
+  instance.m_blocks = block;
+}
+
+extern void *mempool_obstack_chunk_alloc (size_t) ATTRIBUTE_MALLOC;
+extern void mempool_obstack_chunk_free (void *);
+
+#endif /* MEMORY_BLOCK_H */
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 97433f0..6f7d01e 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -75,7 +75,7 @@  struct value_data
 };
 
 static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool
-  ("debug insn changes pool", 256);
+  ("debug insn changes pool");
 
 static bool skip_debug_insn_p;
 
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 8a40eea..9683055 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -4059,14 +4059,10 @@  sched_deps_init (bool global_p)
 
   if (global_p)
     {
-      dl_pool = new object_allocator<_deps_list> ("deps_list",
-                                   /* Allocate lists for one block at a time.  */
-                                   insns_in_block);
-      dn_pool = new object_allocator<_dep_node> ("dep_node",
-                                   /* Allocate nodes for one block at a time.
-                                      We assume that average insn has
-                                      5 producers.  */
-                                   5 * insns_in_block);
+      dl_pool = new object_allocator<_deps_list> ("deps_list");
+				/* Allocate lists for one block at a time.  */
+      dn_pool = new object_allocator<_dep_node> ("dep_node");
+				/* Allocate nodes for one block at a time.  */
     }
 }
 
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 9988285..8ea4dce 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -59,7 +59,7 @@  vec<sel_region_bb_info_def>
     sel_region_bb_info = vNULL;
 
 /* A pool for allocating all lists.  */
-object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
+object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");
 
 /* This contains information about successors for compute_av_set.  */
 struct succs_info current_succs;
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 9d33cbe..134d78e 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -1138,7 +1138,7 @@  expand_case (gswitch *stmt)
   struct case_node *case_list = 0;
 
   /* A pool for case nodes.  */
-  object_allocator<case_node> case_node_pool ("struct case_node pool", 100);
+  object_allocator<case_node> case_node_pool ("struct case_node pool");
 
   /* An ERROR_MARK occurs for various reasons including invalid data type.
      ??? Can this still happen, with GIMPLE and all?  */
@@ -1314,8 +1314,7 @@  expand_sjlj_dispatch_table (rtx dispatch_index,
     {
       /* Similar to expand_case, but much simpler.  */
       struct case_node *case_list = 0;
-      object_allocator<case_node> case_node_pool ("struct sjlj_case pool",
-						ncases);
+      object_allocator<case_node> case_node_pool ("struct sjlj_case pool");
       tree index_expr = make_tree (index_type, dispatch_index);
       tree minval = build_int_cst (index_type, 0);
       tree maxval = CASE_LOW (dispatch_table.last ());
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 8b3a0ad..94fea2e 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -277,7 +277,7 @@  typedef struct access *access_p;
 
 
 /* Alloc pool for allocating access structures.  */
-static object_allocator<struct access> access_pool ("SRA accesses", 16);
+static object_allocator<struct access> access_pool ("SRA accesses");
 
 /* A structure linking lhs and rhs accesses from an aggregate assignment.  They
    are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -289,7 +289,7 @@  struct assign_link
 };
 
 /* Alloc pool for allocating assign link structures.  */
-static object_allocator<assign_link> assign_link_pool ("SRA links", 16);
+static object_allocator<assign_link> assign_link_pool ("SRA links");
 
 /* Base (tree) -> Vector (vec<access_p> *) map.  */
 static hash_map<tree, auto_vec<access_p> > *base_access_vec;
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index eae5358..c8d0d33 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -547,8 +547,7 @@  pass_cse_reciprocals::execute (function *fun)
   basic_block bb;
   tree arg;
 
-  occ_pool = new object_allocator<occurrence>
-    ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
+  occ_pool = new object_allocator<occurrence> ("dominators for recip");
 
   memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
   calculate_dominance_info (CDI_DOMINATORS);
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 1bf8558..9c4e301 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -349,7 +349,7 @@  clear_expression_ids (void)
   expressions.release ();
 }
 
-static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
+static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes");
 
 /* Given an SSA_NAME NAME, get or create a pre_expr to represent it.  */
 
@@ -488,7 +488,7 @@  static unsigned int get_expr_value_id (pre_expr);
 /* We can add and remove elements and entries to and from sets
    and hash tables, so we use alloc pools for them.  */
 
-static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
+static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets");
 static bitmap_obstack grand_bitmap_obstack;
 
 /* Set of blocks with statements that have had their EH properties changed.  */
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 45e7b61..f1ffb249 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -209,8 +209,8 @@  typedef struct operand_entry
   unsigned int count;
 } *operand_entry_t;
 
-static object_allocator<operand_entry> operand_entry_pool ("operand entry pool",
-							 30);
+static object_allocator<operand_entry> operand_entry_pool
+  ("operand entry pool");
 
 /* This is used to assign a unique ID to each struct operand_entry
    so that qsort results are identical on different hosts.  */
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index aea6acc..d9eb9f3 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -4146,9 +4146,9 @@  allocate_vn_table (vn_tables_t table)
   table->references = new vn_reference_table_type (23);
 
   gcc_obstack_init (&table->nary_obstack);
-  table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30);
+  table->phis_pool = new object_allocator<vn_phi_s> ("VN phis");
   table->references_pool = new object_allocator<vn_reference_s>
-    ("VN references", 30);
+    ("VN references");
 }
 
 /* Free a value number table.  */
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index cfe4dd9..87f48bc 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -113,8 +113,7 @@  typedef struct strinfo_struct
 } *strinfo;
 
 /* Pool for allocating strinfo_struct entries.  */
-static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool",
-						      64);
+static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool");
 
 /* Vector mapping positive string indexes to strinfo, for the
    current basic block.  The first pointer in the vector is special,
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 5800f8c..b5b9d0a 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -323,7 +323,7 @@  static inline bool type_can_have_subvars (const_tree);
 
 /* Pool of variable info structures.  */
 static object_allocator<variable_info> variable_info_pool
-  ("Variable info pool", 30);
+  ("Variable info pool");
 
 /* Map varinfo to final pt_solution.  */
 static hash_map<varinfo_t, pt_solution *> *final_solutions;
@@ -523,7 +523,7 @@  struct constraint
 /* List of constraints that we use to build the constraint graph from.  */
 
 static vec<constraint_t> constraints;
-static object_allocator<constraint> constraint_pool ("Constraint pool", 30);
+static object_allocator<constraint> constraint_pool ("Constraint pool");
 
 /* The constraint graph is represented as an array of bitmaps
    containing successor nodes.  */
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 126feee..e3542d3 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -576,28 +576,27 @@  typedef struct variable_tracking_info_def
 } *variable_tracking_info;
 
 /* Alloc pool for struct attrs_def.  */
-object_allocator<attrs_def> attrs_def_pool ("attrs_def pool", 1024);
+object_allocator<attrs_def> attrs_def_pool ("attrs_def pool");
 
 /* Alloc pool for struct variable_def with MAX_VAR_PARTS entries.  */
 
 static pool_allocator var_pool
-  ("variable_def pool", 64, sizeof (variable_def) +
+  ("variable_def pool", sizeof (variable_def) +
    (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
 
 /* Alloc pool for struct variable_def with a single var_part entry.  */
 static pool_allocator valvar_pool
-  ("small variable_def pool", 256, sizeof (variable_def));
+  ("small variable_def pool", sizeof (variable_def));
 
-/* Alloc pool for struct location_chain_def.  */
+/* Alloc pool for struct location_chain.  */
 static object_allocator<location_chain> location_chain_pool
-  ("location_chain pool", 1024);
+  ("location_chain pool");
 
-/* Alloc pool for struct shared_hash_def.  */
-static object_allocator<shared_hash> shared_hash_pool
-  ("shared_hash pool", 256);
+/* Alloc pool for struct shared_hash.  */
+static object_allocator<shared_hash> shared_hash_pool ("shared_hash pool");
 
 /* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables.  */
-object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool", 64);
+object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool");
 
 /* Changed variables, notes will be emitted for them.  */
 static variable_table_type *changed_variables;