@@ -35,3 +35,25 @@ dump_alloc_pool_statistics (void)
pool_allocator_usage.dump (ALLOC_POOL_ORIGIN);
}
+
+/* Global singleton-like instance. */
+memory_block_pool block_pool;
+
+memory_block_pool::memory_block_pool () : m_blocks (NULL) {}
+
+memory_block_pool::~memory_block_pool ()
+{
+ release ();
+}
+
+/* Free all memory allocated by memory_block_pool. */
+void
+memory_block_pool::release ()
+{
+ while (m_blocks)
+ {
+ block_list *next = m_blocks->m_next;
+ XDELETEVEC (m_blocks);
+ m_blocks = next;
+ }
+}
@@ -95,13 +95,60 @@ struct pool_usage: public mem_usage
extern mem_alloc_description<pool_usage> pool_allocator_usage;
+/* Shared pool which allows other memory pools to reuse each others' allocated
+ memory blocks instead of calling free/malloc again. */
+class memory_block_pool
+{
+public:
+ /* Blocks have fixed size. This is necessary for sharing. */
+ static const size_t block_size = 64 * 1024;
+
+ memory_block_pool ();
+ ~memory_block_pool ();
+
+ inline void *allocate () ATTRIBUTE_MALLOC;
+ inline void remove (void *);
+ void release ();
+private:
+ struct block_list
+ {
+ block_list *m_next;
+ };
+
+ /* Free list. */
+ block_list *m_blocks;
+};
+
+/* Allocate single block. Reuse previously returned block, if possible. */
+inline void *
+memory_block_pool::allocate ()
+{
+ if (m_blocks == NULL)
+ return XNEWVEC (char, block_size);
+
+ void *result = m_blocks;
+ m_blocks = m_blocks->m_next;
+ return result;
+}
+
+/* Return UNCAST_BLOCK to the pool's free list for later reuse.  */
+inline void
+memory_block_pool::remove (void *uncast_block)
+{
+ block_list *block = reinterpret_cast<block_list *> (uncast_block);
+ block->m_next = m_blocks;
+ m_blocks = block;
+}
+
+/* memory_block_pool singleton-like instance, defined in alloc-pool.c. */
+extern memory_block_pool block_pool;
+
/* Generic pool allocator. */
class pool_allocator
{
public:
- /* Default constructor for pool allocator called NAME. Each block
- has NUM elements. */
- pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO);
+ /* Default constructor for pool allocator called NAME. */
+ pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
~pool_allocator ();
void release ();
void release_if_empty ();
@@ -151,7 +198,7 @@ private:
};
/* Align X to 8. */
- size_t
+ static inline size_t
align_eight (size_t x)
{
return (((x+7) >> 3) << 3);
@@ -180,8 +227,6 @@ private:
size_t m_blocks_allocated;
/* List of blocks that are used to allocate new objects. */
allocation_pool_list *m_block_list;
- /* The number of elements in a block. */
- size_t m_block_size;
/* Size of a pool elements in bytes. */
size_t m_elt_size;
/* Size in bytes that should be allocated for each element. */
@@ -193,13 +238,11 @@ private:
};
inline
-pool_allocator::pool_allocator (const char *name, size_t num,
- size_t size MEM_STAT_DECL):
- m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
+pool_allocator::pool_allocator (const char *name, size_t size MEM_STAT_DECL):
+ m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
- m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
- m_block_size (0), m_size (size), m_initialized (false),
- m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
+ m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_size (size),
+ m_initialized (false), m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
/* Initialize a pool allocator. */
@@ -209,7 +252,6 @@ pool_allocator::initialize ()
gcc_checking_assert (!m_initialized);
m_initialized = true;
- size_t header_size;
size_t size = m_size;
gcc_checking_assert (m_name);
@@ -218,15 +260,12 @@ pool_allocator::initialize ()
if (size < sizeof (allocation_pool_list*))
size = sizeof (allocation_pool_list*);
- /* Now align the size to a multiple of 4. */
+ /* Now align the size to a multiple of 8. */
size = align_eight (size);
/* Add the aligned size of ID. */
size += offsetof (allocation_object, u.data);
- /* Um, we can't really allocate 0 elements per block. */
- gcc_checking_assert (m_elts_per_block);
-
m_elt_size = size;
if (GATHER_STATISTICS)
@@ -239,9 +278,10 @@ pool_allocator::initialize ()
}
/* List header size should be a multiple of 8. */
- header_size = align_eight (sizeof (allocation_pool_list));
+ size_t header_size = align_eight (sizeof (allocation_pool_list));
- m_block_size = (size * m_elts_per_block) + header_size;
+ m_elts_per_block = (memory_block_pool::block_size - header_size) / size;
+ gcc_checking_assert (m_elts_per_block != 0);
#ifdef ENABLE_CHECKING
/* Increase the last used ID and use it for this pool.
@@ -267,7 +307,7 @@ pool_allocator::release ()
for (block = m_block_list; block != NULL; block = next_block)
{
next_block = block->next;
- free (block);
+ block_pool.remove (block);
}
if (GATHER_STATISTICS)
@@ -327,7 +367,7 @@ pool_allocator::allocate ()
allocation_pool_list *block_header;
/* Make the block. */
- block = XNEWVEC (char, m_block_size);
+ block = reinterpret_cast<char *> (block_pool.allocate ());
block_header = (allocation_pool_list*) block;
block += align_eight (sizeof (allocation_pool_list));
@@ -417,10 +457,9 @@ template <typename T>
class object_allocator
{
public:
- /* Default constructor for pool allocator called NAME. Each block
- has NUM elements. */
- object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO):
- m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {}
+ /* Default constructor for pool allocator called NAME. */
+ object_allocator (const char *name CXX_MEM_STAT_INFO):
+ m_allocator (name, sizeof (T) PASS_MEM_STAT) {}
inline void
release ()
@@ -350,7 +350,7 @@ struct asan_mem_ref
HOST_WIDE_INT access_size;
};
-object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10);
+object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref");
/* Initializes an instance of asan_mem_ref. */
@@ -1687,8 +1687,7 @@ check_format_arg (void *ctx, tree format_tree,
will decrement it if it finds there are extra arguments, but this way
need not adjust it for every return. */
res->number_other++;
- object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool",
- 10);
+ object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool");
check_format_info_main (res, info, format_chars, format_length,
params, arg_num, fwt_pool);
}
@@ -1052,7 +1052,7 @@ void
initialize_original_copy_tables (void)
{
original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
- ("original_copy", 10);
+ ("original_copy");
bb_original = new hash_table<bb_copy_hasher> (10);
bb_copy = new hash_table<bb_copy_hasher> (10);
loop_copy = new hash_table<bb_copy_hasher> (10);
@@ -246,11 +246,11 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
each time memory is invalidated. */
static cselib_val *first_containing_mem = &dummy_val;
-static object_allocator<elt_list> elt_list_pool ("elt_list", 10);
-static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list", 10);
-static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list", 10);
+static object_allocator<elt_list> elt_list_pool ("elt_list");
+static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list");
+static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list");
-static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));
+static pool_allocator value_pool ("value", RTX_CODE_SIZE (VALUE));
/* If nonnull, cselib will call this function before freeing useless
VALUEs. A VALUE is deemed useless if its "locs" field is null. */
@@ -1997,8 +1997,7 @@ static void
df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
df_chain_remove_problem ();
- df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool",
- 50);
+ df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool");
df_chain->optional_p = true;
}
@@ -133,8 +133,6 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
it gets run. It also has no need for the iterative solver.
----------------------------------------------------------------------------*/
-#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
-
/* Problem data for the scanning dataflow function. */
struct df_scan_problem_data
{
@@ -253,17 +251,17 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
df_scan->computed = true;
problem_data->ref_base_pool = new object_allocator<df_base_ref>
- ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ ("df_scan ref base");
problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
- ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ ("df_scan ref artificial");
problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
- ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ ("df_scan ref regular");
problem_data->insn_pool = new object_allocator<df_insn_info>
- ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ ("df_scan insn");
problem_data->reg_pool = new object_allocator<df_reg_info>
- ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ ("df_scan reg");
problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
- ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
+ ("df_scan mw_reg");
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
bitmap_obstack_initialize (&problem_data->insn_bitmaps);
@@ -308,11 +308,9 @@ lowpart_bitmask (int n)
}
typedef struct store_info *store_info_t;
-static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
- 100);
+static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool");
-static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
- 100);
+static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool");
/* This structure holds information about a load. These are only
built for rtx bases. */
@@ -337,8 +335,7 @@ struct read_info_type
};
typedef struct read_info_type *read_info_t;
-static object_allocator<read_info_type> read_info_type_pool
- ("read_info_pool", 100);
+static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
/* One of these records is created for each insn. */
@@ -427,8 +424,7 @@ struct insn_info_type
};
typedef struct insn_info_type *insn_info_t;
-static object_allocator<insn_info_type> insn_info_type_pool
- ("insn_info_pool", 100);
+static object_allocator<insn_info_type> insn_info_type_pool ("insn_info_pool");
/* The linked list of stores that are under consideration in this
basic block. */
@@ -495,7 +491,7 @@ struct dse_bb_info_type
typedef struct dse_bb_info_type *bb_info_t;
static object_allocator<dse_bb_info_type> dse_bb_info_type_pool
- ("bb_info_pool", 100);
+ ("bb_info_pool");
/* Table to hold all bb_infos. */
static bb_info_t *bb_table;
@@ -567,8 +563,7 @@ struct group_info
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
-static object_allocator<group_info> group_info_pool
- ("rtx_group_info_pool", 100);
+static object_allocator<group_info> group_info_pool ("rtx_group_info_pool");
/* Index into the rtx_group_vec. */
static int rtx_group_next_id;
@@ -594,7 +589,7 @@ struct deferred_change
typedef struct deferred_change *deferred_change_t;
static object_allocator<deferred_change> deferred_change_pool
- ("deferred_change_pool", 10);
+ ("deferred_change_pool");
static deferred_change_t deferred_change_list = NULL;
@@ -54,8 +54,8 @@ struct et_occ
depth. */
};
-static object_allocator<et_node> et_nodes ("et_nodes pool", 300);
-static object_allocator<et_occ> et_occurrences ("et_occ pool", 300);
+static object_allocator<et_node> et_nodes ("et_nodes pool");
+static object_allocator<et_occ> et_occurrences ("et_occ pool");
/* Changes depth of OCC to D. */
@@ -276,16 +276,16 @@ public:
/* Allocation pools for values and their sources in ipa-cp. */
object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
- ("IPA-CP constant values", 32);
+ ("IPA-CP constant values");
object_allocator<ipcp_value<ipa_polymorphic_call_context> >
- ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
+ ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts");
object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
- ("IPA-CP value sources", 64);
+ ("IPA-CP value sources");
object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
- ("IPA_CP aggregate lattices", 32);
+ ("IPA_CP aggregate lattices");
/* Maximal count found in program. */
@@ -143,7 +143,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec;
vec<edge_growth_cache_entry> edge_growth_cache;
/* Edge predicates goes here. */
-static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
+static object_allocator<predicate> edge_predicate_pool ("edge predicates");
/* Return true predicate (tautology).
We represent it by empty list of clauses. */
@@ -87,8 +87,7 @@ struct histogram_entry
duplicate entries. */
vec<histogram_entry *> histogram;
-static object_allocator<histogram_entry> histogram_pool
- ("IPA histogram", 10);
+static object_allocator<histogram_entry> histogram_pool ("IPA histogram");
/* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR. */
@@ -95,7 +95,7 @@ struct ipa_cst_ref_desc
/* Allocation pool for reference descriptions. */
static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
- ("IPA-PROP ref descriptions", 32);
+ ("IPA-PROP ref descriptions");
/* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
with NODE should prevent us from analyzing it for the purposes of IPA-CP. */
@@ -421,9 +421,9 @@ rebuild_regno_allocno_maps (void)
/* Pools for allocnos, allocno live ranges and objects. */
-static object_allocator<live_range> live_range_pool ("live ranges", 100);
-static object_allocator<ira_allocno> allocno_pool ("allocnos", 100);
-static object_allocator<ira_object> object_pool ("objects", 100);
+static object_allocator<live_range> live_range_pool ("live ranges");
+static object_allocator<ira_allocno> allocno_pool ("allocnos");
+static object_allocator<ira_object> object_pool ("objects");
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
@@ -1171,7 +1171,7 @@ finish_allocnos (void)
/* Pools for allocno preferences. */
-static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
+static object_allocator <ira_allocno_pref> pref_pool ("prefs");
/* Vec containing references to all created preferences. It is a
container of array ira_prefs. */
@@ -1358,7 +1358,7 @@ finish_prefs (void)
/* Pools for copies. */
-static object_allocator<ira_allocno_copy> copy_pool ("copies", 100);
+static object_allocator<ira_allocno_copy> copy_pool ("copies");
/* Vec containing references to all created copies. It is a
container of array ira_copies. */
@@ -1631,8 +1631,7 @@ initiate_cost_vectors (void)
{
aclass = ira_allocno_classes[i];
cost_vector_pool[aclass] = new pool_allocator
- ("cost vectors", 100,
- sizeof (int) * (ira_class_hard_regs_num[aclass]));
+ ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
}
}
@@ -1157,7 +1157,7 @@ setup_profitable_hard_regs (void)
/* Pool for update cost records. */
static object_allocator<update_cost_record> update_cost_record_pool
- ("update cost records", 100);
+ ("update cost records");
/* Return new update cost record with given params. */
static struct update_cost_record *
@@ -107,8 +107,7 @@ static sparseset unused_set, dead_set;
static bitmap_head temp_bitmap;
/* Pool for pseudo live ranges. */
-static object_allocator<lra_live_range> lra_live_range_pool
- ("live ranges", 100);
+static object_allocator<lra_live_range> lra_live_range_pool ("live ranges");
/* Free live range list LR. */
static void
@@ -533,7 +533,7 @@ lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
insns. */
/* Pools for insn reg info. */
-object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs", 100);
+object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");
/* Create LRA insn related info about a reference to REGNO in INSN with
TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -746,7 +746,7 @@ free_insn_recog_data (lra_insn_recog_data_t data)
}
/* Pools for copies. */
-static object_allocator<lra_copy> lra_copy_pool ("lra copies", 100);
+static object_allocator<lra_copy> lra_copy_pool ("lra copies");
/* Finish LRA data about all insns. */
static void
@@ -75,7 +75,7 @@ struct value_data
};
static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool
- ("debug insn changes pool", 256);
+ ("debug insn changes pool");
static bool skip_debug_insn_p;
@@ -4058,14 +4058,10 @@ sched_deps_init (bool global_p)
if (global_p)
{
- dl_pool = new object_allocator<_deps_list> ("deps_list",
- /* Allocate lists for one block at a time. */
- insns_in_block);
- dn_pool = new object_allocator<_dep_node> ("dep_node",
- /* Allocate nodes for one block at a time.
- We assume that average insn has
- 5 producers. */
- 5 * insns_in_block);
+ dl_pool = new object_allocator<_deps_list> ("deps_list");
+      /* Block sizing is now fixed by memory_block_pool.  */
+ dn_pool = new object_allocator<_dep_node> ("dep_node");
+      /* Block sizing is now fixed by memory_block_pool.  */
}
}
@@ -59,7 +59,7 @@ vec<sel_region_bb_info_def>
sel_region_bb_info = vNULL;
/* A pool for allocating all lists. */
-object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
+object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");
/* This contains information about successors for compute_av_set. */
struct succs_info current_succs;
@@ -1139,7 +1139,7 @@ expand_case (gswitch *stmt)
struct case_node *case_list = 0;
/* A pool for case nodes. */
- object_allocator<case_node> case_node_pool ("struct case_node pool", 100);
+ object_allocator<case_node> case_node_pool ("struct case_node pool");
/* An ERROR_MARK occurs for various reasons including invalid data type.
??? Can this still happen, with GIMPLE and all? */
@@ -1315,8 +1315,7 @@ expand_sjlj_dispatch_table (rtx dispatch_index,
{
/* Similar to expand_case, but much simpler. */
struct case_node *case_list = 0;
- object_allocator<case_node> case_node_pool ("struct sjlj_case pool",
- ncases);
+ object_allocator<case_node> case_node_pool ("struct sjlj_case pool");
tree index_expr = make_tree (index_type, dispatch_index);
tree minval = build_int_cst (index_type, 0);
tree maxval = CASE_LOW (dispatch_table.last ());
@@ -277,7 +277,7 @@ typedef struct access *access_p;
/* Alloc pool for allocating access structures. */
-static object_allocator<struct access> access_pool ("SRA accesses", 16);
+static object_allocator<struct access> access_pool ("SRA accesses");
/* A structure linking lhs and rhs accesses from an aggregate assignment. They
are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -289,7 +289,7 @@ struct assign_link
};
/* Alloc pool for allocating assign link structures. */
-static object_allocator<assign_link> assign_link_pool ("SRA links", 16);
+static object_allocator<assign_link> assign_link_pool ("SRA links");
/* Base (tree) -> Vector (vec<access_p> *) map. */
static hash_map<tree, auto_vec<access_p> > *base_access_vec;
@@ -547,8 +547,7 @@ pass_cse_reciprocals::execute (function *fun)
basic_block bb;
tree arg;
- occ_pool = new object_allocator<occurrence>
- ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
+ occ_pool = new object_allocator<occurrence> ("dominators for recip");
memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
calculate_dominance_info (CDI_DOMINATORS);
@@ -349,7 +349,7 @@ clear_expression_ids (void)
expressions.release ();
}
-static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
+static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes");
/* Given an SSA_NAME NAME, get or create a pre_expr to represent it. */
@@ -488,7 +488,7 @@ static unsigned int get_expr_value_id (pre_expr);
/* We can add and remove elements and entries to and from sets
and hash tables, so we use alloc pools for them. */
-static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
+static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets");
static bitmap_obstack grand_bitmap_obstack;
/* Set of blocks with statements that have had their EH properties changed. */
@@ -209,8 +209,8 @@ typedef struct operand_entry
unsigned int count;
} *operand_entry_t;
-static object_allocator<operand_entry> operand_entry_pool ("operand entry pool",
- 30);
+static object_allocator<operand_entry> operand_entry_pool
+ ("operand entry pool");
/* This is used to assign a unique ID to each struct operand_entry
so that qsort results are identical on different hosts. */
@@ -4125,9 +4125,9 @@ allocate_vn_table (vn_tables_t table)
table->references = new vn_reference_table_type (23);
gcc_obstack_init (&table->nary_obstack);
- table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30);
+ table->phis_pool = new object_allocator<vn_phi_s> ("VN phis");
table->references_pool = new object_allocator<vn_reference_s>
- ("VN references", 30);
+ ("VN references");
}
/* Free a value number table. */
@@ -113,8 +113,7 @@ typedef struct strinfo_struct
} *strinfo;
/* Pool for allocating strinfo_struct entries. */
-static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool",
- 64);
+static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool");
/* Vector mapping positive string indexes to strinfo, for the
current basic block. The first pointer in the vector is special,
@@ -323,7 +323,7 @@ static inline bool type_can_have_subvars (const_tree);
/* Pool of variable info structures. */
static object_allocator<variable_info> variable_info_pool
- ("Variable info pool", 30);
+ ("Variable info pool");
/* Map varinfo to final pt_solution. */
static hash_map<varinfo_t, pt_solution *> *final_solutions;
@@ -523,7 +523,7 @@ struct constraint
/* List of constraints that we use to build the constraint graph from. */
static vec<constraint_t> constraints;
-static object_allocator<constraint> constraint_pool ("Constraint pool", 30);
+static object_allocator<constraint> constraint_pool ("Constraint pool");
/* The constraint graph is represented as an array of bitmaps
containing successor nodes. */
@@ -576,28 +576,28 @@ typedef struct variable_tracking_info_def
} *variable_tracking_info;
/* Alloc pool for struct attrs_def. */
-object_allocator<attrs_def> attrs_def_pool ("attrs_def pool", 1024);
+object_allocator<attrs_def> attrs_def_pool ("attrs_def pool");
/* Alloc pool for struct variable_def with MAX_VAR_PARTS entries. */
static pool_allocator var_pool
- ("variable_def pool", 64, sizeof (variable_def) +
+ ("variable_def pool", sizeof (variable_def) +
(MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
/* Alloc pool for struct variable_def with a single var_part entry. */
static pool_allocator valvar_pool
- ("small variable_def pool", 256, sizeof (variable_def));
+ ("small variable_def pool", sizeof (variable_def));
/* Alloc pool for struct location_chain_def. */
static object_allocator<location_chain_def> location_chain_def_pool
- ("location_chain_def pool", 1024);
+ ("location_chain_def pool");
/* Alloc pool for struct shared_hash_def. */
static object_allocator<shared_hash_def> shared_hash_def_pool
- ("shared_hash_def pool", 256);
+ ("shared_hash_def pool");
/* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables. */
-object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool", 64);
+object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool");
/* Changed variables, notes will be emitted for them. */
static variable_table_type *changed_variables;