@@ -5082,7 +5082,6 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
mchunkptr remainder; /* spare room at end to split off */
unsigned long remainder_size; /* its size */
INTERNAL_SIZE_T size;
- mchunkptr victim;

nb = checked_request2size (bytes);
if (nb == 0)
@@ -5101,129 +5100,13 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
we don't find anything in those bins, the common malloc code will
scan starting at 2x. */
- /* This will be set if we found a candidate chunk. */
- victim = NULL;
+ /* Call malloc with worst case padding to hit alignment. */
+ m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
- /* Fast bins are singly-linked, hard to remove a chunk from the middle
- and unlikely to meet our alignment requirements. We have not done
- any experimentation with searching for aligned fastbins. */
+ if (m == 0)
+ return 0; /* propagate failure */
- if (av != NULL)
- {
- int first_bin_index;
- int first_largebin_index;
- int last_bin_index;
-
- if (in_smallbin_range (nb))
- first_bin_index = smallbin_index (nb);
- else
- first_bin_index = largebin_index (nb);
-
- if (in_smallbin_range (nb * 2))
- last_bin_index = smallbin_index (nb * 2);
- else
- last_bin_index = largebin_index (nb * 2);
-
- first_largebin_index = largebin_index (MIN_LARGE_SIZE);
-
- int victim_index; /* its bin index */
-
- for (victim_index = first_bin_index;
- victim_index < last_bin_index;
- victim_index ++)
- {
- victim = NULL;
-
- if (victim_index < first_largebin_index)
- {
- /* Check small bins. Small bin chunks are doubly-linked despite
- being the same size. */
-
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
-
- bck = bin_at (av, victim_index);
- fwd = bck->fd;
- while (fwd != bck)
- {
- if (chunk_ok_for_memalign (fwd, alignment, nb) > 0)
- {
- victim = fwd;
-
- /* Unlink it */
- victim->fd->bk = victim->bk;
- victim->bk->fd = victim->fd;
- break;
- }
-
- fwd = fwd->fd;
- }
- }
- else
- {
- /* Check large bins. */
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr best = NULL;
- size_t best_size = 0;
-
- bck = bin_at (av, victim_index);
- fwd = bck->fd;
-
- while (fwd != bck)
- {
- int extra;
-
- if (chunksize (fwd) < nb)
- break;
- extra = chunk_ok_for_memalign (fwd, alignment, nb);
- if (extra > 0
- && (extra <= best_size || best == NULL))
- {
- best = fwd;
- best_size = extra;
- }
-
- fwd = fwd->fd;
- }
- victim = best;
-
- if (victim != NULL)
- {
- unlink_chunk (av, victim);
- break;
- }
- }
-
- if (victim != NULL)
- break;
- }
- }
-
- /* Strategy: find a spot within that chunk that meets the alignment
- request, and then possibly free the leading and trailing space.
- This strategy is incredibly costly and can lead to external
- fragmentation if header and footer chunks are unused. */
-
- if (victim != NULL)
- {
- p = victim;
- m = chunk2mem (p);
- set_inuse (p);
- if (av != &main_arena)
- set_non_main_arena (p);
- }
- else
- {
- /* Call malloc with worst case padding to hit alignment. */
-
- m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
-
- if (m == 0)
- return 0; /* propagate failure */
-
- p = mem2chunk (m);
- }
+ p = mem2chunk (m);
if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
{
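
(Note on the replacement path in the hunk above: the single _int_malloc call over-allocates by alignment + MINSIZE so that an aligned user address always exists inside the returned block, with any leading gap either zero or large enough to be split off as a chunk of its own; the retained /* misaligned */ branch, not shown in full here, does the actual carving on chunk boundaries. The following is a minimal standalone sketch of that arithmetic, not the glibc code itself: MIN_CHUNK stands in for MINSIZE, the math is done on user pointers rather than chunk headers, and it assumes alignment is a power of two no smaller than MIN_CHUNK, which glibc's callers appear to guarantee before _int_memalign runs.)

/* Minimal sketch (not glibc code): why requesting nb + alignment + MIN_CHUNK
   bytes always leaves room for an aligned block of nb bytes whose leading
   gap is either zero or at least MIN_CHUNK.  MIN_CHUNK stands in for glibc's
   MINSIZE; the arithmetic here uses user pointers, not chunk headers.  */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MIN_CHUNK 32   /* stand-in for MINSIZE on a 64-bit target */

static void *
carve_aligned (void *m, size_t nb, size_t alignment)
{
  uintptr_t start = (uintptr_t) m;

  /* Round up to the next multiple of alignment (assumed a power of two).  */
  uintptr_t aligned = (start + alignment - 1) & ~(uintptr_t) (alignment - 1);

  /* A nonzero gap smaller than MIN_CHUNK could not become a free chunk of
     its own, so skip ahead one more alignment step; the extra padding that
     was requested covers this.  Assumes alignment >= MIN_CHUNK.  */
  if (aligned != start && aligned - start < MIN_CHUNK)
    aligned += alignment;

  /* The block was nb + alignment + MIN_CHUNK bytes, so nb bytes still fit.  */
  assert (aligned + nb <= start + nb + alignment + MIN_CHUNK);
  return (void *) aligned;
}

int
main (void)
{
  size_t nb = 200, alignment = 64;
  char *m = malloc (nb + alignment + MIN_CHUNK);   /* worst-case padding */
  if (m == NULL)
    return 1;

  void *aligned = carve_aligned (m, nb, alignment);
  assert (((uintptr_t) aligned % alignment) == 0);

  free (m);   /* the sketch never splits chunks, so only m is freed */
  return 0;
}

In the allocator itself the leading and trailing space is turned back into free chunks rather than left inside the allocation, per the strategy comment removed above; the sketch only demonstrates why the worst-case padding is always sufficient.
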
@@ -40,18 +40,6 @@ static TestCase tcache_allocs[] = {
};
#define TN array_length (tcache_allocs)
-static TestCase large_allocs[] = {
- { 23450, 64, NULL, NULL },
- { 23450, 64, NULL, NULL },
- { 23550, 64, NULL, NULL },
- { 23550, 64, NULL, NULL },
- { 23650, 64, NULL, NULL },
- { 23650, 64, NULL, NULL },
- { 33650, 64, NULL, NULL },
- { 33650, 64, NULL, NULL }
-};
-#define LN array_length (large_allocs)
-
void *p;
/* Sanity checks, ancillary to the actual test. */
@@ -113,47 +101,6 @@ do_test (void)
free (p);
TEST_VERIFY (count > 0);
- /* Large bins test. */
-
- for (i = 0; i < LN; ++ i)
- {
- large_allocs[i].ptr1 = memalign (large_allocs[i].alignment, large_allocs[i].size);
- CHECK (large_allocs[i].ptr1, large_allocs[i].alignment);
- /* Keep chunks from combining by fragmenting the heap. */
- p = malloc (512);
- CHECK (p, 4);
- }
-
- for (i = 0; i < LN; ++ i)
- free (large_allocs[i].ptr1);
-
- /* Force the unsorted bins to be scanned and moved to small/large
- bins. */
- p = malloc (60000);
-
- for (i = 0; i < LN; ++ i)
- {
- large_allocs[i].ptr2 = memalign (large_allocs[i].alignment, large_allocs[i].size);
- CHECK (large_allocs[i].ptr2, large_allocs[i].alignment);
- }
-
- count = 0;
- for (i = 0; i < LN; ++ i)
- {
- int ok = 0;
- for (j = 0; j < LN; ++ j)
- if (large_allocs[i].ptr1 == large_allocs[j].ptr2)
- ok = 1;
- if (ok == 1)
- count ++;
- }
-
- /* The allocation algorithm is complicated outside of the memalign
- logic, so just make sure it's working for most of the
- allocations. This avoids possible boundary conditions with
- empty/full heaps. */
- TEST_VERIFY (count > LN / 2);
-
return 0;
}