@@ -33,6 +33,8 @@ void * __init prom_early_alloc(unsigned long size)
 	void *ret;
 	ret = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ret)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
 	prom_early_allocated += size;
@@ -624,8 +624,14 @@ void __init alloc_irqstack_bootmem(void)
 		softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
 						       THREAD_SIZE, node);
+		if (!softirq_stack[i])
+			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+			      __func__, THREAD_SIZE, THREAD_SIZE, node);
 		hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
 						       THREAD_SIZE, node);
+		if (!hardirq_stack[i])
+			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+			      __func__, THREAD_SIZE, THREAD_SIZE, node);
 	}
 }
@@ -1628,6 +1628,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pud_t *new;
 		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
 		pgd_populate(&init_mm, pgd, new);
 	}
@@ -1636,6 +1638,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pmd_t *new;
 		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
 		pud_populate(&init_mm, pud, new);
 	}
@@ -1644,8 +1648,16 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pte_t *new;
 		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
 		pmd_populate_kernel(&init_mm, pmd, new);
 	}
+
+	return;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 }
 void __init setup_per_cpu_areas(void)
@@ -1809,6 +1809,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pgd_populate(&init_mm, pgd, new);
 		}
@@ -1822,6 +1824,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pud_populate(&init_mm, pud, new);
 		}
@@ -1836,6 +1840,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pmd_populate_kernel(&init_mm, pmd, new);
 		}
@@ -1855,6 +1861,11 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	}
 	return alloc_bytes;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+	return -ENOMEM;
 }
 static void __init flush_all_kernel_tsbs(void)
@@ -305,11 +305,17 @@ static void __init srmmu_nocache_init(void)
 	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
 					    SRMMU_NOCACHE_ALIGN_MAX);
+	if (!srmmu_nocache_pool)
+		panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 	srmmu_nocache_bitmap =
 		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
 			       SMP_CACHE_BYTES);
+	if (!srmmu_nocache_bitmap)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -468,6 +474,8 @@ static void __init sparc_context_init(int numctx)
 	size = numctx * sizeof(struct ctx_list);
 	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ctx_list_pool)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
 	for (ctx = 0; ctx < numctx; ctx++) {
 		struct ctx_list *clist;