@@ -51,7 +51,6 @@
#include "exec/memory-internal.h"
//#define DEBUG_UNASSIGNED
-//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
@@ -82,7 +81,14 @@ int use_icount;
#if !defined(CONFIG_USER_ONLY)
-static MemoryRegionSection *phys_sections;
+#define SUBSECTION_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
+
+typedef struct PhysSection {
+ MemoryRegionSection section;
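+    /* optional per-page table of phys_sections indexes, or NULL if the
+     * page is covered by a single section */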
+ uint16_t *sub_section;
+} PhysSection;
+
+static PhysSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
@@ -182,8 +188,8 @@ static void phys_page_set(AddressSpaceDispatch *d,
phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
-static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d,
- hwaddr index)
+static PhysSection *phys_section_find(AddressSpaceDispatch *d,
+ hwaddr index)
{
PhysPageEntry lp = d->phys_map;
PhysPageEntry *p;
@@ -646,7 +652,7 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
and avoid full address decoding in every device.
We can't use the high bits of pd for this because
IO_MEM_ROMD uses these as a ram address. */
- iotlb = section - phys_sections;
+ iotlb = container_of(section, PhysSection, section) - phys_sections;
iotlb += memory_region_section_addr(section, paddr);
}
@@ -668,27 +674,13 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
+static int subsection_register(PhysSection *psection, uint32_t start,
+ uint32_t end, uint16_t section);
+static void subsections_init(PhysSection *psection);
-#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
-typedef struct subpage_t {
- MemoryRegion iomem;
- hwaddr base;
- uint16_t sub_section[TARGET_PAGE_SIZE];
-} subpage_t;
-
-static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- uint16_t section);
-static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
- MemoryRegionSection *section = &phys_sections[section_index];
- MemoryRegion *mr = section->mr;
-
- if (mr->subpage) {
- subpage_t *subpage = container_of(mr, subpage_t, iomem);
- memory_region_destroy(&subpage->iomem);
- g_free(subpage);
- }
+ g_free(phys_sections[section_index].sub_section);
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
@@ -722,10 +714,11 @@ static uint16_t phys_section_add(MemoryRegionSection *section)
{
if (phys_sections_nb == phys_sections_nb_alloc) {
phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
- phys_sections = g_renew(MemoryRegionSection, phys_sections,
+ phys_sections = g_renew(PhysSection, phys_sections,
phys_sections_nb_alloc);
}
- phys_sections[phys_sections_nb] = *section;
+ phys_sections[phys_sections_nb].section = *section;
+ phys_sections[phys_sections_nb].sub_section = NULL;
return phys_sections_nb++;
}
@@ -734,31 +727,31 @@ static void phys_sections_clear(void)
phys_sections_nb = 0;
}
-static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
+static void register_subsection(AddressSpaceDispatch *d,
+ MemoryRegionSection *section)
{
- subpage_t *subpage;
hwaddr base = section->offset_within_address_space
& TARGET_PAGE_MASK;
- MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
+ PhysSection *psection = phys_section_find(d, base >> TARGET_PAGE_BITS);
MemoryRegionSection subsection = {
.offset_within_address_space = base,
.size = TARGET_PAGE_SIZE,
};
+ uint16_t new_section;
hwaddr start, end;
- assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
+ assert(psection->sub_section ||
+ psection->section.mr == &io_mem_unassigned);
- if (!(existing->mr->subpage)) {
- subpage = subpage_init(base);
- subsection.mr = &subpage->iomem;
- phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
- phys_section_add(&subsection));
- } else {
- subpage = container_of(existing->mr, subpage_t, iomem);
+ if (!psection->sub_section) {
+ new_section = phys_section_add(&subsection);
+ psection = &phys_sections[new_section];
+ subsections_init(psection);
+ phys_page_set(d, base >> TARGET_PAGE_BITS, 1, new_section);
}
start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
end = start + section->size - 1;
-    subpage_register(subpage, start, end, phys_section_add(section));
+    new_section = phys_section_add(section);
+    /* phys_section_add() may move phys_sections via g_renew(), so re-fetch
+     * psection before passing it on. */
+    psection = phys_section_find(d, base >> TARGET_PAGE_BITS);
+    subsection_register(psection, start, end, new_section);
}
@@ -786,7 +779,7 @@ static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
- now.offset_within_address_space,
now.size);
- register_subpage(d, &now);
+ register_subsection(d, &now);
remain.size -= now.size;
remain.offset_within_address_space += now.size;
remain.offset_within_region += now.size;
@@ -795,7 +788,7 @@ static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
now = remain;
if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
now.size = TARGET_PAGE_SIZE;
- register_subpage(d, &now);
+ register_subsection(d, &now);
} else {
now.size &= TARGET_PAGE_MASK;
register_multipage(d, &now);
@@ -806,7 +799,7 @@ static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
}
now = remain;
if (now.size) {
- register_subpage(d, &now);
+ register_subsection(d, &now);
}
}
@@ -1556,49 +1549,6 @@ static const MemoryRegionOps watch_mem_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static uint64_t subpage_read(void *opaque, hwaddr addr,
- unsigned len)
-{
- subpage_t *mmio = opaque;
- unsigned int idx = SUBPAGE_IDX(addr);
- MemoryRegionSection *section;
-#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
- mmio, len, addr, idx);
-#endif
-
- section = &phys_sections[mmio->sub_section[idx]];
- addr += mmio->base;
- addr -= section->offset_within_address_space;
- addr += section->offset_within_region;
- return io_mem_read(section->mr, addr, len);
-}
-
-static void subpage_write(void *opaque, hwaddr addr,
- uint64_t value, unsigned len)
-{
- subpage_t *mmio = opaque;
- unsigned int idx = SUBPAGE_IDX(addr);
- MemoryRegionSection *section;
-#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p len %d addr " TARGET_FMT_plx
- " idx %d value %"PRIx64"\n",
- __func__, mmio, len, addr, idx, value);
-#endif
-
- section = &phys_sections[mmio->sub_section[idx]];
- addr += mmio->base;
- addr -= section->offset_within_address_space;
- addr += section->offset_within_region;
- io_mem_write(section->mr, addr, value, len);
-}
-
-static const MemoryRegionOps subpage_ops = {
- .read = subpage_read,
- .write = subpage_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
unsigned size)
{
@@ -1631,48 +1581,32 @@ static const MemoryRegionOps subpage_ram_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- uint16_t section)
+static int subsection_register(PhysSection *psection, uint32_t start,
+ uint32_t end, uint16_t section)
{
     int idx, eidx;
+    /* phys_section_add() below may reallocate phys_sections and leave
+     * psection dangling, so cache the sub-section table pointer up front. */
+    uint16_t *sub_section = psection->sub_section;
if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
return -1;
- idx = SUBPAGE_IDX(start);
- eidx = SUBPAGE_IDX(end);
-#if defined(DEBUG_SUBPAGE)
- printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
- mmio, start, end, idx, eidx, memory);
-#endif
- if (memory_region_is_ram(phys_sections[section].mr)) {
- MemoryRegionSection new_section = phys_sections[section];
+ idx = SUBSECTION_IDX(start);
+ eidx = SUBSECTION_IDX(end);
+ if (memory_region_is_ram(phys_sections[section].section.mr)) {
+ MemoryRegionSection new_section = phys_sections[section].section;
new_section.mr = &io_mem_subpage_ram;
section = phys_section_add(&new_section);
}
for (; idx <= eidx; idx++) {
-        mmio->sub_section[idx] = section;
+        sub_section[idx] = section;
}
return 0;
}
-static subpage_t *subpage_init(hwaddr base)
+static void subsections_init(PhysSection *psection)
{
- subpage_t *mmio;
-
- mmio = g_malloc0(sizeof(subpage_t));
-
- mmio->base = base;
- memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
- "subpage", TARGET_PAGE_SIZE);
- mmio->iomem.subpage = true;
-#if defined(DEBUG_SUBPAGE)
- printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
- mmio, base, TARGET_PAGE_SIZE, subpage_memory);
-#endif
- subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
-
- return mmio;
+ psection->sub_section = g_malloc0(sizeof(uint16_t) * TARGET_PAGE_SIZE);
+ subsection_register(psection, 0, TARGET_PAGE_SIZE-1,
+ phys_section_unassigned);
}
static uint16_t dummy_section(MemoryRegion *mr)
@@ -1689,7 +1623,7 @@ static uint16_t dummy_section(MemoryRegion *mr)
MemoryRegion *iotlb_to_region(hwaddr index)
{
- return phys_sections[index & ~TARGET_PAGE_MASK].mr;
+ return phys_sections[index & ~TARGET_PAGE_MASK].section.mr;
}
static void io_mem_init(void)
@@ -2182,7 +2116,16 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
MemoryRegionSection *address_space_lookup_region(AddressSpace *as, hwaddr addr)
{
- return phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
+ PhysSection *psection;
+ uint16_t idx;
+
+ psection = phys_section_find(as->dispatch, addr >> TARGET_PAGE_BITS);
+ if (psection->sub_section) {
+ idx = psection->sub_section[SUBSECTION_IDX(addr)];
+ return &phys_sections[idx].section;
+ } else {
+ return &psection->section;
+ }
}
/* warning: addr must be aligned */
@@ -2383,7 +2326,7 @@ void stl_phys_notdirty(hwaddr addr, uint32_t val)
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = memory_region_section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
- section = &phys_sections[phys_section_rom];
+ section = &phys_sections[phys_section_rom].section;
}
io_mem_write(section->mr, addr, val, 4);
} else {
@@ -2415,7 +2358,7 @@ void stq_phys_notdirty(hwaddr addr, uint64_t val)
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = memory_region_section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
- section = &phys_sections[phys_section_rom];
+ section = &phys_sections[phys_section_rom].section;
}
#ifdef TARGET_WORDS_BIGENDIAN
io_mem_write(section->mr, addr, val >> 32, 4);
@@ -2444,7 +2387,7 @@ static inline void stl_phys_internal(hwaddr addr, uint32_t val,
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = memory_region_section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
- section = &phys_sections[phys_section_rom];
+ section = &phys_sections[phys_section_rom].section;
}
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
@@ -2511,7 +2454,7 @@ static inline void stw_phys_internal(hwaddr addr, uint32_t val,
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = memory_region_section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
- section = &phys_sections[phys_section_rom];
+ section = &phys_sections[phys_section_rom].section;
}
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
@@ -124,7 +124,6 @@ struct MemoryRegion {
hwaddr addr;
void (*destructor)(MemoryRegion *mr);
ram_addr_t ram_addr;
- bool subpage;
bool terminates;
bool readable;
bool ram;
@@ -797,7 +797,6 @@ void memory_region_init(MemoryRegion *mr,
mr->size = int128_2_64();
}
mr->addr = 0;
- mr->subpage = false;
mr->enabled = true;
mr->terminates = false;
 mr->ram = false;

Simplify the sub-page handling by implementing it directly in the dispatcher
instead of using a redirection memory region. We extend the phys_sections
entries to optionally hold a pointer to the sub-section table that used to
reside in the subpage_t structure. In other words, we add one optional
dispatch level below the existing radix tree. address_space_lookup_region is
extended to take this additional level into account.

Dispatching directly to the target memory region will also be helpful when we
want to add per-region locking control.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 exec.c                | 175 +++++++++++++++++--------------------------------
 include/exec/memory.h |   1 -
 memory.c              |   1 -
 3 files changed, 59 insertions(+), 118 deletions(-)
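For readers following the dispatch change without the surrounding exec.c
context, the sketch below illustrates the idea in isolation: a page-granular
table (a flat array standing in for the phys_map radix tree) maps addresses
to sections, and a section may optionally carry a per-offset sub-section
table for pages shared by several regions, which the lookup consults as a
second level. All names, types and sizes here are simplified, made-up
stand-ins, not the actual QEMU structures touched by this patch.

/*
 * Simplified sketch of the two-level dispatch described above.
 * Illustrative only: PAGE_BITS, Section, page_map etc. are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_BITS 12
#define PAGE_SIZE (1u << PAGE_BITS)
#define SUB_IDX(addr) ((addr) & (PAGE_SIZE - 1))

typedef struct Section {
    const char *name;       /* stands in for MemoryRegionSection */
    uint16_t *sub_section;  /* optional per-offset table, NULL if unused */
} Section;

static Section sections[64];
static unsigned nb_sections;
static uint16_t page_map[16];   /* page number -> index into sections[] */

static uint16_t section_add(const char *name)
{
    sections[nb_sections].name = name;
    sections[nb_sections].sub_section = NULL;
    return nb_sections++;
}

/* Point offsets [start, end] of a split page at 'target'. */
static void subsection_set(Section *s, uint32_t start, uint32_t end,
                           uint16_t target)
{
    if (!s->sub_section) {
        /* calloc() leaves every slot at 0, i.e. the "unassigned" entry. */
        s->sub_section = calloc(PAGE_SIZE, sizeof(uint16_t));
    }
    for (; start <= end; start++) {
        s->sub_section[start] = target;
    }
}

/* First level: page table.  Second (optional) level: sub-section table. */
static Section *lookup(uint64_t addr)
{
    Section *s = &sections[page_map[addr >> PAGE_BITS]];

    if (s->sub_section) {
        s = &sections[s->sub_section[SUB_IDX(addr)]];
    }
    return s;
}

int main(void)
{
    uint16_t ram, mmio, split;

    section_add("unassigned");            /* index 0 */
    ram   = section_add("ram");
    mmio  = section_add("small-mmio");
    split = section_add("split-page");    /* entry that owns a sub table */

    page_map[0] = ram;                    /* page 0: plain RAM            */
    page_map[1] = split;                  /* page 1: RAM + a small device */
    subsection_set(&sections[split], 0x000, 0x0ff, mmio);
    subsection_set(&sections[split], 0x100, 0xfff, ram);

    printf("%s %s %s\n", lookup(0x0010)->name,   /* ram        */
                         lookup(0x1040)->name,   /* small-mmio */
                         lookup(0x1800)->name);  /* ram        */
    return 0;
}

The point of keeping the sub table as a plain array of section indexes inside
the section entry, rather than behind a separate redirection MemoryRegion, is
that the lookup resolves to the final target section directly instead of
bouncing through an intermediate read/write callback.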
Simplify the sub-page handling by implementing it directly in the dispatcher instead of using a redirection memory region. We extend the phys_sections entries to optionally hold a pointer to the sub-section table that used to reside in the subpage_t structure. IOW, we add one optional dispatch level below the existing radix tree. address_space_lookup_region is extended to take this additional level into account. This direct dispatching to that target memory region will also be helpful when we want to add per-region locking control. Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> --- exec.c | 175 +++++++++++++++++-------------------------------- include/exec/memory.h | 1 - memory.c | 1 - 3 files changed, 59 insertions(+), 118 deletions(-)