@@ -1609,7 +1609,7 @@ static void watch_mem_write(void *opaque, hwaddr addr,
stb_phys(addr, val);
break;
case 2:
- stw_phys(addr, val);
+ stw_phys(&address_space_memory, addr, val);
break;
case 4:
stl_phys(&address_space_memory, addr, val);
@@ -2586,7 +2586,8 @@ void stb_phys(hwaddr addr, uint32_t val)
}
/* warning: addr must be aligned */
-static inline void stw_phys_internal(hwaddr addr, uint32_t val,
+static inline void stw_phys_internal(AddressSpace *as,
+ hwaddr addr, uint32_t val,
enum device_endian endian)
{
uint8_t *ptr;
@@ -2594,8 +2595,7 @@ static inline void stw_phys_internal(hwaddr addr, uint32_t val,
hwaddr l = 2;
hwaddr addr1;
- mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
- true);
+ mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
@@ -2626,19 +2626,19 @@ static inline void stw_phys_internal(hwaddr addr, uint32_t val,
}
}
-void stw_phys(hwaddr addr, uint32_t val)
+void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+ stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}
-void stw_le_phys(hwaddr addr, uint32_t val)
+void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+ stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}
-void stw_be_phys(hwaddr addr, uint32_t val)
+void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+ stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
@@ -89,7 +89,7 @@ static inline void
vmw_shmem_st16(hwaddr addr, uint16_t value)
{
VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
- stw_le_phys(addr, value);
+ stw_le_phys(&address_space_memory, addr, value);
}
static inline uint32_t
@@ -65,7 +65,7 @@ ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
for (i = 0; i < 6; i++) {
stb_phys(bdloc + 0x24 + i, bd->bi_enetaddr[i]);
}
- stw_be_phys(bdloc + 0x2A, bd->bi_ethspeed);
+ stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed);
stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq);
stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq);
stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate);
@@ -570,7 +570,7 @@ static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
stb_phys(addr, val);
return H_SUCCESS;
case 2:
- stw_phys(addr, val);
+ stw_phys(cs->as, addr, val);
return H_SUCCESS;
case 4:
stl_phys(cs->as, addr, val);
@@ -635,7 +635,7 @@ static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
stb_phys(dst, tmp);
break;
case 1:
- stw_phys(dst, tmp);
+ stw_phys(cs->as, dst, tmp);
break;
case 2:
stl_phys(cs->as, dst, tmp);
@@ -680,7 +680,8 @@ static void css_update_chnmon(SubchDev *sch)
count = lduw_phys(&address_space_memory,
channel_subsys->chnmon_area + offset);
count++;
- stw_phys(channel_subsys->chnmon_area + offset, count);
+ stw_phys(&address_space_memory,
+ channel_subsys->chnmon_area + offset, count);
}
}
@@ -77,10 +77,10 @@ void s390_virtio_reset_idx(VirtIOS390Device *dev)
for (i = 0; i < num_vq; i++) {
idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) +
VIRTIO_VRING_AVAIL_IDX_OFFS;
- stw_phys(idx_addr, 0);
+ stw_phys(&address_space_memory, idx_addr, 0);
idx_addr = virtio_queue_get_used_addr(dev->vdev, i) +
VIRTIO_VRING_USED_IDX_OFFS;
- stw_phys(idx_addr, 0);
+ stw_phys(&address_space_memory, idx_addr, 0);
}
}
@@ -380,7 +380,9 @@ void s390_virtio_device_sync(VirtIOS390Device *dev)
virtio_queue_set_vector(dev->vdev, i, i);
stq_be_phys(&address_space_memory,
vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring);
- stw_be_phys(vq + VIRTIO_VQCONFIG_OFFS_NUM, virtio_queue_get_num(dev->vdev, i));
+ stw_be_phys(&address_space_memory,
+ vq + VIRTIO_VQCONFIG_OFFS_NUM,
+ virtio_queue_get_num(dev->vdev, i));
}
cur_offs = dev->dev_offs;
@@ -477,7 +477,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
vq_config.index = lduw_phys(&address_space_memory, ccw.cda);
vq_config.num_max = virtio_queue_get_num(vdev,
vq_config.index);
- stw_phys(ccw.cda + sizeof(vq_config.index), vq_config.num_max);
+ stw_phys(&address_space_memory,
+ ccw.cda + sizeof(vq_config.index), vq_config.num_max);
sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
ret = 0;
}
@@ -319,7 +319,7 @@ static void r2d_init(QEMUMachineInitArgs *args)
/* initialization which should be done by firmware */
stl_phys(&address_space_memory, SH7750_BCR1, 1<<3); /* cs3 SDRAM */
- stw_phys(SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */
+ stw_phys(&address_space_memory, SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */
reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */
}
@@ -180,21 +180,23 @@ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, idx);
- stw_phys(pa, val);
+ stw_phys(&address_space_memory, pa, val);
}
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, flags);
- stw_phys(pa, lduw_phys(&address_space_memory, pa) | mask);
+ stw_phys(&address_space_memory,
+ pa, lduw_phys(&address_space_memory, pa) | mask);
}
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, flags);
- stw_phys(pa, lduw_phys(&address_space_memory, pa) & ~mask);
+ stw_phys(&address_space_memory,
+ pa, lduw_phys(&address_space_memory, pa) & ~mask);
}
static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
@@ -204,7 +206,7 @@ static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
return;
}
pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
- stw_phys(pa, val);
+ stw_phys(&address_space_memory, pa, val);
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
@@ -91,8 +91,8 @@ uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(hwaddr addr, uint32_t val);
-void stw_le_phys(hwaddr addr, uint32_t val);
-void stw_be_phys(hwaddr addr, uint32_t val);
+void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
@@ -103,7 +103,7 @@ uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_phys(hwaddr addr, uint32_t val);
+void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#endif
@@ -60,8 +60,8 @@ void do_smm_enter(X86CPU *cpu)
for (i = 0; i < 6; i++) {
dt = &env->segs[i];
offset = 0x7e00 + i * 16;
- stw_phys(sm_state + offset, dt->selector);
- stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ stw_phys(cs->as, sm_state + offset, dt->selector);
+ stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
stl_phys(cs->as, sm_state + offset + 4, dt->limit);
stq_phys(cs->as, sm_state + offset + 8, dt->base);
}
@@ -69,18 +69,18 @@ void do_smm_enter(X86CPU *cpu)
stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base);
stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit);
- stw_phys(sm_state + 0x7e70, env->ldt.selector);
+ stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector);
stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base);
stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit);
- stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+ stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
stq_phys(cs->as, sm_state + 0x7e88, env->idt.base);
stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit);
- stw_phys(sm_state + 0x7e90, env->tr.selector);
+ stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector);
stq_phys(cs->as, sm_state + 0x7e98, env->tr.base);
stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit);
- stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+ stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
stq_phys(cs->as, sm_state + 0x7ed0, env->efer);
@@ -89,13 +89,13 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
CPUState *cs = ENV_GET_CPU(env);
- stw_phys(addr + offsetof(struct vmcb_seg, selector),
+ stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
sc->selector);
stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
sc->base);
stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
sc->limit);
- stw_phys(addr + offsetof(struct vmcb_seg, attrib),
+ stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
@@ -1043,7 +1043,8 @@ void HELPER(ptlb)(CPUS390XState *env)
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
- stw_phys(get_address(env, 0, 0, addr), (uint32_t)v1);
+ CPUState *cs = ENV_GET_CPU(env);
+ stw_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
}
/* load real address */
@@ -1015,7 +1015,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
stb_phys(addr, val);
break;
case 2:
- stw_phys(addr, val);
+ stw_phys(cs->as, addr, val);
break;
case 4:
default:
@@ -1035,7 +1035,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
| ((hwaddr)(asi & 0xf) << 32), val);
break;
case 2:
- stw_phys((hwaddr)addr
+ stw_phys(cs->as, (hwaddr)addr
| ((hwaddr)(asi & 0xf) << 32), val);
break;
case 4:
@@ -1814,7 +1814,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
stb_phys(addr, val);
break;
case 2:
- stw_phys(addr, val);
+ stw_phys(cs->as, addr, val);
break;
case 4:
stl_phys(cs->as, addr, val);
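
For reference, a minimal caller-side sketch of the two migration patterns used throughout the hunks above: board/device code now names the global address_space_memory explicitly, while per-CPU helpers pass the CPU's own address space via cs->as. The function demo_store16 and its arguments are illustrative only, not part of the patch, and the header paths are the usual QEMU ones, assumed here rather than taken from the hunks.

#include "exec/cpu-common.h"      /* stw_phys(), stw_le_phys(), lduw_phys() */
#include "exec/address-spaces.h"  /* address_space_memory */
#include "qom/cpu.h"              /* CPUState (for cs->as) */

/* Illustrative only: a 16-bit read-modify-write against guest memory,
 * written against the post-patch prototypes shown above. */
static void demo_store16(CPUState *cs, hwaddr addr, uint32_t mask)
{
    /* Device/board-style call sites: the AddressSpace is now explicit. */
    uint32_t v = lduw_phys(&address_space_memory, addr);
    stw_le_phys(&address_space_memory, addr, v | mask);

    /* CPU-helper-style call sites: use the CPU's own address space. */
    stw_phys(cs->as, addr, v | mask);
}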