@@ -168,16 +168,20 @@ static void mem_poison(struct free_hdr *f)
unsigned long start = (unsigned long)(f + 1);
unsigned long *t = tailer(f);
size_t poison_size = (unsigned long)t - start;
- void *mem;
/* We only poison up to a limit, as otherwise boot is
* kinda slow */
if (poison_size > POISON_MEM_REGION_LIMIT)
poison_size = POISON_MEM_REGION_LIMIT;
- mem = vm_map(start, poison_size, true);
- memset(mem, POISON_MEM_REGION_WITH, poison_size);
- vm_unmap(start, poison_size);
+ if (cpu_in_os()) {
+ memset((void *)start, POISON_MEM_REGION_WITH, poison_size);
+ } else {
+ void *mem;
+ mem = vm_map(start, poison_size, true);
+ memset(mem, POISON_MEM_REGION_WITH, poison_size);
+ vm_unmap(start, poison_size);
+ }
}
#endif
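
The hunk above keys the poison path on cpu_in_os(): when entered from the
OS with translation enabled, skiboot can store straight through the
caller's mappings, while at boot it still needs a temporary local map. A
minimal sketch of what such a predicate could look like, assuming it is
derived from the existing in_opal_call counter (the real definition
presumably comes earlier in this series):

  /* Hypothetical sketch only: "in the OS" taken to mean this thread
   * entered skiboot through an OPAL call rather than during boot. */
  static inline bool cpu_in_os(void)
  {
          return this_cpu()->in_opal_call > 0;
  }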
@@ -211,9 +215,14 @@ static void init_allocatable_region(struct mem_region *region)
f->hdr.prev_free = false;
list_add(&region->free_list, &f->list);
- t = vm_map((unsigned long)tailer(f), sizeof(long), true);
- *t = num_longs;
- vm_unmap((unsigned long)tailer(f), sizeof(long));
+ if (cpu_in_os()) {
+ t = tailer(f);
+ *t = num_longs;
+ } else {
+ t = vm_map((unsigned long)tailer(f), sizeof(long), true);
+ *t = num_longs;
+ vm_unmap((unsigned long)tailer(f), sizeof(long));
+ }
#if POISON_MEM_REGION == 1
mem_poison(f);
@@ -277,9 +286,15 @@ static void make_free(struct mem_region *region, struct free_hdr *f,
no_unmap:
/* Fix up tailer. */
- t = vm_map((unsigned long)tailer(f), sizeof(long), true);
- *t = f->hdr.num_longs;
- vm_unmap((unsigned long)tailer(f), sizeof(long));
+ if (cpu_in_os()) {
+ t = tailer(f);
+ *t = f->hdr.num_longs;
+ } else {
+ t = vm_map((unsigned long)tailer(f), sizeof(long), true);
+ *t = f->hdr.num_longs;
+ vm_unmap((unsigned long)tailer(f), sizeof(long));
+ }
+
}
/* Can we fit this many longs with this alignment in this free block? */
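
For context, the tailer mirrors hdr.num_longs in the last long of the
block so that a neighbouring free can walk backwards to this header
(classic boundary tags). A sketch of the helper's assumed shape,
consistent with how the hunks above use it:

  /* Assumed shape: the last long of the block, holding a copy of the
   * size recorded in the header. */
  static unsigned long *tailer(struct free_hdr *f)
  {
          return (unsigned long *)f + f->hdr.num_longs - 1;
  }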
@@ -398,6 +398,8 @@ static int64_t opal_v4_le_entry(uint64_t token, uint64_t r4, uint64_t r5,
{
struct cpu_thread *cpu;
opal_call_fn *fn;
+ bool was_vm_setup;
+ uint64_t old_opal_call_msr;
uint64_t msr;
uint32_t pir;
uint64_t r16;
@@ -406,14 +408,20 @@ static int64_t opal_v4_le_entry(uint64_t token, uint64_t r4, uint64_t r5,
msr = mfmsr();
assert(!(msr & MSR_EE));
- if (msr & (MSR_IR|MSR_DR))
- mtmsrd(msr & ~(MSR_IR|MSR_DR), 0);
-
pir = mfspr(SPR_PIR);
r16 = (uint64_t)__this_cpu;
__this_cpu = &cpu_stacks[pir].cpu;
cpu = this_cpu();
+ old_opal_call_msr = cpu->opal_call_msr;
+ cpu->opal_call_msr = msr;
+
+ was_vm_setup = this_cpu()->vm_setup;
+ if (msr & (MSR_IR|MSR_DR))
+ this_cpu()->vm_setup = true;
+ else
+ this_cpu()->vm_setup = false;
+
cpu->in_opal_call++;
if (cpu->in_opal_call == 1) {
cpu->current_token = token;
@@ -453,11 +461,13 @@ out:
assert(cpu->in_opal_call > 0);
cpu->in_opal_call--;
- if (msr != mfmsr())
- mtmsrd(msr, 0);
+ this_cpu()->vm_setup = was_vm_setup;
+ cpu->opal_call_msr = old_opal_call_msr;
__this_cpu = (struct cpu_thread *)r16;
+ assert(mfmsr() == msr);
+
return ret;
}
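
The vm_setup derivation above can also be read as a single expression;
the if/else in the hunk is equivalent to:

  /* vm_setup tracks whether the caller entered with any translation
   * (IR or DR) enabled. */
  this_cpu()->vm_setup = !!(msr & (MSR_IR|MSR_DR));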
@@ -491,9 +491,18 @@ void *vm_map(unsigned long addr, unsigned long len, bool rw)
assert(!c->vm_local_map_inuse);
c->vm_local_map_inuse = true;
- if (c->vm_setup) {
+ if (cpu_in_os() && c->vm_setup) {
+ assert(c->opal_call_msr & (MSR_IR|MSR_DR));
+ newaddr = addr;
+ mtmsr(c->opal_call_msr & ~MSR_DR);
+ c->vm_setup = false;
+ } else if (!c->vm_setup) {
+ newaddr = addr;
+ } else {
struct vm_map *new = &c->vm_local_map;
+ assert(!cpu_in_os());
+
newaddr = LOCAL_EA_BEGIN + LOCAL_EA_PERCPU * c->pir;
new->name = "local";
@@ -504,9 +513,6 @@ void *vm_map(unsigned long addr, unsigned long len, bool rw)
new->writeable = rw;
new->executable = false;
new->ci = false;
-
- } else {
- newaddr = addr;
}
return (void *)newaddr + offset;
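
With the three cases above, callers keep the same strict pairing as
before: vm_unmap is what restores MSR[DR] on the OS-entry path (or tears
down the local mapping at boot), so every map must be matched. An
illustrative caller, with hypothetical phys/len:

  void *p = vm_map(phys, len, true);
  memset(p, 0, len);
  vm_unmap(phys, len);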
@@ -527,10 +533,18 @@ void vm_unmap(unsigned long addr, unsigned long len)
assert(c->vm_local_map_inuse);
c->vm_local_map_inuse = false;
- if (c->vm_setup) {
+ if (cpu_in_os() && (c->opal_call_msr & (MSR_IR|MSR_DR))) {
+ assert(!c->vm_setup);
+ c->vm_setup = true;
+ mtmsr(c->opal_call_msr);
+ } else if (!c->vm_setup) {
+ return;
+ } else {
struct vm_map *vmm;
unsigned long ea;
+ assert(!cpu_in_os());
+
newaddr = LOCAL_EA_BEGIN + LOCAL_EA_PERCPU * c->pir;
vmm = &c->vm_local_map;
@@ -59,6 +59,7 @@ struct cpu_thread {
enum cpu_thread_state state;
struct dt_node *node;
struct trace_info *trace;
+ uint64_t opal_call_msr;
uint64_t save_r1;
void *icp_regs;
uint32_t in_opal_call;
This patch removes the real-mode switch from the V4 entry, and allows
skiboot to run in virtual mode. When vm_map is used to access addresses
outside the global mappings, MSR[DR] is disabled, although MSR[IR]
remains on (is this wise? It's not a common configuration for Linux).

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 core/mem_region.c | 35 +++++++++++++++++++++++++----------
 core/opal.c       | 20 +++++++++++++++-----
 core/vm.c         | 24 +++++++++++++++++++-----
 include/cpu.h     |  1 +
 4 files changed, 60 insertions(+), 20 deletions(-)
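
For reference, the IR-on/DR-off window the question above refers to
reduces to this sequence, as in the vm_map/vm_unmap hunks (msr standing
in for the saved opal_call_msr):

  uint64_t msr = this_cpu()->opal_call_msr;
  mtmsr(msr & ~MSR_DR);   /* IR stays on: fetches translated, data real */
  /* ... touch the target physical address directly ... */
  mtmsr(msr);             /* restore the caller's translation state */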