diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
@@ -665,6 +665,9 @@ static void kvm_check_ins(u32 *inst, u32 features)
	}
}

+extern u32 kvm_template_start[];
+extern u32 kvm_template_end[];
+
static void kvm_use_magic_page(void)
{
	u32 *p;
@@ -692,8 +695,12 @@ static void kvm_use_magic_page(void)
	 */
	local_irq_disable();
-	for (p = start; p < end; p++)
+	for (p = start; p < end; p++) {
+		/* Avoid patching the template code */
+		if (p >= kvm_template_start && p < kvm_template_end)
+			p = kvm_template_end;
		kvm_check_ins(p, features);
+	}
	local_irq_enable();
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
@@ -66,6 +66,9 @@ kvm_hypercall_start:
shared->critical == r1 and r2 is always != r1 */ \
STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
+.global kvm_template_start
+kvm_template_start:
+
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:
@@ -350,3 +353,6 @@ kvm_emulate_mtsrin_orig_ins_offs:
.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
+
+.global kvm_template_end
+kvm_template_end:
Currently we patch the whole kernel text, including the paravirt template
code itself. This isn't safe for the scratch area and has a performance
impact.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
---
v2: exclude the entire template region in the main loop

 arch/powerpc/kernel/kvm.c      |    9 ++++++++-
 arch/powerpc/kernel/kvm_emul.S |    6 ++++++
 2 files changed, 14 insertions(+), 1 deletions(-)
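For reviewers, here is a minimal user-space sketch (not part of the patch) of
what the new loop does. The names text[], tmpl_start, tmpl_end and check_ins()
are hypothetical stand-ins for the patched _stext.._etext range, the
kvm_template_start/kvm_template_end symbols exported from kvm_emul.S, and
kvm_check_ins() respectively:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static u32 text[16];                    /* stand-in for the kernel text  */
static u32 *tmpl_start = &text[4];      /* "kvm_template_start"          */
static u32 *tmpl_end   = &text[8];      /* "kvm_template_end"            */

static void check_ins(u32 *p)           /* stand-in for kvm_check_ins()  */
{
	printf("would inspect word %ld\n", (long)(p - text));
}

int main(void)
{
	u32 *p;

	for (p = text; p < text + 16; p++) {
		/* Hop over the template region instead of patching it */
		if (p >= tmpl_start && p < tmpl_end)
			p = tmpl_end;
		check_ins(p);
	}
	return 0;
}

Running this shows words 0-3 and 8-15 being inspected while words 4-7 (the
pretend template) never reach check_ins(), which mirrors how the patch keeps
kvm_check_ins() away from the template instructions that are only meant to be
copied into the scratch area.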