new file mode 100644
@@ -0,0 +1,173 @@
+#ifndef _ASM_X86_APICDEF_H
+#define _ASM_X86_APICDEF_H
+
+/*
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
+ *
+ * Alan Cox <Alan.Cox@linux.org>, 1995.
+ * Ingo Molnar <mingo@redhat.com>, 1999, 2000
+ */
+
+#define APIC_DEFAULT_PHYS_BASE 0xfee00000
+
+#define APIC_ID 0x20
+#define APIC_ID_MASK (0xFFu<<24)
+#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_ID(x) (((x)<<24))
+#define APIC_LVR 0x30
+#define APIC_LVR_MASK 0xFF00FF
+#define GET_APIC_VERSION(x) ((x)&0xFFu)
+#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
+#define APIC_INTEGRATED(x) ((x)&0xF0u)
+#define APIC_XAPIC(x) ((x) >= 0x14)
+#define APIC_TASKPRI 0x80
+#define APIC_TPRI_MASK 0xFFu
+#define APIC_ARBPRI 0x90
+#define APIC_ARBPRI_MASK 0xFFu
+#define APIC_PROCPRI 0xA0
+#define APIC_EOI 0xB0
+#define APIC_EIO_ACK 0x0
+#define APIC_RRR 0xC0
+#define APIC_LDR 0xD0
+#define APIC_LDR_MASK (0xFFu<<24)
+#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
+#define APIC_ALL_CPUS 0xFFu
+#define APIC_DFR 0xE0
+#define APIC_DFR_CLUSTER 0x0FFFFFFFul
+#define APIC_DFR_FLAT 0xFFFFFFFFul
+#define APIC_SPIV 0xF0
+#define APIC_SPIV_FOCUS_DISABLED (1<<9)
+#define APIC_SPIV_APIC_ENABLED (1<<8)
+#define APIC_ISR 0x100
+#define APIC_ISR_NR 0x8
+#define APIC_TMR 0x180
+#define APIC_IRR 0x200
+#define APIC_ESR 0x280
+#define APIC_ESR_SEND_CS 0x00001
+#define APIC_ESR_RECV_CS 0x00002
+#define APIC_ESR_SEND_ACC 0x00004
+#define APIC_ESR_RECV_ACC 0x00008
+#define APIC_ESR_SENDILL 0x00020
+#define APIC_ESR_RECVILL 0x00040
+#define APIC_ESR_ILLREGA 0x00080
+#define APIC_ICR 0x300
+#define APIC_DEST_SELF 0x40000
+#define APIC_DEST_ALLINC 0x80000
+#define APIC_DEST_ALLBUT 0xC0000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
+#define APIC_DEST_LOGICAL 0x00800
+#define APIC_DEST_PHYSICAL 0x00000
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
+#define APIC_ICR2 0x310
+#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
+#define SET_APIC_DEST_FIELD(x) ((x)<<24)
+#define APIC_LVTT 0x320
+#define APIC_LVTTHMR 0x330
+#define APIC_LVTPC 0x340
+#define APIC_LVT0 0x350
+#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
+#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
+#define SET_APIC_TIMER_BASE(x) (((x)<<18))
+#define APIC_TIMER_BASE_CLKIN 0x0
+#define APIC_TIMER_BASE_TMBASE 0x1
+#define APIC_TIMER_BASE_DIV 0x2
+#define APIC_LVT_TIMER_PERIODIC (1<<17)
+#define APIC_LVT_MASKED (1<<16)
+#define APIC_LVT_LEVEL_TRIGGER (1<<15)
+#define APIC_LVT_REMOTE_IRR (1<<14)
+#define APIC_INPUT_POLARITY (1<<13)
+#define APIC_SEND_PENDING (1<<12)
+#define APIC_MODE_MASK 0x700
+#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
+#define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8))
+#define APIC_MODE_FIXED 0x0
+#define APIC_MODE_NMI 0x4
+#define APIC_MODE_EXTINT 0x7
+#define APIC_LVT1 0x360
+#define APIC_LVTERR 0x370
+#define APIC_TMICT 0x380
+#define APIC_TMCCT 0x390
+#define APIC_TDCR 0x3E0
+#define APIC_TDR_DIV_TMBASE (1<<2)
+#define APIC_TDR_DIV_1 0xB
+#define APIC_TDR_DIV_2 0x0
+#define APIC_TDR_DIV_4 0x1
+#define APIC_TDR_DIV_8 0x2
+#define APIC_TDR_DIV_16 0x3
+#define APIC_TDR_DIV_32 0x8
+#define APIC_TDR_DIV_64 0x9
+#define APIC_TDR_DIV_128 0xA
+#define APIC_EILVT0 0x500
+#define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */
+#define APIC_EILVT_NR_AMD_10H 4
+#define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF)
+#define APIC_EILVT_MSG_FIX 0x0
+#define APIC_EILVT_MSG_SMI 0x2
+#define APIC_EILVT_MSG_NMI 0x4
+#define APIC_EILVT_MSG_EXT 0x7
+#define APIC_EILVT_MASKED (1<<16)
+#define APIC_EILVT1 0x510
+#define APIC_EILVT2 0x520
+#define APIC_EILVT3 0x530
+
+/*************** IO-APIC *************/
+
+#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS
+#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */
+#define IOAPIC_EDGE_TRIG 0
+#define IOAPIC_LEVEL_TRIG 1
+
+#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
+#define IOAPIC_MEM_LENGTH 0x100
+
+/* Direct registers. */
+#define IOAPIC_REG_SELECT 0x00
+#define IOAPIC_REG_WINDOW 0x10
+#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
+
+/* Indirect registers. */
+#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
+#define IOAPIC_REG_VERSION 0x01
+#define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */
+
+/*ioapic delivery mode*/
+#define IOAPIC_FIXED 0x0
+#define IOAPIC_LOWEST_PRIORITY 0x1
+#define IOAPIC_PMI 0x2
+#define IOAPIC_NMI 0x4
+#define IOAPIC_INIT 0x5
+#define IOAPIC_EXTINT 0x7
+
+struct IO_APIC_route_entry {	/* one 64-bit IOAPIC redirection-table entry, laid out as two packed 32-bit words */
+ uint32_t vector : 8,
+ delivery_mode : 3, /* 000: FIXED
+ * 001: lowest prio
+ * 111: ExtINT
+ */
+ dest_mode : 1, /* 0: physical, 1: logical */
+ delivery_status : 1,
+ polarity : 1,
+ irr : 1,
+ trigger : 1, /* 0: edge, 1: level */
+ mask : 1, /* 0: enabled, 1: disabled */
+ __reserved_2 : 15;
+ uint32_t __reserved_3 : 24,
+ dest : 8;
+} __attribute__ ((packed));
+
+#endif
new file mode 100644
@@ -0,0 +1,129 @@
+/*
+ * cpufeature.h
+ *
+ * Defines x86 CPU feature bits
+ */
+
+#ifndef __ASM_I386_CPUFEATURE_H
+#define __ASM_I386_CPUFEATURE_H
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
+#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
+#define X86_FEATURE_DS (0*32+21) /* Debug Store */
+#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
+ /* of FPU context), and CR4.OSFXSR available */
+#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
+#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
+#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FFXSR (1*32+25) /* FFXSR instruction optimizations */
+#define X86_FEATURE_PAGE1GB (1*32+26) /* 1Gb large page support */
+#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
+#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
+#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
+#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_NONSTOP_TSC (3*32+ 9) /* TSC does not stop in C states */
+#define X86_FEATURE_ARAT (3*32+ 10) /* Always running APIC timer */
+#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
+#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
+#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
+#define X86_FEATURE_VMXE (4*32+ 5) /* Virtual Machine Extensions */
+#define X86_FEATURE_SMXE (4*32+ 6) /* Safer Mode Extensions */
+#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental Streaming SIMD Extensions-3 */
+#define X86_FEATURE_CID (4*32+10) /* Context ID */
+#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM (4*32+15) /* Perf/Debug Capability MSR */
+#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_SSE4_1 (4*32+19) /* Streaming SIMD Extensions 4.1 */
+#define X86_FEATURE_SSE4_2 (4*32+20) /* Streaming SIMD Extensions 4.2 */
+#define X86_FEATURE_X2APIC (4*32+21) /* Extended xAPIC */
+#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE (4*32+27) /* OSXSAVE */
+#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running under some hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
+#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
+#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
+#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
+#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVME (6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_EXTAPICSPACE (6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_ALTMOVCR (6*32+ 4) /* LOCK MOV CR accesses CR+8 */
+#define X86_FEATURE_ABM (6*32+ 5) /* Advanced Bit Manipulation */
+#define X86_FEATURE_SSE4A (6*32+ 6) /* AMD Streaming SIMD Extensions-4a */
+#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE Access */
+#define X86_FEATURE_3DNOWPF (6*32+ 8) /* 3DNow! Prefetch */
+#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */
+#define X86_FEATURE_SSE5 (6*32+ 11) /* AMD Streaming SIMD Extensions-5 */
+#define X86_FEATURE_SKINIT (6*32+ 12) /* SKINIT, STGI/CLGI, DEV */
+#define X86_FEATURE_WDT (6*32+ 13) /* Watchdog Timer */
+
+#endif
new file mode 100644
@@ -0,0 +1,169 @@
+#ifndef _LIST_H
+#define _LIST_H 1
+
+/*
+ * Simple doubly linked list implementation.
+ * -- shamelessly stolen from the Linux kernel sources
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert an item entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_add(struct list_head * item,
+ struct list_head * prev,
+ struct list_head * next)
+{	/* stitch item between two nodes the caller already knows are adjacent */
+ next->prev = item;
+ item->next = next;
+ item->prev = prev;
+ prev->next = item;
+}
+
+/**
+ * list_add - add an item entry
+ * @item: item entry to be added
+ * @head: list head to add it after
+ *
+ * Insert an item entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static __inline__ void list_add(struct list_head *item, struct list_head *head)
+{
+ __list_add(item, head, head->next);
+}
+
+/**
+ * list_add_tail - add an item entry
+ * @item: item entry to be added
+ * @head: list head to add it before
+ *
+ * Insert an item entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static __inline__ void list_add_tail(struct list_head *item, struct list_head *head)
+{
+ __list_add(item, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_del(struct list_head * prev,
+ struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is in an undefined state.
+ */
+static __inline__ void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static __inline__ void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static __inline__ int list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the item list to add.
+ * @head: the place to add it in the first list.
+ */
+static __inline__ void list_splice(struct list_head *list, struct list_head *head)
+{
+ struct list_head *first = list->next;
+
+ if (first != list) { /* only splice when the source list is non-empty */
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head; /* hook first..last in right after head */
+ head->next = first;
+
+ last->next = at; /* reattach head's old successor after last */
+ at->prev = last;
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list in reverse order
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+#endif /* _LIST_H */
new file mode 100644
@@ -0,0 +1,278 @@
+#ifndef __ASM_MSR_INDEX_H
+#define __ASM_MSR_INDEX_H
+
+/* CPU model specific register (MSR) numbers */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER 0xc0000080 /* extended feature register */
+#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
+
+/* EFER bits: */
+#define _EFER_SCE 0 /* SYSCALL/SYSRET */
+#define _EFER_LME 8 /* Long mode enable */
+#define _EFER_LMA 10 /* Long mode active (read-only) */
+#define _EFER_NX 11 /* No execute enable */
+
+#define EFER_SCE (1<<_EFER_SCE)
+#define EFER_LME (1<<_EFER_LME)
+#define EFER_LMA (1<<_EFER_LMA)
+#define EFER_NX (1<<_EFER_NX)
+
+/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_PERFCTR0 0x000000c1
+#define MSR_IA32_PERFCTR1 0x000000c2
+#define MSR_FSB_FREQ 0x000000cd
+
+#define MSR_MTRRcap 0x000000fe
+#define MSR_IA32_BBL_CR_CTL 0x00000119
+
+#define MSR_IA32_SYSENTER_CS 0x00000174
+#define MSR_IA32_SYSENTER_ESP 0x00000175
+#define MSR_IA32_SYSENTER_EIP 0x00000176
+
+#define MSR_IA32_MCG_CAP 0x00000179
+#define MSR_IA32_MCG_STATUS 0x0000017a
+#define MSR_IA32_MCG_CTL 0x0000017b
+
+#define MSR_IA32_PEBS_ENABLE 0x000003f1
+#define MSR_IA32_DS_AREA 0x00000600
+#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+
+#define MSR_MTRRfix64K_00000 0x00000250
+#define MSR_MTRRfix16K_80000 0x00000258
+#define MSR_MTRRfix16K_A0000 0x00000259
+#define MSR_MTRRfix4K_C0000 0x00000268
+#define MSR_MTRRfix4K_C8000 0x00000269
+#define MSR_MTRRfix4K_D0000 0x0000026a
+#define MSR_MTRRfix4K_D8000 0x0000026b
+#define MSR_MTRRfix4K_E0000 0x0000026c
+#define MSR_MTRRfix4K_E8000 0x0000026d
+#define MSR_MTRRfix4K_F0000 0x0000026e
+#define MSR_MTRRfix4K_F8000 0x0000026f
+#define MSR_MTRRdefType 0x000002ff
+
+#define MSR_IA32_DEBUGCTLMSR 0x000001d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
+#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
+#define MSR_IA32_LASTINTFROMIP 0x000001dd
+#define MSR_IA32_LASTINTTOIP 0x000001de
+
+#define MSR_IA32_MC0_CTL 0x00000400
+#define MSR_IA32_MC0_STATUS 0x00000401
+#define MSR_IA32_MC0_ADDR 0x00000402
+#define MSR_IA32_MC0_MISC 0x00000403
+
+#define MSR_P6_PERFCTR0 0x000000c1
+#define MSR_P6_PERFCTR1 0x000000c2
+#define MSR_P6_EVNTSEL0 0x00000186
+#define MSR_P6_EVNTSEL1 0x00000187
+
+/* K7/K8 MSRs. Not complete. See the architecture manual for a more
+ complete list. */
+#define MSR_K7_EVNTSEL0 0xc0010000
+#define MSR_K7_PERFCTR0 0xc0010004
+#define MSR_K7_EVNTSEL1 0xc0010001
+#define MSR_K7_PERFCTR1 0xc0010005
+#define MSR_K7_EVNTSEL2 0xc0010002
+#define MSR_K7_PERFCTR2 0xc0010006
+#define MSR_K7_EVNTSEL3 0xc0010003
+#define MSR_K7_PERFCTR3 0xc0010007
+#define MSR_K8_TOP_MEM1 0xc001001a
+#define MSR_K7_CLK_CTL 0xc001001b
+#define MSR_K8_TOP_MEM2 0xc001001d
+#define MSR_K8_SYSCFG 0xc0010010
+
+#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
+
+#define MSR_K7_HWCR 0xc0010015
+#define MSR_K8_HWCR 0xc0010015
+#define MSR_K7_FID_VID_CTL 0xc0010041
+#define MSR_K7_FID_VID_STATUS 0xc0010042
+#define MSR_K8_ENABLE_C1E 0xc0010055
+
+/* K6 MSRs */
+#define MSR_K6_EFER 0xc0000080
+#define MSR_K6_STAR 0xc0000081
+#define MSR_K6_WHCR 0xc0000082
+#define MSR_K6_UWCCR 0xc0000085
+#define MSR_K6_EPMR 0xc0000086
+#define MSR_K6_PSOR 0xc0000087
+#define MSR_K6_PFIR 0xc0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x00000107
+#define MSR_IDT_FCR2 0x00000108
+#define MSR_IDT_FCR3 0x00000109
+#define MSR_IDT_FCR4 0x0000010a
+
+#define MSR_IDT_MCR0 0x00000110
+#define MSR_IDT_MCR1 0x00000111
+#define MSR_IDT_MCR2 0x00000112
+#define MSR_IDT_MCR3 0x00000113
+#define MSR_IDT_MCR4 0x00000114
+#define MSR_IDT_MCR5 0x00000115
+#define MSR_IDT_MCR6 0x00000116
+#define MSR_IDT_MCR7 0x00000117
+#define MSR_IDT_MCR_CTRL 0x00000120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x00001107
+#define MSR_VIA_LONGHAUL 0x0000110a
+#define MSR_VIA_RNG 0x0000110b
+#define MSR_VIA_BCR2 0x00001147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0x00000000
+#define MSR_IA32_P5_MC_TYPE 0x00000001
+#define MSR_IA32_TSC 0x00000010
+#define MSR_IA32_PLATFORM_ID 0x00000017
+#define MSR_IA32_EBL_CR_POWERON 0x0000002a
+
+#define MSR_IA32_APICBASE 0x0000001b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x00000079
+#define MSR_IA32_UCODE_REV 0x0000008b
+
+#define MSR_IA32_PERF_STATUS 0x00000198
+#define MSR_IA32_PERF_CTL 0x00000199
+
+#define MSR_IA32_MPERF 0x000000e7
+#define MSR_IA32_APERF 0x000000e8
+
+#define MSR_IA32_THERM_CONTROL 0x0000019a
+#define MSR_IA32_THERM_INTERRUPT 0x0000019b
+#define MSR_IA32_THERM_STATUS 0x0000019c
+#define MSR_IA32_MISC_ENABLE 0x000001a0
+
+/* Intel Model 6 */
+#define MSR_P6_EVNTSEL0 0x00000186 /* duplicate of the definition above; identical value, so the redefinition is benign */
+#define MSR_P6_EVNTSEL1 0x00000187
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x00000180
+#define MSR_IA32_MCG_EBX 0x00000181
+#define MSR_IA32_MCG_ECX 0x00000182
+#define MSR_IA32_MCG_EDX 0x00000183
+#define MSR_IA32_MCG_ESI 0x00000184
+#define MSR_IA32_MCG_EDI 0x00000185
+#define MSR_IA32_MCG_EBP 0x00000186
+#define MSR_IA32_MCG_ESP 0x00000187
+#define MSR_IA32_MCG_EFLAGS 0x00000188
+#define MSR_IA32_MCG_EIP 0x00000189
+#define MSR_IA32_MCG_RESERVED 0x0000018a
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x00000300
+#define MSR_P4_BPU_PERFCTR1 0x00000301
+#define MSR_P4_BPU_PERFCTR2 0x00000302
+#define MSR_P4_BPU_PERFCTR3 0x00000303
+#define MSR_P4_MS_PERFCTR0 0x00000304
+#define MSR_P4_MS_PERFCTR1 0x00000305
+#define MSR_P4_MS_PERFCTR2 0x00000306
+#define MSR_P4_MS_PERFCTR3 0x00000307
+#define MSR_P4_FLAME_PERFCTR0 0x00000308
+#define MSR_P4_FLAME_PERFCTR1 0x00000309
+#define MSR_P4_FLAME_PERFCTR2 0x0000030a
+#define MSR_P4_FLAME_PERFCTR3 0x0000030b
+#define MSR_P4_IQ_PERFCTR0 0x0000030c
+#define MSR_P4_IQ_PERFCTR1 0x0000030d
+#define MSR_P4_IQ_PERFCTR2 0x0000030e
+#define MSR_P4_IQ_PERFCTR3 0x0000030f
+#define MSR_P4_IQ_PERFCTR4 0x00000310
+#define MSR_P4_IQ_PERFCTR5 0x00000311
+#define MSR_P4_BPU_CCCR0 0x00000360
+#define MSR_P4_BPU_CCCR1 0x00000361
+#define MSR_P4_BPU_CCCR2 0x00000362
+#define MSR_P4_BPU_CCCR3 0x00000363
+#define MSR_P4_MS_CCCR0 0x00000364
+#define MSR_P4_MS_CCCR1 0x00000365
+#define MSR_P4_MS_CCCR2 0x00000366
+#define MSR_P4_MS_CCCR3 0x00000367
+#define MSR_P4_FLAME_CCCR0 0x00000368
+#define MSR_P4_FLAME_CCCR1 0x00000369
+#define MSR_P4_FLAME_CCCR2 0x0000036a
+#define MSR_P4_FLAME_CCCR3 0x0000036b
+#define MSR_P4_IQ_CCCR0 0x0000036c
+#define MSR_P4_IQ_CCCR1 0x0000036d
+#define MSR_P4_IQ_CCCR2 0x0000036e
+#define MSR_P4_IQ_CCCR3 0x0000036f
+#define MSR_P4_IQ_CCCR4 0x00000370
+#define MSR_P4_IQ_CCCR5 0x00000371
+#define MSR_P4_ALF_ESCR0 0x000003ca
+#define MSR_P4_ALF_ESCR1 0x000003cb
+#define MSR_P4_BPU_ESCR0 0x000003b2
+#define MSR_P4_BPU_ESCR1 0x000003b3
+#define MSR_P4_BSU_ESCR0 0x000003a0
+#define MSR_P4_BSU_ESCR1 0x000003a1
+#define MSR_P4_CRU_ESCR0 0x000003b8
+#define MSR_P4_CRU_ESCR1 0x000003b9
+#define MSR_P4_CRU_ESCR2 0x000003cc
+#define MSR_P4_CRU_ESCR3 0x000003cd
+#define MSR_P4_CRU_ESCR4 0x000003e0
+#define MSR_P4_CRU_ESCR5 0x000003e1
+#define MSR_P4_DAC_ESCR0 0x000003a8
+#define MSR_P4_DAC_ESCR1 0x000003a9
+#define MSR_P4_FIRM_ESCR0 0x000003a4
+#define MSR_P4_FIRM_ESCR1 0x000003a5
+#define MSR_P4_FLAME_ESCR0 0x000003a6
+#define MSR_P4_FLAME_ESCR1 0x000003a7
+#define MSR_P4_FSB_ESCR0 0x000003a2
+#define MSR_P4_FSB_ESCR1 0x000003a3
+#define MSR_P4_IQ_ESCR0 0x000003ba
+#define MSR_P4_IQ_ESCR1 0x000003bb
+#define MSR_P4_IS_ESCR0 0x000003b4
+#define MSR_P4_IS_ESCR1 0x000003b5
+#define MSR_P4_ITLB_ESCR0 0x000003b6
+#define MSR_P4_ITLB_ESCR1 0x000003b7
+#define MSR_P4_IX_ESCR0 0x000003c8
+#define MSR_P4_IX_ESCR1 0x000003c9
+#define MSR_P4_MOB_ESCR0 0x000003aa
+#define MSR_P4_MOB_ESCR1 0x000003ab
+#define MSR_P4_MS_ESCR0 0x000003c0
+#define MSR_P4_MS_ESCR1 0x000003c1
+#define MSR_P4_PMH_ESCR0 0x000003ac
+#define MSR_P4_PMH_ESCR1 0x000003ad
+#define MSR_P4_RAT_ESCR0 0x000003bc
+#define MSR_P4_RAT_ESCR1 0x000003bd
+#define MSR_P4_SAAT_ESCR0 0x000003ae
+#define MSR_P4_SAAT_ESCR1 0x000003af
+#define MSR_P4_SSU_ESCR0 0x000003be
+#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0 0x000003c2
+#define MSR_P4_TBPU_ESCR1 0x000003c3
+#define MSR_P4_TC_ESCR0 0x000003c4
+#define MSR_P4_TC_ESCR1 0x000003c5
+#define MSR_P4_U2L_ESCR0 0x000003b0
+#define MSR_P4_U2L_ESCR1 0x000003b1
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0 0x00001900
+
+#endif /* __ASM_MSR_INDEX_H */
new file mode 100644
@@ -0,0 +1,326 @@
+#ifndef __PROCESSOR_H__
+#define __PROCESSOR_H__ 1
+
+/*
+ * x86 hardware specific structs and defines
+ */
+
+/* page size */
+#undef PAGE_SHIFT
+#undef PAGE_SIZE
+#undef PAGE_MASK
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PAGE_SHIFT_2MB 21
+#define PAGE_SIZE_2MB (1 << PAGE_SHIFT_2MB)
+#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB-1))
+
+#define addr_to_frame(addr) ((addr) >> PAGE_SHIFT)
+#define frame_to_addr(frame) ((frame) << PAGE_SHIFT)
+#define addr_offset(addr) ((addr) & ~PAGE_MASK)
+
+/* page flags */
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080
+#define _PAGE_GLOBAL 0x100
+#define _PAGE_NX ((uint64_t)1<<63)
+
+/* 32-bit paging */
+#define PGD_SHIFT_32 22
+#define PTE_SHIFT_32 12
+
+#define PGD_COUNT_32 1024
+#define PTE_COUNT_32 1024
+
+#define PGD_INDEX_32(va) (((va) >> PGD_SHIFT_32) & (PGD_COUNT_32-1))
+#define PTE_INDEX_32(va) (((va) >> PTE_SHIFT_32) & (PTE_COUNT_32-1))
+
+static inline uint32_t get_pgentry_32(uint32_t frame, uint32_t flags)
+{
+ return (frame << PAGE_SHIFT) | flags; /* 32-bit entry: 20-bit frame number, low 12 bits carry flags */
+}
+static inline uint32_t get_pgframe_32(uint32_t entry)
+{
+ return entry >> PAGE_SHIFT;
+}
+static inline uint32_t get_pgflags_32(uint32_t entry)
+{
+ return entry & ~PAGE_MASK;
+}
+static inline uint32_t test_pgflag_32(uint32_t entry, uint32_t flag)
+{
+ return entry & ~PAGE_MASK & flag;
+}
+
+/* 32-bit pae paging */
+#define PGD_SHIFT_PAE 30
+#define PMD_SHIFT_PAE 21
+#define PTE_SHIFT_PAE 12
+
+#define PGD_COUNT_PAE 4
+#define PMD_COUNT_PAE 512
+#define PTE_COUNT_PAE 512
+
+#define PGD_INDEX_PAE(va) (((va) >> PGD_SHIFT_PAE) & (PGD_COUNT_PAE-1))
+#define PMD_INDEX_PAE(va) (((va) >> PMD_SHIFT_PAE) & (PMD_COUNT_PAE-1))
+#define PTE_INDEX_PAE(va) (((va) >> PTE_SHIFT_PAE) & (PTE_COUNT_PAE-1))
+
+static inline uint64_t get_pgentry_pae(uint32_t frame, uint32_t flags)
+{
+ return ((uint64_t)frame << PAGE_SHIFT) | flags; /* widen before shifting: a 32-bit shift truncates frames >= 2^20 (phys addrs >= 4GB), which PAE exists to reach */
+}
+static inline uint32_t get_pgframe_pae(uint64_t entry)
+{
+ return (entry & ~_PAGE_NX) >> PAGE_SHIFT;
+}
+static inline uint32_t get_pgflags_pae(uint64_t entry)
+{
+ return entry & ~PAGE_MASK;
+}
+static inline uint32_t test_pgflag_pae(uint64_t entry, uint32_t flag)
+{
+ return entry & ~PAGE_MASK & flag;
+}
+
+/* 64-bit paging */
+#define PGD_SHIFT_64 39
+#define PUD_SHIFT_64 30
+#define PMD_SHIFT_64 21
+#define PTE_SHIFT_64 12
+
+#define PGD_COUNT_64 512
+#define PUD_COUNT_64 512
+#define PMD_COUNT_64 512
+#define PTE_COUNT_64 512
+
+#define PGD_INDEX_64(va) (((va) >> PGD_SHIFT_64) & (PGD_COUNT_64-1))
+#define PUD_INDEX_64(va) (((va) >> PUD_SHIFT_64) & (PUD_COUNT_64-1))
+#define PMD_INDEX_64(va) (((va) >> PMD_SHIFT_64) & (PMD_COUNT_64-1))
+#define PTE_INDEX_64(va) (((va) >> PTE_SHIFT_64) & (PTE_COUNT_64-1))
+
+static inline uint64_t get_pgentry_64(uint64_t frame, uint32_t flags)
+{
+ if ((flags & _PAGE_PSE) && (frame & 0x1f)) { /* NOTE(review): a 2MB PSE page needs 512-frame (0x1ff) alignment; 0x1f only enforces 32 -- confirm intent */
+ /* adding huge page with invalid offset */
+ while(1) { } /* hang deliberately: no panic/abort facility is visible from this header */
+ }
+ return (frame << PAGE_SHIFT) | flags;
+}
+static inline uint64_t get_pgframe_64(uint64_t entry)
+{
+ return (entry & ~_PAGE_NX) >> PAGE_SHIFT; /* clear NX (bit 63) first so the shift yields a clean frame number */
+}
+static inline uint32_t get_pgflags_64(uint64_t entry)
+{
+ return entry & ~PAGE_MASK;
+}
+static inline uint32_t test_pgflag_64(uint64_t entry, uint32_t flag)
+{
+ return entry & ~PAGE_MASK & flag;
+}
+
+/* Generic functions */
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_PAE)
+typedef uint64_t pte_t;
+#else
+typedef uint32_t pte_t;
+#endif
+
+static inline pte_t get_pgentry(unsigned long frame, uint32_t flags)
+{
+#if defined(CONFIG_64BIT) || defined(CONFIG_PAE)
+ return get_pgentry_64(frame, flags);
+#else
+ return get_pgentry_32(frame, flags);
+#endif
+}
+
+static inline pte_t get_pgframe(pte_t entry)
+{
+#if defined(CONFIG_64BIT) || defined(CONFIG_PAE)
+ return get_pgframe_64(entry);
+#else
+ return get_pgframe_32(entry);
+#endif
+}
+
+static inline pte_t get_pgflags(pte_t entry)
+{
+#if defined(CONFIG_64BIT) || defined(CONFIG_PAE)
+ return get_pgflags_64(entry);
+#else
+ return get_pgflags_32(entry);
+#endif
+}
+
+static inline pte_t test_pgflag(pte_t entry, uint32_t flag)
+{
+ return get_pgflags(entry) & flag;
+}
+
+/* ------------------------------------------------------------------ */
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Basic CPU control in CR0
+ */
+#define X86_CR0_PE 0x00000001 /* Protection Enable */
+#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
+#define X86_CR0_EM 0x00000004 /* Emulation */
+#define X86_CR0_TS 0x00000008 /* Task Switched */
+#define X86_CR0_ET 0x00000010 /* Extension Type */
+#define X86_CR0_NE 0x00000020 /* Numeric Error */
+#define X86_CR0_WP 0x00010000 /* Write Protect */
+#define X86_CR0_AM 0x00040000 /* Alignment Mask */
+#define X86_CR0_NW 0x20000000 /* Not Write-through */
+#define X86_CR0_CD 0x40000000 /* Cache Disable */
+#define X86_CR0_PG 0x80000000 /* Paging */
+
+/*
+ * Paging options in CR3
+ */
+#define X86_CR3_PWT 0x00000008 /* Page Write Through */
+#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
+#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x00000040 /* Machine check enable */
+#define X86_CR4_PGE 0x00000080 /* enable global pages */
+#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
+#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
+
+/* ------------------------------------------------------------------ */
+
/* 32bit hardware task state segment; field order and packing are
 * dictated by the CPU, do not reorder. */
struct tss_32 {
    /* hardware */
    uint16_t back_link,__blh;
    uint32_t esp0;              /* ring 0 entry stack */
    uint16_t ss0,__ss0h;
    uint32_t esp1;
    uint16_t ss1,__ss1h;
    uint32_t esp2;
    uint16_t ss2,__ss2h;
    uint32_t __cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax,ecx,edx,ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint16_t es, __esh;
    uint16_t cs, __csh;
    uint16_t ss, __ssh;
    uint16_t ds, __dsh;
    uint16_t fs, __fsh;
    uint16_t gs, __gsh;
    uint16_t ldt, __ldth;
    uint16_t trace, io_bitmap_base;
} __attribute__((packed));

/* 64bit hardware task state segment (stack pointers + IST entries). */
struct tss_64 {
    uint32_t reserved1;
    uint64_t rsp0;              /* ring 0 entry stack */
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t reserved2;
    uint64_t ist[7];            /* interrupt stack table */
    uint32_t reserved3;
    uint32_t reserved4;
    uint16_t reserved5;
    uint16_t io_bitmap_base;
} __attribute__((packed));
+
+/* ------------------------------------------------------------------ */
+
/* mask clearing VM, RF, NT and TF from an eflags image */
#define EFLAGS_TRAPMASK (~(X86_EFLAGS_VM | X86_EFLAGS_RF | X86_EFLAGS_NT | \
                           X86_EFLAGS_TF))

/* ------------------------------------------------------------------ */

/* 8-byte GDT/LDT descriptor or 32bit gate, as two raw words */
struct descriptor_32 {
    uint32_t a,b;
};

/* 16-byte long mode IDT gate, as four raw words */
struct idt_64 {
    uint32_t a,b,c,d;
};
+
/*
 * Initializer for a 32bit segment descriptor (struct descriptor_32)
 * from base, limit, type and flags.  All macro arguments are
 * parenthesized so expression arguments expand correctly.
 */
#define DESC32(base,limit,type,flags) { \
    .a = (((base) & 0xffff) << 16) | ((limit) & 0xffff), \
    .b = ((base) & 0xff000000) | (((base) & 0xff0000) >> 16) | \
         ((limit) & 0x000f0000) | (((type) & 0xff) << 8) | (((flags) & 0xf) << 20) \
}
+
/*
 * Initializer for a 32bit interrupt/trap gate (struct descriptor_32).
 * Arguments are parenthesized so expression arguments expand correctly.
 */
#define GATE32(seg,addr,type) { \
    .a = (((seg) & 0xffff) << 16) | ((addr) & 0xffff), \
    .b = ((addr) & 0xffff0000) | (((type) & 0xff) << 8) \
}
+
/*
 * Initializer for a long mode IDT gate (struct idt_64); 'ist' selects
 * an interrupt stack table entry (0 = none).  Arguments are
 * parenthesized so expression arguments expand correctly.
 */
#define GATE64(seg,addr,type,ist) { \
    .a = (((seg) & 0xffff) << 16) | ((addr) & 0xffff), \
    .b = ((addr) & 0xffff0000) | (((type) & 0xff) << 8) | ((ist) & 0x07), \
    .c = (((addr) >> 32) & 0xffffffff), \
    .d = 0 \
}
+
+static inline struct descriptor_32 mkdesc32(uint32_t base, uint32_t limit,
+ uint32_t type, uint32_t flags)
+{
+ struct descriptor_32 desc = DESC32(base, limit, type, flags);
+ return desc;
+}
+
+static inline struct descriptor_32 mkgate32(uint32_t seg, uint32_t addr,
+ uint32_t type)
+{
+ struct descriptor_32 desc = GATE32(seg, addr, type);
+ return desc;
+}
+
+static inline struct idt_64 mkgate64(uint32_t seg, uint64_t addr,
+ uint32_t type, uint32_t ist)
+{
+ struct idt_64 desc = GATE64(seg, addr, type, ist);
+ return desc;
+}
+
+#endif /* __PROCESSOR_H__ */
new file mode 100644
@@ -0,0 +1,188 @@
/* drop the kernel header definitions, processor.h provides our own */
#undef PAGE_SIZE
#undef PAGE_MASK

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "processor.h"

/* grant table sizing */
#define GRANT_FRAMES_MAX (16)
#define GRANT_ENTRIES (GRANT_FRAMES_MAX * PAGE_SIZE / sizeof(struct grant_entry_v1))

#define VCPUS_MAX (4)
+
/* useful helper macros */
/* bounds- and NULL-checked name table lookup, "UNKNOWN" as fallback */
#define GETNAME(a, i) ( ((i) < sizeof(a)/sizeof((a)[0]) && (a)[i]) ? (a)[i] : "UNKNOWN")
/* set/test bit n in an integer array; the shifted 1 takes the element
 * type so bit numbers >= 32 are well-defined with 64bit elements
 * (plain "1 << n" is an int shift -- UB for n >= 31) */
#define SETBIT(a, n)  ( (a)[(n)/(sizeof((a)[0])*8)] |= ((__typeof__((a)[0]))1 << ((n)%(sizeof((a)[0])*8)) ))
#define TESTBIT(a, n) ( (a)[(n)/(sizeof((a)[0])*8)] &  ((__typeof__((a)[0]))1 << ((n)%(sizeof((a)[0])*8)) ))
+
/* common flags */
#define ALL_PGFLAGS (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY)

/* emulator code+data */
#define EMU_PGFLAGS (ALL_PGFLAGS | _PAGE_GLOBAL | _PAGE_RW)

/* machphys table */
#define M2P_PGFLAGS_32 (ALL_PGFLAGS | _PAGE_GLOBAL | _PAGE_RW)
#define M2P_PGFLAGS_64 (ALL_PGFLAGS | _PAGE_GLOBAL | _PAGE_RW | _PAGE_USER)

/* linear page tables */
#define LPT_PGFLAGS (ALL_PGFLAGS | _PAGE_RW)

/* pmd/pgt: pte pointer (map cache) */
#define PGT_PGFLAGS_32 (ALL_PGFLAGS | _PAGE_GLOBAL | _PAGE_RW)
#define PGT_PGFLAGS_64 (ALL_PGFLAGS | _PAGE_GLOBAL | _PAGE_RW | _PAGE_USER)

/* misc xen defines */
#define XEN_HCALL_MAX 64
#define XEN_DEFAULT_PERIOD (10000000ll) /* 10 ms aka 100 Hz */

/* misc xen addresses (per-mode fixed virtual address layout) */
#define XEN_IPT_32 0xfb800000
#define XEN_M2P_32 0xfc000000
#define XEN_LPT_32 0xfe000000
#define XEN_MAP_32 0xfe800000
#define XEN_TXT_32 0xff000000

#define XEN_IPT_PAE 0xf4800000
#define XEN_M2P_PAE 0xf5800000
#define XEN_LPT_PAE 0xfd800000
#define XEN_MAP_PAE 0xfe800000
#define XEN_TXT_PAE 0xff000000

#define XEN_M2P_64 0xffff800000000000 // 256 GB, pgd:256
#define XEN_LPT_64 0xffff808000000000 // 512 GB, pgd:257
#define XEN_MAP_64 0xffff820000000000 // 512 GB, pgd:260
#define XEN_RAM_64 0xffff830000000000 // 1 TB, pgd:262,263
#define XEN_DOM_64 0xffff880000000000 // 120 TB, pgd:272+

/* pick the layout matching the configured mode */
#if defined(CONFIG_PAE) && defined(CONFIG_32BIT)
#define XEN_IPT XEN_IPT_PAE
#define XEN_M2P XEN_M2P_PAE
#define XEN_LPT XEN_LPT_PAE
#define XEN_MAP XEN_MAP_PAE
#define XEN_TXT XEN_TXT_PAE
#elif defined(CONFIG_32BIT)
#define XEN_IPT XEN_IPT_32
#define XEN_M2P XEN_M2P_32
#define XEN_LPT XEN_LPT_32
#define XEN_MAP XEN_MAP_32
#define XEN_TXT XEN_TXT_32
#elif defined(CONFIG_64BIT)
#define XEN_M2P XEN_M2P_64
#define XEN_LPT XEN_LPT_64
#define XEN_MAP XEN_MAP_64
#define XEN_TXT XEN_RAM_64
#endif

#define INVALID_M2P_ENTRY (~0UL)
+
/* ------------------------------------------------------------------ */
/* statistics */

/* indices into xenner_info.faults[] below */
#define XEN_FAULT_ILLEGAL_INSTRUCTION 0
#define XEN_FAULT_GENERAL_PROTECTION 10
#define XEN_FAULT_GENERAL_PROTECTION_GUEST 11
#define XEN_FAULT_GENERAL_PROTECTION_EMUINS 12
#define XEN_FAULT_PAGE_FAULT 20
#define XEN_FAULT_PAGE_FAULT_GUEST 21
#define XEN_FAULT_PAGE_FAULT_FIX_RO 22
#define XEN_FAULT_PAGE_FAULT_FIX_USER 23
#define XEN_FAULT_PAGE_FAULT_FIX_EXTAB 24
#define XEN_FAULT_UPDATE_VA_FIX_RO 25
#define XEN_FAULT_UPDATE_VA_FIX_USER 26
#define XEN_FAULT_SYSCALL 30
#define XEN_FAULT_INT_80 31
#define XEN_FAULT_EVENT_CALLBACK 32
#define XEN_FAULT_LAZY_FPU 33
#define XEN_FAULT_BOUNCE_TRAP 34
#define XEN_FAULT_MAPS_MAPIT 40
#define XEN_FAULT_MAPS_REUSE 41
#define XEN_FAULT_OTHER_CR3_LOAD 50
#define XEN_FAULT_OTHER_SWITCH_MODE 51
#define XEN_FAULT_OTHER_CR3_CACHE_HIT 52
#define XEN_FAULT_OTHER_FLUSH_TLB_ALL 53
#define XEN_FAULT_OTHER_FLUSH_TLB_PAGE 54
#define XEN_FAULT_OTHER_FLUSH_TLB_NONE 55

/* scratch counters for ad-hoc debugging */
#define XEN_FAULT_TMP_1 240
#define XEN_FAULT_TMP_2 241
#define XEN_FAULT_TMP_3 242
#define XEN_FAULT_TMP_4 243
#define XEN_FAULT_TMP_5 244
#define XEN_FAULT_TMP_6 245
#define XEN_FAULT_TMP_7 246
#define XEN_FAULT_TMP_8 247

#define XEN_FAULT_MAX 256

#define XEN_EVENT_MAX 64
#define XEN_ENAME_LEN 20

/* ------------------------------------------------------------------ */
/* state info */

/* bump when the layout of struct xenner_info changes */
#define XENNER_ABI_VERSION 40

/* vm state + statistics, shared between emulator and host side */
struct xenner_info {
    /* state bits info */
    uint32_t abi_version;
    uint32_t dying:1;

    uint64_t vcpus_online;      /* bitmasks, one bit per vcpu */
    uint64_t vcpus_running;
    uint64_t vcpus;

    /* statistics */
    uint64_t hcalls[XEN_HCALL_MAX];
    uint64_t faults[XEN_FAULT_MAX];
    uint64_t events[XEN_EVENT_MAX];
    char enames[XEN_EVENT_MAX * XEN_ENAME_LEN];
};
+
+/* ------------------------------------------------------------------ */
+
/* Fix up the privilege level of a selector: 0 -> 1 (32bit guests);
 * the null selector is left untouched. */
static inline uint32_t fix_sel32(uint32_t sel)
{
    if (sel != 0 && (sel & 0x03) == 0)
        sel |= 0x01;
    return sel;
}
+
/* Undo the privilege-level fixup from fix_sel32: 1 -> 0. */
static inline uint32_t unfix_sel32(uint32_t sel)
{
    if ((sel & 0x03) == 0x01)
        sel &= ~0x03;
    return sel;
}
+
+static inline void fix_desc32(struct descriptor_32 *desc)
+{
+ if (desc->b & (1<<15)) { /* present ? */
+ if (0 == (desc->b & (3 << 13))) { /* dpl == 0 ? */
+ desc->b |= 1 << 13; /* fix: dpl = 1 */
+ }
+ }
+}
+
/* Fix up the privilege level of a selector: 0 -> 3 (64bit guests);
 * the null selector is left untouched. */
static inline uint32_t fix_sel64(uint32_t sel)
{
    if (sel != 0 && (sel & 0x03) == 0)
        sel |= 0x03;
    return sel;
}
+
+static inline void fix_desc64(struct descriptor_32 *desc)
+{
+ if (desc->b & (1<<15)) { /* present ? */
+ if (0 == (desc->b & (3 << 13))) { /* dpl == 0 ? */
+ desc->b |= 3 << 13; /* fix: dpl = 3 */
+ }
+ }
+}
new file mode 100644
@@ -0,0 +1,57 @@
/* NOTE(review): this include sits outside the guard below; harmless as
 * long as xenner_emudev.h carries its own include guard -- confirm. */
#include "../../hw/xenner_emudev.h"

#ifndef __XENNER_EMUDEV_GUEST_H__
#define __XENNER_EMUDEV_GUEST_H__ 1

/* --------- guest side bits --------- */
+
/*
 * Write a 32bit value into emudev config space: select the
 * (type,index) entry via the ENTRY port, then write the value to the
 * VALUE port.  The two outl's must stay in this order.
 */
static inline void emudev_set(uint16_t type, uint16_t index, uint32_t value)
{
    uint32_t entry = (uint32_t)type << 16 | index;

    asm volatile("outl %[data],%w[port]\n"
                 : /* no output */
                 : [data] "a" (entry), [port] "Nd" (EMUDEV_REG_CONF_ENTRY)
                 : "memory");
    asm volatile("outl %[data],%w[port]\n"
                 : /* no output */
                 : [data] "a" (value), [port] "Nd" (EMUDEV_REG_CONF_VALUE)
                 : "memory");
}
+
/*
 * Read a 32bit value from emudev config space: select the
 * (type,index) entry via the ENTRY port, then read the VALUE port.
 */
static inline uint32_t emudev_get32(uint16_t type, uint16_t index)
{
    uint32_t entry = (uint32_t)type << 16 | index;
    uint32_t value;

    asm volatile("outl %[data],%w[port]\n"
                 : /* no output */
                 : [data] "a" (entry), [port] "Nd" (EMUDEV_REG_CONF_ENTRY)
                 : "memory");
    asm volatile("inl %w[port],%[data]\n"
                 : [data] "=a" (value)
                 : [port] "Nd" (EMUDEV_REG_CONF_VALUE)
                 : "memory");
    return value;
}
+
+static inline uint64_t emudev_get(uint16_t type, uint16_t index)
+{
+ uint64_t r;
+ r = emudev_get32(type, index);
+ r |= ((uint64_t)emudev_get32(type | EMUDEV_CONF_HIGH_32, index) << 32);
+
+ return r;
+}
+
/* Issue an emudev command: cmd in the high 16 bits, arg in the low. */
static inline void emudev_cmd(uint16_t cmd, uint16_t arg)
{
    uint32_t command = (uint32_t)cmd << 16 | arg;

    asm volatile("outl %[data],%w[port]\n"
                 : /* no output */
                 : [data] "a" (command), [port] "Nd" (EMUDEV_REG_COMMAND)
                 : "memory");
}
+
+#endif /* __XENNER_EMUDEV_GUEST_H__ */
new file mode 100644
@@ -0,0 +1,684 @@
+#include <stdarg.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <xen/xen.h>
+#include <xen/callback.h>
+#include <xen/grant_table.h>
+#include <xen/version.h>
+#include <xen/sched.h>
+#include <xen/memory.h>
+#include <xen/vcpu.h>
+#include <xen/physdev.h>
+
+#include "list.h"
+
+#include "shared.h"
+#include "xenner-emudev.h"
+#include "xen-names.h"
+
/* attributes */
#define asmlinkage __attribute__((regparm(0)))  /* all arguments on the stack */
/* page-aligned data placed in the .pgdata section */
#define page_aligned __attribute__((aligned(4096))) __attribute__((__section__ (".pgdata")))
+
+/* fwd decl */
+struct xen_cpu;
+
+/* arch specific bits */
+#ifdef CONFIG_64BIT
+#include "xenner64.h"
+#else
+#include "xenner32.h"
+#endif
+
/*
 * Version/capability string reported to the guest.  The 32bit non-PAE
 * variant had a stray trailing semicolon inside the macro body, which
 * would break any expression the macro is used in -- removed.
 */
#if defined(CONFIG_64BIT)
#define CAP_VERSION_STRING "xen-3.0-x86_64"
#elif defined(CONFIG_PAE)
#define CAP_VERSION_STRING "xen-3.0-x86_32p"
#else
#define CAP_VERSION_STRING "xen-3.0-x86_32"
#endif
+
/* idt entry points (low-level stubs, defined outside this file) */
extern void division_by_zero(void);
extern void debug_int1(void);
extern void nmi(void);
extern void debug_int3(void);
extern void overflow(void);
extern void bound_check(void);
extern void illegal_instruction(void);
extern void no_device(void);
extern void double_fault(void);
extern void coprocessor(void);
extern void invalid_tss(void);
extern void segment_not_present(void);
extern void stack_fault(void);
extern void general_protection(void);
extern void page_fault(void);
extern void floating_point(void);
extern void alignment(void);
extern void machine_check(void);
extern void simd_floating_point(void);
extern void smp_flush_tlb(void);
extern void int_unknown(void);

#ifdef CONFIG_64BIT
/* 64bit only */
extern void int_80(void);
#else
/* 32bit only */
extern void xen_hypercall(void);
#endif

/* functions */
extern uintptr_t emu_pa(uintptr_t va);
/* translate an emulator virtual address to phys addr / frame number */
#define EMU_PA(_vaddr) emu_pa((uintptr_t)_vaddr)
#define EMU_MFN(_vaddr) (EMU_PA((uintptr_t)_vaddr) >> PAGE_SHIFT)
extern uint8_t _vstart[];
extern uint8_t _vstop[];
extern uintptr_t _estart[];
extern uintptr_t _estop[];
extern uint8_t trampoline_syscall[];
+
/* translate a boot-stack symbol into a pointer into the given cpu's
 * stack / irq stack (the STACK_PTR define was accidentally duplicated) */
#define STACK_PTR(_cpu,_sym) ((void*)((_cpu)->stack_low + ((_sym) - boot_stack_low)))
#define IRQSTACK_PTR(_cpu,_sym) ((void*)((_cpu)->irqstack_low + ((_sym) - boot_stack_low)))
/* symbols provided by the assembler / linker script */
extern uint8_t boot_stack_low[];
extern uint8_t cpu_ptr[];
#ifdef CONFIG_64BIT
extern uint8_t trampoline_start[];
extern uint8_t trampoline_patch[];
extern uint8_t trampoline_stop[];
#endif
extern uint8_t boot_stack_high[];

extern uint8_t irq_entries[];
extern uint8_t irq_common[];

extern uint8_t sipi[];

/* xenner-data.c */
extern int grant_frames;
extern struct grant_entry_v1 page_aligned grant_table[];
extern struct xenner_info vminfo;
extern int wrpt;
extern unsigned long *m2p;      /* machine-to-physical table */
+
/* static per-VM configuration */
struct vmconfig {
    uint64_t mfn_emu;       /* frame / page counts of the memory regions */
    uint64_t pg_emu;
    uint64_t mfn_m2p;
    uint64_t pg_m2p;
    uint64_t mfn_guest;
    uint64_t pg_guest;
    uint64_t pg_total;
    int debug_level;
    int nr_cpus;
};
extern struct vmconfig vmconf;

/* per-vcpu shared info mapping */
struct xen_vcpu {
    void *vcpu_page;
    struct vcpu_info *vcpu_info;
    uint64_t vcpu_info_pa;
};

/* per-cpu emulator state */
struct xen_cpu {
    /* used by hardware */
#ifdef CONFIG_64BIT
    struct tss_64 tss;
#else
    struct tss_32 tss;
#endif
    void *lapic;

    /* used by kvm */
    struct kvm_cr3_cache *cr3_cache;
    uint8_t mmu_queue[128];
    int mmu_queue_len;

    /* emu state */
    struct xen_vcpu v;
    struct descriptor_32 *gdt;
    uint64_t gdt_mfns[16];
    uint32_t virq_to_vector[NR_VIRQS];
    uint8_t *stack_low;
    uint8_t *stack_high;
#ifdef CONFIG_64BIT
    uint8_t *irqstack_low;
    uint8_t *irqstack_high;
#endif
    ureg_t kernel_ss;
    ureg_t kernel_sp;

    /* timer */
    uint64_t periodic;
    uint64_t oneshot;
    int timerport;

    /* I/O */
    int iopl;
    int nr_ports;

    /* initial state */
    struct vcpu_guest_context *init_ctxt;

#ifdef CONFIG_64BIT
    uint64_t kernel_cr3_mfn;
    uint64_t user_cr3_mfn;
    int user_mode;
#else
    ureg_t cr3_mfn;
#endif

    int online;
    int id;
    struct list_head next;  /* linked into the global 'cpus' list */
};
extern struct list_head cpus;
extern ureg_t cpumask_all;

extern struct vcpu_guest_context boot_ctxt;
extern struct shared_info page_aligned shared_info;
extern xen_callback_t xencb[8];
extern struct trap_info xentr[256];

extern uint64_t emu_hcalls[XEN_HCALL_MAX];
extern uint64_t emu_faults[XEN_FAULT_MAX];

/* per-trap metadata for logging */
struct trapinfo {
    char *name;
    int ec;  /* has error code */
    int lvl; /* debug log level */
};
extern const struct trapinfo trapinfo[32];
extern const char *cr0_bits[32];
extern const char *cr4_bits[32];
extern const char *pg_bits[32];
extern const char *rflags_bits[32];
+
/* xenner-main.c: cpu/descriptor table setup, fault entry points */
void gdt_init(struct xen_cpu *cpu);
void gdt_load(struct xen_cpu *cpu);
void tss_init(struct xen_cpu *cpu);
void msrs_init(struct xen_cpu *cpu);
void idt_init(void);
void idt_load(void);
void guest_cpu_init(struct xen_cpu *cpu);
void guest_regs_init(struct xen_cpu *cpu, struct regs *regs);
struct xen_cpu *cpu_find(int id);
void print_registers(int level, struct regs *regs);
void print_stack(int level, ureg_t rsp);
void print_state(struct regs *regs);

int panic(const char *message, struct regs *regs);
int bounce_trap(struct xen_cpu *cpu, struct regs *regs, int trapno, int cbno);
void flush_tlb_remote(struct xen_cpu *cpu, ureg_t mask, ureg_t addr);

/* C-level fault/interrupt handlers, called from the asm stubs */
asmlinkage void do_boot(struct regs *regs);
asmlinkage void do_boot_secondary(ureg_t id, struct regs *regs);
asmlinkage void do_illegal_instruction(struct regs *regs);
asmlinkage void do_general_protection(struct regs *regs);
asmlinkage void do_page_fault(struct regs *regs);
asmlinkage void do_double_fault(struct regs *regs);
asmlinkage void do_event_callback(struct regs *regs);
asmlinkage void do_guest_forward(struct regs *regs);
asmlinkage void do_int1(struct regs *regs);
asmlinkage void do_int3(struct regs *regs);
asmlinkage void do_lazy_fpu(struct regs *regs);
asmlinkage void do_smp_flush_tlb(struct regs *regs);

/* xenner-mm.c: paging setup and memory allocation */
void paging_init(struct xen_cpu *cpu);
void paging_start(struct xen_cpu *cpu);
void update_emu_mappings(ureg_t cr3_mfn);
void *get_pages(int pages, const char *purpose);
void *get_memory(int bytes, const char *purpose);
void switch_heap(int heap_type);

#define HEAP_EMU 0
#define HEAP_HIGH 1

unsigned long heap_size(void);
void map_region(struct xen_cpu *cpu, uint64_t va, uint32_t flags,
                uint64_t maddr, uint64_t count);

/* xenner-hcall.c: hypercall handlers; return codes below */
#define HCALL_HANDLED 0
#define HCALL_FORWARD -1
#define HCALL_IRET -2
void guest_gdt_copy_page(struct descriptor_32 *src, struct descriptor_32 *dst);
int guest_gdt_init(struct xen_cpu *cpu, uint32_t entries, ureg_t *mfns);

sreg_t error_noop(struct xen_cpu *cpu, ureg_t *args);
sreg_t error_noperm(struct xen_cpu *cpu, ureg_t *args);
sreg_t stack_switch(struct xen_cpu *cpu, ureg_t *args);
sreg_t console_io(struct xen_cpu *cpu, ureg_t *args);
sreg_t update_descriptor(struct xen_cpu *cpu, ureg_t *args);
sreg_t fpu_taskswitch(struct xen_cpu *cpu, ureg_t *args);
sreg_t grant_table_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t xen_version(struct xen_cpu *cpu, ureg_t *args);
sreg_t vm_assist(struct xen_cpu *cpu, ureg_t *args);
sreg_t sched_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t sched_op_compat(struct xen_cpu *cpu, ureg_t *args);
sreg_t memory_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t set_trap_table(struct xen_cpu *cpu, ureg_t *args);
sreg_t set_callbacks(struct xen_cpu *cpu, ureg_t *args);
sreg_t callback_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t set_gdt(struct xen_cpu *cpu, ureg_t *args);
sreg_t vcpu_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t set_timer_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t event_channel_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t event_channel_op_compat(struct xen_cpu *cpu, ureg_t *args);
sreg_t mmuext_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t physdev_op(struct xen_cpu *cpu, ureg_t *args);
sreg_t get_debugreg(struct xen_cpu *cpu, ureg_t *args);
sreg_t set_debugreg(struct xen_cpu *cpu, ureg_t *args);
+
+/* xenner-pv.c */
+int pv_have_clock;
+
+void pv_clock_update(int wakeup);
+void pv_clock_sys(struct xen_cpu *cpu);
+void pv_write_cr3(struct xen_cpu *cpu, ureg_t cr3_mfn);
+void pv_init(struct xen_cpu *cpu);
+
/* xenner-instr.c: instruction emulation */
void real_cpuid(struct kvm_cpuid_entry *entry);
void print_bits(int level, const char *msg, uint32_t old, uint32_t new,
                const char *names[]);
void print_emu_instr(int level, const char *prefix, uint8_t *instr);
int emulate(struct xen_cpu *cpu, struct regs *regs);

/* xenner-lapic.c: local apic + event channel routing */
#define VECTOR_FLUSH_TLB 0x20
#define VECTOR_EVTCHN_START 0x21
void lapic_eoi(struct xen_cpu *cpu);
void lapic_timer(struct xen_cpu *cpu);
void lapic_ipi_boot(struct xen_cpu *cpu, struct xen_cpu *ap);
void lapic_ipi_flush_tlb(struct xen_cpu *cpu);
int evtchn_route_interdomain(struct xen_cpu *cpu, int port, char *desc);
int evtchn_route_virq(struct xen_cpu *cpu, int virq, int port, char *desc);
int evtchn_route_ipi(struct xen_cpu *cpu, int port);
int evtchn_send(struct xen_cpu *cpu, int port);
void evtchn_unmask(struct xen_cpu *cpu, int port);
void evtchn_close(struct xen_cpu *cpu, int port);
int evtchn_alloc(int vcpu_id);
int evtchn_pending(struct xen_cpu *cpu);
void evtchn_try_forward(struct xen_cpu *cpu, struct regs *regs);
int irq_init(struct xen_cpu *cpu);
asmlinkage void do_irq(struct regs *regs);

/* xenner*.S */
extern pte_t emu_pgd[];     /* emulator top-level page directory */

/* printk.c */
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int snprintf(char * buf, size_t size, const char *fmt, ...);
int printk(int level, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
void write_string(char *msg);
+
/* inline asm bits */

/* read control register 0 */
static inline ureg_t read_cr0(void)
{
    ureg_t val;
    asm volatile("mov %%cr0,%0"
                 : "=r" (val));
    return val;
}

/* read control register 2 (faulting address after a page fault) */
static inline ureg_t read_cr2(void)
{
    ureg_t val;
    asm volatile("mov %%cr2,%0"
                 : "=r" (val));
    return val;
}
+
+static inline ureg_t read_cr3_mfn(struct xen_cpu *cpu)
+{
+#ifdef CONFIG_64BIT
+ return cpu->user_mode ? cpu->user_cr3_mfn : cpu->kernel_cr3_mfn;
+#else
+ return cpu->cr3_mfn;
+#endif
+}
+
/* read control register 4 */
static inline ureg_t read_cr4(void)
{
    ureg_t val;
    asm volatile("mov %%cr4,%0"
                 : "=r" (val));
    return val;
}

/* write control register 0 */
static inline void write_cr0(ureg_t val)
{
    asm volatile("mov %0, %%cr0"
                 : /* no output */
                 : "r" (val)
                 : "memory" );
}

/* write control register 3 (switches the page tables) */
static inline void write_cr3(ureg_t val)
{
    asm volatile("mov %0, %%cr3"
                 : /* no output */
                 : "r" (val)
                 : "memory");
}

/* write control register 4 */
static inline void write_cr4(ureg_t val)
{
    asm volatile("mov %0, %%cr4"
                 : /* no output */
                 : "r" (val)
                 : "memory");
}

/* flush the whole TLB by reloading cr3 */
static inline void flush_tlb(void)
{
    ureg_t tmpreg;

    asm volatile("mov %%cr3, %0; \n"
                 "mov %0, %%cr3; # flush TLB \n"
                 : "=r" (tmpreg)
                 : /* no input */
                 : "memory");
}

/* flush the TLB entry for one virtual address */
static inline void flush_tlb_addr(ureg_t va)
{
    asm volatile("invlpg (%0)"
                 : /* no output */
                 : "r" (va)
                 : "memory");
}

/* write one byte to an I/O port */
static inline void outb(uint8_t value, uint16_t port)
{
    asm volatile("outb %[value],%w[port]"
                 : /* no output */
                 : [value] "a" (value), [port] "Nd" (port)
                 : "memory");
}

/* read a model specific register, low half into *ax, high into *dx */
static inline void rdmsr(uint32_t msr, uint32_t *ax, uint32_t *dx)
{
    asm volatile("rdmsr"
                 : "=a" (*ax), "=d" (*dx)
                 : "c" (msr)
                 : "memory");
}

/* write a model specific register from two 32bit halves */
static inline void wrmsr(uint32_t msr, uint32_t ax, uint32_t dx)
{
    asm volatile("wrmsr"
                 : /* no outputs */
                 : "c" (msr), "a" (ax), "d" (dx)
                 : "memory");
}
+
/* Write a model specific register from one 64bit value. */
static inline void wrmsrl(uint32_t msr, uint64_t val)
{
    /* split into the eax/edx halves wrmsr expects */
    wrmsr(msr, (uint32_t)val, (uint32_t)(val >> 32));
}
+
/* wrmsrl with fault handling; returns non-zero on failure.
 * NOTE(review): wrmsr_safe is not declared in this header -- presumably
 * provided by the arch-specific xenner32/64.h includes; confirm. */
static inline int wrmsrl_safe(uint32_t msr, uint64_t val)
{
    uint32_t ax = (uint32_t)val;
    uint32_t dx = (uint32_t)(val >> 32);
    return wrmsr_safe(msr, ax, dx);
}

/* load the local descriptor table register */
static inline void lldt(uint16_t sel)
{
    asm volatile("lldt %0"
                 : /* no outputs */
                 : "a" (sel)
                 : "memory");
}

/* load the task register */
static inline void ltr(uint16_t sel)
{
    asm volatile("ltr %0"
                 : /* no outputs */
                 : "a" (sel)
                 : "memory");
}
+
+static inline void halt_i(int cpu_id)
+{
+ vminfo.vcpus_running &= ~(1 << cpu_id);
+ asm volatile("sti\n"
+ "hlt\n"
+ "cli\n"
+ : : : "memory");
+ vminfo.vcpus_running |= (1 << cpu_id);
+}
+
/* clear the TS (task switched) bit in cr0 */
static inline void clts(void)
{
    asm volatile("clts" : : : "memory");
}

/* spin-wait hint for busy loops */
static inline void pause(void)
{
    asm volatile("pause" : : : "memory");
}

/* enable interrupts */
static inline void sti(void)
{
    asm volatile("sti" : : : "memory");
}

/* disable interrupts */
static inline void cli(void)
{
    asm volatile("cli" : : : "memory");
}

/* trigger a breakpoint trap */
static inline void int3(void)
{
    asm volatile("int3" : : : "memory");
}

/* OR the given bit(s) into the cpu's flags register */
static inline void set_eflag(ureg_t flag)
{
    ureg_t reg = 0;

    asm volatile("pushf\n"
                 "pop %[reg]\n"
                 "or %[tf], %[reg]\n"
                 "push %[reg]\n"
                 "popf\n"
                 "nop\n"
                 : [reg] "+r" (reg)
                 : [tf] "r" (flag)
                 : "memory");
}

/* read the time stamp counter */
static inline uint64_t rdtsc(void)
{
    unsigned long low, high;

    asm volatile("rdtsc" : "=a" (low) , "=d" (high));

    return ((uint64_t)high << 32) | low;
}
+
/*
 * We have 4k stacks (one page).
 * - there is a pointer to the per-cpu data at the bottom.
 * - (64bit also has the sysenter trampoline there).
 */
static inline struct xen_cpu *get_cpu(void)
{
    uintptr_t rsp;

#ifdef CONFIG_64BIT
    asm volatile("mov %%rsp, %[rsp]" : [rsp] "=a" (rsp) : /* no input */);
#else
    asm volatile("mov %%esp, %[esp]" : [esp] "=a" (rsp) : /* no input */);
#endif
    /* round down to the stack base, where the xen_cpu pointer lives */
    rsp &= PAGE_MASK;
    return *((void**)rsp);
}

/* gcc builtins */
void *memset(void *s, int c, size_t n);
void *memcpy(void *dest, const void *src, size_t n);
int memcmp(const void *s1, const void *s2, size_t n);

/* guest virtual irq flag: mask/unmask event channel upcalls */
#define guest_cli(_cpu) do { \
        (_cpu)->v.vcpu_info->evtchn_upcall_mask = 1; \
    } while (0)
#define guest_sti(_cpu) do { \
        (_cpu)->v.vcpu_info->evtchn_upcall_mask = 0; \
    } while (0)
#define guest_irq_flag(_cpu) (!((_cpu)->v.vcpu_info->evtchn_upcall_mask))
+
+/******************************************************************
+ * atomic operations *
+ ******************************************************************/
+
/* simple atomic counter, modelled on the kernel's atomic_t */
typedef struct {
    int counter;
} atomic_t;

#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))

/* atomically add i to *v */
static inline void atomic_add(int i, atomic_t *v)
{
    asm volatile("lock; addl %1,%0"
                 : "+m" (v->counter)
                 : "ir" (i));
}

/* atomically subtract i from *v */
static inline void atomic_sub(int i, atomic_t *v)
{
    asm volatile("lock; subl %1,%0"
                 : "+m" (v->counter)
                 : "ir" (i));
}

/* atomically increment *v */
static inline void atomic_inc(atomic_t *v)
{
    asm volatile("lock; incl %0"
                 : "+m" (v->counter));
}

/* atomically decrement *v */
static inline void atomic_dec(atomic_t *v)
{
    asm volatile("lock; decl %0"
                 : "+m" (v->counter));
}
+
+/******************************************************************
+ * bitops operations *
+ ******************************************************************/
+
/* from linux/asm-x86/bitops.h */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif

/* atomically set bit nr in the bitmap at addr */
static inline void set_bit(int nr, volatile void *addr)
{
    asm volatile("lock bts %1,%0"
                 : ADDR
                 : "Ir" (nr) : "memory");
}

/* atomically clear bit nr in the bitmap at addr */
static inline void clear_bit(int nr, volatile void *addr)
{
    asm volatile("lock btr %1,%0"
                 : ADDR
                 : "Ir" (nr));
}

/* atomically set bit nr; returns non-zero iff it was already set */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile("lock bts %2,%1\n\t"
                 "sbb %0,%0"
                 : "=r" (oldbit), ADDR
                 : "Ir" (nr) : "memory");

    return oldbit;
}

/* atomically clear bit nr; returns non-zero iff it was set */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile("lock btr %2,%1\n\t"
                 "sbb %0,%0"
                 : "=r" (oldbit), ADDR
                 : "Ir" (nr) : "memory");

    return oldbit;
}

/* non-atomic: non-zero iff bit nr is set */
static inline int test_bit(int nr, volatile const void *addr)
{
    int oldbit;

    asm volatile("bt %2,%1\n\t"
                 "sbb %0,%0"
                 : "=r" (oldbit)
                 : "m" (*(unsigned long *)addr), "Ir" (nr));

    return oldbit;
}
+
+/******************************************************************
+ * spinlock operations *
+ ******************************************************************/
+
#define barrier() asm volatile("": : :"memory")

/* byte spinlock: slock == 1 means unlocked, <= 0 means locked */
typedef struct {
    volatile unsigned int slock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { .slock = 1 }
#define spin_lock_init(x) do { (x)->slock = 1; } while(0)

/* NOTE(review): inspects only the low byte of slock -- assumes a
 * little-endian layout, which holds on x86 */
#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))

/* take the lock: decrement, spin with pause while it stays <= 0 */
static inline void spin_lock(spinlock_t *lock)
{
    asm volatile(
        "\n"
        "1: lock ; decb %0\n"
        "   jns 3f\n"
        "2: pause\n"
        "   cmpb $0,%0\n"
        "   jle 2b\n"
        "   jmp 1b\n"
        "3:\n"
        :"=m" (lock->slock) : : "memory");
}

/* release the lock by storing 1 with an atomic exchange */
static inline void spin_unlock(spinlock_t *lock)
{
    char oldval = 1;

    asm volatile(
        "xchgb %b0, %1"
        :"=q" (oldval), "=m" (lock->slock)
        :"0" (oldval)
        : "memory"
        );
}
This patch adds various header files required to build the xenner kernel. Signed-off-by: Alexander Graf <agraf@suse.de> --- pc-bios/xenner/apicdef.h | 173 ++++++++++ pc-bios/xenner/cpufeature.h | 129 ++++++++ pc-bios/xenner/list.h | 169 ++++++++++ pc-bios/xenner/msr-index.h | 278 ++++++++++++++++ pc-bios/xenner/processor.h | 326 +++++++++++++++++++ pc-bios/xenner/shared.h | 188 +++++++++++ pc-bios/xenner/xenner-emudev.h | 57 ++++ pc-bios/xenner/xenner.h | 684 ++++++++++++++++++++++++++++++++++++++++ 8 files changed, 2004 insertions(+), 0 deletions(-) create mode 100644 pc-bios/xenner/apicdef.h create mode 100644 pc-bios/xenner/cpufeature.h create mode 100644 pc-bios/xenner/list.h create mode 100644 pc-bios/xenner/msr-index.h create mode 100644 pc-bios/xenner/processor.h create mode 100644 pc-bios/xenner/shared.h create mode 100644 pc-bios/xenner/xenner-emudev.h create mode 100644 pc-bios/xenner/xenner.h