/******************************************************************************
* arch-i386/hypervisor-if.h
*
* Guest OS interface to x86 32-bit Xen.
*/
#ifndef __HYPERVISOR_IF_I386_H__
#define __HYPERVISOR_IF_I386_H__
/*
* Pointers and other address fields inside interface structures are padded to
* 64 bits. This means that field alignments aren't different between 32- and
* 64-bit architectures.
*/
/* Top of memory: 0 - 64MB = 0xFC000000, i.e. HYPERVISOR_VIRT_START below. */
#define TOM ((ulong) (0 - 64*1024*1024))
#ifndef __ASSEMBLY__
typedef signed char s8;
typedef u8int u8;
typedef u16int u16;
typedef short s16;
typedef u32int u32;
typedef u64int u64;
typedef long s32;
struct pad {u32int pad;};
/* Memory barriers and branch hints are stubbed out as no-ops. */
#define mb()
#define wmb()
#define unlikely(x) (x)
#define BUG() panic("BUG()")
/* Map Linux-style slab-cache calls onto plain malloc/free; the "cache"
 * handle is simply the object size. */
#define kmem_cache_alloc(size, unused) malloc(size)
#define kmem_cache_free(unused, x) free(x)
#define kmem_cache_create(a, size, b, c, d, e) size
/* ignore gcc juju for packing */
#define PACKED
#define MEMORY_PADDING struct pad
#define _MEMORY_PADDING(X) struct pad
#endif /* __ASSEMBLY__ */
/*
* SEGMENT DESCRIPTOR TABLES
*/
/*
* A number of GDT entries are reserved by Xen. These are not situated at the
* start of the GDT because some stupid OSes export hard-coded selector values
* in their ABI. These hard-coded values are always near the start of the GDT,
* so Xen places itself out of the way.
*
* NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
* and LAST_RESERVED_GDT_ENTRY are reserved).
*/
#define NR_RESERVED_GDT_ENTRIES 40
#define FIRST_RESERVED_GDT_ENTRY 256
#define LAST_RESERVED_GDT_ENTRY \
(FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
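/*
 * Example: with the values above, the reserved indices are 256..295
 * inclusive. A guest building its own GDT can keep clear of the range
 * with a check along these lines (hypothetical helper, not part of the
 * interface):
 *
 *     static int gdt_entry_is_reserved(unsigned int idx)
 *     {
 *         return idx >= FIRST_RESERVED_GDT_ENTRY &&
 *                idx <= LAST_RESERVED_GDT_ENTRY;
 *     }
 */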
/*
* These flat segments are in the Xen-private section of every GDT. Since these
* are also present in the initial GDT, many OSes will be able to avoid
* installing their own GDT.
*/
#define FLAT_RING1_CS 0x0819 /* GDT index 259 */
#define FLAT_RING1_DS 0x0821 /* GDT index 260 */
#define FLAT_RING3_CS 0x082b /* GDT index 261 */
#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
#define FLAT_GUESTOS_CS FLAT_RING1_CS
#define FLAT_GUESTOS_DS FLAT_RING1_DS
#define FLAT_USER_CS FLAT_RING3_CS
#define FLAT_USER_DS FLAT_RING3_DS
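/*
 * A selector value encodes (index << 3) | RPL, with the table-indicator
 * bit clear (GDT rather than LDT). For example:
 *
 *     FLAT_RING1_CS == (259 << 3) | 1 == 0x0819    (ring 1)
 *     FLAT_RING3_CS == (261 << 3) | 3 == 0x082b    (ring 3)
 */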
/* And the trap vector is... */
#define TRAP_INSTR "int $0x82"
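/*
 * Hypercalls are issued by raising this software interrupt. A minimal
 * sketch using GCC extended asm, assuming the usual 32-bit convention
 * (hypercall number in EAX, arguments in EBX/ECX/..., result in EAX);
 * 'hypercall2' is a hypothetical helper, not part of this interface:
 *
 *     static long hypercall2(long op, long a1, long a2)
 *     {
 *         long ret;
 *         __asm__ __volatile__ (
 *             TRAP_INSTR
 *             : "=a" (ret)
 *             : "0" (op), "b" (a1), "c" (a2)
 *             : "memory");
 *         return ret;
 *     }
 */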
/*
* Virtual addresses beyond this are not modifiable by guest OSes. The
* machine->physical mapping table starts at this address, read-only.
*/
#define HYPERVISOR_VIRT_START (0xFC000000UL)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif
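/*
 * Sketch: the table is indexed by machine frame number (MFN) and yields
 * the guest's pseudo-physical frame number, so the reverse translation
 * is a single read-only lookup:
 *
 *     unsigned long pfn = machine_to_phys_mapping[mfn];
 */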
#ifndef __ASSEMBLY__
/* NB. Both of the following are 32 bits wide. */
typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
typedef unsigned long cpureg_t; /* Full-sized register. */
/*
* Send an array of these to HYPERVISOR_set_trap_table()
*/
#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
#define TI_GET_IF(_ti) ((_ti)->flags & 4)
#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
typedef struct {
u8 vector; /* 0: exception vector */
u8 flags; /* 1: bits 0-1: privilege level; bit 2: clear event enable? */
u16 cs; /* 2: code selector */
memory_t address; /* 4: code address */
} PACKED trap_info_t; /* 8 bytes */
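/*
 * Sketch: filling one virtual IDT entry (the breakpoint trap, vector 3,
 * callable from ring 3) and handing the table to Xen. The table is
 * terminated by an all-zero entry; HYPERVISOR_set_trap_table() is
 * declared elsewhere and 'int3_handler' is a hypothetical guest routine:
 *
 *     trap_info_t traps[2];
 *
 *     memset(traps, 0, sizeof(traps));
 *     traps[0].vector  = 3;
 *     TI_SET_DPL(&traps[0], 3);
 *     traps[0].cs      = FLAT_GUESTOS_CS;
 *     traps[0].address = (memory_t)int3_handler;
 *     HYPERVISOR_set_trap_table(traps);
 */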
typedef struct {
unsigned long ebx;
unsigned long ecx;
unsigned long edx;
unsigned long esi;
unsigned long edi;
unsigned long ebp;
unsigned long eax;
unsigned long ds;
unsigned long es;
unsigned long fs;
unsigned long gs;
unsigned long _unused;
unsigned long eip;
unsigned long cs;
unsigned long eflags;
unsigned long esp;
unsigned long ss;
} PACKED execution_context_t;
typedef struct {
u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
} PACKED tsc_timestamp_t; /* 8 bytes */
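/*
 * Sketch: 'tsc_bits' holds bit positions tsc_bitshift..tsc_bitshift+31 of
 * the 64-bit TSC, so (given a tsc_timestamp_t *ts) the sampled value can
 * be widened back out, with the bits below tsc_bitshift lost:
 *
 *     u64 tsc = ((u64)ts->tsc_bits) << ts->tsc_bitshift;
 */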
/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
typedef struct {
#define ECF_I387_VALID (1<<0)
unsigned long flags;
execution_context_t cpu_ctxt; /* User-level CPU registers */
char fpu_ctxt[256]; /* User-level FPU registers */
trap_info_t trap_ctxt[256]; /* Virtual IDT */
unsigned int fast_trap_idx; /* "Fast trap" vector offset */
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
unsigned long pt_base; /* CR3 (pagetable base) */
unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
unsigned long event_callback_cs; /* CS:EIP of event callback */
unsigned long event_callback_eip;
unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
unsigned long failsafe_callback_eip;
} PACKED full_execution_context_t;
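/*
 * Sketch: minimal initialisation of a context for a new guest, using the
 * flat Xen segments. 'start_eip', 'start_esp' and 'pt_machine_addr'
 * (the value to load into CR3) are hypothetical inputs from the domain
 * builder; fields not shown are left zero:
 *
 *     full_execution_context_t ctxt;
 *
 *     memset(&ctxt, 0, sizeof(ctxt));
 *     ctxt.cpu_ctxt.cs  = FLAT_GUESTOS_CS;
 *     ctxt.cpu_ctxt.ds  = FLAT_GUESTOS_DS;
 *     ctxt.cpu_ctxt.es  = FLAT_GUESTOS_DS;
 *     ctxt.cpu_ctxt.ss  = FLAT_GUESTOS_DS;
 *     ctxt.cpu_ctxt.eip = start_eip;
 *     ctxt.cpu_ctxt.esp = start_esp;
 *     ctxt.pt_base      = pt_machine_addr;
 */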
#undef ARCH_HAS_FAST_TRAP
void queue_l1_entry_update(unsigned long *pteptr, unsigned long val);
void queue_l2_entry_update(unsigned long *ptr, unsigned long val);
void _flush_page_update_queue(void);
void queue_pgd_pin(unsigned long *ptr);
void queue_pgd_unpin(unsigned long *ptr);
void queue_pt_switch(unsigned long ptr);
void queue_tlb_flush(void);
int set_va_mfn(void *va, unsigned long mfn, unsigned long perm);
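/*
 * Sketch: page-table writes go through the update queue rather than being
 * made directly, and only take effect once the queue is flushed. 'pte' is
 * a hypothetical pointer into an L1 page table:
 *
 *     queue_l1_entry_update(pte, new_val);
 *     queue_tlb_flush();
 *     _flush_page_update_queue();
 */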
#endif
#endif