//#include <os.h>
#define __ASSEMBLY__
#include "xenmach.h"
#include "mem.h"
/**/
#define ES_offset 0x20
#define ORIG_EAX_offset 0x24
#define EIP_offset 0x28
#define CS_offset 0x2C
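/* do_exception frame, once the entry code has pushed an error code and
 * the handler address and do_exception has pushed its eight registers:
 * 0x00 BX  0x04 CX  0x08 DX  0x0C SI  0x10 DI  0x14 BP  0x18 AX  0x1C DS
 * 0x20 handler address (replaced by ES)  0x24 error code (replaced by -1)
 * 0x28 EIP  0x2C CS
 */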
/**/
#define BTSL_31_4_ESI BYTE $0x0f; BYTE $0xba; BYTE $0x6e; BYTE $0x04; BYTE $0x1f /* BTSL $31, 4(SI); 0F BA /5 needs the imm8 byte */
#define BTRL_AX_4_ESI BYTE $0x0f; BYTE $0xB3; BYTE $0x46; BYTE $0x04 /* BTRL AX, 4(SI) */
#define ENTRY(X) TEXT X(SB), $0
#define SAVE_ALL \
CLD; PUSHL ES; PUSHL DS; PUSHL BP; PUSHL AX; \
PUSHL DX; PUSHL CX; PUSHL BX; \
PUSHL DI; PUSHL SI;
#define RESTORE_ALL \
POPL SI; POPL DI; \
POPL BX; POPL CX; POPL DX; \
POPL AX; POPL BP; POPL DS; POPL ES; \
IRETL
#define RESTORE_ALL_NO_IRET \
POPL SI; POPL DI; \
POPL BX; POPL CX; POPL DX; \
ADDL $16, SP /* discard saved AX, BP, DS, ES */
/*
POPL SI; POPL DI; POPL BP; \
ADDL $12, SP; \
IRETL
*/
#ifdef NOT
POPL BX; \
POPL CX; \
POPL DX; \
POPL SI; \
POPL DI; \
POPL BP; \
POPL AX; \
ADDL $12, SP; \
IRETL
#endif
/**/
#ifdef NO
/* what were they thinking?
POPL DS; \
POPL ES; \
ADDL $4,SP; \
IRETL;*/
#endif
GLOBL HYPERVISOR_SHARED_INFO+0(SB),$8
TEXT DIVIDE_ERROR(SB),$0
PUSHL $0 /* NO ERROR CODE */
// PUSHL $DO_DIVIDE_ERROR(SB)
do_exception:
PUSHL DS
PUSHL AX
XORL AX,AX
PUSHL BP
PUSHL DI
PUSHL SI
PUSHL DX
DECL AX /* AX = -1*/
PUSHL CX
PUSHL BX
CLD
PUSHL ES
POPL CX
// MOVL ES,CX
MOVL ORIG_EAX_offset(SP), SI /* GET THE ERROR CODE*/
MOVL ES_offset(SP), DI /* GET THE FUNCTION ADDRESS*/
MOVL AX, ORIG_EAX_offset(SP)
MOVL CX, ES_offset(SP)
MOVL SP,DX
PUSHL SI /* PUSH THE ERROR CODE*/
PUSHL DX /* PUSH THE PT_REGS POINTER*/
PUSHL $(KDSEL)
PUSHL $(KDSEL)
POPL DS
POPL ES
CALL *DI
ADDL $8,SP
ret_from_exception:
MOVB CS_offset(SP),CL
TESTB $2,CL /* SLOW RETURN TO RING 2 OR 3*/
JNE SAFESTI
RESTORE_ALL
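/* do_exception passes the saved-register pointer and the error code to
 * the C handler, and the ADDL $8,SP above then discards both arguments.
 * A sketch of the C side, assuming a Linux-style pt_regs name (on Plan 9
 * this would more likely be a Ureg*):
 */
#ifdef EXAMPLE
struct pt_regs; /* layout per the offsets defined at the top of this file */
void do_divide_error(struct pt_regs *regs, long error_code);
#endif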
/* this is a note re Xen on linux. we don't do this stuff. */
/* A note on the "critical region" in our handler.*/
/* We want to avoid stacking callback handlers due to events occurring*/
/* during handling of the last event. To do this, we keep events disabled*/
/* until we've done all processing. HOWEVER, we must enable events before*/
/* popping the stack frame (can't be done atomically) and so it would still*/
/* be possible to get enough handler activations to overflow the stack.*/
/* Although unlikely, bugs of that kind are hard to track down, so we'd*/
/* like to avoid the possibility.*/
/* So, on entry to the handler we detect whether we interrupted an*/
/* existing activation in its critical region -- if so, we pop the current*/
/* activation and restart the handler using the previous one.*/
/* end note of stuff we don't do. */
/* what we are trying to do is re-enable interrupts right at the very end,
* so that the most we'll ever have on the stack is an activation frame --
* 10 bytes or so. I don't think we'll overflow that.
*/
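/* a minimal C sketch of that end-of-handler re-enable, assuming the old
 * Xen shared-info layout this file targets (word at offset 0 = pending
 * events, word at offset 4 = event mask, bit 31 = master enable); the
 * function and parameter names are illustrative only:
 */
#ifdef EXAMPLE
static void
enable_event_callbacks(unsigned long *shared_info)
{
	/* what the hand-assembled BTSL $31, 4(SI) at SAFESTI does */
	shared_info[1] |= 1UL << 31;
}
#endif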
//oldstack: BYTE 0; BYTE 0; BYTE 0; BYTE 0
//oldss: BYTE 0; BYTE 0; BYTE 0; BYTE 0
GLOBL oldstack+0(SB), $4
GLOBL oldss+0(SB),$4
GLOBL p0+0(SB), $4
GLOBL p2+0(SB), $4
GLOBL p5+0(SB), $4
GLOBL p6+0(SB),$4
GLOBL hlock+0(SB),$4
/* there's a ton of iterations of ideas in here on how to do this.
* while the Xen/Linux stuff is clever it's a bitch to do anything in
* assembler in Plan 9 (that's a good thing, mind you) so I have tried
* to make things simple. I am leaving all my failed attempts in here
* for now -- detritus of ideas past. RGM.
*/
/* keep it simple. Don't re-enter if you need to. Make exit safe */
ENTRY(hypervisor_callback)
// SAVE_ALL
// MOVL $hlock(SB),BX
// PUSHL ES
// MOVL $225, AX
// POPL ES
// XCHGL AX, (BX)
// RESTORE_ALL
// INC hlock(SB)
SAVE_ALL /* save regs */
CALL evtchn_do_upcall+0(SB) /* evtchn_do_upcall is the trap */
CALL spllo+0(SB) /* spl back to lo */
RESTORE_ALL /* restore regs */
/* annoying assembler*/
/* MOVB $0, $(0x80002000+1)*/
/* */
/* this will be it eventually. For now, just call spllo
* the odds of a collision are *0*, and the odds of multiple
* collisions are less than zero*/
/* note if we don't do the spllo above, this code by itself
* does not re-enable interrupts -- another mystery we need
* to solve.
*/
BYTE $0xc6; BYTE $0x05; BYTE $0x01; BYTE $0x20;
BYTE $0x00; BYTE $0x80; BYTE $0x00 /* hand-assembled MOVB $0, $(0x80002000+1) */
/**/
/**/
IRETL /* return from interrupt */
MOVL AX,p0+0(SB)
MOVL SP, oldstack+0(SB)
PUSHL AX
MOVL $1, AX
MOVL AX, p2+0(SB)
POPL AX
MOVL AX, p5+0(SB)
IRETL
CMPL AX, $0
JEQ _doit
RESTORE_ALL
_doit:
// CALL evtchn_do_upcall+0(SB)
RESTORE_ALL_NO_IRET
MOVL $0, hlock(SB)
IRETL
// MOVL $KDSEL, AX
// MOVW AX, SS
// MOVL $0x80002000, SP
MOVW SS, AX
MOVL AX, oldss+0(SB)
MOVL SP, oldstack+0(SB)
MOVL $0x80196800, AX
MOVL (AX), AX
MOVL AX, p0+0(SB)
MOVL $0x80197008, AX
MOVL (AX), AX
MOVL AX, p2+0(SB)
MOVL $0x80197014, AX
MOVL (AX), AX
MOVL AX, p5+0(SB)
MOVL $0x80197018, AX
MOVL (AX), AX
MOVL AX, p6+0(SB)
MOVL $0x80005000, AX
MOVL $1, (AX)
// PUSHL AX
SAVE_ALL
PUSHL SP
CALL evtchn_do_upcall+0(SB)
ADDL $4, SP
RESTORE_ALL
// POPL AX
IRETL
/* NO CHANGES AFTER THIS POINT YOU IDIOT*/
SAVE_ALL
MOVL EIP_offset(SP),AX
CMPL AX,$SCRIT+0(SB)
JB L11
CMPL AX, $ECRIT(SB)
// shit JB critical_region_fixup(SB)
JB L111
L11: PUSHL SP
CALL evtchn_do_upcall(SB)
ADDL $4,SP
MOVL $HYPERVISOR_SHARED_INFO(SB),SI
XORL AX,AX
MOVB CS_offset(SP),CL
TESTB $2,CL /* SLOW RETURN TO RING 2 OR 3*/
JNE SAFESTI
SAFESTI:BTSL_31_4_ESI /* REENABLE EVENT CALLBACKS*/
TEXT SCRIT+0(SB), 0, $0
/**** START OF CRITICAL REGION ****/
CMPL AX,(SI)
JNE L14 /* PROCESS MORE EVENTS IF NECESSARY...*/
RESTORE_ALL
L14: BTRL_AX_4_ESI
JMP L11
/**** END OF CRITICAL REGION ****/
TEXT ECRIT+0(SB), 0, $0
/* [How we do the fixup]. We want to merge the current stack frame with the*/
/* just-interrupted frame. How we do this depends on where in the critical*/
/* region the interrupted handler was executing, and so how many saved*/
/* registers are in each frame. We do this quickly using the lookup table*/
/* 'critical_fixup_table'. For each byte offset in the critical region, it*/
/* provides the number of bytes which have already been popped from the*/
/* interrupted stack frame. */
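/* an illustrative C model of the merge performed below (assumptions: a
 * full saved frame is 0x34 bytes as in the ADDL below, and n_popped
 * comes from critical_fixup_table; not real kernel code):
 */
#ifdef EXAMPLE
static unsigned long *
fixup_merge(unsigned char *sp, unsigned int n_popped)
{
	unsigned char *src = sp + n_popped; /* end of src region */
	unsigned char *dst = sp + 0x34; /* end of dst region */
	unsigned int n;

	for (n = n_popped / 4; n > 0; n--) { /* pre-decrementing copy */
		src -= 4;
		dst -= 4;
		*(unsigned long *)dst = *(unsigned long *)src;
	}
	return (unsigned long *)dst; /* final dst is the merged stack top */
}
#endif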
L111:
ENTRY(critical_region_fixup)
// ADDL $CRITICAL_FIXUP_TABLE(SB)-$SCRIT(SB),AX
ADDL $CRITICAL_FIXUP_TABLE(SB), AX
SUBL $SCRIT(SB), AX
/* MOVZBL (AX),AX */
MOVBLZX (AX), AX /* AX CONTAINS NUM BYTES POPPED*/
MOVL SP,SI
ADDL AX,SI /* SI POINTS AT END OF SRC REGION*/
MOVL SP,DI
ADDL $0x34,DI /* DI POINTS AT END OF DST REGION*/
MOVL AX,CX
SHRL $2,CX /* CONVERT BYTES TO WORDS*/
JEQ L16 /* SKIP LOOP IF NOTHING TO COPY*/
L15: SUBL $4,SI /* PRE-DECREMENTING COPY LOOP*/
SUBL $4,DI
MOVL (SI),AX
MOVL AX,(DI)
LOOP L15
L16: MOVL DI,SP /* FINAL DI IS TOP OF MERGED STACK*/
JMP L11
TEXT CRITICAL_FIXUP_TABLE(SB), $0
BYTE $0x00; BYTE $0x00 /* CMPL AX,(SI)*/
BYTE $0x00; BYTE $0x00 /* JNE L14*/
BYTE $0x00 /* POP BX*/
BYTE $0x04 /* POP CX*/
BYTE $0x08 /* POP DX*/
BYTE $0x0C /* POP SI*/
BYTE $0x10 /* POP DI*/
BYTE $0x14 /* POP BP*/
BYTE $0x18 /* POP AX*/
BYTE $0x1C /* POP DS*/
BYTE $0x20 /* POP ES*/
BYTE $0x24; BYTE $0x24; BYTE $0x24 /* ADDL $4,SP (3 bytes)*/
BYTE $0x28 /* IRETL*/
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00 /* BTRL $31, 4(SI)*/
BYTE $0x00; BYTE $0x00 /* JMP L11*/
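/* worked example: an interrupt landing at the POP SI instruction (three
 * pops already done) indexes the $0x0C entry, so the fixup merge knows
 * 12 bytes (BX, CX, DX) are already off the interrupted frame. */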
/* Hypervisor uses this for application faults while it executes.*/
ENTRY(failsafe_callback)
IRETL
PUSHL AX
CALL install_safe_pf_handler(SB)
MOVL 32(SP), BX
MOVW BX, DS
MOVL 36(SP), BX
MOVW BX, ES
MOVL 40(SP), BX
MOVW BX, FS
MOVL 44(SP), BX
MOVW BX, GS
CALL install_normal_pf_handler(SB)
POPL AX
ADDL $16, SP
IRETL
#ifdef EXAMPLE
int gcc_call(int op);
int gcc_call_b(int op, unsigned long ebx);
int gcc_call_b_c(int op, unsigned long ebx, unsigned long ecx);
int gcc_call_b_c_d(int op, unsigned long ebx, unsigned long ecx,
unsigned long edx);
int gcc_call_b_c_d_S(int op, unsigned long ebx, unsigned long ecx,
unsigned long edx, unsigned long esi);
int gcc_call_b_c_d_S_D(int op, unsigned long ebx, unsigned long ecx,
unsigned long edx, unsigned long esi, unsigned long edi);
int gcc_call_b_S(int op, unsigned long ebx, unsigned long esi);
#endif
TEXT gcc_call(SB), $0
MOVL op+0(FP), AX
INT $0x82
RET
TEXT gcc_call_b(SB), $0
MOVL op+0(FP), AX
MOVL VBX+4(FP), BX
INT $0x82
RET
TEXT gcc_call_b_c(SB), $0
MOVL op+0(FP), AX
MOVL VBX+4(FP), BX
MOVL VCX+8(FP), CX
INT $0x82
RET
TEXT gcc_call_b_c_d(SB), $0
MOVL op+0(FP), AX
MOVL VBX+4(FP), BX
MOVL VCX+8(FP), CX
MOVL VDX+12(FP), DX
INT $0x82
RET
TEXT gcc_call_b_c_d_S(SB), $0
MOVL op+0(FP), AX
MOVL VBX+4(FP), BX
MOVL VCX+8(FP), CX
MOVL VDX+12(FP), DX
MOVL VSI+16(FP), SI
INT $0x82
RET
TEXT gcc_call_b_c_d_S_D(SB), $0
MOVL op+0(FP), AX
MOVL VBX+4(FP), BX
MOVL VCX+8(FP), CX
MOVL VDX+12(FP), DX
MOVL VSI+16(FP), SI
MOVL VDI+20(FP), DI
INT $0x82
RET
TEXT gcc_call_b_S(SB), $0
MOVL op+0(FP), AX
MOVL VBX+4(FP), BX
MOVL VSI+8(FP), SI
INT $0x82
RET
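/* sketch of issuing a hypercall through these stubs: the op number goes
 * in AX, arguments in BX/CX/DX/SI/DI, and INT $0x82 traps into Xen. The
 * wrapper below is illustrative; real op numbers come from the Xen
 * headers, not this file:
 */
#ifdef EXAMPLE
int gcc_call_b(int op, unsigned long ebx);

int
do_one_arg_hypercall(int op, unsigned long arg)
{
	return gcc_call_b(op, arg); /* AX=op, BX=arg, INT $0x82 */
}
#endif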
#ifdef NOTYET
ENTRY(coprocessor_error)
PUSHL $0
PUSHL $DO_COPROCESSOR_ERROR(SB)
JMP do_exception
ENTRY(simd_coprocessor_error)
PUSHL $0
PUSHL $DO_SIMD_COPROCESSOR_ERROR(SB)
JMP do_exception
ENTRY(device_not_available)
IRETL
ENTRY(debug)
PUSHL $0
PUSHL $DO_DEBUG(SB)
JMP do_exception
ENTRY(int3)
PUSHL $0
PUSHL $DO_INT3(SB)
JMP do_exception
ENTRY(overflow)
PUSHL $0
PUSHL $DO_OVERFLOW(SB)
JMP do_exception
ENTRY(bounds)
PUSHL $0
PUSHL $DO_BOUNDS(SB)
JMP do_exception
ENTRY(invalid_op)
PUSHL $0
PUSHL $DO_INVALID_OP(SB)
JMP do_exception
ENTRY(coprocessor_segment_overrun)
PUSHL $0
PUSHL $DO_COPROCESSOR_SEGMENT_OVERRUN(SB)
JMP do_exception
ENTRY(double_fault)
PUSHL $DO_DOUBLE_FAULT(SB)
JMP do_exception
ENTRY(invalid_TSS)
PUSHL $DO_INVALID_TSS(SB)
JMP do_exception
ENTRY(segment_not_present)
PUSHL $do_segment_not_present(SB)
JMP do_exception
ENTRY(stack_segment)
PUSHL $do_stack_segment(SB)
JMP do_exception
ENTRY(general_protection)
PUSHL $do_general_protection(SB)
JMP do_exception
ENTRY(alignment_check)
PUSHL $do_alignment_check(SB)
JMP do_exception
/* This handler is special, because it gets an extra value on its stack,*/
/* which is the linear faulting address.*/
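/* the pushes below put the regs pointer on top, then the error code,
 * then the faulting address, so the C side would look roughly like this
 * (pt_regs name assumed, Linux-style):
 */
#ifdef EXAMPLE
struct pt_regs;
void do_page_fault(struct pt_regs *regs, long error_code,
unsigned long address);
#endif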
ENTRY(page_fault)
PUSHL DS
PUSHL AX
XORL AX,AX
PUSHL BP
PUSHL DI
PUSHL SI
PUSHL DX
DECL AX /* AX = -1*/
PUSHL CX
PUSHL BX
CLD
PUSHL ES
POPL CX
// MOVL ES,CX
MOVL ORIG_EAX_offset(SP), SI /* GET THE ERROR CODE*/
MOVL ES_offset(SP), DI /* GET THE FAULTING ADDRESS*/
MOVL AX, ORIG_EAX_offset(SP)
MOVL CX, ES_offset(SP)
MOVL SP,DX
PUSHL DI /* PUSH THE FAULTING ADDRESS*/
PUSHL SI /* PUSH THE ERROR CODE*/
PUSHL DX /* PUSH THE PT_REGS POINTER*/
PUSHL $(KDSEL)
PUSHL $(KDSEL)
POPL DS
POPL ES
CALL do_page_fault(SB)
ADDL $12,SP
JMP ret_from_exception
ENTRY(machine_check)
PUSHL $0
PUSHL $DO_MACHINE_CHECK(SB)
JMP do_exception
ENTRY(spurious_interrupt_bug)
PUSHL $0
PUSHL $DO_SPURIOUS_INTERRUPT_BUG(SB)
JMP do_exception
#endif