#include "xenmach.h" int ffs(unsigned long); int synch_test_bit(int bit, void *p); void synch_set_bit(int bit, void *p); void synch_clear_bit(int bit, void *p); int synch_test_and_set_bit(int bit, void *p); int synch_test_and_clear_bit(int bit, void *p); ulong xchg(ulong *l, int val); void dp(char*, ...); #pragma varargck argpos dp 1 void xen_mm_info(void); unsigned long xen_ma_to_pa(unsigned long); /****************************************************************************** * hypervisor-if.h * * Guest OS interface to Xen. */ #ifndef __HYPERVISOR_IF_H__ #define __HYPERVISOR_IF_H__ /* GCC-specific way to pack structure definitions (no implicit padding). */ /* * HYPERVISOR "SYSTEM CALLS" */ /* EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op 6 #define __HYPERVISOR_dom0_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_set_fast_trap 11 #define __HYPERVISOR_dom_mem_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op 16 #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op 19 #define __HYPERVISOR_update_va_mapping_otherdomain 20 /* * MULTICALLS * * Multicalls are listed in an array, with each element being a fixed size * (BYTES_PER_MULTICALL_ENTRY). Each is of the form (op, arg1, ..., argN) * where each element of the tuple is a machine word. */ #define ARGS_PER_MULTICALL_ENTRY 8 /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from the hypervisor. */ #define VIRQ_MISDIRECT 0 /* Catch-all interrupt for unbound VIRQs. */ #define VIRQ_TIMER 1 /* Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 2 /* Request guest to dump debug info. */ #define VIRQ_CONSOLE 3 /* (DOM0) bytes received on emergency console. */ #define VIRQ_DOM_EXC 4 /* (DOM0) Exceptional event for some domain. */ #define NR_VIRQS 5 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * ptr[1:0] specifies the appropriate MMU_* command. * * GPS (General-Purpose Subject) * ----------------------------- * This domain that must own all non-page-table pages that are involved in * MMU updates. By default it is the domain that executes mmu_update(). If the * caller has sufficient privilege then it can be changed by executing * MMUEXT_SET_SUBJECTDOM. * * PTS (Page-Table Subject) * ------------------------ * This domain must own all the page-table pages that are subject to MMU * updates. By default it is the domain that executes mmu_update(). If the * caller has sufficient privilege then it can be changed by executing * MMUEXT_SET_SUBJECTDOM with val[14] (SET_PAGETABLE_SUBJECTDOM) set. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. * ptr[:2] -- machine address of the page-table entry to modify [1] * val -- value to write [2] * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. 
* ptr[:2] -- machine address within the frame whose mapping to modify [3] * val -- value to write into the mapping entry * * ptr[1:0] == MMU_EXTENDED_COMMAND: * val[7:0] -- MMUEXT_* command * * val[7:0] == MMUEXT_(UN)PIN_*_TABLE: * ptr[:2] -- machine address of frame to be (un)pinned as a p.t. page [1] * * val[7:0] == MMUEXT_NEW_BASEPTR: * ptr[:2] -- machine address of new page-table base to install in MMU [1] * * val[7:0] == MMUEXT_TLB_FLUSH: * no additional arguments * * val[7:0] == MMUEXT_INVLPG: * ptr[:2] -- linear address to be flushed from the TLB * * val[7:0] == MMUEXT_SET_LDT: * ptr[:2] -- linear address of LDT base (NB. must be page-aligned) * val[:8] -- number of entries in LDT * * val[7:0] == MMUEXT_SET_SUBJECTDOM: * val[14] -- if TRUE then sets the PTS in addition to the GPS. * (ptr[31:15],val[31:15]) -- dom[31:0] * * val[7:0] == MMUEXT_REASSIGN_PAGE: * ptr[:2] -- machine address within page to be reassigned to the GPS. * * val[7:0] == MMUEXT_RESET_SUBJECTDOM: * Resets both the GPS and the PTS to their defaults (i.e., calling domain). * * Notes on constraints on the above arguments: * [1] The page frame containing the machine address must belong to the PTS. * [2] If the PTE is valid (i.e., bit 0 is set) then the specified page frame * must belong to: * (a) the PTS (if the PTE is part of a non-L1 table); or * (b) the GPS (if the PTE is part of an L1 table). * [3] The page frame containing the machine address must belong to the GPS. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 2 /* ptr = MA of frame to modify entry for */ #define MMU_EXTENDED_COMMAND 3 /* least 8 bits of val demux further */ #define MMUEXT_PIN_L1_TABLE 0 /* ptr = MA of frame to pin */ #define MMUEXT_PIN_L2_TABLE 1 /* ptr = MA of frame to pin */ #define MMUEXT_PIN_L3_TABLE 2 /* ptr = MA of frame to pin */ #define MMUEXT_PIN_L4_TABLE 3 /* ptr = MA of frame to pin */ #define MMUEXT_UNPIN_TABLE 4 /* ptr = MA of frame to unpin */ #define MMUEXT_NEW_BASEPTR 5 /* ptr = MA of new pagetable base */ #define MMUEXT_TLB_FLUSH 6 /* ptr = nil */ #define MMUEXT_INVLPG 7 /* ptr = VA to invalidate */ #define MMUEXT_SET_LDT 8 /* ptr = VA of table; val = # entries */ #define MMUEXT_SET_SUBJECTDOM 9 /* (ptr[31:15],val[31:15]) = dom[31:0] */ #define SET_PAGETABLE_SUBJECTDOM (1<<14) /* OR into 'val' arg of SUBJECTDOM */ #define MMUEXT_REASSIGN_PAGE 10 #define MMUEXT_RESET_SUBJECTDOM 11 #define MMUEXT_CMD_MASK 255 #define MMUEXT_CMD_SHIFT 8 /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ #define UVMF_FLUSH_TLB 1 /* Flush entire TLB. */ #define UVMF_INVLPG 2 /* Flush the VA mapping being updated. */ /* * Commands to HYPERVISOR_sched_op(). */ #define SCHEDOP_yield 0 /* Give up the CPU voluntarily. */ #define SCHEDOP_block 1 /* Block until an event is received. */ #define SCHEDOP_shutdown 2 /* Stop executing this domain. */ #define SCHEDOP_cmdmask 255 /* 8-bit command. */ #define SCHEDOP_reasonshift 8 /* 8-bit reason code. (SCHEDOP_shutdown) */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_dom_mem_op(). */ #define MEMOP_increase_reservation 0 #define MEMOP_decrease_reservation 1 #ifndef __ASSEMBLY__ typedef u16 domid_t; /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7ff0) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. 
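 *
 * Illustrative sketch (not part of the interface): batching one
 * page-table-entry write with the matching machine->physical update, using
 * the MMU_* commands defined above.  'pte_ma', 'new_pte', 'frame_ma' and
 * 'pfn' are hypothetical values the caller already knows; the
 * HYPERVISOR_mmu_update() wrapper is declared later in this file and dp()
 * at the top of it.
 *
 *     mmu_update_t req[2];
 *     int done;
 *
 *     req[0].ptr = pte_ma | MMU_NORMAL_PT_UPDATE;    // checked '*ptr = val'
 *     req[0].val = new_pte;
 *     req[1].ptr = frame_ma | MMU_MACHPHYS_UPDATE;   // machine->pseudo-phys entry
 *     req[1].val = pfn;
 *     if(HYPERVISOR_mmu_update(req, 2, &done) < 0 || done != 2)
 *         dp("mmu_update failed\n");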
*/ typedef struct { memory_t ptr; /* Machine address of PTE. */ memory_t val; /* New contents of PTE. */ } PACKED mmu_update_t; /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ typedef struct { cpureg_t op; cpureg_t args[7]; } PACKED multicall_entry_t; /* Event channel endpoints per domain. */ #define NR_EVENT_CHANNELS 1024 /* No support for multi-processor guests. */ #define MAX_VIRT_CPUS 1 /* * Xen/guestos shared data -- pointer provided in start_info. * NB. We expect that this struct is smaller than a page. */ typedef struct shared_info_st { /* * Per-VCPU information goes here. This will be cleaned up more when Xen * actually supports multi-VCPU guests. */ struct { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ u8 evtchn_upcall_pending; u8 evtchn_upcall_mask; u8 pad0, pad1; } PACKED vcpu_data[MAX_VIRT_CPUS]; /* 0 */ /* * A domain can have up to 1024 "event channels" on which it can send * and receive asynchronous event notifications. There are three classes * of event that are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually the setup * is initiated and organised by a privileged third party such as * software running in domain 0). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index" between 0 and 1023. * Each channel is associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. 
If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * 32-bit selector to be set. Each bit in the selector covers a 32-bit * word in the PENDING bitfield array. */ u32 evtchn_pending[32]; /* 4 */ u32 evtchn_pending_sel; /* 132 */ u32 evtchn_mask[32]; /* 136 */ /* * Time: The following abstractions are exposed: System Time, Clock Time, * Domain Virtual Time. Domains can access Cycle counter time directly. */ u64 cpu_freq; /* 264: CPU frequency (Hz). */ /* * The following values are updated periodically (and not necessarily * atomically!). The guest OS detects this because 'time_version1' is * incremented just before updating these values, and 'time_version2' is * incremented immediately after. See the Xen-specific Linux code for an * example of how to read these values safely (arch/xen/kernel/time.c). */ u32 time_version1; /* 272 */ u32 time_version2; /* 276 */ tsc_timestamp_t tsc_timestamp; /* TSC at last update of time vals. */ u64 system_time; /* Time, in nanosecs, since boot. */ u32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ u32 wc_usec; /* Usecs 00:00:00 UTC, Jan 1, 1970. */ u64 domain_time; /* Domain virtual time, in nanosecs. */ /* * Timeout values: * Allow a domain to specify a timeout value in system time and * domain virtual time. */ u64 wall_timeout; /* 312 */ u64 domain_timeout; /* 320 */ execution_context_t execution_context; /* 328 */ } PACKED shared_info_t; /* * Start-of-day memory layout for the initial domain (DOM0): * 1. The domain is started within a contiguous virtual-memory region. * 2. The contiguous region begins and ends on an aligned 4MB boundary. * 3. The region start corresponds to the load address of the OS image. * If the load address is not 4MB aligned then the address is rounded down. * 4. This is the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * d. bootstrap page tables [pt_base, CR3 (x86)] * e. start_info_t structure [register ESI (x86)] * f. bootstrap stack [register ESP (x86)] * 5. Bootstrap elements are packed together, but each is 4kB-aligned. * 6. The initial ram disk may be omitted. * 7. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 8. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 9. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ /* * This is the basic bootstrap information structure as passed by Xen to the * initial controller domain. We want this structure to be easily extended by * more sophisticated domain builders and controllers, so we make the basic * fields of this structure available via a BASIC_START_INFO macro.
* * Extended version of start_info_t should be defined as: * typedef struct { * BASIC_START_INFO; * <...extra fields...> * } extended_start_info_t; */ #define MAX_CMDLINE 256 #define BASIC_START_INFO \ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ \ memory_t nr_pages; /* 0: Total pages allocated to this domain. */ \ _MEMORY_PADDING(A); \ memory_t shared_info; /* 8: MACHINE address of shared info struct.*/ \ _MEMORY_PADDING(B); \ u32 flags; /* 16: SIF_xxx flags. */ \ /* new in 2.0! */\ /* this but-stuff is just awful. I have no idea if this will work or not */\ u16 domain_controller_evtchn;\ u16 __pad; \ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ \ memory_t pt_base; /* 24: VIRTUAL address of page directory. */ \ _MEMORY_PADDING(C); \ memory_t nr_pt_frames; /* 32: Number of bootstrap p.t. frames. */ \ _MEMORY_PADDING(D); \ memory_t mfn_list; /* 40: VIRTUAL address of page-frame list. */ \ _MEMORY_PADDING(E); \ memory_t mod_start; /* 48: VIRTUAL address of pre-loaded module. */ \ _MEMORY_PADDING(F); \ memory_t mod_len; /* 56: Size (bytes) of pre-loaded module. */ \ _MEMORY_PADDING(G); \ u8 cmd_line[MAX_CMDLINE] /* 64 */ typedef struct { BASIC_START_INFO; } PACKED start_info_t; /* 320 bytes */ /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ /* For use in guest OSes. */ extern shared_info_t *HYPERVISOR_shared_info; #endif /* !__ASSEMBLY__ */ #endif /* __HYPERVISOR_IF_H__ */ /****************************************************************************** * event_channel.h * * Event channels between domains. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __HYPERVISOR_IFS__EVENT_CHANNEL_H__ #define __HYPERVISOR_IFS__EVENT_CHANNEL_H__ /* * EVTCHNOP_bind_interdomain: Open an event channel between <dom1> and <dom2>. * NOTES: * 1. <dom1> and/or <dom2> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may create an event channel. * 3. <port1> and <port2> are only supplied if the op succeeds. */ #define EVTCHNOP_bind_interdomain 0 typedef struct { /* IN parameters. */ domid_t dom1, dom2; /* 0, 4 */ /* OUT parameters. */ u32 port1, port2; /* 8, 12 */ } PACKED evtchn_bind_interdomain_t; /* 16 bytes */ /* * EVTCHNOP_bind_virq: Bind a local event channel to virtual IRQ <virq>. * NOTES: * 1. A virtual IRQ may be bound to at most one event channel per domain. */ #define EVTCHNOP_bind_virq 1 typedef struct { /* IN parameters. */ u32 virq; /* 0 */ /* OUT parameters. */ u32 port; /* 4 */ } PACKED evtchn_bind_virq_t; /* 8 bytes */ /* * EVTCHNOP_bind_pirq: Bind a local event channel to physical IRQ <pirq>. * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 typedef struct { /* IN parameters. */ u32 pirq; /* 0 */ #define BIND_PIRQ__WILL_SHARE 1 u32 flags; /* BIND_PIRQ__* */ /* 4 */ /* OUT parameters. */ u32 port; /* 8 */ } PACKED evtchn_bind_pirq_t; /* 12 bytes */ /* * EVTCHNOP_close: Close the communication channel which has an endpoint at * <dom, port>. * NOTES: * 1. <dom> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may close an event channel * for which <dom> is not DOMID_SELF. */ #define EVTCHNOP_close 3 typedef struct { /* IN parameters. */ domid_t dom; /* 0 */ u32 port; /* 4 */ /* No OUT parameters.
*/ } PACKED evtchn_close_t; /* 8 bytes */ /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is <local_port>. */ #define EVTCHNOP_send 4 typedef struct { /* IN parameters. */ u32 local_port; /* 0 */ /* No OUT parameters. */ } PACKED evtchn_send_t; /* 4 bytes */ /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at <dom, port>. * NOTES: * 1. <dom> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which <dom> is not DOMID_SELF. */ #define EVTCHNOP_status 5 typedef struct { /* IN parameters */ domid_t dom; /* 0 */ u32 port; /* 4 */ /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is not bound to a source. */ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line. */ u32 status; /* 8 */ union { /* 12 */ struct { domid_t dom; /* 12 */ u32 port; /* 16 */ } PACKED interdomain; /* EVTCHNSTAT_interdomain */ u32 pirq; /* EVTCHNSTAT_pirq */ /* 12 */ u32 virq; /* EVTCHNSTAT_virq */ /* 12 */ } PACKED u; } PACKED evtchn_status_t; /* 20 bytes */ typedef struct { u32 cmd; /* EVTCHNOP_* */ /* 0 */ u32 __reserved; /* 4 */ union { /* 8 */ evtchn_bind_interdomain_t bind_interdomain; evtchn_bind_virq_t bind_virq; evtchn_bind_pirq_t bind_pirq; evtchn_close_t close; evtchn_send_t send; evtchn_status_t status; u8 __dummy[24]; } PACKED u; } PACKED evtchn_op_t; /* 32 bytes */ #endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */ /* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- **************************************************************************** * (c) 2004 - Rolf Neugebauer - Intel Research Cambridge * (c) 2004 - Keir Fraser - University of Cambridge **************************************************************************** * Description: Interface for domains to access physical devices on the PCI bus */ #ifndef __HYPERVISOR_IFS_PHYSDEV_H__ #define __HYPERVISOR_IFS_PHYSDEV_H__ /* Commands to HYPERVISOR_physdev_op() */ #define PHYSDEVOP_PCI_CFGREG_READ 0 #define PHYSDEVOP_PCI_CFGREG_WRITE 1 #define PHYSDEVOP_PCI_INITIALISE_DEVICE 2 #define PHYSDEVOP_PCI_PROBE_ROOT_BUSES 3 #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 #define PHYSDEVOP_IRQ_STATUS_QUERY 5 /* Read from PCI configuration space. */ typedef struct { /* IN */ u32 bus; /* 0 */ u32 dev; /* 4 */ u32 func; /* 8 */ u32 reg; /* 12 */ u32 len; /* 16 */ /* OUT */ u32 value; /* 20 */ } PACKED physdevop_pci_cfgreg_read_t; /* 24 bytes */ /* Write to PCI configuration space. */ typedef struct { /* IN */ u32 bus; /* 0 */ u32 dev; /* 4 */ u32 func; /* 8 */ u32 reg; /* 12 */ u32 len; /* 16 */ u32 value; /* 20 */ } PACKED physdevop_pci_cfgreg_write_t; /* 24 bytes */ /* Do final initialisation of a PCI device (e.g., last-moment IRQ routing). */ typedef struct { /* IN */ u32 bus; /* 0 */ u32 dev; /* 4 */ u32 func; /* 8 */ } PACKED physdevop_pci_initialise_device_t; /* 12 bytes */ /* Find the root buses for subsequent scanning. */ typedef struct { /* OUT */ u32 busmask[256/32]; /* 0 */ } PACKED physdevop_pci_probe_root_buses_t; /* 32 bytes */ typedef struct { /* IN */ u32 irq; /* 0 */ /* OUT */ /* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced?
*/ #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0) u32 flags; /* 4 */ } PACKED physdevop_irq_status_query_t; /* 8 bytes */ typedef struct _physdev_op_st { u32 cmd; /* 0 */ u32 __pad; /* 4 */ union { /* 8 */ physdevop_pci_cfgreg_read_t pci_cfgreg_read; physdevop_pci_cfgreg_write_t pci_cfgreg_write; physdevop_pci_initialise_device_t pci_initialise_device; physdevop_pci_probe_root_buses_t pci_probe_root_buses; physdevop_irq_status_query_t irq_status_query; u8 __dummy[32]; } PACKED u; } PACKED physdev_op_t; /* 40 bytes */ #endif /* __HYPERVISOR_IFS_PHYSDEV_H__ */ /** * Generic scheduler control interface. * * Mark Williamson, (C) 2004 Intel Research Cambridge */ #ifndef __SCHED_CTL_H__ #define __SCHED_CTL_H__ /* Scheduler types */ #define SCHED_BVT 0 #define SCHED_FBVT 1 #define SCHED_ATROPOS 2 #define SCHED_RROBIN 3 /* these describe the intended direction used for a scheduler control or domain * command */ #define SCHED_INFO_PUT 0 #define SCHED_INFO_GET 1 /* * Generic scheduler control command - used to adjust system-wide scheduler * parameters */ struct sched_ctl_cmd { u32 sched_id; /* 0 */ u32 direction; /* 4 */ union { /* 8 */ struct bvt_ctl { /* IN variables. */ u32 ctx_allow; /* 8: context switch allowance */ } PACKED bvt; struct fbvt_ctl { /* IN variables. */ u32 ctx_allow; /* 8: context switch allowance */ } PACKED fbvt; struct rrobin_ctl { /* IN variables */ u64 slice; /* 8: round robin time slice */ } PACKED rrobin; } PACKED u; } PACKED; /* 16 bytes */ struct sched_adjdom_cmd { u32 sched_id; /* 0 */ u32 direction; /* 4 */ domid_t domain; /* 8 */ u32 __pad; union { /* 16 */ struct bvt_adjdom { u32 mcu_adv; /* 16: mcu advance: inverse of weight */ u32 warp; /* 20: time warp */ u32 warpl; /* 24: warp limit */ u32 warpu; /* 28: unwarp time requirement */ } PACKED bvt; struct fbvt_adjdom { u32 mcu_adv; /* 16: mcu advance: inverse of weight */ u32 warp; /* 20: time warp */ u32 warpl; /* 24: warp limit */ u32 warpu; /* 28: unwarp time requirement */ } PACKED fbvt; struct atropos_adjdom { u64 nat_period; /* 16 */ u64 nat_slice; /* 24 */ u64 latency; /* 32 */ u32 xtratime; /* 36 */ } PACKED atropos; } PACKED u; } PACKED; /* 40 bytes */ #endif /* __SCHED_CTL_H__ */ /****************************************************************************** * include/hypervisor-ifs/trace.h */ #ifndef __HYPERVISOR_IFS_TRACE_H__ #define __HYPERVISOR_IFS_TRACE_H__ /* This structure represents a single trace buffer record. */ struct t_rec { u64 cycles; /* 64 bit cycle counter timestamp */ u32 event; /* 32 bit event ID */ u32 d1, d2, d3, d4, d5; /* event data items */ }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { struct t_rec *data; /* pointer to data area. physical address * for convenience in user space code */ unsigned long size; /* size of the data area, in t_recs */ unsigned long head; /* array index of the most recent record */ /* Kernel-private elements follow... */ struct t_rec *head_ptr; /* pointer to the head record */ struct t_rec *vdata; /* virtual address pointer to data */ }; #endif /* __HYPERVISOR_IFS_TRACE_H__ */ /****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2004, K Fraser */ #ifndef __DOM0_OPS_H__ #define __DOM0_OPS_H__ /* * Make sure you increment the interface version whenever you modify this file! 
* This makes sure that old versions of dom0 tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define DOM0_INTERFACE_VERSION 0xAAAA0010 #define MAX_DOMAIN_NAME 16 /************************************************************************/ #define DOM0_GETMEMLIST 2 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 __pad; memory_t max_pfns; /* 8 */ MEMORY_PADDING; void *buffer; /* 16 */ MEMORY_PADDING; /* OUT variables. */ memory_t num_pfns; /* 24 */ MEMORY_PADDING; } PACKED dom0_getmemlist_t; /* 32 bytes */ #define DOM0_SCHEDCTL 6 /* struct sched_ctl_cmd is from sched-ctl.h */ typedef struct sched_ctl_cmd dom0_schedctl_t; #define DOM0_ADJUSTDOM 7 /* struct sched_adjdom_cmd is from sched-ctl.h */ typedef struct sched_adjdom_cmd dom0_adjustdom_t; #define DOM0_CREATEDOMAIN 8 typedef struct { /* IN parameters. */ memory_t memory_kb; /* 0 */ MEMORY_PADDING; u8 name[MAX_DOMAIN_NAME]; /* 8 */ u32 cpu; /* 24 */ u32 __pad; /* 28 */ /* IN/OUT parameters. */ /* If 0, domain is allocated. If non-zero use it unless in use. */ domid_t domain; /* 32 */ /* OUT parameters. */ } PACKED dom0_createdomain_t; /* 36 bytes */ #define DOM0_DESTROYDOMAIN 9 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ } PACKED dom0_destroydomain_t; /* 4 bytes */ #define DOM0_PAUSEDOMAIN 10 typedef struct { /* IN parameters. */ domid_t domain; /* 0 */ } PACKED dom0_pausedomain_t; /* 4 bytes */ #define DOM0_UNPAUSEDOMAIN 11 typedef struct { /* IN parameters. */ domid_t domain; /* 0 */ } PACKED dom0_unpausedomain_t; /* 4 bytes */ #define DOM0_GETDOMAININFO 12 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ /* NB. IN/OUT variable. */ /* OUT variables. */ #define DOMFLAGS_DYING (1<<0) /* Domain is scheduled to die. */ #define DOMFLAGS_CRASHED (1<<1) /* Crashed domain; frozen for postmortem. */ #define DOMFLAGS_SHUTDOWN (1<<2) /* The guest OS has shut itself down. */ #define DOMFLAGS_PAUSED (1<<3) /* Currently paused by control software. */ #define DOMFLAGS_BLOCKED (1<<4) /* Currently blocked pending an event. */ #define DOMFLAGS_RUNNING (1<<5) /* Domain is currently running. */ #define DOMFLAGS_CPUMASK 255 /* CPU to which this domain is bound. */ #define DOMFLAGS_CPUSHIFT 8 #define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */ #define DOMFLAGS_SHUTDOWNSHIFT 16 u32 flags; /* 4 */ u8 name[MAX_DOMAIN_NAME]; /* 8 */ full_execution_context_t *ctxt; /* 24 */ /* NB. IN/OUT variable. */ MEMORY_PADDING; memory_t tot_pages; /* 32 */ MEMORY_PADDING; memory_t max_pages; /* 40 */ MEMORY_PADDING; memory_t shared_info_frame; /* 48: MFN of shared_info struct */ MEMORY_PADDING; u64 cpu_time; /* 56 */ } PACKED dom0_getdomaininfo_t; /* 64 bytes */ #define DOM0_BUILDDOMAIN 13 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 __pad; /* 4 */ /* IN/OUT parameters */ full_execution_context_t *ctxt; /* 8 */ MEMORY_PADDING; } PACKED dom0_builddomain_t; /* 16 bytes */ #define DOM0_IOPL 14 typedef struct { domid_t domain; /* 0 */ u32 iopl; /* 4 */ } PACKED dom0_iopl_t; /* 8 bytes */ #define DOM0_MSR 15 typedef struct { /* IN variables. */ u32 write; /* 0 */ u32 cpu_mask; /* 4 */ u32 msr; /* 8 */ u32 in1; /* 12 */ u32 in2; /* 16 */ /* OUT variables. */ u32 out1; /* 20 */ u32 out2; /* 24 */ } PACKED dom0_msr_t; /* 28 bytes */ #define DOM0_DEBUG 16 typedef struct { /* IN variables. 
*/ domid_t domain; /* 0 */ u8 opcode; /* 4 */ u8 __pad0, __pad1, __pad2; u32 in1; /* 8 */ u32 in2; /* 12 */ u32 in3; /* 16 */ u32 in4; /* 20 */ /* OUT variables. */ u32 status; /* 24 */ u32 out1; /* 28 */ u32 out2; /* 32 */ } PACKED dom0_debug_t; /* 36 bytes */ /* * Set clock such that it would read <secs,usecs> after 00:00:00 UTC, * 1 January, 1970 if the current system time was <system_time>. */ #define DOM0_SETTIME 17 typedef struct { /* IN variables. */ u32 secs; /* 0 */ u32 usecs; /* 4 */ u64 system_time; /* 8 */ } PACKED dom0_settime_t; /* 16 bytes */ #define DOM0_GETPAGEFRAMEINFO 18 #define NOTAB 0 /* normal page */ #define L1TAB (1<<28) #define L2TAB (2<<28) #define L3TAB (3<<28) #define L4TAB (4<<28) #define XTAB (0xf<<28) /* invalid page */ #define LTAB_MASK XTAB typedef struct { /* IN variables. */ memory_t pfn; /* 0: Machine page frame number to query. */ MEMORY_PADDING; domid_t domain; /* 8: To which domain does the frame belong? */ /* OUT variables. */ /* Is the page PINNED to a type? */ u32 type; /* 12: see above type defs */ } PACKED dom0_getpageframeinfo_t; /* 16 bytes */ /* * Read console content from Xen buffer ring. */ #define DOM0_READCONSOLE 19 typedef struct { memory_t str; /* 0 */ MEMORY_PADDING; u32 count; /* 8 */ u32 cmd; /* 12 */ } PACKED dom0_readconsole_t; /* 16 bytes */ /* * Pin Domain to a particular CPU (use -1 to unpin) */ #define DOM0_PINCPUDOMAIN 20 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ s32 cpu; /* 4: -1 implies unpin */ } PACKED dom0_pincpudomain_t; /* 8 bytes */ /* Get trace buffers physical base pointer */ #define DOM0_GETTBUFS 21 typedef struct { /* OUT variables */ memory_t phys_addr; /* 0: location of the trace buffers */ MEMORY_PADDING; u32 size; /* 8: size of each trace buffer, in bytes */ } PACKED dom0_gettbufs_t; /* 12 bytes */ /* * Get physical information about the host machine */ #define DOM0_PHYSINFO 22 typedef struct { u32 ht_per_core; /* 0 */ u32 cores; /* 4 */ u32 cpu_khz; /* 8 */ u32 __pad; /* 12 */ memory_t total_pages; /* 16 */ MEMORY_PADDING; memory_t free_pages; /* 24 */ MEMORY_PADDING; } PACKED dom0_physinfo_t; /* 32 bytes */ /* * Allow a domain access to a physical PCI device */ #define DOM0_PCIDEV_ACCESS 23 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 bus; /* 4 */ u32 dev; /* 8 */ u32 func; /* 12 */ u32 enable; /* 16 */ } PACKED dom0_pcidev_access_t; /* 20 bytes */ /* * Get the ID of the current scheduler. */ #define DOM0_SCHED_ID 24 typedef struct { /* OUT variable */ u32 sched_id; /* 0 */ } PACKED dom0_sched_id_t; /* 4 bytes */ /* * Control shadow pagetables operation */ #define DOM0_SHADOW_CONTROL 25 #define DOM0_SHADOW_CONTROL_OP_OFF 0 #define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1 #define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2 #define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3 #define DOM0_SHADOW_CONTROL_OP_FLUSH 10 /* table ops */ #define DOM0_SHADOW_CONTROL_OP_CLEAN 11 #define DOM0_SHADOW_CONTROL_OP_PEEK 12 #define DOM0_SHADOW_CONTROL_OP_CLEAN2 13 typedef struct dom0_shadow_control { u32 fault_count; u32 dirty_count; u32 dirty_net_count; u32 dirty_block_count; } dom0_shadow_control_stats_t; typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 op; /* 4 */ unsigned long *dirty_bitmap; /* 8: pointer to locked buffer */ MEMORY_PADDING; /* IN/OUT variables. */ memory_t pages; /* 16: size of buffer, updated with actual size */ MEMORY_PADDING; /* OUT variables. */ dom0_shadow_control_stats_t stats; } PACKED dom0_shadow_control_t; #define DOM0_SETDOMAINNAME 26 typedef struct { /* IN variables.
*/ domid_t domain; /* 0 */ u8 name[MAX_DOMAIN_NAME]; /* 4 */ } PACKED dom0_setdomainname_t; /* 20 bytes */ #define DOM0_SETDOMAININITIALMEM 27 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 __pad; memory_t initial_memkb; /* 8 */ MEMORY_PADDING; } PACKED dom0_setdomaininitialmem_t; /* 16 bytes */ #define DOM0_SETDOMAINMAXMEM 28 typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 __pad; memory_t max_memkb; /* 8 */ MEMORY_PADDING; } PACKED dom0_setdomainmaxmem_t; /* 16 bytes */ #define DOM0_GETPAGEFRAMEINFO2 29 /* batched interface */ typedef struct { /* IN variables. */ domid_t domain; /* 0 */ u32 __pad; memory_t num; /* 8 */ MEMORY_PADDING; /* IN/OUT variables. */ unsigned long *array; /* 16 */ MEMORY_PADDING; } PACKED dom0_getpageframeinfo2_t; /* 24 bytes */ typedef struct { u32 cmd; /* 0 */ u32 interface_version; /* 4 */ /* DOM0_INTERFACE_VERSION */ union { /* 8 */ u32 dummy[18]; /* 72 bytes */ dom0_createdomain_t createdomain; dom0_pausedomain_t pausedomain; dom0_unpausedomain_t unpausedomain; dom0_destroydomain_t destroydomain; dom0_getmemlist_t getmemlist; dom0_schedctl_t schedctl; dom0_adjustdom_t adjustdom; dom0_builddomain_t builddomain; dom0_getdomaininfo_t getdomaininfo; dom0_getpageframeinfo_t getpageframeinfo; dom0_iopl_t iopl; dom0_msr_t msr; dom0_debug_t debug; dom0_settime_t settime; dom0_readconsole_t readconsole; dom0_pincpudomain_t pincpudomain; dom0_gettbufs_t gettbufs; dom0_physinfo_t physinfo; dom0_pcidev_access_t pcidev_access; dom0_sched_id_t sched_id; dom0_shadow_control_t shadow_control; dom0_setdomainname_t setdomainname; dom0_setdomaininitialmem_t setdomaininitialmem; dom0_setdomainmaxmem_t setdomainmaxmem; dom0_getpageframeinfo2_t getpageframeinfo2; } PACKED u; } PACKED dom0_op_t; /* 80 bytes */ #endif /* __DOM0_OPS_H__ */ /****************************************************************************** * domain_controller.h * * Interface to server controller (e.g., 'xend'). This header file defines the * interface that is shared with guest OSes. * * Copyright (c) 2004, K A Fraser */ #ifndef __DOMAIN_CONTROLLER_H__ #define __DOMAIN_CONTROLLER_H__ #ifndef BASIC_START_INFO #error "Xen header file hypervisor-if.h must already be included here." #endif /* * EXTENDED BOOTSTRAP STRUCTURE FOR NEW DOMAINS. */ typedef struct { BASIC_START_INFO; /* u16 domain_controller_evtchn; /* 320 */ } PACKED extended_start_info_t; /* 322 bytes */ #define SIF_BLK_BE_DOMAIN (1<<4) /* Is this a block backend domain? */ #define SIF_NET_BE_DOMAIN (1<<5) /* Is this a net backend domain? */ /* * Reason codes for SCHEDOP_shutdown. These are opaque to Xen but may be * interpreted by control software to determine the appropriate action. These * are only really advisories: the controller can actually do as it likes. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ /* * CONTROLLER MESSAGING INTERFACE. 
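 *
 * Both rings below hold fixed-size control_msg_t slots and are driven by
 * free-running producer counters; an index is reduced modulo
 * CONTROL_RING_SIZE with MASK_CONTROL_IDX() before use.  Illustrative
 * sketch of queueing one request (assumptions: 'cif' points at the shared
 * control_if_t page, 'msg' is the control_msg_t to send, and 'evtchn' is
 * the domain_controller_evtchn from start_info; in practice the ctrl_if_*
 * helpers declared later in this file do this, with proper locking and
 * memory barriers):
 *
 *     if(cif->tx_req_prod - cif->tx_resp_prod < CONTROL_RING_SIZE){
 *         cif->tx_ring[MASK_CONTROL_IDX(cif->tx_req_prod)] = *msg;
 *         cif->tx_req_prod++;              // publish the request
 *         notify_via_evtchn(evtchn);       // defined near the end of this file
 *     }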
*/ typedef struct { u8 type; /* 0: echoed in response */ u8 subtype; /* 1: echoed in response */ u8 id; /* 2: echoed in response */ u8 length; /* 3: number of bytes in 'msg' */ u8 msg[60]; /* 4: type-specific message data */ } PACKED control_msg_t; /* 64 bytes */ #define CONTROL_RING_SIZE 8 typedef u32 CONTROL_RING_IDX; #define MASK_CONTROL_IDX(_i) ((_i)&(CONTROL_RING_SIZE-1)) typedef struct { control_msg_t tx_ring[CONTROL_RING_SIZE]; /* 0: guest -> controller */ control_msg_t rx_ring[CONTROL_RING_SIZE]; /* 512: controller -> guest */ CONTROL_RING_IDX tx_req_prod, tx_resp_prod; /* 1024, 1028 */ CONTROL_RING_IDX rx_req_prod, rx_resp_prod; /* 1032, 1036 */ } PACKED control_if_t; /* 1040 bytes */ /* * Top-level command types. */ #define CMSG_CONSOLE 0 /* Console */ #define CMSG_BLKIF_BE 1 /* Block-device backend */ #define CMSG_BLKIF_FE 2 /* Block-device frontend */ #define CMSG_NETIF_BE 3 /* Network-device backend */ #define CMSG_NETIF_FE 4 /* Network-device frontend */ #define CMSG_SHUTDOWN 6 /* Shutdown messages */ /****************************************************************************** * CONSOLE DEFINITIONS */ /* * Subtypes for console messages. */ #define CMSG_CONSOLE_DATA 0 /****************************************************************************** * BLOCK-INTERFACE FRONTEND DEFINITIONS */ /* Messages from domain controller to guest. */ #define CMSG_BLKIF_FE_INTERFACE_STATUS_CHANGED 0 /* Messages from guest to domain controller. */ #define CMSG_BLKIF_FE_DRIVER_STATUS_CHANGED 32 #define CMSG_BLKIF_FE_INTERFACE_CONNECT 33 #define CMSG_BLKIF_FE_INTERFACE_DISCONNECT 34 /* These are used by both front-end and back-end drivers. */ #define blkif_vdev_t u16 #define blkif_pdev_t u16 #define blkif_sector_t u64 /* * CMSG_BLKIF_FE_INTERFACE_STATUS_CHANGED: * Notify a guest about a status change on one of its block interfaces. * If the interface is DESTROYED or DOWN then the interface is disconnected: * 1. The shared-memory frame is available for reuse. * 2. Any unacknowledged messgaes pending on the interface were dropped. */ #define BLKIF_INTERFACE_STATUS_DESTROYED 0 /* Interface doesn't exist. */ #define BLKIF_INTERFACE_STATUS_DISCONNECTED 1 /* Exists but is disconnected. */ #define BLKIF_INTERFACE_STATUS_CONNECTED 2 /* Exists and is connected. */ typedef struct { u32 handle; /* 0 */ u32 status; /* 4 */ u16 evtchn; /* 8: (only if status == BLKIF_INTERFACE_STATUS_CONNECTED). */ domid_t domid; } PACKED blkif_fe_interface_status_changed_t; /* 10 bytes */ /* * CMSG_BLKIF_FE_DRIVER_STATUS_CHANGED: * Notify the domain controller that the front-end driver is DOWN or UP. * When the driver goes DOWN then the controller will send no more * status-change notifications. When the driver comes UP then the controller * will send a notification for each interface that currently exists. * If the driver goes DOWN while interfaces are still UP, the domain * will automatically take the interfaces DOWN. */ #define BLKIF_DRIVER_STATUS_DOWN 0 #define BLKIF_DRIVER_STATUS_UP 1 typedef struct { /* IN */ u32 status; /* 0: BLKIF_DRIVER_STATUS_??? */ /* OUT */ /* * Tells driver how many interfaces it should expect to immediately * receive notifications about. */ u32 nr_interfaces; /* 4 */ } PACKED blkif_fe_driver_status_changed_t; /* 8 bytes */ /* * CMSG_BLKIF_FE_INTERFACE_CONNECT: * If successful, the domain controller will acknowledge with a * STATUS_CONNECTED message. 
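 *
 * Illustrative sketch of issuing this message (hypothetical values:
 * 'shmem_mfn' is the machine frame of the shared block ring; the ctrl_if_*
 * helpers used here are declared later in this file, and the final
 * wait_state argument is port-specific):
 *
 *     control_msg_t                 cmsg;
 *     blkif_fe_interface_connect_t *req;
 *
 *     cmsg.type    = CMSG_BLKIF_FE;
 *     cmsg.subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT;
 *     cmsg.id      = 0;
 *     cmsg.length  = sizeof(blkif_fe_interface_connect_t);
 *     req = (blkif_fe_interface_connect_t*)cmsg.msg;
 *     req->handle      = 0;                // first (only) block interface
 *     req->shmem_frame = shmem_mfn;
 *     ctrl_if_send_message_block(&cmsg, nil, 0, 0);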
*/ typedef struct { u32 handle; /* 0 */ u32 __pad; memory_t shmem_frame; /* 8 */ MEMORY_PADDING; } PACKED blkif_fe_interface_connect_t; /* 16 bytes */ /* * CMSG_BLKIF_FE_INTERFACE_DISCONNECT: * If successful, the domain controller will acknowledge with a * STATUS_DISCONNECTED message. */ typedef struct { u32 handle; /* 0 */ } PACKED blkif_fe_interface_disconnect_t; /* 4 bytes */ /****************************************************************************** * BLOCK-INTERFACE BACKEND DEFINITIONS */ /* Messages from domain controller. */ #define CMSG_BLKIF_BE_CREATE 0 /* Create a new block-device interface. */ #define CMSG_BLKIF_BE_DESTROY 1 /* Destroy a block-device interface. */ #define CMSG_BLKIF_BE_CONNECT 2 /* Connect i/f to remote driver. */ #define CMSG_BLKIF_BE_DISCONNECT 3 /* Disconnect i/f from remote driver. */ #define CMSG_BLKIF_BE_VBD_CREATE 4 /* Create a new VBD for an interface. */ #define CMSG_BLKIF_BE_VBD_DESTROY 5 /* Delete a VBD from an interface. */ #define CMSG_BLKIF_BE_VBD_GROW 6 /* Append an extent to a given VBD. */ #define CMSG_BLKIF_BE_VBD_SHRINK 7 /* Remove last extent from a given VBD. */ /* Messages to domain controller. */ #define CMSG_BLKIF_BE_DRIVER_STATUS_CHANGED 32 /* * Message request/response definitions for block-device messages. */ typedef struct { blkif_sector_t sector_start; /* 0 */ blkif_sector_t sector_length; /* 8 */ blkif_pdev_t device; /* 16 */ u16 __pad; /* 18 */ } PACKED blkif_extent_t; /* 20 bytes */ /* Non-specific 'okay' return. */ #define BLKIF_BE_STATUS_OKAY 0 /* Non-specific 'error' return. */ #define BLKIF_BE_STATUS_ERROR 1 /* The following are specific error returns. */ #define BLKIF_BE_STATUS_INTERFACE_EXISTS 2 #define BLKIF_BE_STATUS_INTERFACE_NOT_FOUND 3 #define BLKIF_BE_STATUS_INTERFACE_CONNECTED 4 #define BLKIF_BE_STATUS_VBD_EXISTS 5 #define BLKIF_BE_STATUS_VBD_NOT_FOUND 6 #define BLKIF_BE_STATUS_OUT_OF_MEMORY 7 #define BLKIF_BE_STATUS_EXTENT_NOT_FOUND 8 #define BLKIF_BE_STATUS_MAPPING_ERROR 9 /* This macro can be used to create an array of descriptive error strings. */ #define BLKIF_BE_STATUS_ERRORS { \ "Okay", \ "Non-specific error", \ "Interface already exists", \ "Interface not found", \ "Interface is still connected", \ "VBD already exists", \ "VBD not found", \ "Out of memory", \ "Extent not found for VBD", \ "Could not map domain memory" } /* * CMSG_BLKIF_BE_CREATE: * When the driver sends a successful response then the interface is fully * created. The controller will send a DOWN notification to the front-end * driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Domain attached to new interface. */ u32 blkif_handle; /* 4: Domain-specific interface handle. */ /* OUT */ u32 status; /* 8 */ } PACKED blkif_be_create_t; /* 12 bytes */ /* * CMSG_BLKIF_BE_DESTROY: * When the driver sends a successful response then the interface is fully * torn down. The controller will send a DESTROYED notification to the * front-end driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Identify interface to be destroyed. */ u32 blkif_handle; /* 4: ...ditto... */ /* OUT */ u32 status; /* 8 */ } PACKED blkif_be_destroy_t; /* 12 bytes */ /* * CMSG_BLKIF_BE_CONNECT: * When the driver sends a successful response then the interface is fully * connected. The controller will send a CONNECTED notification to the * front-end driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Domain attached to new interface. */ u32 blkif_handle; /* 4: Domain-specific interface handle. */ memory_t shmem_frame; /* 8: Page cont. shared comms window. 
*/ MEMORY_PADDING; u32 evtchn; /* 16: Event channel for notifications. */ /* OUT */ u32 status; /* 20 */ } PACKED blkif_be_connect_t; /* 24 bytes */ /* * CMSG_BLKIF_BE_DISCONNECT: * When the driver sends a successful response then the interface is fully * disconnected. The controller will send a DOWN notification to the front-end * driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Domain attached to new interface. */ u32 blkif_handle; /* 4: Domain-specific interface handle. */ /* OUT */ u32 status; /* 8 */ } PACKED blkif_be_disconnect_t; /* 12 bytes */ /* CMSG_BLKIF_BE_VBD_CREATE */ typedef struct { /* IN */ domid_t domid; /* 0: Identify blkdev interface. */ u32 blkif_handle; /* 4: ...ditto... */ blkif_vdev_t vdevice; /* 8: Interface-specific id for this VBD. */ u16 readonly; /* 10: Non-zero -> VBD isn't writeable. */ /* OUT */ u32 status; /* 12 */ } PACKED blkif_be_vbd_create_t; /* 16 bytes */ /* CMSG_BLKIF_BE_VBD_DESTROY */ typedef struct { /* IN */ domid_t domid; /* 0: Identify blkdev interface. */ u32 blkif_handle; /* 4: ...ditto... */ blkif_vdev_t vdevice; /* 8: Interface-specific id of the VBD. */ u16 __pad; /* 10 */ /* OUT */ u32 status; /* 12 */ } PACKED blkif_be_vbd_destroy_t; /* 16 bytes */ /* CMSG_BLKIF_BE_VBD_GROW */ typedef struct { /* IN */ domid_t domid; /* 0: Identify blkdev interface. */ u32 blkif_handle; /* 4: ...ditto... */ blkif_extent_t extent; /* 8: Physical extent to append to VBD. */ blkif_vdev_t vdevice; /* 28: Interface-specific id of the VBD. */ u16 __pad; /* 30 */ /* OUT */ u32 status; /* 32 */ } PACKED blkif_be_vbd_grow_t; /* 36 bytes */ /* CMSG_BLKIF_BE_VBD_SHRINK */ typedef struct { /* IN */ domid_t domid; /* 0: Identify blkdev interface. */ u32 blkif_handle; /* 4: ...ditto... */ blkif_vdev_t vdevice; /* 8: Interface-specific id of the VBD. */ u16 __pad; /* 10 */ /* OUT */ u32 status; /* 12 */ } PACKED blkif_be_vbd_shrink_t; /* 16 bytes */ /* * CMSG_BLKIF_BE_DRIVER_STATUS_CHANGED: * Notify the domain controller that the back-end driver is DOWN or UP. * If the driver goes DOWN while interfaces are still UP, the controller * will automatically send DOWN notifications. */ typedef struct { u32 status; /* 0: BLKIF_DRIVER_STATUS_??? */ } PACKED blkif_be_driver_status_changed_t; /* 4 bytes */ /****************************************************************************** * NETWORK-INTERFACE FRONTEND DEFINITIONS */ /* Messages from domain controller to guest. */ #define CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED 0 /* Messages from guest to domain controller. */ #define CMSG_NETIF_FE_DRIVER_STATUS 32 #define CMSG_NETIF_FE_INTERFACE_CONNECT 33 #define CMSG_NETIF_FE_INTERFACE_DISCONNECT 34 #define CMSG_NETIF_FE_INTERFACE_QUERY 35 /* * CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED: * Notify a guest about a status change on one of its network interfaces. * If the interface is DESTROYED or DOWN then the interface is disconnected: * 1. The shared-memory frame is available for reuse. * 2. Any unacknowledged messgaes pending on the interface were dropped. */ #define NETIF_INTERFACE_STATUS_CLOSED 0 /* Interface doesn't exist. */ #define NETIF_INTERFACE_STATUS_DISCONNECTED 1 /* Exists but is disconnected. */ #define NETIF_INTERFACE_STATUS_CONNECTED 2 /* Exists and is connected. 
*/ #define NETIF_INTERFACE_STATUS_CHANGED 3 typedef struct { u32 handle; /* 0 */ u32 status; /* 4 */ u16 evtchn; /* 8: status == NETIF_INTERFACE_STATUS_CONNECTED */ u8 mac[6]; /* 10: status == NETIF_INTERFACE_STATUS_CONNECTED */ domid_t dom; } PACKED netif_fe_interface_status_changed_t; /* 18 bytes */ /* * CMSG_NETIF_FE_DRIVER_STATUS_CHANGED: * Notify the domain controller that the front-end driver is DOWN or UP. * When the driver goes DOWN then the controller will send no more * status-change notifications. When the driver comes UP then the controller * will send a notification for each interface that currently exists. * If the driver goes DOWN while interfaces are still UP, the domain * will automatically take the interfaces DOWN. */ #define NETIF_DRIVER_STATUS_DOWN 0 #define NETIF_DRIVER_STATUS_UP 1 typedef struct { /* IN */ u32 status; /* 0: NETIF_DRIVER_STATUS_??? */ /* OUT */ /* * Tells driver how many interfaces it should expect to immediately * receive notifications about. */ u32 nr_interfaces; /* 4 */ } PACKED netif_fe_driver_status_changed_t; /* 8 bytes */ /* * CMSG_NETIF_FE_INTERFACE_CONNECT: * If successful, the domain controller will acknowledge with a * STATUS_CONNECTED message. */ typedef struct { u32 handle; /* 0 */ u32 __pad; /* 4 */ memory_t tx_shmem_frame; /* 8 */ MEMORY_PADDING; memory_t rx_shmem_frame; /* 16 */ MEMORY_PADDING; } PACKED netif_fe_interface_connect_t; /* 24 bytes */ /* * CMSG_NETIF_FE_INTERFACE_DISCONNECT: * If successful, the domain controller will acknowledge with a * STATUS_DISCONNECTED message. */ typedef struct { u32 handle; /* 0 */ } PACKED netif_fe_interface_disconnect_t; /* 4 bytes */ #ifdef NOT /****************************************************************************** * NETWORK-INTERFACE BACKEND DEFINITIONS */ /* Messages from domain controller. */ #define CMSG_NETIF_BE_CREATE 0 /* Create a new net-device interface. */ #define CMSG_NETIF_BE_DESTROY 1 /* Destroy a net-device interface. */ #define CMSG_NETIF_BE_CONNECT 2 /* Connect i/f to remote driver. */ #define CMSG_NETIF_BE_DISCONNECT 3 /* Disconnect i/f from remote driver. */ /* Messages to domain controller. */ #define CMSG_NETIF_BE_DRIVER_STATUS_CHANGED 32 /* * Message request/response definitions for net-device messages. */ /* Non-specific 'okay' return. */ #define NETIF_BE_STATUS_OKAY 0 /* Non-specific 'error' return. */ #define NETIF_BE_STATUS_ERROR 1 /* The following are specific error returns. */ #define NETIF_BE_STATUS_INTERFACE_EXISTS 2 #define NETIF_BE_STATUS_INTERFACE_NOT_FOUND 3 #define NETIF_BE_STATUS_INTERFACE_CONNECTED 4 #define NETIF_BE_STATUS_OUT_OF_MEMORY 5 #define NETIF_BE_STATUS_MAPPING_ERROR 6 /* This macro can be used to create an array of descriptive error strings. */ #define NETIF_BE_STATUS_ERRORS { \ "Okay", \ "Non-specific error", \ "Interface already exists", \ "Interface not found", \ "Interface is still connected", \ "Out of memory", \ "Could not map domain memory" } /* * CMSG_NETIF_BE_CREATE: * When the driver sends a successful response then the interface is fully * created. The controller will send a DOWN notification to the front-end * driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Domain attached to new interface. */ u32 netif_handle; /* 4: Domain-specific interface handle. */ u8 mac[6]; /* 8 */ u16 __pad; /* 14 */ /* OUT */ u32 status; /* 16 */ } PACKED netif_be_create_t; /* 20 bytes */ /* * CMSG_NETIF_BE_DESTROY: * When the driver sends a successful response then the interface is fully * torn down. 
The controller will send a DESTROYED notification to the * front-end driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Identify interface to be destroyed. */ u32 netif_handle; /* 4: ...ditto... */ /* OUT */ u32 status; /* 8 */ } PACKED netif_be_destroy_t; /* 12 bytes */ /* * CMSG_NETIF_BE_CONNECT: * When the driver sends a successful response then the interface is fully * connected. The controller will send a CONNECTED notification to the * front-end driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Domain attached to new interface. */ u32 netif_handle; /* 4: Domain-specific interface handle. */ memory_t tx_shmem_frame; /* 8: Page cont. tx shared comms window. */ MEMORY_PADDING; memory_t rx_shmem_frame; /* 16: Page cont. rx shared comms window. */ MEMORY_PADDING; u16 evtchn; /* 24: Event channel for notifications. */ u16 __pad; /* 26 */ /* OUT */ u32 status; /* 28 */ } PACKED netif_be_connect_t; /* 32 bytes */ /* * CMSG_NETIF_BE_DISCONNECT: * When the driver sends a successful response then the interface is fully * disconnected. The controller will send a DOWN notification to the front-end * driver. */ typedef struct { /* IN */ domid_t domid; /* 0: Domain attached to new interface. */ u32 netif_handle; /* 4: Domain-specific interface handle. */ /* OUT */ u32 status; /* 8 */ } PACKED netif_be_disconnect_t; /* 12 bytes */ /* * CMSG_NETIF_BE_DRIVER_STATUS_CHANGED: * Notify the domain controller that the back-end driver is DOWN or UP. * If the driver goes DOWN while interfaces are still UP, the domain * will automatically send DOWN notifications. */ typedef struct { u32 status; /* 0: NETIF_DRIVER_STATUS_??? */ } PACKED netif_be_driver_status_changed_t; /* 4 bytes */ #endif /****************************************************************************** * SHUTDOWN DEFINITIONS */ /* * Subtypes for shutdown messages. */ #define CMSG_SHUTDOWN_POWEROFF 0 /* Clean shutdown (SHUTDOWN_poweroff). */ #define CMSG_SHUTDOWN_REBOOT 1 /* Clean shutdown (SHUTDOWN_reboot). */ #define CMSG_SHUTDOWN_SUSPEND 2 /* Create suspend info, then */ /* SHUTDOWN_suspend. */ #endif /* __DOMAIN_CONTROLLER_H__ */ /****************************************************************************** * ctrl_if.h * * Management functions for special interface to the domain controller. * * Copyright (c) 2004, K A Fraser */ #ifndef __ASM_XEN__CTRL_IF_H__ #define __ASM_XEN__CTRL_IF_H__ typedef control_msg_t ctrl_msg_t; /* * Callback function type. Called for asynchronous processing of received * request messages, and responses to previously-transmitted request messages. * The parameters are (@msg, @id). * @msg: Original request/response message (not a copy). The message can be * modified in-place by the handler (e.g., a response callback can * turn a request message into a response message in place). The message * is no longer accessible after the callback handler returns -- if the * message is required to persist for longer then it must be copied. * @id: (Response callbacks only) The 'id' that was specified when the * original request message was queued for transmission. */ typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long); /* * Send @msg to the domain controller. Execute @hnd when a response is * received, passing the response message and the specified @id. This * operation will not block: it will return -EAGAIN if there is no space. * Notes: * 1. The @msg is copied if it is transmitted and so can be freed after this * function returns. * 2. If @hnd is nil then no callback is executed. 
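 *
 * Illustrative sketch (hypothetical 'buf'/'n'; memmove() is assumed from the
 * kernel's usual string routines, and dp() is declared at the top of this
 * file):
 *
 *     ctrl_msg_t m;
 *
 *     m.type    = CMSG_CONSOLE;
 *     m.subtype = CMSG_CONSOLE_DATA;
 *     m.id      = 0;
 *     m.length  = n;                       // n <= sizeof(m.msg), i.e. 60
 *     memmove(m.msg, buf, n);
 *     if(ctrl_if_send_message_noblock(&m, nil, 0) < 0)
 *         dp("control ring full, console data dropped\n");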
*/ int ctrl_if_send_message_noblock( ctrl_msg_t *msg, ctrl_msg_handler_t hnd, unsigned long id); /* * Send @msg to the domain controller. Execute @hnd when a response is * received, passing the response message and the specified @id. This * operation will block until the message is sent, or a signal is received * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE). * Notes: * 1. The @msg is copied if it is transmitted and so can be freed after this * function returns. * 2. If @hnd is nil then no callback is executed. */ int ctrl_if_send_message_block( ctrl_msg_t *msg, ctrl_msg_handler_t hnd, unsigned long id, long wait_state); /* * Request a callback when there is /possibly/ space to immediately send a * message to the domain controller. This function returns 0 if there is * already space to transmit a message --- in this case the callback task /may/ * still be executed. If this function returns 1 then the callback /will/ be * executed when space becomes available. */ int ctrl_if_enqueue_space_callback(struct tq_struct *task); /* * Send a response (@msg) to a message from the domain controller. This will * never block. * Notes: * 1. The @msg is copied and so can be freed after this function returns. * 2. The @msg may be the original request message, modified in-place. */ void ctrl_if_send_response(ctrl_msg_t *msg); /* * Register a receiver for typed messages from the domain controller. The * handler (@hnd) is called for every received message of specified @type. * Returns TRUE (non-zero) if the handler was successfully registered. * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will * occur in a context in which it is safe to yield (i.e., process context). */ #define CALLBACK_IN_BLOCKING_CONTEXT 1 int ctrl_if_register_receiver( u8 type, ctrl_msg_handler_t hnd, unsigned int flags); /* * Unregister a receiver for typed messages from the domain controller. The * handler (@hnd) will not be executed after this function returns. */ void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd); /* Suspend/resume notifications. */ void ctrl_if_suspend(void); void ctrl_if_resume(void); /* Start-of-day setup. */ void ctrl_if_init(void); /* * Returns TRUE if there are no outstanding message requests at the domain * controller. This can be used to ensure that messages have really flushed * through when it is not possible to use the response-callback interface. * WARNING: If other subsystems are using the control interface then this * function might never return TRUE! */ int ctrl_if_transmitter_empty(void); /* !! DANGEROUS FUNCTION !! */ /* * Manually discard response messages from the domain controller. * WARNING: This is usually done automatically -- this function should only * be called when normal interrupt mechanisms are disabled! */ void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !!
*/ #endif /* __ASM_XEN__CONTROL_IF_H__ */ int HYPERVISOR_update_va_mapping( unsigned long pte_physical, unsigned long new_val, unsigned long flags); int HYPERVISOR_physdev_op(void *physdev_op); int HYPERVISOR_update_va_mapping_otherdomain( unsigned long page_nr, unsigned long new_val, unsigned long flags, uvlong domid); #ifdef shit long HYPERVISOR_set_timer_op(uvlong timeout); #endif int HYPERVISOR_dom0_op(dom0_op_t *dom0_op); int HYPERVISOR_network_op(void *network_op); int HYPERVISOR_block_io_op(void *block_io_op); int HYPERVISOR_set_trap_table(trap_info_t *table); int HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count); int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries); int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp); int HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address); int HYPERVISOR_fpu_taskswitch(void); int HYPERVISOR_yield(void); int HYPERVISOR_block(void); int HYPERVISOR_exit(void); int HYPERVISOR_stop(unsigned long srec); int HYPERVISOR_set_debugreg(int reg, unsigned long value); unsigned long HYPERVISOR_get_debugreg(int reg); int HYPERVISOR_update_descriptor( unsigned long pa, unsigned long word1, unsigned long word2); int HYPERVISOR_set_fast_trap(int idx); int HYPERVISOR_dom_mem_op(int op, unsigned long *extent_list, unsigned long num_extents, unsigned int order); int HYPERVISOR_multicall(void *call_list, int nr_calls); long HYPERVISOR_kbd_op(unsigned char op, unsigned char val); int HYPERVISOR_event_channel_op(void *op); int HYPERVISOR_xen_version(int cmd); int HYPERVISOR_console_io(int cmd, int count, char *str); /* random stuff. */ int bind_evtchn_to_irq(int evtchn, int want); void unbind_evtchn_from_irq(int evtchn); /****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004, K A Fraser */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ /* * LOW-LEVEL DEFINITIONS */ /* Entry point for notifications into Linux subsystems. */ void evtchn_do_upcall(struct Ureg *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); static void mask_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; //dp("M%d\n", port); synch_set_bit(port, &s->evtchn_mask[0]); } static void unmask_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, &s->evtchn_mask[0]); //dp("U%d\n", port); /* * The following is basically the equivalent of 'hw_resend_irq'. Just like * a real IO-APIC we 'lose the interrupt edge' if the channel is masked. */ if ( synch_test_bit (port, &s->evtchn_pending[0]) && !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) ) { s->vcpu_data[0].evtchn_upcall_pending = 1; if ( !s->vcpu_data[0].evtchn_upcall_mask ) evtchn_do_upcall(nil); } } static void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, &s->evtchn_pending[0]); } static void notify_via_evtchn(int port) { evtchn_op_t op; op.cmd = EVTCHNOP_send; op.u.send.local_port = port; // dp("notify via evtchn port %d\n", port); HYPERVISOR_event_channel_op(&op); } /* * CHARACTER-DEVICE DEFINITIONS */ /* /dev/xen/evtchn resides at device number major=10, minor=200 */ #define EVTCHN_MINOR 200 /* /dev/xen/evtchn ioctls: */ /* EVTCHN_RESET: Clear and reinit the event buffer. 
Clear error condition. */ #define EVTCHN_RESET _IO('E', 1) /* EVTCHN_BIND: Bind to the specified event-channel port. */ #define EVTCHN_BIND _IO('E', 2) /* EVTCHN_UNBIND: Unbind from the specified event-channel port. */ #define EVTCHN_UNBIND _IO('E', 3) #endif /* __ASM_EVTCHN_H__ */
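/*
 * Illustrative sketch (not part of the interface): the core of an event
 * upcall handler that scans evtchn_pending[] via evtchn_pending_sel, as
 * described in the shared_info_t comments earlier in this file.  'do_port()'
 * stands for the guest's per-port dispatch (which is expected to clear the
 * port's PENDING bit, e.g. with clear_evtchn() above); ffs() and xchg() are
 * declared at the top of this file.
 *
 *     shared_info_t *s = HYPERVISOR_shared_info;
 *     ulong sel, pend;
 *     int i, j, port;
 *
 *     s->vcpu_data[0].evtchn_upcall_pending = 0;
 *     sel = xchg((ulong*)&s->evtchn_pending_sel, 0);   // claim the selector
 *     while(sel != 0){
 *         i = ffs(sel) - 1;                            // selector bit -> word index
 *         sel &= ~(1UL<<i);
 *         pend = s->evtchn_pending[i] & ~s->evtchn_mask[i];
 *         while(pend != 0){
 *             j = ffs(pend) - 1;
 *             pend &= ~(1UL<<j);
 *             port = (i<<5) + j;
 *             do_port(port);                           // guest-specific dispatch
 *         }
 *     }
 */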