From d1423b28827cdc817d486df09db3cf98d4e83201 Mon Sep 17 00:00:00 2001 From: "kaf24@firebug.cl.cam.ac.uk" Date: Fri, 15 Sep 2006 11:07:25 +0100 Subject: [MINIOS] Fix to use new event-channel API properly. Signed-off-by: Mark Williamson --- extras/mini-os/events.c | 33 ++++++++++------------ extras/mini-os/include/events.h | 7 ++--- .../mini-os/include/x86/x86_32/hypercall-x86_32.h | 4 +-- 3 files changed, 20 insertions(+), 24 deletions(-) (limited to 'extras') diff --git a/extras/mini-os/events.c b/extras/mini-os/events.c index 57e835225d..e05f68d83d 100644 --- a/extras/mini-os/events.c +++ b/extras/mini-os/events.c @@ -88,19 +88,18 @@ void unbind_evtchn(evtchn_port_t port ) int bind_virq(uint32_t virq, evtchn_handler_t handler, void *data) { - evtchn_op_t op; + evtchn_bind_virq_t op; /* Try to bind the virq to a port */ - op.cmd = EVTCHNOP_bind_virq; - op.u.bind_virq.virq = virq; - op.u.bind_virq.vcpu = smp_processor_id(); + op.virq = virq; + op.vcpu = smp_processor_id(); - if ( HYPERVISOR_event_channel_op(&op) != 0 ) + if ( HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op) != 0 ) { printk("Failed to bind virtual IRQ %d\n", virq); return 1; } - bind_evtchn(op.u.bind_virq.port, handler, data); + bind_evtchn(op.port, handler, data); return 0; } @@ -151,14 +150,13 @@ void default_handler(evtchn_port_t port, struct pt_regs *regs, void *ignore) int evtchn_alloc_unbound(domid_t pal, evtchn_handler_t handler, void *data, evtchn_port_t *port) { - evtchn_op_t op; - op.cmd = EVTCHNOP_alloc_unbound; - op.u.alloc_unbound.dom = DOMID_SELF; - op.u.alloc_unbound.remote_dom = pal; - int err = HYPERVISOR_event_channel_op(&op); + evtchn_alloc_unbound_t op; + op.dom = DOMID_SELF; + op.remote_dom = pal; + int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op); if (err) return err; - *port = bind_evtchn(op.u.alloc_unbound.port, handler, data); + *port = bind_evtchn(op.port, handler, data); return err; } @@ -169,14 +167,13 @@ int evtchn_bind_interdomain(domid_t pal, evtchn_port_t remote_port, evtchn_handler_t handler, void *data, evtchn_port_t *local_port) { - evtchn_op_t op; - op.cmd = EVTCHNOP_bind_interdomain; - op.u.bind_interdomain.remote_dom = pal; - op.u.bind_interdomain.remote_port = remote_port; - int err = HYPERVISOR_event_channel_op(&op); + evtchn_bind_interdomain_t op; + op.remote_dom = pal; + op.remote_port = remote_port; + int err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op); if (err) return err; - evtchn_port_t port = op.u.bind_interdomain.local_port; + evtchn_port_t port = op.local_port; clear_evtchn(port); /* Without, handler gets invoked now! 
*/ *local_port = bind_evtchn(port, handler, data); return err; diff --git a/extras/mini-os/include/events.h b/extras/mini-os/include/events.h index cdb6311845..9adaf29711 100644 --- a/extras/mini-os/include/events.h +++ b/extras/mini-os/include/events.h @@ -39,10 +39,9 @@ int evtchn_bind_interdomain(domid_t pal, evtchn_port_t remote_port, static inline int notify_remote_via_evtchn(evtchn_port_t port) { - evtchn_op_t op; - op.cmd = EVTCHNOP_send; - op.u.send.port = port; - return HYPERVISOR_event_channel_op(&op); + evtchn_send_t op; + op.port = port; + return HYPERVISOR_event_channel_op(EVTCHNOP_send, &op); } diff --git a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h index 6556c4f7e2..70e8997a86 100644 --- a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h +++ b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h @@ -238,9 +238,9 @@ HYPERVISOR_update_va_mapping( static inline int HYPERVISOR_event_channel_op( - void *op) + int cmd, void *op) { - return _hypercall1(int, event_channel_op, op); + return _hypercall2(int, event_channel_op, cmd, op); } static inline int -- cgit v1.2.3 From ff4b9c213435c4de737323f00cea4653cdec7b59 Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Sat, 23 Sep 2006 14:00:38 +0100 Subject: [MINIOS] Added a new file arc/x86/setup.c and moved some x86 specific initialization stuff from kernel.c there. Two new functions are added to handle this. Signed-off-by: Dietmar Hahn --- extras/mini-os/arch/x86/setup.c | 108 ++++++++++++++++++++++++++++++++++++++++ extras/mini-os/include/x86/os.h | 5 ++ extras/mini-os/kernel.c | 69 ++----------------------- 3 files changed, 117 insertions(+), 65 deletions(-) create mode 100644 extras/mini-os/arch/x86/setup.c (limited to 'extras') diff --git a/extras/mini-os/arch/x86/setup.c b/extras/mini-os/arch/x86/setup.c new file mode 100644 index 0000000000..db24b41ebc --- /dev/null +++ b/extras/mini-os/arch/x86/setup.c @@ -0,0 +1,108 @@ +/****************************************************************************** + * common.c + * + * Common stuff special to x86 goes here. + * + * Copyright (c) 2002-2003, K A Fraser & R Neugebauer + * Copyright (c) 2005, Grzegorz Milos, Intel Research Cambridge + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#include + + +/* + * Shared page for communicating with the hypervisor. + * Events flags go here, for example. 
+ */ +shared_info_t *HYPERVISOR_shared_info; + +/* + * This structure contains start-of-day info, such as pagetable base pointer, + * address of the shared_info structure, and things like that. + */ +union start_info_union start_info_union; + +/* + * Just allocate the kernel stack here. SS:ESP is set up to point here + * in head.S. + */ +char stack[8192]; + +extern char shared_info[PAGE_SIZE]; + +/* Assembler interface fns in entry.S. */ +void hypervisor_callback(void); +void failsafe_callback(void); + +#if !defined(CONFIG_X86_PAE) +#define __pte(x) ((pte_t) { (x) } ) +#else +#define __pte(x) ({ unsigned long long _x = (x); \ + ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); }) +#endif + +static +shared_info_t *map_shared_info(unsigned long pa) +{ + if ( HYPERVISOR_update_va_mapping( + (unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG) ) + { + printk("Failed to map shared_info!!\n"); + do_exit(); + } + return (shared_info_t *)shared_info; +} + +void +arch_init(start_info_t *si) +{ + /* Copy the start_info struct to a globally-accessible area. */ + /* WARN: don't do printk before here, it uses information from + shared_info. Use xprintk instead. */ + memcpy(&start_info, si, sizeof(*si)); + + /* set up minimal memory infos */ + phys_to_machine_mapping = (unsigned long *)start_info.mfn_list; + + /* Grab the shared_info pointer and put it in a safe place. */ + HYPERVISOR_shared_info = map_shared_info(start_info.shared_info); + + /* Set up event and failsafe callback addresses. */ +#ifdef __i386__ + HYPERVISOR_set_callbacks( + __KERNEL_CS, (unsigned long)hypervisor_callback, + __KERNEL_CS, (unsigned long)failsafe_callback); +#else + HYPERVISOR_set_callbacks( + (unsigned long)hypervisor_callback, + (unsigned long)failsafe_callback, 0); +#endif + +} + +void +arch_print_info(void) +{ + printk(" stack: %p-%p\n", stack, stack + 8192); +} + + diff --git a/extras/mini-os/include/x86/os.h b/extras/mini-os/include/x86/os.h index 2b6ed5512b..eed5bda7c8 100644 --- a/extras/mini-os/include/x86/os.h +++ b/extras/mini-os/include/x86/os.h @@ -61,6 +61,11 @@ extern shared_info_t *HYPERVISOR_shared_info; void trap_init(void); +void arch_init(start_info_t *si); +void arch_print_info(void); + + + /* diff --git a/extras/mini-os/kernel.c b/extras/mini-os/kernel.c index d66dd35ab0..17ccbad914 100644 --- a/extras/mini-os/kernel.c +++ b/extras/mini-os/kernel.c @@ -39,49 +39,6 @@ #include #include -/* - * Shared page for communicating with the hypervisor. - * Events flags go here, for example. - */ -shared_info_t *HYPERVISOR_shared_info; - -/* - * This structure contains start-of-day info, such as pagetable base pointer, - * address of the shared_info structure, and things like that. - */ -union start_info_union start_info_union; - -/* - * Just allocate the kernel stack here. SS:ESP is set up to point here - * in head.S. - */ -char stack[8192]; - - -/* Assembler interface fns in entry.S. 
*/ -void hypervisor_callback(void); -void failsafe_callback(void); - -extern char shared_info[PAGE_SIZE]; - -#if !defined(CONFIG_X86_PAE) -#define __pte(x) ((pte_t) { (x) } ) -#else -#define __pte(x) ({ unsigned long long _x = (x); \ - ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); }) -#endif - -static shared_info_t *map_shared_info(unsigned long pa) -{ - if ( HYPERVISOR_update_va_mapping( - (unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG) ) - { - printk("Failed to map shared_info!!\n"); - do_exit(); - } - return (shared_info_t *)shared_info; -} - u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; @@ -126,27 +83,8 @@ void start_kernel(start_info_t *si) (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(hello), hello); - /* Copy the start_info struct to a globally-accessible area. */ - /* WARN: don't do printk before here, it uses information from - shared_info. Use xprintk instead. */ - memcpy(&start_info, si, sizeof(*si)); - - /* set up minimal memory infos */ - phys_to_machine_mapping = (unsigned long *)start_info.mfn_list; - - /* Grab the shared_info pointer and put it in a safe place. */ - HYPERVISOR_shared_info = map_shared_info(start_info.shared_info); - - /* Set up event and failsafe callback addresses. */ -#ifdef __i386__ - HYPERVISOR_set_callbacks( - __KERNEL_CS, (unsigned long)hypervisor_callback, - __KERNEL_CS, (unsigned long)failsafe_callback); -#else - HYPERVISOR_set_callbacks( - (unsigned long)hypervisor_callback, - (unsigned long)failsafe_callback, 0); -#endif + arch_init(si); + trap_init(); /* ENABLE EVENT DELIVERY. This is disabled at start of day. */ @@ -163,7 +101,8 @@ void start_kernel(start_info_t *si) printk(" flags: 0x%x\n", (unsigned int)si->flags); printk(" cmd_line: %s\n", si->cmd_line ? (const char *)si->cmd_line : "NULL"); - printk(" stack: %p-%p\n", stack, stack + 8192); + + arch_print_info(); setup_xen_features(); -- cgit v1.2.3 From 4aad88265bede002cb3e8a01bc6a8581c3424f6e Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Wed, 15 Nov 2006 09:33:01 +0000 Subject: [MINIOS] Add timer support. Based on an original patch by Robert Kaiser. Signed-off-by: Grzegorz Milos --- extras/mini-os/README | 4 +-- extras/mini-os/include/sched.h | 3 ++ extras/mini-os/include/time.h | 6 ++-- extras/mini-os/kernel.c | 14 +++++++++ extras/mini-os/sched.c | 71 +++++++++++++++++++++++++++++++++++++++--- extras/mini-os/time.c | 26 +++++----------- 6 files changed, 97 insertions(+), 27 deletions(-) (limited to 'extras') diff --git a/extras/mini-os/README b/extras/mini-os/README index d4bab382af..afd880733a 100644 --- a/extras/mini-os/README +++ b/extras/mini-os/README @@ -26,5 +26,5 @@ Stuff it doesn't show: - to start it do the following in domain0 (assuming xend is running) # xm create domain_config -this starts the kernel and prints out a bunch of stuff and then every -1000 timer interrupts the system time. +this starts the kernel and prints out a bunch of stuff and then once +every second the system time. 
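
The once-per-second timestamp described in the README text above comes from the periodic thread this patch adds to kernel.c further down, built on the sleep() primitive added to sched.c in the same patch. A minimal sketch of the pattern, mirroring (and trimming) the code added below:

    void periodic_thread(void *p)
    {
        struct timeval tv;
        for(;;)
        {
            gettimeofday(&tv);
            printk("T(s=%ld us=%ld)\n", tv.tv_sec, tv.tv_usec);
            sleep(1000);   /* milliseconds; the idle loop's wake_expired() wakes us */
        }
    }
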
diff --git a/extras/mini-os/include/sched.h b/extras/mini-os/include/sched.h index ce57be8a6c..058941a0a1 100644 --- a/extras/mini-os/include/sched.h +++ b/extras/mini-os/include/sched.h @@ -2,6 +2,7 @@ #define __SCHED_H__ #include +#include struct thread { @@ -11,6 +12,7 @@ struct thread unsigned long ip; /* Instruction pointer */ struct list_head thread_list; u32 flags; + s_time_t wakeup_time; }; @@ -36,5 +38,6 @@ static inline struct thread* get_current(void) void wake(struct thread *thread); void block(struct thread *thread); +void sleep(u32 millisecs); #endif /* __SCHED_H__ */ diff --git a/extras/mini-os/include/time.h b/extras/mini-os/include/time.h index aacf1d17e7..468a8d9173 100644 --- a/extras/mini-os/include/time.h +++ b/extras/mini-os/include/time.h @@ -7,8 +7,9 @@ * File: time.h * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk) * Changes: Grzegorz Milos (gm281@cam.ac.uk) + * Robert Kaiser (kaiser@informatik.fh-wiesbaden.de) * - * Date: Jul 2003, changesJun 2005 + * Date: Jul 2003, changes: Jun 2005, Sep 2006 * * Environment: Xen Minimal OS * Description: Time and timer functions @@ -57,7 +58,8 @@ struct timespec { void init_time(void); s_time_t get_s_time(void); s_time_t get_v_time(void); +u64 monotonic_clock(void); void gettimeofday(struct timeval *tv); -void block_domain(u32 millisecs); +void block_domain(s_time_t until); #endif /* _TIME_H_ */ diff --git a/extras/mini-os/kernel.c b/extras/mini-os/kernel.c index 17ccbad914..72fc65ae1e 100644 --- a/extras/mini-os/kernel.c +++ b/extras/mini-os/kernel.c @@ -6,6 +6,7 @@ * * Copyright (c) 2002-2003, K A Fraser & R Neugebauer * Copyright (c) 2005, Grzegorz Milos, Intel Research Cambridge + * Copyright (c) 2006, Robert Kaiser, FH Wiesbaden * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to @@ -66,11 +67,24 @@ void xenbus_tester(void *p) /* test_xenbus(); */ } +void periodic_thread(void *p) +{ + struct timeval tv; + printk("Periodic thread started.\n"); + for(;;) + { + gettimeofday(&tv); + printk("T(s=%ld us=%ld)\n", tv.tv_sec, tv.tv_usec); + sleep(1000); + } +} + /* This should be overridden by the application we are linked against. */ __attribute__((weak)) int app_main(start_info_t *si) { printk("Dummy main: start_info=%p\n", si); create_thread("xenbus_tester", xenbus_tester, si); + create_thread("periodic_thread", periodic_thread, si); return 0; } diff --git a/extras/mini-os/sched.c b/extras/mini-os/sched.c index ee1adb6549..e6c9cc3771 100644 --- a/extras/mini-os/sched.c +++ b/extras/mini-os/sched.c @@ -5,7 +5,7 @@ * * File: sched.c * Author: Grzegorz Milos - * Changes: + * Changes: Robert Kaiser * * Date: Aug 2005 * @@ -142,6 +142,54 @@ void inline print_runqueue(void) printk("\n"); } +/* Find the time when the next timeout expires. If this is more than + 10 seconds from now, return 10 seconds from now. 
*/ +static s_time_t blocking_time(void) +{ + struct thread *thread; + struct list_head *iterator; + s_time_t min_wakeup_time; + unsigned long flags; + local_irq_save(flags); + /* default-block the domain for 10 seconds: */ + min_wakeup_time = NOW() + SECONDS(10); + + /* Thread list needs to be protected */ + list_for_each(iterator, &idle_thread->thread_list) + { + thread = list_entry(iterator, struct thread, thread_list); + if(!is_runnable(thread) && thread->wakeup_time != 0LL) + { + if(thread->wakeup_time < min_wakeup_time) + { + min_wakeup_time = thread->wakeup_time; + } + } + } + local_irq_restore(flags); + return(min_wakeup_time); +} + +/* Wake up all threads with expired timeouts. */ +static void wake_expired(void) +{ + struct thread *thread; + struct list_head *iterator; + s_time_t now = NOW(); + unsigned long flags; + local_irq_save(flags); + /* Thread list needs to be protected */ + list_for_each(iterator, &idle_thread->thread_list) + { + thread = list_entry(iterator, struct thread, thread_list); + if(!is_runnable(thread) && thread->wakeup_time != 0LL) + { + if(thread->wakeup_time <= now) + wake(thread); + } + } + local_irq_restore(flags); +} void schedule(void) { @@ -229,8 +277,9 @@ struct thread* create_thread(char *name, void (*function)(void *), void *data) stack_push(thread, (unsigned long) data); thread->ip = (unsigned long) thread_starter; - /* Not runable, not exited */ + /* Not runable, not exited, not sleeping */ thread->flags = 0; + thread->wakeup_time = 0LL; set_runnable(thread); local_irq_save(flags); if(idle_thread != NULL) { @@ -247,20 +296,34 @@ struct thread* create_thread(char *name, void (*function)(void *), void *data) void block(struct thread *thread) { + thread->wakeup_time = 0LL; clear_runnable(thread); } +void sleep(u32 millisecs) +{ + struct thread *thread = get_current(); + thread->wakeup_time = NOW() + MILLISECS(millisecs); + clear_runnable(thread); + schedule(); +} + void wake(struct thread *thread) { + thread->wakeup_time = 0LL; set_runnable(thread); } void idle_thread_fn(void *unused) { + s_time_t until; for(;;) { schedule(); - block_domain(10000); + /* block until the next timeout expires, or for 10 secs, whichever comes first */ + until = blocking_time(); + block_domain(until); + wake_expired(); } } @@ -278,7 +341,7 @@ void run_idle_thread(void) "push %1\n\t" "ret" :"=m" (idle_thread->sp) - :"m" (idle_thread->ip)); + :"m" (idle_thread->ip)); #endif } diff --git a/extras/mini-os/time.c b/extras/mini-os/time.c index 5567b5d391..0fad40f6de 100644 --- a/extras/mini-os/time.c +++ b/extras/mini-os/time.c @@ -3,6 +3,7 @@ * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge * (C) 2002-2003 - Keir Fraser - University of Cambridge * (C) 2005 - Grzegorz Milos - Intel Research Cambridge + * (C) 2006 - Robert Kaiser - FH Wiesbaden **************************************************************************** * * File: time.c @@ -194,21 +195,15 @@ void gettimeofday(struct timeval *tv) } -static void print_current_time(void) -{ - struct timeval tv; - - gettimeofday(&tv); - printk("T(s=%ld us=%ld)\n", tv.tv_sec, tv.tv_usec); -} - - -void block_domain(u32 millisecs) +void block_domain(s_time_t until) { struct timeval tv; gettimeofday(&tv); - HYPERVISOR_set_timer_op(monotonic_clock() + 1000000LL * (s64) millisecs); - HYPERVISOR_sched_op(SCHEDOP_block, 0); + if(monotonic_clock() < until) + { + HYPERVISOR_set_timer_op(until); + HYPERVISOR_sched_op(SCHEDOP_block, 0); + } } @@ -217,15 +212,8 @@ void block_domain(u32 millisecs) */ static void timer_handler(evtchn_port_t 
ev, struct pt_regs *regs, void *ign) { - static int i; - get_time_values_from_xen(); update_wallclock(); - i++; - if (i >= 1000) { - print_current_time(); - i = 0; - } } -- cgit v1.2.3 From 7d58e874105e17c7cd15c1734e66dd8ccb14d996 Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Fri, 17 Nov 2006 09:16:27 +0000 Subject: [MINIOS] Move initialisation of events (masking event channels) earlier during the boot process. Otherwise 64bit guests would sometimes crash. Signed-off-by: Grzegorz Milos --- extras/mini-os/include/x86/os.h | 2 ++ extras/mini-os/kernel.c | 6 +++--- extras/mini-os/mm.c | 7 ++----- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'extras') diff --git a/extras/mini-os/include/x86/os.h b/extras/mini-os/include/x86/os.h index eed5bda7c8..80f5586a49 100644 --- a/extras/mini-os/include/x86/os.h +++ b/extras/mini-os/include/x86/os.h @@ -19,6 +19,8 @@ #include #include +#define USED __attribute__ ((used)) + extern void do_exit(void); #define BUG do_exit diff --git a/extras/mini-os/kernel.c b/extras/mini-os/kernel.c index 72fc65ae1e..9b3c471fe7 100644 --- a/extras/mini-os/kernel.c +++ b/extras/mini-os/kernel.c @@ -116,6 +116,9 @@ void start_kernel(start_info_t *si) printk(" cmd_line: %s\n", si->cmd_line ? (const char *)si->cmd_line : "NULL"); + /* Set up events. */ + init_events(); + arch_print_info(); setup_xen_features(); @@ -123,9 +126,6 @@ void start_kernel(start_info_t *si) /* Init memory management. */ init_mm(); - /* Set up events. */ - init_events(); - /* Init time and timers. */ init_time(); diff --git a/extras/mini-os/mm.c b/extras/mini-os/mm.c index 03fb3e9ab5..85f9e92216 100644 --- a/extras/mini-os/mm.c +++ b/extras/mini-os/mm.c @@ -148,7 +148,7 @@ static chunk_head_t free_tail[FREELIST_SIZE]; * Prints allocation[0/1] for @nr_pages, starting at @start * address (virtual). */ -static void print_allocation(void *start, int nr_pages) +USED static void print_allocation(void *start, int nr_pages) { unsigned long pfn_start = virt_to_pfn(start); int count; @@ -163,7 +163,7 @@ static void print_allocation(void *start, int nr_pages) * Prints chunks (making them with letters) for @nr_pages starting * at @start (virtual). */ -static void print_chunks(void *start, int nr_pages) +USED static void print_chunks(void *start, int nr_pages) { char chunks[1001], current='A'; int order, count; @@ -408,7 +408,6 @@ void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn, do_exit(); break; } - /* Update the entry */ #if defined(__x86_64__) tab = pte_to_virt(tab[l4_table_offset(pt_page)]); @@ -446,7 +445,6 @@ void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn, printk("ERROR: mmu_update failed\n"); do_exit(); } - *pt_pfn += 1; } @@ -581,7 +579,6 @@ void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn) } start_address += PAGE_SIZE; } - *start_pfn = pt_pfn; } -- cgit v1.2.3 From 5d3a4a385800111da6ea0b6e607685c0c42a37fa Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Fri, 17 Nov 2006 09:16:51 +0000 Subject: [MINIOS] Event channel hypercall update for 64-bit guests. 
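
This mirrors the earlier x86_32 change: HYPERVISOR_event_channel_op() now takes the
sub-command as an explicit first argument plus a pointer to the per-subcommand
argument structure, so callers no longer build an evtchn_op_t wrapper. For example,
as in the already-converted notify_remote_via_evtchn() in events.h:

    evtchn_send_t op;
    op.port = port;
    return HYPERVISOR_event_channel_op(EVTCHNOP_send, &op);
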
Signed-off-by: Grzegorz Milos --- extras/mini-os/Makefile | 6 +++++- extras/mini-os/include/events.h | 2 +- extras/mini-os/include/x86/x86_64/hypercall-x86_64.h | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'extras') diff --git a/extras/mini-os/Makefile b/extras/mini-os/Makefile index 9bc4eb37f9..a389bc5d03 100644 --- a/extras/mini-os/Makefile +++ b/extras/mini-os/Makefile @@ -122,6 +122,7 @@ clean: rm -f *.o *~ core $(TARGET).elf $(TARGET).raw $(TARGET) $(TARGET).gz rm -f libminios.a find . -type l | xargs rm -f + rm -f tags TAGS %.o: %.c $(HDRS) Makefile $(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@ @@ -137,4 +138,7 @@ endef cscope: $(all_sources) > cscope.files cscope -k -b -q - + +.PHONY: tags +tags: + $(all_sources) | xargs ctags diff --git a/extras/mini-os/include/events.h b/extras/mini-os/include/events.h index 9adaf29711..45b47c1549 100644 --- a/extras/mini-os/include/events.h +++ b/extras/mini-os/include/events.h @@ -20,7 +20,7 @@ #define _EVENTS_H_ #include -#include +#include typedef void (*evtchn_handler_t)(evtchn_port_t, struct pt_regs *, void *); diff --git a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h index 6a68a10b02..159149f48b 100644 --- a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h +++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h @@ -235,9 +235,9 @@ HYPERVISOR_update_va_mapping( static inline int HYPERVISOR_event_channel_op( - void *op) + int cmd, void *op) { - return _hypercall1(int, event_channel_op, op); + return _hypercall2(int, event_channel_op, cmd, op); } static inline int -- cgit v1.2.3 From 4fa75a7c3b58f11b028905f6a1d3af3c871b4b7a Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Fri, 17 Nov 2006 09:18:28 +0000 Subject: [MINIOS] Refactor spinlock header for multi-arch support. I separated the spinlock parts special to the x86 architecture and moved these to include/x86/arch_spinlock.h. The common code is now in include/spinlock.h. Signed-off-by: Dietmar Hahn --- extras/mini-os/include/spinlock.h | 55 +++++++++++++ extras/mini-os/include/x86/arch_spinlock.h | 93 ++++++++++++++++++++++ extras/mini-os/include/x86/spinlock.h | 121 ----------------------------- 3 files changed, 148 insertions(+), 121 deletions(-) create mode 100644 extras/mini-os/include/spinlock.h create mode 100644 extras/mini-os/include/x86/arch_spinlock.h delete mode 100644 extras/mini-os/include/x86/spinlock.h (limited to 'extras') diff --git a/extras/mini-os/include/spinlock.h b/extras/mini-os/include/spinlock.h new file mode 100644 index 0000000000..ecfe73627e --- /dev/null +++ b/extras/mini-os/include/spinlock.h @@ -0,0 +1,55 @@ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#include + +/* + * Your basic SMP spinlocks, allowing only a single CPU anywhere + */ + +typedef struct { + volatile unsigned int slock; +} spinlock_t; + + +#include "arch_spinlock.h" + + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPIN_LOCK_UNLOCKED ARCH_SPIN_LOCK_UNLOCKED + +#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) + +/* + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * We make no fairness assumptions. They have a cost. + */ + +#define spin_is_locked(x) arch_spin_is_locked(x) + +#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) + + +#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? 
\ + 1 : ({ 0;});}) + +#define _spin_lock(lock) \ +do { \ + _raw_spin_lock(lock); \ +} while(0) + +#define _spin_unlock(lock) \ +do { \ + _raw_spin_unlock(lock); \ +} while (0) + + +#define spin_lock(lock) _spin_lock(lock) +#define spin_unlock(lock) _spin_unlock(lock) + +#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED + +#endif diff --git a/extras/mini-os/include/x86/arch_spinlock.h b/extras/mini-os/include/x86/arch_spinlock.h new file mode 100644 index 0000000000..a181ed3c92 --- /dev/null +++ b/extras/mini-os/include/x86/arch_spinlock.h @@ -0,0 +1,93 @@ + + +#ifndef __ARCH_ASM_SPINLOCK_H +#define __ARCH_ASM_SPINLOCK_H + +#include + + +#define ARCH_SPIN_LOCK_UNLOCKED (spinlock_t) { 1 } + +/* + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * We make no fairness assumptions. They have a cost. + */ + +#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) +#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) + +#define spin_lock_string \ + "1:\n" \ + LOCK \ + "decb %0\n\t" \ + "jns 3f\n" \ + "2:\t" \ + "rep;nop\n\t" \ + "cmpb $0,%0\n\t" \ + "jle 2b\n\t" \ + "jmp 1b\n" \ + "3:\n\t" + +#define spin_lock_string_flags \ + "1:\n" \ + LOCK \ + "decb %0\n\t" \ + "jns 4f\n\t" \ + "2:\t" \ + "testl $0x200, %1\n\t" \ + "jz 3f\n\t" \ + "#sti\n\t" \ + "3:\t" \ + "rep;nop\n\t" \ + "cmpb $0, %0\n\t" \ + "jle 3b\n\t" \ + "#cli\n\t" \ + "jmp 1b\n" \ + "4:\n\t" + +/* + * This works. Despite all the confusion. + * (except on PPro SMP or if we are using OOSTORE) + * (PPro errata 66, 92) + */ + +#define spin_unlock_string \ + "xchgb %b0, %1" \ + :"=q" (oldval), "=m" (lock->slock) \ + :"0" (oldval) : "memory" + +static inline void _raw_spin_unlock(spinlock_t *lock) +{ + char oldval = 1; + __asm__ __volatile__( + spin_unlock_string + ); +} + +static inline int _raw_spin_trylock(spinlock_t *lock) +{ + char oldval; + __asm__ __volatile__( + "xchgb %b0,%1\n" + :"=q" (oldval), "=m" (lock->slock) + :"0" (0) : "memory"); + return oldval > 0; +} + +static inline void _raw_spin_lock(spinlock_t *lock) +{ + __asm__ __volatile__( + spin_lock_string + :"=m" (lock->slock) : : "memory"); +} + +static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) +{ + __asm__ __volatile__( + spin_lock_string_flags + :"=m" (lock->slock) : "r" (flags) : "memory"); +} + +#endif diff --git a/extras/mini-os/include/x86/spinlock.h b/extras/mini-os/include/x86/spinlock.h deleted file mode 100644 index 4274cd2869..0000000000 --- a/extras/mini-os/include/x86/spinlock.h +++ /dev/null @@ -1,121 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#include - -/* - * Your basic SMP spinlocks, allowing only a single CPU anywhere - */ - -typedef struct { - volatile unsigned int slock; -} spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 } - -#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) - -/* - * Simple spin lock operations. There are two variants, one clears IRQ's - * on the local processor, one does not. - * - * We make no fairness assumptions. They have a cost. 
- */ - -#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) -#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) - -#define spin_lock_string \ - "1:\n" \ - LOCK \ - "decb %0\n\t" \ - "jns 3f\n" \ - "2:\t" \ - "rep;nop\n\t" \ - "cmpb $0,%0\n\t" \ - "jle 2b\n\t" \ - "jmp 1b\n" \ - "3:\n\t" - -#define spin_lock_string_flags \ - "1:\n" \ - LOCK \ - "decb %0\n\t" \ - "jns 4f\n\t" \ - "2:\t" \ - "testl $0x200, %1\n\t" \ - "jz 3f\n\t" \ - "#sti\n\t" \ - "3:\t" \ - "rep;nop\n\t" \ - "cmpb $0, %0\n\t" \ - "jle 3b\n\t" \ - "#cli\n\t" \ - "jmp 1b\n" \ - "4:\n\t" - -/* - * This works. Despite all the confusion. - * (except on PPro SMP or if we are using OOSTORE) - * (PPro errata 66, 92) - */ - -#define spin_unlock_string \ - "xchgb %b0, %1" \ - :"=q" (oldval), "=m" (lock->slock) \ - :"0" (oldval) : "memory" - -static inline void _raw_spin_unlock(spinlock_t *lock) -{ - char oldval = 1; - __asm__ __volatile__( - spin_unlock_string - ); -} - -static inline int _raw_spin_trylock(spinlock_t *lock) -{ - char oldval; - __asm__ __volatile__( - "xchgb %b0,%1\n" - :"=q" (oldval), "=m" (lock->slock) - :"0" (0) : "memory"); - return oldval > 0; -} - -static inline void _raw_spin_lock(spinlock_t *lock) -{ - __asm__ __volatile__( - spin_lock_string - :"=m" (lock->slock) : : "memory"); -} - -static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) -{ - __asm__ __volatile__( - spin_lock_string_flags - :"=m" (lock->slock) : "r" (flags) : "memory"); -} - -#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \ - 1 : ({ 0;});}) - -#define _spin_lock(lock) \ -do { \ - _raw_spin_lock(lock); \ -} while(0) - -#define _spin_unlock(lock) \ -do { \ - _raw_spin_unlock(lock); \ -} while (0) - - -#define spin_lock(lock) _spin_lock(lock) -#define spin_unlock(lock) _spin_unlock(lock) - -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED - -#endif -- cgit v1.2.3 From 8f97ebc65a82ca41933884ac894a342090d3e532 Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Mon, 20 Nov 2006 10:33:35 +0000 Subject: [MINIOS] Delay enabling event delivery at start of day. Signed-off-by: Grzegorz Milos --- extras/mini-os/kernel.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'extras') diff --git a/extras/mini-os/kernel.c b/extras/mini-os/kernel.c index 9b3c471fe7..152499ceb4 100644 --- a/extras/mini-os/kernel.c +++ b/extras/mini-os/kernel.c @@ -101,9 +101,6 @@ void start_kernel(start_info_t *si) trap_init(); - /* ENABLE EVENT DELIVERY. This is disabled at start of day. */ - __sti(); - /* print out some useful information */ printk("Xen Minimal OS!\n"); printk("start_info: %p\n", si); @@ -119,6 +116,9 @@ void start_kernel(start_info_t *si) /* Set up events. */ init_events(); + /* ENABLE EVENT DELIVERY. This is disabled at start of day. */ + __sti(); + arch_print_info(); setup_xen_features(); -- cgit v1.2.3 From 531616bf98610766d45f9747f737db0c26c8207a Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Wed, 22 Nov 2006 10:11:36 +0000 Subject: [MINIOS] Refactored mm.c and sched.c. x86 arch specific code got moved to arch/x86/mm.c and arch/x86/sched.c. Header files were also refactored: arch specific code got moved to include/x86/arch_mm.h and include/x86/sched_mm.h. 
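
The generic files keep the policy (run queue, buddy allocator) and call into per-arch
hooks for the mechanism. A rough sketch of the resulting boundary on the memory side;
the generic wrapper shown here is an assumption about how mm.c uses the new hooks
(init_page_allocator() is the pre-existing allocator entry point, not shown in this
hunk), not a quote from the patch:

    /* generic mm.c (sketch) */
    void init_mm(void)
    {
        unsigned long start_pfn, max_pfn;

        /* arch/x86/mm.c: print layout, build pagetables */
        arch_init_mm(&start_pfn, &max_pfn);
        /* hand the remaining frames to the buddy allocator */
        init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
        arch_init_demand_mapping_area(max_pfn);
    }

On the scheduler side, context switching likewise goes through the arch_switch_threads()
hook provided by include/x86/arch_sched.h (see the new switch_threads() macro in sched.h).
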
Signed-off-by: Dietmar Hahn Signed-off-by: Grzegorz Milos --- extras/mini-os/Makefile | 7 +- extras/mini-os/arch/x86/mm.c | 428 ++++++++++++++++++++++++++++++++ extras/mini-os/arch/x86/sched.c | 150 +++++++++++ extras/mini-os/include/mm.h | 175 +------------ extras/mini-os/include/sched.h | 26 +- extras/mini-os/include/x86/arch_mm.h | 209 ++++++++++++++++ extras/mini-os/include/x86/arch_sched.h | 58 +++++ extras/mini-os/mm.c | 376 +--------------------------- extras/mini-os/sched.c | 137 ---------- 9 files changed, 875 insertions(+), 691 deletions(-) create mode 100644 extras/mini-os/arch/x86/mm.c create mode 100644 extras/mini-os/arch/x86/sched.c create mode 100644 extras/mini-os/include/x86/arch_mm.h create mode 100644 extras/mini-os/include/x86/arch_sched.h (limited to 'extras') diff --git a/extras/mini-os/Makefile b/extras/mini-os/Makefile index a389bc5d03..01176a625f 100644 --- a/extras/mini-os/Makefile +++ b/extras/mini-os/Makefile @@ -55,9 +55,10 @@ EXTRA_SRC += arch/$(EXTRA_INC) endif ifeq ($(TARGET_ARCH),ia64) -CFLAGS += -mfixed-range=f12-f15,f32-f127 -ASFLAGS += -x assembler-with-cpp -ansi -Wall -ASFLAGS += -mfixed-range=f12-f15,f32-f127 +CFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -mconstant-gp +ASFLAGS += -x assembler-with-cpp -Wall +ASFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -fomit-frame-pointer +ASFLAGS += -fno-builtin -fno-common -fno-strict-aliasing -mconstant-gp ARCH_LINKS = IA64_LINKS # Special link on ia64 needed define arch_links [ -e include/ia64/asm-xsi-offsets.h ] || ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/ia64/asm-xsi-offsets.h diff --git a/extras/mini-os/arch/x86/mm.c b/extras/mini-os/arch/x86/mm.c new file mode 100644 index 0000000000..20031a002e --- /dev/null +++ b/extras/mini-os/arch/x86/mm.c @@ -0,0 +1,428 @@ +/* + **************************************************************************** + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge + * (C) 2005 - Grzegorz Milos - Intel Research Cambridge + **************************************************************************** + * + * File: mm.c + * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk) + * Changes: Grzegorz Milos + * + * Date: Aug 2003, chages Aug 2005 + * + * Environment: Xen Minimal OS + * Description: memory management related functions + * contains buddy page allocator from Xen. + * + **************************************************************************** + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#ifdef MM_DEBUG +#define DEBUG(_f, _a...) \ + printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a) +#else +#define DEBUG(_f, _a...) ((void)0) +#endif + +unsigned long *phys_to_machine_mapping; +extern char *stack; +extern void page_walk(unsigned long virt_addr); + +void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn, + unsigned long offset, unsigned long level) +{ + pgentry_t *tab = (pgentry_t *)start_info.pt_base; + unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn); + unsigned long prot_e, prot_t, pincmd; + mmu_update_t mmu_updates[1]; + struct mmuext_op pin_request; + + DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, " + "prev_l_mfn=%lx, offset=%lx", + level, *pt_pfn, prev_l_mfn, offset); + + /* We need to clear the page, otherwise we might fail to map it + as a page table page */ + memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE); + + switch ( level ) + { + case L1_FRAME: + prot_e = L1_PROT; + prot_t = L2_PROT; + pincmd = MMUEXT_PIN_L1_TABLE; + break; +#if defined(__x86_64__) || defined(CONFIG_X86_PAE) + case L2_FRAME: + prot_e = L2_PROT; + prot_t = L3_PROT; + pincmd = MMUEXT_PIN_L2_TABLE; + break; +#endif +#if defined(__x86_64__) + case L3_FRAME: + prot_e = L3_PROT; + prot_t = L4_PROT; + pincmd = MMUEXT_PIN_L3_TABLE; + break; +#endif + default: + printk("new_pt_frame() called with invalid level number %d\n", level); + do_exit(); + break; + } + + /* Update the entry */ +#if defined(__x86_64__) + tab = pte_to_virt(tab[l4_table_offset(pt_page)]); + tab = pte_to_virt(tab[l3_table_offset(pt_page)]); +#endif +#if defined(CONFIG_X86_PAE) + tab = pte_to_virt(tab[l3_table_offset(pt_page)]); +#endif + + mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) + + sizeof(pgentry_t) * l1_table_offset(pt_page); + mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | + (prot_e & ~_PAGE_RW); + if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0) + { + printk("PTE for new page table page could not be updated\n"); + do_exit(); + } + + /* Pin the page to provide correct protection */ + pin_request.cmd = pincmd; + pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn); + if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0) + { + printk("ERROR: pinning failed\n"); + do_exit(); + } + + /* Now fill the new page table page with entries. + Update the page directory as well. 
*/ + mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset; + mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t; + if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0) + { + printk("ERROR: mmu_update failed\n"); + do_exit(); + } + + *pt_pfn += 1; +} + +/* Checks if a pagetable frame is needed (if weren't allocated by Xen) */ +static int need_pt_frame(unsigned long virt_address, int level) +{ + unsigned long hyp_virt_start = HYPERVISOR_VIRT_START; +#if defined(__x86_64__) + unsigned long hyp_virt_end = HYPERVISOR_VIRT_END; +#else + unsigned long hyp_virt_end = 0xffffffff; +#endif + + /* In general frames will _not_ be needed if they were already + allocated to map the hypervisor into our VA space */ +#if defined(__x86_64__) + if(level == L3_FRAME) + { + if(l4_table_offset(virt_address) >= + l4_table_offset(hyp_virt_start) && + l4_table_offset(virt_address) <= + l4_table_offset(hyp_virt_end)) + return 0; + return 1; + } else +#endif + +#if defined(__x86_64__) || defined(CONFIG_X86_PAE) + if(level == L2_FRAME) + { +#if defined(__x86_64__) + if(l4_table_offset(virt_address) >= + l4_table_offset(hyp_virt_start) && + l4_table_offset(virt_address) <= + l4_table_offset(hyp_virt_end)) +#endif + if(l3_table_offset(virt_address) >= + l3_table_offset(hyp_virt_start) && + l3_table_offset(virt_address) <= + l3_table_offset(hyp_virt_end)) + return 0; + + return 1; + } else +#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */ + + /* Always need l1 frames */ + if(level == L1_FRAME) + return 1; + + printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", + level, hyp_virt_start, hyp_virt_end); + return -1; +} + +void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn) +{ + unsigned long start_address, end_address; + unsigned long pfn_to_map, pt_pfn = *start_pfn; + static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1]; + pgentry_t *tab = (pgentry_t *)start_info.pt_base, page; + unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base)); + unsigned long offset; + int count = 0; + + pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES; + + if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START)) + { + printk("WARNING: Mini-OS trying to use Xen virtual space. 
" + "Truncating memory from %dMB to ", + ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20); + *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE); + printk("%dMB\n", + ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20); + } + + start_address = (unsigned long)pfn_to_virt(pfn_to_map); + end_address = (unsigned long)pfn_to_virt(*max_pfn); + + /* We worked out the virtual memory range to map, now mapping loop */ + printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address); + + while(start_address < end_address) + { + tab = (pgentry_t *)start_info.pt_base; + mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base)); + +#if defined(__x86_64__) + offset = l4_table_offset(start_address); + /* Need new L3 pt frame */ + if(!(start_address & L3_MASK)) + if(need_pt_frame(start_address, L3_FRAME)) + new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME); + + page = tab[offset]; + mfn = pte_to_mfn(page); + tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); +#endif +#if defined(__x86_64__) || defined(CONFIG_X86_PAE) + offset = l3_table_offset(start_address); + /* Need new L2 pt frame */ + if(!(start_address & L2_MASK)) + if(need_pt_frame(start_address, L2_FRAME)) + new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME); + + page = tab[offset]; + mfn = pte_to_mfn(page); + tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); +#endif + offset = l2_table_offset(start_address); + /* Need new L1 pt frame */ + if(!(start_address & L1_MASK)) + if(need_pt_frame(start_address, L1_FRAME)) + new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME); + + page = tab[offset]; + mfn = pte_to_mfn(page); + offset = l1_table_offset(start_address); + + mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset; + mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT; + count++; + if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn) + { + if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0) + { + printk("PTE could not be updated\n"); + do_exit(); + } + count = 0; + } + start_address += PAGE_SIZE; + } + + *start_pfn = pt_pfn; +} + + +void mem_test(unsigned long *start_add, unsigned long *end_add) +{ + unsigned long mask = 0x10000; + unsigned long *pointer; + + for(pointer = start_add; pointer < end_add; pointer++) + { + if(!(((unsigned long)pointer) & 0xfffff)) + { + printk("Writing to %lx\n", pointer); + page_walk((unsigned long)pointer); + } + *pointer = (unsigned long)pointer & ~mask; + } + + for(pointer = start_add; pointer < end_add; pointer++) + { + if(((unsigned long)pointer & ~mask) != *pointer) + printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n", + (unsigned long)pointer, + *pointer, + ((unsigned long)pointer & ~mask)); + } + +} + +static pgentry_t *demand_map_pgt; +static void *demand_map_area_start; + +void arch_init_demand_mapping_area(unsigned long max_pfn) +{ + unsigned long mfn; + pgentry_t *tab; + unsigned long start_addr; + unsigned long pt_pfn; + unsigned offset; + + /* Round up to four megs. + 1024 rather than + 1023 since we want + to be sure we don't end up in the same place we started. 
*/ + max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1); + if (max_pfn == 0 || + (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >= + HYPERVISOR_VIRT_START) { + printk("Too much memory; no room for demand map hole.\n"); + do_exit(); + } + + demand_map_area_start = pfn_to_virt(max_pfn); + printk("Demand map pfns start at %lx (%p).\n", max_pfn, + demand_map_area_start); + start_addr = (unsigned long)demand_map_area_start; + + tab = (pgentry_t *)start_info.pt_base; + mfn = virt_to_mfn(start_info.pt_base); + pt_pfn = virt_to_pfn(alloc_page()); + +#if defined(__x86_64__) + offset = l4_table_offset(start_addr); + if (!(tab[offset] & _PAGE_PRESENT)) { + new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME); + pt_pfn = virt_to_pfn(alloc_page()); + } + ASSERT(tab[offset] & _PAGE_PRESENT); + mfn = pte_to_mfn(tab[offset]); + tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); +#endif +#if defined(__x86_64__) || defined(CONFIG_X86_PAE) + offset = l3_table_offset(start_addr); + if (!(tab[offset] & _PAGE_PRESENT)) { + new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME); + pt_pfn = virt_to_pfn(alloc_page()); + } + ASSERT(tab[offset] & _PAGE_PRESENT); + mfn = pte_to_mfn(tab[offset]); + tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); +#endif + offset = l2_table_offset(start_addr); + if (tab[offset] & _PAGE_PRESENT) { + printk("Demand map area already has a page table covering it?\n"); + BUG(); + } + demand_map_pgt = pfn_to_virt(pt_pfn); + new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME); + ASSERT(tab[offset] & _PAGE_PRESENT); + printk("Initialised demand area.\n"); +} + +void *map_frames(unsigned long *f, unsigned long n) +{ + unsigned long x; + unsigned long y = 0; + mmu_update_t mmu_updates[16]; + int rc; + + if (n > 16) { + printk("Tried to map too many (%ld) frames at once.\n", n); + return NULL; + } + + /* Find a run of n contiguous frames */ + for (x = 0; x <= 1024 - n; x += y + 1) { + for (y = 0; y < n; y++) + if (demand_map_pgt[x+y] & _PAGE_PRESENT) + break; + if (y == n) + break; + } + if (y != n) { + printk("Failed to map %ld frames!\n", n); + return NULL; + } + + /* Found it at x. Map it in. 
*/ + for (y = 0; y < n; y++) { + mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]); + mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT; + } + + rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF); + if (rc < 0) { + printk("Map %ld failed: %d.\n", n, rc); + return NULL; + } else { + return (void *)(unsigned long)((unsigned long)demand_map_area_start + + x * PAGE_SIZE); + } +} + +void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p) +{ + + unsigned long start_pfn, max_pfn; + + printk(" _text: %p\n", &_text); + printk(" _etext: %p\n", &_etext); + printk(" _edata: %p\n", &_edata); + printk(" stack start: %p\n", &stack); + printk(" _end: %p\n", &_end); + + /* First page follows page table pages and 3 more pages (store page etc) */ + start_pfn = PFN_UP(to_phys(start_info.pt_base)) + + start_info.nr_pt_frames + 3; + max_pfn = start_info.nr_pages; + + printk(" start_pfn: %lx\n", start_pfn); + printk(" max_pfn: %lx\n", max_pfn); + + build_pagetable(&start_pfn, &max_pfn); + + *start_pfn_p = start_pfn; + *max_pfn_p = max_pfn; +} + diff --git a/extras/mini-os/arch/x86/sched.c b/extras/mini-os/arch/x86/sched.c new file mode 100644 index 0000000000..e56479b90f --- /dev/null +++ b/extras/mini-os/arch/x86/sched.c @@ -0,0 +1,150 @@ +/* + **************************************************************************** + * (C) 2005 - Grzegorz Milos - Intel Research Cambridge + **************************************************************************** + * + * File: sched.c + * Author: Grzegorz Milos + * Changes: Robert Kaiser + * + * Date: Aug 2005 + * + * Environment: Xen Minimal OS + * Description: simple scheduler for Mini-Os + * + * The scheduler is non-preemptive (cooperative), and schedules according + * to Round Robin algorithm. + * + **************************************************************************** + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef SCHED_DEBUG +#define DEBUG(_f, _a...) \ + printk("MINI_OS(file=sched.c, line=%d) " _f "\n", __LINE__, ## _a) +#else +#define DEBUG(_f, _a...) 
((void)0) +#endif + + +void dump_stack(struct thread *thread) +{ + unsigned long *bottom = (unsigned long *)(thread->stack + 2*4*1024); + unsigned long *pointer = (unsigned long *)thread->sp; + int count; + if(thread == current) + { +#ifdef __i386__ + asm("movl %%esp,%0" + : "=r"(pointer)); +#else + asm("movq %%rsp,%0" + : "=r"(pointer)); +#endif + } + printk("The stack for \"%s\"\n", thread->name); + for(count = 0; count < 25 && pointer < bottom; count ++) + { + printk("[0x%lx] 0x%lx\n", pointer, *pointer); + pointer++; + } + + if(pointer < bottom) printk(" ... continues.\n"); +} + +/* Gets run when a new thread is scheduled the first time ever, + defined in x86_[32/64].S */ +extern void thread_starter(void); + +/* Pushes the specified value onto the stack of the specified thread */ +static void stack_push(struct thread *thread, unsigned long value) +{ + thread->sp -= sizeof(unsigned long); + *((unsigned long *)thread->sp) = value; +} + +struct thread* create_thread(char *name, void (*function)(void *), void *data) +{ + struct thread *thread; + unsigned long flags; + + thread = xmalloc(struct thread); + /* Allocate 2 pages for stack, stack will be 2pages aligned */ + thread->stack = (char *)alloc_pages(1); + thread->name = name; + printk("Thread \"%s\": pointer: 0x%lx, stack: 0x%lx\n", name, thread, + thread->stack); + + thread->sp = (unsigned long)thread->stack + 4096 * 2; + /* Save pointer to the thread on the stack, used by current macro */ + *((unsigned long *)thread->stack) = (unsigned long)thread; + + stack_push(thread, (unsigned long) function); + stack_push(thread, (unsigned long) data); + thread->ip = (unsigned long) thread_starter; + + /* Not runable, not exited, not sleeping */ + thread->flags = 0; + thread->wakeup_time = 0LL; + set_runnable(thread); + local_irq_save(flags); + if(idle_thread != NULL) { + list_add_tail(&thread->thread_list, &idle_thread->thread_list); + } else if(function != idle_thread_fn) + { + printk("BUG: Not allowed to create thread before initialising scheduler.\n"); + BUG(); + } + local_irq_restore(flags); + return thread; +} + + +void run_idle_thread(void) +{ + /* Switch stacks and run the thread */ +#if defined(__i386__) + __asm__ __volatile__("mov %0,%%esp\n\t" + "push %1\n\t" + "ret" + :"=m" (idle_thread->sp) + :"m" (idle_thread->ip)); +#elif defined(__x86_64__) + __asm__ __volatile__("mov %0,%%rsp\n\t" + "push %1\n\t" + "ret" + :"=m" (idle_thread->sp) + :"m" (idle_thread->ip)); +#endif +} + + + diff --git a/extras/mini-os/include/mm.h b/extras/mini-os/include/mm.h index 4c820e07e4..cd53a4bf25 100644 --- a/extras/mini-os/include/mm.h +++ b/extras/mini-os/include/mm.h @@ -29,182 +29,15 @@ #include #elif defined(__x86_64__) #include +#elif defined(__ia64__) +#include #else #error "Unsupported architecture" #endif #include +#include -#define L1_FRAME 1 -#define L2_FRAME 2 -#define L3_FRAME 3 - -#define L1_PAGETABLE_SHIFT 12 - -#if defined(__i386__) - -#if !defined(CONFIG_X86_PAE) - -#define L2_PAGETABLE_SHIFT 22 - -#define L1_PAGETABLE_ENTRIES 1024 -#define L2_PAGETABLE_ENTRIES 1024 - -#define PADDR_BITS 32 -#define PADDR_MASK (~0UL) - -#define NOT_L1_FRAMES 1 -#define PRIpte "08lx" -typedef unsigned long pgentry_t; - -#else /* defined(CONFIG_X86_PAE) */ - -#define L2_PAGETABLE_SHIFT 21 -#define L3_PAGETABLE_SHIFT 30 - -#define L1_PAGETABLE_ENTRIES 512 -#define L2_PAGETABLE_ENTRIES 512 -#define L3_PAGETABLE_ENTRIES 4 - -#define PADDR_BITS 44 -#define PADDR_MASK ((1ULL << PADDR_BITS)-1) - -#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1) - -/* - * If 
starting from virtual address greater than 0xc0000000, - * this value will be 2 to account for final mid-level page - * directory which is always mapped in at this location. - */ -#define NOT_L1_FRAMES 3 -#define PRIpte "016llx" -typedef uint64_t pgentry_t; - -#endif /* !defined(CONFIG_X86_PAE) */ - -#elif defined(__x86_64__) - -#define L2_PAGETABLE_SHIFT 21 -#define L3_PAGETABLE_SHIFT 30 -#define L4_PAGETABLE_SHIFT 39 - -#define L1_PAGETABLE_ENTRIES 512 -#define L2_PAGETABLE_ENTRIES 512 -#define L3_PAGETABLE_ENTRIES 512 -#define L4_PAGETABLE_ENTRIES 512 - -/* These are page-table limitations. Current CPUs support only 40-bit phys. */ -#define PADDR_BITS 52 -#define VADDR_BITS 48 -#define PADDR_MASK ((1UL << PADDR_BITS)-1) -#define VADDR_MASK ((1UL << VADDR_BITS)-1) - -#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1) -#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1) - -#define NOT_L1_FRAMES 3 -#define PRIpte "016lx" -typedef unsigned long pgentry_t; - -#endif - -#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1) - -/* Given a virtual address, get an entry offset into a page table. */ -#define l1_table_offset(_a) \ - (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) -#define l2_table_offset(_a) \ - (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) -#if defined(__x86_64__) || defined(CONFIG_X86_PAE) -#define l3_table_offset(_a) \ - (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) -#endif -#if defined(__x86_64__) -#define l4_table_offset(_a) \ - (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1)) -#endif - -#define _PAGE_PRESENT 0x001UL -#define _PAGE_RW 0x002UL -#define _PAGE_USER 0x004UL -#define _PAGE_PWT 0x008UL -#define _PAGE_PCD 0x010UL -#define _PAGE_ACCESSED 0x020UL -#define _PAGE_DIRTY 0x040UL -#define _PAGE_PAT 0x080UL -#define _PAGE_PSE 0x080UL -#define _PAGE_GLOBAL 0x100UL - -#if defined(__i386__) -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED) -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER) -#if defined(CONFIG_X86_PAE) -#define L3_PROT (_PAGE_PRESENT) -#endif /* CONFIG_X86_PAE */ -#elif defined(__x86_64__) -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER) -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) -#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) -#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) -#endif /* __i386__ || __x86_64__ */ - -#ifndef CONFIG_X86_PAE -#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT) -#else -#define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT) -#endif -#define PAGE_SHIFT L1_PAGETABLE_SHIFT -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT) -#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT) -#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT) - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - -/* Definitions for machine and pseudophysical addresses. 
*/ -#ifdef CONFIG_X86_PAE -typedef unsigned long long paddr_t; -typedef unsigned long long maddr_t; -#else -typedef unsigned long paddr_t; -typedef unsigned long maddr_t; -#endif - -extern unsigned long *phys_to_machine_mapping; -extern char _text, _etext, _edata, _end; -#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)]) -static __inline__ maddr_t phys_to_machine(paddr_t phys) -{ - maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); - machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); - return machine; -} - -#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)]) -static __inline__ paddr_t machine_to_phys(maddr_t machine) -{ - paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); - phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); - return phys; -} - -#define VIRT_START ((unsigned long)&_text) - -#define to_phys(x) ((unsigned long)(x)-VIRT_START) -#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START)) - -#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt))) -#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt))) -#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach))) -#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt))) -#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT)) -#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT)) - -/* Pagetable walking. */ -#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT) -#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT) void init_mm(void); unsigned long alloc_pages(int order); @@ -220,6 +53,8 @@ static __inline__ int get_order(unsigned long size) return order; } +void arch_init_demand_mapping_area(unsigned long max_pfn); +void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p); void *map_frames(unsigned long *f, unsigned long n); diff --git a/extras/mini-os/include/sched.h b/extras/mini-os/include/sched.h index 058941a0a1..f162062c45 100644 --- a/extras/mini-os/include/sched.h +++ b/extras/mini-os/include/sched.h @@ -3,36 +3,40 @@ #include #include +#include struct thread { char *name; char *stack; +#if !defined(__ia64__) unsigned long sp; /* Stack pointer */ unsigned long ip; /* Instruction pointer */ +#else /* !defined(__ia64__) */ + thread_regs_t regs; +#endif /* !defined(__ia64__) */ struct list_head thread_list; u32 flags; s_time_t wakeup_time; }; +extern struct thread *idle_thread; +void idle_thread_fn(void *unused); +#define RUNNABLE_FLAG 0x00000001 + +#define is_runnable(_thread) (_thread->flags & RUNNABLE_FLAG) +#define set_runnable(_thread) (_thread->flags |= RUNNABLE_FLAG) +#define clear_runnable(_thread) (_thread->flags &= ~RUNNABLE_FLAG) + +#define switch_threads(prev, next) arch_switch_threads(prev, next) + void init_sched(void); void run_idle_thread(void); struct thread* create_thread(char *name, void (*function)(void *), void *data); void schedule(void); -static inline struct thread* get_current(void) -{ - struct thread **current; -#ifdef __i386__ - __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL)); -#else - __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL)); -#endif - return *current; -} - #define current get_current() diff --git a/extras/mini-os/include/x86/arch_mm.h b/extras/mini-os/include/x86/arch_mm.h new file mode 100644 index 0000000000..795ef070a7 --- /dev/null +++ b/extras/mini-os/include/x86/arch_mm.h @@ -0,0 +1,209 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- + * + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge + * Copyright (c) 2005, Keir A Fraser + * + * 
Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _ARCH_MM_H_ +#define _ARCH_MM_H_ + +#if defined(__i386__) +#include +#elif defined(__x86_64__) +#include +#else +#error "Unsupported architecture" +#endif + +#define L1_FRAME 1 +#define L2_FRAME 2 +#define L3_FRAME 3 + +#define L1_PAGETABLE_SHIFT 12 + +#if defined(__i386__) + +#if !defined(CONFIG_X86_PAE) + +#define L2_PAGETABLE_SHIFT 22 + +#define L1_PAGETABLE_ENTRIES 1024 +#define L2_PAGETABLE_ENTRIES 1024 + +#define PADDR_BITS 32 +#define PADDR_MASK (~0UL) + +#define NOT_L1_FRAMES 1 +#define PRIpte "08lx" +typedef unsigned long pgentry_t; + +#else /* defined(CONFIG_X86_PAE) */ + +#define L2_PAGETABLE_SHIFT 21 +#define L3_PAGETABLE_SHIFT 30 + +#define L1_PAGETABLE_ENTRIES 512 +#define L2_PAGETABLE_ENTRIES 512 +#define L3_PAGETABLE_ENTRIES 4 + +#define PADDR_BITS 44 +#define PADDR_MASK ((1ULL << PADDR_BITS)-1) + +#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1) + +/* + * If starting from virtual address greater than 0xc0000000, + * this value will be 2 to account for final mid-level page + * directory which is always mapped in at this location. + */ +#define NOT_L1_FRAMES 3 +#define PRIpte "016llx" +typedef uint64_t pgentry_t; + +#endif /* !defined(CONFIG_X86_PAE) */ + +#elif defined(__x86_64__) + +#define L2_PAGETABLE_SHIFT 21 +#define L3_PAGETABLE_SHIFT 30 +#define L4_PAGETABLE_SHIFT 39 + +#define L1_PAGETABLE_ENTRIES 512 +#define L2_PAGETABLE_ENTRIES 512 +#define L3_PAGETABLE_ENTRIES 512 +#define L4_PAGETABLE_ENTRIES 512 + +/* These are page-table limitations. Current CPUs support only 40-bit phys. */ +#define PADDR_BITS 52 +#define VADDR_BITS 48 +#define PADDR_MASK ((1UL << PADDR_BITS)-1) +#define VADDR_MASK ((1UL << VADDR_BITS)-1) + +#define L2_MASK ((1UL << L3_PAGETABLE_SHIFT) - 1) +#define L3_MASK ((1UL << L4_PAGETABLE_SHIFT) - 1) + +#define NOT_L1_FRAMES 3 +#define PRIpte "016lx" +typedef unsigned long pgentry_t; + +#endif + +#define L1_MASK ((1UL << L2_PAGETABLE_SHIFT) - 1) + +/* Given a virtual address, get an entry offset into a page table. 
*/ +#define l1_table_offset(_a) \ + (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1)) +#define l2_table_offset(_a) \ + (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1)) +#if defined(__x86_64__) || defined(CONFIG_X86_PAE) +#define l3_table_offset(_a) \ + (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1)) +#endif +#if defined(__x86_64__) +#define l4_table_offset(_a) \ + (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1)) +#endif + +#define _PAGE_PRESENT 0x001UL +#define _PAGE_RW 0x002UL +#define _PAGE_USER 0x004UL +#define _PAGE_PWT 0x008UL +#define _PAGE_PCD 0x010UL +#define _PAGE_ACCESSED 0x020UL +#define _PAGE_DIRTY 0x040UL +#define _PAGE_PAT 0x080UL +#define _PAGE_PSE 0x080UL +#define _PAGE_GLOBAL 0x100UL + +#if defined(__i386__) +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED) +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER) +#if defined(CONFIG_X86_PAE) +#define L3_PROT (_PAGE_PRESENT) +#endif /* CONFIG_X86_PAE */ +#elif defined(__x86_64__) +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER) +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) +#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) +#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) +#endif /* __i386__ || __x86_64__ */ + +#ifndef CONFIG_X86_PAE +#define PAGE_SIZE (1UL << L1_PAGETABLE_SHIFT) +#else +#define PAGE_SIZE (1ULL << L1_PAGETABLE_SHIFT) +#endif +#define PAGE_SHIFT L1_PAGETABLE_SHIFT +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT) +#define PFN_DOWN(x) ((x) >> L1_PAGETABLE_SHIFT) +#define PFN_PHYS(x) ((x) << L1_PAGETABLE_SHIFT) +#define PHYS_PFN(x) ((x) >> L1_PAGETABLE_SHIFT) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + +/* Definitions for machine and pseudophysical addresses. */ +#ifdef CONFIG_X86_PAE +typedef unsigned long long paddr_t; +typedef unsigned long long maddr_t; +#else +typedef unsigned long paddr_t; +typedef unsigned long maddr_t; +#endif + +extern unsigned long *phys_to_machine_mapping; +extern char _text, _etext, _edata, _end; +#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)]) +static __inline__ maddr_t phys_to_machine(paddr_t phys) +{ + maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); + machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); + return machine; +} + +#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)]) +static __inline__ paddr_t machine_to_phys(maddr_t machine) +{ + paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); + phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); + return phys; +} + +#define VIRT_START ((unsigned long)&_text) + +#define to_phys(x) ((unsigned long)(x)-VIRT_START) +#define to_virt(x) ((void *)((unsigned long)(x)+VIRT_START)) + +#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt))) +#define virt_to_mfn(_virt) (pfn_to_mfn(virt_to_pfn(_virt))) +#define mach_to_virt(_mach) (to_virt(machine_to_phys(_mach))) +#define virt_to_mach(_virt) (phys_to_machine(to_phys(_virt))) +#define mfn_to_virt(_mfn) (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT)) +#define pfn_to_virt(_pfn) (to_virt((_pfn) << PAGE_SHIFT)) + +/* Pagetable walking. 
*/ +#define pte_to_mfn(_pte) (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT) +#define pte_to_virt(_pte) to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT) + + +#endif /* _ARCH_MM_H_ */ diff --git a/extras/mini-os/include/x86/arch_sched.h b/extras/mini-os/include/x86/arch_sched.h new file mode 100644 index 0000000000..e02dbd05a5 --- /dev/null +++ b/extras/mini-os/include/x86/arch_sched.h @@ -0,0 +1,58 @@ + +#ifndef __ARCH_SCHED_H__ +#define __ARCH_SCHED_H__ + + +static inline struct thread* get_current(void) +{ + struct thread **current; +#ifdef __i386__ + __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL)); +#else + __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL)); +#endif + return *current; +} + +#ifdef __i386__ +#define arch_switch_threads(prev, next) do { \ + unsigned long esi,edi; \ + __asm__ __volatile__("pushfl\n\t" \ + "pushl %%ebp\n\t" \ + "movl %%esp,%0\n\t" /* save ESP */ \ + "movl %4,%%esp\n\t" /* restore ESP */ \ + "movl $1f,%1\n\t" /* save EIP */ \ + "pushl %5\n\t" /* restore EIP */ \ + "ret\n\t" \ + "1:\t" \ + "popl %%ebp\n\t" \ + "popfl" \ + :"=m" (prev->sp),"=m" (prev->ip), \ + "=S" (esi),"=D" (edi) \ + :"m" (next->sp),"m" (next->ip), \ + "2" (prev), "d" (next)); \ +} while (0) +#elif __x86_64__ +#define arch_switch_threads(prev, next) do { \ + unsigned long rsi,rdi; \ + __asm__ __volatile__("pushfq\n\t" \ + "pushq %%rbp\n\t" \ + "movq %%rsp,%0\n\t" /* save RSP */ \ + "movq %4,%%rsp\n\t" /* restore RSP */ \ + "movq $1f,%1\n\t" /* save RIP */ \ + "pushq %5\n\t" /* restore RIP */ \ + "ret\n\t" \ + "1:\t" \ + "popq %%rbp\n\t" \ + "popfq" \ + :"=m" (prev->sp),"=m" (prev->ip), \ + "=S" (rsi),"=D" (rdi) \ + :"m" (next->sp),"m" (next->ip), \ + "2" (prev), "d" (next)); \ +} while (0) +#endif + + + + +#endif /* __ARCH_SCHED_H__ */ diff --git a/extras/mini-os/mm.c b/extras/mini-os/mm.c index 85f9e92216..a24a521cf6 100644 --- a/extras/mini-os/mm.c +++ b/extras/mini-os/mm.c @@ -48,10 +48,6 @@ #define DEBUG(_f, _a...) ((void)0) #endif -unsigned long *phys_to_machine_mapping; -extern char *stack; -extern void page_walk(unsigned long virt_addr); - /********************* * ALLOCATION BITMAP * One bit per page of memory. Bit set => page is allocated. @@ -226,11 +222,11 @@ static void init_page_allocator(unsigned long min, unsigned long max) /* All allocated by default. */ memset(alloc_bitmap, ~0, bitmap_size); /* Free up the memory we've been given to play with. */ - map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT); + map_free(PHYS_PFN(min), range>>PAGE_SHIFT); /* The buddy lists are addressed in high memory. 
*/ - min += VIRT_START; - max += VIRT_START; + min = (unsigned long) to_virt(min); + max = (unsigned long) to_virt(max); while ( range != 0 ) { @@ -297,7 +293,7 @@ unsigned long alloc_pages(int order) free_head[i] = spare_ch; } - map_alloc(to_phys(alloc_ch)>>PAGE_SHIFT, 1<= - l4_table_offset(hyp_virt_start) && - l4_table_offset(virt_address) <= - l4_table_offset(hyp_virt_end)) - return 0; - return 1; - } else -#endif - -#if defined(__x86_64__) || defined(CONFIG_X86_PAE) - if(level == L2_FRAME) - { -#if defined(__x86_64__) - if(l4_table_offset(virt_address) >= - l4_table_offset(hyp_virt_start) && - l4_table_offset(virt_address) <= - l4_table_offset(hyp_virt_end)) -#endif - if(l3_table_offset(virt_address) >= - l3_table_offset(hyp_virt_start) && - l3_table_offset(virt_address) <= - l3_table_offset(hyp_virt_end)) - return 0; - - return 1; - } else -#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */ - - /* Always need l1 frames */ - if(level == L1_FRAME) - return 1; - - printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", - level, hyp_virt_start, hyp_virt_end); - return -1; -} - -void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn) -{ - unsigned long start_address, end_address; - unsigned long pfn_to_map, pt_pfn = *start_pfn; - static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1]; - pgentry_t *tab = (pgentry_t *)start_info.pt_base, page; - unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base)); - unsigned long offset; - int count = 0; - - pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES; - - if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START)) - { - printk("WARNING: Mini-OS trying to use Xen virtual space. " - "Truncating memory from %dMB to ", - ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20); - *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE); - printk("%dMB\n", - ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20); - } - - start_address = (unsigned long)pfn_to_virt(pfn_to_map); - end_address = (unsigned long)pfn_to_virt(*max_pfn); - - /* We worked out the virtual memory range to map, now mapping loop */ - printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address); - - while(start_address < end_address) - { - tab = (pgentry_t *)start_info.pt_base; - mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base)); - -#if defined(__x86_64__) - offset = l4_table_offset(start_address); - /* Need new L3 pt frame */ - if(!(start_address & L3_MASK)) - if(need_pt_frame(start_address, L3_FRAME)) - new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME); - - page = tab[offset]; - mfn = pte_to_mfn(page); - tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); -#endif -#if defined(__x86_64__) || defined(CONFIG_X86_PAE) - offset = l3_table_offset(start_address); - /* Need new L2 pt frame */ - if(!(start_address & L2_MASK)) - if(need_pt_frame(start_address, L2_FRAME)) - new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME); - - page = tab[offset]; - mfn = pte_to_mfn(page); - tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); -#endif - offset = l2_table_offset(start_address); - /* Need new L1 pt frame */ - if(!(start_address & L1_MASK)) - if(need_pt_frame(start_address, L1_FRAME)) - new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME); - - page = tab[offset]; - mfn = pte_to_mfn(page); - offset = l1_table_offset(start_address); - - mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset; - mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT; - count++; - if 
(count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn) - { - if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0) - { - printk("PTE could not be updated\n"); - do_exit(); - } - count = 0; - } - start_address += PAGE_SIZE; - } - *start_pfn = pt_pfn; -} - - -void mem_test(unsigned long *start_add, unsigned long *end_add) -{ - unsigned long mask = 0x10000; - unsigned long *pointer; - - for(pointer = start_add; pointer < end_add; pointer++) - { - if(!(((unsigned long)pointer) & 0xfffff)) - { - printk("Writing to %lx\n", pointer); - page_walk((unsigned long)pointer); - } - *pointer = (unsigned long)pointer & ~mask; - } - - for(pointer = start_add; pointer < end_add; pointer++) - { - if(((unsigned long)pointer & ~mask) != *pointer) - printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n", - (unsigned long)pointer, - *pointer, - ((unsigned long)pointer & ~mask)); - } - -} - -static pgentry_t *demand_map_pgt; -static void *demand_map_area_start; - -static void init_demand_mapping_area(unsigned long max_pfn) -{ - unsigned long mfn; - pgentry_t *tab; - unsigned long start_addr; - unsigned long pt_pfn; - unsigned offset; - - /* Round up to four megs. + 1024 rather than + 1023 since we want - to be sure we don't end up in the same place we started. */ - max_pfn = (max_pfn + L1_PAGETABLE_ENTRIES) & ~(L1_PAGETABLE_ENTRIES - 1); - if (max_pfn == 0 || - (unsigned long)pfn_to_virt(max_pfn + L1_PAGETABLE_ENTRIES) >= - HYPERVISOR_VIRT_START) { - printk("Too much memory; no room for demand map hole.\n"); - do_exit(); - } - - demand_map_area_start = pfn_to_virt(max_pfn); - printk("Demand map pfns start at %lx (%p).\n", max_pfn, - demand_map_area_start); - start_addr = (unsigned long)demand_map_area_start; - - tab = (pgentry_t *)start_info.pt_base; - mfn = virt_to_mfn(start_info.pt_base); - pt_pfn = virt_to_pfn(alloc_page()); - -#if defined(__x86_64__) - offset = l4_table_offset(start_addr); - if (!(tab[offset] & _PAGE_PRESENT)) { - new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME); - pt_pfn = virt_to_pfn(alloc_page()); - } - ASSERT(tab[offset] & _PAGE_PRESENT); - mfn = pte_to_mfn(tab[offset]); - tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); -#endif -#if defined(__x86_64__) || defined(CONFIG_X86_PAE) - offset = l3_table_offset(start_addr); - if (!(tab[offset] & _PAGE_PRESENT)) { - new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME); - pt_pfn = virt_to_pfn(alloc_page()); - } - ASSERT(tab[offset] & _PAGE_PRESENT); - mfn = pte_to_mfn(tab[offset]); - tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT); -#endif - offset = l2_table_offset(start_addr); - if (tab[offset] & _PAGE_PRESENT) { - printk("Demand map area already has a page table covering it?\n"); - BUG(); - } - demand_map_pgt = pfn_to_virt(pt_pfn); - new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME); - ASSERT(tab[offset] & _PAGE_PRESENT); -} - -void *map_frames(unsigned long *f, unsigned long n) -{ - unsigned long x; - unsigned long y = 0; - mmu_update_t mmu_updates[16]; - int rc; - - if (n > 16) { - printk("Tried to map too many (%ld) frames at once.\n", n); - return NULL; - } - - /* Find a run of n contiguous frames */ - for (x = 0; x <= 1024 - n; x += y + 1) { - for (y = 0; y < n; y++) - if (demand_map_pgt[x+y] & _PAGE_PRESENT) - break; - if (y == n) - break; - } - if (y != n) { - printk("Failed to map %ld frames!\n", n); - return NULL; - } - - /* Found it at x. Map it in. 
*/ - for (y = 0; y < n; y++) { - mmu_updates[y].ptr = virt_to_mach(&demand_map_pgt[x + y]); - mmu_updates[y].val = (f[y] << PAGE_SHIFT) | L1_PROT; - } - - rc = HYPERVISOR_mmu_update(mmu_updates, n, NULL, DOMID_SELF); - if (rc < 0) { - printk("Map %ld failed: %d.\n", n, rc); - return NULL; - } else { - return (void *)(unsigned long)((unsigned long)demand_map_area_start + - x * PAGE_SIZE); - } -} void init_mm(void) { @@ -717,22 +369,7 @@ void init_mm(void) printk("MM: Init\n"); - printk(" _text: %p\n", &_text); - printk(" _etext: %p\n", &_etext); - printk(" _edata: %p\n", &_edata); - printk(" stack start: %p\n", &stack); - printk(" _end: %p\n", &_end); - - /* First page follows page table pages and 3 more pages (store page etc) */ - start_pfn = PFN_UP(to_phys(start_info.pt_base)) + - start_info.nr_pt_frames + 3; - max_pfn = start_info.nr_pages; - - printk(" start_pfn: %lx\n", start_pfn); - printk(" max_pfn: %lx\n", max_pfn); - - build_pagetable(&start_pfn, &max_pfn); - + arch_init_mm(&start_pfn, &max_pfn); /* * now we can initialise the page allocator */ @@ -742,8 +379,7 @@ void init_mm(void) init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn)); printk("MM: done\n"); - init_demand_mapping_area(max_pfn); - printk("Initialised demand area.\n"); + arch_init_demand_mapping_area(max_pfn); } void sanity_check(void) diff --git a/extras/mini-os/sched.c b/extras/mini-os/sched.c index e6c9cc3771..9b111c28e5 100644 --- a/extras/mini-os/sched.c +++ b/extras/mini-os/sched.c @@ -54,82 +54,9 @@ #define DEBUG(_f, _a...) ((void)0) #endif - -#define RUNNABLE_FLAG 0x00000001 - -#define is_runnable(_thread) (_thread->flags & RUNNABLE_FLAG) -#define set_runnable(_thread) (_thread->flags |= RUNNABLE_FLAG) -#define clear_runnable(_thread) (_thread->flags &= ~RUNNABLE_FLAG) - - struct thread *idle_thread = NULL; LIST_HEAD(exited_threads); -void idle_thread_fn(void *unused); - -void dump_stack(struct thread *thread) -{ - unsigned long *bottom = (unsigned long *)(thread->stack + 2*4*1024); - unsigned long *pointer = (unsigned long *)thread->sp; - int count; - if(thread == current) - { -#ifdef __i386__ - asm("movl %%esp,%0" - : "=r"(pointer)); -#else - asm("movq %%rsp,%0" - : "=r"(pointer)); -#endif - } - printk("The stack for \"%s\"\n", thread->name); - for(count = 0; count < 25 && pointer < bottom; count ++) - { - printk("[0x%lx] 0x%lx\n", pointer, *pointer); - pointer++; - } - - if(pointer < bottom) printk(" ... 
continues.\n"); -} - -#ifdef __i386__ -#define switch_threads(prev, next) do { \ - unsigned long esi,edi; \ - __asm__ __volatile__("pushfl\n\t" \ - "pushl %%ebp\n\t" \ - "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %4,%%esp\n\t" /* restore ESP */ \ - "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %5\n\t" /* restore EIP */ \ - "ret\n\t" \ - "1:\t" \ - "popl %%ebp\n\t" \ - "popfl" \ - :"=m" (prev->sp),"=m" (prev->ip), \ - "=S" (esi),"=D" (edi) \ - :"m" (next->sp),"m" (next->ip), \ - "2" (prev), "d" (next)); \ -} while (0) -#elif __x86_64__ -#define switch_threads(prev, next) do { \ - unsigned long rsi,rdi; \ - __asm__ __volatile__("pushfq\n\t" \ - "pushq %%rbp\n\t" \ - "movq %%rsp,%0\n\t" /* save RSP */ \ - "movq %4,%%rsp\n\t" /* restore RSP */ \ - "movq $1f,%1\n\t" /* save RIP */ \ - "pushq %5\n\t" /* restore RIP */ \ - "ret\n\t" \ - "1:\t" \ - "popq %%rbp\n\t" \ - "popfq" \ - :"=m" (prev->sp),"=m" (prev->ip), \ - "=S" (rsi),"=D" (rdi) \ - :"m" (next->sp),"m" (next->ip), \ - "2" (prev), "d" (next)); \ -} while (0) -#endif - void inline print_runqueue(void) { struct list_head *it; @@ -250,50 +177,6 @@ void exit_thread(void) schedule(); } -/* Pushes the specified value onto the stack of the specified thread */ -static void stack_push(struct thread *thread, unsigned long value) -{ - thread->sp -= sizeof(unsigned long); - *((unsigned long *)thread->sp) = value; -} - -struct thread* create_thread(char *name, void (*function)(void *), void *data) -{ - struct thread *thread; - unsigned long flags; - - thread = xmalloc(struct thread); - /* Allocate 2 pages for stack, stack will be 2pages aligned */ - thread->stack = (char *)alloc_pages(1); - thread->name = name; - printk("Thread \"%s\": pointer: 0x%lx, stack: 0x%lx\n", name, thread, - thread->stack); - - thread->sp = (unsigned long)thread->stack + 4096 * 2; - /* Save pointer to the thread on the stack, used by current macro */ - *((unsigned long *)thread->stack) = (unsigned long)thread; - - stack_push(thread, (unsigned long) function); - stack_push(thread, (unsigned long) data); - thread->ip = (unsigned long) thread_starter; - - /* Not runable, not exited, not sleeping */ - thread->flags = 0; - thread->wakeup_time = 0LL; - set_runnable(thread); - local_irq_save(flags); - if(idle_thread != NULL) { - list_add_tail(&thread->thread_list, &idle_thread->thread_list); - } else if(function != idle_thread_fn) - { - printk("BUG: Not allowed to create thread before initialising scheduler.\n"); - BUG(); - } - local_irq_restore(flags); - return thread; -} - - void block(struct thread *thread) { thread->wakeup_time = 0LL; @@ -327,26 +210,6 @@ void idle_thread_fn(void *unused) } } -void run_idle_thread(void) -{ - /* Switch stacks and run the thread */ -#if defined(__i386__) - __asm__ __volatile__("mov %0,%%esp\n\t" - "push %1\n\t" - "ret" - :"=m" (idle_thread->sp) - :"m" (idle_thread->ip)); -#elif defined(__x86_64__) - __asm__ __volatile__("mov %0,%%rsp\n\t" - "push %1\n\t" - "ret" - :"=m" (idle_thread->sp) - :"m" (idle_thread->ip)); -#endif -} - - - DECLARE_MUTEX(mutex); void th_f1(void *data) -- cgit v1.2.3 From d255c83172c37319a8e8174e0abccb47abd1296c Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Mon, 27 Nov 2006 10:03:35 +0000 Subject: [MINIOS] Exports a function to have all Xen ports unbound. This is necessary when using mini-os as the foundation for a boot-loader. 
Signed-off-by: Jacob Gorm Hansen --- extras/mini-os/events.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'extras') diff --git a/extras/mini-os/events.c b/extras/mini-os/events.c index e05f68d83d..5d60ab70fc 100644 --- a/extras/mini-os/events.c +++ b/extras/mini-os/events.c @@ -35,6 +35,21 @@ typedef struct _ev_action_t { static ev_action_t ev_actions[NR_EVS]; void default_handler(evtchn_port_t port, struct pt_regs *regs, void *data); +void unbind_all_ports(void) +{ + int i; + + for(i=0;i Date: Mon, 27 Nov 2006 10:23:19 +0000 Subject: [MINIOS] Switched to new interface for HYPERVISOR_sched_op(). Signed-off-by: Dietmar Hahn --- extras/mini-os/include/x86/x86_32/hypercall-x86_32.h | 2 +- extras/mini-os/include/x86/x86_64/hypercall-x86_64.h | 2 +- extras/mini-os/kernel.c | 6 +++++- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'extras') diff --git a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h index 70e8997a86..5f8b51f872 100644 --- a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h +++ b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h @@ -167,7 +167,7 @@ HYPERVISOR_fpu_taskswitch( static inline int HYPERVISOR_sched_op( - int cmd, unsigned long arg) + int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } diff --git a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h index 159149f48b..2d2904a218 100644 --- a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h +++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h @@ -171,7 +171,7 @@ HYPERVISOR_fpu_taskswitch( static inline int HYPERVISOR_sched_op( - int cmd, unsigned long arg) + int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } diff --git a/extras/mini-os/kernel.c b/extras/mini-os/kernel.c index 152499ceb4..0799116aa6 100644 --- a/extras/mini-os/kernel.c +++ b/extras/mini-os/kernel.c @@ -159,5 +159,9 @@ void start_kernel(start_info_t *si) void do_exit(void) { printk("Do_exit called!\n"); - for ( ;; ) HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_crash); + for( ;; ) + { + struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash }; + HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); + } } -- cgit v1.2.3
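
The last two patches add unbind_all_ports() and move HYPERVISOR_sched_op() to the two-argument convention, where the scheduler sub-command is passed as cmd and its argument block by pointer, as the updated do_exit() loop does with struct sched_shutdown. The sketch below is illustrative only and not part of the patches: the helper name clean_shutdown() and the exact include paths are assumptions, and a real tree would also need unbind_all_ports() declared in a header.

/*
 * Illustrative sketch, not part of the patches above: a caller that
 * releases its event channels and then shuts the domain down via the
 * reworked two-argument HYPERVISOR_sched_op().  Header names and the
 * helper name are assumptions for this example.
 */
#include <hypervisor.h>   /* assumed to provide HYPERVISOR_sched_op()     */
#include <events.h>       /* assumed to declare unbind_all_ports()        */
#include <xen/sched.h>    /* struct sched_shutdown, SCHEDOP_*, SHUTDOWN_* */

static void clean_shutdown(int reason)        /* hypothetical helper */
{
    struct sched_shutdown sched_shutdown = { .reason = reason };

    /* A boot-loader style user first drops every bound event channel,
     * so the next kernel starts with a clean slate. */
    unbind_all_ports();

    /* New convention: sub-command as 'cmd', argument block by pointer. */
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
}

Called as clean_shutdown(SHUTDOWN_poweroff) or clean_shutdown(SHUTDOWN_reboot), this mirrors the updated do_exit() while also releasing ports first; passing a pointer rather than an unsigned long keeps the wrapper in line with the hypervisor's current sched_op ABI and avoids casting argument structures to integers at each call site.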