diff options
author | smh22@boulderdash.cl.cam.ac.uk <smh22@boulderdash.cl.cam.ac.uk> | 2002-11-20 12:02:17 +0000 |
---|---|---|
committer | smh22@boulderdash.cl.cam.ac.uk <smh22@boulderdash.cl.cam.ac.uk> | 2002-11-20 12:02:17 +0000 |
commit | 4676bbf96dc88e9a70607fa79b3c83febc5dc54b (patch) | |
tree | ea469343cb62cad40367c2ec3fec038405649a5c /xenolinux-2.4.16-sparse/arch/xeno/kernel/init_task.c | |
parent | 19e522da85965a058fe60ab74270dc6cdfc4e0b9 (diff) | |
download | xen-4676bbf96dc88e9a70607fa79b3c83febc5dc54b.tar.gz xen-4676bbf96dc88e9a70607fa79b3c83febc5dc54b.tar.bz2 xen-4676bbf96dc88e9a70607fa79b3c83febc5dc54b.zip |
bitkeeper revision 1.2 (3ddb79c9KusG02eh7i-uXkgY0IksKA)
Import changeset
Diffstat (limited to 'xenolinux-2.4.16-sparse/arch/xeno/kernel/init_task.c')
-rw-r--r-- | xenolinux-2.4.16-sparse/arch/xeno/kernel/init_task.c | 33 |
1 files changed, 33 insertions, 0 deletions
/*
 * init_task.c - static definitions for the very first ("init") task.
 *
 * Everything here is built from INIT_* initializer macros supplied by the
 * kernel headers; this file only instantiates the objects once, at a fixed
 * place, so the boot path has a fully-formed task before any scheduler or
 * allocator is running.  (XenoLinux sparse tree, based on Linux 2.4.16.)
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

/*
 * Per-resource state for the initial task.  The first three are static
 * (file-local) because other code reaches them only through the task
 * structure; init_mm is a global because it is referenced by name from
 * elsewhere in the kernel.
 * NOTE(review): the INIT_FS/INIT_FILES/INIT_SIGNALS/INIT_MM expansions
 * live in the kernel headers and are not visible here.
 */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);

/*
 * Initial task structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 *
 * The union bundles the task_struct with its kernel stack; placing it in
 * the dedicated ".data.init_task" section lets the linker script enforce
 * the required alignment.
 */
union task_union init_task_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_TASK(init_task_union.task) };

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data.cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 *
 * The GNU range-designator "[0 ... NR_CPUS-1]" initializes every slot
 * with the same INIT_TSS template.
 */
struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS };