Diffstat (limited to 'linux-2.4.26-xen-sparse/include/linux')
-rw-r--r-- | linux-2.4.26-xen-sparse/include/linux/blk.h | 409
-rw-r--r-- | linux-2.4.26-xen-sparse/include/linux/major.h | 206
-rw-r--r-- | linux-2.4.26-xen-sparse/include/linux/sched.h | 968
-rw-r--r-- | linux-2.4.26-xen-sparse/include/linux/skbuff.h | 1174
-rw-r--r-- | linux-2.4.26-xen-sparse/include/linux/sunrpc/debug.h | 95
-rw-r--r-- | linux-2.4.26-xen-sparse/include/linux/timer.h | 77
6 files changed, 2929 insertions, 0 deletions
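The blk.h diff below implements the 2.4-era convention in which a block driver defines MAJOR_NR (and, as needed, DEVICE_NAME, DEVICE_NR, DEVICE_REQUEST) before including the header, which then instantiates CURRENT, QUEUE_EMPTY, INIT_REQUEST and end_request() for that major. As a minimal sketch of that usage, assuming a hypothetical driver (the mydrv/MYDRV_MAJOR names are illustrative and not part of this patch):

/* Hypothetical 2.4-style block driver: define the device macros
 * *before* including <linux/blk.h> so the header expands its
 * request machinery for this major. */
#define MAJOR_NR MYDRV_MAJOR              /* illustrative major number */
#define DEVICE_NAME "mydrv"
#define DEVICE_NR(device) (MINOR(device)) /* one device per minor */
#define DEVICE_REQUEST do_mydrv_request
#include <linux/blk.h>

/* Request function named by DEVICE_REQUEST above; the INIT_REQUEST
 * macro returns from this function once the queue is empty. */
static void do_mydrv_request(request_queue_t *q)
{
    for (;;) {
        INIT_REQUEST;                     /* sanity checks + empty-queue exit */
        /* ... transfer CURRENT->current_nr_sectors sectors at
         *     CURRENT->sector to/from CURRENT->buffer ... */
        end_request(1);                   /* 1 = success; advances the queue */
    }
}

A real driver would typically also attach this function to its queue at init time, e.g. blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_mydrv_request).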
diff --git a/linux-2.4.26-xen-sparse/include/linux/blk.h b/linux-2.4.26-xen-sparse/include/linux/blk.h new file mode 100644 index 0000000000..e7c7575454 --- /dev/null +++ b/linux-2.4.26-xen-sparse/include/linux/blk.h @@ -0,0 +1,409 @@ +#ifndef _BLK_H +#define _BLK_H + +#include <linux/blkdev.h> +#include <linux/locks.h> +#include <linux/config.h> +#include <linux/spinlock.h> + +/* + * Spinlock for protecting the request queue which + * is mucked around with in interrupts on potentially + * multiple CPU's.. + */ +extern spinlock_t io_request_lock; + +/* + * Initialization functions. + */ +extern int isp16_init(void); +extern int cdu31a_init(void); +extern int acsi_init(void); +extern int mcd_init(void); +extern int mcdx_init(void); +extern int sbpcd_init(void); +extern int aztcd_init(void); +extern int sony535_init(void); +extern int gscd_init(void); +extern int cm206_init(void); +extern int optcd_init(void); +extern int sjcd_init(void); +extern int cdi_init(void); +extern int hd_init(void); +extern int ide_init(void); +extern int xd_init(void); +extern int mfm_init(void); +extern int loop_init(void); +extern int md_init(void); +extern int ap_init(void); +extern int ddv_init(void); +extern int z2_init(void); +extern int swim3_init(void); +extern int swimiop_init(void); +extern int amiga_floppy_init(void); +extern int atari_floppy_init(void); +extern int ez_init(void); +extern int bpcd_init(void); +extern int ps2esdi_init(void); +extern int jsfd_init(void); +extern int viodasd_init(void); +extern int viocd_init(void); + +#if defined(CONFIG_ARCH_S390) +extern int dasd_init(void); +extern int xpram_init(void); +extern int tapeblock_init(void); +#endif /* CONFIG_ARCH_S390 */ + +#if defined(CONFIG_XEN_VBD) +extern int xlblk_init(void); +#endif /* CONFIG_XEN_VBD */ + +extern void set_device_ro(kdev_t dev,int flag); +void add_blkdev_randomness(int major); + +extern int floppy_init(void); +extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */ +extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */ +extern int rd_image_start; /* starting block # of image */ + +#ifdef CONFIG_BLK_DEV_INITRD + +#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ + +extern unsigned long initrd_start,initrd_end; +extern int initrd_below_start_ok; /* 1 if it is not an error if initrd_start < memory_start */ +void initrd_init(void); + +#endif + + +/* + * end_request() and friends. Must be called with the request queue spinlock + * acquired. All functions called within end_request() _must_be_ atomic. + * + * Several drivers define their own end_request and call + * end_that_request_first() and end_that_request_last() + * for parts of the original function. This prevents + * code duplication in drivers. + */ + +static inline void blkdev_dequeue_request(struct request * req) +{ + list_del(&req->queue); +} + +int end_that_request_first(struct request *req, int uptodate, char *name); +void end_that_request_last(struct request *req); + +#if defined(MAJOR_NR) || defined(IDE_DRIVER) + +#undef DEVICE_ON +#undef DEVICE_OFF + +/* + * Add entries as needed. 
+ */ + +#ifdef IDE_DRIVER + +#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS) +#define DEVICE_NAME "ide" + +#elif (MAJOR_NR == RAMDISK_MAJOR) + +/* ram disk */ +#define DEVICE_NAME "ramdisk" +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_NO_RANDOM + +#elif (MAJOR_NR == Z2RAM_MAJOR) + +/* Zorro II Ram */ +#define DEVICE_NAME "Z2RAM" +#define DEVICE_REQUEST do_z2_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == FLOPPY_MAJOR) + +static void floppy_off(unsigned int nr); + +#define DEVICE_NAME "floppy" +#define DEVICE_INTR do_floppy +#define DEVICE_REQUEST do_fd_request +#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 )) +#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device)) + +#elif (MAJOR_NR == HD_MAJOR) + +/* Hard disk: timeout is 6 seconds. */ +#define DEVICE_NAME "hard disk" +#define DEVICE_INTR do_hd +#define TIMEOUT_VALUE (6*HZ) +#define DEVICE_REQUEST do_hd_request +#define DEVICE_NR(device) (MINOR(device)>>6) + +#elif (SCSI_DISK_MAJOR(MAJOR_NR)) + +#define DEVICE_NAME "scsidisk" +#define TIMEOUT_VALUE (2*HZ) +#define DEVICE_NR(device) (((MAJOR(device) & SD_MAJOR_MASK) << (8 - 4)) + (MINOR(device) >> 4)) + +/* Kludge to use the same number for both char and block major numbers */ +#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER) + +#define DEVICE_NAME "Multiple devices driver" +#define DEVICE_REQUEST do_md_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == SCSI_TAPE_MAJOR) + +#define DEVICE_NAME "scsitape" +#define DEVICE_INTR do_st +#define DEVICE_NR(device) (MINOR(device) & 0x7f) + +#elif (MAJOR_NR == OSST_MAJOR) + +#define DEVICE_NAME "onstream" +#define DEVICE_INTR do_osst +#define DEVICE_NR(device) (MINOR(device) & 0x7f) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == SCSI_CDROM_MAJOR) + +#define DEVICE_NAME "CD-ROM" +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == XT_DISK_MAJOR) + +#define DEVICE_NAME "xt disk" +#define DEVICE_REQUEST do_xd_request +#define DEVICE_NR(device) (MINOR(device) >> 6) + +#elif (MAJOR_NR == PS2ESDI_MAJOR) + +#define DEVICE_NAME "PS/2 ESDI" +#define DEVICE_REQUEST do_ps2esdi_request +#define DEVICE_NR(device) (MINOR(device) >> 6) + +#elif (MAJOR_NR == CDU31A_CDROM_MAJOR) + +#define DEVICE_NAME "CDU31A" +#define DEVICE_REQUEST do_cdu31a_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == ACSI_MAJOR) && (defined(CONFIG_ATARI_ACSI) || defined(CONFIG_ATARI_ACSI_MODULE)) + +#define DEVICE_NAME "ACSI" +#define DEVICE_INTR do_acsi +#define DEVICE_REQUEST do_acsi_request +#define DEVICE_NR(device) (MINOR(device) >> 4) + +#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR) + +#define DEVICE_NAME "Mitsumi CD-ROM" +/* #define DEVICE_INTR do_mcd */ +#define DEVICE_REQUEST do_mcd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR) + +#define DEVICE_NAME "Mitsumi CD-ROM" +/* #define DEVICE_INTR do_mcdx */ +#define DEVICE_REQUEST do_mcdx_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #1" +#define DEVICE_REQUEST do_sbpcd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #2" +#define DEVICE_REQUEST do_sbpcd2_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #3" +#define 
DEVICE_REQUEST do_sbpcd3_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #4" +#define DEVICE_REQUEST do_sbpcd4_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == AZTECH_CDROM_MAJOR) + +#define DEVICE_NAME "Aztech CD-ROM" +#define DEVICE_REQUEST do_aztcd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == CDU535_CDROM_MAJOR) + +#define DEVICE_NAME "SONY-CDU535" +#define DEVICE_INTR do_cdu535 +#define DEVICE_REQUEST do_cdu535_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR) + +#define DEVICE_NAME "Goldstar R420" +#define DEVICE_REQUEST do_gscd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == CM206_CDROM_MAJOR) +#define DEVICE_NAME "Philips/LMS CD-ROM cm206" +#define DEVICE_REQUEST do_cm206_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == OPTICS_CDROM_MAJOR) + +#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM" +#define DEVICE_REQUEST do_optcd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == SANYO_CDROM_MAJOR) + +#define DEVICE_NAME "Sanyo H94A CD-ROM" +#define DEVICE_REQUEST do_sjcd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == APBLOCK_MAJOR) + +#define DEVICE_NAME "apblock" +#define DEVICE_REQUEST ap_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == DDV_MAJOR) + +#define DEVICE_NAME "ddv" +#define DEVICE_REQUEST ddv_request +#define DEVICE_NR(device) (MINOR(device)>>PARTN_BITS) + +#elif (MAJOR_NR == MFM_ACORN_MAJOR) + +#define DEVICE_NAME "mfm disk" +#define DEVICE_INTR do_mfm +#define DEVICE_REQUEST do_mfm_request +#define DEVICE_NR(device) (MINOR(device) >> 6) + +#elif (MAJOR_NR == NBD_MAJOR) + +#define DEVICE_NAME "nbd" +#define DEVICE_REQUEST do_nbd_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == MDISK_MAJOR) + +#define DEVICE_NAME "mdisk" +#define DEVICE_REQUEST mdisk_request +#define DEVICE_NR(device) (MINOR(device)) + +#elif (MAJOR_NR == DASD_MAJOR) + +#define DEVICE_NAME "dasd" +#define DEVICE_REQUEST do_dasd_request +#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS) + +#elif (MAJOR_NR == I2O_MAJOR) + +#define DEVICE_NAME "I2O block" +#define DEVICE_REQUEST i2ob_request +#define DEVICE_NR(device) (MINOR(device)>>4) + +#elif (MAJOR_NR == COMPAQ_SMART2_MAJOR) + +#define DEVICE_NAME "ida" +#define TIMEOUT_VALUE (25*HZ) +#define DEVICE_REQUEST do_ida_request +#define DEVICE_NR(device) (MINOR(device) >> 4) + +#endif /* MAJOR_NR == whatever */ + +/* provide DEVICE_xxx defaults, if not explicitly defined + * above in the MAJOR_NR==xxx if-elif tree */ +#ifndef DEVICE_ON +#define DEVICE_ON(device) do {} while (0) +#endif +#ifndef DEVICE_OFF +#define DEVICE_OFF(device) do {} while (0) +#endif + +#if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR) +#if !defined(IDE_DRIVER) + +#ifndef CURRENT +#define CURRENT blkdev_entry_next_request(&blk_dev[MAJOR_NR].request_queue.queue_head) +#endif +#ifndef QUEUE_EMPTY +#define QUEUE_EMPTY list_empty(&blk_dev[MAJOR_NR].request_queue.queue_head) +#endif + +#ifndef DEVICE_NAME +#define DEVICE_NAME "unknown" +#endif + +#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev) + +#ifdef DEVICE_INTR +static void (*DEVICE_INTR)(void) = NULL; +#endif + +#define SET_INTR(x) (DEVICE_INTR = (x)) + +#ifdef DEVICE_REQUEST +static void (DEVICE_REQUEST)(request_queue_t *); +#endif + +#ifdef DEVICE_INTR +#define CLEAR_INTR SET_INTR(NULL) 
+#else +#define CLEAR_INTR +#endif + +#define INIT_REQUEST \ + if (QUEUE_EMPTY) {\ + CLEAR_INTR; \ + return; \ + } \ + if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \ + panic(DEVICE_NAME ": request list destroyed"); \ + if (CURRENT->bh) { \ + if (!buffer_locked(CURRENT->bh)) \ + panic(DEVICE_NAME ": block not locked"); \ + } + +#endif /* !defined(IDE_DRIVER) */ + + +#ifndef LOCAL_END_REQUEST /* If we have our own end_request, we do not want to include this mess */ + +#if ! SCSI_BLK_MAJOR(MAJOR_NR) && (MAJOR_NR != COMPAQ_SMART2_MAJOR) + +static inline void end_request(int uptodate) { + struct request *req = CURRENT; + + if (end_that_request_first(req, uptodate, DEVICE_NAME)) + return; + +#ifndef DEVICE_NO_RANDOM + add_blkdev_randomness(MAJOR(req->rq_dev)); +#endif + DEVICE_OFF(req->rq_dev); + blkdev_dequeue_request(req); + end_that_request_last(req); +} + +#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */ +#endif /* LOCAL_END_REQUEST */ + +#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */ +#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */ + +#endif /* _BLK_H */ diff --git a/linux-2.4.26-xen-sparse/include/linux/major.h b/linux-2.4.26-xen-sparse/include/linux/major.h new file mode 100644 index 0000000000..f311bd4a9d --- /dev/null +++ b/linux-2.4.26-xen-sparse/include/linux/major.h @@ -0,0 +1,206 @@ +#ifndef _LINUX_MAJOR_H +#define _LINUX_MAJOR_H + +/* + * This file has definitions for major device numbers. + * For the device number assignments, see Documentation/devices.txt. + */ + +/* limits */ + +/* + * Important: Don't change this to 256. Major number 255 is and must be + * reserved for future expansion into a larger dev_t space. + */ +#define MAX_CHRDEV 255 +#define MAX_BLKDEV 255 + +#define UNNAMED_MAJOR 0 +#define MEM_MAJOR 1 +#define RAMDISK_MAJOR 1 +#define FLOPPY_MAJOR 2 +#define PTY_MASTER_MAJOR 2 +#define IDE0_MAJOR 3 +#define PTY_SLAVE_MAJOR 3 +#define HD_MAJOR IDE0_MAJOR +#define TTY_MAJOR 4 +#define TTYAUX_MAJOR 5 +#define LP_MAJOR 6 +#define VCS_MAJOR 7 +#define LOOP_MAJOR 7 +#define SCSI_DISK0_MAJOR 8 +#define SCSI_TAPE_MAJOR 9 +#define MD_MAJOR 9 +#define MISC_MAJOR 10 +#define SCSI_CDROM_MAJOR 11 +#define MUX_MAJOR 11 /* PA-RISC only */ +#define QIC02_TAPE_MAJOR 12 +#define XT_DISK_MAJOR 13 +#define SOUND_MAJOR 14 +#define CDU31A_CDROM_MAJOR 15 +#define JOYSTICK_MAJOR 15 +#define GOLDSTAR_CDROM_MAJOR 16 +#define OPTICS_CDROM_MAJOR 17 +#define SANYO_CDROM_MAJOR 18 +#define CYCLADES_MAJOR 19 +#define CYCLADESAUX_MAJOR 20 +#define MITSUMI_X_CDROM_MAJOR 20 +#define MFM_ACORN_MAJOR 21 /* ARM Linux /dev/mfm */ +#define SCSI_GENERIC_MAJOR 21 +#define Z8530_MAJOR 34 +#define DIGI_MAJOR 23 +#define IDE1_MAJOR 22 +#define DIGICU_MAJOR 22 +#define MITSUMI_CDROM_MAJOR 23 +#define CDU535_CDROM_MAJOR 24 +#define STL_SERIALMAJOR 24 +#define MATSUSHITA_CDROM_MAJOR 25 +#define STL_CALLOUTMAJOR 25 +#define MATSUSHITA_CDROM2_MAJOR 26 +#define QIC117_TAPE_MAJOR 27 +#define MATSUSHITA_CDROM3_MAJOR 27 +#define MATSUSHITA_CDROM4_MAJOR 28 +#define STL_SIOMEMMAJOR 28 +#define ACSI_MAJOR 28 +#define AZTECH_CDROM_MAJOR 29 +#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */ +#define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */ +#define CM206_CDROM_MAJOR 32 +#define IDE2_MAJOR 33 +#define IDE3_MAJOR 34 +#define XPRAM_MAJOR 35 /* expanded storage on S/390 = "slow ram" */ + /* proposed by Peter */ +#define NETLINK_MAJOR 36 +#define PS2ESDI_MAJOR 36 +#define IDETAPE_MAJOR 37 +#define Z2RAM_MAJOR 37 +#define APBLOCK_MAJOR 38 /* AP1000 Block device */ +#define DDV_MAJOR 39 /* AP1000 DDV block device */ +#define 
NBD_MAJOR 43 /* Network block device */ +#define RISCOM8_NORMAL_MAJOR 48 +#define DAC960_MAJOR 48 /* 48..55 */ +#define RISCOM8_CALLOUT_MAJOR 49 +#define MKISS_MAJOR 55 +#define DSP56K_MAJOR 55 /* DSP56001 processor device */ + +#define IDE4_MAJOR 56 +#define IDE5_MAJOR 57 + +#define LVM_BLK_MAJOR 58 /* Logical Volume Manager */ + +#define SCSI_DISK1_MAJOR 65 +#define SCSI_DISK2_MAJOR 66 +#define SCSI_DISK3_MAJOR 67 +#define SCSI_DISK4_MAJOR 68 +#define SCSI_DISK5_MAJOR 69 +#define SCSI_DISK6_MAJOR 70 +#define SCSI_DISK7_MAJOR 71 + + +#define COMPAQ_SMART2_MAJOR 72 +#define COMPAQ_SMART2_MAJOR1 73 +#define COMPAQ_SMART2_MAJOR2 74 +#define COMPAQ_SMART2_MAJOR3 75 +#define COMPAQ_SMART2_MAJOR4 76 +#define COMPAQ_SMART2_MAJOR5 77 +#define COMPAQ_SMART2_MAJOR6 78 +#define COMPAQ_SMART2_MAJOR7 79 + +#define SPECIALIX_NORMAL_MAJOR 75 +#define SPECIALIX_CALLOUT_MAJOR 76 + +#define COMPAQ_CISS_MAJOR 104 +#define COMPAQ_CISS_MAJOR1 105 +#define COMPAQ_CISS_MAJOR2 106 +#define COMPAQ_CISS_MAJOR3 107 +#define COMPAQ_CISS_MAJOR4 108 +#define COMPAQ_CISS_MAJOR5 109 +#define COMPAQ_CISS_MAJOR6 110 +#define COMPAQ_CISS_MAJOR7 111 + +#define ATARAID_MAJOR 114 + +#define DASD_MAJOR 94 /* Official assignations from Peter */ + +#define MDISK_MAJOR 95 /* Official assignations from Peter */ + +#define I2O_MAJOR 80 /* 80->87 */ + +#define IDE6_MAJOR 88 +#define IDE7_MAJOR 89 +#define IDE8_MAJOR 90 +#define IDE9_MAJOR 91 + +#define UBD_MAJOR 98 + +#define AURORA_MAJOR 79 + +#define JSFD_MAJOR 99 + +#define PHONE_MAJOR 100 + +#define LVM_CHAR_MAJOR 109 /* Logical Volume Manager */ + +#define UMEM_MAJOR 116 /* http://www.umem.com/ Battery Backed RAM */ + +/* + * XLVIRT supports 16 devices of <= 16 partitions each. + * eg. xvda == (125, 0), xvdb == (125, 16), ... + */ +#define XLVIRT_MAJOR 125 + +#define RTF_MAJOR 150 +#define RAW_MAJOR 162 + +#define USB_ACM_MAJOR 166 +#define USB_ACM_AUX_MAJOR 167 +#define USB_CHAR_MAJOR 180 + +#define UNIX98_PTY_MASTER_MAJOR 128 +#define UNIX98_PTY_MAJOR_COUNT 8 +#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT) + +#define VXVM_MAJOR 199 /* VERITAS volume i/o driver */ +#define VXSPEC_MAJOR 200 /* VERITAS volume config driver */ +#define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */ + +#define MSR_MAJOR 202 +#define CPUID_MAJOR 203 + +#define OSST_MAJOR 206 /* OnStream-SCx0 SCSI tape */ + +#define IBM_TTY3270_MAJOR 227 /* Official allocations now */ +#define IBM_FS3270_MAJOR 228 + +/* + * Tests for SCSI devices. 
+ */ + +#define SCSI_DISK_MAJOR(M) ((M) == SCSI_DISK0_MAJOR || \ + ((M) >= SCSI_DISK1_MAJOR && (M) <= SCSI_DISK7_MAJOR)) + +#define SCSI_BLK_MAJOR(M) \ + (SCSI_DISK_MAJOR(M) \ + || (M) == SCSI_CDROM_MAJOR) + +static __inline__ int scsi_blk_major(int m) { + return SCSI_BLK_MAJOR(m); +} + +/* + * Tests for IDE devices + */ +#define IDE_DISK_MAJOR(M) ((M) == IDE0_MAJOR || (M) == IDE1_MAJOR || \ + (M) == IDE2_MAJOR || (M) == IDE3_MAJOR || \ + (M) == IDE4_MAJOR || (M) == IDE5_MAJOR || \ + (M) == IDE6_MAJOR || (M) == IDE7_MAJOR || \ + (M) == IDE8_MAJOR || (M) == IDE9_MAJOR) + +static __inline__ int ide_blk_major(int m) +{ + return IDE_DISK_MAJOR(m); +} + +#endif diff --git a/linux-2.4.26-xen-sparse/include/linux/sched.h b/linux-2.4.26-xen-sparse/include/linux/sched.h new file mode 100644 index 0000000000..1ac29aa143 --- /dev/null +++ b/linux-2.4.26-xen-sparse/include/linux/sched.h @@ -0,0 +1,968 @@ +#ifndef _LINUX_SCHED_H +#define _LINUX_SCHED_H + +#include <asm/param.h> /* for HZ */ + +extern unsigned long event; + +#include <linux/config.h> +#include <linux/binfmts.h> +#include <linux/threads.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/times.h> +#include <linux/timex.h> +#include <linux/rbtree.h> + +#include <asm/system.h> +#include <asm/semaphore.h> +#include <asm/page.h> +#include <asm/ptrace.h> +#include <asm/mmu.h> + +#include <linux/smp.h> +#include <linux/tty.h> +#include <linux/sem.h> +#include <linux/signal.h> +#include <linux/securebits.h> +#include <linux/fs_struct.h> + +struct exec_domain; + +/* + * cloning flags: + */ +#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ +#define CLONE_VM 0x00000100 /* set if VM shared between processes */ +#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ +#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ +#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ +#define CLONE_PID 0x00001000 /* set if pid shared */ +#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ +#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ +#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ +#define CLONE_THREAD 0x00010000 /* Same thread group? */ +#define CLONE_NEWNS 0x00020000 /* New namespace group? */ + +#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD) + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. 
+ */ +extern unsigned long avenrun[]; /* Load averages */ + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ +#define LOAD_FREQ (5*HZ) /* 5 sec intervals */ +#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ +#define EXP_5 2014 /* 1/exp(5sec/5min) */ +#define EXP_15 2037 /* 1/exp(5sec/15min) */ + +#define CALC_LOAD(load,exp,n) \ + load *= exp; \ + load += n*(FIXED_1-exp); \ + load >>= FSHIFT; + +#define CT_TO_SECS(x) ((x) / HZ) +#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) + +extern int nr_running, nr_threads; +extern int last_pid; + +#include <linux/fs.h> +#include <linux/time.h> +#include <linux/param.h> +#include <linux/resource.h> +#ifdef __KERNEL__ +#include <linux/timer.h> +#endif + +#include <asm/processor.h> + +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define TASK_ZOMBIE 4 +#define TASK_STOPPED 8 + +#define __set_task_state(tsk, state_value) \ + do { (tsk)->state = (state_value); } while (0) +#define set_task_state(tsk, state_value) \ + set_mb((tsk)->state, (state_value)) + +#define __set_current_state(state_value) \ + do { current->state = (state_value); } while (0) +#define set_current_state(state_value) \ + set_mb(current->state, (state_value)) + +/* + * Scheduling policies + */ +#define SCHED_OTHER 0 +#define SCHED_FIFO 1 +#define SCHED_RR 2 + +/* + * This is an additional bit set when we want to + * yield the CPU for one re-schedule.. + */ +#define SCHED_YIELD 0x10 + +struct sched_param { + int sched_priority; +}; + +struct completion; + +#ifdef __KERNEL__ + +#include <linux/spinlock.h> + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t runqueue_lock; +extern spinlock_t mmlist_lock; + +extern void sched_init(void); +extern void init_idle(void); +extern void show_state(void); +extern void cpu_init (void); +extern void trap_init(void); +extern void update_process_times(int user); +#ifdef CONFIG_NO_IDLE_HZ +extern void update_process_times_us(int user, int system); +#endif +extern void update_one_process(struct task_struct *p, unsigned long user, + unsigned long system, int cpu); + +#define MAX_SCHEDULE_TIMEOUT LONG_MAX +extern signed long FASTCALL(schedule_timeout(signed long timeout)); +asmlinkage void schedule(void); + +extern int schedule_task(struct tq_struct *task); +extern void flush_scheduled_tasks(void); +extern int start_context_thread(void); +extern int current_is_keventd(void); + +#if CONFIG_SMP +extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask); +#else +# define set_cpus_allowed(p, new_mask) do { } while (0) +#endif + +/* + * The default fd array needs to be at least BITS_PER_LONG, + * as this is the granularity returned by copy_fdset(). + */ +#define NR_OPEN_DEFAULT BITS_PER_LONG + +struct namespace; +/* + * Open file table structure + */ +struct files_struct { + atomic_t count; + rwlock_t file_lock; /* Protects all the below members. 
Nests inside tsk->alloc_lock */ + int max_fds; + int max_fdset; + int next_fd; + struct file ** fd; /* current fd array */ + fd_set *close_on_exec; + fd_set *open_fds; + fd_set close_on_exec_init; + fd_set open_fds_init; + struct file * fd_array[NR_OPEN_DEFAULT]; +}; + +#define INIT_FILES \ +{ \ + count: ATOMIC_INIT(1), \ + file_lock: RW_LOCK_UNLOCKED, \ + max_fds: NR_OPEN_DEFAULT, \ + max_fdset: __FD_SETSIZE, \ + next_fd: 0, \ + fd: &init_files.fd_array[0], \ + close_on_exec: &init_files.close_on_exec_init, \ + open_fds: &init_files.open_fds_init, \ + close_on_exec_init: { { 0, } }, \ + open_fds_init: { { 0, } }, \ + fd_array: { NULL, } \ +} + +/* Maximum number of active map areas.. This is a random (large) number */ +#define DEFAULT_MAX_MAP_COUNT (65536) + +extern int max_map_count; + +struct mm_struct { + struct vm_area_struct * mmap; /* list of VMAs */ + rb_root_t mm_rb; + struct vm_area_struct * mmap_cache; /* last find_vma result */ + pgd_t * pgd; + atomic_t mm_users; /* How many users with user space? */ + atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + int map_count; /* number of VMAs */ + struct rw_semaphore mmap_sem; + spinlock_t page_table_lock; /* Protects task page tables and mm->rss */ + + struct list_head mmlist; /* List of all active mm's. These are globally strung + * together off init_mm.mmlist, and are protected + * by mmlist_lock + */ + + unsigned long start_code, end_code, start_data, end_data; + unsigned long start_brk, brk, start_stack; + unsigned long arg_start, arg_end, env_start, env_end; + unsigned long rss, total_vm, locked_vm; + unsigned long def_flags; + unsigned long cpu_vm_mask; + unsigned long swap_address; + + unsigned dumpable:1; + + /* Architecture-specific MM context */ + mm_context_t context; +}; + +extern int mmlist_nr; + +#define INIT_MM(name) \ +{ \ + mm_rb: RB_ROOT, \ + pgd: swapper_pg_dir, \ + mm_users: ATOMIC_INIT(2), \ + mm_count: ATOMIC_INIT(1), \ + mmap_sem: __RWSEM_INITIALIZER(name.mmap_sem), \ + page_table_lock: SPIN_LOCK_UNLOCKED, \ + mmlist: LIST_HEAD_INIT(name.mmlist), \ +} + +struct signal_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; +}; + + +#define INIT_SIGNALS { \ + count: ATOMIC_INIT(1), \ + action: { {{0,}}, }, \ + siglock: SPIN_LOCK_UNLOCKED \ +} + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t files; /* How many open files does this user have? */ + + /* Hash table maintenance information */ + struct user_struct *next, **pprev; + uid_t uid; +}; + +#define get_current_user() ({ \ + struct user_struct *__user = current->user; \ + atomic_inc(&__user->__count); \ + __user; }) + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + +struct task_struct { + /* + * offsets of these are hardcoded elsewhere - touch with care + */ + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + unsigned long flags; /* per process flags, defined below */ + int sigpending; + mm_segment_t addr_limit; /* thread address space: + 0-0xBFFFFFFF for user-thead + 0-0xFFFFFFFF for kernel-thread + */ + struct exec_domain *exec_domain; + volatile long need_resched; + unsigned long ptrace; + + int lock_depth; /* Lock depth */ + +/* + * offset 32 begins here on 32-bit platforms. We keep + * all fields in a single cacheline that are needed for + * the goodness() loop in schedule(). 
+ */ + long counter; + long nice; + unsigned long policy; + struct mm_struct *mm; + int processor; + /* + * cpus_runnable is ~0 if the process is not running on any + * CPU. It's (1 << cpu) if it's running on a CPU. This mask + * is updated under the runqueue lock. + * + * To determine whether a process might run on a CPU, this + * mask is AND-ed with cpus_allowed. + */ + unsigned long cpus_runnable, cpus_allowed; + /* + * (only the 'next' pointer fits into the cacheline, but + * that's just fine.) + */ + struct list_head run_list; + unsigned long sleep_time; + + struct task_struct *next_task, *prev_task; + struct mm_struct *active_mm; + struct list_head local_pages; + unsigned int allocation_order, nr_local_pages; + +/* task state */ + struct linux_binfmt *binfmt; + int exit_code, exit_signal; + int pdeath_signal; /* The signal sent when the parent dies */ + /* ??? */ + unsigned long personality; + int did_exec:1; + unsigned task_dumpable:1; + pid_t pid; + pid_t pgrp; + pid_t tty_old_pgrp; + pid_t session; + pid_t tgid; + /* boolean value for session group leader */ + int leader; + /* + * pointers to (original) parent process, youngest child, younger sibling, + * older sibling, respectively. (p->father can be replaced with + * p->p_pptr->pid) + */ + struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; + struct list_head thread_group; + + /* PID hash table linkage. */ + struct task_struct *pidhash_next; + struct task_struct **pidhash_pprev; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + struct completion *vfork_done; /* for vfork() */ + unsigned long rt_priority; + unsigned long it_real_value, it_prof_value, it_virt_value; + unsigned long it_real_incr, it_prof_incr, it_virt_incr; + struct timer_list real_timer; + struct tms times; + unsigned long start_time; + long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS]; +/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ + unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap; + int swappable:1; +/* process credentials */ + uid_t uid,euid,suid,fsuid; + gid_t gid,egid,sgid,fsgid; + int ngroups; + gid_t groups[NGROUPS]; + kernel_cap_t cap_effective, cap_inheritable, cap_permitted; + int keep_capabilities:1; + struct user_struct *user; +/* limits */ + struct rlimit rlim[RLIM_NLIMITS]; + unsigned short used_math; + char comm[16]; +/* file system info */ + int link_count, total_link_count; + struct tty_struct *tty; /* NULL if no tty */ + unsigned int locks; /* How many file locks are being held */ +/* ipc stuff */ + struct sem_undo *semundo; + struct sem_queue *semsleeping; +/* CPU-specific state of this task */ + struct thread_struct thread; +/* filesystem information */ + struct fs_struct *fs; +/* open file information */ + struct files_struct *files; +/* namespace */ + struct namespace *namespace; +/* signal handlers */ + spinlock_t sigmask_lock; /* Protects signal and blocked */ + struct signal_struct *sig; + + sigset_t blocked; + struct sigpending pending; + + unsigned long sas_ss_sp; + size_t sas_ss_size; + int (*notifier)(void *priv); + void *notifier_data; + sigset_t *notifier_mask; + +/* Thread group tracking */ + u32 parent_exec_id; + u32 self_exec_id; +/* Protection of (de-)allocation: mm, files, fs, tty */ + spinlock_t alloc_lock; + +/* journalling filesystem info */ + void *journal_info; +}; + +/* + * Per process flags + */ +#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ + /* Not implemented yet, only for 486*/ +#define PF_STARTING 
0x00000002 /* being created */ +#define PF_EXITING 0x00000004 /* getting shut down */ +#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* dumped core */ +#define PF_SIGNALED 0x00000400 /* killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */ +#define PF_FREE_PAGES 0x00002000 /* per process page freeing */ +#define PF_NOIO 0x00004000 /* avoid generating further I/O */ +#define PF_FSTRANS 0x00008000 /* inside a filesystem transaction */ + +#define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */ + +/* + * Ptrace flags + */ + +#define PT_PTRACED 0x00000001 +#define PT_TRACESYS 0x00000002 +#define PT_DTRACE 0x00000004 /* delayed trace (used on m68k, i386) */ +#define PT_TRACESYSGOOD 0x00000008 +#define PT_PTRACE_CAP 0x00000010 /* ptracer can follow suid-exec */ + +#define is_dumpable(tsk) ((tsk)->task_dumpable && (tsk)->mm && (tsk)->mm->dumpable) + +/* + * Limit the stack by to some sane default: root can always + * increase this limit if needed.. 8MB seems reasonable. + */ +#define _STK_LIM (8*1024*1024) + +#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */ +#define MAX_COUNTER (20*HZ/100) +#define DEF_NICE (0) + +extern void yield(void); + +/* + * The default (Linux) execution domain. + */ +extern struct exec_domain default_exec_domain; + +/* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. Base=0, limit=0x1fffff (=2MB) + */ +#define INIT_TASK(tsk) \ +{ \ + state: 0, \ + flags: 0, \ + sigpending: 0, \ + addr_limit: KERNEL_DS, \ + exec_domain: &default_exec_domain, \ + lock_depth: -1, \ + counter: DEF_COUNTER, \ + nice: DEF_NICE, \ + policy: SCHED_OTHER, \ + mm: NULL, \ + active_mm: &init_mm, \ + cpus_runnable: ~0UL, \ + cpus_allowed: ~0UL, \ + run_list: LIST_HEAD_INIT(tsk.run_list), \ + next_task: &tsk, \ + prev_task: &tsk, \ + p_opptr: &tsk, \ + p_pptr: &tsk, \ + thread_group: LIST_HEAD_INIT(tsk.thread_group), \ + wait_chldexit: __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\ + real_timer: { \ + function: it_real_fn \ + }, \ + cap_effective: CAP_INIT_EFF_SET, \ + cap_inheritable: CAP_INIT_INH_SET, \ + cap_permitted: CAP_FULL_SET, \ + keep_capabilities: 0, \ + rlim: INIT_RLIMITS, \ + user: INIT_USER, \ + comm: "swapper", \ + thread: INIT_THREAD, \ + fs: &init_fs, \ + files: &init_files, \ + sigmask_lock: SPIN_LOCK_UNLOCKED, \ + sig: &init_signals, \ + pending: { NULL, &tsk.pending.head, {{0}}}, \ + blocked: {{0}}, \ + alloc_lock: SPIN_LOCK_UNLOCKED, \ + journal_info: NULL, \ +} + + +#ifndef INIT_TASK_SIZE +# define INIT_TASK_SIZE 2048*sizeof(long) +#endif + +union task_union { + struct task_struct task; + unsigned long stack[INIT_TASK_SIZE/sizeof(long)]; +}; + +extern union task_union init_task_union; + +extern struct mm_struct init_mm; +extern struct task_struct *init_tasks[NR_CPUS]; + +/* PID hashing. (shouldnt this be dynamic?) 
*/ +#define PIDHASH_SZ (4096 >> 2) +extern struct task_struct *pidhash[PIDHASH_SZ]; + +#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1)) + +static inline void hash_pid(struct task_struct *p) +{ + struct task_struct **htable = &pidhash[pid_hashfn(p->pid)]; + + if((p->pidhash_next = *htable) != NULL) + (*htable)->pidhash_pprev = &p->pidhash_next; + *htable = p; + p->pidhash_pprev = htable; +} + +static inline void unhash_pid(struct task_struct *p) +{ + if(p->pidhash_next) + p->pidhash_next->pidhash_pprev = p->pidhash_pprev; + *p->pidhash_pprev = p->pidhash_next; +} + +static inline struct task_struct *find_task_by_pid(int pid) +{ + struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)]; + + for(p = *htable; p && p->pid != pid; p = p->pidhash_next) + ; + + return p; +} + +#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL) + +static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu) +{ + tsk->processor = cpu; + tsk->cpus_runnable = 1UL << cpu; +} + +static inline void task_release_cpu(struct task_struct *tsk) +{ + tsk->cpus_runnable = ~0UL; +} + +/* per-UID process charging. */ +extern struct user_struct * alloc_uid(uid_t); +extern void free_uid(struct user_struct *); +extern void switch_uid(struct user_struct *); + +#include <asm/current.h> + +extern unsigned long volatile jiffies; +extern unsigned long itimer_ticks; +extern unsigned long itimer_next; +extern struct timeval xtime; +extern void do_timer(struct pt_regs *); +#ifdef CONFIG_NO_IDLE_HZ +extern void do_timer_ticks(int ticks); +#endif + +extern unsigned int * prof_buffer; +extern unsigned long prof_len; +extern unsigned long prof_shift; + +#define CURRENT_TIME (xtime.tv_sec) + +extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr)); +extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)); +extern void FASTCALL(sleep_on(wait_queue_head_t *q)); +extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, + signed long timeout)); +extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q)); +extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, + signed long timeout)); +extern int FASTCALL(wake_up_process(struct task_struct * tsk)); + +#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) +#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) +#define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) +#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) +#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) +#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) +#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) +#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) +#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) +#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr) +asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); + +extern int in_group_p(gid_t); +extern int in_egroup_p(gid_t); + +extern void proc_caches_init(void); +extern void flush_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *); +extern void sig_exit(int, int, struct siginfo *); +extern int dequeue_signal(sigset_t *, siginfo_t *); +extern void block_all_signals(int (*notifier)(void 
*priv), void *priv, + sigset_t *mask); +extern void unblock_all_signals(void); +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int kill_pg_info(int, struct siginfo *, pid_t); +extern int kill_sl_info(int, struct siginfo *, pid_t); +extern int kill_proc_info(int, struct siginfo *, pid_t); +extern void notify_parent(struct task_struct *, int); +extern void do_notify_parent(struct task_struct *, int); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int kill_pg(pid_t, int, int); +extern int kill_sl(pid_t, int, int); +extern int kill_proc(pid_t, int, int); +extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); +extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); + +static inline int signal_pending(struct task_struct *p) +{ + return (p->sigpending != 0); +} + +/* + * Re-calculate pending state from the set of locally pending + * signals, globally pending signals, and blocked signals. + */ +static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) +{ + unsigned long ready; + long i; + + switch (_NSIG_WORDS) { + default: + for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) + ready |= signal->sig[i] &~ blocked->sig[i]; + break; + + case 4: ready = signal->sig[3] &~ blocked->sig[3]; + ready |= signal->sig[2] &~ blocked->sig[2]; + ready |= signal->sig[1] &~ blocked->sig[1]; + ready |= signal->sig[0] &~ blocked->sig[0]; + break; + + case 2: ready = signal->sig[1] &~ blocked->sig[1]; + ready |= signal->sig[0] &~ blocked->sig[0]; + break; + + case 1: ready = signal->sig[0] &~ blocked->sig[0]; + } + return ready != 0; +} + +/* Reevaluate whether the task has signals pending delivery. + This is required every time the blocked sigset_t changes. + All callers should have t->sigmask_lock. */ + +static inline void recalc_sigpending(struct task_struct *t) +{ + t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked); +} + +/* True if we are on the alternate signal stack. */ + +static inline int on_sig_stack(unsigned long sp) +{ + return (sp - current->sas_ss_sp < current->sas_ss_size); +} + +static inline int sas_ss_flags(unsigned long sp) +{ + return (current->sas_ss_size == 0 ? SS_DISABLE + : on_sig_stack(sp) ? SS_ONSTACK : 0); +} + +extern int request_irq(unsigned int, + void (*handler)(int, void *, struct pt_regs *), + unsigned long, const char *, void *); +extern void free_irq(unsigned int, void *); + +/* + * This has now become a routine instead of a macro, it sets a flag if + * it returns true (to do BSD-style accounting where the process is flagged + * if it uses root privs). The implication of this is that you should do + * normal permissions checks first, and check suser() last. + * + * [Dec 1997 -- Chris Evans] + * For correctness, the above considerations need to be extended to + * fsuser(). This is done, along with moving fsuser() checks to be + * last. + * + * These will be removed, but in the mean time, when the SECURE_NOROOT + * flag is set, uids don't grant privilege. + */ +static inline int suser(void) +{ + if (!issecure(SECURE_NOROOT) && current->euid == 0) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; +} + +static inline int fsuser(void) +{ + if (!issecure(SECURE_NOROOT) && current->fsuid == 0) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; +} + +/* + * capable() checks for a particular capability. 
+ * New privilege checks should use this interface, rather than suser() or + * fsuser(). See include/linux/capability.h for defined capabilities. + */ + +static inline int capable(int cap) +{ +#if 1 /* ok now */ + if (cap_raised(current->cap_effective, cap)) +#else + if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0) +#endif + { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; +} + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct * mm_alloc(void); + +extern struct mm_struct * start_lazy_tlb(void); +extern void end_lazy_tlb(struct mm_struct *mm); + +/* mmdrop drops the mm and the page tables */ +extern inline void FASTCALL(__mmdrop(struct mm_struct *)); +static inline void mmdrop(struct mm_struct * mm) +{ + if (atomic_dec_and_test(&mm->mm_count)) + __mmdrop(mm); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(void); + +/* + * Routines for handling the fd arrays + */ +extern struct file ** alloc_fd_array(int); +extern int expand_fd_array(struct files_struct *, int nr); +extern void free_fd_array(struct file **, int); + +extern fd_set *alloc_fdset(int); +extern int expand_fdset(struct files_struct *, int nr); +extern void free_fdset(fd_set *, int); + +extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); +extern void flush_thread(void); +extern void exit_thread(void); + +extern void exit_mm(struct task_struct *); +extern void exit_files(struct task_struct *); +extern void exit_sighand(struct task_struct *); + +extern void reparent_to_init(void); +extern void daemonize(void); + +extern int do_execve(char *, char **, char **, struct pt_regs *); +extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long); + +extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); +extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); +extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); + +extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +#define __wait_event(wq, condition) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_UNINTERRUPTIBLE); \ + if (condition) \ + break; \ + schedule(); \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event(wq, condition) \ +do { \ + if (condition) \ + break; \ + __wait_event(wq, condition); \ +} while (0) + +#define __wait_event_interruptible(wq, condition, ret) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + schedule(); \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event_interruptible(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __wait_event_interruptible(wq, condition, __ret); \ + __ret; \ +}) + +#define REMOVE_LINKS(p) do { \ + (p)->next_task->prev_task = (p)->prev_task; \ + (p)->prev_task->next_task = (p)->next_task; \ + if ((p)->p_osptr) \ + (p)->p_osptr->p_ysptr = (p)->p_ysptr; \ + 
if ((p)->p_ysptr) \ + (p)->p_ysptr->p_osptr = (p)->p_osptr; \ + else \ + (p)->p_pptr->p_cptr = (p)->p_osptr; \ + } while (0) + +#define SET_LINKS(p) do { \ + (p)->next_task = &init_task; \ + (p)->prev_task = init_task.prev_task; \ + init_task.prev_task->next_task = (p); \ + init_task.prev_task = (p); \ + (p)->p_ysptr = NULL; \ + if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \ + (p)->p_osptr->p_ysptr = p; \ + (p)->p_pptr->p_cptr = p; \ + } while (0) + +#define for_each_task(p) \ + for (p = &init_task ; (p = p->next_task) != &init_task ; ) + +#define for_each_thread(task) \ + for (task = next_thread(current) ; task != current ; task = next_thread(task)) + +#define next_thread(p) \ + list_entry((p)->thread_group.next, struct task_struct, thread_group) + +#define thread_group_leader(p) (p->pid == p->tgid) + +static inline void del_from_runqueue(struct task_struct * p) +{ + nr_running--; + p->sleep_time = jiffies; + list_del(&p->run_list); + p->run_list.next = NULL; +} + +static inline int task_on_runqueue(struct task_struct *p) +{ + return (p->run_list.next != NULL); +} + +static inline void unhash_process(struct task_struct *p) +{ + if (task_on_runqueue(p)) + out_of_line_bug(); + write_lock_irq(&tasklist_lock); + nr_threads--; + unhash_pid(p); + REMOVE_LINKS(p); + list_del(&p->thread_group); + write_unlock_irq(&tasklist_lock); +} + +/* Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +/* write full pathname into buffer and return start of pathname */ +static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, + char *buf, int buflen) +{ + char *res; + struct vfsmount *rootmnt; + struct dentry *root; + read_lock(¤t->fs->lock); + rootmnt = mntget(current->fs->rootmnt); + root = dget(current->fs->root); + read_unlock(¤t->fs->lock); + spin_lock(&dcache_lock); + res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); + spin_unlock(&dcache_lock); + dput(root); + mntput(rootmnt); + return res; +} + +static inline int need_resched(void) +{ + return (unlikely(current->need_resched)); +} + +extern void __cond_resched(void); +static inline void cond_resched(void) +{ + if (need_resched()) + __cond_resched(); +} + +#endif /* __KERNEL__ */ +#endif diff --git a/linux-2.4.26-xen-sparse/include/linux/skbuff.h b/linux-2.4.26-xen-sparse/include/linux/skbuff.h new file mode 100644 index 0000000000..8c2b49b03e --- /dev/null +++ b/linux-2.4.26-xen-sparse/include/linux/skbuff.h @@ -0,0 +1,1174 @@ +/* + * Definitions for the 'struct sk_buff' memory handlers. + * + * Authors: + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * Florian La Roche, <rzsfl@rz.uni-sb.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_SKBUFF_H +#define _LINUX_SKBUFF_H + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/time.h> +#include <linux/cache.h> + +#include <asm/atomic.h> +#include <asm/types.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/highmem.h> + +#define HAVE_ALLOC_SKB /* For the drivers to know */ +#define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +#define SLAB_SKB /* Slabified skbuffs */ + +#define CHECKSUM_NONE 0 +#define CHECKSUM_HW 1 +#define CHECKSUM_UNNECESSARY 2 + +#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1)) +#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1)) +#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0)) +#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2)) + +/* A. Checksumming of received packets by device. + * + * NONE: device failed to checksum this packet. + * skb->csum is undefined. + * + * UNNECESSARY: device parsed packet and wouldbe verified checksum. + * skb->csum is undefined. + * It is bad option, but, unfortunately, many of vendors do this. + * Apparently with secret goal to sell you new device, when you + * will add new protocol to your host. F.e. IPv6. 8) + * + * HW: the most generic way. Device supplied checksum of _all_ + * the packet as seen by netif_rx in skb->csum. + * NOTE: Even if device supports only some protocols, but + * is able to produce some skb->csum, it MUST use HW, + * not UNNECESSARY. + * + * B. Checksumming on output. + * + * NONE: skb is checksummed by protocol or csum is not required. + * + * HW: device is required to csum packet as seen by hard_start_xmit + * from skb->h.raw to the end and to record the checksum + * at skb->h.raw+skb->csum. + * + * Device must show its capabilities in dev->features, set + * at device setup time. + * NETIF_F_HW_CSUM - it is clever device, it is able to checksum + * everything. + * NETIF_F_NO_CSUM - loopback or reliable single hop media. + * NETIF_F_IP_CSUM - device is dumb. It is able to csum only + * TCP/UDP over IPv4. Sigh. Vendors like this + * way by an unknown reason. Though, see comment above + * about CHECKSUM_UNNECESSARY. 8) + * + * Any questions? No questions, good. --ANK + */ + +#ifdef __i386__ +#define NET_CALLER(arg) (*(((void**)&arg)-1)) +#else +#define NET_CALLER(arg) __builtin_return_address(0) +#endif + +#ifdef CONFIG_NETFILTER +struct nf_conntrack { + atomic_t use; + void (*destroy)(struct nf_conntrack *); +}; + +struct nf_ct_info { + struct nf_conntrack *master; +}; +#endif + +struct sk_buff_head { + /* These two members must be first. */ + struct sk_buff * next; + struct sk_buff * prev; + + __u32 qlen; + spinlock_t lock; +}; + +struct sk_buff; + +#define MAX_SKB_FRAGS 6 + +typedef struct skb_frag_struct skb_frag_t; + +struct skb_frag_struct +{ + struct page *page; + __u16 page_offset; + __u16 size; +}; + +/* This data is invariant across clones and lives at + * the end of the header data, ie. at skb->end. + */ +struct skb_shared_info { + atomic_t dataref; + unsigned int nr_frags; + struct sk_buff *frag_list; + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +struct sk_buff { + /* These two members must be first. 
*/ + struct sk_buff * next; /* Next buffer in list */ + struct sk_buff * prev; /* Previous buffer in list */ + + struct sk_buff_head * list; /* List we are on */ + struct sock *sk; /* Socket we are owned by */ + struct timeval stamp; /* Time we arrived */ + struct net_device *dev; /* Device we arrived on/are leaving by */ + struct net_device *real_dev; /* For support of point to point protocols + (e.g. 802.3ad) over bonding, we must save the + physical device that got the packet before + replacing skb->dev with the virtual device. */ + + /* Transport layer header */ + union + { + struct tcphdr *th; + struct udphdr *uh; + struct icmphdr *icmph; + struct igmphdr *igmph; + struct iphdr *ipiph; + struct spxhdr *spxh; + unsigned char *raw; + } h; + + /* Network layer header */ + union + { + struct iphdr *iph; + struct ipv6hdr *ipv6h; + struct arphdr *arph; + struct ipxhdr *ipxh; + unsigned char *raw; + } nh; + + /* Link layer header */ + union + { + struct ethhdr *ethernet; + unsigned char *raw; + } mac; + + struct dst_entry *dst; + + /* + * This is the control buffer. It is free to use for every + * layer. Please put your private variables there. If you + * want to keep them across layers you have to do a skb_clone() + * first. This is owned by whoever has the skb queued ATM. + */ + char cb[48]; + + unsigned int len; /* Length of actual data */ + unsigned int data_len; + unsigned int csum; /* Checksum */ + unsigned char __unused, /* Dead field, may be reused */ + cloned, /* head may be cloned (check refcnt to be sure). */ + pkt_type, /* Packet class */ + ip_summed; /* Driver fed us an IP checksum */ + __u32 priority; /* Packet queueing priority */ + atomic_t users; /* User count - see datagram.c,tcp.c */ + unsigned short protocol; /* Packet protocol from driver. */ + unsigned short security; /* Security level of packet */ + unsigned int truesize; /* Buffer size */ + + unsigned char *head; /* Head of buffer */ + unsigned char *data; /* Data head pointer */ + unsigned char *tail; /* Tail pointer */ + unsigned char *end; /* End pointer */ + + void (*destructor)(struct sk_buff *); /* Destruct function */ +#ifdef CONFIG_NETFILTER + /* Can be used for communication between hooks. 
*/ + unsigned long nfmark; + /* Cache info */ + __u32 nfcache; + /* Associated connection, if any */ + struct nf_ct_info *nfct; +#ifdef CONFIG_NETFILTER_DEBUG + unsigned int nf_debug; +#endif +#endif /*CONFIG_NETFILTER*/ + +#if defined(CONFIG_HIPPI) + union{ + __u32 ifield; + } private; +#endif + +#ifdef CONFIG_NET_SCHED + __u32 tc_index; /* traffic control index */ +#endif +}; + +#ifdef __KERNEL__ +/* + * Handling routines are only of interest to the kernel + */ +#include <linux/slab.h> + +#include <asm/system.h> + +extern void __kfree_skb(struct sk_buff *skb); +extern struct sk_buff * alloc_skb(unsigned int size, int priority); +extern void kfree_skbmem(struct sk_buff *skb); +extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); +extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority); +extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask); +extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask); +extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); +extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb, + int newheadroom, + int newtailroom, + int priority); +extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); +#define dev_kfree_skb(a) kfree_skb(a) +extern void skb_over_panic(struct sk_buff *skb, int len, void *here); +extern void skb_under_panic(struct sk_buff *skb, int len, void *here); + +/* Internal */ +#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) + +/** + * skb_queue_empty - check if a queue is empty + * @list: queue head + * + * Returns true if the queue is empty, false otherwise. + */ + +static inline int skb_queue_empty(struct sk_buff_head *list) +{ + return (list->next == (struct sk_buff *) list); +} + +/** + * skb_get - reference buffer + * @skb: buffer to reference + * + * Makes another reference to a socket buffer and returns a pointer + * to the buffer. + */ + +static inline struct sk_buff *skb_get(struct sk_buff *skb) +{ + atomic_inc(&skb->users); + return skb; +} + +/* + * If users==1, we are the only owner and are can avoid redundant + * atomic change. + */ + +/** + * kfree_skb - free an sk_buff + * @skb: buffer to free + * + * Drop a reference to the buffer and free it if the usage count has + * hit zero. + */ + +static inline void kfree_skb(struct sk_buff *skb) +{ + if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users)) + __kfree_skb(skb); +} + +/* Use this if you didn't touch the skb state [for fast switching] */ +static inline void kfree_skb_fast(struct sk_buff *skb) +{ + if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users)) + kfree_skbmem(skb); +} + +/** + * skb_cloned - is the buffer a clone + * @skb: buffer to check + * + * Returns true if the buffer was generated with skb_clone() and is + * one of multiple shared copies of the buffer. Cloned buffers are + * shared data so must not be written to under normal circumstances. + */ + +static inline int skb_cloned(struct sk_buff *skb) +{ + return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1; +} + +/** + * skb_shared - is the buffer shared + * @skb: buffer to check + * + * Returns true if more than one person has a reference to this + * buffer. 
+ */ + +static inline int skb_shared(struct sk_buff *skb) +{ + return (atomic_read(&skb->users) != 1); +} + +/** + * skb_share_check - check if buffer is shared and if so clone it + * @skb: buffer to check + * @pri: priority for memory allocation + * + * If the buffer is shared the buffer is cloned and the old copy + * drops a reference. A new clone with a single reference is returned. + * If the buffer is not shared the original buffer is returned. When + * being called from interrupt status or with spinlocks held pri must + * be GFP_ATOMIC. + * + * NULL is returned on a memory allocation failure. + */ + +static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) +{ + if (skb_shared(skb)) { + struct sk_buff *nskb; + nskb = skb_clone(skb, pri); + kfree_skb(skb); + return nskb; + } + return skb; +} + + +/* + * Copy shared buffers into a new sk_buff. We effectively do COW on + * packets to handle cases where we have a local reader and forward + * and a couple of other messy ones. The normal one is tcpdumping + * a packet thats being forwarded. + */ + +/** + * skb_unshare - make a copy of a shared buffer + * @skb: buffer to check + * @pri: priority for memory allocation + * + * If the socket buffer is a clone then this function creates a new + * copy of the data, drops a reference count on the old copy and returns + * the new copy with the reference count at 1. If the buffer is not a clone + * the original buffer is returned. When called with a spinlock held or + * from interrupt state @pri must be %GFP_ATOMIC + * + * %NULL is returned on a memory allocation failure. + */ + +static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri) +{ + struct sk_buff *nskb; + if(!skb_cloned(skb)) + return skb; + nskb=skb_copy(skb, pri); + kfree_skb(skb); /* Free our shared copy */ + return nskb; +} + +/** + * skb_peek + * @list_: list to peek at + * + * Peek an &sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. You must hold + * the appropriate locks or have a private queue to do this. + * + * Returns %NULL for an empty list or a pointer to the head element. + * The reference count is not incremented and the reference is therefore + * volatile. Use with caution. + */ + +static inline struct sk_buff *skb_peek(struct sk_buff_head *list_) +{ + struct sk_buff *list = ((struct sk_buff *)list_)->next; + if (list == (struct sk_buff *)list_) + list = NULL; + return list; +} + +/** + * skb_peek_tail + * @list_: list to peek at + * + * Peek an &sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. You must hold + * the appropriate locks or have a private queue to do this. + * + * Returns %NULL for an empty list or a pointer to the tail element. + * The reference count is not incremented and the reference is therefore + * volatile. Use with caution. + */ + +static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_) +{ + struct sk_buff *list = ((struct sk_buff *)list_)->prev; + if (list == (struct sk_buff *)list_) + list = NULL; + return list; +} + +/** + * skb_queue_len - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. 
+
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+	spin_lock_init(&list->lock);
+	list->prev = (struct sk_buff *)list;
+	list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+
+/*
+ *	Insert an sk_buff at the start of a list.
+ *
+ *	The "__skb_xxxx()" functions are the non-atomic ones that
+ *	can only be called with interrupts disabled.
+ */
+
+/**
+ *	__skb_queue_head - queue a buffer at the list head
+ *	@list: list to use
+ *	@newsk: buffer to queue
+ *
+ *	Queue a buffer at the start of a list. This function takes no locks
+ *	and you must therefore hold required locks before calling it.
+ *
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+	struct sk_buff *prev, *next;
+
+	newsk->list = list;
+	list->qlen++;
+	prev = (struct sk_buff *)list;
+	next = prev->next;
+	newsk->next = next;
+	newsk->prev = prev;
+	next->prev = newsk;
+	prev->next = newsk;
+}
+
+
+/**
+ *	skb_queue_head - queue a buffer at the list head
+ *	@list: list to use
+ *	@newsk: buffer to queue
+ *
+ *	Queue a buffer at the start of the list. This function takes the
+ *	list lock and can be used safely with other locking &sk_buff functions.
+ *
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_queue_head(list, newsk);
+	spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ *	__skb_queue_tail - queue a buffer at the list tail
+ *	@list: list to use
+ *	@newsk: buffer to queue
+ *
+ *	Queue a buffer at the end of a list. This function takes no locks
+ *	and you must therefore hold required locks before calling it.
+ *
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+
+
+static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+	struct sk_buff *prev, *next;
+
+	newsk->list = list;
+	list->qlen++;
+	next = (struct sk_buff *)list;
+	prev = next->prev;
+	newsk->next = next;
+	newsk->prev = prev;
+	next->prev = newsk;
+	prev->next = newsk;
+}
+
+/**
+ *	skb_queue_tail - queue a buffer at the list tail
+ *	@list: list to use
+ *	@newsk: buffer to queue
+ *
+ *	Queue a buffer at the tail of the list. This function takes the
+ *	list lock and can be used safely with other locking &sk_buff functions.
+ *
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_queue_tail(list, newsk);
+	spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ *	__skb_dequeue - remove from the head of the queue
+ *	@list: list to dequeue from
+ *
+ *	Remove the head of the list. This function does not take any locks
+ *	so it must be used with the appropriate locks held. The head item is
+ *	returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+	struct sk_buff *next, *prev, *result;
+
+	prev = (struct sk_buff *) list;
+	next = prev->next;
+	result = NULL;
+	if (next != prev) {
+		result	     = next;
+		next	     = next->next;
+		list->qlen--;
+		next->prev   = prev;
+		prev->next   = next;
+		result->next = NULL;
+		result->prev = NULL;
+		result->list = NULL;
+	}
+	return result;
+}
+
+/**
+ *	skb_dequeue - remove from the head of the queue
+ *	@list: list to dequeue from
+ *
+ *	Remove the head of the list. The list lock is taken so the function
+ *	may be used safely with other locking list functions. The head item is
+ *	returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+	unsigned long flags;
+	struct sk_buff *result;
+
+	spin_lock_irqsave(&list->lock, flags);
+	result = __skb_dequeue(list);
+	spin_unlock_irqrestore(&list->lock, flags);
+	return result;
+}
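A sketch of the common FIFO pattern built from these primitives (hypothetical deferred-processing queue, not part of this patch; the `backlog_*` names are made up): producers may run in interrupt context and the consumer in process context, so the locked variants are used throughout.

	static struct sk_buff_head rx_backlog;		/* private queue */

	static void backlog_init(void)
	{
		skb_queue_head_init(&rx_backlog);
	}

	static void backlog_add(struct sk_buff *skb)	/* any context */
	{
		skb_queue_tail(&rx_backlog, skb);
	}

	static void backlog_drain(void)			/* process context */
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&rx_backlog)) != NULL)
			kfree_skb(skb);			/* or hand off for processing */
	}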
+
+/*
+ *	Insert a packet on a list.
+ */
+
+static inline void __skb_insert(struct sk_buff *newsk,
+	struct sk_buff * prev, struct sk_buff *next,
+	struct sk_buff_head * list)
+{
+	newsk->next = next;
+	newsk->prev = prev;
+	next->prev = newsk;
+	prev->next = newsk;
+	newsk->list = list;
+	list->qlen++;
+}
+
+/**
+ *	skb_insert	-	insert a buffer
+ *	@old: buffer to insert before
+ *	@newsk: buffer to insert
+ *
+ *	Place a packet before a given packet in a list. The list locks are taken
+ *	and this function is atomic with respect to other list locked calls.
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&old->list->lock, flags);
+	__skb_insert(newsk, old->prev, old, old->list);
+	spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+/*
+ *	Place a packet after a given packet in a list.
+ */
+
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+	__skb_insert(newsk, old, old->next, old->list);
+}
+
+/**
+ *	skb_append	-	append a buffer
+ *	@old: buffer to insert after
+ *	@newsk: buffer to insert
+ *
+ *	Place a packet after a given packet in a list. The list locks are taken
+ *	and this function is atomic with respect to other list locked calls.
+ *	A buffer cannot be placed on two lists at the same time.
+ */
+
+
+static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&old->list->lock, flags);
+	__skb_append(old, newsk);
+	spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+	struct sk_buff * next, * prev;
+
+	list->qlen--;
+	next = skb->next;
+	prev = skb->prev;
+	skb->next = NULL;
+	skb->prev = NULL;
+	skb->list = NULL;
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ *	skb_unlink	-	remove a buffer from a list
+ *	@skb: buffer to remove
+ *
+ *	Remove a packet from a list. The list locks are taken and this
+ *	function is atomic with respect to other list locked calls.
+ *
+ *	Works even without knowing the list it is sitting on, which can be
+ *	handy at times. It also means that THE LIST MUST EXIST when you
+ *	unlink. Thus a list must have its contents unlinked before it is
+ *	destroyed.
+ */
+
+static inline void skb_unlink(struct sk_buff *skb)
+{
+	struct sk_buff_head *list = skb->list;
+
+	if(list) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&list->lock, flags);
+		if(skb->list == list)
+			__skb_unlink(skb, skb->list);
+		spin_unlock_irqrestore(&list->lock, flags);
+	}
+}
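A small sketch of why the self-locating skb_unlink() is handy (hypothetical cancel path, not part of this patch; `cancel_pending` is a made-up name): the buffer may or may not still sit on a pending queue, and skb_unlink() finds and locks the list through skb->list itself.

	static void cancel_pending(struct sk_buff *skb)
	{
		skb_unlink(skb);	/* harmless no-op if already dequeued */
		kfree_skb(skb);		/* drop our reference */
	}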
+
+/* XXX: more streamlined implementation */
+
+/**
+ *	__skb_dequeue_tail - remove from the tail of the queue
+ *	@list: list to dequeue from
+ *
+ *	Remove the tail of the list. This function does not take any locks
+ *	so it must be used with the appropriate locks held. The tail item is
+ *	returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+	struct sk_buff *skb = skb_peek_tail(list);
+	if (skb)
+		__skb_unlink(skb, list);
+	return skb;
+}
+
+/**
+ *	skb_dequeue_tail - remove from the tail of the queue
+ *	@list: list to dequeue from
+ *
+ *	Remove the tail of the list. The list lock is taken so the function
+ *	may be used safely with other locking list functions. The tail item is
+ *	returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+{
+	unsigned long flags;
+	struct sk_buff *result;
+
+	spin_lock_irqsave(&list->lock, flags);
+	result = __skb_dequeue_tail(list);
+	spin_unlock_irqrestore(&list->lock, flags);
+	return result;
+}
+
+static inline int skb_is_nonlinear(const struct sk_buff *skb)
+{
+	return skb->data_len;
+}
+
+static inline unsigned int skb_headlen(const struct sk_buff *skb)
+{
+	return skb->len - skb->data_len;
+}
+
+#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
+#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
+#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
+
+/*
+ *	Add data to an sk_buff
+ */
+
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+	unsigned char *tmp=skb->tail;
+	SKB_LINEAR_ASSERT(skb);
+	skb->tail+=len;
+	skb->len+=len;
+	return tmp;
+}
+
+/**
+ *	skb_put - add data to a buffer
+ *	@skb: buffer to use
+ *	@len: amount of data to add
+ *
+ *	This function extends the used data area of the buffer. If this would
+ *	exceed the total buffer size the kernel will panic. A pointer to the
+ *	first byte of the extra data is returned.
+ */
+
+static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+	unsigned char *tmp=skb->tail;
+	SKB_LINEAR_ASSERT(skb);
+	skb->tail+=len;
+	skb->len+=len;
+	if(skb->tail>skb->end) {
+		skb_over_panic(skb, len, current_text_addr());
+	}
+	return tmp;
+}
+
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+	skb->data-=len;
+	skb->len+=len;
+	return skb->data;
+}
+
+/**
+ *	skb_push - add data to the start of a buffer
+ *	@skb: buffer to use
+ *	@len: amount of data to add
+ *
+ *	This function extends the used data area of the buffer at the buffer
+ *	start. If this would exceed the total buffer headroom the kernel will
+ *	panic. A pointer to the first byte of the extra data is returned.
+ */
+
+static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+	skb->data-=len;
+	skb->len+=len;
+	if(skb->data<skb->head) {
+		skb_under_panic(skb, len, current_text_addr());
+	}
+	return skb->data;
+}
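A sketch of the canonical transmit-side use of skb_put() and skb_push() (hypothetical, not part of this patch; `build_frame` and `struct my_hdr` are made-up names, and the buffer is assumed to have been allocated with enough head- and tailroom): the payload is appended with skb_put(), then a protocol header is prepended with skb_push() into previously reserved headroom.

	struct my_hdr {				/* illustrative header layout */
		__u16	type;
		__u16	len;
	};

	static void build_frame(struct sk_buff *skb,
				const void *payload, unsigned int plen)
	{
		struct my_hdr *hdr;

		memcpy(skb_put(skb, plen), payload, plen);	/* append payload */
		hdr = (struct my_hdr *)skb_push(skb, sizeof(*hdr));
		hdr->type = htons(1);
		hdr->len  = htons(plen);
	}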
+
+static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+	skb->len-=len;
+	if (skb->len < skb->data_len)
+		out_of_line_bug();
+	return	skb->data+=len;
+}
+
+/**
+ *	skb_pull - remove data from the start of a buffer
+ *	@skb: buffer to use
+ *	@len: amount of data to remove
+ *
+ *	This function removes data from the start of a buffer, returning
+ *	the memory to the headroom. A pointer to the next data in the buffer
+ *	is returned. Once the data has been pulled future pushes will overwrite
+ *	the old data.
+ */
+
+static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
+{
+	if (len > skb->len)
+		return NULL;
+	return __skb_pull(skb,len);
+}
+
+extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+	if (len > skb_headlen(skb) &&
+	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
+		return NULL;
+	skb->len -= len;
+	return	skb->data += len;
+}
+
+static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+	if (len > skb->len)
+		return NULL;
+	return __pskb_pull(skb,len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+	if (len <= skb_headlen(skb))
+		return 1;
+	if (len > skb->len)
+		return 0;
+	return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
+}
+
+/**
+ *	skb_headroom - bytes at buffer head
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of free space at the head of an &sk_buff.
+ */
+
+static inline int skb_headroom(const struct sk_buff *skb)
+{
+	return skb->data-skb->head;
+}
+
+/**
+ *	skb_tailroom - bytes at buffer end
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of free space at the tail of an &sk_buff.
+ */
+
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+	return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+}
+
+/**
+ *	skb_reserve - adjust headroom
+ *	@skb: buffer to alter
+ *	@len: bytes to move
+ *
+ *	Increase the headroom of an empty &sk_buff by reducing the tail
+ *	room. This is only allowed for an empty buffer.
+ */
+
+static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+{
+	skb->data+=len;
+	skb->tail+=len;
+}
+
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+	if (!skb->data_len) {
+		skb->len = len;
+		skb->tail = skb->data+len;
+	} else {
+		___pskb_trim(skb, len, 0);
+	}
+}
+
+/**
+ *	skb_trim - remove end from a buffer
+ *	@skb: buffer to alter
+ *	@len: new length
+ *
+ *	Cut the length of a buffer down by removing data from the tail. If
+ *	the buffer is already under the length specified it is not modified.
+ */
+
+static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+	if (skb->len > len) {
+		__skb_trim(skb, len);
+	}
+}
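A sketch of the receive-side counterpart (hypothetical, not part of this patch; `strip_header` and the 4-byte header size are made up): pskb_may_pull() guarantees the header bytes sit in the linear area before they are read, and the pull then strips them so the next layer sees only the payload.

	static int strip_header(struct sk_buff *skb)
	{
		const unsigned int hlen = 4;	/* assumed header size */

		if (!pskb_may_pull(skb, hlen))	/* linearizes the header if needed */
			return -EINVAL;		/* runt frame */
		/* header bytes now lie at skb->data .. skb->data + hlen */
		__skb_pull(skb, hlen);		/* payload starts at skb->data */
		return 0;
	}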
+
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+	if (!skb->data_len) {
+		skb->len = len;
+		skb->tail = skb->data+len;
+		return 0;
+	} else {
+		return ___pskb_trim(skb, len, 1);
+	}
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+	if (len < skb->len)
+		return __pskb_trim(skb, len);
+	return 0;
+}
+
+/**
+ *	skb_orphan - orphan a buffer
+ *	@skb: buffer to orphan
+ *
+ *	If a buffer currently has an owner then we call the owner's
+ *	destructor function and make the @skb unowned. The buffer continues
+ *	to exist but is no longer charged to its former owner.
+ */
+
+
+static inline void skb_orphan(struct sk_buff *skb)
+{
+	if (skb->destructor)
+		skb->destructor(skb);
+	skb->destructor = NULL;
+	skb->sk = NULL;
+}
+
+/**
+ *	skb_queue_purge - empty a list
+ *	@list: list to empty
+ *
+ *	Delete all buffers on an &sk_buff list. Each buffer is removed from
+ *	the list and one reference dropped. This function takes the list
+ *	lock and is atomic with respect to other list locking functions.
+ */
+
+
+static inline void skb_queue_purge(struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+	while ((skb=skb_dequeue(list))!=NULL)
+		kfree_skb(skb);
+}
+
+/**
+ *	__skb_queue_purge - empty a list
+ *	@list: list to empty
+ *
+ *	Delete all buffers on an &sk_buff list. Each buffer is removed from
+ *	the list and one reference dropped. This function does not take the
+ *	list lock and the caller must hold the relevant locks to use it.
+ */
+
+
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+	while ((skb=__skb_dequeue(list))!=NULL)
+		kfree_skb(skb);
+}
+
+/**
+ *	__dev_alloc_skb - allocate an skbuff for sending
+ *	@length: length to allocate
+ *	@gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+					      int gfp_mask)
+{
+	struct sk_buff *skb;
+#if defined(CONFIG_XEN)
+	length = (PAGE_SIZE/2)+1; /* force slab allocator to give us a page */
+#endif
+	skb = alloc_skb(length+16, gfp_mask);
+	if (skb)
+		skb_reserve(skb,16);
+	return skb;
+}
+
+/**
+ *	dev_alloc_skb - allocate an skbuff for sending
+ *	@length: length to allocate
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory. Although this function
+ *	allocates memory it can be called from an interrupt.
+ */
+
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+	return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+/**
+ *	skb_cow - copy header of skb when it is required
+ *	@skb: buffer to cow
+ *	@headroom: needed headroom
+ *
+ *	If the skb passed lacks sufficient headroom or its data part
+ *	is shared, data is reallocated. If reallocation fails, an error
+ *	is returned and the original skb is not changed.
+ *
+ *	The result is skb with a writable area skb->head...skb->tail
+ *	and at least @headroom of space at head.
+ */
+
+static inline int
+skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+
+	if (delta < 0)
+		delta = 0;
+
+	if (delta || skb_cloned(skb))
+		return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
+	return 0;
+}
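A sketch of the classic driver receive path built on dev_alloc_skb() (hypothetical, not part of this patch; `rx_copy_frame` is a made-up name): dev_alloc_skb() already reserves 16 bytes of headroom, and the extra skb_reserve(skb, 2) aligns a 14-byte Ethernet header so the IP header lands on a 4-byte boundary.

	static struct sk_buff *rx_copy_frame(const void *frame, unsigned int len)
	{
		struct sk_buff *skb;

		skb = dev_alloc_skb(len + 2);	/* GFP_ATOMIC under the hood */
		if (skb == NULL)
			return NULL;		/* out of memory: drop the frame */
		skb_reserve(skb, 2);		/* align the eventual IP header */
		memcpy(skb_put(skb, len), frame, len);
		return skb;
	}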
+
+/**
+ *	skb_padto	- pad an skbuff up to a minimal size
+ *	@skb: buffer to pad
+ *	@len: minimal length
+ *
+ *	Pads up a buffer to ensure the trailing bytes exist and are
+ *	blanked. If the buffer already contains sufficient data it
+ *	is untouched. Returns the buffer, which may be a replacement
+ *	for the original, or %NULL for out of memory - in which case
+ *	the original buffer is still freed.
+ */
+
+static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+{
+	unsigned int size = skb->len;
+	if(likely(size >= len))
+		return skb;
+	return skb_pad(skb, len-size);
+}
+
+/**
+ *	skb_linearize - convert paged skb to linear one
+ *	@skb: buffer to linearize
+ *	@gfp: allocation mode
+ *
+ *	If there is no free memory -ENOMEM is returned, otherwise zero
+ *	is returned and the old skb data released.
+ */
+int skb_linearize(struct sk_buff *skb, int gfp);
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+	if (in_irq())
+		out_of_line_bug();
+
+	local_bh_disable();
+#endif
+	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+	local_bh_enable();
+#endif
+}
+
+#define skb_queue_walk(queue, skb) \
+		for (skb = (queue)->next;			\
+		     (skb != (struct sk_buff *)(queue));	\
+		     skb=skb->next)
+
+
+extern struct sk_buff *	skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
+extern int		skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
+extern int		skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
+extern int		skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
+extern int		skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
+extern void		skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+extern unsigned int	skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
+extern int		skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+extern unsigned int	skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
+extern void		skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
+#ifdef CONFIG_NETFILTER
+static inline void
+nf_conntrack_put(struct nf_ct_info *nfct)
+{
+	if (nfct && atomic_dec_and_test(&nfct->master->use))
+		nfct->master->destroy(nfct->master);
+}
+static inline void
+nf_conntrack_get(struct nf_ct_info *nfct)
+{
+	if (nfct)
+		atomic_inc(&nfct->master->use);
+}
+#endif
+
+#endif	/* __KERNEL__ */
+#endif	/* _LINUX_SKBUFF_H */
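A sketch of skb_queue_walk() in use (hypothetical accounting helper, not part of this patch; `queued_bytes` is a made-up name): the macro iterates without removing entries, so the caller must hold the queue lock across the walk.

	static unsigned int queued_bytes(struct sk_buff_head *q)
	{
		struct sk_buff *skb;
		unsigned int bytes = 0;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		skb_queue_walk(q, skb)		/* skb stays on the list */
			bytes += skb->len;
		spin_unlock_irqrestore(&q->lock, flags);
		return bytes;
	}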
diff --git a/linux-2.4.26-xen-sparse/include/linux/sunrpc/debug.h b/linux-2.4.26-xen-sparse/include/linux/sunrpc/debug.h
new file mode 100644
index 0000000000..67dbfb887e
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/include/linux/sunrpc/debug.h
@@ -0,0 +1,95 @@
+/*
+ * linux/include/linux/sunrpc/debug.h
+ *
+ * Debugging support for sunrpc module
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_DEBUG_H_
+#define _LINUX_SUNRPC_DEBUG_H_
+
+#include <linux/config.h>
+
+#include <linux/timer.h>
+#include <linux/tqueue.h>
+
+/*
+ * Enable RPC debugging/profiling.
+ */
+/*#ifdef CONFIG_SYSCTL*/
+/*#define  RPC_DEBUG*/
+/*#endif*/
+/* #define  RPC_PROFILE */
+
+/*
+ * RPC debug facilities
+ */
+#define RPCDBG_XPRT		0x0001
+#define RPCDBG_CALL		0x0002
+#define RPCDBG_DEBUG		0x0004
+#define RPCDBG_NFS		0x0008
+#define RPCDBG_AUTH		0x0010
+#define RPCDBG_PMAP		0x0020
+#define RPCDBG_SCHED		0x0040
+#define RPCDBG_SVCSOCK		0x0100
+#define RPCDBG_SVCDSP		0x0200
+#define RPCDBG_MISC		0x0400
+#define RPCDBG_ALL		0x7fff
+
+#ifdef __KERNEL__
+
+/*
+ * Debugging macros etc
+ */
+#ifdef RPC_DEBUG
+extern unsigned int		rpc_debug;
+extern unsigned int		nfs_debug;
+extern unsigned int		nfsd_debug;
+extern unsigned int		nlm_debug;
+#endif
+
+#define dprintk(args...)	dfprintk(FACILITY, ## args)
+
+#undef ifdebug
+#ifdef RPC_DEBUG
+# define ifdebug(fac)		if (rpc_debug & RPCDBG_##fac)
+# define dfprintk(fac, args...)	do { ifdebug(fac) printk(args); } while(0)
+# define RPC_IFDEBUG(x)		x
+#else
+# define dfprintk(fac, args...)	do ; while (0)
+# define RPC_IFDEBUG(x)
+#endif
+
+#ifdef RPC_PROFILE
+# define pprintk(args...)	printk(## args)
+#else
+# define pprintk(args...)	do ; while (0)
+#endif
+
+/*
+ * Sysctl interface for RPC debugging
+ */
+#ifdef RPC_DEBUG
+void		rpc_register_sysctl(void);
+void		rpc_unregister_sysctl(void);
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * Declarations for the sysctl debug interface, which allows reading or
+ * changing the debug flags for rpc, nfs, nfsd, and lockd. Since the sunrpc
+ * module currently registers its sysctl table dynamically, the sysctl path
+ * for module FOO is <CTL_SUNRPC, CTL_FOODEBUG>.
+ */
+#define CTL_SUNRPC	7249	/* arbitrary and hopefully unused */
+
+enum {
+	CTL_RPCDEBUG = 1,
+	CTL_NFSDEBUG,
+	CTL_NFSDDEBUG,
+	CTL_NLMDEBUG,
+};
+
+#endif /* _LINUX_SUNRPC_DEBUG_H_ */
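A sketch of how a source file uses these macros (hypothetical, not part of this patch; `xprt_trace` is a made-up name): dprintk() expands to dfprintk(FACILITY, ...), which tests rpc_debug against RPCDBG_FACILITY, so each file defines RPCDBG_FACILITY once to pick its facility bit. With RPC_DEBUG undefined, the whole thing compiles away.

	#define RPCDBG_FACILITY	RPCDBG_XPRT	/* this file's facility bit */

	static void xprt_trace(int err)
	{
		/* emitted only if RPC_DEBUG is on and RPCDBG_XPRT is set
		 * in rpc_debug (e.g. via the CTL_SUNRPC sysctl) */
		dprintk("RPC: transport error %d\n", err);
	}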
diff --git a/linux-2.4.26-xen-sparse/include/linux/timer.h b/linux-2.4.26-xen-sparse/include/linux/timer.h
new file mode 100644
index 0000000000..238083218f
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/include/linux/timer.h
@@ -0,0 +1,77 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+#include <linux/config.h>
+#include <linux/list.h>
+
+/*
+ * In Linux 2.4, static timers have been removed from the kernel.
+ * Timers may be dynamically created and destroyed, and should be initialized
+ * by a call to init_timer() upon creation.
+ *
+ * The "data" field enables use of a common timeout function for several
+ * timeouts. You can use this field to distinguish between the different
+ * invocations.
+ */
+struct timer_list {
+	struct list_head list;
+	unsigned long expires;
+	unsigned long data;
+	void (*function)(unsigned long);
+};
+
+extern void add_timer(struct timer_list * timer);
+extern int del_timer(struct timer_list * timer);
+#ifdef CONFIG_NO_IDLE_HZ
+extern struct timer_list *next_timer_event(void);
+#endif
+
+#ifdef CONFIG_SMP
+extern int del_timer_sync(struct timer_list * timer);
+extern void sync_timers(void);
+#else
+#define del_timer_sync(t)	del_timer(t)
+#define sync_timers()		do { } while (0)
+#endif
+
+/*
+ * mod_timer is a more efficient way to update the expires field of an
+ * active timer (if the timer is inactive it will be activated).
+ * mod_timer(a,b) is equivalent to del_timer(a); a->expires = b; add_timer(a).
+ * If the timer is known to be not pending (i.e., from within the handler),
+ * mod_timer is less efficient than a->expires = b; add_timer(a).
+ */
+int mod_timer(struct timer_list *timer, unsigned long expires);
+
+extern void it_real_fn(unsigned long);
+
+static inline void init_timer(struct timer_list * timer)
+{
+	timer->list.next = timer->list.prev = NULL;
+}
+
+static inline int timer_pending (const struct timer_list * timer)
+{
+	return timer->list.next != NULL;
+}
+
+/*
+ *	These inlines deal with timer wrapping correctly. You are
+ *	strongly encouraged to use them
+ *	1. Because people otherwise forget
+ *	2. Because if the timer wrap changes in future you won't have to
+ *	   alter your driver code.
+ *
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a,b)		((long)(b) - (long)(a) < 0)
+#define time_before(a,b)	time_after(b,a)
+
+#define time_after_eq(a,b)	((long)(a) - (long)(b) >= 0)
+#define time_before_eq(a,b)	time_after_eq(b,a)
+
+#endif
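A sketch tying the timer API together (hypothetical watchdog, not part of this patch; the `wd_*` names and timings are made up): the timer is set up once with init_timer(), armed and re-armed with mod_timer(), compared wrap-safely with time_after(), and torn down with del_timer_sync() so the handler cannot still be running on another CPU.

	static struct timer_list wd_timer;
	static unsigned long wd_deadline;

	static void wd_fire(unsigned long data)		/* timer handler */
	{
		if (time_after(jiffies, wd_deadline))
			printk(KERN_WARNING "watchdog: device %lu stalled\n", data);
		else
			mod_timer(&wd_timer, jiffies + HZ);	/* re-arm for 1s */
	}

	static void wd_start(unsigned long dev_id)
	{
		init_timer(&wd_timer);
		wd_timer.function = wd_fire;
		wd_timer.data = dev_id;		/* passed back to wd_fire() */
		wd_deadline = jiffies + 10 * HZ;
		mod_timer(&wd_timer, jiffies + HZ);	/* activates the timer */
	}

	static void wd_stop(void)
	{
		del_timer_sync(&wd_timer);	/* waits for a running handler */
	}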