/*
* linux/drivers/block/ll_rw_blk.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
* Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
* Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
*/
/*
* This handles all read/write requests to block devices
*/
#include <xeno/config.h>
#include <xeno/types.h>
#include <xeno/lib.h>
#include <xeno/sched.h>
/*#include <xeno/kernel_stat.h>*/
#include <xeno/errno.h>
/*#include <xeno/locks.h>*/
#include <xeno/mm.h>
/*#include <xeno/swap.h>*/
#include <xeno/init.h>
/*#include <xeno/smp_lock.h>*/
/*#include <xeno/completion.h>*/
#include <asm/system.h>
#include <asm/io.h>
#include <xeno/blk.h>
/*#include <xeno/highmem.h>*/
#include <xeno/slab.h>
#include <xeno/module.h>
/*
* KAF: We can turn off noise relating to barking guest-OS requests.
*/
#if 0
#define DPRINTK(_f, _a...) printk(_f , ## _a)
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif
/* This will die as all synchronous stuff is coming to an end */
#define complete(_r) panic("completion.h stuff may be needed...")
/*
* MAC Floppy IWM hooks
*/
#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif
/*
* For the allocated request tables
*/
static kmem_cache_t *request_cachep;
/*
* The "disk" task queue is used to start the actual requests
* after a plug
*/
DECLARE_TASK_QUEUE(tq_disk);
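/*
 * Illustrative sketch (assumes the standard Linux task-queue API is
 * available in this port): a caller that has queued I/O and must wait
 * for it kicks any plugged queues by running tq_disk, e.g.
 *
 *	run_task_queue(&tq_disk);
 *
 * which invokes each queued unplug routine and lets the drivers start
 * servicing their request lists.
 */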
/*
 * Protect the request list against multiple users.
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations,
 * and the do_request() side is still not SMP-safe; the kernel lock
 * currently protects that part).
 *
 * There is a fair chance that things will work just fine if these
 * functions are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
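/*
 * Locking sketch (standard Linux spinlock idiom): request-list users
 * must take io_request_lock with interrupts disabled, because request
 * completion can run from IRQ context:
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&io_request_lock, flags);
 *	... inspect or modify the request list ...
 *	spin_unlock_irqrestore(&io_request_lock, flags);
 */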
/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV];
/*
 * blk_dev_struct holds the per-major state: an optional ->queue() hook
 * for drivers that keep one queue per device, and a default
 * request_queue (see blk_get_queue() below).
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
/*
 * blk_size contains the size of all block devices in units of
 * 1024-byte blocks:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];
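/*
 * Sketch of the implied bounds check (the exact form in the request
 * path may differ): with sizes kept in 1024-byte blocks, a request for
 * 'count' sectors starting at 'sector' must be rejected when it runs
 * past the end of the device:
 *
 *	if (blk_size[MAJOR(dev)] &&
 *	    (sector + count) > (blk_size[MAJOR(dev)][MINOR(dev)] << 1))
 *		... reject: request runs past end of device ...
 *
 * (<< 1 converts 1K blocks to 512-byte sectors).
 */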
/*
 * blksize_size contains the block size of all block devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then a 1024-byte block size is assumed.
 */
int * blksize_size[MAX_BLKDEV];
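/*
 * For illustration, a lookup helper in the style of get_max_sectors()
 * below; 'get_block_size' is hypothetical and not defined in this file:
 *
 *	static inline int get_block_size(kdev_t dev)
 *	{
 *		if (!blksize_size[MAJOR(dev)])
 *			return 1024;
 *		return blksize_size[MAJOR(dev)][MINOR(dev)];
 *	}
 */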
/*
 * hardsect_size contains the hardware sector size of a device:
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *	then 512 bytes is assumed.
 * else
 *	sector_size is hardsect_size[MAJOR][MINOR]
 *
 * This is currently set by some SCSI devices and read by the MS-DOS
 * filesystem driver. Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV];
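/*
 * The lookup follows the same default-if-unset pattern as the other
 * tables (sketch only; mainline Linux wraps this in get_hardsect_size()):
 *
 *	sect_size = hardsect_size[MAJOR(dev)] ?
 *		hardsect_size[MAJOR(dev)][MINOR(dev)] : 512;
 */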
/*
* The following tunes the read-ahead algorithm in mm/filemap.c
*/
int * max_readahead[MAX_BLKDEV];
/*
* Max number of sectors per request
*/
int * max_sectors[MAX_BLKDEV];
static inline int get_max_sectors(kdev_t dev)
{
	if (!max_sectors[MAJOR(dev)])
		return MAX_SECTORS;
	return max_sectors[MAJOR(dev)][MINOR(dev)];
}
inline request_queue_t *blk_get_queue(kdev_t dev)
{
	struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);

	if (bdev->queue)
		return bdev->queue(dev);
	else
		return &blk_dev[MAJOR(dev)].request_queue;
}
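/*
 * Usage sketch: the generic request path looks up the queue for a
 * buffer head's device before merging or queueing, e.g.
 *
 *	request_queue_t *q = blk_get_queue(bh->b_rdev);
 *
 * Drivers with one queue per device install a ->queue() hook in their
 * blk_dev_struct entry; everyone else gets the shared per-major queue.
 */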
static int __blk_cleanup_queue(struct request_list *list)
{
	struct list_head *head = &list->free;
	struct request *rq;
	int i = 0;

	while (!list_empty(head)) {
		rq = list_entry(head->next, struct request, queue);