Diffstat (limited to 'linux-2.4.26-xen-sparse/arch/xen/drivers/blkif')
12 files changed, 2977 insertions, 0 deletions
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/Makefile b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/Makefile
new file mode 100644
index 0000000000..20c8192d3d
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/Makefile
@@ -0,0 +1,10 @@
+
+O_TARGET := drv.o
+
+subdir-y += frontend
+obj-y    += frontend/drv.o
+
+subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
+obj-$(CONFIG_XEN_PHYSDEV_ACCESS)    += backend/drv.o
+
+include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/Makefile b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/Makefile
new file mode 100644
index 0000000000..4c8c17367c
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/Makefile
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := main.o control.o interface.o vbd.o
+include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/common.h b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/common.h
new file mode 100644
index 0000000000..d9f1d22908
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/common.h
@@ -0,0 +1,108 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/common.h
+ */
+
+#ifndef __BLKIF__BACKEND__COMMON_H__
+#define __BLKIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <asm/ctrl_if.h>
+#include <asm/io.h>
+#include "../blkif.h"
+
+#ifndef NDEBUG
+#define ASSERT(_p) \
+    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+    __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk("(file=%s, line=%d) " _f, \
+                           __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+typedef struct blkif_st {
+    /* Unique identifier for this interface. */
+    domid_t          domid;
+    unsigned int     handle;
+    /* Physical parameters of the comms window. */
+    unsigned long    shmem_frame;
+    unsigned int     evtchn;
+    int              irq;
+    /* Comms information. */
+    blkif_ring_t    *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
+    BLKIF_RING_IDX   blk_req_cons;  /* Request consumer. */
+    BLKIF_RING_IDX   blk_resp_prod; /* Private version of response producer. */
+    /* VBDs attached to this interface. */
+    rb_root_t        vbd_rb;        /* Mapping from 16-bit vdevices to VBDs. */
+    spinlock_t       vbd_lock;      /* Protects VBD mapping. */
+    /* Private fields. */
+    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+    /*
+     * DISCONNECT response is deferred until pending requests are ack'ed.
+     * We therefore need to store the id from the original request.
+     */
+    u8               disconnect_rspid;
+    struct blkif_st *hash_next;
+    struct list_head blkdev_list;
+    spinlock_t       blk_ring_lock;
+    atomic_t         refcnt;
+} blkif_t;
+
+void blkif_create(blkif_be_create_t *create);
+void blkif_destroy(blkif_be_destroy_t *destroy);
+void blkif_connect(blkif_be_connect_t *connect);
+int  blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id);
+void __blkif_disconnect_complete(blkif_t *blkif);
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define blkif_put(_b)                             \
+    do {                                          \
+        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+            __blkif_disconnect_complete(_b);      \
+    } while (0)
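The blkif_get()/blkif_put() pair above drives the deferred-disconnect scheme described in the struct comment: the DISCONNECT response is only sent once the last outstanding reference on the interface is dropped. Below is a minimal user-space sketch of the same pattern (illustrative names only, not part of the patch), using C11 atomics in place of the kernel's atomic_t:

    /* Sketch only: refcount whose final drop triggers deferred cleanup. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct conn {
        atomic_int refcnt;
    } conn_t;

    static void conn_disconnect_complete(conn_t *c)
    {
        /* Runs exactly once, after the last reference is dropped. */
        printf("last ref gone: completing disconnect\n");
        free(c);
    }

    static void conn_get(conn_t *c) { atomic_fetch_add(&c->refcnt, 1); }

    static void conn_put(conn_t *c)
    {
        /* fetch_sub returns the old value; 1 means we dropped the last ref. */
        if (atomic_fetch_sub(&c->refcnt, 1) == 1)
            conn_disconnect_complete(c);
    }

    int main(void)
    {
        conn_t *c = malloc(sizeof(*c));
        atomic_init(&c->refcnt, 1);    /* ref held by the connect path */
        conn_get(c);                   /* ref held by an in-flight request */
        conn_put(c);                   /* request completes */
        conn_put(c);                   /* disconnect drops the last ref */
        return 0;
    }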
+
+/* An entry in a list of xen_extents. */
+typedef struct _blkif_extent_le {
+    blkif_extent_t extent;           /* an individual extent */
+    struct _blkif_extent_le *next;   /* and a pointer to the next */
+} blkif_extent_le_t;
+
+typedef struct _vbd {
+    blkif_vdev_t       vdevice;   /* what the domain refers to this vbd as */
+    unsigned char      readonly;  /* Non-zero -> read-only */
+    unsigned char      type;      /* XD_TYPE_xxx */
+    blkif_extent_le_t *extents;   /* list of xen_extents making up this vbd */
+    rb_node_t          rb;        /* for linking into R-B tree lookup struct */
+} vbd_t;
+
+void vbd_create(blkif_be_vbd_create_t *create);
+void vbd_grow(blkif_be_vbd_grow_t *grow);
+void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
+void vbd_destroy(blkif_be_vbd_destroy_t *delete);
+int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
+void destroy_all_vbds(blkif_t *blkif);
+
+/* Describes a [partial] disk extent (part of a block io request) */
+typedef struct {
+    unsigned short dev;
+    unsigned short nr_sects;
+    unsigned long  buffer;
+    xen_sector_t   sector_number;
+} phys_seg_t;
+
+int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation);
+
+void blkif_interface_init(void);
+void blkif_ctrlif_init(void);
+
+void blkif_deschedule(blkif_t *blkif);
+
+void blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __BLKIF__BACKEND__COMMON_H__ */
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/control.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/control.c
new file mode 100644
index 0000000000..0b26224651
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/control.c
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/control.c
+ *
+ * Routines for interfacing with the control plane.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    DPRINTK("Received blkif backend message, subtype=%d\n", msg->subtype);
+
+    switch ( msg->subtype )
+    {
+    case CMSG_BLKIF_BE_CREATE:
+        if ( msg->length != sizeof(blkif_be_create_t) )
+            goto parse_error;
+        blkif_create((blkif_be_create_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_DESTROY:
+        if ( msg->length != sizeof(blkif_be_destroy_t) )
+            goto parse_error;
+        blkif_destroy((blkif_be_destroy_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_CONNECT:
+        if ( msg->length != sizeof(blkif_be_connect_t) )
+            goto parse_error;
+        blkif_connect((blkif_be_connect_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_DISCONNECT:
+        if ( msg->length != sizeof(blkif_be_disconnect_t) )
+            goto parse_error;
+        if ( !blkif_disconnect((blkif_be_disconnect_t *)&msg->msg[0],msg->id) )
+            return; /* Sending the response is deferred until later. */
+        break;
+    case CMSG_BLKIF_BE_VBD_CREATE:
+        if ( msg->length != sizeof(blkif_be_vbd_create_t) )
+            goto parse_error;
+        vbd_create((blkif_be_vbd_create_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_VBD_DESTROY:
+        if ( msg->length != sizeof(blkif_be_vbd_destroy_t) )
+            goto parse_error;
+        vbd_destroy((blkif_be_vbd_destroy_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_VBD_GROW:
+        if ( msg->length != sizeof(blkif_be_vbd_grow_t) )
+            goto parse_error;
+        vbd_grow((blkif_be_vbd_grow_t *)&msg->msg[0]);
+        break;
+    case CMSG_BLKIF_BE_VBD_SHRINK:
+        if ( msg->length != sizeof(blkif_be_vbd_shrink_t) )
+            goto parse_error;
+        vbd_shrink((blkif_be_vbd_shrink_t *)&msg->msg[0]);
+        break;
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    DPRINTK("Parse error while reading message subtype %d, len %d\n",
+            msg->subtype, msg->length);
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+void blkif_ctrlif_init(void)
+{
+    ctrl_msg_t                       cmsg;
+    blkif_be_driver_status_changed_t st;
+
+    (void)ctrl_if_register_receiver(CMSG_BLKIF_BE, blkif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* Send a driver-UP notification to the domain controller. */
+    cmsg.type    = CMSG_BLKIF_BE;
+    cmsg.subtype = CMSG_BLKIF_BE_DRIVER_STATUS_CHANGED;
+    cmsg.length  = sizeof(blkif_be_driver_status_changed_t);
+    st.status    = BLKIF_DRIVER_STATUS_UP;
+    memcpy(cmsg.msg, &st, sizeof(st));
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/interface.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/interface.c
new file mode 100644
index 0000000000..780d793c6c
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/interface.c
@@ -0,0 +1,233 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/interface.c
+ *
+ * Block-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+#define BLKIF_HASHSZ 1024
+#define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
+
+static kmem_cache_t *blkif_cachep;
+static blkif_t      *blkif_hash[BLKIF_HASHSZ];
+
+blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
+{
+    blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
+    while ( (blkif != NULL) &&
+            ((blkif->domid != domid) || (blkif->handle != handle)) )
+        blkif = blkif->hash_next;
+    return blkif;
+}
+
+void __blkif_disconnect_complete(blkif_t *blkif)
+{
+    ctrl_msg_t            cmsg;
+    blkif_be_disconnect_t disc;
+
+    /*
+     * These can't be done in __blkif_disconnect() because at that point there
+     * may be outstanding requests at the disc whose asynchronous responses
+     * must still be notified to the remote driver.
+     */
+    unbind_evtchn_from_irq(blkif->evtchn);
+    vfree(blkif->blk_ring_base);
+
+    /* Construct the deferred response message. */
+    cmsg.type         = CMSG_BLKIF_BE;
+    cmsg.subtype      = CMSG_BLKIF_BE_DISCONNECT;
+    cmsg.id           = blkif->disconnect_rspid;
+    cmsg.length       = sizeof(blkif_be_disconnect_t);
+    disc.domid        = blkif->domid;
+    disc.blkif_handle = blkif->handle;
+    disc.status       = BLKIF_BE_STATUS_OKAY;
+    memcpy(cmsg.msg, &disc, sizeof(disc));
+
+    /*
+     * Make sure message is constructed /before/ status change, because
+     * after the status change the 'blkif' structure could be deallocated at
+     * any time. Also make sure we send the response /after/ status change,
+     * as otherwise a subsequent CONNECT request could spuriously fail if
+     * another CPU doesn't see the status change yet.
+     */
+    mb();
+    if ( blkif->status != DISCONNECTING )
+        BUG();
+    blkif->status = DISCONNECTED;
+    mb();
+
+    /* Send the successful response. */
+    ctrl_if_send_response(&cmsg);
+}
+
+void blkif_create(blkif_be_create_t *create)
+{
+    domid_t       domid  = create->domid;
+    unsigned int  handle = create->blkif_handle;
+    blkif_t     **pblkif, *blkif;
+
+    if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
+    {
+        DPRINTK("Could not create blkif: out of memory\n");
+        create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    memset(blkif, 0, sizeof(*blkif));
+    blkif->domid  = domid;
+    blkif->handle = handle;
+    blkif->status = DISCONNECTED;
+    spin_lock_init(&blkif->vbd_lock);
+    spin_lock_init(&blkif->blk_ring_lock);
+    atomic_set(&blkif->refcnt, 0);
+
+    pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+    while ( *pblkif != NULL )
+    {
+        if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
+        {
+            DPRINTK("Could not create blkif: already exists\n");
+            create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
+            kmem_cache_free(blkif_cachep, blkif);
+            return;
+        }
+        pblkif = &(*pblkif)->hash_next;
+    }
+
+    blkif->hash_next = *pblkif;
+    *pblkif = blkif;
+
+    DPRINTK("Successfully created blkif\n");
+    create->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_destroy(blkif_be_destroy_t *destroy)
+{
+    domid_t       domid  = destroy->domid;
+    unsigned int  handle = destroy->blkif_handle;
+    blkif_t     **pblkif, *blkif;
+
+    pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+    while ( (blkif = *pblkif) != NULL )
+    {
+        if ( (blkif->domid == domid) && (blkif->handle == handle) )
+        {
+            if ( blkif->status != DISCONNECTED )
+                goto still_connected;
+            goto destroy;
+        }
+        pblkif = &blkif->hash_next;
+    }
+
+    destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+    return;
+
+ still_connected:
+    destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+    return;
+
+ destroy:
+    *pblkif = blkif->hash_next;
+    destroy_all_vbds(blkif);
+    kmem_cache_free(blkif_cachep, blkif);
+    destroy->status = BLKIF_BE_STATUS_OKAY;
+}
+
+void blkif_connect(blkif_be_connect_t *connect)
+{
+    domid_t       domid       = connect->domid;
+    unsigned int  handle      = connect->blkif_handle;
+    unsigned int  evtchn      = connect->evtchn;
+    unsigned long shmem_frame = connect->shmem_frame;
+    struct vm_struct *vma;
+    pgprot_t      prot;
+    int           error;
+    blkif_t      *blkif;
+
+    blkif = blkif_find_by_handle(domid, handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n",
+                connect->domid, connect->blkif_handle);
+        connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+    {
+        connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        return;
+    }
+
+    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+                                    prot, domid);
+    if ( error != 0 )
+    {
+        if ( error == -ENOMEM )
+            connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        else if ( error == -EFAULT )
+            connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
+        else
+            connect->status = BLKIF_BE_STATUS_ERROR;
+        vfree(vma->addr);
+        return;
+    }
+
+    if ( blkif->status != DISCONNECTED )
+    {
+        connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+        vfree(vma->addr);
+        return;
+    }
+
+    blkif->evtchn        = evtchn;
+    blkif->irq           = bind_evtchn_to_irq(evtchn);
+    blkif->shmem_frame   = shmem_frame;
+    blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
+    blkif->status        = CONNECTED;
+    blkif_get(blkif);
+
+    request_irq(blkif->irq, blkif_be_int, 0, "blkif-backend", blkif);
+
+    connect->status = BLKIF_BE_STATUS_OKAY;
+}
+
+int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+    domid_t       domid  = disconnect->domid;
+    unsigned int  handle = disconnect->blkif_handle;
+    blkif_t      *blkif;
+
+    blkif = blkif_find_by_handle(domid, handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("blkif_disconnect attempted for non-existent blkif"
+                " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle);
+        disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return 1; /* Caller will send response error message. */
+    }
+
+    if ( blkif->status == CONNECTED )
+    {
+        blkif->status = DISCONNECTING;
+        blkif->disconnect_rspid = rsp_id;
+        wmb(); /* Let other CPUs see the status change. */
+        free_irq(blkif->irq, NULL);
+        blkif_deschedule(blkif);
+        blkif_put(blkif);
+    }
+
+    return 0; /* Caller should not send response message. */
+}
+
+void __init blkif_interface_init(void)
+{
+    blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
+                                     0, 0, NULL, NULL);
+    memset(blkif_hash, 0, sizeof(blkif_hash));
+}
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/main.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/main.c
new file mode 100644
index 0000000000..803af976d2
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/main.c
@@ -0,0 +1,523 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/main.c
+ *
+ * Back-end of the driver for virtual block devices. This portion of the
+ * driver exports a 'unified' block-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ *  arch/xen/drivers/blkif/frontend
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+/*
+ * These are rather arbitrary. They are fairly large because adjacent requests
+ * pulled from a communication ring are quite likely to end up being part of
+ * the same scatter/gather request at the disc.
+ *
+ * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
+ * This will increase the chances of being able to write whole tracks.
+ * 64 should be enough to keep us competitive with Linux.
+ */
+#define MAX_PENDING_REQS 64
+#define BATCH_PER_DOMAIN 16
+
+/*
+ * NB. We place a page of padding between each buffer page to avoid incorrect
+ * merging of requests by the IDE and SCSI merging routines. Otherwise, two
+ * adjacent buffers in a scatter-gather request would have adjacent page
+ * numbers: since the merge routines don't realise that this is in *pseudophys*
+ * space, not real space, they may collapse the s-g elements!
+ */
+static unsigned long mmap_vstart;
+#define MMAP_PAGES_PER_REQUEST \
+    (2 * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1))
+#define MMAP_PAGES             \
+    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg)                        \
+    (mmap_vstart +                                   \
+     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+     ((_seg) * 2 * PAGE_SIZE))
+
+/*
+ * Each outstanding request that we've passed to the lower device layers has a
+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
+ * the pendcnt towards zero. When it hits zero, the specified domain has a
+ * response queued for it, with the saved 'id' passed back.
+ */
+typedef struct {
+    blkif_t       *blkif;
+    unsigned long  id;
+    int            nr_pages;
+    atomic_t       pendcnt;
+    unsigned short operation;
+    int            status;
+} pending_req_t;
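A single blkif request fans out into several buffer_heads, and the comment above describes how pendcnt ties them back together: every completion decrements the counter, and the one that reaches zero queues the single response. A small self-contained sketch of that completion-counting scheme (C11 atomics, hypothetical names, not from the patch):

    /* Sketch only: one response per logical request, however it was split. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct pending {
        atomic_int    pendcnt;
        int           status;   /* first error fails the whole request */
        unsigned long id;
    };

    static void sub_io_done(struct pending *p, int ok)
    {
        if (!ok)
            p->status = -1;                        /* mark request failed */
        if (atomic_fetch_sub(&p->pendcnt, 1) == 1) /* last fragment? */
            printf("respond: id=%lu status=%d\n", p->id, p->status);
    }

    int main(void)
    {
        struct pending p = { .id = 42, .status = 0 };
        atomic_init(&p.pendcnt, 3);  /* request was split into 3 fragments */
        sub_io_done(&p, 1);
        sub_io_done(&p, 0);          /* one failure fails the request */
        sub_io_done(&p, 1);          /* third completion emits the response */
        return 0;
    }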
+
+/*
+ * We can't allocate pending_req's in order, since they may complete out of
+ * order. We therefore maintain an allocation ring. This ring also indicates
+ * when enough work has been passed down -- at that point the allocation ring
+ * will be empty.
+ */
+static pending_req_t pending_reqs[MAX_PENDING_REQS];
+static unsigned char pending_ring[MAX_PENDING_REQS];
+static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
+/* NB. We use a different index type to differentiate from shared blk rings. */
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
+static kmem_cache_t *buffer_head_cachep;
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do);
+static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
+static void make_response(blkif_t *blkif, unsigned long id,
+                          unsigned short op, int st);
+
+static void fast_flush_area(int idx, int nr_pages)
+{
+    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+    int               i;
+
+    for ( i = 0; i < nr_pages; i++ )
+    {
+        mcl[i].op      = __HYPERVISOR_update_va_mapping;
+        mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
+        mcl[i].args[1] = 0;
+        mcl[i].args[2] = 0;
+    }
+
+    mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
+    (void)HYPERVISOR_multicall(mcl, nr_pages);
+}
+
+
+/******************************************************************
+ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
+ */
+
+static struct list_head io_schedule_list;
+static spinlock_t io_schedule_list_lock;
+
+static int __on_blkdev_list(blkif_t *blkif)
+{
+    return blkif->blkdev_list.next != NULL;
+}
+
+static void remove_from_blkdev_list(blkif_t *blkif)
+{
+    unsigned long flags;
+    if ( !__on_blkdev_list(blkif) ) return;
+    spin_lock_irqsave(&io_schedule_list_lock, flags);
+    if ( __on_blkdev_list(blkif) )
+    {
+        list_del(&blkif->blkdev_list);
+        blkif->blkdev_list.next = NULL;
+        blkif_put(blkif);
+    }
+    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
+}
+
+static void add_to_blkdev_list_tail(blkif_t *blkif)
+{
+    unsigned long flags;
+    if ( __on_blkdev_list(blkif) ) return;
+    spin_lock_irqsave(&io_schedule_list_lock, flags);
+    if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
+    {
+        list_add_tail(&blkif->blkdev_list, &io_schedule_list);
+        blkif_get(blkif);
+    }
+    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
+}
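The allocation ring above is a fixed-size free list: slot indexes circulate through pending_ring[], and because pending_prod/pending_cons are free-running, the NR_PENDING_REQS count falls out of simple arithmetic. A compilable miniature of the same structure (names are illustrative, not from the patch):

    /* Sketch only: index-ring allocator for out-of-order completion. */
    #include <stdio.h>

    #define MAX_REQS 8                     /* power of two, like MAX_PENDING_REQS */
    #define MASK(i)  ((i) & (MAX_REQS - 1))

    static unsigned char ring[MAX_REQS];
    static unsigned int prod, cons;        /* free-running indexes */
    #define NR_IN_FLIGHT (MAX_REQS - prod + cons)

    static int alloc_slot(void)
    {
        if (NR_IN_FLIGHT == MAX_REQS)      /* ring empty: all slots in flight */
            return -1;
        return ring[MASK(cons++)];
    }

    static void free_slot(int idx) { ring[MASK(prod++)] = idx; }

    int main(void)
    {
        for (int i = 0; i < MAX_REQS; i++) /* initially every slot is free */
            ring[i] = i;
        prod = MAX_REQS; cons = 0;

        int a = alloc_slot(), b = alloc_slot();
        printf("got slots %d and %d, %u in flight\n", a, b, NR_IN_FLIGHT);
        free_slot(b);                      /* slots may come back out of order */
        free_slot(a);
        return 0;
    }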
+
+
+/******************************************************************
+ * SCHEDULER FUNCTIONS
+ */
+
+static void io_schedule(unsigned long unused)
+{
+    blkif_t          *blkif;
+    struct list_head *ent;
+
+    /* Queue up a batch of requests. */
+    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+            !list_empty(&io_schedule_list) )
+    {
+        ent = io_schedule_list.next;
+        blkif = list_entry(ent, blkif_t, blkdev_list);
+        blkif_get(blkif);
+        remove_from_blkdev_list(blkif);
+        if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
+            add_to_blkdev_list_tail(blkif);
+        blkif_put(blkif);
+    }
+
+    /* Push the batch through to disc. */
+    run_task_queue(&tq_disk);
+}
+
+static DECLARE_TASKLET(io_schedule_tasklet, io_schedule, 0);
+
+static void maybe_trigger_io_schedule(void)
+{
+    /*
+     * Needed so that two processes, which together make the following
+     * predicate true, don't both read stale values and evaluate the
+     * predicate incorrectly. Incredibly unlikely to stall the scheduler
+     * on x86, but...
+     */
+    smp_mb();
+
+    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+         !list_empty(&io_schedule_list) )
+        tasklet_schedule(&io_schedule_tasklet);
+}
+
+
+
+/******************************************************************
+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
+ */
+
+static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
+{
+    unsigned long flags;
+
+    /* An error fails the entire request. */
+    if ( !uptodate )
+    {
+        DPRINTK("Buffer not up-to-date at end of operation\n");
+        pending_req->status = BLKIF_RSP_ERROR;
+    }
+
+    if ( atomic_dec_and_test(&pending_req->pendcnt) )
+    {
+        int pending_idx = pending_req - pending_reqs;
+        fast_flush_area(pending_idx, pending_req->nr_pages);
+        make_response(pending_req->blkif, pending_req->id,
+                      pending_req->operation, pending_req->status);
+        blkif_put(pending_req->blkif);
+        spin_lock_irqsave(&pend_prod_lock, flags);
+        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+        spin_unlock_irqrestore(&pend_prod_lock, flags);
+        maybe_trigger_io_schedule();
+    }
+}
+
+static void end_block_io_op(struct buffer_head *bh, int uptodate)
+{
+    __end_block_io_op(bh->b_private, uptodate);
+    kmem_cache_free(buffer_head_cachep, bh);
+}
+
+
+
+/******************************************************************************
+ * NOTIFICATION FROM GUEST OS.
+ */
+
+void blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+    blkif_t *blkif = dev_id;
+    add_to_blkdev_list_tail(blkif);
+    maybe_trigger_io_schedule();
+}
+
+
+
+/******************************************************************
+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
+ */
+
+static int do_block_io_op(blkif_t *blkif, int max_to_do)
+{
+    blkif_ring_t    *blk_ring = blkif->blk_ring_base;
+    blkif_request_t *req;
+    BLKIF_RING_IDX   i;
+    int              more_to_do = 0;
+
+    /* Take items off the comms ring, taking care not to overflow. */
+    for ( i = blkif->blk_req_cons;
+          (i != blk_ring->req_prod) && ((i-blkif->blk_resp_prod) !=
+                                        BLKIF_RING_SIZE);
+          i++ )
+    {
+        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
+        {
+            more_to_do = 1;
+            break;
+        }
+
+        req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
+        switch ( req->operation )
+        {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+            dispatch_rw_block_io(blkif, req);
+            break;
+
+        case BLKIF_OP_PROBE:
+            dispatch_probe(blkif, req);
+            break;
+
+        default:
+            DPRINTK("error: unknown block io operation [%d]\n",
+                    req->operation);
+            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+            break;
+        }
+    }
+
+    blkif->blk_req_cons = i;
+    return more_to_do;
+}
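do_block_io_op() above is the consumer half of the shared ring: the indexes run freely and are masked only at the moment the array is touched, and consumption stops at req_prod or after a batch quota. A standalone sketch of that loop shape (not from the patch; names illustrative):

    /* Sketch only: batched consumer over a free-running ring. */
    #include <stdio.h>

    #define RING_SIZE 4                   /* power of two */
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    struct ring {
        unsigned int req_prod;            /* advanced by the producer */
        int          slot[RING_SIZE];
    };

    static unsigned int req_cons;         /* consumer's private index */

    static void consume(struct ring *r, int batch)
    {
        unsigned int i;
        for (i = req_cons; i != r->req_prod && batch-- > 0; i++)
            printf("request %u -> payload %d\n", i, r->slot[MASK(i)]);
        req_cons = i;                     /* record our progress */
    }

    int main(void)
    {
        struct ring r = { 0 };
        for (int n = 0; n < 3; n++) {     /* producer posts three requests */
            r.slot[MASK(r.req_prod)] = 100 + n;
            r.req_prod++;
        }
        consume(&r, 2);                   /* batched, like BATCH_PER_DOMAIN */
        consume(&r, 2);                   /* resumes where it left off */
        return 0;
    }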
+
+static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
+{
+    int rsp = BLKIF_RSP_ERROR;
+    int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+    /* We expect one buffer only. */
+    if ( unlikely(req->nr_segments != 1) )
+        goto out;
+
+    /* Make sure the buffer is page-sized. */
+    if ( (blkif_first_sect(req->frame_and_sects[0]) != 0) ||
+         (blkif_last_sect(req->frame_and_sects[0]) != 7) )
+        goto out;
+
+    if ( HYPERVISOR_update_va_mapping_otherdomain(
+        MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
+        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+        0, blkif->domid) )
+        goto out;
+
+    rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
+                    PAGE_SIZE / sizeof(vdisk_t));
+
+ out:
+    fast_flush_area(pending_idx, 1);
+    make_response(blkif, req->id, req->operation, rsp);
+}
+
+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
+{
+    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+    struct buffer_head *bh;
+    int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+    short nr_sects;
+    unsigned long buffer, fas;
+    int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+    pending_req_t *pending_req;
+    unsigned long remap_prot;
+    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+
+    /* We map virtual scatter/gather segments to physical segments. */
+    int new_segs, nr_psegs = 0;
+    phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+
+    /* Check that number of segments is sane. */
+    if ( unlikely(req->nr_segments == 0) ||
+         unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
+    {
+        DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
+        goto bad_descriptor;
+    }
+
+    /*
+     * Check each address/size pair is sane, and convert into a
+     * physical device and block offset. Note that if the offset and size
+     * crosses a virtual extent boundary, we may end up with more
+     * physical scatter/gather segments than virtual segments.
+     */
+    for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
+    {
+        fas      = req->frame_and_sects[i];
+        buffer   = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
+        nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+
+        if ( nr_sects <= 0 )
+            goto bad_descriptor;
+
+        phys_seg[nr_psegs].dev           = req->device;
+        phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
+        phys_seg[nr_psegs].buffer        = buffer;
+        phys_seg[nr_psegs].nr_sects      = nr_sects;
+
+        /* Translate the request into the relevant 'physical device' */
+        new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
+        if ( new_segs < 0 )
+        {
+            DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
+                    operation == READ ? "read" : "write",
+                    req->sector_number + tot_sects,
+                    req->sector_number + tot_sects + nr_sects,
+                    req->device);
+            goto bad_descriptor;
+        }
+
+        nr_psegs += new_segs;
+        ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
+    }
+
+    /* Nonsensical zero-sized request? */
+    if ( unlikely(nr_psegs == 0) )
+        goto bad_descriptor;
+
+    if ( operation == READ )
+        remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
+    else
+        remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
+
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        mcl[i].op      = __HYPERVISOR_update_va_mapping_otherdomain;
+        mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
+        mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
+        mcl[i].args[2] = 0;
+        mcl[i].args[3] = blkif->domid;
+
+        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+            phys_seg[i].buffer >> PAGE_SHIFT;
+    }
+
+    (void)HYPERVISOR_multicall(mcl, nr_psegs);
+
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        if ( unlikely(mcl[i].args[5] != 0) )
+        {
+            DPRINTK("invalid buffer -- could not remap it\n");
+            fast_flush_area(pending_idx, nr_psegs);
+            goto bad_descriptor;
+        }
+    }
+
+    pending_req = &pending_reqs[pending_idx];
+    pending_req->blkif     = blkif;
+    pending_req->id        = req->id;
+    pending_req->operation = operation;
+    pending_req->status    = BLKIF_RSP_OKAY;
+    pending_req->nr_pages  = nr_psegs;
+    atomic_set(&pending_req->pendcnt, nr_psegs);
+    pending_cons++;
+
+    blkif_get(blkif);
+
+    /* Now we pass each segment down to the real blkdev layer. */
+    for ( i = 0; i < nr_psegs; i++ )
+    {
+        bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
+        if ( unlikely(bh == NULL) )
+        {
+            __end_block_io_op(pending_req, 0);
+            continue;
+        }
+        memset(bh, 0, sizeof (struct buffer_head));
+
+        init_waitqueue_head(&bh->b_wait);
+        bh->b_size    = phys_seg[i].nr_sects << 9;
+        bh->b_dev     = phys_seg[i].dev;
+        bh->b_rdev    = phys_seg[i].dev;
+        bh->b_rsector = (unsigned long)phys_seg[i].sector_number;
+        bh->b_data    = (char *)MMAP_VADDR(pending_idx, i) +
+            (phys_seg[i].buffer & ~PAGE_MASK);
+        bh->b_page    = virt_to_page(MMAP_VADDR(pending_idx, i));
+        bh->b_end_io  = end_block_io_op;
+        bh->b_private = pending_req;
+
+        bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) |
+                      (1 << BH_Req)    | (1 << BH_Launder);
+        if ( operation == WRITE )
+            bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate);
+
+        atomic_set(&bh->b_count, 1);
+
+        /* Dispatch a single request. We'll flush it to disc later. */
+        generic_make_request(operation, bh);
+    }
+
+    return;
+
+ bad_descriptor:
+    make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+}
+
+
+
+/******************************************************************
+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+ */
+
+
+static void make_response(blkif_t *blkif, unsigned long id,
+                          unsigned short op, int st)
+{
+    blkif_response_t *resp;
+    unsigned long     flags;
+
+    /* Place on the response ring for the relevant domain. */
+    spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+    resp = &blkif->blk_ring_base->
+        ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
+    resp->id        = id;
+    resp->operation = op;
+    resp->status    = st;
+    wmb();
+    blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
+    spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+    /* Kick the relevant domain. */
+    notify_via_evtchn(blkif->evtchn);
+}
+
+void blkif_deschedule(blkif_t *blkif)
+{
+    remove_from_blkdev_list(blkif);
+}
+
+static int __init init_module(void)
+{
+    int i;
+
+    if ( !(start_info.flags & SIF_INITDOMAIN)
+         && !(start_info.flags & SIF_BLK_BE_DOMAIN) )
+        return 0;
+
+    blkif_interface_init();
+
+    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
+        BUG();
+
+    pending_cons = 0;
+    pending_prod = MAX_PENDING_REQS;
+    memset(pending_reqs, 0, sizeof(pending_reqs));
+    for ( i = 0; i < MAX_PENDING_REQS; i++ )
+        pending_ring[i] = i;
+
+    spin_lock_init(&io_schedule_list_lock);
+    INIT_LIST_HEAD(&io_schedule_list);
+
+    buffer_head_cachep = kmem_cache_create(
+        "buffer_head_cache", sizeof(struct buffer_head),
+        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+    blkif_ctrlif_init();
+
+    return 0;
+}
+
+static void cleanup_module(void)
+{
+    BUG();
+}
+
+module_init(init_module);
+module_exit(cleanup_module);
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/vbd.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/vbd.c
new file mode 100644
index 0000000000..6704fbb541
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/backend/vbd.c
@@ -0,0 +1,436 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/backend/vbd.c
+ *
+ * Routines for managing virtual block devices (VBDs).
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+#include "common.h"
+
+void vbd_create(blkif_be_vbd_create_t *create)
+{
+    vbd_t       *vbd;
+    rb_node_t  **rb_p, *rb_parent = NULL;
+    blkif_t     *blkif;
+    blkif_vdev_t vdevice = create->vdevice;
+
+    blkif = blkif_find_by_handle(create->domid, create->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_create attempted for non-existent blkif (%u,%u)\n",
+                create->domid, create->blkif_handle);
+        create->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    spin_lock(&blkif->vbd_lock);
+
+    rb_p = &blkif->vbd_rb.rb_node;
+    while ( *rb_p != NULL )
+    {
+        rb_parent = *rb_p;
+        vbd = rb_entry(rb_parent, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+        {
+            rb_p = &rb_parent->rb_left;
+        }
+        else if ( vdevice > vbd->vdevice )
+        {
+            rb_p = &rb_parent->rb_right;
+        }
+        else
+        {
+            DPRINTK("vbd_create attempted for already existing vbd\n");
+            create->status = BLKIF_BE_STATUS_VBD_EXISTS;
+            goto out;
+        }
+    }
+
+    if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
+    {
+        DPRINTK("vbd_create: out of memory\n");
+        create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        goto out;
+    }
+
+    vbd->vdevice  = vdevice;
+    vbd->readonly = create->readonly;
+    vbd->type     = VDISK_TYPE_DISK | VDISK_FLAG_VIRT;
+    vbd->extents  = NULL;
+
+    rb_link_node(&vbd->rb, rb_parent, rb_p);
+    rb_insert_color(&vbd->rb, &blkif->vbd_rb);
+
+    DPRINTK("Successful creation of vdev=%04x (dom=%u)\n",
+            vdevice, create->domid);
+    create->status = BLKIF_BE_STATUS_OKAY;
+
+ out:
+    spin_unlock(&blkif->vbd_lock);
+}
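vbd_create() walks the red-black tree with a pointer-to-pointer cursor so the insertion point and the duplicate check fall out of a single descent. The same shape in miniature, with a plain unbalanced binary search tree standing in for the kernel's rb_link_node()/rb_insert_color() (which also rebalance; this sketch does not):

    /* Sketch only: keyed insert with duplicate detection, as in vbd_create(). */
    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned short vdev_t;

    struct node {
        vdev_t vdevice;
        struct node *left, *right;
    };

    /* Returns 0 on success, -1 if the device already exists. */
    static int vbd_insert(struct node **root, vdev_t vdevice)
    {
        struct node **p = root;
        while (*p != NULL) {
            if (vdevice < (*p)->vdevice)      p = &(*p)->left;
            else if (vdevice > (*p)->vdevice) p = &(*p)->right;
            else                              return -1;   /* already exists */
        }
        *p = calloc(1, sizeof(**p));
        (*p)->vdevice = vdevice;
        return 0;
    }

    int main(void)
    {
        struct node *root = NULL;
        printf("%d %d %d\n", vbd_insert(&root, 0x301),
               vbd_insert(&root, 0x302), vbd_insert(&root, 0x301));
        return 0;   /* prints: 0 0 -1 */
    }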
+
+
+/* Grow a VBD by appending a new extent. Fails if the VBD doesn't exist. */
+void vbd_grow(blkif_be_vbd_grow_t *grow)
+{
+    blkif_t            *blkif;
+    blkif_extent_le_t **px, *x;
+    vbd_t              *vbd = NULL;
+    rb_node_t          *rb;
+    blkif_vdev_t        vdevice = grow->vdevice;
+
+    blkif = blkif_find_by_handle(grow->domid, grow->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_grow attempted for non-existent blkif (%u,%u)\n",
+                grow->domid, grow->blkif_handle);
+        grow->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    spin_lock(&blkif->vbd_lock);
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( vdevice > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            break;
+    }
+
+    if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
+    {
+        DPRINTK("vbd_grow: attempted to append extent to non-existent VBD.\n");
+        grow->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+        goto out;
+    }
+
+    if ( unlikely((x = kmalloc(sizeof(blkif_extent_le_t),
+                               GFP_KERNEL)) == NULL) )
+    {
+        DPRINTK("vbd_grow: out of memory\n");
+        grow->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+        goto out;
+    }
+
+    x->extent.device        = grow->extent.device;
+    x->extent.sector_start  = grow->extent.sector_start;
+    x->extent.sector_length = grow->extent.sector_length;
+    x->next                 = (blkif_extent_le_t *)NULL;
+
+    for ( px = &vbd->extents; *px != NULL; px = &(*px)->next )
+        continue;
+
+    *px = x;
+
+    DPRINTK("Successful grow of vdev=%04x (dom=%u)\n",
+            vdevice, grow->domid);
+    grow->status = BLKIF_BE_STATUS_OKAY;
+
+ out:
+    spin_unlock(&blkif->vbd_lock);
+}
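vbd_grow() appends with the classic pointer-to-pointer walk, so an empty extent list needs no special case. The idiom in miniature (illustrative names, not from the patch):

    /* Sketch only: pointer-to-pointer append to a singly linked list. */
    #include <stdio.h>
    #include <stdlib.h>

    struct extent { unsigned long start, len; struct extent *next; };

    static void append(struct extent **list, unsigned long start,
                       unsigned long len)
    {
        struct extent **px = list, *x = calloc(1, sizeof(*x));
        x->start = start;
        x->len   = len;
        while (*px != NULL)          /* walk to the terminating NULL slot */
            px = &(*px)->next;
        *px = x;                     /* works for empty and non-empty lists */
    }

    int main(void)
    {
        struct extent *vbd = NULL;
        append(&vbd, 0, 1024);       /* grow twice, as two GROW messages would */
        append(&vbd, 4096, 2048);
        for (struct extent *x = vbd; x; x = x->next)
            printf("extent @%lu +%lu\n", x->start, x->len);
        return 0;
    }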
+
+
+void vbd_shrink(blkif_be_vbd_shrink_t *shrink)
+{
+    blkif_t            *blkif;
+    blkif_extent_le_t **px, *x;
+    vbd_t              *vbd = NULL;
+    rb_node_t          *rb;
+    blkif_vdev_t        vdevice = shrink->vdevice;
+
+    blkif = blkif_find_by_handle(shrink->domid, shrink->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_shrink attempted for non-existent blkif (%u,%u)\n",
+                shrink->domid, shrink->blkif_handle);
+        shrink->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    spin_lock(&blkif->vbd_lock);
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( vdevice > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            break;
+    }
+
+    if ( unlikely(vbd == NULL) || unlikely(vbd->vdevice != vdevice) )
+    {
+        shrink->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+        goto out;
+    }
+
+    if ( unlikely(vbd->extents == NULL) )
+    {
+        shrink->status = BLKIF_BE_STATUS_EXTENT_NOT_FOUND;
+        goto out;
+    }
+
+    /* Find the last extent. We now know that there is at least one. */
+    for ( px = &vbd->extents; (*px)->next != NULL; px = &(*px)->next )
+        continue;
+
+    x   = *px;
+    *px = x->next;
+    kfree(x);
+
+    shrink->status = BLKIF_BE_STATUS_OKAY;
+
+ out:
+    spin_unlock(&blkif->vbd_lock);
+}
+
+
+void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
+{
+    blkif_t           *blkif;
+    vbd_t             *vbd;
+    rb_node_t         *rb;
+    blkif_extent_le_t *x, *t;
+    blkif_vdev_t       vdevice = destroy->vdevice;
+
+    blkif = blkif_find_by_handle(destroy->domid, destroy->blkif_handle);
+    if ( unlikely(blkif == NULL) )
+    {
+        DPRINTK("vbd_destroy attempted for non-existent blkif (%u,%u)\n",
+                destroy->domid, destroy->blkif_handle);
+        destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+        return;
+    }
+
+    spin_lock(&blkif->vbd_lock);
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( vdevice < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( vdevice > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            goto found;
+    }
+
+    destroy->status = BLKIF_BE_STATUS_VBD_NOT_FOUND;
+    goto out;
+
+ found:
+    rb_erase(rb, &blkif->vbd_rb);
+    x = vbd->extents;
+    kfree(vbd);
+
+    while ( x != NULL )
+    {
+        t = x->next;
+        kfree(x);
+        x = t;
+    }
+
+ out:
+    spin_unlock(&blkif->vbd_lock);
+}
+
+
+void destroy_all_vbds(blkif_t *blkif)
+{
+    vbd_t             *vbd;
+    rb_node_t         *rb;
+    blkif_extent_le_t *x, *t;
+
+    spin_lock(&blkif->vbd_lock);
+
+    while ( (rb = blkif->vbd_rb.rb_node) != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+
+        rb_erase(rb, &blkif->vbd_rb);
+        x = vbd->extents;
+        kfree(vbd);
+
+        while ( x != NULL )
+        {
+            t = x->next;
+            kfree(x);
+            x = t;
+        }
+    }
+
+    spin_unlock(&blkif->vbd_lock);
+}
+
+
+static int vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
+{
+    blkif_extent_le_t *x;
+
+    vbd_info->device = vbd->vdevice;
+    vbd_info->info   = vbd->type;
+    if ( vbd->readonly )
+        vbd_info->info |= VDISK_FLAG_RO;
+    vbd_info->capacity = 0ULL;
+    for ( x = vbd->extents; x != NULL; x = x->next )
+        vbd_info->capacity += x->extent.sector_length;
+
+    return 0;
+}
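vbd_probe_single() fills one probe record: capacity is the sum of the extent lengths, and the read-only state is a flag bit OR'd into the type field. A self-contained version of that record-filling step, quoting the VDISK_* constants defined in this patch's blkif.h (everything else is illustrative):

    /* Sketch only: composing a vdisk_t-style probe record. */
    #include <stdio.h>

    #define VDISK_TYPE_DISK   0x20
    #define VDISK_FLAG_RO     0x40
    #define VDISK_READONLY(x) ((x) & VDISK_FLAG_RO)

    struct extent { unsigned long long sector_length; struct extent *next; };

    struct vdisk { unsigned long long capacity; unsigned short info; };

    static void probe_one(struct vdisk *out, struct extent *list, int readonly)
    {
        out->info = VDISK_TYPE_DISK | (readonly ? VDISK_FLAG_RO : 0);
        out->capacity = 0;
        for (struct extent *x = list; x != NULL; x = x->next)
            out->capacity += x->sector_length;   /* 512-byte sectors */
    }

    int main(void)
    {
        struct extent e2 = { 2048, NULL }, e1 = { 1024, &e2 };
        struct vdisk v;
        probe_one(&v, &e1, 1);
        printf("capacity=%llu sectors, ro=%d\n",
               v.capacity, !!VDISK_READONLY(v.info));
        return 0;
    }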
+
+
+int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds)
+{
+    int        rc = 0, nr_vbds = 0;
+    rb_node_t *rb;
+
+    spin_lock(&blkif->vbd_lock);
+
+    if ( (rb = blkif->vbd_rb.rb_node) == NULL )
+        goto out;
+
+ new_subtree:
+    /* STEP 1. Find least node (it'll be left-most). */
+    while ( rb->rb_left != NULL )
+        rb = rb->rb_left;
+
+    for ( ; ; )
+    {
+        /* STEP 2. Dealt with left subtree. Now process current node. */
+        if ( (rc = vbd_probe_single(blkif, &vbd_info[nr_vbds],
+                                    rb_entry(rb, vbd_t, rb))) != 0 )
+            goto out;
+        if ( ++nr_vbds == max_vbds )
+            goto out;
+
+        /* STEP 3. Process right subtree, if any. */
+        if ( rb->rb_right != NULL )
+        {
+            rb = rb->rb_right;
+            goto new_subtree;
+        }
+
+        /* STEP 4. Done both subtrees. Head back through ancestors. */
+        for ( ; ; )
+        {
+            /* We're done when we get back to the root node. */
+            if ( rb->rb_parent == NULL )
+                goto out;
+            /* If we are left of parent, then parent is next to process. */
+            if ( rb->rb_parent->rb_left == rb )
+                break;
+            /* If we are right of parent, then we climb to grandparent. */
+            rb = rb->rb_parent;
+        }
+
+        rb = rb->rb_parent;
+    }
+
+ out:
+    spin_unlock(&blkif->vbd_lock);
+    return (rc == 0) ? nr_vbds : rc;
+}
+
+
+int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
+{
+    blkif_extent_le_t *x;
+    vbd_t             *vbd;
+    rb_node_t         *rb;
+    blkif_sector_t     sec_off;
+    unsigned long      nr_secs;
+
+    spin_lock(&blkif->vbd_lock);
+
+    rb = blkif->vbd_rb.rb_node;
+    while ( rb != NULL )
+    {
+        vbd = rb_entry(rb, vbd_t, rb);
+        if ( pseg->dev < vbd->vdevice )
+            rb = rb->rb_left;
+        else if ( pseg->dev > vbd->vdevice )
+            rb = rb->rb_right;
+        else
+            goto found;
+    }
+
+    DPRINTK("vbd_translate: domain %u attempted to access "
+            "non-existent VBD.\n", blkif->domid);
+
+    spin_unlock(&blkif->vbd_lock);
+    return -ENODEV;
+
+ found:
+
+    if ( (operation == WRITE) && vbd->readonly )
+    {
+        spin_unlock(&blkif->vbd_lock);
+        return -EACCES;
+    }
+
+    /*
+     * Now iterate through the list of blkif_extents, working out which should
+     * be used to perform the translation.
+     */
+    sec_off = pseg->sector_number;
+    nr_secs = pseg->nr_sects;
+    for ( x = vbd->extents; x != NULL; x = x->next )
+    {
+        if ( sec_off < x->extent.sector_length )
+        {
+            pseg->dev = x->extent.device;
+            pseg->sector_number = x->extent.sector_start + sec_off;
+            if ( unlikely((sec_off + nr_secs) > x->extent.sector_length) )
+                goto overrun;
+            spin_unlock(&blkif->vbd_lock);
+            return 1;
+        }
+        sec_off -= x->extent.sector_length;
+    }
+
+    DPRINTK("vbd_translate: end of vbd.\n");
+    spin_unlock(&blkif->vbd_lock);
+    return -EACCES;
+
+    /*
+     * Here we deal with overrun onto the following extent. We don't deal with
+     * overrun of more than one boundary since each request is restricted to
+     * 2^9 512-byte sectors, so it should be trivial for control software to
+     * ensure that extents are large enough to prevent excessive overrun.
+     */
+ overrun:
+
+    /* Adjust length of first chunk to run to end of first extent. */
+    pseg[0].nr_sects = x->extent.sector_length - sec_off;
+
+    /* Set second chunk buffer and length to start where first chunk ended. */
+    pseg[1].buffer   = pseg[0].buffer + (pseg[0].nr_sects << 9);
+    pseg[1].nr_sects = nr_secs - pseg[0].nr_sects;
+
+    /* Now move to the next extent. Check it exists and is long enough! */
+    if ( unlikely((x = x->next) == NULL) ||
+         unlikely(x->extent.sector_length < pseg[1].nr_sects) )
+    {
+        DPRINTK("vbd_translate: multiple overruns or end of vbd.\n");
+        spin_unlock(&blkif->vbd_lock);
+        return -EACCES;
+    }
+
+    /* Store the real device and start sector for the second chunk. */
+    pseg[1].dev           = x->extent.device;
+    pseg[1].sector_number = x->extent.sector_start;
+
+    spin_unlock(&blkif->vbd_lock);
+    return 2;
+}
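vbd_translate() resolves a sector span against the concatenated extent list and, on overrun, splits the request into two physical segments, which is why dispatch_rw_block_io() sizes phys_seg[] one larger than the virtual segment count. The core of that mapping as a standalone sketch (simplified: extents in a flat array, at most one boundary crossing, as the comment above guarantees; names illustrative):

    /* Sketch only: extent translation with a single overrun split. */
    #include <stdio.h>

    struct extent { unsigned dev; unsigned long long start, len; };
    struct seg    { unsigned dev; unsigned long long sector, nr; };

    /* Returns the number of physical segments produced (1 or 2), or -1. */
    static int translate(struct seg *out, const struct extent *ext, int n,
                         unsigned long long sec, unsigned long long nr)
    {
        for (int i = 0; i < n; i++) {
            if (sec >= ext[i].len) { sec -= ext[i].len; continue; }
            out[0] = (struct seg){ ext[i].dev, ext[i].start + sec, nr };
            if (sec + nr <= ext[i].len)
                return 1;                         /* fits in one extent */
            out[0].nr = ext[i].len - sec;         /* overrun: split in two */
            if (i + 1 == n || nr - out[0].nr > ext[i + 1].len)
                return -1;                        /* runs off the disk */
            out[1] = (struct seg){ ext[i + 1].dev, ext[i + 1].start,
                                   nr - out[0].nr };
            return 2;
        }
        return -1;
    }

    int main(void)
    {
        struct extent ext[2] = { { 3, 1000, 100 }, { 7, 5000, 100 } };
        struct seg s[2];
        int n = translate(s, ext, 2, 90, 20);     /* straddles the boundary */
        for (int i = 0; i < n; i++)
            printf("dev=%u sector=%llu nr=%llu\n", s[i].dev, s[i].sector, s[i].nr);
        return 0;
    }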
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/blkif.h b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/blkif.h
new file mode 100644
index 0000000000..1024629ea7
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/blkif.h
@@ -0,0 +1,115 @@
+/******************************************************************************
+ * blkif.h
+ *
+ * Unified block-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __SHARED_BLKIF_H__
+#define __SHARED_BLKIF_H__
+
+#define blkif_vdev_t   u16
+#define blkif_sector_t u64
+
+#define BLKIF_OP_READ  0
+#define BLKIF_OP_WRITE 1
+#define BLKIF_OP_PROBE 2
+
+/* NB. Ring size must be small enough for sizeof(blkif_ring_t) <= PAGE_SIZE. */
+#define BLKIF_RING_SIZE 64
+
+/*
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
+ */
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+
+typedef struct {
+    u8             operation;     /* 0: BLKIF_OP_???                         */
+    u8             nr_segments;   /* 1: number of segments                   */
+    blkif_vdev_t   device;        /* 2: only for read/write requests         */
+    unsigned long  id;            /* 4: private guest value, echoed in resp  */
+    blkif_sector_t sector_number; /* start sector idx on disk (r/w only)     */
+    /* @f_a_s[2:0]=last_sect ; @f_a_s[5:3]=first_sect ; @f_a_s[:12]=frame.   */
+    /* @first_sect: first sector in frame to transfer (inclusive).           */
+    /* @last_sect: last sector in frame to transfer (inclusive).             */
+    /* @frame: machine page frame number.                                    */
+    unsigned long  frame_and_sects[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+} PACKED blkif_request_t;
+
+#define blkif_first_sect(_fas) (((_fas)>>3)&7)
+#define blkif_last_sect(_fas)  ((_fas)&7)
+
+typedef struct {
+    unsigned long id;             /* copied from request */
+    u8            operation;     /* copied from request */
+    s16           status;        /* BLKIF_RSP_???       */
+} PACKED blkif_response_t;
+
+#define BLKIF_RSP_ERROR  -1 /* non-specific 'error' */
+#define BLKIF_RSP_OKAY    0 /* non-specific 'okay'  */
+
+/*
+ * We use a special capitalised type name because it is _essential_ that all
+ * arithmetic on indexes is done on an integer type of the correct size.
+ */
+typedef u32 BLKIF_RING_IDX;
+
+/*
+ * Ring indexes are 'free running'. That is, they are not stored modulo the
+ * size of the ring buffer. The following macro converts a free-running counter
+ * into a value that can directly index a ring-buffer array.
+ */
+#define MASK_BLKIF_IDX(_i) ((_i)&(BLKIF_RING_SIZE-1))
+
+typedef struct {
+    BLKIF_RING_IDX req_prod;  /* 0: Request producer. Updated by front-end. */
+    BLKIF_RING_IDX resp_prod; /* 4: Response producer. Updated by back-end. */
+    union {                   /* 8 */
+        blkif_request_t  req;
+        blkif_response_t resp;
+    } PACKED ring[BLKIF_RING_SIZE];
+} PACKED blkif_ring_t;
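The blkif_first_sect()/blkif_last_sect() macros above unpack the frame_and_sects descriptor documented in blkif_request_t: the low three bits hold last_sect, the next three first_sect, and the page frame lives in the upper bits. A compilable round-trip, assuming 4KB pages (the packing helper is illustrative, not from the patch):

    /* Sketch only: encode/decode a frame_and_sects descriptor. */
    #include <stdio.h>

    #define blkif_first_sect(fas) (((fas) >> 3) & 7)
    #define blkif_last_sect(fas)  ((fas) & 7)

    static unsigned long fas_pack(unsigned long frame_addr,
                                  unsigned first, unsigned last)
    {
        return (frame_addr & ~0xfffUL) | (first << 3) | last;
    }

    int main(void)
    {
        /* Sectors 2..5 of the page at machine address 0xabcde000. */
        unsigned long fas = fas_pack(0xabcde000UL, 2, 5);

        unsigned long byte_off = blkif_first_sect(fas) << 9;  /* 512B sectors */
        unsigned nr = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
        printf("frame=%#lx offset=%lu nr_sects=%u\n",
               fas & ~0xfffUL, byte_off, nr);
        return 0;
    }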
+
+
+/*
+ * BLKIF_OP_PROBE:
+ * The request format for a probe request is constrained as follows:
+ *   @operation   == BLKIF_OP_PROBE
+ *   @nr_segments == size of probe buffer in pages
+ *   @device      == unused (zero)
+ *   @id          == any value (echoed in response message)
+ *   @sector_num  == unused (zero)
+ *   @frame_and_sects == list of page-sized buffers.
+ *                  (i.e., @first_sect == 0, @last_sect == 7).
+ *
+ * The response is a list of vdisk_t elements copied into the out-of-band
+ * probe buffer. On success the response status field contains the number
+ * of vdisk_t elements.
+ */
+
+/* XXX SMH: Type values below are chosen to match ide_xxx in Linux ide.h. */
+#define VDISK_TYPE_FLOPPY  0x00
+#define VDISK_TYPE_TAPE    0x01
+#define VDISK_TYPE_CDROM   0x05
+#define VDISK_TYPE_OPTICAL 0x07
+#define VDISK_TYPE_DISK    0x20
+
+#define VDISK_TYPE_MASK    0x3F
+#define VDISK_TYPE(_x)     ((_x) & VDISK_TYPE_MASK)
+
+/* The top two bits of the type field encode various flags. */
+#define VDISK_FLAG_RO      0x40
+#define VDISK_FLAG_VIRT    0x80
+#define VDISK_READONLY(_x) ((_x) & VDISK_FLAG_RO)
+#define VDISK_VIRTUAL(_x)  ((_x) & VDISK_FLAG_VIRT)
+
+typedef struct {
+    blkif_sector_t capacity; /*  0: Size in terms of 512-byte sectors.   */
+    blkif_vdev_t   device;   /*  8: Device number (opaque 16 bit value). */
+    u16            info;     /* 10: Device type and flags (VDISK_*).     */
+} PACKED vdisk_t; /* 12 bytes */
+
+#endif /* __SHARED_BLKIF_H__ */
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile
new file mode 100644
index 0000000000..b0d27cf698
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/Makefile
@@ -0,0 +1,3 @@
+O_TARGET := drv.o
+obj-y := main.o vbd.o
+include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/common.h b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/common.h
new file mode 100644
index 0000000000..2d4415bdef
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/common.h
@@ -0,0 +1,84 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/frontend/common.h
+ *
+ * Shared definitions between all levels of XenoLinux Virtual block devices.
+ */
+
+#ifndef __XEN_DRIVERS_COMMON_H__
+#define __XEN_DRIVERS_COMMON_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/hypervisor-ifs/vbd.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "../blkif.h"
+
+#if 0
+#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
+#endif
+
+/* Private gendisk->flags[] values. */
+#define GENHD_FL_XEN         2 /* Is unit a Xen block device?  */
+#define GENHD_FL_VIRT_PARTNS 4 /* Are unit partitions virtual? */
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'.
+ * They hang in an array off the gendisk structure. We may end up putting
+ * all kinds of interesting stuff here :-)
+ */
+typedef struct xl_disk {
+    int usage;
+} xl_disk_t;
+
+extern int  blkif_open(struct inode *inode, struct file *filep);
+extern int  blkif_release(struct inode *inode, struct file *filep);
+extern int  blkif_ioctl(struct inode *inode, struct file *filep,
+                        unsigned command, unsigned long argument);
+extern int  blkif_check(kdev_t dev);
+extern int  blkif_revalidate(kdev_t dev);
+extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+extern void do_blkif_request(request_queue_t *rq);
+
+extern void xlvbd_update_vbds(void);
+
+static inline xl_disk_t *xldev_to_xldisk(kdev_t xldev)
+{
+    struct gendisk *gd = get_gendisk(xldev);
+
+    if ( gd == NULL )
+        return NULL;
+
+    return (xl_disk_t *)gd->real_devices +
+        (MINOR(xldev) >> gd->minor_shift);
+}
+
+
+/* Virtual block-device subsystem. */
+extern int  xlvbd_init(void);
+extern void xlvbd_cleanup(void);
+
+#endif /* __XEN_DRIVERS_COMMON_H__ */
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c
new file mode 100644
index 0000000000..4e5dbca093
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c
@@ -0,0 +1,814 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/frontend/main.c
+ *
+ * XenoLinux virtual block-device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ */
+
+#include "common.h"
+#include <linux/blk.h>
+#include <linux/cdrom.h>
+#include <linux/tqueue.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <asm/ctrl_if.h>
+
+
+
+typedef unsigned char byte; /* from linux/ide.h */
+
+#define BLKIF_STATE_CLOSED       0
+#define BLKIF_STATE_DISCONNECTED 1
+#define BLKIF_STATE_CONNECTED    2
+static unsigned int blkif_state = BLKIF_STATE_CLOSED;
+static unsigned int blkif_evtchn, blkif_irq;
+
+static int blkif_control_rsp_valid;
+static blkif_response_t blkif_control_rsp;
+
+static blkif_ring_t *blk_ring;
+static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
+static BLKIF_RING_IDX req_prod;  /* Private request producer.         */
+
+
+static blkif_ring_t *blk_ring_rec; /* Private copy of requests, used for
+                                    * recovery. Responses not stored here. */
+static BLKIF_RING_IDX resp_cons_rec; /* Copy of response consumer, used for
+                                      * recovery */
+static int recovery = 0;           /* "Recovery in progress" flag. Protected
+                                    * by the io_request_lock */
+
+
+/* We plug the I/O ring if the driver is suspended or if the ring is full. */
+#define RING_PLUGGED (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
+                      (blkif_state != BLKIF_STATE_CONNECTED))
+
+
+/*
+ * Request queues with outstanding work, but ring is currently full.
+ * We need no special lock here, as we always access this with the
+ * io_request_lock held. We only need a small maximum list.
+ */
+#define MAX_PENDING 8
+static request_queue_t *pending_queues[MAX_PENDING];
+static int nr_pending;
+
+static kdev_t        sg_dev;
+static int           sg_operation = -1;
+static unsigned long sg_next_sect;
+#define DISABLE_SCATTERGATHER() (sg_operation = -1)
+
+static inline void flush_requests(void)
+{
+    DISABLE_SCATTERGATHER();
+    blk_ring->req_prod = req_prod;
+    notify_via_evtchn(blkif_evtchn);
+}
+
+
+/*
+ * blkif_update_int/update_vbds_task - handle VBD update events.
+ *  Schedule a task for keventd to run, which will update the VBDs and perform
+ *  the corresponding updates to our view of VBD state.
+ */
+static struct tq_struct update_tq;
+static void update_vbds_task(void *unused)
+{
+    xlvbd_update_vbds();
+}
+
+
+int blkif_open(struct inode *inode, struct file *filep)
+{
+    short xldev = inode->i_rdev;
+    struct gendisk *gd = get_gendisk(xldev);
+    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+    short minor = MINOR(xldev);
+
+    if ( gd->part[minor].nr_sects == 0 )
+    {
+        /*
+         * Device either doesn't exist, or has zero capacity; we use a few
+         * cheesy heuristics to return the relevant error code
+         */
+        if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
+             ((minor & (gd->max_p - 1)) != 0) )
+        {
+            /*
+             * We have a real device, but no such partition, or we just have a
+             * partition number so guess this is the problem.
+             */
+            return -ENXIO;     /* no such device or address */
+        }
+        else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
+        {
+            /* This is a removable device => assume that media is missing. */
+            return -ENOMEDIUM; /* media not present (this is a guess) */
+        }
+        else
+        {
+            /* Just go for the general 'no such device' error. */
+            return -ENODEV;    /* no such device */
+        }
+    }
+
+    /* Update of usage count is protected by per-device semaphore. */
+    disk->usage++;
+
+    return 0;
+}
+
+
+int blkif_release(struct inode *inode, struct file *filep)
+{
+    xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+
+    /*
+     * When usage drops to zero it may allow more VBD updates to occur.
+     * Update of usage count is protected by a per-device semaphore.
+     */
+    if ( --disk->usage == 0 )
+    {
+#if 0
+        update_tq.routine = update_vbds_task;
+        schedule_task(&update_tq);
+#endif
+    }
+
+    return 0;
+}
+
+
+int blkif_ioctl(struct inode *inode, struct file *filep,
+                unsigned command, unsigned long argument)
+{
+    kdev_t dev = inode->i_rdev;
+    struct hd_geometry *geo = (struct hd_geometry *)argument;
+    struct gendisk *gd;
+    struct hd_struct *part;
+    int i;
+
+    /* NB. No need to check permissions. That is done for us. */
+
+    DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+                  command, (long) argument, dev);
+
+    gd   = get_gendisk(dev);
+    part = &gd->part[MINOR(dev)];
+
+    switch ( command )
+    {
+    case BLKGETSIZE:
+        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects);
+        return put_user(part->nr_sects, (unsigned long *) argument);
+
+    case BLKGETSIZE64:
+        DPRINTK_IOCTL("   BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
+                      (u64)part->nr_sects * 512);
+        return put_user((u64)part->nr_sects * 512, (u64 *) argument);
+
+    case BLKRRPART:                               /* re-read partition table */
+        DPRINTK_IOCTL("   BLKRRPART: %x\n", BLKRRPART);
+        return blkif_revalidate(dev);
+
+    case BLKSSZGET:
+        return hardsect_size[MAJOR(dev)][MINOR(dev)];
+
+    case BLKBSZGET:                                        /* get block size */
+        DPRINTK_IOCTL("   BLKBSZGET: %x\n", BLKBSZGET);
+        break;
+
+    case BLKBSZSET:                                        /* set block size */
+        DPRINTK_IOCTL("   BLKBSZSET: %x\n", BLKBSZSET);
+        break;
+
+    case BLKRASET:                                         /* set read-ahead */
+        DPRINTK_IOCTL("   BLKRASET: %x\n", BLKRASET);
+        break;
+
+    case BLKRAGET:                                         /* get read-ahead */
+        DPRINTK_IOCTL("   BLKRAGET: %x\n", BLKRAGET);
+        break;
+
+    case HDIO_GETGEO:
+        /* note: these values are complete garbage */
+        DPRINTK_IOCTL("   HDIO_GETGEO: %x\n", HDIO_GETGEO);
+        if (!argument) return -EINVAL;
+        if (put_user(0x00,  (unsigned long *) &geo->start)) return -EFAULT;
+        if (put_user(0xff,  (byte *)&geo->heads)) return -EFAULT;
+        if (put_user(0x3f,  (byte *)&geo->sectors)) return -EFAULT;
+        if (put_user(0x106, (unsigned short *)&geo->cylinders)) return -EFAULT;
+        return 0;
+
+    case HDIO_GETGEO_BIG:
+        /* note: these values are complete garbage */
+        DPRINTK_IOCTL("   HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
+        if (!argument) return -EINVAL;
+        if (put_user(0x00,  (unsigned long *) &geo->start)) return -EFAULT;
+        if (put_user(0xff,  (byte *)&geo->heads)) return -EFAULT;
+        if (put_user(0x3f,  (byte *)&geo->sectors)) return -EFAULT;
+        if (put_user(0x106, (unsigned int *) &geo->cylinders)) return -EFAULT;
+        return 0;
+
+    case CDROMMULTISESSION:
+        DPRINTK("FIXME: support multisession CDs later\n");
+        for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
+            if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
+        return 0;
+
+    case SCSI_IOCTL_GET_BUS_NUMBER:
+        DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in XL blkif");
+        return -ENOSYS;
+
+    default:
+        printk(KERN_ALERT "ioctl %08x not supported by XL blkif\n", command);
+        return -ENOSYS;
+    }
+
+    return 0;
+}
+
+/* check media change: should probably do something here in some cases :-) */
+int blkif_check(kdev_t dev)
+{
+    DPRINTK("blkif_check\n");
+    return 0;
+}
+
+int blkif_revalidate(kdev_t dev)
+{
+    struct block_device *bd;
+    struct gendisk *gd;
+    xl_disk_t *disk;
+    unsigned long capacity;
+    int i, rc = 0;
+
+    if ( (bd = bdget(dev)) == NULL )
+        return -EINVAL;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(dev)) == NULL) ||
+         ((disk = xldev_to_xldisk(dev)) == NULL) ||
+         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( disk->usage > 1 )
+    {
+        rc = -EBUSY;
+        goto out;
+    }
+
+    /* Only reread partition table if VBDs aren't mapped to partitions. */
+    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+    {
+        for ( i = gd->max_p - 1; i >= 0; i-- )
+        {
+            invalidate_device(dev+i, 1);
+            gd->part[MINOR(dev+i)].start_sect = 0;
+            gd->part[MINOR(dev+i)].nr_sects   = 0;
+            gd->sizes[MINOR(dev+i)]           = 0;
+        }
+
+        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ *   virtual address in the guest os.
+ */
+static int blkif_queue_request(unsigned long   id,
+                               int             operation,
+                               char *          buffer,
+                               unsigned long   sector_number,
+                               unsigned short  nr_sectors,
+                               kdev_t          device)
+{
+    unsigned long       buffer_ma = phys_to_machine(virt_to_phys(buffer));
+    struct gendisk     *gd;
+    blkif_request_t    *req;
+    struct buffer_head *bh;
+    unsigned int        fsect, lsect;
+
+    fsect = (buffer_ma & ~PAGE_MASK) >> 9;
+    lsect = fsect + nr_sectors - 1;
+
+    /* Buffer must be sector-aligned. Extent mustn't cross a page boundary. */
+    if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
+        BUG();
+    if ( lsect > 7 )
+        BUG();
+
+    buffer_ma &= PAGE_MASK;
+
+    if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+        return 1;
+
+    switch ( operation )
+    {
+
+    case BLKIF_OP_READ:
+    case BLKIF_OP_WRITE:
+        gd = get_gendisk(device);
+
+        /*
+         * Update the sector_number we'll pass down as appropriate; note that
+         * we could sanity check that resulting sector will be in this
+         * partition, but this will happen in driver backend anyhow.
+         */
+        sector_number += gd->part[MINOR(device)].start_sect;
+
+        /*
+         * If this unit doesn't consist of virtual partitions then we clear
+         * the partn bits from the device number.
+         */
+        if ( !(gd->flags[MINOR(device)>>gd->minor_shift] &
+               GENHD_FL_VIRT_PARTNS) )
+            device &= ~(gd->max_p - 1);
+
+        if ( (sg_operation == operation) &&
+             (sg_dev == device) &&
+             (sg_next_sect == sector_number) )
+        {
+            req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
+            bh = (struct buffer_head *)id;
+            bh->b_reqnext = (struct buffer_head *)req->id;
+            req->id = id;
+            req->frame_and_sects[req->nr_segments] =
+                buffer_ma | (fsect<<3) | lsect;
+            if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
+                sg_next_sect += nr_sectors;
+            else
+                DISABLE_SCATTERGATHER();
+
+            /* Update the copy of the request in the recovery ring. */
+            blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod - 1)].req
+                = *req;
+
+            return 0;
+        }
+        else if ( RING_PLUGGED )
+        {
+            return 1;
+        }
+        else
+        {
+            sg_operation = operation;
+            sg_dev       = device;
+            sg_next_sect = sector_number + nr_sectors;
+        }
+        break;
+
+    default:
+        panic("unknown op %d\n", operation);
+    }
+
+    /* Fill out a communications ring structure. */
+    req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+    req->id            = id;
+    req->operation     = operation;
+    req->sector_number = (blkif_sector_t)sector_number;
+    req->device        = device;
+    req->nr_segments   = 1;
+    req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
+    req_prod++;
+
+    /* Keep a private copy so we can reissue requests when recovering. */
+    blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req = *req;
+    blk_ring_rec->req_prod++;
+
+    return 0;
+}
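blkif_queue_request() fills ring slots through a private producer index; flush_requests() only then publishes blk_ring->req_prod and kicks the event channel, mirroring the wmb() before the producer update in the backend's make_response(). The producer half of that protocol as a user-space sketch (C11 release ordering standing in for wmb(); names illustrative, not from the patch):

    /* Sketch only: batch into private index, publish once, then notify. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 4
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    struct ring {
        _Atomic unsigned int req_prod;
        int slot[RING_SIZE];
    };

    static unsigned int req_prod_pvt;    /* private copy, like req_prod */

    static void notify(void) { printf("kick event channel\n"); }

    static void post(struct ring *r, int payload)
    {
        r->slot[MASK(req_prod_pvt)] = payload;
        req_prod_pvt++;
    }

    static void flush(struct ring *r)
    {
        /* Release store: payload is visible before the index moves. */
        atomic_store_explicit(&r->req_prod, req_prod_pvt,
                              memory_order_release);
        notify();
    }

    int main(void)
    {
        struct ring r;
        atomic_init(&r.req_prod, 0);
        post(&r, 1);                     /* queue a batch... */
        post(&r, 2);
        flush(&r);                       /* ...publish and kick once */
        return 0;
    }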
+/*
+ * do_blkif_request
+ *  read a block; request is in a request queue
+ */
+void do_blkif_request(request_queue_t *rq)
+{
+    struct request *req;
+    struct buffer_head *bh, *next_bh;
+    int rw, nsect, full, queued = 0;
+
+    DPRINTK("Entered do_blkif_request\n");
+
+    while ( !rq->plugged && !list_empty(&rq->queue_head) )
+    {
+        if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL )
+            goto out;
+
+        DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
+                req, req->cmd, req->sector,
+                req->current_nr_sectors, req->nr_sectors, req->bh);
+
+        rw = req->cmd;
+        if ( rw == READA )
+            rw = READ;
+        if ( unlikely((rw != READ) && (rw != WRITE)) )
+            panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
+
+        req->errors = 0;
+
+        bh = req->bh;
+        while ( bh != NULL )
+        {
+            next_bh = bh->b_reqnext;
+            bh->b_reqnext = NULL;
+
+            full = blkif_queue_request(
+                (unsigned long)bh,
+                (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE,
+                bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
+
+            if ( full )
+            {
+                /* The shared ring is full: requeue and retry later. */
+                bh->b_reqnext = next_bh;
+                pending_queues[nr_pending++] = rq;
+                if ( unlikely(nr_pending >= MAX_PENDING) )
+                    BUG();
+                goto out;
+            }
+
+            queued++;
+
+            /* Dequeue the buffer head from the request. */
+            nsect = bh->b_size >> 9;
+            bh = req->bh = next_bh;
+
+            if ( bh != NULL )
+            {
+                /* There's another buffer head to do. Update the request. */
+                req->hard_sector += nsect;
+                req->hard_nr_sectors -= nsect;
+                req->sector = req->hard_sector;
+                req->nr_sectors = req->hard_nr_sectors;
+                req->current_nr_sectors = bh->b_size >> 9;
+                req->buffer = bh->b_data;
+            }
+            else
+            {
+                /* That was the last buffer head. Finalise the request. */
+                if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
+                    BUG();
+                blkdev_dequeue_request(req);
+                end_that_request_last(req);
+            }
+        }
+    }
+
+ out:
+    if ( queued != 0 )
+        flush_requests();
+}
+
+
+static void kick_pending_request_queues(void)
+{
+    /* We kick pending request queues if the ring is reasonably empty. */
+    if ( (nr_pending != 0) &&
+         ((req_prod - resp_cons) < (BLK_RING_SIZE >> 1)) )
+    {
+        /* Attempt to drain the queue, but bail if the ring becomes full. */
+        while ( (nr_pending != 0) && !RING_PLUGGED )
+            do_blkif_request(pending_queues[--nr_pending]);
+    }
+}
+
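+/*
+ * Editor's sketch (not part of the original patch): the ring bookkeeping
+ * behind req_prod/resp_cons and RING_PLUGGED. Both indices are free-running
+ * counters; because the ring size is a power of two, masking yields the slot
+ * and unsigned subtraction yields the occupancy. Hypothetical names:
+ */
+#if 0
+#define RING_SIZE    64                      /* must be a power of two */
+#define RING_MASK(i) ((i) & (RING_SIZE-1))   /* free-running index -> slot */
+
+static unsigned int prod, cons;              /* never reset, just wrap */
+
+static int ring_full(void)         { return (prod - cons) == RING_SIZE; }
+static int ring_fairly_empty(void) { return (prod - cons) < (RING_SIZE >> 1); }
+
+static void produce_one(void)
+{
+    if ( !ring_full() )
+    {
+        /* ... fill slot RING_MASK(prod) here ... */
+        prod++;        /* unsigned wraparound keeps (prod - cons) correct */
+    }
+}
+#endif
+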
+static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+    BLK_RING_IDX i;
+    unsigned long flags;
+    struct buffer_head *bh, *next_bh;
+
+//  printk(KERN_ALERT "blkif_int\n");
+
+    spin_lock_irqsave(&io_request_lock, flags);
+
+    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
+    {
+        printk("Bailed out\n");
+
+        spin_unlock_irqrestore(&io_request_lock, flags);
+        return;
+    }
+
+    for ( i = resp_cons; i != blk_ring->resp_prod; i++ )
+    {
+        blkif_response_t *bret = &blk_ring->ring[MASK_BLK_IDX(i)].resp;
+        switch ( bret->operation )
+        {
+        case BLKIF_OP_READ:
+        case BLKIF_OP_WRITE:
+            if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+                DPRINTK("Bad return from blkdev data request: %lx\n",
+                        bret->status);
+            for ( bh = (struct buffer_head *)bret->id;
+                  bh != NULL;
+                  bh = next_bh )
+            {
+                next_bh = bh->b_reqnext;
+                bh->b_reqnext = NULL;
+                bh->b_end_io(bh, bret->status == BLKIF_RSP_OKAY);
+            }
+            break;
+        case BLKIF_OP_PROBE:
+            memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+            blkif_control_rsp_valid = 1;
+            break;
+        default:
+            BUG();
+        }
+    }
+
+    resp_cons = i;
+    resp_cons_rec = i;
+
+    kick_pending_request_queues();
+
+    spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+
+void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
+{
+    unsigned long flags;
+
+ retry:
+    while ( (req_prod - resp_cons) == BLK_RING_SIZE )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    spin_lock_irqsave(&io_request_lock, flags);
+    if ( (req_prod - resp_cons) == BLK_RING_SIZE )
+    {
+        spin_unlock_irqrestore(&io_request_lock, flags);
+        goto retry;
+    }
+
+    DISABLE_SCATTERGATHER();
+    memcpy(&blk_ring->ring[MASK_BLK_IDX(req_prod)].req, req, sizeof(*req));
+    memcpy(&blk_ring_rec->ring[MASK_BLK_IDX(blk_ring_rec->req_prod++)].req,
+           req, sizeof(*req));
+    req_prod++;
+    flush_requests();
+
+    spin_unlock_irqrestore(&io_request_lock, flags);
+
+    while ( !blkif_control_rsp_valid )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
+    blkif_control_rsp_valid = 0;
+}
+
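+/*
+ * Editor's sketch (not part of the original patch): several buffer heads can
+ * share one ring request. blkif_queue_request() threads them through
+ * b_reqnext and stores the chain head in req->id; blkif_int() above walks
+ * that chain to complete each buffer. Standalone restatement with
+ * hypothetical types and names:
+ */
+#if 0
+struct buf { struct buf *next; };
+
+/* Queueing side: push another buffer onto the chain identified by *id. */
+static void chain_push(unsigned long *id, struct buf *b)
+{
+    b->next = (struct buf *)*id;
+    *id = (unsigned long)b;
+}
+
+/* Completion side: detach and finish every buffer on the chain. */
+static void chain_complete(unsigned long id, void (*end_io)(struct buf *))
+{
+    struct buf *b, *next;
+    for ( b = (struct buf *)id; b != NULL; b = next )
+    {
+        next = b->next;
+        b->next = NULL;
+        end_io(b);
+    }
+}
+#endif
+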
+static void blkif_status_change(blkif_fe_interface_status_changed_t *status)
+{
+    ctrl_msg_t                   cmsg;
+    blkif_fe_interface_connect_t up;
+
+    if ( status->handle != 0 )
+    {
+        printk(KERN_WARNING "Status change on unsupported blkif %d\n",
+               status->handle);
+        return;
+    }
+
+    switch ( status->status )
+    {
+    case BLKIF_INTERFACE_STATUS_DESTROYED:
+        printk(KERN_WARNING "Unexpected blkif-DESTROYED message in state %d\n",
+               blkif_state);
+        break;
+
+    case BLKIF_INTERFACE_STATUS_DISCONNECTED:
+        if ( blkif_state != BLKIF_STATE_CLOSED )
+        {
+            printk(KERN_WARNING "Unexpected blkif-DISCONNECTED message"
+                   " in state %d\n", blkif_state);
+
+            printk(KERN_INFO "VBD driver recovery in progress\n");
+
+            /* Prevent new requests being issued until we've fixed things up. */
+            spin_lock_irq(&io_request_lock);
+            recovery = 1;
+            blkif_state = BLKIF_STATE_DISCONNECTED;
+            spin_unlock_irq(&io_request_lock);
+
+            /* Free resources associated with old device channel. */
+            free_page((unsigned long)blk_ring);
+            free_irq(blkif_irq, NULL);
+            unbind_evtchn_from_irq(blkif_evtchn);
+        }
+
+        /* Move from CLOSED to DISCONNECTED state. */
+        blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
+        blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
+        blkif_state = BLKIF_STATE_DISCONNECTED;
+
+        /* Construct an interface-CONNECT message for the domain controller. */
+        cmsg.type      = CMSG_BLKIF_FE;
+        cmsg.subtype   = CMSG_BLKIF_FE_INTERFACE_CONNECT;
+        cmsg.length    = sizeof(blkif_fe_interface_connect_t);
+        up.handle      = 0;
+        up.shmem_frame = virt_to_machine(blk_ring) >> PAGE_SHIFT;
+        memcpy(cmsg.msg, &up, sizeof(up));
+
+        /* Tell the controller to bring up the interface. */
+        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+        break;
+
+    case BLKIF_INTERFACE_STATUS_CONNECTED:
+        if ( blkif_state == BLKIF_STATE_CLOSED )
+        {
+            printk(KERN_WARNING "Unexpected blkif-CONNECTED message"
+                   " in state %d\n", blkif_state);
+            break;
+        }
+
+        blkif_evtchn = status->evtchn;
+        blkif_irq    = bind_evtchn_to_irq(blkif_evtchn);
+        (void)request_irq(blkif_irq, blkif_int, 0, "blkif", NULL);
+
+        if ( recovery )
+        {
+            int i;
+
+            /*
+             * Shouldn't need the io_request_lock here - the device is
+             * plugged and the recovery flag prevents the interrupt handler
+             * changing anything.
+             */
+
+            /* Reissue requests from the private block ring. */
+            for ( i = 0;
+                  resp_cons_rec < blk_ring_rec->req_prod;
+                  resp_cons_rec++, i++ )
+            {
+                blk_ring->ring[i].req
+                    = blk_ring_rec->ring[MASK_BLK_IDX(resp_cons_rec)].req;
+            }
+
+            /* Reset the private block ring to match the new ring. */
+            memcpy(blk_ring_rec, blk_ring, sizeof(*blk_ring));
+            resp_cons_rec = 0;
+
+            /* blk_ring->req_prod will be set when we flush_requests(). */
+            blk_ring_rec->req_prod = req_prod = i;
+
+            wmb();
+
+            /*
+             * Switch off recovery mode, using a memory barrier to ensure
+             * that it's seen before we flush requests - we don't want to
+             * miss any interrupts.
+             */
+            recovery = 0;
+            wmb();
+
+            /* Kicks things back into life. */
+            flush_requests();
+        }
+        else
+        {
+            /* Probe for discs that are attached to the interface. */
+            xlvbd_init();
+        }
+
+        blkif_state = BLKIF_STATE_CONNECTED;
+
+        /* Kick pending requests. */
+        spin_lock_irq(&io_request_lock);
+        kick_pending_request_queues();
+        spin_unlock_irq(&io_request_lock);
+
+        break;
+
+    default:
+        printk(KERN_WARNING "Status change to unknown value %d\n",
+               status->status);
+        break;
+    }
+}
+
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    switch ( msg->subtype )
+    {
+    case CMSG_BLKIF_FE_INTERFACE_STATUS_CHANGED:
+        if ( msg->length != sizeof(blkif_fe_interface_status_changed_t) )
+            goto parse_error;
+        blkif_status_change((blkif_fe_interface_status_changed_t *)
+                            &msg->msg[0]);
+        break;
+#if 0
+    case CMSG_BLKIF_FE_VBD_STATUS_CHANGED:
+        update_tq.routine = update_vbds_task;
+        schedule_task(&update_tq);
+        break;
+#endif
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
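+/*
+ * Editor's sketch (not part of the original patch): the recovery scheme used
+ * by blkif_status_change() above. Every request is mirrored into a private
+ * shadow ring; after a reconnect, entries the backend never acknowledged are
+ * copied to the front of the fresh shared ring and reissued. Hypothetical,
+ * simplified types:
+ */
+#if 0
+#define R_SIZE    64
+#define R_MASK(i) ((i) & (R_SIZE-1))
+typedef struct { int payload; } entry_t;  /* stand-in for the real request */
+typedef struct { unsigned int req_prod; entry_t ring[R_SIZE]; } ring_t;
+
+static void replay_unacked(ring_t *shared, ring_t *shadow,
+                           unsigned int acked, unsigned int produced)
+{
+    unsigned int i, n = 0;
+
+    /* Entries in [acked, produced) were issued but never answered. */
+    for ( i = acked; i != produced; i++ )
+        shared->ring[R_MASK(n++)] = shadow->ring[R_MASK(i)];
+
+    /* Restart the shadow in lockstep with the fresh shared ring. */
+    *shadow = *shared;
+    shadow->req_prod = n;  /* the shared req_prod is bumped on the next flush */
+}
+#endif
+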
+int __init xlblk_init(void)
+{
+    ctrl_msg_t                       cmsg;
+    blkif_fe_driver_status_changed_t st;
+
+    if ( (start_info.flags & SIF_INITDOMAIN) ||
+         (start_info.flags & SIF_BLK_BE_DOMAIN) )
+        return 0;
+
+    printk(KERN_INFO "Initialising Xen virtual block device\n");
+
+    blk_ring_rec = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
+    memset(blk_ring_rec, 0, sizeof(*blk_ring_rec));
+
+    (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* Send a driver-UP notification to the domain controller. */
+    cmsg.type    = CMSG_BLKIF_FE;
+    cmsg.subtype = CMSG_BLKIF_FE_DRIVER_STATUS_CHANGED;
+    cmsg.length  = sizeof(blkif_fe_driver_status_changed_t);
+    st.status    = BLKIF_DRIVER_STATUS_UP;
+    memcpy(cmsg.msg, &st, sizeof(st));
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+
+    /*
+     * We should read 'nr_interfaces' from the response message and wait
+     * for notifications before proceeding. For now we assume that we
+     * will be notified of exactly one interface.
+     */
+    while ( blkif_state != BLKIF_STATE_CONNECTED )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    return 0;
+}
+
+
+static void __exit xlblk_cleanup(void)
+{
+    /* XXX FIXME */
+    BUG();
+}
+
+
+#ifdef MODULE
+module_init(xlblk_init);
+module_exit(xlblk_cleanup);
+#endif
+
+
+void blkdev_suspend(void)
+{
+    /* XXX FIXME */
+    BUG();
+}
+
+
+void blkdev_resume(void)
+{
+    /* XXX FIXME */
+    BUG();
+}
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c
new file mode 100644
index 0000000000..12ce976cb5
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/vbd.c
@@ -0,0 +1,561 @@
+/******************************************************************************
+ * arch/xen/drivers/blkif/frontend/vbd.c
+ *
+ * Xenolinux virtual block-device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ */
+
+#include "common.h"
+#include <linux/blk.h>
+
+/*
+ * For convenience we distinguish between ide, scsi and 'other' (i.e.
+ * potentially combinations of the two) in the naming scheme and in a few
+ * other places (like default readahead, etc).
+ */
+#define XLIDE_MAJOR_NAME  "hd"
+#define XLSCSI_MAJOR_NAME "sd"
+#define XLVBD_MAJOR_NAME  "xvd"
+
+#define XLIDE_DEVS_PER_MAJOR   2
+#define XLSCSI_DEVS_PER_MAJOR 16
+#define XLVBD_DEVS_PER_MAJOR  16
+
+#define XLIDE_PARTN_SHIFT  6    /* amount to shift minor to get 'real' minor */
+#define XLIDE_MAX_PART    (1 << XLIDE_PARTN_SHIFT)    /* minors per ide vbd */
+
+#define XLSCSI_PARTN_SHIFT 4    /* amount to shift minor to get 'real' minor */
+#define XLSCSI_MAX_PART   (1 << XLSCSI_PARTN_SHIFT)   /* minors per scsi vbd */
+
+#define XLVBD_PARTN_SHIFT  4    /* amount to shift minor to get 'real' minor */
+#define XLVBD_MAX_PART    (1 << XLVBD_PARTN_SHIFT) /* minors per 'other' vbd */
+
+/* The below are for the generic drivers/block/ll_rw_block.c code. */
+static int xlide_blksize_size[256];
+static int xlide_hardsect_size[256];
+static int xlide_max_sectors[256];
+static int xlscsi_blksize_size[256];
+static int xlscsi_hardsect_size[256];
+static int xlscsi_max_sectors[256];
+static int xlvbd_blksize_size[256];
+static int xlvbd_hardsect_size[256];
+static int xlvbd_max_sectors[256];
+
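+/*
+ * Editor's sketch (not part of the original patch): how the PARTN_SHIFT and
+ * MAX_PART constants above carve up a minor number. The top bits select the
+ * unit (disc) on a major, the bottom bits the partition within it. The
+ * helpers are hypothetical.
+ */
+#if 0
+static inline int xl_unit(int minor, int partn_shift)
+{
+    return minor >> partn_shift;         /* which disc on this major */
+}
+
+static inline int xl_part(int minor, int max_part)
+{
+    return minor & (max_part - 1);       /* 0 = whole disc, 1..N = partition */
+}
+
+/*
+ * E.g. with XLSCSI_PARTN_SHIFT == 4 and XLSCSI_MAX_PART == 16, minor 17
+ * decodes as unit 1, partition 1 (i.e. "sdb1" in the conventional naming).
+ */
+#endif
+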
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static int nr_vbds;
+static vdisk_t *vbd_info;
+
+static struct block_device_operations xlvbd_block_fops =
+{
+    open:               blkif_open,
+    release:            blkif_release,
+    ioctl:              blkif_ioctl,
+    check_media_change: blkif_check,
+    revalidate:         blkif_revalidate,
+};
+
+static int xlvbd_get_vbd_info(vdisk_t *disk_info)
+{
+    vdisk_t *buf = (vdisk_t *)__get_free_page(GFP_KERNEL);
+    blkif_request_t req;
+    blkif_response_t rsp;
+    int nr;
+
+    memset(&req, 0, sizeof(req));
+    req.operation = BLKIF_OP_PROBE;
+    req.nr_segments = 1;
+    req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+
+    blkif_control_send(&req, &rsp);
+
+    if ( rsp.status <= 0 )
+    {
+        printk(KERN_ALERT "Could not probe disks (%d)\n", rsp.status);
+        free_page((unsigned long)buf);   /* don't leak the probe page */
+        return -1;
+    }
+
+    if ( (nr = rsp.status) > MAX_VBDS )
+        nr = MAX_VBDS;
+    memcpy(disk_info, buf, nr * sizeof(vdisk_t));
+
+    free_page((unsigned long)buf);       /* don't leak the probe page */
+    return nr;
+}
+
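+/*
+ * Editor's sketch (not part of the original patch): the probe above hands
+ * the backend one whole page by describing it as a single segment covering
+ * sectors 0..7 (hence the "| 7"), i.e. the same frame/first/last packing as
+ * the data path. Hypothetical helper:
+ */
+#if 0
+static unsigned long whole_page_segment(unsigned long page_ma)
+{
+    unsigned int fsect = 0, lsect = 7;   /* all eight 512-byte sectors */
+    return (page_ma & PAGE_MASK) | (fsect << 3) | lsect;
+}
+#endif
+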
+/*
+ * xlvbd_init_device - initialise a VBD device
+ * @xd: a vdisk_t describing the VBD
+ *
+ * Takes a vdisk_t * that describes a VBD the domain has access to.
+ * Performs appropriate initialisation and registration of the device.
+ *
+ * Care needs to be taken when making re-entrant calls to ensure that
+ * corruption does not occur. Also, devices that are in use should not have
+ * their details updated. This is the caller's responsibility.
+ */
+static int xlvbd_init_device(vdisk_t *xd)
+{
+    int device = xd->device;
+    int major  = MAJOR(device);
+    int minor  = MINOR(device);
+    int is_ide = IDE_DISK_MAJOR(major);  /* is this an ide device? */
+    int is_scsi= SCSI_BLK_MAJOR(major);  /* is this a scsi device? */
+    char *major_name;
+    struct gendisk *gd;
+    struct block_device *bd;
+    xl_disk_t *disk;
+    int i, rc = 0, max_part, partno;
+    unsigned long capacity;
+
+    unsigned char buf[64];
+
+    if ( (bd = bdget(device)) == NULL )
+        return -1;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((disk = xldev_to_xldisk(device)) != NULL) && (disk->usage != 0) )
+    {
+        printk(KERN_ALERT "VBD update failed - in use [dev=%x]\n", device);
+        rc = -1;
+        goto out;
+    }
+
+    if ( is_ide )
+    {
+        major_name = XLIDE_MAJOR_NAME;
+        max_part   = XLIDE_MAX_PART;
+    }
+    else if ( is_scsi )
+    {
+        major_name = XLSCSI_MAJOR_NAME;
+        max_part   = XLSCSI_MAX_PART;
+    }
+    else if ( XD_VIRTUAL(xd->info) )
+    {
+        major_name = XLVBD_MAJOR_NAME;
+        max_part   = XLVBD_MAX_PART;
+    }
+    else
+    {
+        /* SMH: hmm - probably a CCISS driver or sim; assume CCISS for now */
+        printk(KERN_ALERT "Assuming device %02x:%02x is CCISS/SCSI\n",
+               major, minor);
+        is_scsi    = 1;
+        major_name = "cciss";
+        max_part   = XLSCSI_MAX_PART;
+    }
+
+    partno = minor & (max_part - 1);
+
+    if ( (gd = get_gendisk(device)) == NULL )
+    {
+        rc = register_blkdev(major, major_name, &xlvbd_block_fops);
+        if ( rc < 0 )
+        {
+            printk(KERN_ALERT "XL VBD: can't get major %d\n", major);
+            goto out;
+        }
+
+        if ( is_ide )
+        {
+            blksize_size[major]  = xlide_blksize_size;
+            hardsect_size[major] = xlide_hardsect_size;
+            max_sectors[major]   = xlide_max_sectors;
+            read_ahead[major]    = 8;  /* from drivers/ide/ide-probe.c */
+        }
+        else if ( is_scsi )
+        {
+            blksize_size[major]  = xlscsi_blksize_size;
+            hardsect_size[major] = xlscsi_hardsect_size;
+            max_sectors[major]   = xlscsi_max_sectors;
+            read_ahead[major]    = 0;  /* XXX 8; -- guessing */
+        }
+        else
+        {
+            blksize_size[major]  = xlvbd_blksize_size;
+            hardsect_size[major] = xlvbd_hardsect_size;
+            max_sectors[major]   = xlvbd_max_sectors;
+            read_ahead[major]    = 8;
+        }
+
+        blk_init_queue(BLK_DEFAULT_QUEUE(major), do_blkif_request);
+
+        /*
+         * Turn off barking 'headactive' mode. We dequeue buffer heads as
+         * soon as we pass them to the back-end driver.
+         */
+        blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
+
+        /* Construct an appropriate gendisk structure. */
+        gd             = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
+        gd->major      = major;
+        gd->major_name = major_name;
+
+        gd->max_p      = max_part;
+        if ( is_ide )
+        {
+            gd->minor_shift = XLIDE_PARTN_SHIFT;
+            gd->nr_real     = XLIDE_DEVS_PER_MAJOR;
+        }
+        else if ( is_scsi )
+        {
+            gd->minor_shift = XLSCSI_PARTN_SHIFT;
+            gd->nr_real     = XLSCSI_DEVS_PER_MAJOR;
+        }
+        else
+        {
+            gd->minor_shift = XLVBD_PARTN_SHIFT;
+            gd->nr_real     = XLVBD_DEVS_PER_MAJOR;
+        }
+
+        /*
+         * The sizes[] and part[] arrays hold the sizes and other
+         * information about every partition with this 'major' (i.e.
+         * every disk sharing the 8 bit prefix * max partns per disk).
+         */
+        gd->sizes = kmalloc(max_part*gd->nr_real*sizeof(int), GFP_KERNEL);
+        gd->part  = kmalloc(max_part*gd->nr_real*sizeof(struct hd_struct),
+                            GFP_KERNEL);
+        memset(gd->sizes, 0, max_part * gd->nr_real * sizeof(int));
+        memset(gd->part,  0, max_part * gd->nr_real
+               * sizeof(struct hd_struct));
+
+        gd->real_devices = kmalloc(gd->nr_real * sizeof(xl_disk_t),
+                                   GFP_KERNEL);
+        memset(gd->real_devices, 0, gd->nr_real * sizeof(xl_disk_t));
+
+        gd->next = NULL;
+        gd->fops = &xlvbd_block_fops;
+
+        gd->de_arr = kmalloc(gd->nr_real * sizeof(*gd->de_arr),
+                             GFP_KERNEL);
+        gd->flags  = kmalloc(gd->nr_real * sizeof(*gd->flags), GFP_KERNEL);
+
+        memset(gd->de_arr, 0, gd->nr_real * sizeof(*gd->de_arr));
+        memset(gd->flags,  0, gd->nr_real * sizeof(*gd->flags));
+
+        add_gendisk(gd);
+
+        blk_size[major] = gd->sizes;
+    }
+
+    if ( XD_READONLY(xd->info) )
+        set_device_ro(device, 1);
+
+    gd->flags[minor >> gd->minor_shift] |= GENHD_FL_XEN;
+
+    /* NB. Linux 2.4 only handles 32-bit sector offsets and capacities. */
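+#if 0
+    /*
+     * Editor's aside (not part of the original patch): with 32-bit sector
+     * numbers and 512-byte sectors, the addressable capacity tops out just
+     * under 2 TiB: 0xFFFFFFFF sectors * 512 bytes = 2^41 - 512 bytes.
+     */
+    unsigned long long max_bytes = 0xFFFFFFFFULL * 512;
+#endif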
+    capacity = (unsigned long)xd->capacity;
+
+    if ( partno != 0 )
+    {
+        /*
+         * If this was previously set up as a real disc we will have set
+         * up partition-table information. Virtual partitions override
+         * 'real' partitions, and the two cannot coexist on a device.
+         */
+        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
+             (gd->sizes[minor & ~(max_part-1)] != 0) )
+        {
+            /*
+             * Any non-zero sub-partition entries must be cleaned out before
+             * installing 'virtual' partition entries. The two types cannot
+             * coexist, and virtual partitions are favoured.
+             */
+            kdev_t dev = device & ~(max_part-1);
+            for ( i = max_part - 1; i > 0; i-- )
+            {
+                invalidate_device(dev+i, 1);
+                gd->part[MINOR(dev+i)].start_sect = 0;
+                gd->part[MINOR(dev+i)].nr_sects   = 0;
+                gd->sizes[MINOR(dev+i)]           = 0;
+            }
+            printk(KERN_ALERT
+                   "Virtual partitions found for /dev/%s - ignoring any "
+                   "real partition information we may have found.\n",
+                   disk_name(gd, MINOR(device), buf));
+        }
+
+        /* Need to skankily set up 'partition' information. */
+        gd->part[minor].start_sect = 0;
+        gd->part[minor].nr_sects   = capacity;
+        gd->sizes[minor]           = capacity;
+
+        gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
+    }
+    else
+    {
+        gd->part[minor].nr_sects = capacity;
+        gd->sizes[minor] = capacity>>(BLOCK_SIZE_BITS-9);
+
+        /* Some final fix-ups depending on the device type. */
+        switch ( XD_TYPE(xd->info) )
+        {
+        case XD_TYPE_CDROM:
+        case XD_TYPE_FLOPPY:
+        case XD_TYPE_TAPE:
+            gd->flags[minor >> gd->minor_shift] |= GENHD_FL_REMOVABLE;
+            printk(KERN_ALERT
+                   "Skipping partition check on %s /dev/%s\n",
+                   XD_TYPE(xd->info)==XD_TYPE_CDROM ? "cdrom" :
+                   (XD_TYPE(xd->info)==XD_TYPE_TAPE ? "tape" :
+                    "floppy"), disk_name(gd, MINOR(device), buf));
+            break;
+
+        case XD_TYPE_DISK:
+            /* Only check partitions on real discs (not virtual!). */
+            if ( gd->flags[minor>>gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
+            {
+                printk(KERN_ALERT
+                       "Skipping partition check on virtual /dev/%s\n",
+                       disk_name(gd, MINOR(device), buf));
+                break;
+            }
+            register_disk(gd, device, gd->max_p, &xlvbd_block_fops,
+                          capacity);
+            break;
+
+        default:
+            printk(KERN_ALERT "XenoLinux: unknown device type %d\n",
+                   XD_TYPE(xd->info));
+            break;
+        }
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+
+
+/*
+ * xlvbd_remove_device - remove a device node if possible
+ * @device: numeric device ID
+ *
+ * Updates the gendisk structure and invalidates devices.
+ *
+ * This is OK for now but, in future, we should perhaps consider where this
+ * should deallocate gendisks / unregister devices.
+ */
+static int xlvbd_remove_device(int device)
+{
+    int i, rc = 0, minor = MINOR(device);
+    struct gendisk *gd;
+    struct block_device *bd;
+    xl_disk_t *disk = NULL;
+
+    if ( (bd = bdget(device)) == NULL )
+        return -1;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(device)) == NULL) ||
+         ((disk = xldev_to_xldisk(device)) == NULL) )
+        BUG();
+
+    if ( disk->usage != 0 )
+    {
+        printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
+        rc = -1;
+        goto out;
+    }
+
+    if ( (minor & (gd->max_p-1)) != 0 )
+    {
+        /* 1: The VBD is mapped to a partition rather than a whole unit. */
+        invalidate_device(device, 1);
+        gd->part[minor].start_sect = 0;
+        gd->part[minor].nr_sects   = 0;
+        gd->sizes[minor]           = 0;
+
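+#if 0
+        /*
+         * Editor's aside (not part of the original patch): the code below
+         * recomputes GENHD_FL_VIRT_PARTNS from scratch. The rule it encodes
+         * is simply "flag set iff some sibling partition minor still has a
+         * nonzero size"; a standalone restatement with hypothetical names:
+         */
+        static int unit_has_virt_partns(int *sizes, int minor, int max_p)
+        {
+            int i, base = minor & ~(max_p - 1);    /* whole-unit minor */
+            for ( i = 1; i < max_p; i++ )
+                if ( sizes[base + i] != 0 )
+                    return 1;
+            return 0;
+        }
+#endif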
+        /* Clear the consists-of-virtual-partitions flag if possible. */
+        gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
+        for ( i = 1; i < gd->max_p; i++ )
+            if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
+                gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
+
+        /*
+         * If all virtual partitions are now gone, and a 'whole unit' VBD is
+         * present, then we can try to grok the unit's real partition table.
+         */
+        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
+             (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
+             !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
+        {
+            register_disk(gd,
+                          device&~(gd->max_p-1),
+                          gd->max_p,
+                          &xlvbd_block_fops,
+                          gd->part[minor&~(gd->max_p-1)].nr_sects);
+        }
+    }
+    else
+    {
+        /*
+         * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
+         * NB. The partition entries are only cleared if there are no VBDs
+         * mapped to individual partitions on this unit.
+         */
+        i = gd->max_p - 1; /* Default: clear subpartitions as well. */
+        if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
+            i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
+        while ( i >= 0 )
+        {
+            invalidate_device(device+i, 1);
+            gd->part[minor+i].start_sect = 0;
+            gd->part[minor+i].nr_sects   = 0;
+            gd->sizes[minor+i]           = 0;
+            i--;
+        }
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+
+/*
+ * xlvbd_update_vbds - reprobes the VBD status and updates driver state. The
+ * VBDs need to be updated in this way when the domain is initialised and
+ * also each time we receive an XLBLK_UPDATE event.
+ */
+void xlvbd_update_vbds(void)
+{
+    int i, j, k, old_nr, new_nr;
+    vdisk_t *old_info, *new_info, *merged_info;
+
+    old_info = vbd_info;
+    old_nr   = nr_vbds;
+
+    new_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+    if ( unlikely((new_nr = xlvbd_get_vbd_info(new_info)) < 0) )
+    {
+        kfree(new_info);
+        return;
+    }
+
+    /*
+     * Final list maximum size is old list + new list. This occurs only when
+     * old list and new list do not overlap at all, and we cannot yet destroy
+     * VBDs in the old list because the usage counts are busy.
+     */
+    merged_info = kmalloc((old_nr + new_nr) * sizeof(vdisk_t), GFP_KERNEL);
+
+    /* @i tracks old list; @j tracks new list; @k tracks merged list. */
+    i = j = k = 0;
+
+    while ( (i < old_nr) && (j < new_nr) )
+    {
+        if ( old_info[i].device < new_info[j].device )
+        {
+            /* VBD has vanished: remove it, keeping it only if still in use. */
+            if ( xlvbd_remove_device(old_info[i].device) != 0 )
+                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+            i++;
+        }
+        else if ( old_info[i].device > new_info[j].device )
+        {
+            /* New VBD: try to bring it up. */
+            if ( xlvbd_init_device(&new_info[j]) == 0 )
+                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+            j++;
+        }
+        else
+        {
+            /* In both lists: replace only if it has changed and is idle. */
+            if ( ((old_info[i].capacity == new_info[j].capacity) &&
+                  (old_info[i].info == new_info[j].info)) ||
+                 (xlvbd_remove_device(old_info[i].device) != 0) )
+                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+            else if ( xlvbd_init_device(&new_info[j]) == 0 )
+                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+            i++; j++;
+        }
+    }
+
+    for ( ; i < old_nr; i++ )
+    {
+        if ( xlvbd_remove_device(old_info[i].device) != 0 )
+            memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+    }
+
+    for ( ; j < new_nr; j++ )
+    {
+        if ( xlvbd_init_device(&new_info[j]) == 0 )
+            memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+    }
+
+    vbd_info = merged_info;
+    nr_vbds  = k;
+
+    kfree(old_info);
+    kfree(new_info);
+}
+
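+/*
+ * Editor's sketch (not part of the original patch): the heart of
+ * xlvbd_update_vbds() above is a two-pointer merge of two device-sorted
+ * lists. Stripped of the init/remove side effects it looks like this
+ * (merge_by_device is a hypothetical name; vdisk_t is the real type):
+ */
+#if 0
+static int merge_by_device(vdisk_t *old, int n_old,
+                           vdisk_t *new, int n_new, vdisk_t *out)
+{
+    int i = 0, j = 0, k = 0;
+
+    while ( (i < n_old) && (j < n_new) )
+    {
+        if ( old[i].device < new[j].device )
+            out[k++] = old[i++];  /* only in old: driver removes, keeps on failure */
+        else if ( old[i].device > new[j].device )
+            out[k++] = new[j++];  /* only in new: driver creates, drops on failure */
+        else
+        {
+            out[k++] = new[j];    /* in both: keep (driver re-inits if changed) */
+            i++; j++;
+        }
+    }
+    while ( i < n_old ) out[k++] = old[i++];
+    while ( j < n_new ) out[k++] = new[j++];
+
+    return k;                     /* number of merged entries */
+}
+#endif
+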
+/*
+ * Set up all the linux device goop for the virtual block devices (VBDs) that
+ * we know about. Note that although from the backend driver's point of view
+ * VBDs are addressed simply as an opaque 16-bit device number, the domain
+ * creation tools conventionally allocate these numbers to correspond to
+ * those used by 'real' linux -- this is just for convenience as it means
+ * e.g. that the same /etc/fstab can be used when booting with or without
+ * Xen.
+ */
+int xlvbd_init(void)
+{
+    int i;
+
+    /*
+     * If compiled as a module, we don't support unloading yet. We therefore
+     * permanently increment the reference count to disallow it.
+     */
+    SET_MODULE_OWNER(&xlvbd_block_fops);
+    MOD_INC_USE_COUNT;
+
+    /* Initialize the global arrays. */
+    for ( i = 0; i < 256; i++ )
+    {
+        /* from the generic ide code (drivers/ide/ide-probe.c, etc) */
+        xlide_blksize_size[i]  = 1024;
+        xlide_hardsect_size[i] = 512;
+        xlide_max_sectors[i]   = 128;  /* 'hwif->rqsize' if we knew it */
+
+        /* from the generic scsi disk code (drivers/scsi/sd.c) */
+        xlscsi_blksize_size[i]  = 1024;  /* XXX 512; */
+        xlscsi_hardsect_size[i] = 512;
+        xlscsi_max_sectors[i]   = 128*8; /* XXX 128; */
+
+        /* we don't really know what to set these to since it depends */
+        xlvbd_blksize_size[i]  = 512;
+        xlvbd_hardsect_size[i] = 512;
+        xlvbd_max_sectors[i]   = 128;
+    }
+
+    vbd_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+    nr_vbds  = xlvbd_get_vbd_info(vbd_info);
+
+    if ( nr_vbds < 0 )
+    {
+        kfree(vbd_info);
+        vbd_info = NULL;
+        nr_vbds  = 0;
+    }
+    else
+    {
+        for ( i = 0; i < nr_vbds; i++ )
+            xlvbd_init_device(&vbd_info[i]);
+    }
+
+    return 0;
+}