author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2003-10-13 22:09:23 +0000
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2003-10-13 22:09:23 +0000
commit     8874486120eb5e00d74752b1948a2b46f041ae5a (patch)
tree       6bf5f335438df2436f60145b9efbc780fbd89ff2
parent     88a3ea096b4573d34094796617ead5c5766d55e7 (diff)
bitkeeper revision 1.510 (3f8b2293sVxqxMv1f_A1X3GD3f08Fg)
xen_physdisk.c, domain.c, dom0_ops.c: Various bug fixes and cleanups. Killing domains now works again.
-rw-r--r--  xen/common/dom0_ops.c              9
-rw-r--r--  xen/common/domain.c               28
-rw-r--r--  xen/drivers/block/xen_physdisk.c  90
3 files changed, 68 insertions, 59 deletions
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index 95ba419eb1..c3e098dd3c 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -148,9 +148,10 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
if ( p == NULL )
goto exit_create;
- if (op.u.newdomain.name[0]) {
- strncpy (p -> name, op.u.newdomain.name, MAX_DOMAIN_NAME);
- p -> name[MAX_DOMAIN_NAME - 1] = 0;
+ if ( op.u.newdomain.name[0] )
+ {
+ strncpy (p->name, op.u.newdomain.name, MAX_DOMAIN_NAME);
+ p->name[MAX_DOMAIN_NAME - 1] = 0;
}
ret = alloc_new_dom_mem(p, op.u.newdomain.memory_kb);
@@ -166,7 +167,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
op.u.newdomain.domain = ret;
copy_to_user(u_dom0_op, &op, sizeof(op));
-
+
exit_create:
spin_unlock_irq(&create_dom_lock);
}
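
The dom0_ops.c hunk above reformats the name copy but keeps the strncpy()-plus-terminator idiom. A minimal standalone sketch of that idiom, assuming an illustrative struct and MAX_DOMAIN_NAME value rather than Xen's real definitions:

#include <string.h>

#define MAX_DOMAIN_NAME 16            /* illustrative size only */

struct toy_task {
    char name[MAX_DOMAIN_NAME];
};

/* strncpy() does not NUL-terminate when the source fills the buffer,
 * so the copy must be followed by an explicit terminator, exactly as
 * the hunk does for p->name. */
static void set_name(struct toy_task *t, const char *src)
{
    strncpy(t->name, src, MAX_DOMAIN_NAME);
    t->name[MAX_DOMAIN_NAME - 1] = '\0';
}
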
diff --git a/xen/common/domain.c b/xen/common/domain.c
index ab2ead683d..f16a25f30c 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -91,7 +91,7 @@ struct task_struct *do_newdomain(unsigned int dom_id, unsigned int cpu)
task_hash[TASK_HASH(dom_id)] = p;
write_unlock_irqrestore(&tasklist_lock, flags);
- return(p);
+ return p;
}
@@ -128,6 +128,8 @@ void kill_domain_with_errmsg(const char *err)
void __kill_domain(struct task_struct *p)
{
int i;
+ struct task_struct **pp;
+ unsigned long flags;
if ( p->domain == 0 )
{
@@ -148,6 +150,17 @@ void __kill_domain(struct task_struct *p)
for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
unlink_net_vif(p->net_vif_list[i]);
+ /*
+ * Note this means that find_domain_by_id may fail, even when the caller
+ * holds a reference to the domain being queried. Take care!
+ */
+ write_lock_irqsave(&tasklist_lock, flags);
+ REMOVE_LINKS(p);
+ pp = &task_hash[TASK_HASH(p->domain)];
+ while ( *pp != p ) pp = &(*pp)->next_hash;
+ *pp = p->next_hash;
+ write_unlock_irqrestore(&tasklist_lock, flags);
+
if ( p == current )
{
__enter_scheduler();
@@ -289,21 +302,11 @@ void free_all_dom_mem(struct task_struct *p)
/* Release resources belonging to task @p. */
void release_task(struct task_struct *p)
{
- struct task_struct **pp;
- unsigned long flags;
-
ASSERT(p->state == TASK_DYING);
ASSERT(!p->has_cpu);
printk("Releasing task %d\n", p->domain);
- write_lock_irqsave(&tasklist_lock, flags);
- REMOVE_LINKS(p);
- pp = &task_hash[TASK_HASH(p->domain)];
- while ( *pp != p ) pp = &(*pp)->next_hash;
- *pp = p->next_hash;
- write_unlock_irqrestore(&tasklist_lock, flags);
-
/*
* This frees up blkdev rings. Totally safe since blkdev ref counting
* actually uses the task_struct refcnt.
@@ -318,7 +321,8 @@ void release_task(struct task_struct *p)
UNSHARE_PFN(virt_to_page(p->shared_info));
free_page((unsigned long)p->shared_info);
free_all_dom_mem(p);
- free_pages((unsigned long)p, 1);
+
+ kmem_cache_free(task_struct_cachep, p);
}
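
The domain.c change moves the hash-table unlink from release_task() into __kill_domain(), which is why find_domain_by_id() can now fail for a domain that is dying but not yet released. A minimal sketch of the pointer-to-pointer unlink idiom the patch uses, with illustrative types and the tasklist_lock locking elided:

#define TASK_HASH_SIZE 32
#define TASK_HASH(id)  ((id) & (TASK_HASH_SIZE - 1))   /* illustrative hash */

struct task {
    unsigned int domain;
    struct task *next_hash;
};

static struct task *task_hash[TASK_HASH_SIZE];

/* Walking with a pointer-to-pointer means the chain head and an interior
 * node are handled identically: advance pp until *pp is the victim, then
 * splice it out with a single store. */
static void unlink_task(struct task *p)
{
    struct task **pp = &task_hash[TASK_HASH(p->domain)];
    while ( *pp != p )
        pp = &(*pp)->next_hash;
    *pp = p->next_hash;
}
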
diff --git a/xen/drivers/block/xen_physdisk.c b/xen/drivers/block/xen_physdisk.c
index e7e0bf73b4..3c7a9df3b4 100644
--- a/xen/drivers/block/xen_physdisk.c
+++ b/xen/drivers/block/xen_physdisk.c
@@ -23,7 +23,6 @@
and we fake the logical view on top of that. */
struct physdisk_ace {
struct list_head list;
-
unsigned short device;
unsigned short partition;
unsigned long start_sect;
@@ -40,30 +39,23 @@ static struct physdisk_ace *find_ace(const struct task_struct *p,
struct list_head *cur_ace_head;
struct physdisk_ace *cur_ace;
- list_for_each(cur_ace_head, &p->physdisk_aces) {
+ list_for_each(cur_ace_head, &p->physdisk_aces)
+ {
cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
DPRINTK("Is [%lx, %lx) good for %lx?\n",
cur_ace->start_sect,
cur_ace->start_sect + cur_ace->n_sectors, sect);
- if (sect >= cur_ace->start_sect
- && sect < cur_ace->start_sect + cur_ace->n_sectors
- && dev == cur_ace->device
- && ((operation == READ && (cur_ace->mode & PHYSDISK_MODE_R))
- || (operation == WRITE
- && (cur_ace->mode & PHYSDISK_MODE_W)))) {
- DPRINTK("Yes.\n");
+ if ( (sect >= cur_ace->start_sect) &&
+ (sect < (cur_ace->start_sect + cur_ace->n_sectors)) &&
+ (dev == cur_ace->device) &&
+ (((operation == READ) && (cur_ace->mode & PHYSDISK_MODE_R)) ||
+ ((operation == WRITE) && (cur_ace->mode & PHYSDISK_MODE_W))) )
return cur_ace;
- } else {
- DPRINTK("No.\n");
- }
}
return NULL;
}
/* Hold the lock on entry, it remains held on exit. */
-/* XXX we call kmalloc and kfree with GFP_KERNEL and a spinlock held
- in here. That wouldn't be allowed under Linux, but, from reading
- the source, it seems to be okay under Xen... */
static void xen_physdisk_revoke_access(unsigned short dev,
unsigned long start_sect,
unsigned long n_sectors,
@@ -77,31 +69,41 @@ static void xen_physdisk_revoke_access(unsigned short dev,
unsigned long kill_zone_end, ace_end;
kill_zone_end = start_sect + n_sectors;
- list_for_each(cur_ace_head, &p->physdisk_aces) {
+ list_for_each(cur_ace_head, &p->physdisk_aces)
+ {
cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
ace_end = cur_ace->start_sect + cur_ace->n_sectors;
- if (cur_ace->start_sect >= kill_zone_end ||
- ace_end <= start_sect || cur_ace->device != dev)
+ if ( (cur_ace->start_sect >= kill_zone_end) ||
+ (ace_end <= start_sect) ||
+ (cur_ace->device != dev) )
continue;
DPRINTK("Killing ace [%lx, %lx) against kill zone [%lx, %lx)\n",
cur_ace->start_sect, ace_end, start_sect, kill_zone_end);
- if (cur_ace->start_sect >= start_sect && ace_end <= kill_zone_end) {
+ if ( (cur_ace->start_sect >= start_sect) &&
+ (ace_end <= kill_zone_end) )
+ {
/* ace entirely within kill zone -> kill it */
list_del(cur_ace_head);
cur_ace_head = cur_ace_head->prev;
kfree(cur_ace);
- } else if (ace_end <= kill_zone_end) {
+ }
+ else if ( ace_end <= kill_zone_end )
+ {
/* ace start before kill start, ace end in kill zone,
move ace end. */
cur_ace->n_sectors = start_sect - cur_ace->start_sect;
- } else if (cur_ace->start_sect >= start_sect) {
+ }
+ else if ( cur_ace->start_sect >= start_sect )
+ {
/* ace start after kill start, ace end outside kill zone,
move ace start. */
cur_ace->start_sect = kill_zone_end;
cur_ace->n_sectors = ace_end - cur_ace->start_sect;
- } else {
+ }
+ else
+ {
/* The fun one: the ace entirely includes the kill zone. */
/* Cut the current ace down to just the bit before the kzone,
create a new ace for the bit just after it. */
@@ -132,7 +134,8 @@ static int xen_physdisk_grant_access(unsigned short dev,
and we try to grant write access, or vice versa. */
xen_physdisk_revoke_access(dev, start_sect, n_sectors, p);
- if (mode) {
+ if ( mode )
+ {
cur_ace = kmalloc(sizeof(*cur_ace), GFP_KERNEL);
cur_ace->device = dev;
cur_ace->start_sect = start_sect;
@@ -155,9 +158,11 @@ static void xen_physdisk_probe_access(physdisk_probebuf_t * buf,
int x = 0;
n_aces = 0;
- list_for_each(cur_ace_head, &p->physdisk_aces) {
+ list_for_each(cur_ace_head, &p->physdisk_aces)
+ {
x++;
- if (x >= buf->start_ind) {
+ if ( x >= buf->start_ind )
+ {
cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
buf->entries[n_aces].device = cur_ace->device;
buf->entries[n_aces].partition = cur_ace->partition;
@@ -172,20 +177,18 @@ static void xen_physdisk_probe_access(physdisk_probebuf_t * buf,
int xen_physdisk_grant(xp_disk_t * xpd_in)
{
- struct task_struct *p;
+ struct task_struct *p = current;
xp_disk_t *xpd = map_domain_mem(virt_to_phys(xpd_in));
int res;
- p = current;
- DPRINTK("Have current.\n");
- DPRINTK("Target domain %x\n", xpd->domain);
-
p = find_domain_by_id(xpd->domain);
- if (p == NULL) {
+ if ( p == NULL )
+ {
DPRINTK("Bad domain!\n");
res = 1;
goto out;
}
+
spin_lock(&p->physdev_lock);
res = xen_physdisk_grant_access(xpd->device,
xpd->partition,
@@ -206,14 +209,16 @@ int xen_physdisk_probe(struct task_struct *requesting_domain,
physdisk_probebuf_t *buf = map_domain_mem(virt_to_phys(buf_in));
int res;
- if (requesting_domain->domain != 0 &&
- requesting_domain->domain != buf->domain) {
+ if ( (requesting_domain->domain != 0) &&
+ (requesting_domain->domain != buf->domain) )
+ {
res = 1;
goto out;
}
p = find_domain_by_id(buf->domain);
- if (p == NULL) {
+ if ( p == NULL )
+ {
res = 1;
goto out;
}
@@ -239,17 +244,16 @@ int xen_physdisk_access_okay(phys_seg_t * pseg, struct task_struct *p,
("Checking access for domain %d, start sect 0x%lx, length 0x%x.\n",
p->domain, pseg->sector_number, pseg->nr_sects);
- for (sect = pseg->sector_number;
- sect < pseg->sector_number + pseg->nr_sects;) {
+ for ( sect = pseg->sector_number;
+ sect < pseg->sector_number + pseg->nr_sects; )
+ {
/* XXX this would be a lot faster if the aces were sorted on start
address. Also in revoke_access. */
spin_lock(&p->physdev_lock);
cur_ace = find_ace(p, pseg->dev, sect, operation);
spin_unlock(&p->physdev_lock);
- if (!cur_ace) {
- /* Default closed. */
+ if ( cur_ace == NULL )
return 0;
- }
sect +=
MAX(cur_ace->n_sectors,
pseg->nr_sects + pseg->sector_number - sect);
@@ -262,10 +266,10 @@ void destroy_physdisk_aces(struct task_struct *p)
struct list_head *cur_ace_head, *next_head;
struct physdisk_ace *cur_ace;
- spin_lock(&p->physdev_lock); /* We never release this again. */
-
- for (cur_ace_head = p->physdisk_aces.next;
- cur_ace_head != &p->physdisk_aces; cur_ace_head = next_head) {
+ for ( cur_ace_head = p->physdisk_aces.next;
+ cur_ace_head != &p->physdisk_aces;
+ cur_ace_head = next_head )
+ {
cur_ace = list_entry(cur_ace_head, struct physdisk_ace, list);
next_head = cur_ace_head->next;
kfree(cur_ace);
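
Most of the xen_physdisk.c change is reformatting, but the revoke path carries the interesting logic: an existing access-control entry can miss the kill zone entirely, fall wholly inside it, overlap it at either end, or wholly contain it. A minimal sketch of that case analysis, assuming an illustrative extent type and omitting the list handling, locking, and the final split-into-two case that the real code performs:

struct extent {
    unsigned long start, len;
};

/* Trim extent e against the revoked zone [kstart, kend). */
static void trim_extent(struct extent *e, unsigned long kstart, unsigned long kend)
{
    unsigned long end = e->start + e->len;

    if ( e->start >= kend || end <= kstart )
        return;                          /* no overlap: leave it alone    */

    if ( e->start >= kstart && end <= kend )
        e->len = 0;                      /* fully covered: caller frees   */
    else if ( end <= kend )
        e->len = kstart - e->start;      /* tail overlaps: clip the end   */
    else if ( e->start >= kstart )
    {
        e->len = end - kend;             /* head overlaps: clip the start */
        e->start = kend;
    }
    /* else: the zone lies strictly inside the extent; the real code
     * splits it into two ACEs, one on each side of the kill zone. */
}
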