#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <xen/guest_access.h>
#include <xen/iocap.h>
#include <xen/pci.h>
#include <asm/current.h>
#include <asm/msi.h>
#include <asm/hypercall.h>
#include <public/xen.h>
#include <public/physdev.h>
#include <xsm/xsm.h>

#ifndef COMPAT
typedef long ret_t;
#endif

/* IO-APIC register accessors used by PHYSDEVOP_apic_read/write below. */
int ioapic_guest_read(
    unsigned long physbase, unsigned int reg, u32 *pval);
int ioapic_guest_write(
    unsigned long physbase, unsigned int reg, u32 pval);

/* Bind a GSI or MSI source to a domain pirq on behalf of a privileged caller. */
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int vector, pirq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( !map )
        return -EINVAL;

    if ( map->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(map->domid);

    if ( d == NULL )
    {
        ret = -ESRCH;
        goto free_domain;
    }

    /* Verify or get vector. */
    switch ( map->type )
    {
        case MAP_PIRQ_TYPE_GSI:
            if ( map->index < 0 || map->index >= NR_IRQS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                        d->domain_id, map->index);
                ret = -EINVAL;
                goto free_domain;
            }
            vector = IO_APIC_VECTOR(map->index);
            if ( !vector )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }
            break;

        case MAP_PIRQ_TYPE_MSI:
            vector = map->index;
            if ( vector == -1 )
                vector = assign_irq_vector(AUTO_ASSIGN);

            if ( vector < 0 || vector >= NR_VECTORS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }

            _msi.bus = map->bus;
            _msi.devfn = map->devfn;
            _msi.entry_nr = map->entry_nr;
            _msi.table_base = map->table_base;
            _msi.vector = vector;
            map_data = &_msi;
            break;

        default:
            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                    d->domain_id, map->type);
            ret = -EINVAL;
            goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    if ( map->pirq < 0 )
    {
        if ( d->arch.vector_pirq[vector] )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq,
                    d->arch.vector_pirq[vector]);
            pirq = d->arch.vector_pirq[vector];
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( d->arch.vector_pirq[vector] &&
             d->arch.vector_pirq[vector] != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: vector %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        free_irq_vector(vector);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}

/* Undo a mapping established by physdev_map_pirq(). */
static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
{
    struct domain *d;
    int ret;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( unmap->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(unmap->domid);

    if ( d == NULL )
        return -ESRCH;

    spin_lock(&pcidevs_lock);
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, unmap->pirq);
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);

    rcu_unlock_domain(d);

    return ret;
}

ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    int irq;
    ret_t ret;
    struct vcpu *v = current;

    switch ( cmd )
    {
    case PHYSDEVOP_eoi: {
        struct physdev_eoi eoi;
        ret = -EFAULT;
        if ( copy_from_guest(&eoi, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( eoi.irq < 0 || eoi.irq >= NR_IRQS )
            break;
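        /*
         * When the guest has registered a pirq EOI map (see
         * PHYSDEVOP_pirq_eoi_gmfn below), the EOI path also unmasks the
         * event channel bound to this pirq, so no separate unmask
         * hypercall is needed.
         */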
        if ( v->domain->arch.pirq_eoi_map )
            evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
        ret = pirq_guest_eoi(v->domain, eoi.irq);
        break;
    }

    case PHYSDEVOP_pirq_eoi_gmfn: {
        struct physdev_pirq_eoi_gmfn info;
        unsigned long mfn;

        BUILD_BUG_ON(NR_IRQS > (PAGE_SIZE * 8));

        ret = -EFAULT;
        if ( copy_from_guest(&info, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        mfn = gmfn_to_mfn(current->domain, info.gmfn);
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), v->domain,
                                PGT_writable_page) )
            break;

        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
        {
            put_page_and_type(mfn_to_page(mfn));
            ret = -EBUSY;
            break;
        }

        v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
        if ( v->domain->arch.pirq_eoi_map == NULL )
        {
            v->domain->arch.pirq_eoi_map_mfn = 0;
            put_page_and_type(mfn_to_page(mfn));
            ret = -ENOSPC;
            break;
        }

        ret = 0;
        break;
    }

    /* Legacy since 0x00030202. */
    case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
        ret = pirq_guest_unmask(v->domain);
        break;
    }

    case PHYSDEVOP_irq_status_query: {
        struct physdev_irq_status_query irq_status_query;
        ret = -EFAULT;
        if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
            break;
        irq = irq_status_query.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= NR_IRQS) )
            break;
        irq_status_query.flags = 0;
        if ( pirq_acktype(v->domain, irq) != 0 )
            irq_status_query.flags |= XENIRQSTAT_needs_eoi;
        if ( pirq_shared(v->domain, irq) )
            irq_status_query.flags |= XENIRQSTAT_shared;
        ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
        break;
    }

    case PHYSDEVOP_map_pirq: {
        struct physdev_map_pirq map;

        ret = -EFAULT;
        if ( copy_from_guest(&map, arg, 1) != 0 )
            break;

        ret = physdev_map_pirq(&map);

        if ( copy_to_guest(arg, &map, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_unmap_pirq: {
        struct physdev_unmap_pirq unmap;

        ret = -EFAULT;
        if ( copy_from_guest(&unmap, arg, 1) != 0 )
            break;

        ret = physdev_unmap_pirq(&unmap);
        break;
    }

    case PHYSDEVOP_apic_read: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = xsm_apic(v->domain, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
        if ( copy_to_guest(arg, &apic, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_apic_write: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = xsm_apic(v->domain, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
        break;
    }

    case PHYSDEVOP_alloc_irq_vector: {
        struct physdev_irq irq_op;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_op, arg, 1) != 0 )
            break;

        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;

        ret = xsm_assign_vector(v->domain, irq_op.irq);
        if ( ret )
            break;

        irq = irq_op.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= NR_IRQS) )
            break;

        irq_op.vector = assign_irq_vector(irq);

        spin_lock(&pcidevs_lock);
        spin_lock(&dom0->event_lock);
        ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
                              MAP_PIRQ_TYPE_GSI, NULL);
        spin_unlock(&dom0->event_lock);
        spin_unlock(&pcidevs_lock);

        if ( copy_to_guest(arg, &irq_op, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_set_iopl: {
        struct physdev_set_iopl set_iopl;
        ret = -EFAULT;
        if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( set_iopl.iopl > 3 )
            break;
        ret = 0;
        v->arch.iopl = set_iopl.iopl;
        break;
    }

    case PHYSDEVOP_set_iobitmap: {
        struct physdev_set_iobitmap set_iobitmap;
        ret = -EFAULT;
        if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
            break;
        ret = -EINVAL;
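        /* The x86 port I/O space covers at most 65536 ports. */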
        if ( !guest_handle_okay(set_iobitmap.bitmap, IOBMP_BYTES) ||
             (set_iobitmap.nr_ports > 65536) )
            break;
        ret = 0;
#ifndef COMPAT
        v->arch.iobmp = set_iobitmap.bitmap;
#else
        guest_from_compat_handle(v->arch.iobmp, set_iobitmap.bitmap);
#endif
        v->arch.iobmp_limit = set_iobitmap.nr_ports;
        break;
    }

    case PHYSDEVOP_manage_pci_add: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_add_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_manage_pci_remove: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_remove_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_restore_msi: {
        struct physdev_restore_msi restore_msi;
        struct pci_dev *pdev;

        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;

        ret = -EFAULT;
        if ( copy_from_guest(&restore_msi, arg, 1) != 0 )
            break;

        spin_lock(&pcidevs_lock);
        pdev = pci_get_pdev(restore_msi.bus, restore_msi.devfn);
        ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
        spin_unlock(&pcidevs_lock);
        break;
    }

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
/*
 * lib/object.c		Generic Cacheable Object
 *
 *	This library is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU Lesser General Public
 *	License as published by the Free Software Foundation version 2.1
 *	of the License.
 *
 * Copyright (c) 2003-2008 Thomas Graf <tgraf@suug.ch>
 */

/**
 * @ingroup cache
 * @defgroup object Object
 * @{
 */

#include <netlink-local.h>
#include <netlink/netlink.h>
#include <netlink/cache.h>
#include <netlink/object.h>
#include <netlink/utils.h>

static inline struct nl_object_ops *obj_ops(struct nl_object *obj)
{
	if (!obj->ce_ops)
		BUG();

	return obj->ce_ops;
}

/**
 * @name Object Creation/Deletion
 * @{
 */

/**
 * Allocate a new object of kind specified by the operations handle
 * @arg ops		cache operations handle
 * @return The new object or NULL
 */
struct nl_object *nl_object_alloc(struct nl_object_ops *ops)
{
	struct nl_object *new;

	if (ops->oo_size < sizeof(*new))
		BUG();

	new = calloc(1, ops->oo_size);
	if (!new)
		return NULL;

	new->ce_refcnt = 1;
	nl_init_list_head(&new->ce_list);

	new->ce_ops = ops;
	if (ops->oo_constructor)
		ops->oo_constructor(new);

	NL_DBG(4, "Allocated new object %p\n", new);

	return new;
}

#ifdef disabled
/**
 * Allocate a new object of kind specified by the name
 * @arg kind		name of object type
 * @return The new object or nULL
 */
int nl_object_alloc_name(const char *kind, struct nl_object **result)
{
	struct nl_cache_ops *ops;

	ops = nl_cache_ops_lookup(kind);
	if (!ops)
		return -NLE_OPNOTSUPP;

	if (!(*result = nl_object_alloc(ops->co_obj_ops)))
		return -NLE_NOMEM;

	return 0;
}
#endif

struct nl_derived_object {
	NLHDR_COMMON
	char data;
};

/**
 * Allocate a new object and copy all data from an existing object
 * @arg obj		object to inherite data from
 * @return The new object or NULL.
 */
struct nl_object *nl_object_clone(struct nl_object *obj)
{
	struct nl_object *new;
	struct nl_object_ops *ops = obj_ops(obj);
	int doff = offsetof(struct nl_derived_object, data);
	int size;

	new = nl_object_alloc(ops);
	if (!new)
		return NULL;

	size = ops->oo_size - doff;
	if (size < 0)
		BUG();

	new->ce_ops = obj->ce_ops;
	new->ce_msgtype = obj->ce_msgtype;

	if (size)
		memcpy((void *)new + doff, (void *)obj + doff, size);

	if (ops->oo_clone) {
		if (ops->oo_clone(new, obj) < 0) {
			nl_object_free(new);
			return NULL;
		}
	} else if (size && ops->oo_free_data)
		BUG();

	return new;
}

/**
 * Free a cacheable object
 * @arg obj		object to free
 *
 * @return 0 or a negative error code.
 */
void nl_object_free(struct nl_object *obj)
{
	struct nl_object_ops *ops = obj_ops(obj);

	if (obj->ce_refcnt > 0)
		NL_DBG(1, "Warning: Freeing object in use...\n");

	if (obj->ce_cache)
		nl_cache_remove(obj);

	if (ops->oo_free_data)
		ops->oo_free_data(obj);

	free(obj);

	NL_DBG(4, "Freed object %p\n", obj);
}

/** @} */

/**
 * @name Reference Management
 * @{
 */

/**
 * Acquire a reference on a object
 * @arg obj		object to acquire reference from
 */
void nl_object_get(struct nl_object *obj)
{
	obj->ce_refcnt++;
	NL_DBG(4, "New reference to object %p, total %d\n",
	       obj, obj->ce_refcnt);
}

/**
 * Release a reference from an object
 * @arg obj		object to release reference from
 */
void nl_object_put(struct nl_object *obj)
{
	if (!obj)
		return;

	obj->ce_refcnt--;
	NL_DBG(4, "Returned object reference %p, %d remaining\n",
	       obj, obj->ce_refcnt);

	if (obj->ce_refcnt < 0)
		BUG();

	if (obj->ce_refcnt <= 0)
		nl_object_free(obj);
}

/** @} */

/**
 * @name Utillities
 * @{
 */

#ifdef disabled
/**
 * Dump this object according to the specified parameters
 * @arg obj		object to dump
 * @arg params		dumping parameters
 */
void nl_object_dump(struct nl_object *obj, struct nl_dump_params *params)
{
	dump_from_ops(obj, params);
}

/**
 * Check if the identifiers of two objects are identical 
 * @arg a		an object
 * @arg b		another object of same type
 *
 * @return true if both objects have equal identifiers, otherwise false.
 */
int nl_object_identical(struct nl_object *a, struct nl_object *b)
{
	struct nl_object_ops *ops = obj_ops(a);
	int req_attrs;

	/* Both objects must be of same type */
	if (ops != obj_ops(b))
		return 0;

	req_attrs = ops->oo_id_attrs;

	/* Both objects must provide all required attributes to uniquely
	 * identify an object */
	if ((a->ce_mask & req_attrs) != req_attrs ||
	    (b->ce_mask & req_attrs) != req_attrs)
		return 0;

	/* Can't judge unless we can compare */
	if (ops->oo_compare == NULL)
		return 0;

	return !(ops->oo_compare(a, b, req_attrs, 0));
}
#endif

/**
 * Compute bitmask representing difference in attribute values
 * @arg a		an object
 * @arg b		another object of same type
 *
 * The bitmask returned is specific to an object type, each bit set represents
 * an attribute which mismatches in either of the two objects. Unavailability
 * of an attribute in one object and presence in the other is regarded a
 * mismatch as well.
 *
 * @return Bitmask describing differences or 0 if they are completely identical.
 */
uint32_t nl_object_diff(struct nl_object *a, struct nl_object *b)
{
	struct nl_object_ops *ops = obj_ops(a);

	if (ops != obj_ops(b) || ops->oo_compare == NULL)
		return UINT_MAX;

	return ops->oo_compare(a, b, ~0, 0);
}

/**
 * Match a filter against an object
 * @arg obj		object to check
 * @arg filter		object of same type acting as filter
 *
 * @return 1 if the object matches the filter or 0
 *           if no filter procedure is available or if the
 *           filter does not match.
 */
int nl_object_match_filter(struct nl_object *obj, struct nl_object *filter)
{
	struct nl_object_ops *ops = obj_ops(obj);

	if (ops != obj_ops(filter) || ops->oo_compare == NULL)
		return 0;
	
	return !(ops->oo_compare(obj, filter, filter->ce_mask,
				 LOOSE_COMPARISON));
}

/**
 * Convert bitmask of attributes to a character string
 * @arg obj		object of same type as attribute bitmask
 * @arg attrs		bitmask of attribute types
 * @arg buf		destination buffer
 * @arg len		length of destination buffer
 *
 * Converts the bitmask of attribute types into a list of attribute
 * names separated by comas.
 *
 * @return destination buffer.
 */
char *nl_object_attrs2str(struct nl_object *obj, uint32_t attrs,
			  char *buf, size_t len)
{
	struct nl_object_ops *ops = obj_ops(obj);

	if (ops->oo_attrs2str != NULL)
		return ops->oo_attrs2str(attrs, buf, len);
	else {
		memset(buf, 0, len);
		return buf;
	}
}

/** @} */

/** @} */