path: root/package/kernel/button-hotplug/src/button-hotplug.c
blob: 41fdf3a256280336dd09fe0b0860c574c666c45c
/*
 *  Button Hotplug driver
 *
 *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 *
 *  Based on the diag.c - GPIO interface driver for Broadcom boards
 *    Copyright (C) 2006 Mike Baker <mbm@openwrt.org>,
 *    Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
 *    Copyright (C) 2008 Andy Boyett <agb@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

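/*
 * This driver attaches to any input device that can emit key/button
 * events and turns button presses/releases into hotplug uevents on the
 * kobject_uevent netlink socket.
 *
 * On OpenWrt such events are typically handled by scripts under
 * /etc/hotplug.d/button/, which see the variables built below (BUTTON,
 * ACTION, SEEN, ...) in their environment. Illustrative consumer, not
 * part of this driver:
 *
 *   # /etc/hotplug.d/button/00-wps (hypothetical)
 *   [ "$BUTTON" = "wps" ] && [ "$ACTION" = "pressed" ] && wifi
 */
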
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/input.h>

#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/kobject.h>

#define DRV_NAME	"button-hotplug"
#define DRV_VERSION	"0.4.1"
#define DRV_DESC	"Button Hotplug driver"

#define BH_SKB_SIZE	2048

#define PFX	DRV_NAME ": "

#undef BH_DEBUG

#ifdef BH_DEBUG
#define BH_DBG(fmt, args...) printk(KERN_DEBUG "%s: " fmt, DRV_NAME, ##args )
#else
#define BH_DBG(fmt, args...) do {} while (0)
#endif

#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, DRV_NAME, ##args )

#ifndef BIT_MASK
#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
#endif

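/*
 * Per-connected-device state; @seen stores one jiffies timestamp per
 * button_map[] entry, recording when that button last changed state.
 */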
struct bh_priv {
	unsigned long		*seen;
	struct input_handle	handle;
};

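/*
 * A single queued button event; freed by the work handler once the
 * uevent has been broadcast (or dropped on error).
 */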
struct bh_event {
	const char		*name;
	char			*action;
	unsigned long		seen;

	struct sk_buff		*skb;
	struct work_struct	work;
};

struct bh_map {
	unsigned int	code;
	const char	*name;
};

extern u64 uevent_next_seqnum(void);

#define BH_MAP(_code, _name)		\
	{				\
		.code = (_code),	\
		.name = (_name),	\
	}

static struct bh_map button_map[] = {
	BH_MAP(BTN_0,		"BTN_0"),
	BH_MAP(BTN_1,		"BTN_1"),
	BH_MAP(BTN_2,		"BTN_2"),
	BH_MAP(BTN_3,		"BTN_3"),
	BH_MAP(BTN_4,		"BTN_4"),
	BH_MAP(BTN_5,		"BTN_5"),
	BH_MAP(BTN_6,		"BTN_6"),
	BH_MAP(BTN_7,		"BTN_7"),
	BH_MAP(BTN_8,		"BTN_8"),
	BH_MAP(BTN_9,		"BTN_9"),
	BH_MAP(KEY_RESTART,	"reset"),
	BH_MAP(KEY_POWER,	"power"),
	BH_MAP(KEY_RFKILL,	"rfkill"),
	BH_MAP(KEY_WPS_BUTTON,	"wps"),
	BH_MAP(KEY_WIMAX,	"wwan"),
};

/* -------------------------------------------------------------------------*/

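/*
 * Append one NUL-terminated "KEY=value" string to the event skb.
 * Entries flagged as @argv are skipped: only environment-style
 * variables end up in the buffer.
 */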
static int bh_event_add_var(struct bh_event *event, int argv,
		const char *format, ...)
{
	char buf[128];	/* on-stack: concurrent work items must not share a buffer */
	char *s;
	va_list args;
	int len;

	if (argv)
		return 0;

	va_start(args, format);
	len = vsnprintf(buf, sizeof(buf), format, args);
	va_end(args);

	if (len >= sizeof(buf)) {
		BH_ERR("buffer size too small\n");
		WARN_ON(1);
		return -ENOMEM;
	}

	s = skb_put(event->skb, len + 1);
	strcpy(s, buf);

	BH_DBG("added variable '%s'\n", s);

	return 0;
}

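/*
 * Build the uevent environment. The finished skb payload is a series
 * of consecutive NUL-terminated strings, e.g. for a WPS press
 * (SEEN/SEQNUM values illustrative):
 *
 *   pressed@\0HOME=/\0PATH=/sbin:/bin:/usr/sbin:/usr/bin\0
 *   SUBSYSTEM=button\0ACTION=pressed\0BUTTON=wps\0SEEN=3\0SEQNUM=42\0
 */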
static int button_hotplug_fill_event(struct bh_event *event)
{
	int ret;

	ret = bh_event_add_var(event, 0, "HOME=%s", "/");
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "PATH=%s",
					"/sbin:/bin:/usr/sbin:/usr/bin");
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "SUBSYSTEM=%s", "button");
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "ACTION=%s", event->action);
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "BUTTON=%s", event->name);
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "SEEN=%ld", event->seen);
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());

	return ret;
}

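/*
 * Deferred work: runs in process context, where GFP_KERNEL allocations
 * and the netlink broadcast are safe. On success the skb is consumed
 * by broadcast_uevent(); on failure it is freed here.
 */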
static void button_hotplug_work(struct work_struct *work)
{
	struct bh_event *event = container_of(work, struct bh_event, work);
	int ret = 0;

	event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL);
	if (!event->skb)
		goto out_free_event;

	ret = bh_event_add_var(event, 0, "%s@", event->action);
	if (ret)
		goto out_free_skb;

	ret = button_hotplug_fill_event(event);
	if (ret)
		goto out_free_skb;

	NETLINK_CB(event->skb).dst_group = 1;
	broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);

 out_free_skb:
	if (ret) {
		BH_ERR("work error %d\n", ret);
		kfree_skb(event->skb);
	}
 out_free_event:
	kfree(event);
}

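/*
 * Called from the input event path, which runs in atomic context with
 * the device's event_lock held, so the allocation must not sleep and
 * the actual broadcast is deferred to a workqueue.
 */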
static int button_hotplug_create_event(const char *name, unsigned long seen,
		int pressed)
{
	struct bh_event *event;

	BH_DBG("create event, name=%s, seen=%lu, pressed=%d\n",
		name, seen, pressed);

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->name = name;
	event->seen = seen;
	event->action = pressed ? "pressed" : "released";

	INIT_WORK(&event->work, button_hotplug_work);
	schedule_work(&event->work);

	return 0;
}

/* -------------------------------------------------------------------------*/

static int button_get_index(unsigned int code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(button_map); i++)
		if (button_map[i].code == code)
			return i;

	return -1;
}
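
/*
 * Input core callback: filter EV_KEY events for buttons we know about
 * and queue a hotplug event. SEEN is the time, in seconds, since the
 * same button last changed state.
 */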
static void button_hotplug_event(struct input_handle *handle,
			   unsigned int type, unsigned int code, int value)
{
	struct bh_priv *priv = handle->private;
	unsigned long seen = jiffies;
	int btn;

	BH_DBG("event type=%u, code=%u, value=%d\n", type, code, value);

	if (type != EV_KEY)
		return;

	btn = button_get_index(code);
	if (btn < 0)
		return;

	button_hotplug_create_event(button_map[btn].name,
			(seen - priv->seen[btn]) / HZ, value);
	priv->seen[btn] = seen;
}

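/*
 * Attach to a newly registered input device if it advertises at least
 * one known button. The per-button last-seen array is allocated in the
 * same block as the private struct.
 */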
static int button_hotplug_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct bh_priv *priv;
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(button_map); i++)
		if (test_bit(button_map[i].code, dev->keybit))
			break;

	if (i == ARRAY_SIZE(button_map))
		return -ENODEV;

	priv = kzalloc(sizeof(*priv) +
		       (sizeof(unsigned long) * ARRAY_SIZE(button_map)),
		       GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->seen = (unsigned long *) &priv[1];
	priv->handle.private = priv;
	priv->handle.dev = dev;
	priv->handle.handler = handler;
	priv->handle.name = DRV_NAME;

	ret = input_register_handle(&priv->handle);
	if (ret)
		goto err_free_priv;

	ret = input_open_device(&priv->handle);
	if (ret)
		goto err_unregister_handle;

	BH_DBG("connected to %s\n", dev->name);

	return 0;

 err_unregister_handle:
	input_unregister_handle(&priv->handle);

 err_free_priv:
	kfree(priv);
	return ret;
}

static void button_hotplug_disconnect(struct input_handle *handle)
{
	struct bh_priv *priv = handle->private;

	input_close_device(handle);
	input_unregister_handle(handle);

	kfree(priv);
}

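/* Match any input device that can emit key/button events. */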
static const struct input_device_id button_hotplug_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{
		/* Terminating entry */
	},
};

MODULE_DEVICE_TABLE(input, button_hotplug_ids);

static struct input_handler button_hotplug_handler = {
	.event =	button_hotplug_event,
	.connect =	button_hotplug_connect,
	.disconnect =	button_hotplug_disconnect,
	.name =		DRV_NAME,
	.id_table =	button_hotplug_ids,
};

/* -------------------------------------------------------------------------*/

static int __init button_hotplug_init(void)
{
	int ret;

	printk(KERN_INFO DRV_DESC " version " DRV_VERSION "\n");
	ret = input_register_handler(&button_hotplug_handler);
	if (ret)
		BH_ERR("unable to register input handler\n");

	return ret;
}
module_init(button_hotplug_init);

static void __exit button_hotplug_exit(void)
{
	input_unregister_handler(&button_hotplug_handler);
}
module_exit(button_hotplug_exit);

MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_LICENSE("GPL v2");
="n">flags |= XEN_DOMINF_hvm_guest; xsm_security_domaininfo(d, info); info->tot_pages = d->tot_pages; info->max_pages = d->max_pages; info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT); memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t)); } static unsigned int default_vcpu0_location(void) { struct domain *d; struct vcpu *v; unsigned int i, cpu, nr_cpus, *cnt; cpumask_t cpu_exclude_map; /* Do an initial CPU placement. Pick the least-populated CPU. */ nr_cpus = last_cpu(cpu_possible_map) + 1; cnt = xmalloc_array(unsigned int, nr_cpus); if ( cnt ) { memset(cnt, 0, nr_cpus * sizeof(*cnt)); rcu_read_lock(&domlist_read_lock); for_each_domain ( d ) for_each_vcpu ( d, v ) if ( !test_bit(_VPF_down, &v->pause_flags) ) cnt[v->processor]++; rcu_read_unlock(&domlist_read_lock); } /* * If we're on a HT system, we only auto-allocate to a non-primary HT. We * favour high numbered CPUs in the event of a tie. */ cpu = first_cpu(per_cpu(cpu_sibling_map, 0)); if ( cpus_weight(per_cpu(cpu_sibling_map, 0)) > 1 ) cpu = next_cpu(cpu, per_cpu(cpu_sibling_map, 0)); cpu_exclude_map = per_cpu(cpu_sibling_map, 0); for_each_online_cpu ( i ) { if ( cpu_isset(i, cpu_exclude_map) ) continue; if ( (i == first_cpu(per_cpu(cpu_sibling_map, i))) && (cpus_weight(per_cpu(cpu_sibling_map, i)) > 1) ) continue; cpus_or(cpu_exclude_map, cpu_exclude_map, per_cpu(cpu_sibling_map, i)); if ( !cnt || cnt[i] <= cnt[cpu] ) cpu = i; } xfree(cnt); return cpu; } bool_t domctl_lock_acquire(void) { /* * Caller may try to pause its own VCPUs. We must prevent deadlock * against other non-domctl routines which try to do the same. */ if ( !spin_trylock(&current->domain->hypercall_deadlock_mutex) ) return 0; /* * Trylock here is paranoia if we have multiple privileged domains. Then * we could have one domain trying to pause another which is spinning * on domctl_lock -- results in deadlock. */ if ( spin_trylock(&domctl_lock) ) return 1; spin_unlock(&current->domain->hypercall_deadlock_mutex); return 0; } void domctl_lock_release(void) { spin_unlock(&domctl_lock); spin_unlock(&current->domain->hypercall_deadlock_mutex); } long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl) { long ret = 0; struct xen_domctl curop, *op = &curop; if ( !IS_PRIV(current->domain) ) return -EPERM; if ( copy_from_guest(op, u_domctl, 1) ) return -EFAULT; if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION ) return -EACCES; if ( !domctl_lock_acquire() ) return hypercall_create_continuation( __HYPERVISOR_domctl, "h", u_domctl); switch ( op->cmd ) { case XEN_DOMCTL_setvcpucontext: { struct domain *d = rcu_lock_domain_by_id(op->domain); vcpu_guest_context_u c = { .nat = NULL }; unsigned int vcpu = op->u.vcpucontext.vcpu; struct vcpu *v; ret = -ESRCH; if ( d == NULL ) break; ret = xsm_setvcpucontext(d); if ( ret ) goto svc_out; ret = -EINVAL; if ( (d == current->domain) || /* no domain_pause() */ (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) ) goto svc_out; if ( guest_handle_is_null(op->u.vcpucontext.ctxt) ) { vcpu_reset(v); ret = 0; goto svc_out; } #ifdef CONFIG_COMPAT BUILD_BUG_ON(sizeof(struct vcpu_guest_context) < sizeof(struct compat_vcpu_guest_context)); #endif ret = -ENOMEM; if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL ) goto svc_out; #ifdef CONFIG_COMPAT if ( !is_pv_32on64_vcpu(v) ) ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1); else ret = copy_from_guest(c.cmp, guest_handle_cast(op->u.vcpucontext.ctxt, void), 1); #else ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1); #endif ret = ret ? 
-EFAULT : 0; if ( ret == 0 ) { domain_pause(d); ret = arch_set_info_guest(v, c); domain_unpause(d); } svc_out: xfree(c.nat); rcu_unlock_domain(d); } break; case XEN_DOMCTL_pausedomain: { struct domain *d = rcu_lock_domain_by_id(op->domain); ret = -ESRCH; if ( d != NULL ) { ret = xsm_pausedomain(d); if ( ret ) goto pausedomain_out; ret = -EINVAL; if ( d != current->domain ) { domain_pause_by_systemcontroller(d); ret = 0; } pausedomain_out: rcu_unlock_domain(d); } } break; case XEN_DOMCTL_unpausedomain: { struct domain *d = rcu_lock_domain_by_id(op->domain); ret = -ESRCH; if ( d == NULL ) break; ret = xsm_unpausedomain(d); if ( ret ) { rcu_unlock_domain(d); break; } domain_unpause_by_systemcontroller(d); rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_resumedomain: { struct domain *d = rcu_lock_domain_by_id(op->domain); ret = -ESRCH; if ( d == NULL ) break; ret = xsm_resumedomain(d); if ( ret ) { rcu_unlock_domain(d); break; } domain_resume(d); rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_createdomain: { struct domain *d; domid_t dom; static domid_t rover = 0; unsigned int domcr_flags; ret = -EINVAL; if ( supervisor_mode_kernel || (op->u.createdomain.flags & ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap | XEN_DOMCTL_CDF_s3_integrity)) ) break; dom = op->domain; if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) ) { ret = -EINVAL; if ( !is_free_domid(dom) ) break; } else { for ( dom = rover + 1; dom != rover; dom++ ) { if ( dom == DOMID_FIRST_RESERVED ) dom = 0; if ( is_free_domid(dom) ) break; } ret = -ENOMEM; if ( dom == rover ) break; rover = dom; } domcr_flags = 0; if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest ) domcr_flags |= DOMCRF_hvm; if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap ) domcr_flags |= DOMCRF_hap; if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity ) domcr_flags |= DOMCRF_s3_integrity; ret = -ENOMEM; d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref); if ( d == NULL ) break; ret = 0; memcpy(d->handle, op->u.createdomain.handle, sizeof(xen_domain_handle_t)); op->domain = d->domain_id; if ( copy_to_guest(u_domctl, op, 1) ) ret = -EFAULT; } break; case XEN_DOMCTL_max_vcpus: { struct domain *d; unsigned int i, max = op->u.max_vcpus.max, cpu; ret = -ESRCH; if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) break; ret = -EINVAL; if ( (d == current->domain) || /* no domain_pause() */ (max > MAX_VIRT_CPUS) || (is_hvm_domain(d) && max > XEN_LEGACY_MAX_VCPUS) ) { rcu_unlock_domain(d); break; } ret = xsm_max_vcpus(d); if ( ret ) { rcu_unlock_domain(d); break; } /* Until Xenoprof can dynamically grow its vcpu-s array... */ if ( d->xenoprof ) { rcu_unlock_domain(d); ret = -EAGAIN; break; } /* Needed, for example, to ensure writable p.t. state is synced. */ domain_pause(d); /* We cannot reduce maximum VCPUs. */ ret = -EINVAL; if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) ) goto maxvcpu_out; /* * For now don't allow increasing the vcpu count from a non-zero * value: This code and all readers of d->vcpu would otherwise need * to be converted to use RCU, but at present there's no tools side * code path that would issue such a request. */ ret = -EBUSY; if ( (d->max_vcpus > 0) && (max > d->max_vcpus) ) goto maxvcpu_out; ret = -ENOMEM; if ( max > d->max_vcpus ) { struct vcpu **vcpus; BUG_ON(d->vcpu != NULL); BUG_ON(d->max_vcpus != 0); if ( (vcpus = xmalloc_array(struct vcpu *, max)) == NULL ) goto maxvcpu_out; memset(vcpus, 0, max * sizeof(*vcpus)); /* Install vcpu array /then/ update max_vcpus. 
*/ d->vcpu = vcpus; wmb(); d->max_vcpus = max; } for ( i = 0; i < max; i++ ) { if ( d->vcpu[i] != NULL ) continue; cpu = (i == 0) ? default_vcpu0_location() : cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map); if ( alloc_vcpu(d, i, cpu) == NULL ) goto maxvcpu_out; } ret = 0; maxvcpu_out: domain_unpause(d); rcu_unlock_domain(d); } break; case XEN_DOMCTL_destroydomain: { struct domain *d = rcu_lock_domain_by_id(op->domain); ret = -ESRCH; if ( d != NULL ) { ret = xsm_destroydomain(d) ? : domain_kill(d); rcu_unlock_domain(d); } } break; case XEN_DOMCTL_setvcpuaffinity: case XEN_DOMCTL_getvcpuaffinity: { domid_t dom = op->domain; struct domain *d = rcu_lock_domain_by_id(dom); struct vcpu *v; cpumask_t new_affinity; ret = -ESRCH; if ( d == NULL ) break; ret = xsm_vcpuaffinity(op->cmd, d); if ( ret ) goto vcpuaffinity_out; ret = -EINVAL; if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus ) goto vcpuaffinity_out; ret = -ESRCH; if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL ) goto vcpuaffinity_out; if ( op->cmd == XEN_DOMCTL_setvcpuaffinity ) { xenctl_cpumap_to_cpumask( &new_affinity, &op->u.vcpuaffinity.cpumap); ret = vcpu_set_affinity(v, &new_affinity); } else { cpumask_to_xenctl_cpumap( &op->u.vcpuaffinity.cpumap, &v->cpu_affinity); ret = 0; } vcpuaffinity_out: rcu_unlock_domain(d); } break; case XEN_DOMCTL_scheduler_op: { struct domain *d; ret = -ESRCH; if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) break; ret = xsm_scheduler(d); if ( ret ) goto scheduler_op_out; ret = sched_adjust(d, &op->u.scheduler_op); if ( copy_to_guest(u_domctl, op, 1) ) ret = -EFAULT; scheduler_op_out: rcu_unlock_domain(d); } break; case XEN_DOMCTL_getdomaininfo: { struct domain *d; domid_t dom = op->domain; rcu_read_lock(&domlist_read_lock); for_each_domain ( d ) if ( d->domain_id >= dom ) break; if ( d == NULL ) { rcu_read_unlock(&domlist_read_lock); ret = -ESRCH; break; } ret = xsm_getdomaininfo(d); if ( ret ) goto getdomaininfo_out; getdomaininfo(d, &op->u.getdomaininfo); op->domain = op->u.getdomaininfo.domain; if ( copy_to_guest(u_domctl, op, 1) ) ret = -EFAULT; getdomaininfo_out: rcu_read_unlock(&domlist_read_lock); } break; case XEN_DOMCTL_getvcpucontext: { vcpu_guest_context_u c = { .nat = NULL }; struct domain *d; struct vcpu *v; ret = -ESRCH; if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) break; ret = xsm_getvcpucontext(d); if ( ret ) goto getvcpucontext_out; ret = -EINVAL; if ( op->u.vcpucontext.vcpu >= d->max_vcpus ) goto getvcpucontext_out; ret = -ESRCH; if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL ) goto getvcpucontext_out; ret = -ENODATA; if ( !v->is_initialised ) goto getvcpucontext_out; #ifdef CONFIG_COMPAT BUILD_BUG_ON(sizeof(struct vcpu_guest_context) < sizeof(struct compat_vcpu_guest_context)); #endif ret = -ENOMEM; if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL ) goto getvcpucontext_out; if ( v != current ) vcpu_pause(v); arch_get_info_guest(v, c); ret = 0; if ( v != current ) vcpu_unpause(v); #ifdef CONFIG_COMPAT if ( !is_pv_32on64_vcpu(v) ) ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1); else ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt, void), c.cmp, 1); #else ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1); #endif if ( copy_to_guest(u_domctl, op, 1) || ret ) ret = -EFAULT; getvcpucontext_out: xfree(c.nat); rcu_unlock_domain(d); } break; case XEN_DOMCTL_getvcpuinfo: { struct domain *d; struct vcpu *v; struct vcpu_runstate_info runstate; ret = -ESRCH; if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) break; ret = 
xsm_getvcpuinfo(d); if ( ret ) goto getvcpuinfo_out; ret = -EINVAL; if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus ) goto getvcpuinfo_out; ret = -ESRCH; if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL ) goto getvcpuinfo_out; vcpu_runstate_get(v, &runstate); op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags); op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags); op->u.getvcpuinfo.running = v->is_running; op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running]; op->u.getvcpuinfo.cpu = v->processor; ret = 0; if ( copy_to_guest(u_domctl, op, 1) ) ret = -EFAULT; getvcpuinfo_out: rcu_unlock_domain(d); } break; case XEN_DOMCTL_max_mem: { struct domain *d; unsigned long new_max; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; ret = xsm_setdomainmaxmem(d); if ( ret ) goto max_mem_out; ret = -EINVAL; new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10); spin_lock(&d->page_alloc_lock); if ( new_max >= d->tot_pages ) { d->max_pages = new_max; ret = 0; } spin_unlock(&d->page_alloc_lock); max_mem_out: rcu_unlock_domain(d); } break; case XEN_DOMCTL_setdomainhandle: { struct domain *d; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; ret = xsm_setdomainhandle(d); if ( ret ) { rcu_unlock_domain(d); break; } memcpy(d->handle, op->u.setdomainhandle.handle, sizeof(xen_domain_handle_t)); rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_setdebugging: { struct domain *d; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; ret = -EINVAL; if ( d == current->domain ) /* no domain_pause() */ { rcu_unlock_domain(d); break; } ret = xsm_setdebugging(d); if ( ret ) { rcu_unlock_domain(d); break; } domain_pause(d); d->debugger_attached = !!op->u.setdebugging.enable; domain_unpause(d); /* causes guest to latch new status */ rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_irq_permission: { struct domain *d; unsigned int pirq = op->u.irq_permission.pirq; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; if ( pirq >= d->nr_pirqs ) ret = -EINVAL; else if ( op->u.irq_permission.allow_access ) ret = irq_permit_access(d, pirq); else ret = irq_deny_access(d, pirq); rcu_unlock_domain(d); } break; case XEN_DOMCTL_iomem_permission: { struct domain *d; unsigned long mfn = op->u.iomem_permission.first_mfn; unsigned long nr_mfns = op->u.iomem_permission.nr_mfns; ret = -EINVAL; if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */ break; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; if ( op->u.iomem_permission.allow_access ) ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1); else ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1); rcu_unlock_domain(d); } break; case XEN_DOMCTL_settimeoffset: { struct domain *d; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; ret = xsm_domain_settime(d); if ( ret ) { rcu_unlock_domain(d); break; } domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds); rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_set_target: { struct domain *d, *e; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d == NULL ) break; ret = -ESRCH; e = get_domain_by_id(op->u.set_target.target); if ( e == NULL ) goto set_target_out; ret = -EINVAL; if ( (d == e) || (d->target != NULL) ) { put_domain(e); goto set_target_out; } ret = xsm_set_target(d, e); if ( ret ) { put_domain(e); goto set_target_out; } /* Hold reference on @e until we destroy @d. 
*/ d->target = e; ret = 0; set_target_out: rcu_unlock_domain(d); } break; case XEN_DOMCTL_subscribe: { struct domain *d; ret = -ESRCH; d = rcu_lock_domain_by_id(op->domain); if ( d != NULL ) { d->suspend_evtchn = op->u.subscribe.port; rcu_unlock_domain(d); ret = 0; } } break; default: ret = arch_do_domctl(op, u_domctl); break; } domctl_lock_release(); return ret; } /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */