path: root/techlibs/ice40/ice40_opt.cc

/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2012  Clifford Wolf <clifford@clifford.at>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted, provided that the above
 *  copyright notice and this permission notice appear in all copies.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "kernel/yosys.h"
#include "kernel/sigtools.h"
#include "passes/techmap/simplemap.h"
#include <stdlib.h>
#include <stdio.h>

USING_YOSYS_NAMESPACE
PRIVATE_NAMESPACE_BEGIN

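// Return the first bit of a signal, or constant zero for an empty signal.
// This lets unconnected SB_* input ports be treated as driven low.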
static SigBit get_bit_or_zero(const SigSpec &sig)
{
	if (GetSize(sig) == 0)
		return State::S0;
	return sig[0];
}

static void run_ice40_opts(Module *module)
{
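	// optimized_co collects the CO bits of carry cells folded away below.
	// SB_LUT4 cells are only collected here and processed in a second phase,
	// so they can react to carry outputs removed in this first phase.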
	pool<SigBit> optimized_co;
	vector<Cell*> sb_lut_cells;
	SigMap sigmap(module);

	for (auto cell : module->selected_cells())
	{
		if (cell->type == "\\SB_LUT4")
		{
			sb_lut_cells.push_back(cell);
			continue;
		}

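		// SB_CARRY computes CO as the majority of I0, I1 and CI, so two
		// constant inputs fully determine the output: two zeros force CO=0,
		// two ones force CO=1, and one of each makes CO track the remaining
		// non-constant input.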
		if (cell->type == "\\SB_CARRY")
		{
			SigSpec non_const_inputs, replacement_output;
			int count_zeros = 0, count_ones = 0;

			SigBit inbit[3] = {
				get_bit_or_zero(cell->getPort("\\I0")),
				get_bit_or_zero(cell->getPort("\\I1")),
				get_bit_or_zero(cell->getPort("\\CI"))
			};
			for (int i = 0; i < 3; i++)
				if (inbit[i].wire == nullptr) {
					if (inbit[i] == State::S1)
						count_ones++;
					else
						count_zeros++;
				} else
					non_const_inputs.append(inbit[i]);

			if (count_zeros >= 2)
				replacement_output = State::S0;
			else if (count_ones >= 2)
				replacement_output = State::S1;
			else if (GetSize(non_const_inputs) == 1)
				replacement_output = non_const_inputs;

			if (GetSize(replacement_output)) {
				optimized_co.insert(sigmap(cell->getPort("\\CO")[0]));
				module->connect(cell->getPort("\\CO")[0], replacement_output);
				module->design->scratchpad_set_bool("opt.did_something", true);
				log("Optimized away SB_CARRY cell %s.%s: CO=%s\n",
						log_id(module), log_id(cell), log_signal(replacement_output));
				module->remove(cell);
			}
			continue;
		}

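		// Apply the same majority-function folding to the internal full-adder
		// cell. When the carry output can be replaced, the cell is rewritten
		// below as a plain $lut computing the sum output.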
		if (cell->type == "$__ICE40_FULL_ADDER")
		{
			SigSpec non_const_inputs, replacement_output;
			int count_zeros = 0, count_ones = 0;

			SigBit inbit[3] = {
				cell->getPort("\\A"),
				cell->getPort("\\B"),
				cell->getPort("\\CI")
			};
			for (int i = 0; i < 3; i++)
				if (inbit[i].wire == nullptr) {
					if (inbit[i] == State::S1)
						count_ones++;
					else
						count_zeros++;
				} else
					non_const_inputs.append(inbit[i]);

			if (count_zeros >= 2)
				replacement_output = State::S0;
			else if (count_ones >= 2)
				replacement_output = State::S1;
			else if (GetSize(non_const_inputs) == 1)
				replacement_output = non_const_inputs;

			if (GetSize(replacement_output)) {
				optimized_co.insert(sigmap(cell->getPort("\\CO")[0]));
				module->connect(cell->getPort("\\CO")[0], replacement_output);
				module->design->scratchpad_set_bool("opt.did_something", true);
				log("Optimized $__ICE40_FULL_ADDER cell back to logic (without SB_CARRY) %s.%s: CO=%s\n",
						log_id(module), log_id(cell), log_signal(replacement_output));
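				// Rewrite the surviving sum output as O = A ^ B ^ CI. The
				// 16-bit LUT_INIT is the 4-input odd-parity table; with A[3]
				// tied to constant zero it reduces to the 3-input XOR of the
				// adder inputs.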
				cell->type = "$lut";
				cell->setPort("\\A", { RTLIL::S0, inbit[0], inbit[1], inbit[2] });
				cell->setPort("\\Y", cell->getPort("\\O"));
				cell->unsetPort("\\B");
				cell->unsetPort("\\CI");
				cell->unsetPort("\\CO");
				cell->unsetPort("\\O");
				cell->setParam("\\LUT", RTLIL::Const::from_string("0110100110010110"));
				cell->setParam("\\WIDTH", 4);
			}
			continue;
		}
	}

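	// Second phase: lower an SB_LUT4 back to a generic $lut when any of its
	// inputs is driven by a carry output that was optimized away above, or
	// when all of its inputs are constant, so that the generic opt passes
	// can fold the logic further.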
	for (auto cell : sb_lut_cells)
	{
		SigSpec inbits;

		inbits.append(get_bit_or_zero(cell->getPort("\\I0")));
		inbits.append(get_bit_or_zero(cell->getPort("\\I1")));
		inbits.append(get_bit_or_zero(cell->getPort("\\I2")));
		inbits.append(get_bit_or_zero(cell->getPort("\\I3")));
		sigmap.apply(inbits);

		if (optimized_co.count(inbits[0])) goto remap_lut;
		if (optimized_co.count(inbits[1])) goto remap_lut;
		if (optimized_co.count(inbits[2])) goto remap_lut;
		if (optimized_co.count(inbits[3])) goto remap_lut;

		if (!sigmap(inbits).is_fully_const())
			continue;

	remap_lut:
		module->design->scratchpad_set_bool("opt.did_something", true);
		log("Mapping SB_LUT4 cell %s.%s back to logic.\n", log_id(module), log_id(cell));

		cell->type = "$lut";
		cell->setParam("\\WIDTH", 4);
		cell->setParam("\\LUT", cell->getParam("\\LUT_INIT"));
		cell->unsetParam("\\LUT_INIT");

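		// SigSpec initializer lists are MSB-first, so I3 becomes A[3] and I0
		// becomes A[0], preserving the bit ordering that LUT_INIT expects.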
		cell->setPort("\\A", SigSpec({
			get_bit_or_zero(cell->getPort("\\I3")),
			get_bit_or_zero(cell->getPort("\\I2")),
			get_bit_or_zero(cell->getPort("\\I1")),
			get_bit_or_zero(cell->getPort("\\I0"))
		}));
		cell->setPort("\\Y", cell->getPort("\\O")[0]);
		cell->unsetPort("\\I0");
		cell->unsetPort("\\I1");
		cell->unsetPort("\\I2");
		cell->unsetPort("\\I3");
		cell->unsetPort("\\O");

		cell->check();
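		// simplemap_lut() instantiates gate-level logic implementing the LUT;
		// the now-redundant $lut cell itself is removed afterwards.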
		simplemap_lut(module, cell);
		module->remove(cell);
	}
}

struct Ice40OptPass : public Pass {
	Ice40OptPass() : Pass("ice40_opt", "iCE40: perform simple optimizations") { }
	void help() YS_OVERRIDE
	{
		//   |---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|
		log("\n");
		log("    ice40_opt [options] [selection]\n");
		log("\n");
		log("This command executes the following script:\n");
		log("\n");
		log("    do\n");
		log("        <ice40 specific optimizations>\n");
		log("        opt_expr -mux_undef -undriven [-full]\n");
		log("        opt_merge\n");
		log("        opt_rmdff\n");
		log("        opt_clean\n");
		log("    while <changed design>\n");
		log("\n");
	}
	void execute(std::vector<std::string> args, RTLIL::Design *design) YS_OVERRIDE
	{
		string opt_expr_args = "-mux_undef -undriven";

		log_header(design, "Executing ICE40_OPT pass (performing simple optimizations).\n");
		log_push();

		size_t argidx;
		for (argidx = 1; argidx < args.size(); argidx++) {
			if (args[argidx] == "-full") {
				opt_expr_args += " -full";
				continue;
			}
			break;
		}
		extra_args(args, argidx, design);

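		// Iterate to a fixed point: rerun the ICE40-specific folds and the
		// generic opt passes until a full iteration makes no further changes,
		// as tracked via the "opt.did_something" scratchpad flag.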
		while (1)
		{
			design->scratchpad_unset("opt.did_something");

			log_header(design, "Running ICE40 specific optimizations.\n");
			for (auto module : design->selected_modules())
				run_ice40_opts(module);

			Pass::call(design, "opt_expr " + opt_expr_args);
			Pass::call(design, "opt_merge");
			Pass::call(design, "opt_rmdff");
			Pass::call(design, "opt_clean");

			if (design->scratchpad_get_bool("opt.did_something") == false)
				break;

			log_header(design, "Rerunning OPT passes. (Removed registers in this run.)\n");
		}

		design->optimize();
		design->sort();
		design->check();

		log_header(design, "Finished OPT passes. (There is nothing left to do.)\n");
		log_pop();
	}
} Ice40OptPass;

PRIVATE_NAMESPACE_END
="n">alloc->port = port; out: spin_unlock(&d->evtchn_lock); put_domain(d); return rc; } static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) { struct evtchn *lchn, *rchn; struct domain *ld = current->domain, *rd; int lport, rport = bind->remote_port; domid_t rdom = bind->remote_dom; long rc; if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 ) return rc; if ( rdom == DOMID_SELF ) rdom = current->domain->domain_id; if ( (rd = get_domain_by_id(rdom)) == NULL ) return -ESRCH; /* Avoid deadlock by first acquiring lock of domain with smaller id. */ if ( ld < rd ) { spin_lock(&ld->evtchn_lock); spin_lock(&rd->evtchn_lock); } else { if ( ld != rd ) spin_lock(&rd->evtchn_lock); spin_lock(&ld->evtchn_lock); } if ( (lport = get_free_port(ld)) < 0 ) ERROR_EXIT(lport); lchn = evtchn_from_port(ld, lport); if ( !port_is_valid(rd, rport) ) ERROR_EXIT(-EINVAL); rchn = evtchn_from_port(rd, rport); if ( (rchn->state != ECS_UNBOUND) || (rchn->u.unbound.remote_domid != ld->domain_id) ) ERROR_EXIT(-EINVAL); lchn->u.interdomain.remote_dom = rd; lchn->u.interdomain.remote_port = (u16)rport; lchn->state = ECS_INTERDOMAIN; rchn->u.interdomain.remote_dom = ld; rchn->u.interdomain.remote_port = (u16)lport; rchn->state = ECS_INTERDOMAIN; /* * We may have lost notifications on the remote unbound port. Fix that up * here by conservatively always setting a notification on the local port. */ evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport); bind->local_port = lport; out: spin_unlock(&ld->evtchn_lock); if ( ld != rd ) spin_unlock(&rd->evtchn_lock); put_domain(rd); return rc; } static long evtchn_bind_virq(evtchn_bind_virq_t *bind) { struct evtchn *chn; struct vcpu *v; struct domain *d = current->domain; int port, virq = bind->virq, vcpu = bind->vcpu; long rc = 0; if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) ) return -EINVAL; if ( virq_is_global(virq) && (vcpu != 0) ) return -EINVAL; if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) ) return -ENOENT; spin_lock(&d->evtchn_lock); if ( v->virq_to_evtchn[virq] != 0 ) ERROR_EXIT(-EEXIST); if ( (port = get_free_port(d)) < 0 ) ERROR_EXIT(port); chn = evtchn_from_port(d, port); chn->state = ECS_VIRQ; chn->notify_vcpu_id = vcpu; chn->u.virq = virq; v->virq_to_evtchn[virq] = bind->port = port; out: spin_unlock(&d->evtchn_lock); return rc; } static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) { struct evtchn *chn; struct domain *d = current->domain; int port, vcpu = bind->vcpu; long rc = 0; if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) ) return -ENOENT; spin_lock(&d->evtchn_lock); if ( (port = get_free_port(d)) < 0 ) ERROR_EXIT(port); chn = evtchn_from_port(d, port); chn->state = ECS_IPI; chn->notify_vcpu_id = vcpu; bind->port = port; out: spin_unlock(&d->evtchn_lock); return rc; } static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) { struct evtchn *chn; struct domain *d = current->domain; int port, pirq = bind->pirq; long rc; if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) ) return -EINVAL; if ( !irq_access_permitted(d, pirq) ) return -EPERM; spin_lock(&d->evtchn_lock); if ( d->pirq_to_evtchn[pirq] != 0 ) ERROR_EXIT(-EEXIST); if ( (port = get_free_port(d)) < 0 ) ERROR_EXIT(port); chn = evtchn_from_port(d, port); d->pirq_to_evtchn[pirq] = port; rc = pirq_guest_bind(d->vcpu[0], pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE)); if ( rc != 0 ) { d->pirq_to_evtchn[pirq] = 0; goto out; } chn->state = ECS_PIRQ; chn->u.pirq = pirq; bind->port = port; out: 
spin_unlock(&d->evtchn_lock); return rc; } static long __evtchn_close(struct domain *d1, int port1) { struct domain *d2 = NULL; struct vcpu *v; struct evtchn *chn1, *chn2; int port2; long rc = 0; again: spin_lock(&d1->evtchn_lock); if ( !port_is_valid(d1, port1) ) { rc = -EINVAL; goto out; } chn1 = evtchn_from_port(d1, port1); /* Guest cannot close a Xen-attached event channel. */ if ( unlikely(chn1->consumer_is_xen) ) { rc = -EINVAL; goto out; } switch ( chn1->state ) { case ECS_FREE: case ECS_RESERVED: rc = -EINVAL; goto out; case ECS_UNBOUND: break; case ECS_PIRQ: if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 ) d1->pirq_to_evtchn[chn1->u.pirq] = 0; break; case ECS_VIRQ: for_each_vcpu ( d1, v ) if ( v->virq_to_evtchn[chn1->u.virq] == port1 ) v->virq_to_evtchn[chn1->u.virq] = 0; break; case ECS_IPI: break; case ECS_INTERDOMAIN: if ( d2 == NULL ) { d2 = chn1->u.interdomain.remote_dom; /* If we unlock d1 then we could lose d2. Must get a reference. */ if ( unlikely(!get_domain(d2)) ) { /* * Failed to obtain a reference. No matter: d2 must be dying * and so will close this event channel for us. */ d2 = NULL; goto out; } if ( d1 < d2 ) { spin_lock(&d2->evtchn_lock); } else if ( d1 != d2 ) { spin_unlock(&d1->evtchn_lock); spin_lock(&d2->evtchn_lock); goto again; } } else if ( d2 != chn1->u.interdomain.remote_dom ) { /* * We can only get here if the port was closed and re-bound after * unlocking d1 but before locking d2 above. We could retry but * it is easier to return the same error as if we had seen the * port in ECS_CLOSED. It must have passed through that state for * us to end up here, so it's a valid error to return. */ BUG_ON(d1 != current->domain); rc = -EINVAL; goto out; } port2 = chn1->u.interdomain.remote_port; BUG_ON(!port_is_valid(d2, port2)); chn2 = evtchn_from_port(d2, port2); BUG_ON(chn2->state != ECS_INTERDOMAIN); BUG_ON(chn2->u.interdomain.remote_dom != d1); chn2->state = ECS_UNBOUND; chn2->u.unbound.remote_domid = d1->domain_id; break; default: BUG(); } /* Reset binding to vcpu0 when the channel is freed. */ chn1->state = ECS_FREE; chn1->notify_vcpu_id = 0; out: if ( d2 != NULL ) { if ( d1 != d2 ) spin_unlock(&d2->evtchn_lock); put_domain(d2); } spin_unlock(&d1->evtchn_lock); return rc; } static long evtchn_close(evtchn_close_t *close) { return __evtchn_close(current->domain, close->port); } long evtchn_send(unsigned int lport) { struct evtchn *lchn, *rchn; struct domain *ld = current->domain, *rd; struct vcpu *rvcpu; int rport, ret = 0; spin_lock(&ld->evtchn_lock); if ( unlikely(!port_is_valid(ld, lport)) ) { spin_unlock(&ld->evtchn_lock); return -EINVAL; } lchn = evtchn_from_port(ld, lport); /* Guest cannot send via a Xen-attached event channel. */ if ( unlikely(lchn->consumer_is_xen) ) { spin_unlock(&ld->evtchn_lock); return -EINVAL; } switch ( lchn->state ) { case ECS_INTERDOMAIN: rd = lchn->u.interdomain.remote_dom; rport = lchn->u.interdomain.remote_port; rchn = evtchn_from_port(rd, rport); rvcpu = rd->vcpu[rchn->notify_vcpu_id]; if ( rchn->consumer_is_xen ) { /* Xen consumers need notification only if they are blocked. 
*/ if ( test_and_clear_bit(_VCPUF_blocked_in_xen, &rvcpu->vcpu_flags) ) vcpu_wake(rvcpu); } else { evtchn_set_pending(rvcpu, rport); } break; case ECS_IPI: evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport); break; case ECS_UNBOUND: /* silently drop the notification */ break; default: ret = -EINVAL; } spin_unlock(&ld->evtchn_lock); return ret; } void evtchn_set_pending(struct vcpu *v, int port) { struct domain *d = v->domain; shared_info_t *s = d->shared_info; /* * The following bit operations must happen in strict order. * NB. On x86, the atomic bit operations also act as memory barriers. * There is therefore sufficiently strict ordering for this architecture -- * others may require explicit memory barriers. */ if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) ) return; if ( !test_bit (port, __shared_info_addr(d, s, evtchn_mask)) && !test_and_set_bit(port / BITS_PER_GUEST_LONG(d), vcpu_info_addr(v, evtchn_pending_sel)) ) { vcpu_mark_events_pending(v); } /* Check if some VCPU might be polling for this event. */ if ( unlikely(test_bit(_DOMF_polling, &d->domain_flags)) && likely(test_and_clear_bit(_DOMF_polling, &d->domain_flags)) ) { for_each_vcpu ( d, v ) if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) ) vcpu_unblock(v); } } void send_guest_vcpu_virq(struct vcpu *v, int virq) { int port; ASSERT(!virq_is_global(virq)); port = v->virq_to_evtchn[virq]; if ( unlikely(port == 0) ) return; evtchn_set_pending(v, port); } void send_guest_global_virq(struct domain *d, int virq) { int port; struct vcpu *v; struct evtchn *chn; ASSERT(virq_is_global(virq)); v = d->vcpu[0]; if ( unlikely(v == NULL) ) return; port = v->virq_to_evtchn[virq]; if ( unlikely(port == 0) ) return; chn = evtchn_from_port(d, port); evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port); } void send_guest_pirq(struct domain *d, int pirq) { int port = d->pirq_to_evtchn[pirq]; struct evtchn *chn; ASSERT(port != 0); chn = evtchn_from_port(d, port); evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port); } static long evtchn_status(evtchn_status_t *status) { struct domain *d; domid_t dom = status->dom; int port = status->port; struct evtchn *chn; long rc = 0; if ( dom == DOMID_SELF ) dom = current->domain->domain_id; else if ( !IS_PRIV(current->domain) ) return -EPERM; if ( (d = get_domain_by_id(dom)) == NULL ) return -ESRCH; spin_lock(&d->evtchn_lock); if ( !port_is_valid(d, port) ) { rc = -EINVAL; goto out; } chn = evtchn_from_port(d, port); switch ( chn->state ) { case ECS_FREE: case ECS_RESERVED: status->status = EVTCHNSTAT_closed; break; case ECS_UNBOUND: status->status = EVTCHNSTAT_unbound; status->u.unbound.dom = chn->u.unbound.remote_domid; break; case ECS_INTERDOMAIN: status->status = EVTCHNSTAT_interdomain; status->u.interdomain.dom = chn->u.interdomain.remote_dom->domain_id; status->u.interdomain.port = chn->u.interdomain.remote_port; break; case ECS_PIRQ: status->status = EVTCHNSTAT_pirq; status->u.pirq = chn->u.pirq; break; case ECS_VIRQ: status->status = EVTCHNSTAT_virq; status->u.virq = chn->u.virq; break; case ECS_IPI: status->status = EVTCHNSTAT_ipi; break; default: BUG(); } status->vcpu = chn->notify_vcpu_id; out: spin_unlock(&d->evtchn_lock); put_domain(d); return rc; } long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) { struct domain *d = current->domain; struct evtchn *chn; long rc = 0; if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) ) return -ENOENT; spin_lock(&d->evtchn_lock); if ( !port_is_valid(d, port) ) { rc = -EINVAL; goto out; } chn = 
evtchn_from_port(d, port); /* Guest cannot re-bind a Xen-attached event channel. */ if ( unlikely(chn->consumer_is_xen) ) { rc = -EINVAL; goto out; } switch ( chn->state ) { case ECS_VIRQ: if ( virq_is_global(chn->u.virq) ) chn->notify_vcpu_id = vcpu_id; else rc = -EINVAL; break; case ECS_UNBOUND: case ECS_INTERDOMAIN: case ECS_PIRQ: chn->notify_vcpu_id = vcpu_id; break; default: rc = -EINVAL; break; } out: spin_unlock(&d->evtchn_lock); return rc; } static long evtchn_unmask(evtchn_unmask_t *unmask) { struct domain *d = current->domain; shared_info_t *s = d->shared_info; int port = unmask->port; struct vcpu *v; spin_lock(&d->evtchn_lock); if ( unlikely(!port_is_valid(d, port)) ) { spin_unlock(&d->evtchn_lock); return -EINVAL; } v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id]; /* * These operations must happen in strict order. Based on * include/xen/event.h:evtchn_set_pending(). */ if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) && test_bit (port, __shared_info_addr(d, s, evtchn_pending)) && !test_and_set_bit (port / BITS_PER_GUEST_LONG(d), vcpu_info_addr(v, evtchn_pending_sel)) ) { vcpu_mark_events_pending(v); } spin_unlock(&d->evtchn_lock); return 0; } static long evtchn_reset(evtchn_reset_t *r) { domid_t dom = r->dom; struct domain *d; int i; if ( dom == DOMID_SELF ) dom = current->domain->domain_id; else if ( !IS_PRIV(current->domain) ) return -EPERM; if ( (d = get_domain_by_id(dom)) == NULL ) return -ESRCH; for ( i = 0; port_is_valid(d, i); i++ ) (void)__evtchn_close(d, i); put_domain(d); return 0; } long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg) { long rc; switch ( cmd ) { case EVTCHNOP_alloc_unbound: { struct evtchn_alloc_unbound alloc_unbound; if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 ) return -EFAULT; rc = evtchn_alloc_unbound(&alloc_unbound); if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) ) rc = -EFAULT; /* Cleaning up here would be a mess! */ break; } case EVTCHNOP_bind_interdomain: { struct evtchn_bind_interdomain bind_interdomain; if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 ) return -EFAULT; rc = evtchn_bind_interdomain(&bind_interdomain); if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) ) rc = -EFAULT; /* Cleaning up here would be a mess! */ break; } case EVTCHNOP_bind_virq: { struct evtchn_bind_virq bind_virq; if ( copy_from_guest(&bind_virq, arg, 1) != 0 ) return -EFAULT; rc = evtchn_bind_virq(&bind_virq); if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) ) rc = -EFAULT; /* Cleaning up here would be a mess! */ break; } case EVTCHNOP_bind_ipi: { struct evtchn_bind_ipi bind_ipi; if ( copy_from_guest(&bind_ipi, arg, 1) != 0 ) return -EFAULT; rc = evtchn_bind_ipi(&bind_ipi); if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) ) rc = -EFAULT; /* Cleaning up here would be a mess! */ break; } case EVTCHNOP_bind_pirq: { struct evtchn_bind_pirq bind_pirq; if ( copy_from_guest(&bind_pirq, arg, 1) != 0 ) return -EFAULT; rc = evtchn_bind_pirq(&bind_pirq); if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) ) rc = -EFAULT; /* Cleaning up here would be a mess! 
*/ break; } case EVTCHNOP_close: { struct evtchn_close close; if ( copy_from_guest(&close, arg, 1) != 0 ) return -EFAULT; rc = evtchn_close(&close); break; } case EVTCHNOP_send: { struct evtchn_send send; if ( copy_from_guest(&send, arg, 1) != 0 ) return -EFAULT; rc = evtchn_send(send.port); break; } case EVTCHNOP_status: { struct evtchn_status status; if ( copy_from_guest(&status, arg, 1) != 0 ) return -EFAULT; rc = evtchn_status(&status); if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) ) rc = -EFAULT; break; } case EVTCHNOP_bind_vcpu: { struct evtchn_bind_vcpu bind_vcpu; if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 ) return -EFAULT; rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu); break; } case EVTCHNOP_unmask: { struct evtchn_unmask unmask; if ( copy_from_guest(&unmask, arg, 1) != 0 ) return -EFAULT; rc = evtchn_unmask(&unmask); break; } case EVTCHNOP_reset: { struct evtchn_reset reset; if ( copy_from_guest(&reset, arg, 1) != 0 ) return -EFAULT; rc = evtchn_reset(&reset); break; } default: rc = -ENOSYS; break; } return rc; } int alloc_unbound_xen_event_channel( struct vcpu *local_vcpu, domid_t remote_domid) { struct evtchn *chn; struct domain *d = local_vcpu->domain; int port; spin_lock(&d->evtchn_lock); if ( (port = get_free_port(d)) < 0 ) goto out; chn = evtchn_from_port(d, port); chn->state = ECS_UNBOUND; chn->consumer_is_xen = 1; chn->notify_vcpu_id = local_vcpu->vcpu_id; chn->u.unbound.remote_domid = remote_domid; out: spin_unlock(&d->evtchn_lock); return port; } void free_xen_event_channel( struct vcpu *local_vcpu, int port) { struct evtchn *chn; struct domain *d = local_vcpu->domain; spin_lock(&d->evtchn_lock); chn = evtchn_from_port(d, port); BUG_ON(!chn->consumer_is_xen); chn->consumer_is_xen = 0; spin_unlock(&d->evtchn_lock); (void)__evtchn_close(d, port); } void notify_via_xen_event_channel(int lport) { struct evtchn *lchn, *rchn; struct domain *ld = current->domain, *rd; int rport; spin_lock(&ld->evtchn_lock); ASSERT(port_is_valid(ld, lport)); lchn = evtchn_from_port(ld, lport); ASSERT(lchn->consumer_is_xen); if ( likely(lchn->state == ECS_INTERDOMAIN) ) { rd = lchn->u.interdomain.remote_dom; rport = lchn->u.interdomain.remote_port; rchn = evtchn_from_port(rd, rport); evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport); } spin_unlock(&ld->evtchn_lock); } int evtchn_init(struct domain *d) { spin_lock_init(&d->evtchn_lock); if ( get_free_port(d) != 0 ) return -EINVAL; evtchn_from_port(d, 0)->state = ECS_RESERVED; return 0; } void evtchn_destroy(struct domain *d) { int i; for ( i = 0; port_is_valid(d, i); i++ ) { evtchn_from_port(d, i)->consumer_is_xen = 0; (void)__evtchn_close(d, i); } for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ ) xfree(d->evtchn[i]); } /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */