path: root/passes/cmds/connect.cc
blob: f93bada27904a27f873aaacf3415835d7b9909ed (plain)
/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2012  Clifford Wolf <clifford@clifford.at>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted, provided that the above
 *  copyright notice and this permission notice appear in all copies.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "kernel/register.h"
#include "kernel/rtlil.h"
#include "kernel/sigtools.h"
#include "kernel/celltypes.h"
#include "kernel/log.h"

USING_YOSYS_NAMESPACE
PRIVATE_NAMESPACE_BEGIN

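// Disconnect all drivers of `sig' in the given module: every cell output port
// and every connection that currently drives bits of `sig' is redirected to a
// fresh dummy wire, leaving those bits of `sig' undriven.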
static void unset_drivers(RTLIL::Design *design, RTLIL::Module *module, SigMap &sigmap, RTLIL::SigSpec &sig)
{
	CellTypes ct(design);

	RTLIL::Wire *dummy_wire = module->addWire(NEW_ID, sig.size());

	for (auto &it : module->cells_)
	for (auto &port : it.second->connections_)
		if (ct.cell_output(it.second->type, port.first))
			sigmap(port.second).replace(sig, dummy_wire, &port.second);

	for (auto &conn : module->connections_)
		sigmap(conn.first).replace(sig, dummy_wire, &conn.first);
}

struct ConnectPass : public Pass {
	ConnectPass() : Pass("connect", "create or remove connections") { }
	void help() YS_OVERRIDE
	{
		//   |---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|
		log("\n");
		log("    connect [-nomap] [-nounset] -set <lhs-expr> <rhs-expr>\n");
		log("\n");
		log("Create a connection. This is equivalent to adding the statement 'assign\n");
		log("<lhs-expr> = <rhs-expr>;' to the Verilog input. Per default, all existing\n");
		log("drivers for <lhs-expr> are unconnected. This can be overwritten by using\n");
		log("the -nounset option.\n");
		log("\n");
		log("\n");
		log("    connect [-nomap] -unset <expr>\n");
		log("\n");
		log("Unconnect all existing drivers for the specified expression.\n");
		log("\n");
		log("\n");
		log("    connect [-nomap] -port <cell> <port> <expr>\n");
		log("\n");
		log("Connect the specified cell port to the specified cell port.\n");
		log("\n");
		log("\n");
		log("Per default signal alias names are resolved and all signal names are mapped\n");
		log("the the signal name of the primary driver. Using the -nomap option deactivates\n");
		log("this behavior.\n");
		log("\n");
		log("The connect command operates in one module only. Either only one module must\n");
		log("be selected or an active module must be set using the 'cd' command.\n");
		log("\n");
		log("This command does not operate on module with processes.\n");
		log("\n");
	}
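	// A minimal usage sketch (not part of the original file); the signal and
	// cell names below are hypothetical placeholders for a design with a
	// single selected module:
	//
	//   connect -set foo bar           # like 'assign foo = bar;', old drivers of foo removed
	//   connect -nounset -set foo bar  # same, but keep the existing drivers of foo
	//   connect -unset foo             # disconnect all drivers of foo
	//   connect -port mycell A foo     # connect port A of cell mycell to signal foo
	//   connect -nomap -unset foo      # same as -unset, without resolving alias names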
	void execute(std::vector<std::string> args, RTLIL::Design *design) YS_OVERRIDE
	{
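		// This pass works on exactly one selected module, and that module must
		// not contain processes (see the checks below).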
		RTLIL::Module *module = NULL;
		for (auto &it : design->modules_) {
			if (!design->selected(it.second))
				continue;
			if (module != NULL)
				log_cmd_error("Multiple modules selected: %s, %s\n", RTLIL::id2cstr(module->name), RTLIL::id2cstr(it.first));
			module = it.second;
		}
		if (module == NULL)
			log_cmd_error("No modules selected.\n");
		if (!module->processes.empty())
			log_cmd_error("Found processes in selected module.\n");

		bool flag_nounset = false, flag_nomap = false;
		std::string set_lhs, set_rhs, unset_expr;
		std::string port_cell, port_port, port_expr;

		size_t argidx;
		for (argidx = 1; argidx < args.size(); argidx++)
		{
			std::string arg = args[argidx];
			if (arg == "-nounset") {
				flag_nounset = true;
				continue;
			}
			if (arg == "-nomap") {
				flag_nomap = true;
				continue;
			}
			if (arg == "-set" && argidx+2 < args.size()) {
				set_lhs = args[++argidx];
				set_rhs = args[++argidx];
				continue;
			}
			if (arg == "-unset" && argidx+1 < args.size()) {
				unset_expr = args[++argidx];
				continue;
			}
			if (arg == "-port" && argidx+3 < args.size()) {
				port_cell = args[++argidx];
				port_port = args[++argidx];
				port_expr = args[++argidx];
				continue;
			}
			break;
		}

		// Build the alias map: map every connected (driven) bit to the bit that
		// drives it, so expressions are normalized to the primary driver name.
		SigMap sigmap;
		if (!flag_nomap)
			for (auto &it : module->connections()) {
				std::vector<RTLIL::SigBit> lhs = it.first.to_sigbit_vector();
				std::vector<RTLIL::SigBit> rhs = it.second.to_sigbit_vector();
				for (size_t i = 0; i < lhs.size(); i++)
					if (rhs[i].wire != NULL)
						sigmap.add(lhs[i], rhs[i]);
			}

		if (!set_lhs.empty())
		{
			if (!unset_expr.empty() || !port_cell.empty())
				log_cmd_error("Can't use -set together with -unset and/or -port.\n");

			RTLIL::SigSpec sig_lhs, sig_rhs;
			if (!RTLIL::SigSpec::parse_sel(sig_lhs, design, module, set_lhs))
				log_cmd_error("Failed to parse set lhs expression `%s'.\n", set_lhs.c_str());
			if (!RTLIL::SigSpec::parse_rhs(sig_lhs, sig_rhs, module, set_rhs))
				log_cmd_error("Failed to parse set rhs expression `%s'.\n", set_rhs.c_str());

			sigmap.apply(sig_lhs);
			sigmap.apply(sig_rhs);

			if (!flag_nounset)
				unset_drivers(design, module, sigmap, sig_lhs);

			module->connect(RTLIL::SigSig(sig_lhs, sig_rhs));
		}
		else
		if (!unset_expr.empty())
		{
			if (!port_cell.empty() || flag_nounset)
				log_cmd_error("Can't use -unset together with -port and/or -nounset.\n");

			RTLIL::SigSpec sig;
			if (!RTLIL::SigSpec::parse_sel(sig, design, module, unset_expr))
				log_cmd_error("Failed to parse unset expression `%s'.\n", unset_expr.c_str());

			sigmap.apply(sig);
			unset_drivers(design, module, sigmap, sig);
		}
		else
		if (!port_cell.empty())
		{
			if (flag_nounset)
				log_cmd_error("Can't use -port together with -nounset.\n");

			if (module->cells_.count(RTLIL::escape_id(port_cell)) == 0)
				log_cmd_error("Can't find cell %s.\n", port_cell.c_str());

			RTLIL::SigSpec sig;
			if (!RTLIL::SigSpec::parse_sel(sig, design, module, port_expr))
				log_cmd_error("Failed to parse port expression `%s'.\n", port_expr.c_str());

			module->cells_.at(RTLIL::escape_id(port_cell))->setPort(RTLIL::escape_id(port_port), sigmap(sig));
		}
		else
			log_cmd_error("Expected -set, -unset, or -port.\n");
	}
} ConnectPass;

PRIVATE_NAMESPACE_END